Repository: monzo/phosphor Branch: master Commit: ee1fad17320a Files: 163 Total size: 1.1 MB Directory structure: gitextract_lw0vpfqr/ ├── .gitignore ├── .travis.yml ├── Dockerfile ├── Godeps/ │ ├── Godeps.json │ └── Readme ├── LICENCE ├── README.md ├── apps/ │ ├── phosphor/ │ │ └── main.go │ └── phosphord/ │ └── main.go ├── dist.sh ├── internal/ │ ├── util/ │ │ └── stringarray.go │ └── version/ │ └── version.go ├── phosphor/ │ ├── README.md │ ├── context.go │ ├── domain.go │ ├── handler.go │ ├── ingester.go │ ├── marshaling.go │ ├── memorystore.go │ ├── options.go │ ├── phosphor.go │ └── store.go ├── phosphord/ │ ├── README.md │ ├── forwarder.go │ ├── options.go │ ├── phosphord.go │ ├── test/ │ │ └── test.go │ └── transport/ │ ├── nsq.go │ └── transport.go ├── proto/ │ ├── trace.pb.go │ └── trace.proto ├── script/ │ └── buildprotobufs.sh └── vendor/ ├── code.google.com/ │ └── p/ │ └── snappy-go/ │ └── snappy/ │ ├── decode.go │ ├── encode.go │ ├── snappy.go │ └── snappy_test.go ├── github.com/ │ ├── bitly/ │ │ └── go-nsq/ │ │ ├── .travis.yml │ │ ├── ChangeLog.md │ │ ├── LICENSE │ │ ├── README.md │ │ ├── UPGRADING.md │ │ ├── api_request.go │ │ ├── command.go │ │ ├── command_test.go │ │ ├── config.go │ │ ├── config_test.go │ │ ├── conn.go │ │ ├── consumer.go │ │ ├── consumer_test.go │ │ ├── delegates.go │ │ ├── errors.go │ │ ├── message.go │ │ ├── mock_test.go │ │ ├── producer.go │ │ ├── producer_test.go │ │ ├── protocol.go │ │ ├── states.go │ │ ├── test/ │ │ │ ├── ca.pem │ │ │ ├── server.key │ │ │ └── server.pem │ │ ├── test.sh │ │ └── version.go │ ├── cihub/ │ │ └── seelog/ │ │ ├── LICENSE.txt │ │ ├── README.markdown │ │ ├── behavior_adaptive_test.go │ │ ├── behavior_adaptivelogger.go │ │ ├── behavior_asynclogger.go │ │ ├── behavior_asyncloop_test.go │ │ ├── behavior_asynclooplogger.go │ │ ├── behavior_asynctimer_test.go │ │ ├── behavior_asynctimerlogger.go │ │ ├── behavior_synclogger.go │ │ ├── behavior_synclogger_test.go │ │ ├── cfg_config.go │ │ ├── 
cfg_errors.go │ │ ├── cfg_logconfig.go │ │ ├── cfg_logconfig_test.go │ │ ├── cfg_parser.go │ │ ├── cfg_parser_test.go │ │ ├── common_closer.go │ │ ├── common_constraints.go │ │ ├── common_constraints_test.go │ │ ├── common_context.go │ │ ├── common_context_test.go │ │ ├── common_exception.go │ │ ├── common_exception_test.go │ │ ├── common_flusher.go │ │ ├── common_loglevel.go │ │ ├── dispatch_custom.go │ │ ├── dispatch_customdispatcher_test.go │ │ ├── dispatch_dispatcher.go │ │ ├── dispatch_filterdispatcher.go │ │ ├── dispatch_filterdispatcher_test.go │ │ ├── dispatch_splitdispatcher.go │ │ ├── dispatch_splitdispatcher_test.go │ │ ├── doc.go │ │ ├── format.go │ │ ├── format_test.go │ │ ├── internals_baseerror.go │ │ ├── internals_byteverifiers_test.go │ │ ├── internals_fsutils.go │ │ ├── internals_xmlnode.go │ │ ├── internals_xmlnode_test.go │ │ ├── log.go │ │ ├── logger.go │ │ ├── writers_bufferedwriter.go │ │ ├── writers_bufferedwriter_test.go │ │ ├── writers_connwriter.go │ │ ├── writers_consolewriter.go │ │ ├── writers_filewriter.go │ │ ├── writers_filewriter_test.go │ │ ├── writers_formattedwriter.go │ │ ├── writers_formattedwriter_test.go │ │ ├── writers_rollingfilewriter.go │ │ ├── writers_rollingfilewriter_test.go │ │ └── writers_smtpwriter.go │ ├── golang/ │ │ └── protobuf/ │ │ └── proto/ │ │ ├── Makefile │ │ ├── all_test.go │ │ ├── clone.go │ │ ├── clone_test.go │ │ ├── decode.go │ │ ├── encode.go │ │ ├── equal.go │ │ ├── equal_test.go │ │ ├── extensions.go │ │ ├── extensions_test.go │ │ ├── lib.go │ │ ├── message_set.go │ │ ├── message_set_test.go │ │ ├── pointer_reflect.go │ │ ├── pointer_unsafe.go │ │ ├── properties.go │ │ ├── proto3_proto/ │ │ │ ├── proto3.pb.go │ │ │ └── proto3.proto │ │ ├── proto3_test.go │ │ ├── size2_test.go │ │ ├── size_test.go │ │ ├── text.go │ │ ├── text_parser.go │ │ ├── text_parser_test.go │ │ └── text_test.go │ └── mreiferson/ │ ├── go-options/ │ │ ├── .travis.yml │ │ ├── LICENSE │ │ ├── README.md │ │ ├── example_test.go │ │ 
└── options.go │ └── go-snappystream/ │ ├── .travis.yml │ ├── LICENSE │ ├── README.md │ ├── fixturedata_test.go │ ├── reader.go │ ├── reader_test.go │ ├── readwrite_test.go │ ├── snappystream.go │ ├── writer.go │ └── writer_test.go └── golang.org/ └── x/ └── net/ └── context/ ├── context.go ├── context_test.go ├── ctxhttp/ │ ├── cancelreq.go │ ├── cancelreq_go14.go │ ├── ctxhttp.go │ └── ctxhttp_test.go └── withtimeout_test.go ================================================ FILE CONTENTS ================================================ ================================================ FILE: .gitignore ================================================ .DS_Store dist/ ================================================ FILE: .travis.yml ================================================ language: go go: - 1.5 - tip env: - GO15VENDOREXPERIMENT=1 install: true script: - "go install ./apps/phosphor" - "go install ./apps/phosphord" - "go test $(go list ./... | grep -v /vendor/)" ================================================ FILE: Dockerfile ================================================ FROM busybox ADD dist/docker/bin/ /phosphor_bin/ RUN cd / && ln -s /phosphor_bin/* . \ && cd /bin && ln -s /phosphor_bin/* . EXPOSE 7750 7760 7760/udp ================================================ FILE: Godeps/Godeps.json ================================================ { "ImportPath": "github.com/mondough/phosphor", "GoVersion": "go1.5", "Packages": [ "./..." 
], "Deps": [ { "ImportPath": "code.google.com/p/snappy-go/snappy", "Comment": "null-15", "Rev": "12e4b4183793ac4b061921e7980845e750679fd0" }, { "ImportPath": "github.com/bitly/go-nsq", "Comment": "v1.0.4-13-ga3aee1d", "Rev": "a3aee1d8e104a99d8fedffe2c45832df6a96d735" }, { "ImportPath": "github.com/cihub/seelog", "Comment": "go1.1-55-gc40fd0a", "Rev": "c40fd0af694fa48ec870c030f495c26a5bffcf55" }, { "ImportPath": "github.com/golang/protobuf/proto", "Rev": "16256d3ce6929458613798ee44b7914a3f59f5c6" }, { "ImportPath": "github.com/mreiferson/go-options", "Rev": "7c174072188d0cfbe6f01bb457626abb22bdff52" }, { "ImportPath": "github.com/mreiferson/go-snappystream", "Comment": "v0.2.2-8-ga5260a3", "Rev": "a5260a307b3e7dd583283c1e2717445244d506c7" }, { "ImportPath": "golang.org/x/net/context", "Rev": "47990a1ba55743e6ef1affd3a14e5bac8553615d" } ] } ================================================ FILE: Godeps/Readme ================================================ This directory tree is generated automatically by godep. Please do not edit. See https://github.com/tools/godep for more information. ================================================ FILE: LICENCE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright 2015 Matt Heath Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: README.md ================================================ ![](docs/logo.png) Phosphor is a Distributed Tracing system, similar to [Google's Dapper](https://research.google.com/pubs/pub36356.html), [Twitter's Zipkin](https://twitter.github.io/zipkin), and [Hailo's Trace Service](https://speakerdeck.com/mattheath/scaling-microservices-in-go-high-load-strategy-2015?slide=45). ![](https://travis-ci.org/mondough/phosphor.svg?branch=master) It is comprised of a few simple components: - [Phosphor Client](https://github.com/mondough/phosphor-go), used to send traces from applications - [Phosphor Daemon](https://github.com/mondough/phosphor/tree/master/phosphord), collects traces and forwards onto the main server - [Phosphor Server](https://github.com/mondough/phosphor/tree/master/phosphor), stores traces and aggregated trace information - Phosphor UI, view trace and debug information about your infrastructure ![Phosphor Architecture](docs/phosphor/outline.png) ## Dependencies - [NSQ](https://nsq.io) is used as the delivery transport between PhosphorD and the Phosphor Server ## Caveats This system is currently in development, and some components are not yet open source. In particular, the persistence layer in this repository is an in-memory mock, and is therefore not appropriate for production usage. Additional storage adaptors will be added in the near future. 
================================================ FILE: apps/phosphor/main.go ================================================ package main import ( "flag" "fmt" "math/rand" "os" "os/signal" "syscall" "time" "github.com/mondough/phosphor/internal/util" "github.com/mondough/phosphor/internal/version" "github.com/mondough/phosphor/phosphor" "github.com/mreiferson/go-options" ) func phosphorFlagSet() *flag.FlagSet { flagSet := flag.NewFlagSet("phosphor", flag.ExitOnError) // basic options flagSet.Bool("version", false, "print version string") flagSet.Bool("verbose", false, "enable verbose logging") flagSet.Int64("worker-id", 0, "unique seed for message ID generation (int) in range [0,4096) (will default to a hash of hostname)") flagSet.String("https-address", "", ": to listen on for HTTPS clients") flagSet.String("http-address", "0.0.0.0:7750", ": to listen on for HTTP clients") // NSQ Transport options nsqLookupdHTTPAddrs := util.StringArray{} flagSet.Var(&nsqLookupdHTTPAddrs, "nsqlookupd-http-address", "nsqlookupd HTTP address (may be given multiple times)") nsqdHTTPAddrs := util.StringArray{} flagSet.Var(&nsqdHTTPAddrs, "nsqd-http-address", "nsqd HTTP address (may be given multiple times)") flagSet.String("nsq-topic", "phosphor", "NSQ topic name to recieve traces from") flagSet.String("nsq-channel", "phosphor-server", "NSQ channel name to recieve traces from. 
This should be the same for all instances of the phosphor servers to spread ingestion work.") flagSet.Int("nsq-max-inflight", 200, "Number of traces to allow NSQ to keep inflight") flagSet.Int("nsq-num-handlers", 10, "Number of concurrent NSQ handlers to run") return flagSet } func main() { flagSet := phosphorFlagSet() flagSet.Parse(os.Args[1:]) // Globally seed rand rand.Seed(time.Now().UTC().UnixNano()) if flagSet.Lookup("version").Value.(flag.Getter).Get().(bool) { fmt.Println(version.String("phosphor")) return } signalChan := make(chan os.Signal, 1) signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM) opts := phosphor.NewOptions() cfg := map[string]interface{}{} options.Resolve(opts, flagSet, cfg) p := phosphor.New(opts) p.Run() <-signalChan p.Exit() } ================================================ FILE: apps/phosphord/main.go ================================================ package main import ( "flag" "fmt" "math/rand" "os" "os/signal" "runtime" "syscall" "time" log "github.com/cihub/seelog" "github.com/mreiferson/go-options" "github.com/mondough/phosphor/internal/util" "github.com/mondough/phosphor/internal/version" "github.com/mondough/phosphor/phosphord" ) func phosphordFlagSet() *flag.FlagSet { flagSet := flag.NewFlagSet("phosphord", flag.ExitOnError) // basic options flagSet.Bool("version", false, "print version string") flagSet.Bool("verbose", false, "enable verbose logging") // forwarder options flagSet.String("udp-address", "0.0.0.0:7760", ": to listen for UDP traces") flagSet.Int("num-forwarders", 20, "set the number of workers which buffer and forward traces") flagSet.Int("buffer-size", 200, "set the maximum number of traces buffered per worker before batch sending") flagSet.Int("flush-interval", 2000, "set the maximum flush interval in ms") // NSQ Transport options flagSet.String("nsq-topic", "phosphor", "NSQ topic name to recieve traces from") nsqdTCPAddrs := util.StringArray{} flagSet.Var(&nsqdTCPAddrs, "nsqd-tcp-address", "nsqd TCP 
address (may be given multiple times)") return flagSet } func main() { flagSet := phosphordFlagSet() flagSet.Parse(os.Args[1:]) defer log.Flush() // Globally seed rand rand.Seed(time.Now().UTC().UnixNano()) // Use ALL the CPUs runtime.GOMAXPROCS(runtime.NumCPU()) // Immediately print and exit the version number if flagSet.Lookup("version").Value.(flag.Getter).Get().(bool) { fmt.Println(version.String("phosphord")) return } signalChan := make(chan os.Signal, 1) signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM) opts := phosphord.NewOptions() cfg := map[string]interface{}{} options.Resolve(opts, flagSet, cfg) p := phosphord.New(opts) p.Run() <-signalChan p.Exit() } ================================================ FILE: dist.sh ================================================ #!/bin/bash set -e DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" rm -rf $DIR/dist/docker mkdir -p $DIR/dist/docker rm -rf $DIR/.godeps mkdir -p $DIR/.godeps export GOPATH=$DIR/vendor:$GOPATH arch=$(go env GOARCH) version=$(awk '/const Version/ {print $NF}' < $DIR/internal/version/version.go | sed 's/"//g') goversion=$(go version | awk '{print $3}') for os in linux darwin freebsd; do echo "... building v$version for $os/$arch" BUILD=$(mktemp -d -t phosphor) TARGET="phosphor-$version.$os-$arch.$goversion" for app in phosphor phosphord; do GOOS=$os GOARCH=$arch CGO_ENABLED=0 go build -o $BUILD/$TARGET/bin/$app ./apps/$app done pushd $BUILD if [ "$os" == "linux" ]; then cp -r $TARGET/bin $DIR/dist/docker/ fi tar czvf $TARGET.tar.gz $TARGET mv $TARGET.tar.gz $DIR/dist popd rm -r $BUILD done docker build -t mondough/phosphor:v$version . if [[ ! $version == *"-"* ]]; then echo "Tagging mondough/phosphor:v$version as the latest release." 
// The StringArray type is borrowed from NSQ
// https://github.com/bitly/nsq/blob/master/util/string_array.go

// StringArray is a flag.Value implementation that accumulates
// repeated occurrences of a string flag into a slice.
type StringArray []string

// Set appends a single value; it never fails, which satisfies the
// flag.Value interface.
func (sa *StringArray) Set(value string) error {
	updated := append(*sa, value)
	*sa = updated
	return nil
}

// String renders the collected values as a comma-separated list.
func (sa *StringArray) String() string {
	return strings.Join([]string(*sa), ",")
}

// Version of the binaries
const Version = "0.0.1"

// String returns our formatted version string, for example
// "phosphor v0.0.1 (built w/go1.5)".
func String(app string) string {
	return app + " v" + Version + " (built w/" + runtime.Version() + ")"
}
(default "phosphor-server") -nsq-max-inflight int Number of traces to allow NSQ to keep inflight (default 200) -nsq-num-handlers int Number of concurrent NSQ handlers to run (default 10) -nsq-topic string NSQ topic name to recieve traces from (default "phosphor") -nsqd-http-address value nsqd HTTP address (may be given multiple times) -nsqlookupd-http-address value nsqlookupd HTTP address (may be given multiple times) -verbose enable verbose logging -version print version string -worker-id int unique seed for message ID generation (int) in range [0,4096) (will default to a hash of hostname) ``` ================================================ FILE: phosphor/context.go ================================================ package phosphor import ( "fmt" "golang.org/x/net/context" ) func phosphorFromContext(ctx context.Context) (*Phosphor, error) { if p, ok := ctx.Value("phosphor").(*Phosphor); ok { return p, nil } return nil, fmt.Errorf("Couldn't retrieve Phosphor from Context") } ================================================ FILE: phosphor/domain.go ================================================ package phosphor import ( "errors" "sync" "time" ) // NewTrace initialises and returns a new Trace func NewTrace() *Trace { return &Trace{ Annotation: make([]*Annotation, 0), } } // Trace represents a full trace of a request // comprised of a number of Annotations type Trace struct { sync.Mutex Annotation []*Annotation `json:"annotations"` } // AppendAnnotation to a Trace func (t *Trace) AppendAnnotation(a *Annotation) error { if t == nil { return errors.New("Trace is Nil") } t.Annotation = append(t.Annotation, a) return nil } // AnnotationType represents an Enum of types of Anotations which Phosphor supports type AnnotationType int32 const ( UnknownAnnotationType = AnnotationType(0) // No idea... 
// Calls Req = AnnotationType(1) // Client Request dispatch Rsp = AnnotationType(2) // Client Response received In = AnnotationType(3) // Server Request received Out = AnnotationType(4) // Server Response dispatched Timeout = AnnotationType(5) // Client timed out waiting // Developer initiated annotations // @todo // Annotation = AnnotationType(6) ) // An Annotation represents the smallest individually recorded component of a trace // These can be assembled into spans, and entire traces of a request to our systems type Annotation struct { TraceId string // Global Trace Identifier SpanId string // Identifier for this span, non unique - eg. RPC calls would have 4 annotation with this id ParentSpanId string // Parent span - eg. nested RPC calls Timestamp time.Time // Timestamp the event occured, can only be compared on the same machine Duration time.Duration // Optional: duration of the event, eg. RPC call Hostname string // Hostname this event originated from Origin string // Fully qualified name of the message origin Destination string // Optional: Fully qualified name of the message destination AnnotationType AnnotationType // The type of Annotation Async bool // If the request was fired asynchronously Payload string // The payload, eg. 
RPC body, or Annotation PayloadSize int32 // Bytes of payload KeyValue map[string]string // Key value debug information } ================================================ FILE: phosphor/handler.go ================================================ package phosphor import ( "encoding/json" "errors" "fmt" "net/http" "golang.org/x/net/context" log "github.com/cihub/seelog" "github.com/mondough/phosphor/internal/version" ) // Index // @todo return version information etc func Index(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) fmt.Fprintf(w, fmt.Sprintf(`{ "name": "phosphor", "version": "%s" }`, version.Version)) } // TraceLookup retrieves a trace from the persistence layer func TraceLookup(ctx context.Context) func(http.ResponseWriter, *http.Request) { p, err := phosphorFromContext(ctx) if err != nil { panic(err) } return func(w http.ResponseWriter, r *http.Request) { traceId := r.URL.Query().Get("traceId") if traceId == "" { errorResponse(r, w, http.StatusBadRequest, errors.New("traceId param not provided")) return } log.Debugf("Trace lookup - TraceId: %s", traceId) t, err := p.Store.ReadTrace(traceId) if err != nil { log.Errorf("Trace lookup failed: %s", err) errorResponse(r, w, http.StatusInternalServerError, fmt.Errorf("could not load trace: %s", err)) return } // If we don't find the trace return 404 if t == nil { log.Debugf("Trace not found: %s", traceId) errorResponse(r, w, http.StatusNotFound, errors.New("traceId not found")) return } // Return trace response( r, w, map[string]interface{}{ "trace": prettyFormatTrace(t), }, ) } } // response sends the response back to the client, marshaling to JSON func response(r *http.Request, w http.ResponseWriter, resp interface{}) { writeResponse(r, w, http.StatusOK, resp) } // errorResponse marshals an error to JSON and returns this to the client func errorResponse(r *http.Request, w http.ResponseWriter, code int, err error) { resp := map[string]interface{}{ "error": err.Error(), } writeResponse(r, 
// writeResponse marshals resp to JSON and writes it to the client with
// the given HTTP status code, emitting permissive CORS headers when the
// request carries an Origin header.
func writeResponse(r *http.Request, w http.ResponseWriter, code int, resp interface{}) {
	// Deal with CORS: reflect the caller's Origin and any requested headers.
	if origin := r.Header.Get("Origin"); origin != "" {
		w.Header().Set("Access-Control-Allow-Origin", origin)
		w.Header().Set("Access-Control-Allow-Methods", "DELETE, GET, HEAD, OPTIONS, POST, PUT")
		w.Header().Set("Access-Control-Allow-Credentials", "true")

		// Allow any headers the client asked for
		if wantedHeaders := r.Header.Get("Access-Control-Request-Headers"); wantedHeaders != "" {
			w.Header().Set("Access-Control-Allow-Headers", wantedHeaders)
		}
	}

	// The body is JSON, so advertise it as such (was text/plain, which
	// mislabels every response this server produces).
	w.Header().Set("Content-Type", "application/json; charset=utf-8")

	b, err := json.Marshal(resp)
	if err != nil {
		// Marshal failed: report a server error with a hand-built JSON body.
		w.WriteHeader(http.StatusInternalServerError)
		fmt.Fprintln(w, `{"error":"failed to marshal json"}`)
		return
	}
	w.WriteHeader(code)
	fmt.Fprintln(w, string(b))
}
Block until exit select { case <-consumer.StopChan: case <-p.exitChan: } } // IngestionHandler exists to match the NSQ handler interface type IngestionHandler struct { store Store } // HandleMessage delivered by NSQ func (ih *IngestionHandler) HandleMessage(message *nsq.Message) error { p := &traceproto.Annotation{} err := proto.Unmarshal(message.Body, p) if err != nil { // returning an error to NSQ will requeue this // failure to unmarshal is permanent return nil } a := ProtoToAnnotation(p) log.Debugf("Received annotation: %+v", a) // Write to our store ih.store.StoreAnnotation(a) return nil } ================================================ FILE: phosphor/marshaling.go ================================================ package phosphor import ( "sort" "time" "github.com/mondough/phosphor/proto" ) func prettyFormatTrace(t *Trace) interface{} { return map[string]interface{}{ "annotations": formatAnnotations(t.Annotation), } } func formatAnnotations(ans []*Annotation) interface{} { sort.Sort(ByTime(ans)) // Convert to proto pa := AnnotationsToProto(ans) // Format nicely as JSON m := make([]interface{}, 0, len(pa)) for _, a := range pa { m = append(m, formatAnnotation(a)) } return m } func formatAnnotation(a *traceproto.Annotation) interface{} { return map[string]interface{}{ "trace_id": a.TraceId, "span_id": a.SpanId, "parent_id": a.ParentId, "type": a.Type.String(), "async": a.Async, "timestamp": a.Timestamp, "duration": a.Duration, "hostname": a.Hostname, "origin": a.Origin, "destination": a.Destination, "payload": a.Payload, "key_value": a.KeyValue, } } type ByTime []*Annotation func (s ByTime) Len() int { return len(s) } func (s ByTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s ByTime) Less(i, j int) bool { return s[i].Timestamp.Before(s[j].Timestamp) } // --- // ProtoToAnnotation converts a proto annotation to our domain func ProtoToAnnotation(p *traceproto.Annotation) *Annotation { if p == nil { return &Annotation{} } return &Annotation{ TraceId: 
p.TraceId, SpanId: p.SpanId, ParentSpanId: p.ParentId, Timestamp: microsecondInt64ToTime(p.Timestamp), Duration: microsecondInt64ToDuration(p.Duration), Hostname: p.Hostname, Origin: p.Origin, Destination: p.Destination, AnnotationType: protoToAnnotationType(p.Type), Async: p.Async, Payload: p.Payload, PayloadSize: int32(len(p.Payload)), KeyValue: protoToKeyValue(p.KeyValue), } } // protoToAnnotationType converts a annotation type in our proto to our domain func protoToAnnotationType(p traceproto.AnnotationType) AnnotationType { // Ensure we are within bounds at := int32(p) if at > 6 || at < 1 { at = 0 } return AnnotationType(at) } // annotationTypeToProto converts a annotation type in our domain to proto format func annotationTypeToProto(at AnnotationType) traceproto.AnnotationType { // Ensure we are within bounds p := int32(at) if p > 6 || p < 1 { p = 0 } return traceproto.AnnotationType(p) } // microsecondInt64ToTime converts an integer number of microseconds // since the epoch to a time func microsecondInt64ToTime(i int64) time.Time { µsec := i % 1e6 sec := (i - µsec) / 1e6 return time.Unix(sec, µsec*1e3) } // timeToMicrosecondInt64 converts a time to µseconds since epoch as int64 func timeToMicrosecondInt64(t time.Time) int64 { sec := t.Unix() * 1e6 µsec := int64(t.Nanosecond() / 1e3) return sec + µsec } // microsecondInt64ToDuration converts an integer number // of microseconds to a duration func microsecondInt64ToDuration(i int64) time.Duration { return time.Duration(i) * time.Microsecond } // durationToMicrosecondInt64 returns a duration to the nearest µs func durationToMicrosecondInt64(d time.Duration) int64 { return d.Nanoseconds() / 1e3 } // protoToKeyValue converts a repeated set of proto key values // to a map of keys => values func protoToKeyValue(p []*traceproto.KeyValue) map[string]string { ret := make(map[string]string) for _, kv := range p { if p == nil { continue } ret[kv.Key] = kv.Value } return ret } // keyValueToProto converts a map of keys => 
values to a repeated set // of proto key values func keyValueToProto(m map[string]string) []*traceproto.KeyValue { ret := make([]*traceproto.KeyValue, 0, len(m)) for k, v := range m { kv := &traceproto.KeyValue{ Key: k, Value: v, } ret = append(ret, kv) } return ret } // AnnotationsToProto converts a slice of domain annotations to our proto format func AnnotationsToProto(a []*Annotation) []*traceproto.Annotation { ret := make([]*traceproto.Annotation, 0, len(a)) for _, v := range a { ret = append(ret, AnnotationToProto(v)) } return ret } // AnnotationToProto converts a domain annotation to our proto format func AnnotationToProto(a *Annotation) *traceproto.Annotation { if a == nil { return &traceproto.Annotation{} } return &traceproto.Annotation{ TraceId: a.TraceId, SpanId: a.SpanId, ParentId: a.ParentSpanId, Type: annotationTypeToProto(a.AnnotationType), Async: a.Async, Timestamp: timeToMicrosecondInt64(a.Timestamp), Duration: durationToMicrosecondInt64(a.Duration), Hostname: a.Hostname, Origin: a.Origin, Destination: a.Destination, Payload: a.Payload, KeyValue: keyValueToProto(a.KeyValue), } } ================================================ FILE: phosphor/memorystore.go ================================================ package phosphor import ( "sync" "time" log "github.com/cihub/seelog" ) type MemoryStore struct { sync.RWMutex traces map[string]*Trace } // NewMemoryStore initialises and returns a new MemoryStore func NewMemoryStore() *MemoryStore { s := &MemoryStore{ traces: make(map[string]*Trace), } // run stats worker go s.statsLoop() return s } // ReadTrace retrieves a full Trace, composed of Annotations from the store by ID func (s *MemoryStore) ReadTrace(id string) (*Trace, error) { if s == nil { return nil, ErrStoreNotInitialised } s.RLock() defer s.RUnlock() return s.traces[id], nil } // StoreAnnotation into the store, if the trace doesn't not already exist // this will be created for the global trace ID func (s *MemoryStore) StoreAnnotation(a 
*Annotation) error { s.Lock() defer s.Unlock() if s == nil { return ErrStoreNotInitialised } if a == nil { return ErrInvalidAnnotation } if a.TraceId == "" { return ErrInvalidTraceId } // Load our current trace t := s.traces[a.TraceId] // Initialise a new trace if we don't have it already if t == nil { t = NewTrace() } // Add the new annotation to this t.AppendAnnotation(a) // Store it back s.traces[a.TraceId] = t return nil } // statsLoop loops and outputs stats every 5 seconds func (s *MemoryStore) statsLoop() { tick := time.NewTicker(5 * time.Second) // @todo listen for shutdown, stop ticker and exit cleanly for { <-tick.C // block until tick s.printStats() } } // printStats about the status of the memorystore to stdout func (s *MemoryStore) printStats() { // Get some data while under the mutex s.RLock() count := len(s.traces) s.RUnlock() // Separate processing and logging outside of mutex log.Infof("[MemoryStore] Traces stored: %v", count) } ================================================ FILE: phosphor/options.go ================================================ package phosphor import ( "crypto/md5" "hash/crc32" "io" "log" "os" ) type Options struct { // basic options ID int64 `flag:"worker-id" cfg:"id"` Verbose bool `flag:"verbose"` HTTPAddress string `flag:"http-address"` HTTPSAddress string `flag:"https-address"` // NSQ Transport options NSQLookupdHTTPAddresses []string `flag:"nsqlookupd-http-address"` NSQDHTTPAddresses []string `flag:"nsqd-http-address"` NSQTopicName string `flag:"nsq-topic"` NSQChannelName string `flag:"nsq-channel"` NSQMaxInflight int `flag:"nsq-max-inflight"` NSQNumHandlers int `flag:"nsq-num-handlers"` } func NewOptions() *Options { hostname, err := os.Hostname() if err != nil { log.Fatal(err) } h := md5.New() io.WriteString(h, hostname) defaultID := int64(crc32.ChecksumIEEE(h.Sum(nil)) % 1024) return &Options{ ID: defaultID, HTTPAddress: "0.0.0.0:7750", NSQTopicName: "phosphor", NSQChannelName: "phosphor-server", NSQMaxInflight: 200, 
NSQNumHandlers: 10, } } ================================================ FILE: phosphor/phosphor.go ================================================ package phosphor import ( "net/http" "golang.org/x/net/context" log "github.com/cihub/seelog" ) type Phosphor struct { opts *Options Store Store exitChan chan struct{} } func New(opts *Options) *Phosphor { return &Phosphor{ opts: opts, // Store: opts.Store, exitChan: make(chan struct{}), } } func (p *Phosphor) Run() { log.Infof("Phosphor starting up") defer log.Flush() // Store a reference to phosphor in our context which we can pass // to other areas of the application, eg the HTTP api ctx := context.Background() ctx = context.WithValue(ctx, "phosphor", p) // Initialise a persistent store // if p.Store == nil { p.Store = NewMemoryStore() // } // Initialise trace ingestion go p.RunIngester() // Set up API and serve requests http.HandleFunc("/", Index) http.HandleFunc("/trace", TraceLookup(ctx)) go http.ListenAndServe(p.opts.HTTPAddress, nil) } func (p *Phosphor) Exit() { log.Infof("Phosphor exiting") select { case <-p.exitChan: // check if already closed default: close(p.exitChan) } } ================================================ FILE: phosphor/store.go ================================================ package phosphor import "errors" type Store interface { ReadTrace(id string) (*Trace, error) StoreAnnotation(a *Annotation) error } var ( ErrStoreNotInitialised = errors.New("Store is not initialised") ErrInvalidAnnotation = errors.New("Annotation is invalid") ErrInvalidTrace = errors.New("Trace is invalid") ErrInvalidTraceId = errors.New("TraceId is invalid") ) ================================================ FILE: phosphord/README.md ================================================ # PhosphorD PhosphorD is a local forwarder, like StatsD, which receives traces from the Phosphor client, and forwards to the [Phosphor server](https://github.com/mondough/phosphor/tree/master/phosphor). 
Currently this receives Traces over UDP, which prevents clients blocking, but is reasonably reliable on a local machine. In the event this blocks, traces will be dropped and lost. A future improvement would make this configurable to read from local files, mirroring the behaviour of Dapper Daemons as described in the [Google Dapper](https://research.google.com/pubs/pub36356.html) paper. ## Usage ``` -buffer-size int set the maximum number of traces buffered per worker before batch sending (default 200) -flush-interval int set the maximum flush interval in ms (default 2000) -nsq-topic string NSQ topic name to recieve traces from (default "phosphor") -nsqd-tcp-address value nsqd TCP address (may be given multiple times) -num-forwarders int set the number of workers which buffer and forward traces (default 20) -udp-address string : to listen for UDP traces (default "0.0.0.0:7760") -verbose enable verbose logging -version print version string ``` ================================================ FILE: phosphord/forwarder.go ================================================ package phosphord import ( "encoding/json" "time" log "github.com/cihub/seelog" "github.com/golang/protobuf/proto" pb "github.com/mondough/phosphor/proto" ) func (p *PhosphorD) forward(id int) { log.Debugf("[Forwarder %v] started", id) var ( b []byte i int decoded *pb.Annotation js []byte buf = make([][]byte, 0, p.opts.BufferSize) metricsTick = time.NewTicker(5 * time.Second) timeoutTick = time.NewTicker(time.Duration(p.opts.FlushInterval) * time.Millisecond) ) for { select { case <-p.exitChan: return case b = <-p.traceChan: i++ // Log the frame if we're in verbose mode if p.opts.Verbose { decoded = &pb.Annotation{} if err := proto.Unmarshal(b, decoded); err != nil { log.Warnf("[Forwarder %v] Couldn't decode trace frame", id) continue } js, _ = json.Marshal(decoded) log.Tracef("[Forwarder %v] Received message: %s", id, string(js)) } // Add message to our buffer buf = append(buf, b) // Forward on if 
we're at our buffer size if len(buf) >= p.opts.BufferSize { p.sendTraces(id, &buf) } case <-timeoutTick.C: p.sendTraces(id, &buf) case <-metricsTick.C: log.Debugf("[Forwarder %v] Processed %v messages", id, i) } } } func (p *PhosphorD) sendTraces(id int, buf *[][]byte) error { // Don't publish empty buffers if buf == nil || len(*buf) == 0 { return nil } // Attempt to publish log.Debugf("[Forwarder %v] Sending %v traces", id, len(*buf)) if err := p.tr.MultiPublish(*buf); err != nil { // we return an error here, but currently ignore it // therefore the behaviour will be reattempting to republish the // buffer when the next trace arrives to this forwarder return err } // Empty the buffer on success *buf = nil return nil } ================================================ FILE: phosphord/options.go ================================================ package phosphord type Options struct { // basic options Verbose bool `flag:"verbose"` UDPAddress string `flag:"udp-address"` NumForwarders int `flag:"num-forwarders"` BufferSize int `flag:"buffer-size"` FlushInterval int `flag:"flush-interval"` // NSQ Transport options NSQDTCPAddresses []string `flag:"nsqd-tcp-address"` NSQTopicName string `flag:"nsq-topic"` NSQMaxInflight int NSQNumHandlers int } func NewOptions() *Options { return &Options{ UDPAddress: "0.0.0.0:7760", NumForwarders: 20, BufferSize: 200, FlushInterval: 2000, NSQTopicName: "phosphor", NSQMaxInflight: 200, NSQNumHandlers: 10, } } ================================================ FILE: phosphord/phosphord.go ================================================ package phosphord import ( "bytes" "net" "os" "runtime" "time" log "github.com/cihub/seelog" "github.com/mondough/phosphor/phosphord/transport" ) const ( UDP = "udp" ) var ( packetSize = 65536 - 8 - 20 // 8-byte UDP header, 20-byte IP header ) type PhosphorD struct { opts *Options tr transport.Transport traceChan chan []byte exitChan chan struct{} } func New(opts *Options) *PhosphorD { // Initialise our 
transport // TODO ensure this doesn't connect until we Run() tr, err := transport.NewNSQTransport(opts.NSQTopicName, opts.NSQDTCPAddresses) if err != nil { log.Criticalf(err.Error()) os.Exit(1) } return &PhosphorD{ opts: opts, tr: tr, traceChan: make(chan []byte), exitChan: make(chan struct{}), } } func (p *PhosphorD) Run() { log.Infof("PhosphorD started at %v using %v CPUs", time.Now(), runtime.NumCPU()) // Fire up a number of forwarders to process inbound messages log.Infof("Starting %v forwarders with buffer size of %v", p.opts.NumForwarders, p.opts.BufferSize) for i := 0; i < p.opts.NumForwarders; i++ { go p.forward(i) } // Bind and listen to UDP traffic go p.listen() } // Exit and shut down func (p *PhosphorD) Exit() { log.Infof("PhosphorD exiting") select { case <-p.exitChan: // check if already closed default: close(p.exitChan) } } // listen on a UDP socket for trace frames func (p *PhosphorD) listen() { // Resolve bind address address, err := net.ResolveUDPAddr(UDP, p.opts.UDPAddress) if err != nil { log.Errorf("Failed to resolve address: %s", err.Error()) return } // Take the resolved address and attempt to listen on the UDP socket listener, err := net.ListenUDP(UDP, address) if err != nil { log.Errorf("ListenUDP error: %s", err.Error()) return } defer listener.Close() // Listen loop log.Infof("Listening on %s for UDP trace frames", address.String()) for { message := make([]byte, packetSize) n, _, err := listener.ReadFrom(message) if err != nil { continue } buf := bytes.NewBuffer(message[0:n]) // log.Infof("Packet received from %s: %s", remaddr, string(message[0:n])) // Attempt to push into our channel to be processed by a worker select { // Successfully write inbound message to queue case p.traceChan <- buf.Bytes(): // Stop listening and shut down case <-p.exitChan: return // Drop message to prevent blocking default: } } } ================================================ FILE: phosphord/test/test.go ================================================ package 
main import ( "fmt" "net" "time" "github.com/golang/protobuf/proto" pb "github.com/mondough/phosphor/proto" ) const ( MAX_PACKET_SIZE = 1500 - 8 - 20 // 8-byte UDP header, 20-byte IP header ) func main() { // Make example trace frame t := &pb.Annotation{ TraceId: "aasldjaskjdlsakjdkasjdklasjdlasjdkljdas", SpanId: "8yf8sdg76sg897b98fbuys8b9s6rvs6ducghkfhi27tuw", ParentId: "97as8d7s9a7a7dv32hrkqehfkuh23hq8d7h4g7iygs7ih", Type: pb.AnnotationType_CLIENT_SEND, Timestamp: time.Now().UnixNano() / 1e3, Duration: 1231312, Hostname: "somehostname", Origin: "some.api", Destination: "some.service", Payload: `{"boop":123}`, } // Marshal to bytes b, err := proto.Marshal(t) if err != nil { panic(err) } fmt.Printf("Encoded: %s\n", string(b)) fmt.Printf("Encoded bytes: %v\n", b) // Send via UDP! // Get a conn c, err := net.DialTimeout("udp", "localhost:7760", time.Second) if err != nil { panic(err) } // Write into the connection var i int for j := 0; j < 20; j++ { for i = 0; i < 500; i++ { _, err := c.Write([]byte(b)) if err != nil { panic(err) } } time.Sleep(500 * time.Millisecond) } fmt.Println("Sent", i, "messages") // fmt.Println("Sent %v bytes", n) } ================================================ FILE: phosphord/transport/nsq.go ================================================ package transport import ( "errors" "math/rand" nsq "github.com/bitly/go-nsq" log "github.com/cihub/seelog" "github.com/mondough/phosphor/internal/util" ) var ( ErrPublishFailure = errors.New("Failed to publish to NSQD") ErrNoConfiguredNodes = errors.New("No NSQD nodes are configured") ) // NewNSQTransport initialises a Transport over NSQ func NewNSQTransport(topic string, nsqdTCPAddrs util.StringArray) (Transport, error) { // Currently using default config cfg := nsq.NewConfig() // Create a producer for each nsqd node provided producers := make(map[string]*nsq.Producer) producersIndex := make([]*nsq.Producer, 0, len(nsqdTCPAddrs)) for _, addr := range nsqdTCPAddrs { producer, err := 
nsq.NewProducer(addr, cfg) if err != nil { log.Warnf("failed to create nsq.Producer - %s", err) } producers[addr] = producer producersIndex = append(producersIndex, producers[addr]) } return &NSQPublisher{ topic: topic, producers: producers, producersIndex: producersIndex, }, nil } type NSQPublisher struct { topic string producers map[string]*nsq.Producer producersIndex []*nsq.Producer } func (p *NSQPublisher) MultiPublish(body [][]byte) error { if len(p.producers) == 0 { return ErrNoConfiguredNodes } // Round robin, from a random starting position i := rand.Intn(len(p.producers)) - 1 // Attempt up to our number of configured nodes for attempt := 0; attempt < len(p.producers); attempt++ { // Move to next host, or cycle back around i++ if i >= len(p.producers) { i = 0 } // Attempt to publish pd := p.producersIndex[i] if err := pd.MultiPublish(p.topic, body); err == nil { // success! return nil } } // We've run out of nodes, and not managed to publish return ErrPublishFailure } ================================================ FILE: phosphord/transport/transport.go ================================================ package transport type Transport interface { MultiPublish(body [][]byte) error } ================================================ FILE: proto/trace.pb.go ================================================ // Code generated by protoc-gen-go. // source: github.com/mondough/phosphor/proto/trace.proto // DO NOT EDIT! /* Package traceproto is a generated protocol buffer package. It is generated from these files: github.com/mondough/phosphor/proto/trace.proto It has these top-level messages: Annotation KeyValue */ package traceproto import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf type AnnotationType int32 const ( AnnotationType_UNKNOWN AnnotationType = 0 AnnotationType_CLIENT_SEND AnnotationType = 1 AnnotationType_CLIENT_RECV AnnotationType = 2 AnnotationType_SERVER_RECV AnnotationType = 3 AnnotationType_SERVER_SEND AnnotationType = 4 AnnotationType_TIMEOUT AnnotationType = 5 AnnotationType_ANNOTATION AnnotationType = 6 ) var AnnotationType_name = map[int32]string{ 0: "UNKNOWN", 1: "CLIENT_SEND", 2: "CLIENT_RECV", 3: "SERVER_RECV", 4: "SERVER_SEND", 5: "TIMEOUT", 6: "ANNOTATION", } var AnnotationType_value = map[string]int32{ "UNKNOWN": 0, "CLIENT_SEND": 1, "CLIENT_RECV": 2, "SERVER_RECV": 3, "SERVER_SEND": 4, "TIMEOUT": 5, "ANNOTATION": 6, } func (x AnnotationType) String() string { return proto.EnumName(AnnotationType_name, int32(x)) } type Annotation struct { // The ID of the trace this annotation is a component of TraceId string `protobuf:"bytes,1,opt,name=trace_id" json:"trace_id,omitempty"` // The span this trace corresponds to, in the case this // is representing a service (REQ/REP) call SpanId string `protobuf:"bytes,2,opt,name=span_id" json:"span_id,omitempty"` // The parent span this trace corresponds to, allowing us // to correlate trace frames and reconstruct the request ParentId string `protobuf:"bytes,3,opt,name=parent_id" json:"parent_id,omitempty"` // The type of annotation we're capturing Type AnnotationType `protobuf:"varint,4,opt,name=type,enum=traceproto.AnnotationType" json:"type,omitempty"` // Flag to indicate this is an asynchronous span, which will not have a // response - eg. just client send and server recv annotations Async bool `protobuf:"varint,5,opt,name=async" json:"async,omitempty"` // Time since the epoch in microseconds Timestamp int64 `protobuf:"varint,6,opt,name=timestamp" json:"timestamp,omitempty"` // Duration in microseconds // This should only be used to measure time on the same node // eg. 
the duration of service / rpc calls Duration int64 `protobuf:"varint,7,opt,name=duration" json:"duration,omitempty"` // Machine hostname, container name etc Hostname string `protobuf:"bytes,8,opt,name=hostname" json:"hostname,omitempty"` // Origin of this annotation, likely a service or application for a RPC Origin string `protobuf:"bytes,9,opt,name=origin" json:"origin,omitempty"` // Destination of this annotations action // eg. the service which a request was destined for // likely not set for annotations Destination string `protobuf:"bytes,10,opt,name=destination" json:"destination,omitempty"` // Payload as a string - eg. JSON encoded Payload string `protobuf:"bytes,11,opt,name=payload" json:"payload,omitempty"` // Repeated series of key value fields for arbitrary data KeyValue []*KeyValue `protobuf:"bytes,12,rep,name=key_value" json:"key_value,omitempty"` } func (m *Annotation) Reset() { *m = Annotation{} } func (m *Annotation) String() string { return proto.CompactTextString(m) } func (*Annotation) ProtoMessage() {} func (m *Annotation) GetKeyValue() []*KeyValue { if m != nil { return m.KeyValue } return nil } type KeyValue struct { Key string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` } func (m *KeyValue) Reset() { *m = KeyValue{} } func (m *KeyValue) String() string { return proto.CompactTextString(m) } func (*KeyValue) ProtoMessage() {} func init() { proto.RegisterEnum("traceproto.AnnotationType", AnnotationType_name, AnnotationType_value) } ================================================ FILE: proto/trace.proto ================================================ syntax="proto3"; package traceproto; message Annotation { // The ID of the trace this annotation is a component of string trace_id = 1; // The span this trace corresponds to, in the case this // is representing a service (REQ/REP) call string span_id = 2; // The parent span this trace corresponds to, allowing us // to 
correlate trace frames and reconstruct the request string parent_id = 3; // The type of annotation we're capturing AnnotationType type = 4; // Flag to indicate this is an asynchronous span, which will not have a // response - eg. just client send and server recv annotations bool async = 5; // Time since the epoch in microseconds int64 timestamp = 6; // Duration in microseconds // This should only be used to measure time on the same node // eg. the duration of service / rpc calls int64 duration = 7; // Machine hostname, container name etc string hostname = 8; // Origin of this annotation, likely a service or application for a RPC string origin = 9; // Destination of this annotations action // eg. the service which a request was destined for // likely not set for annotations string destination = 10; // Payload as a string - eg. JSON encoded string payload = 11; // Repeated series of key value fields for arbitrary data repeated KeyValue key_value = 12; } enum AnnotationType { UNKNOWN = 0; CLIENT_SEND = 1; CLIENT_RECV = 2; SERVER_RECV = 3; SERVER_SEND = 4; TIMEOUT = 5; ANNOTATION = 6; } message KeyValue { string key = 1; string value = 2; } ================================================ FILE: script/buildprotobufs.sh ================================================ #!/bin/bash # Basic path locations ROOT=$(cd $(dirname -- "$0" ) && cd .. && pwd) MESSAGEPATH=${ROOT}/proto # SRCPATH is the path to our src directory - everything from here is fully qualified # This depends on your storing your code in your GOPATH # eg. xxx/github.com/mondough/phosphor SRCPATH=$(cd ${ROOT}/../../.. && pwd) # Cakes are important. and delicious. and should be given out for success. 
function dispatchCake() { printf "\n \033[1;33m*\033[0m \033[1;33m*\033[0m \033[1;33m*\033[0m \n" printf " \033[1;33m*\033[0m\033[0;31m|\033[0m_\033[1;33m*\033[0m\033[0;31m|\033[0m_\033[1;33m*\033[0m\033[0;31m|\033[0m_\033[1;33m*\033[0m \n" printf " .-'\`\033[0;31m|\033[0m \033[0;31m|\033[0m \033[0;31m|\033[0m \033[0;31m|\033[0m\`'-. \n" printf " |\`-............-'| \n" printf " | | \n" printf " \ _ .-. _ / \n" printf " ,-|'-' '-' '-' '-'|-, \n" printf " /\` \._ _./ \`\ \n" printf " '._ \`\"\"\"\"\"\"\"\"\"\` _.'\n" printf " \`''--..........--''\` \n\n" printf " \033[1;5;7;32m GREAT SUCCESS! \033[0m\n\n" printf "\n\n" } # Show which protobufs were found printf "\nLocating protobufs...\n" find ${MESSAGEPATH} -name '*.proto' -exec echo {} \; echo "" # Clean out current protos find ${MESSAGEPATH} -name '*.pb.go' | xargs rm -f # Try to rebuild all the things echo "Generating Go protobuf classes..." find $MESSAGEPATH -name '*.proto' -exec protoc -I${SRCPATH} --go_out=${SRCPATH} {} \; printf "Complete\n\n" # GREAT SUCCESS dispatchCake ================================================ FILE: vendor/code.google.com/p/snappy-go/snappy/decode.go ================================================ // Copyright 2011 The Snappy-Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package snappy import ( "encoding/binary" "errors" ) // ErrCorrupt reports that the input is invalid. var ErrCorrupt = errors.New("snappy: corrupt input") // DecodedLen returns the length of the decoded block. func DecodedLen(src []byte) (int, error) { v, _, err := decodedLen(src) return v, err } // decodedLen returns the length of the decoded block and the number of bytes // that the length header occupied. 
func decodedLen(src []byte) (blockLen, headerLen int, err error) { v, n := binary.Uvarint(src) if n == 0 { return 0, 0, ErrCorrupt } if uint64(int(v)) != v { return 0, 0, errors.New("snappy: decoded block is too large") } return int(v), n, nil } // Decode returns the decoded form of src. The returned slice may be a sub- // slice of dst if dst was large enough to hold the entire decoded block. // Otherwise, a newly allocated slice will be returned. // It is valid to pass a nil dst. func Decode(dst, src []byte) ([]byte, error) { dLen, s, err := decodedLen(src) if err != nil { return nil, err } if len(dst) < dLen { dst = make([]byte, dLen) } var d, offset, length int for s < len(src) { switch src[s] & 0x03 { case tagLiteral: x := uint(src[s] >> 2) switch { case x < 60: s += 1 case x == 60: s += 2 if s > len(src) { return nil, ErrCorrupt } x = uint(src[s-1]) case x == 61: s += 3 if s > len(src) { return nil, ErrCorrupt } x = uint(src[s-2]) | uint(src[s-1])<<8 case x == 62: s += 4 if s > len(src) { return nil, ErrCorrupt } x = uint(src[s-3]) | uint(src[s-2])<<8 | uint(src[s-1])<<16 case x == 63: s += 5 if s > len(src) { return nil, ErrCorrupt } x = uint(src[s-4]) | uint(src[s-3])<<8 | uint(src[s-2])<<16 | uint(src[s-1])<<24 } length = int(x + 1) if length <= 0 { return nil, errors.New("snappy: unsupported literal length") } if length > len(dst)-d || length > len(src)-s { return nil, ErrCorrupt } copy(dst[d:], src[s:s+length]) d += length s += length continue case tagCopy1: s += 2 if s > len(src) { return nil, ErrCorrupt } length = 4 + int(src[s-2])>>2&0x7 offset = int(src[s-2])&0xe0<<3 | int(src[s-1]) case tagCopy2: s += 3 if s > len(src) { return nil, ErrCorrupt } length = 1 + int(src[s-3])>>2 offset = int(src[s-2]) | int(src[s-1])<<8 case tagCopy4: return nil, errors.New("snappy: unsupported COPY_4 tag") } end := d + length if offset > d || end > len(dst) { return nil, ErrCorrupt } for ; d < end; d++ { dst[d] = dst[d-offset] } } if d != dLen { return nil, ErrCorrupt } 
return dst[:d], nil } ================================================ FILE: vendor/code.google.com/p/snappy-go/snappy/encode.go ================================================ // Copyright 2011 The Snappy-Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package snappy import ( "encoding/binary" ) // We limit how far copy back-references can go, the same as the C++ code. const maxOffset = 1 << 15 // emitLiteral writes a literal chunk and returns the number of bytes written. func emitLiteral(dst, lit []byte) int { i, n := 0, uint(len(lit)-1) switch { case n < 60: dst[0] = uint8(n)<<2 | tagLiteral i = 1 case n < 1<<8: dst[0] = 60<<2 | tagLiteral dst[1] = uint8(n) i = 2 case n < 1<<16: dst[0] = 61<<2 | tagLiteral dst[1] = uint8(n) dst[2] = uint8(n >> 8) i = 3 case n < 1<<24: dst[0] = 62<<2 | tagLiteral dst[1] = uint8(n) dst[2] = uint8(n >> 8) dst[3] = uint8(n >> 16) i = 4 case int64(n) < 1<<32: dst[0] = 63<<2 | tagLiteral dst[1] = uint8(n) dst[2] = uint8(n >> 8) dst[3] = uint8(n >> 16) dst[4] = uint8(n >> 24) i = 5 default: panic("snappy: source buffer is too long") } if copy(dst[i:], lit) != len(lit) { panic("snappy: destination buffer is too short") } return i + len(lit) } // emitCopy writes a copy chunk and returns the number of bytes written. func emitCopy(dst []byte, offset, length int) int { i := 0 for length > 0 { x := length - 4 if 0 <= x && x < 1<<3 && offset < 1<<11 { dst[i+0] = uint8(offset>>8)&0x07<<5 | uint8(x)<<2 | tagCopy1 dst[i+1] = uint8(offset) i += 2 break } x = length if x > 1<<6 { x = 1 << 6 } dst[i+0] = uint8(x-1)<<2 | tagCopy2 dst[i+1] = uint8(offset) dst[i+2] = uint8(offset >> 8) i += 3 length -= x } return i } // Encode returns the encoded form of src. The returned slice may be a sub- // slice of dst if dst was large enough to hold the entire encoded block. // Otherwise, a newly allocated slice will be returned. // It is valid to pass a nil dst. 
func Encode(dst, src []byte) ([]byte, error) { if n := MaxEncodedLen(len(src)); len(dst) < n { dst = make([]byte, n) } // The block starts with the varint-encoded length of the decompressed bytes. d := binary.PutUvarint(dst, uint64(len(src))) // Return early if src is short. if len(src) <= 4 { if len(src) != 0 { d += emitLiteral(dst[d:], src) } return dst[:d], nil } // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. const maxTableSize = 1 << 14 shift, tableSize := uint(32-8), 1<<8 for tableSize < maxTableSize && tableSize < len(src) { shift-- tableSize *= 2 } var table [maxTableSize]int // Iterate over the source bytes. var ( s int // The iterator position. t int // The last position with the same hash as s. lit int // The start position of any pending literal bytes. ) for s+3 < len(src) { // Update the hash table. b0, b1, b2, b3 := src[s], src[s+1], src[s+2], src[s+3] h := uint32(b0) | uint32(b1)<<8 | uint32(b2)<<16 | uint32(b3)<<24 p := &table[(h*0x1e35a7bd)>>shift] // We need to to store values in [-1, inf) in table. To save // some initialization time, (re)use the table's zero value // and shift the values against this zero: add 1 on writes, // subtract 1 on reads. t, *p = *p-1, s+1 // If t is invalid or src[s:s+4] differs from src[t:t+4], accumulate a literal byte. if t < 0 || s-t >= maxOffset || b0 != src[t] || b1 != src[t+1] || b2 != src[t+2] || b3 != src[t+3] { s++ continue } // Otherwise, we have a match. First, emit any pending literal bytes. if lit != s { d += emitLiteral(dst[d:], src[lit:s]) } // Extend the match to be as long as possible. s0 := s s, t = s+4, t+4 for s < len(src) && src[s] == src[t] { s++ t++ } // Emit the copied bytes. d += emitCopy(dst[d:], s-t, s-s0) lit = s } // Emit any final pending literal bytes and return. if lit != len(src) { d += emitLiteral(dst[d:], src[lit:]) } return dst[:d], nil } // MaxEncodedLen returns the maximum length of a snappy block, given its // uncompressed length. 
func MaxEncodedLen(srcLen int) int {
	// Compressed data can be defined as:
	//    compressed := item* literal*
	//    item       := literal* copy
	//
	// The trailing literal sequence has a space blowup of at most 62/60
	// since a literal of length 60 needs one tag byte + one extra byte
	// for length information.
	//
	// Item blowup is trickier to measure. Suppose the "copy" op copies
	// 4 bytes of data. Because of a special check in the encoding code,
	// we produce a 4-byte copy only if the offset is < 65536. Therefore
	// the copy op takes 3 bytes to encode, and this type of item leads
	// to at most the 62/60 blowup for representing literals.
	//
	// Suppose the "copy" op copies 5 bytes of data. If the offset is big
	// enough, it will take 5 bytes to encode the copy op. Therefore the
	// worst case here is a one-byte literal followed by a five-byte copy.
	// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
	//
	// This last factor dominates the blowup, so the final estimate is:
	return 32 + srcLen + srcLen/6
}

================================================
FILE: vendor/code.google.com/p/snappy-go/snappy/snappy.go
================================================
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package snappy implements the snappy block-based compression format.
// It aims for very high speeds and reasonable compression.
//
// The C++ snappy implementation is at http://code.google.com/p/snappy/
package snappy

/*
Each encoded block begins with the varint-encoded length of the decoded data,
followed by a sequence of chunks. Chunks begin and end on byte boundaries.
The first byte of each chunk is broken into its 2 least and 6 most
significant bits called l and m: l ranges in [0, 4) and m ranges in [0, 64).
l is the chunk tag. Zero means a literal tag. All other values mean a copy
tag.

For literal tags:
  - If m < 60, the next 1 + m bytes are literal bytes.
  - Otherwise, let n be the little-endian unsigned integer denoted by the
    next m - 59 bytes. The next 1 + n bytes after that are literal bytes.

For copy tags, length bytes are copied from offset bytes ago, in the style of
Lempel-Ziv compression algorithms. In particular:
  - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
    The length is 4 + the low 3 bits of m. The high 3 bits of m form bits
    8-10 of the offset. The next byte is bits 0-7 of the offset.
  - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
    The length is 1 + m. The offset is the little-endian unsigned integer
    denoted by the next 2 bytes.
  - For l == 3, this tag is a legacy format that is no longer supported.
*/
const (
	tagLiteral = 0x00
	tagCopy1   = 0x01
	tagCopy2   = 0x02
	tagCopy4   = 0x03
)

================================================
FILE: vendor/code.google.com/p/snappy-go/snappy/snappy_test.go
================================================
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy

import (
	"bytes"
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"testing"
)

var download = flag.Bool("download", false, "If true, download any missing files before running benchmarks")

// roundtrip encodes b (using ebuf as scratch), decodes the result (using
// dbuf as scratch), and reports an error unless the decoded bytes equal b.
func roundtrip(b, ebuf, dbuf []byte) error {
	e, err := Encode(ebuf, b)
	if err != nil {
		return fmt.Errorf("encoding error: %v", err)
	}
	d, err := Decode(dbuf, e)
	if err != nil {
		return fmt.Errorf("decoding error: %v", err)
	}
	if !bytes.Equal(b, d) {
		return fmt.Errorf("roundtrip mismatch:\n\twant %v\n\tgot %v", b, d)
	}
	return nil
}

// TestEmpty checks that a zero-length input round-trips cleanly.
func TestEmpty(t *testing.T) {
	if err := roundtrip(nil, nil, nil); err != nil {
		t.Fatal(err)
	}
}

// TestSmallCopy exercises short inputs containing a back-reference, across
// nil, undersized, and adequately sized scratch buffers.
func TestSmallCopy(t *testing.T) {
	for _, ebuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {
		for _, dbuf := range [][]byte{nil, make([]byte, 20), make([]byte, 64)} {
			for i := 0; i < 32; i++ {
				s := "aaaa" + strings.Repeat("b", i) + "aaaabbbb"
				if err := roundtrip([]byte(s), ebuf, dbuf); err != nil {
					t.Errorf("len(ebuf)=%d, len(dbuf)=%d, i=%d: %v", len(ebuf), len(dbuf), i, err)
				}
			}
		}
	}
}

// TestSmallRand round-trips pseudo-random (mostly incompressible) inputs
// over a range of sizes, using a fixed seed for reproducibility.
func TestSmallRand(t *testing.T) {
	rand.Seed(27354294)
	for n := 1; n < 20000; n += 23 {
		b := make([]byte, n)
		for i, _ := range b {
			b[i] = uint8(rand.Uint32())
		}
		if err := roundtrip(b, nil, nil); err != nil {
			t.Fatal(err)
		}
	}
}

// TestSmallRegular round-trips highly compressible repeating inputs over a
// range of sizes.
func TestSmallRegular(t *testing.T) {
	for n := 1; n < 20000; n += 23 {
		b := make([]byte, n)
		for i, _ := range b {
			b[i] = uint8(i%10 + 'a')
		}
		if err := roundtrip(b, nil, nil); err != nil {
			t.Fatal(err)
		}
	}
}

// benchDecode measures Decode throughput over a pre-encoded copy of src.
func benchDecode(b *testing.B, src []byte) {
	encoded, err := Encode(nil, src)
	if err != nil {
		b.Fatal(err)
	}
	// Bandwidth is in amount of uncompressed data.
	b.SetBytes(int64(len(src)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		Decode(src, encoded)
	}
}

// benchEncode measures Encode throughput into a reused destination buffer.
func benchEncode(b *testing.B, src []byte) {
	// Bandwidth is in amount of uncompressed data.
	b.SetBytes(int64(len(src)))
	dst := make([]byte, MaxEncodedLen(len(src)))
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		Encode(dst, src)
	}
}

// readFile reads filename in full, failing the benchmark on error or if the
// file is empty.
func readFile(b *testing.B, filename string) []byte {
	src, err := ioutil.ReadFile(filename)
	if err != nil {
		b.Fatalf("failed reading %s: %s", filename, err)
	}
	if len(src) == 0 {
		b.Fatalf("%s has zero length", filename)
	}
	return src
}

// expand returns a slice of length n containing repeated copies of src.
func expand(src []byte, n int) []byte {
	dst := make([]byte, n)
	for x := dst; len(x) > 0; {
		i := copy(x, src)
		x = x[i:]
	}
	return dst
}

// benchWords benchmarks over the system dictionary expanded to n bytes.
func benchWords(b *testing.B, n int, decode bool) {
	// Note: the file is OS-language dependent so the resulting values are not
	// directly comparable for non-US-English OS installations.
	data := expand(readFile(b, "/usr/share/dict/words"), n)
	if decode {
		benchDecode(b, data)
	} else {
		benchEncode(b, data)
	}
}

func BenchmarkWordsDecode1e3(b *testing.B) { benchWords(b, 1e3, true) }
func BenchmarkWordsDecode1e4(b *testing.B) { benchWords(b, 1e4, true) }
func BenchmarkWordsDecode1e5(b *testing.B) { benchWords(b, 1e5, true) }
func BenchmarkWordsDecode1e6(b *testing.B) { benchWords(b, 1e6, true) }
func BenchmarkWordsEncode1e3(b *testing.B) { benchWords(b, 1e3, false) }
func BenchmarkWordsEncode1e4(b *testing.B) { benchWords(b, 1e4, false) }
func BenchmarkWordsEncode1e5(b *testing.B) { benchWords(b, 1e5, false) }
func BenchmarkWordsEncode1e6(b *testing.B) { benchWords(b, 1e6, false) }

// testFiles' values are copied directly from
// https://code.google.com/p/snappy/source/browse/trunk/snappy_unittest.cc.
// The label field is unused in snappy-go.
var testFiles = []struct {
	label    string
	filename string
}{
	{"html", "html"},
	{"urls", "urls.10K"},
	{"jpg", "house.jpg"},
	{"pdf", "mapreduce-osdi-1.pdf"},
	{"html4", "html_x_4"},
	{"cp", "cp.html"},
	{"c", "fields.c"},
	{"lsp", "grammar.lsp"},
	{"xls", "kennedy.xls"},
	{"txt1", "alice29.txt"},
	{"txt2", "asyoulik.txt"},
	{"txt3", "lcet10.txt"},
	{"txt4", "plrabn12.txt"},
	{"bin", "ptt5"},
	{"sum", "sum"},
	{"man", "xargs.1"},
	{"pb", "geo.protodata"},
	{"gaviota", "kppkn.gtb"},
}

// The test data files are present at this canonical URL.
const baseURL = "https://snappy.googlecode.com/svn/trunk/testdata/"

// downloadTestdata fetches one reference corpus file into testdata/.
// On any failure the partially written file is removed (via the errRet
// named result inspected by the deferred cleanup).
func downloadTestdata(basename string) (errRet error) {
	filename := filepath.Join("testdata", basename)
	f, err := os.Create(filename)
	if err != nil {
		return fmt.Errorf("failed to create %s: %s", filename, err)
	}
	defer f.Close()
	defer func() {
		// Don't leave a truncated download behind on error.
		if errRet != nil {
			os.Remove(filename)
		}
	}()
	resp, err := http.Get(baseURL + basename)
	if err != nil {
		return fmt.Errorf("failed to download %s: %s", baseURL+basename, err)
	}
	defer resp.Body.Close()
	_, err = io.Copy(f, resp.Body)
	if err != nil {
		return fmt.Errorf("failed to write %s: %s", filename, err)
	}
	return nil
}

// benchFile benchmarks encode or decode over testFiles[n], downloading the
// reference corpus first when -download is set and the file is missing.
func benchFile(b *testing.B, n int, decode bool) {
	filename := filepath.Join("testdata", testFiles[n].filename)
	if stat, err := os.Stat(filename); err != nil || stat.Size() == 0 {
		if !*download {
			b.Fatal("test data not found; skipping benchmark without the -download flag")
		}
		// Download the official snappy C++ implementation reference test data
		// files for benchmarking.
		if err := os.Mkdir("testdata", 0777); err != nil && !os.IsExist(err) {
			b.Fatalf("failed to create testdata: %s", err)
		}
		for _, tf := range testFiles {
			if err := downloadTestdata(tf.filename); err != nil {
				b.Fatalf("failed to download testdata: %s", err)
			}
		}
	}
	data := readFile(b, filename)
	if decode {
		benchDecode(b, data)
	} else {
		benchEncode(b, data)
	}
}

// Naming convention is kept similar to what snappy's C++ implementation uses.
// Benchmark_UFlatN decodes ("unflattens") testFiles[N]; Benchmark_ZFlatN
// encodes ("zips") it.
func Benchmark_UFlat0(b *testing.B)  { benchFile(b, 0, true) }
func Benchmark_UFlat1(b *testing.B)  { benchFile(b, 1, true) }
func Benchmark_UFlat2(b *testing.B)  { benchFile(b, 2, true) }
func Benchmark_UFlat3(b *testing.B)  { benchFile(b, 3, true) }
func Benchmark_UFlat4(b *testing.B)  { benchFile(b, 4, true) }
func Benchmark_UFlat5(b *testing.B)  { benchFile(b, 5, true) }
func Benchmark_UFlat6(b *testing.B)  { benchFile(b, 6, true) }
func Benchmark_UFlat7(b *testing.B)  { benchFile(b, 7, true) }
func Benchmark_UFlat8(b *testing.B)  { benchFile(b, 8, true) }
func Benchmark_UFlat9(b *testing.B)  { benchFile(b, 9, true) }
func Benchmark_UFlat10(b *testing.B) { benchFile(b, 10, true) }
func Benchmark_UFlat11(b *testing.B) { benchFile(b, 11, true) }
func Benchmark_UFlat12(b *testing.B) { benchFile(b, 12, true) }
func Benchmark_UFlat13(b *testing.B) { benchFile(b, 13, true) }
func Benchmark_UFlat14(b *testing.B) { benchFile(b, 14, true) }
func Benchmark_UFlat15(b *testing.B) { benchFile(b, 15, true) }
func Benchmark_UFlat16(b *testing.B) { benchFile(b, 16, true) }
func Benchmark_UFlat17(b *testing.B) { benchFile(b, 17, true) }
func Benchmark_ZFlat0(b *testing.B)  { benchFile(b, 0, false) }
func Benchmark_ZFlat1(b *testing.B)  { benchFile(b, 1, false) }
func Benchmark_ZFlat2(b *testing.B)  { benchFile(b, 2, false) }
func Benchmark_ZFlat3(b *testing.B)  { benchFile(b, 3, false) }
func Benchmark_ZFlat4(b *testing.B)  { benchFile(b, 4, false) }
func Benchmark_ZFlat5(b *testing.B)  { benchFile(b, 5, false) }
func Benchmark_ZFlat6(b *testing.B)  { benchFile(b, 6, false) }
func Benchmark_ZFlat7(b *testing.B)  { benchFile(b, 7, false) }
func Benchmark_ZFlat8(b *testing.B)  { benchFile(b, 8, false) }
func Benchmark_ZFlat9(b *testing.B)  { benchFile(b, 9, false) }
func Benchmark_ZFlat10(b *testing.B) { benchFile(b, 10, false) }
func Benchmark_ZFlat11(b *testing.B) { benchFile(b, 11, false) }
func Benchmark_ZFlat12(b *testing.B) { benchFile(b, 12, false) }
func Benchmark_ZFlat13(b *testing.B) { benchFile(b, 13, false) }
func Benchmark_ZFlat14(b *testing.B) { benchFile(b, 14, false) }
func Benchmark_ZFlat15(b *testing.B) { benchFile(b, 15, false) }
func Benchmark_ZFlat16(b *testing.B) { benchFile(b, 16, false) }
func Benchmark_ZFlat17(b *testing.B) { benchFile(b, 17, false) }

================================================
FILE: vendor/github.com/bitly/go-nsq/.travis.yml
================================================
language: go
go:
  - 1.4.2
env:
  - NSQ_DOWNLOAD=nsq-0.2.30.linux-amd64.go1.3 GOARCH=amd64
  - NSQ_DOWNLOAD=nsq-0.2.30.linux-amd64.go1.3 GOARCH=386
  - NSQ_DOWNLOAD=nsq-0.2.31.linux-amd64.go1.3.1 GOARCH=amd64
  - NSQ_DOWNLOAD=nsq-0.2.31.linux-amd64.go1.3.1 GOARCH=386
  - NSQ_DOWNLOAD=nsq-0.3.0.linux-amd64.go1.3.3 GOARCH=amd64
  - NSQ_DOWNLOAD=nsq-0.3.0.linux-amd64.go1.3.3 GOARCH=386
  - NSQ_DOWNLOAD=nsq-0.3.1.linux-amd64.go1.4.1 GOARCH=amd64
  - NSQ_DOWNLOAD=nsq-0.3.1.linux-amd64.go1.4.1 GOARCH=386
  - NSQ_DOWNLOAD=nsq-0.3.2.linux-amd64.go1.4.1 GOARCH=amd64
  - NSQ_DOWNLOAD=nsq-0.3.2.linux-amd64.go1.4.1 GOARCH=386
install:
  - go get github.com/bitly/go-simplejson
  - go get github.com/mreiferson/go-snappystream
script:
  - wget http://bitly-downloads.s3.amazonaws.com/nsq/$NSQ_DOWNLOAD.tar.gz
  - tar zxvf $NSQ_DOWNLOAD.tar.gz
  - export PATH=$NSQ_DOWNLOAD/bin:$PATH
  - pushd $TRAVIS_BUILD_DIR
  - ./test.sh
  - popd
notifications:
  email: false
sudo: false

================================================
FILE: vendor/github.com/bitly/go-nsq/ChangeLog.md
================================================
## go-nsq Change Log

### 1.0.4 - 2015-04-07

**Upgrading from 1.0.3**: There are no backward incompatible changes.
* #133 - fix `ErrNotConnected` race during `Producer` connection (thanks @jeddenlea) * #132 - fix `RDY` redistribution after backoff with no connections * #128 - fix backoff stall when using `RequeueWithoutBackoff` * #127 - fix handling of connection closing when resuming after backoff (thanks @jnewmano) * #126 - allow `BackoffStrategy` to be set via flag (thanks @twmb) * #125 - add pluggable consumer `BackoffStrategy`; add full-jitter strategy (thanks @hden) * #124 - add `DialTimeout` and `LocalAddr` config (thanks @yashkin) * #119 - add `Producer.Ping()` method (thanks @zulily) * #122 - refactor log level string handling * #120 - fix `Message` data races on `responded` * #114 - fix lookupd jitter having no effect (thanks @judwhite) ### 1.0.3 - 2015-02-07 **Upgrading from 1.0.2**: There are no backward incompatible changes. * #104 - fix reconnect address bug (thanks @ryanslade) * #106 - fix backoff reconnect deadlock (thanks @ryanslade) * #107 - fix out-of-bounds error when removing nsqlookupd addresses (thanks @andreas) * #108 - fix potential logger race conditions (thanks @judwhite) * #111 - fix resolved address error in reconnect loop (thanks @twmb) ### 1.0.2 - 2015-01-21 **Upgrading from 1.0.1**: There are no backward incompatible changes. * #102 - TLS min/max config defaults (thanks @twmb) * #99 - fix `Consumer.Stop()` race and `Producer.Stop()` deadlock (thanks @tylertreat) * #92 - expose `Message.NSQDAddress` * #95 - cleanup panic during `Consumer.Stop()` if handlers are deadlocked * #98 - add `tls-min-version` option (thanks @twmb) * #93 - expose a way to get `Consumer` runtime stats (thanks @dcarney) * #94 - allow `#ephemeral` topic names (thanks @jamesgroat) ### 1.0.1 - 2014-11-09 **Upgrading from 1.0.0**: There are no backward incompatible changes functionally, however this release no longer compiles with Go `1.0.x`. 
* #89 - don't spam connection teardown cleanup messages * #91 - add consumer `DisconnectFrom*` * #87 - allow `heartbeat_interval` and `output_buffer_timeout` to be disabled * #86 - pluggable `nsqlookupd` behaviors * #83 - send `RDY` before `FIN`/`REQ` (forwards compatibility with bitly/nsq#404) * #82 - fix panic when conn isn't assigned * #75/#76 - minor config related bug fixes * #75/#77/#78 - add `tls-cert` and `tls-key` config options ### 1.0.0 - 2014-08-11 **Upgrading from 0.3.7**: The public API was significantly refactored and is not backwards compatible, please read [UPGRADING](UPGRADING.md). * #58 - support `IDENTIFY` `msg_timeout` * #54 - per-connection TLS config and set `ServerName` * #49 - add common connect helpers * #43/#63 - more flexible `nsqlookupd` URL specification * #35 - `AUTH` support * #41/#62 - use package private RNG * #36 - support 64 character topic/channel names * #30/#38/#39/#42/#45/#46/#48/#51/#52/#65/#70 - refactor public API (see [UPGRADING](UPGRADING.md)) ### 0.3.7 - 2014-05-25 **Upgrading from 0.3.6**: There are no backward incompatible changes. **THIS IS THE LAST STABLE RELEASE PROVIDING THIS API**. Future releases will be based on the api in #30 and **will not be backwards compatible!** This is a bug fix release relating to the refactoring done in `0.3.6`. * #32 - fix potential panic for race condition when # conns == 0 * #33/#34 - more granular connection locking ### 0.3.6 - 2014-04-29 **Upgrading from 0.3.5**: There are no backward incompatible changes. This release includes a significant internal refactoring, designed to better encapsulate responsibility, see #19. 
Specifically: * make `Conn` public * move transport responsibilities into `Conn` from `Reader`/`Writer` * supply callbacks for hooking into `Conn` events As part of the refactoring, a few additional clean exit related issues were resolved: * wait group now includes all exit related goroutines * ensure that readLoop exits before exiting cleanup * always check messagesInFlight at readLoop exit * close underlying connection last ### 0.3.5 - 2014-04-05 **Upgrading from 0.3.4**: There are no backward incompatible changes. This release includes a few new features such as support for channel sampling and sending along a user agent string (which is now displayed in `nsqadmin`). Also, a critical bug fix for potential deadlocks (thanks @kjk for reporting and help testing). New Features/Improvements: * #27 - reader logs disambiguate topic/channel * #22 - channel sampling * #23 - user agent Bug Fixes: * #24 - fix racey reader IDENTIFY buffering * #29 - fix recursive RLock deadlocks ### 0.3.4 - 2013-11-19 **Upgrading from 0.3.3**: There are no backward incompatible changes. This is a bug fix release, notably potential deadlocks in `Message.Requeue()` and `Message.Touch()` as well as a potential busy loop cleaning up closed connections with in-flight messages. New Features/Improvements: * #14 - add `Reader.Configure()` * #18 - return an exported error when an `nsqlookupd` address is already configured Bug Fixes: * #15 - dont let `handleError()` loop if already connected * #17 - resolve potential deadlocks on `Message` responders * #16 - eliminate busy loop when draining `finishedMessages` ### 0.3.3 - 2013-10-21 **Upgrading from 0.3.2**: This release requires NSQ binary version `0.2.23+` for compression support. This release contains significant `Reader` refactoring of the RDY handling code paths. The motivation is documented in #1 however the commits in #8 identify individual changes. Additionally, we eliminated deadlocks during connection cleanup in `Writer`. 
As a result, both user-facing APIs should now be considerably more robust and stable. Additionally, `Reader` should behave better when backing off. New Features/Improvements: * #9 - ability to ignore publish responses in `Writer` * #12 - `Requeue()` method on `Message` * #6 - `Touch()` method on `Message` * #4 - snappy/deflate feature negotiation Bug Fixes: * #8 - `Reader` RDY handling refactoring (race conditions, deadlocks, consolidation) * #13 - fix `Writer` deadlocks * #10 - stop accessing simplejson internals * #5 - fix `max-in-flight` race condition ### 0.3.2 - 2013-08-26 **Upgrading from 0.3.1**: This release requires NSQ binary version `0.2.22+` for TLS support. New Features/Improvements: * #227 - TLS feature negotiation * #164/#202/#255 - add `Writer` * #186 - `MaxBackoffDuration` of `0` disables backoff * #175 - support for `nsqd` config option `--max-rdy-count` * #169 - auto-reconnect to hard-coded `nsqd` Bug Fixes: * #254/#256/#257 - new connection RDY starvation * #250 - `nsqlookupd` polling improvements * #243 - limit `IsStarved()` to connections w/ in-flight messages * #169 - use last RDY count for `IsStarved()`; redistribute RDY state * #204 - fix early termination blocking * #177 - support `broadcast_address` * #161 - connection pool goroutine safety ### 0.3.1 - 2013-02-07 **Upgrading from 0.3.0**: This release requires NSQ binary version `0.2.17+` for `TOUCH` support. * #119 - add TOUCH command * #133 - improved handling of errors/magic * #127 - send IDENTIFY (missed in #90) * #16 - add backoff to Reader ### 0.3.0 - 2013-01-07 **Upgrading from 0.2.4**: There are no backward incompatible changes to applications written against the public `nsq.Reader` API. However, there *are* a few backward incompatible changes to the API for applications that directly use other public methods, or properties of a few NSQ data types: `nsq.Message` IDs are now a type `nsq.MessageID` (a `[16]byte` array). 
The signatures of `nsq.Finish()` and `nsq.Requeue()` reflect this change. `nsq.SendCommand()` and `nsq.Frame()` were removed in favor of `nsq.SendFramedResponse()`. `nsq.Subscribe()` no longer accepts `shortId` and `longId`. If upgrading your consumers before upgrading your `nsqd` binaries to `0.2.16-rc.1` they will not be able to send the optional custom identifiers. * #90 performance optimizations * #81 reader performance improvements / MPUB support ### 0.2.4 - 2012-10-15 * #69 added IsStarved() to reader API ### 0.2.3 - 2012-10-11 * #64 timeouts on reader queries to lookupd * #54 fix crash issue with reader cleaning up from unexpectedly closed nsqd connections ### 0.2.2 - 2012-10-09 * Initial public release ================================================ FILE: vendor/github.com/bitly/go-nsq/LICENSE ================================================ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
================================================
FILE: vendor/github.com/bitly/go-nsq/README.md
================================================
## go-nsq

The official Go package for [NSQ][nsq].

[![Build Status](https://secure.travis-ci.org/bitly/go-nsq.png?branch=master)][travis]
[![GoDoc](https://godoc.org/github.com/bitly/go-nsq?status.svg)](https://godoc.org/github.com/bitly/go-nsq)

The latest stable release is **[1.0.4][latest_tag]**.

NOTE: The public API has been refactored as of `v1.0.0` and is not backwards compatible with previous releases. **[0.3.7][legacy]** is the last stable release compatible with the legacy API. Please read the [UPGRADING](UPGRADING.md) guide.

### Docs

See [godoc][nsq_gopkgdoc].

See the [main repo apps][apps] directory for examples of clients built using this package.

[nsq]: https://github.com/bitly/nsq
[nsq_gopkgdoc]: http://godoc.org/github.com/bitly/go-nsq
[protocol]: http://bitly.github.io/nsq/clients/tcp_protocol_spec.html
[apps]: https://github.com/bitly/nsq/tree/master/apps
[consumer]: http://godoc.org/github.com/bitly/go-nsq#Consumer
[producer]: http://godoc.org/github.com/bitly/go-nsq#Producer
[pr30]: https://github.com/bitly/go-nsq/pull/30
[legacy]: https://github.com/bitly/go-nsq/releases/tag/v0.3.7
[travis]: http://travis-ci.org/bitly/go-nsq
[latest_tag]: https://github.com/bitly/go-nsq/releases/tag/v1.0.4

================================================
FILE: vendor/github.com/bitly/go-nsq/UPGRADING.md
================================================
This outlines the backwards incompatible changes that were made to the public API after the `v0.3.7` stable release, and how to migrate existing legacy codebases.

#### Background

The original `go-nsq` codebase is some of our earliest Go code, and one of our first attempts at a public Go library.
We've learned a lot over the last 2 years and we wanted `go-nsq` to reflect the experiences we've had working with the library as well as the general Go conventions and best practices we picked up along the way. The diff can be seen via: https://github.com/bitly/go-nsq/compare/v0.3.7...HEAD The bulk of the refactoring came via: https://github.com/bitly/go-nsq/pull/30 #### Naming Previously, the high-level types we exposed were named `nsq.Reader` and `nsq.Writer`. These reflected internal naming conventions we had used at bitly for some time but conflated semantics with what a typical Go developer would expect (they obviously did not implement `io.Reader` and `io.Writer`). We renamed these types to `nsq.Consumer` and `nsq.Producer`, which more effectively communicate their purpose and is consistent with the NSQ documentation. #### Configuration In the previous API there were inconsistent and confusing ways to configure your clients. Now, configuration is performed *before* creating an `nsq.Consumer` or `nsq.Producer` by creating an `nsq.Config` struct. The only valid way to do this is via `nsq.NewConfig` (i.e. using a struct literal will panic due to invalid internal state). The `nsq.Config` struct has exported variables that can be set directly in a type-safe manner. You can also call `cfg.Validate()` to check that the values are correct and within range. `nsq.Config` also exposes a convenient helper method `Set(k string, v interface{})` that can set options by *coercing* the supplied `interface{}` value. This is incredibly convenient if you're reading options from a config file or in a serialized format that does not exactly match the native types. It is both flexible and forgiving. #### Improving the nsq.Handler interface `go-nsq` attempts to make writing the common use case consumer incredibly easy. 
You specify a type that implements the `nsq.Handler` interface, the interface method is called per message, and the return value of said method indicates to the library what the response to `nsqd` should be (`FIN` or `REQ`), all the while managing flow control and backoff. However, more advanced use cases require the ability to respond to a message *later* ("asynchronously", if you will). Our original API provided a *second* message handler interface called `nsq.AsyncHandler`. Unfortunately, it was never obvious from the name alone (or even the documentation) how to properly use this form. The API was needlessly complex, involving the garbage creation of wrapping structs to track state and respond to messages. We originally had the same problem in `pynsq`, our Python client library, and we were able to resolve the tension and expose an API that was robust and supported all use cases. The new `go-nsq` message handler interface exposes only `nsq.Handler`, and its `HandleMessage` method remains identical (specifically, `nsq.AsyncHandler` has been removed). Additionally, the API to configure handlers has been improved to provide better first-class support for common operations. We've added `AddConcurrentHandlers` (for quickly spawning multiple handler goroutines). For the most common use case, where you want `go-nsq` to respond to messages on your behalf, there are no changes required! 
In fact, we've made it even easier to implement the `nsq.Handler` interface for simple functions by providing the `nsq.HandlerFunc` type (in the spirit of the Go standard library's `http.HandlerFunc`):

```go
r, err := nsq.NewConsumer("test_topic", "test_channel", nsq.NewConfig())
if err != nil {
    log.Fatalf(err.Error())
}

r.AddHandler(nsq.HandlerFunc(func(m *nsq.Message) error {
    return doSomeWork(m)
}))

err = r.ConnectToNSQD(nsqdAddr)
if err != nil {
    log.Fatalf(err.Error())
}

<-r.StopChan
```

In the new API, we've made the `nsq.Message` struct more robust, giving it the ability to proxy responses. If you want to usurp control of the message from `go-nsq`, you simply call `msg.DisableAutoResponse()`.

This is effectively the same as if you had used `nsq.AsyncHandler`, only you don't need to manage `nsq.FinishedMessage` structs or implement a separate interface. Instead you just keep/pass references to the `nsq.Message` itself, and when you're ready to respond you call `msg.Finish()`, `msg.Requeue()` or `msg.Touch()`. Additionally, this means you can make this decision on a *per-message* basis rather than for the lifetime of the handler.

Here is an example:

```go
type myHandler struct {}

func (h *myHandler) HandleMessage(m *nsq.Message) error {
    m.DisableAutoResponse()
    workerChan <- m
    return nil
}

go func() {
    for m := range workerChan {
        err := doSomeWork(m)
        if err != nil {
            m.Requeue(-1)
            continue
        }
        m.Finish()
    }
}()

cfg := nsq.NewConfig()
cfg.MaxInFlight = 1000
r, err := nsq.NewConsumer("test_topic", "test_channel", cfg)
if err != nil {
    log.Fatalf(err.Error())
}
r.AddConcurrentHandlers(&myHandler{}, 20)

err = r.ConnectToNSQD(nsqdAddr)
if err != nil {
    log.Fatalf(err.Error())
}

<-r.StopChan
```

#### Requeue without backoff

As a side effect of the message handler restructuring above, it is now trivial to respond to a message without triggering a backoff state in `nsq.Consumer` (which was not possible in the previous API).
The `nsq.Message` type now has a `msg.RequeueWithoutBackoff()` method for this purpose.

#### Producer Error Handling

Previously, `Writer` (now `Producer`) returned a triplicate of `frameType`, `responseBody`, and `error` from calls to `*Publish`. This required the caller to check both `error` and `frameType` to confirm success.

`Producer` publish methods now return only `error`.

#### Logging

One of the challenges library implementors face is how to provide feedback via logging, while exposing an interface that follows the standard library and still provides a means to control and configure the output.

In the new API, we've provided a method on `Consumer` and `Producer` called `SetLogger` that takes an interface compatible with the Go standard library `log.Logger` (which can be instantiated via `log.New`) and a traditional log level integer `nsq.LogLevel{Debug,Info,Warning,Error}`:

    Output(maxdepth int, s string) error

This gives the user the flexibility to control the format, destination, and verbosity while still conforming to standard library logging conventions.

#### Misc.

Un-exported `NewDeadlineTransport` and `ApiRequest`, which never should have been exported in the first place.

`nsq.Message` serialization switched away from `binary.{Read,Write}` for performance and `nsq.Message` now implements the `io.WriterTo` interface.
================================================
FILE: vendor/github.com/bitly/go-nsq/api_request.go
================================================
package nsq

import (
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"net/http"
	"time"
)

// deadlinedConn wraps a net.Conn so that every Read and Write refreshes a
// rolling deadline, bounding how long any single I/O call may block.
type deadlinedConn struct {
	Timeout time.Duration
	net.Conn
}

func (c *deadlinedConn) Read(b []byte) (n int, err error) {
	c.Conn.SetReadDeadline(time.Now().Add(c.Timeout))
	return c.Conn.Read(b)
}

func (c *deadlinedConn) Write(b []byte) (n int, err error) {
	c.Conn.SetWriteDeadline(time.Now().Add(c.Timeout))
	return c.Conn.Write(b)
}

// newDeadlineTransport returns an http.Transport whose connections enforce
// the supplied timeout on the dial and on each subsequent read/write (via
// deadlinedConn).
func newDeadlineTransport(timeout time.Duration) *http.Transport {
	transport := &http.Transport{
		Dial: func(netw, addr string) (net.Conn, error) {
			c, err := net.DialTimeout(netw, addr, timeout)
			if err != nil {
				return nil, err
			}
			return &deadlinedConn{timeout, c}, nil
		},
	}
	return transport
}

// wrappedResp models the legacy (pre-v1) nsqd/nsqlookupd HTTP response
// envelope, where the payload of interest lives under "data".
type wrappedResp struct {
	Status     string      `json:"status_txt"`
	StatusCode int         `json:"status_code"`
	Data       interface{} `json:"data"`
}

// stores the result in the value pointed to by ret(must be a pointer)
func apiRequestNegotiateV1(method string, endpoint string, body io.Reader, ret interface{}) error {
	httpclient := &http.Client{Transport: newDeadlineTransport(2 * time.Second)}
	req, err := http.NewRequest(method, endpoint, body)
	if err != nil {
		return err
	}

	// Ask for the v1.0 API; the response header tells us which shape we got.
	req.Header.Add("Accept", "application/vnd.nsq; version=1.0")

	resp, err := httpclient.Do(req)
	if err != nil {
		return err
	}

	respBody, err := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	if err != nil {
		return err
	}

	if resp.StatusCode != 200 {
		return fmt.Errorf("got response %s %q", resp.Status, respBody)
	}

	// Treat an empty body as an empty JSON object so Unmarshal succeeds.
	if len(respBody) == 0 {
		respBody = []byte("{}")
	}

	if resp.Header.Get("X-NSQ-Content-Type") == "nsq; version=1.0" {
		// v1 endpoints return the payload directly.
		return json.Unmarshal(respBody, ret)
	}

	// Legacy endpoints wrap the payload; decode the envelope, routing the
	// "data" field into ret.
	wResp := &wrappedResp{
		Data: ret,
	}

	if err = json.Unmarshal(respBody, wResp); err != nil {
		return err
	}

	// wResp.StatusCode here is equal to resp.StatusCode, so ignore it
	return nil
}
================================================
FILE: vendor/github.com/bitly/go-nsq/command.go
================================================
package nsq

import (
	"bytes"
	"encoding/binary"
	"encoding/json"
	"fmt"
	"io"
	"strconv"
	"time"
)

var byteSpace = []byte(" ")
var byteNewLine = []byte("\n")

// Command represents a command from a client to an NSQ daemon
type Command struct {
	Name   []byte   // command verb, e.g. "PUB", "SUB", "FIN"
	Params [][]byte // space-separated parameters following the verb
	Body   []byte   // optional body, written length-prefixed on the wire
}

// String returns the name and parameters of the Command
func (c *Command) String() string {
	if len(c.Params) > 0 {
		return fmt.Sprintf("%s %s", c.Name, string(bytes.Join(c.Params, byteSpace)))
	}
	return string(c.Name)
}

// WriteTo implements the WriterTo interface and
// serializes the Command to the supplied Writer.
//
// It is suggested that the target Writer is buffered
// to avoid performing many system calls.
//
// Wire layout: NAME [SP PARAM]... LF, then, when Body is present, a 4-byte
// big-endian body length followed by the body bytes. Returns the total
// number of bytes written and the first write error encountered, if any.
func (c *Command) WriteTo(w io.Writer) (int64, error) {
	var total int64
	var buf [4]byte

	n, err := w.Write(c.Name)
	total += int64(n)
	if err != nil {
		return total, err
	}

	for _, param := range c.Params {
		n, err := w.Write(byteSpace)
		total += int64(n)
		if err != nil {
			return total, err
		}
		n, err = w.Write(param)
		total += int64(n)
		if err != nil {
			return total, err
		}
	}

	n, err = w.Write(byteNewLine)
	total += int64(n)
	if err != nil {
		return total, err
	}

	if c.Body != nil {
		// 4-byte big-endian length prefix, then the body itself.
		bufs := buf[:]
		binary.BigEndian.PutUint32(bufs, uint32(len(c.Body)))
		n, err := w.Write(bufs)
		total += int64(n)
		if err != nil {
			return total, err
		}
		n, err = w.Write(c.Body)
		total += int64(n)
		if err != nil {
			return total, err
		}
	}

	return total, nil
}

// Identify creates a new Command to provide information about the client. After connecting,
// it is generally the first message sent.
//
// The supplied map is marshaled into JSON to provide some flexibility
// for this command to evolve over time.
// // See http://nsq.io/clients/tcp_protocol_spec.html#identify for information // on the supported options func Identify(js map[string]interface{}) (*Command, error) { body, err := json.Marshal(js) if err != nil { return nil, err } return &Command{[]byte("IDENTIFY"), nil, body}, nil } // Auth sends credentials for authentication // // After `Identify`, this is usually the first message sent, if auth is used. func Auth(secret string) (*Command, error) { return &Command{[]byte("AUTH"), nil, []byte(secret)}, nil } // Register creates a new Command to add a topic/channel for the connected nsqd func Register(topic string, channel string) *Command { params := [][]byte{[]byte(topic)} if len(channel) > 0 { params = append(params, []byte(channel)) } return &Command{[]byte("REGISTER"), params, nil} } // UnRegister creates a new Command to remove a topic/channel for the connected nsqd func UnRegister(topic string, channel string) *Command { params := [][]byte{[]byte(topic)} if len(channel) > 0 { params = append(params, []byte(channel)) } return &Command{[]byte("UNREGISTER"), params, nil} } // Ping creates a new Command to keep-alive the state of all the // announced topic/channels for a given client func Ping() *Command { return &Command{[]byte("PING"), nil, nil} } // Publish creates a new Command to write a message to a given topic func Publish(topic string, body []byte) *Command { var params = [][]byte{[]byte(topic)} return &Command{[]byte("PUB"), params, body} } // DeferredPublish creates a new Command to write a message to a given topic // where the message will queue at the channel level until the timeout expires func DeferredPublish(topic string, delay time.Duration, body []byte) *Command { var params = [][]byte{[]byte(topic), []byte(strconv.Itoa(int(delay / time.Millisecond)))} return &Command{[]byte("DPUB"), params, body} } // MultiPublish creates a new Command to write more than one message to a given topic // (useful for high-throughput situations to avoid 
roundtrips and saturate the pipe) func MultiPublish(topic string, bodies [][]byte) (*Command, error) { var params = [][]byte{[]byte(topic)} num := uint32(len(bodies)) bodySize := 4 for _, b := range bodies { bodySize += len(b) + 4 } body := make([]byte, 0, bodySize) buf := bytes.NewBuffer(body) err := binary.Write(buf, binary.BigEndian, &num) if err != nil { return nil, err } for _, b := range bodies { err = binary.Write(buf, binary.BigEndian, int32(len(b))) if err != nil { return nil, err } _, err = buf.Write(b) if err != nil { return nil, err } } return &Command{[]byte("MPUB"), params, buf.Bytes()}, nil } // Subscribe creates a new Command to subscribe to the given topic/channel func Subscribe(topic string, channel string) *Command { var params = [][]byte{[]byte(topic), []byte(channel)} return &Command{[]byte("SUB"), params, nil} } // Ready creates a new Command to specify // the number of messages a client is willing to receive func Ready(count int) *Command { var params = [][]byte{[]byte(strconv.Itoa(count))} return &Command{[]byte("RDY"), params, nil} } // Finish creates a new Command to indiciate that // a given message (by id) has been processed successfully func Finish(id MessageID) *Command { var params = [][]byte{id[:]} return &Command{[]byte("FIN"), params, nil} } // Requeue creates a new Command to indicate that // a given message (by id) should be requeued after the given delay // NOTE: a delay of 0 indicates immediate requeue func Requeue(id MessageID, delay time.Duration) *Command { var params = [][]byte{id[:], []byte(strconv.Itoa(int(delay / time.Millisecond)))} return &Command{[]byte("REQ"), params, nil} } // Touch creates a new Command to reset the timeout for // a given message (by id) func Touch(id MessageID) *Command { var params = [][]byte{id[:]} return &Command{[]byte("TOUCH"), params, nil} } // StartClose creates a new Command to indicate that the // client would like to start a close cycle. 
nsqd will no longer // send messages to a client in this state and the client is expected // finish pending messages and close the connection func StartClose() *Command { return &Command{[]byte("CLS"), nil, nil} } // Nop creates a new Command that has no effect server side. // Commonly used to respond to heartbeats func Nop() *Command { return &Command{[]byte("NOP"), nil, nil} } ================================================ FILE: vendor/github.com/bitly/go-nsq/command_test.go ================================================ package nsq import ( "bytes" "testing" ) func BenchmarkCommand(b *testing.B) { b.StopTimer() data := make([]byte, 2048) cmd := Publish("test", data) var buf bytes.Buffer b.StartTimer() for i := 0; i < b.N; i++ { cmd.WriteTo(&buf) } } ================================================ FILE: vendor/github.com/bitly/go-nsq/config.go ================================================ package nsq import ( "crypto/tls" "crypto/x509" "errors" "fmt" "io/ioutil" "log" "math" "math/rand" "net" "os" "reflect" "strconv" "strings" "sync" "time" "unsafe" ) // Define handlers for setting config defaults, and setting config values from command line arguments or config files type configHandler interface { HandlesOption(c *Config, option string) bool Set(c *Config, option string, value interface{}) error Validate(c *Config) error } type defaultsHandler interface { SetDefaults(c *Config) error } // BackoffStrategy defines a strategy for calculating the duration of time // a consumer should backoff for a given attempt type BackoffStrategy interface { Calculate(attempt int) time.Duration } // ExponentialStrategy implements an exponential backoff strategy (default) type ExponentialStrategy struct { cfg *Config } // Calculate returns a duration of time: 2 ^ attempt func (s *ExponentialStrategy) Calculate(attempt int) time.Duration { backoffDuration := s.cfg.BackoffMultiplier * time.Duration(math.Pow(2, float64(attempt))) return backoffDuration } func (s 
*ExponentialStrategy) setConfig(cfg *Config) { s.cfg = cfg } // FullJitterStrategy implements http://www.awsarchitectureblog.com/2015/03/backoff.html type FullJitterStrategy struct { cfg *Config rngOnce sync.Once rng *rand.Rand } // Calculate returns a random duration of time [0, 2 ^ attempt] func (s *FullJitterStrategy) Calculate(attempt int) time.Duration { // lazily initialize the RNG s.rngOnce.Do(func() { if s.rng != nil { return } s.rng = rand.New(rand.NewSource(time.Now().UnixNano())) }) backoffDuration := s.cfg.BackoffMultiplier * time.Duration(math.Pow(2, float64(attempt))) return time.Duration(s.rng.Intn(int(backoffDuration))) } func (s *FullJitterStrategy) setConfig(cfg *Config) { s.cfg = cfg } // Config is a struct of NSQ options // // The only valid way to create a Config is via NewConfig, using a struct literal will panic. // After Config is passed into a high-level type (like Consumer, Producer, etc.) the values are no // longer mutable (they are copied). // // Use Set(option string, value interface{}) as an alternate way to set parameters type Config struct { initialized bool // used to Initialize, Validate configHandlers []configHandler DialTimeout time.Duration `opt:"dial_timeout" default:"1s"` // Deadlines for network reads and writes ReadTimeout time.Duration `opt:"read_timeout" min:"100ms" max:"5m" default:"60s"` WriteTimeout time.Duration `opt:"write_timeout" min:"100ms" max:"5m" default:"1s"` // LocalAddr is the local address to use when dialing an nsqd. // If empty, a local address is automatically chosen. LocalAddr net.Addr `opt:"local_addr"` // Duration between polling lookupd for new producers, and fractional jitter to add to // the lookupd pool loop. 
this helps evenly distribute requests even if multiple consumers // restart at the same time // // NOTE: when not using nsqlookupd, LookupdPollInterval represents the duration of time between // reconnection attempts LookupdPollInterval time.Duration `opt:"lookupd_poll_interval" min:"10ms" max:"5m" default:"60s"` LookupdPollJitter float64 `opt:"lookupd_poll_jitter" min:"0" max:"1" default:"0.3"` // Maximum duration when REQueueing (for doubling of deferred requeue) MaxRequeueDelay time.Duration `opt:"max_requeue_delay" min:"0" max:"60m" default:"15m"` DefaultRequeueDelay time.Duration `opt:"default_requeue_delay" min:"0" max:"60m" default:"90s"` // Backoff strategy, defaults to exponential backoff. Overwrite this to define alternative backoff algrithms. BackoffStrategy BackoffStrategy `opt:"backoff_strategy" default:"exponential"` // Maximum amount of time to backoff when processing fails 0 == no backoff MaxBackoffDuration time.Duration `opt:"max_backoff_duration" min:"0" max:"60m" default:"2m"` // Unit of time for calculating consumer backoff BackoffMultiplier time.Duration `opt:"backoff_multiplier" min:"0" max:"60m" default:"1s"` // Maximum number of times this consumer will attempt to process a message before giving up MaxAttempts uint16 `opt:"max_attempts" min:"0" max:"65535" default:"5"` // Duration to wait for a message from a producer when in a state where RDY // counts are re-distributed (ie. max_in_flight < num_producers) LowRdyIdleTimeout time.Duration `opt:"low_rdy_idle_timeout" min:"1s" max:"5m" default:"10s"` // Duration between redistributing max-in-flight to connections RDYRedistributeInterval time.Duration `opt:"rdy_redistribute_interval" min:"1ms" max:"5s" default:"5s"` // Identifiers sent to nsqd representing this client // UserAgent is in the spirit of HTTP (default: "/") ClientID string `opt:"client_id"` // (defaults: short hostname) Hostname string `opt:"hostname"` UserAgent string `opt:"user_agent"` // Duration of time between heartbeats. 
This must be less than ReadTimeout HeartbeatInterval time.Duration `opt:"heartbeat_interval" default:"30s"` // Integer percentage to sample the channel (requires nsqd 0.2.25+) SampleRate int32 `opt:"sample_rate" min:"0" max:"99"` // To set TLS config, use the following options: // // tls_v1 - Bool enable TLS negotiation // tls_root_ca_file - String path to file containing root CA // tls_insecure_skip_verify - Bool indicates whether this client should verify server certificates // tls_cert - String path to file containing public key for certificate // tls_key - String path to file containing private key for certificate // tls_min_version - String indicating the minimum version of tls acceptable ('ssl3.0', 'tls1.0', 'tls1.1', 'tls1.2') // TlsV1 bool `opt:"tls_v1"` TlsConfig *tls.Config `opt:"tls_config"` // Compression Settings Deflate bool `opt:"deflate"` DeflateLevel int `opt:"deflate_level" min:"1" max:"9" default:"6"` Snappy bool `opt:"snappy"` // Size of the buffer (in bytes) used by nsqd for buffering writes to this connection OutputBufferSize int64 `opt:"output_buffer_size" default:"16384"` // Timeout used by nsqd before flushing buffered writes (set to 0 to disable). // // WARNING: configuring clients with an extremely low // (< 25ms) output_buffer_timeout has a significant effect // on nsqd CPU usage (particularly with > 50 clients connected). OutputBufferTimeout time.Duration `opt:"output_buffer_timeout" default:"250ms"` // Maximum number of messages to allow in flight (concurrency knob) MaxInFlight int `opt:"max_in_flight" min:"0" default:"1"` // The server-side message timeout for messages delivered to this client MsgTimeout time.Duration `opt:"msg_timeout" min:"0"` // secret for nsqd authentication (requires nsqd 0.2.29+) AuthSecret string `opt:"auth_secret"` } // NewConfig returns a new default nsq configuration. // // This must be used to initialize Config structs. 
Values can be set directly, or through Config.Set() func NewConfig() *Config { c := &Config{ configHandlers: []configHandler{&structTagsConfig{}, &tlsConfig{}}, initialized: true, } if err := c.setDefaults(); err != nil { panic(err.Error()) } return c } // Set takes an option as a string and a value as an interface and // attempts to set the appropriate configuration option. // // It attempts to coerce the value into the right format depending on the named // option and the underlying type of the value passed in. // // Calls to Set() that take a time.Duration as an argument can be input as: // // "1000ms" (a string parsed by time.ParseDuration()) // 1000 (an integer interpreted as milliseconds) // 1000*time.Millisecond (a literal time.Duration value) // // Calls to Set() that take bool can be input as: // // "true" (a string parsed by strconv.ParseBool()) // true (a boolean) // 1 (an int where 1 == true and 0 == false) // // It returns an error for an invalid option or value. func (c *Config) Set(option string, value interface{}) error { c.assertInitialized() option = strings.Replace(option, "-", "_", -1) for _, h := range c.configHandlers { if h.HandlesOption(c, option) { return h.Set(c, option, value) } } return fmt.Errorf("invalid option %s", option) } func (c *Config) assertInitialized() { if !c.initialized { panic("Config{} must be created with NewConfig()") } } // Validate checks that all values are within specified min/max ranges func (c *Config) Validate() error { c.assertInitialized() for _, h := range c.configHandlers { if err := h.Validate(c); err != nil { return err } } return nil } func (c *Config) setDefaults() error { for _, h := range c.configHandlers { hh, ok := h.(defaultsHandler) if ok { if err := hh.SetDefaults(c); err != nil { return err } } } return nil } type structTagsConfig struct{} // Handle options that are listed in StructTags func (h *structTagsConfig) HandlesOption(c *Config, option string) bool { val := reflect.ValueOf(c).Elem() typ 
:= val.Type() for i := 0; i < typ.NumField(); i++ { field := typ.Field(i) opt := field.Tag.Get("opt") if opt == option { return true } } return false } // Set values based on parameters in StructTags func (h *structTagsConfig) Set(c *Config, option string, value interface{}) error { val := reflect.ValueOf(c).Elem() typ := val.Type() for i := 0; i < typ.NumField(); i++ { field := typ.Field(i) opt := field.Tag.Get("opt") if option != opt { continue } min := field.Tag.Get("min") max := field.Tag.Get("max") fieldVal := val.FieldByName(field.Name) dest := unsafeValueOf(fieldVal) coercedVal, err := coerce(value, field.Type) if err != nil { return fmt.Errorf("failed to coerce option %s (%v) - %s", option, value, err) } if min != "" { coercedMinVal, _ := coerce(min, field.Type) if valueCompare(coercedVal, coercedMinVal) == -1 { return fmt.Errorf("invalid %s ! %v < %v", option, coercedVal.Interface(), coercedMinVal.Interface()) } } if max != "" { coercedMaxVal, _ := coerce(max, field.Type) if valueCompare(coercedVal, coercedMaxVal) == 1 { return fmt.Errorf("invalid %s ! 
%v > %v", option, coercedVal.Interface(), coercedMaxVal.Interface()) } } if coercedVal.Type().String() == "nsq.BackoffStrategy" { v := coercedVal.Interface().(BackoffStrategy) if v, ok := v.(interface { setConfig(*Config) }); ok { v.setConfig(c) } } dest.Set(coercedVal) return nil } return fmt.Errorf("unknown option %s", option) } func (h *structTagsConfig) SetDefaults(c *Config) error { val := reflect.ValueOf(c).Elem() typ := val.Type() for i := 0; i < typ.NumField(); i++ { field := typ.Field(i) opt := field.Tag.Get("opt") defaultVal := field.Tag.Get("default") if defaultVal == "" || opt == "" { continue } if err := c.Set(opt, defaultVal); err != nil { return err } } hostname, err := os.Hostname() if err != nil { log.Fatalf("ERROR: unable to get hostname %s", err.Error()) } c.ClientID = strings.Split(hostname, ".")[0] c.Hostname = hostname c.UserAgent = fmt.Sprintf("go-nsq/%s", VERSION) return nil } func (h *structTagsConfig) Validate(c *Config) error { val := reflect.ValueOf(c).Elem() typ := val.Type() for i := 0; i < typ.NumField(); i++ { field := typ.Field(i) min := field.Tag.Get("min") max := field.Tag.Get("max") if min == "" && max == "" { continue } value := val.FieldByName(field.Name) if min != "" { coercedMinVal, _ := coerce(min, field.Type) if valueCompare(value, coercedMinVal) == -1 { return fmt.Errorf("invalid %s ! %v < %v", field.Name, value.Interface(), coercedMinVal.Interface()) } } if max != "" { coercedMaxVal, _ := coerce(max, field.Type) if valueCompare(value, coercedMaxVal) == 1 { return fmt.Errorf("invalid %s ! 
%v > %v", field.Name, value.Interface(), coercedMaxVal.Interface()) } } } if c.HeartbeatInterval > c.ReadTimeout { return fmt.Errorf("HeartbeatInterval %v must be less than ReadTimeout %v", c.HeartbeatInterval, c.ReadTimeout) } return nil } // Parsing for higher order TLS settings type tlsConfig struct { certFile string keyFile string } func (t *tlsConfig) HandlesOption(c *Config, option string) bool { switch option { case "tls_root_ca_file", "tls_insecure_skip_verify", "tls_cert", "tls_key", "tls_min_version": return true } return false } func (t *tlsConfig) Set(c *Config, option string, value interface{}) error { if c.TlsConfig == nil { c.TlsConfig = &tls.Config{ MinVersion: tls.VersionTLS10, MaxVersion: tls.VersionTLS12, // enable TLS_FALLBACK_SCSV prior to Go 1.5: https://go-review.googlesource.com/#/c/1776/ } } val := reflect.ValueOf(c.TlsConfig).Elem() switch option { case "tls_cert", "tls_key": if option == "tls_cert" { t.certFile = value.(string) } else { t.keyFile = value.(string) } if t.certFile != "" && t.keyFile != "" && len(c.TlsConfig.Certificates) == 0 { cert, err := tls.LoadX509KeyPair(t.certFile, t.keyFile) if err != nil { return err } c.TlsConfig.Certificates = []tls.Certificate{cert} } return nil case "tls_root_ca_file": filename, ok := value.(string) if !ok { return fmt.Errorf("ERROR: %v is not a string", value) } tlsCertPool := x509.NewCertPool() caCertFile, err := ioutil.ReadFile(filename) if err != nil { return fmt.Errorf("ERROR: failed to read custom Certificate Authority file %s", err) } if !tlsCertPool.AppendCertsFromPEM(caCertFile) { return fmt.Errorf("ERROR: failed to append certificates from Certificate Authority file") } c.TlsConfig.RootCAs = tlsCertPool return nil case "tls_insecure_skip_verify": fieldVal := val.FieldByName("InsecureSkipVerify") dest := unsafeValueOf(fieldVal) coercedVal, err := coerce(value, fieldVal.Type()) if err != nil { return fmt.Errorf("failed to coerce option %s (%v) - %s", option, value, err) } 
dest.Set(coercedVal) return nil case "tls_min_version": version, ok := value.(string) if !ok { return fmt.Errorf("ERROR: %v is not a string", value) } switch version { case "ssl3.0": c.TlsConfig.MinVersion = tls.VersionSSL30 case "tls1.0": c.TlsConfig.MinVersion = tls.VersionTLS10 case "tls1.1": c.TlsConfig.MinVersion = tls.VersionTLS11 case "tls1.2": c.TlsConfig.MinVersion = tls.VersionTLS12 default: return fmt.Errorf("ERROR: %v is not a tls version", value) } return nil } return fmt.Errorf("unknown option %s", option) } func (t *tlsConfig) Validate(c *Config) error { return nil } // because Config contains private structs we can't use reflect.Value // directly, instead we need to "unsafely" address the variable func unsafeValueOf(val reflect.Value) reflect.Value { uptr := unsafe.Pointer(val.UnsafeAddr()) return reflect.NewAt(val.Type(), uptr).Elem() } func valueCompare(v1 reflect.Value, v2 reflect.Value) int { switch v1.Type().String() { case "int", "int16", "int32", "int64": if v1.Int() > v2.Int() { return 1 } else if v1.Int() < v2.Int() { return -1 } return 0 case "uint", "uint16", "uint32", "uint64": if v1.Uint() > v2.Uint() { return 1 } else if v1.Uint() < v2.Uint() { return -1 } return 0 case "float32", "float64": if v1.Float() > v2.Float() { return 1 } else if v1.Float() < v2.Float() { return -1 } return 0 case "time.Duration": if v1.Interface().(time.Duration) > v2.Interface().(time.Duration) { return 1 } else if v1.Interface().(time.Duration) < v2.Interface().(time.Duration) { return -1 } return 0 } panic("impossible") } func coerce(v interface{}, typ reflect.Type) (reflect.Value, error) { var err error if typ.Kind() == reflect.Ptr { return reflect.ValueOf(v), nil } switch typ.String() { case "string": v, err = coerceString(v) case "int", "int16", "int32", "int64": v, err = coerceInt64(v) case "uint", "uint16", "uint32", "uint64": v, err = coerceUint64(v) case "float32", "float64": v, err = coerceFloat64(v) case "bool": v, err = coerceBool(v) case 
"time.Duration": v, err = coerceDuration(v) case "net.Addr": v, err = coerceAddr(v) case "nsq.BackoffStrategy": v, err = coerceBackoffStrategy(v) default: v = nil err = fmt.Errorf("invalid type %s", typ.String()) } return valueTypeCoerce(v, typ), err } func valueTypeCoerce(v interface{}, typ reflect.Type) reflect.Value { val := reflect.ValueOf(v) if reflect.TypeOf(v) == typ { return val } tval := reflect.New(typ).Elem() switch typ.String() { case "int", "int16", "int32", "int64": tval.SetInt(val.Int()) case "uint", "uint16", "uint32", "uint64": tval.SetUint(val.Uint()) case "float32", "float64": tval.SetFloat(val.Float()) default: tval.Set(val) } return tval } func coerceString(v interface{}) (string, error) { switch v := v.(type) { case string: return v, nil case int, int16, int32, int64, uint, uint16, uint32, uint64: return fmt.Sprintf("%d", v), nil case float32, float64: return fmt.Sprintf("%f", v), nil } return fmt.Sprintf("%s", v), nil } func coerceDuration(v interface{}) (time.Duration, error) { switch v := v.(type) { case string: return time.ParseDuration(v) case int, int16, int32, int64: // treat like ms return time.Duration(reflect.ValueOf(v).Int()) * time.Millisecond, nil case uint, uint16, uint32, uint64: // treat like ms return time.Duration(reflect.ValueOf(v).Uint()) * time.Millisecond, nil case time.Duration: return v, nil } return 0, errors.New("invalid value type") } func coerceAddr(v interface{}) (net.Addr, error) { switch v := v.(type) { case string: return net.ResolveTCPAddr("tcp", v) case net.Addr: return v, nil } return nil, errors.New("invalid value type") } func coerceBackoffStrategy(v interface{}) (BackoffStrategy, error) { switch v := v.(type) { case string: switch v { case "", "exponential": return &ExponentialStrategy{}, nil case "full_jitter": return &FullJitterStrategy{}, nil } case BackoffStrategy: return v, nil } return nil, errors.New("invalid value type") } func coerceBool(v interface{}) (bool, error) { switch v := v.(type) { case 
bool: return v, nil case string: return strconv.ParseBool(v) case int, int16, int32, int64: return reflect.ValueOf(v).Int() != 0, nil case uint, uint16, uint32, uint64: return reflect.ValueOf(v).Uint() != 0, nil } return false, errors.New("invalid value type") } func coerceFloat64(v interface{}) (float64, error) { switch v := v.(type) { case string: return strconv.ParseFloat(v, 64) case int, int16, int32, int64: return float64(reflect.ValueOf(v).Int()), nil case uint, uint16, uint32, uint64: return float64(reflect.ValueOf(v).Uint()), nil case float32: return float64(v), nil case float64: return v, nil } return 0, errors.New("invalid value type") } func coerceInt64(v interface{}) (int64, error) { switch v := v.(type) { case string: return strconv.ParseInt(v, 10, 64) case int, int16, int32, int64: return reflect.ValueOf(v).Int(), nil case uint, uint16, uint32, uint64: return int64(reflect.ValueOf(v).Uint()), nil } return 0, errors.New("invalid value type") } func coerceUint64(v interface{}) (uint64, error) { switch v := v.(type) { case string: return strconv.ParseUint(v, 10, 64) case int, int16, int32, int64: return uint64(reflect.ValueOf(v).Int()), nil case uint, uint16, uint32, uint64: return reflect.ValueOf(v).Uint(), nil } return 0, errors.New("invalid value type") } ================================================ FILE: vendor/github.com/bitly/go-nsq/config_test.go ================================================ package nsq import ( "math/rand" "net" "reflect" "testing" "time" ) func TestConfigSet(t *testing.T) { c := NewConfig() if err := c.Set("not a real config value", struct{}{}); err == nil { t.Error("No error when setting an invalid value") } if err := c.Set("tls_v1", "lol"); err == nil { t.Error("No error when setting `tls_v1` to an invalid value") } if err := c.Set("tls_v1", true); err != nil { t.Errorf("Error setting `tls_v1` config. 
%s", err) } if err := c.Set("tls-insecure-skip-verify", true); err != nil { t.Errorf("Error setting `tls-insecure-skip-verify` config. %v", err) } if c.TlsConfig.InsecureSkipVerify != true { t.Errorf("Error setting `tls-insecure-skip-verify` config: %v", c.TlsConfig) } if err := c.Set("tls-min-version", "tls1.2"); err != nil { t.Errorf("Error setting `tls-min-version` config: %s", err) } if err := c.Set("tls-min-version", "tls1.3"); err == nil { t.Error("No error when setting `tls-min-version` to an invalid value") } if err := c.Set("local_addr", &net.TCPAddr{}); err != nil { t.Errorf("Error setting `local_addr` config: %s", err) } if err := c.Set("local_addr", "1.2.3.4:27015"); err != nil { t.Errorf("Error setting `local_addr` config: %s", err) } if err := c.Set("dial_timeout", "5s"); err != nil { t.Errorf("Error setting `dial_timeout` config: %s", err) } if c.LocalAddr.String() != "1.2.3.4:27015" { t.Error("Failed to assign `local_addr` config") } if reflect.ValueOf(c.BackoffStrategy).Type().String() != "*nsq.ExponentialStrategy" { t.Error("Failed to set default `exponential` backoff strategy") } if err := c.Set("backoff_strategy", "full_jitter"); err != nil { t.Errorf("Failed to assign `backoff_strategy` config: %v", err) } if reflect.ValueOf(c.BackoffStrategy).Type().String() != "*nsq.FullJitterStrategy" { t.Error("Failed to set `full_jitter` backoff strategy") } } func TestConfigValidate(t *testing.T) { c := NewConfig() if err := c.Validate(); err != nil { t.Error("initialized config is invalid") } c.DeflateLevel = 100 if err := c.Validate(); err == nil { t.Error("no error set for invalid value") } } func TestExponentialBackoff(t *testing.T) { expected := []time.Duration{ 1 * time.Second, 2 * time.Second, 8 * time.Second, 32 * time.Second, } backoffTest(t, expected, func(c *Config) BackoffStrategy { return &ExponentialStrategy{cfg: c} }) } func TestFullJitterBackoff(t *testing.T) { expected := []time.Duration{ 566028617 * time.Nanosecond, 1365407263 * 
time.Nanosecond, 5232470547 * time.Nanosecond, 21467499218 * time.Nanosecond, } backoffTest(t, expected, func(c *Config) BackoffStrategy { return &FullJitterStrategy{cfg: c, rng: rand.New(rand.NewSource(99))} }) } func backoffTest(t *testing.T, expected []time.Duration, cb func(c *Config) BackoffStrategy) { config := NewConfig() attempts := []int{0, 1, 3, 5} s := cb(config) for i := range attempts { result := s.Calculate(attempts[i]) if result != expected[i] { t.Fatalf("wrong backoff duration %v for attempt %d (should be %v)", result, attempts[i], expected[i]) } } } ================================================ FILE: vendor/github.com/bitly/go-nsq/conn.go ================================================ package nsq import ( "bufio" "bytes" "compress/flate" "crypto/tls" "encoding/json" "errors" "fmt" "io" "net" "strings" "sync" "sync/atomic" "time" "github.com/mreiferson/go-snappystream" ) // IdentifyResponse represents the metadata // returned from an IDENTIFY command to nsqd type IdentifyResponse struct { MaxRdyCount int64 `json:"max_rdy_count"` TLSv1 bool `json:"tls_v1"` Deflate bool `json:"deflate"` Snappy bool `json:"snappy"` AuthRequired bool `json:"auth_required"` } // AuthResponse represents the metadata // returned from an AUTH command to nsqd type AuthResponse struct { Identity string `json:"identity"` IdentityUrl string `json:"identity_url"` PermissionCount int64 `json:"permission_count"` } type msgResponse struct { msg *Message cmd *Command success bool backoff bool } // Conn represents a connection to nsqd // // Conn exposes a set of callbacks for the // various events that occur on a connection type Conn struct { // 64bit atomic vars need to be first for proper alignment on 32bit platforms messagesInFlight int64 maxRdyCount int64 rdyCount int64 lastRdyCount int64 lastMsgTimestamp int64 mtx sync.Mutex config *Config conn *net.TCPConn tlsConn *tls.Conn addr string delegate ConnDelegate logger logger logLvl LogLevel logFmt string logGuard sync.RWMutex 
r io.Reader w io.Writer cmdChan chan *Command msgResponseChan chan *msgResponse exitChan chan int drainReady chan int closeFlag int32 stopper sync.Once wg sync.WaitGroup readLoopRunning int32 } // NewConn returns a new Conn instance func NewConn(addr string, config *Config, delegate ConnDelegate) *Conn { if !config.initialized { panic("Config must be created with NewConfig()") } return &Conn{ addr: addr, config: config, delegate: delegate, maxRdyCount: 2500, lastMsgTimestamp: time.Now().UnixNano(), cmdChan: make(chan *Command), msgResponseChan: make(chan *msgResponse), exitChan: make(chan int), drainReady: make(chan int), } } // SetLogger assigns the logger to use as well as a level. // // The format parameter is expected to be a printf compatible string with // a single %s argument. This is useful if you want to provide additional // context to the log messages that the connection will print, the default // is '(%s)'. // // The logger parameter is an interface that requires the following // method to be implemented (such as the the stdlib log.Logger): // // Output(calldepth int, s string) // func (c *Conn) SetLogger(l logger, lvl LogLevel, format string) { c.logGuard.Lock() defer c.logGuard.Unlock() c.logger = l c.logLvl = lvl c.logFmt = format if c.logFmt == "" { c.logFmt = "(%s)" } } func (c *Conn) getLogger() (logger, LogLevel, string) { c.logGuard.RLock() defer c.logGuard.RUnlock() return c.logger, c.logLvl, c.logFmt } // Connect dials and bootstraps the nsqd connection // (including IDENTIFY) and returns the IdentifyResponse func (c *Conn) Connect() (*IdentifyResponse, error) { dialer := &net.Dialer{ LocalAddr: c.config.LocalAddr, Timeout: c.config.DialTimeout, } conn, err := dialer.Dial("tcp", c.addr) if err != nil { return nil, err } c.conn = conn.(*net.TCPConn) c.r = conn c.w = conn _, err = c.Write(MagicV2) if err != nil { c.Close() return nil, fmt.Errorf("[%s] failed to write magic - %s", c.addr, err) } resp, err := c.identify() if err != nil { return 
nil, err } if resp != nil && resp.AuthRequired { if c.config.AuthSecret == "" { c.log(LogLevelError, "Auth Required") return nil, errors.New("Auth Required") } err := c.auth(c.config.AuthSecret) if err != nil { c.log(LogLevelError, "Auth Failed %s", err) return nil, err } } c.wg.Add(2) atomic.StoreInt32(&c.readLoopRunning, 1) go c.readLoop() go c.writeLoop() return resp, nil } // Close idempotently initiates connection close func (c *Conn) Close() error { atomic.StoreInt32(&c.closeFlag, 1) if c.conn != nil && atomic.LoadInt64(&c.messagesInFlight) == 0 { return c.conn.CloseRead() } return nil } // IsClosing indicates whether or not the // connection is currently in the processing of // gracefully closing func (c *Conn) IsClosing() bool { return atomic.LoadInt32(&c.closeFlag) == 1 } // RDY returns the current RDY count func (c *Conn) RDY() int64 { return atomic.LoadInt64(&c.rdyCount) } // LastRDY returns the previously set RDY count func (c *Conn) LastRDY() int64 { return atomic.LoadInt64(&c.lastRdyCount) } // SetRDY stores the specified RDY count func (c *Conn) SetRDY(rdy int64) { atomic.StoreInt64(&c.rdyCount, rdy) atomic.StoreInt64(&c.lastRdyCount, rdy) } // MaxRDY returns the nsqd negotiated maximum // RDY count that it will accept for this connection func (c *Conn) MaxRDY() int64 { return c.maxRdyCount } // LastMessageTime returns a time.Time representing // the time at which the last message was received func (c *Conn) LastMessageTime() time.Time { return time.Unix(0, atomic.LoadInt64(&c.lastMsgTimestamp)) } // RemoteAddr returns the configured destination nsqd address func (c *Conn) RemoteAddr() net.Addr { return c.conn.RemoteAddr() } // String returns the fully-qualified address func (c *Conn) String() string { return c.addr } // Read performs a deadlined read on the underlying TCP connection func (c *Conn) Read(p []byte) (int, error) { c.conn.SetReadDeadline(time.Now().Add(c.config.ReadTimeout)) return c.r.Read(p) } // Write performs a deadlined write on the 
underlying TCP connection func (c *Conn) Write(p []byte) (int, error) { c.conn.SetWriteDeadline(time.Now().Add(c.config.WriteTimeout)) return c.w.Write(p) } // WriteCommand is a goroutine safe method to write a Command // to this connection, and flush. func (c *Conn) WriteCommand(cmd *Command) error { c.mtx.Lock() _, err := cmd.WriteTo(c) if err != nil { goto exit } err = c.Flush() exit: c.mtx.Unlock() if err != nil { c.log(LogLevelError, "IO error - %s", err) c.delegate.OnIOError(c, err) } return err } type flusher interface { Flush() error } // Flush writes all buffered data to the underlying TCP connection func (c *Conn) Flush() error { if f, ok := c.w.(flusher); ok { return f.Flush() } return nil } func (c *Conn) identify() (*IdentifyResponse, error) { ci := make(map[string]interface{}) ci["client_id"] = c.config.ClientID ci["hostname"] = c.config.Hostname ci["user_agent"] = c.config.UserAgent ci["short_id"] = c.config.ClientID // deprecated ci["long_id"] = c.config.Hostname // deprecated ci["tls_v1"] = c.config.TlsV1 ci["deflate"] = c.config.Deflate ci["deflate_level"] = c.config.DeflateLevel ci["snappy"] = c.config.Snappy ci["feature_negotiation"] = true if c.config.HeartbeatInterval == -1 { ci["heartbeat_interval"] = -1 } else { ci["heartbeat_interval"] = int64(c.config.HeartbeatInterval / time.Millisecond) } ci["sample_rate"] = c.config.SampleRate ci["output_buffer_size"] = c.config.OutputBufferSize if c.config.OutputBufferTimeout == -1 { ci["output_buffer_timeout"] = -1 } else { ci["output_buffer_timeout"] = int64(c.config.OutputBufferTimeout / time.Millisecond) } ci["msg_timeout"] = int64(c.config.MsgTimeout / time.Millisecond) cmd, err := Identify(ci) if err != nil { return nil, ErrIdentify{err.Error()} } err = c.WriteCommand(cmd) if err != nil { return nil, ErrIdentify{err.Error()} } frameType, data, err := ReadUnpackedResponse(c) if err != nil { return nil, ErrIdentify{err.Error()} } if frameType == FrameTypeError { return nil, 
ErrIdentify{string(data)} } // check to see if the server was able to respond w/ capabilities // i.e. it was a JSON response if data[0] != '{' { return nil, nil } resp := &IdentifyResponse{} err = json.Unmarshal(data, resp) if err != nil { return nil, ErrIdentify{err.Error()} } c.log(LogLevelDebug, "IDENTIFY response: %+v", resp) c.maxRdyCount = resp.MaxRdyCount if resp.TLSv1 { c.log(LogLevelInfo, "upgrading to TLS") err := c.upgradeTLS(c.config.TlsConfig) if err != nil { return nil, ErrIdentify{err.Error()} } } if resp.Deflate { c.log(LogLevelInfo, "upgrading to Deflate") err := c.upgradeDeflate(c.config.DeflateLevel) if err != nil { return nil, ErrIdentify{err.Error()} } } if resp.Snappy { c.log(LogLevelInfo, "upgrading to Snappy") err := c.upgradeSnappy() if err != nil { return nil, ErrIdentify{err.Error()} } } // now that connection is bootstrapped, enable read buffering // (and write buffering if it's not already capable of Flush()) c.r = bufio.NewReader(c.r) if _, ok := c.w.(flusher); !ok { c.w = bufio.NewWriter(c.w) } return resp, nil } func (c *Conn) upgradeTLS(tlsConf *tls.Config) error { // create a local copy of the config to set ServerName for this connection var conf tls.Config if tlsConf != nil { conf = *tlsConf } host, _, err := net.SplitHostPort(c.addr) if err != nil { return err } conf.ServerName = host c.tlsConn = tls.Client(c.conn, &conf) err = c.tlsConn.Handshake() if err != nil { return err } c.r = c.tlsConn c.w = c.tlsConn frameType, data, err := ReadUnpackedResponse(c) if err != nil { return err } if frameType != FrameTypeResponse || !bytes.Equal(data, []byte("OK")) { return errors.New("invalid response from TLS upgrade") } return nil } func (c *Conn) upgradeDeflate(level int) error { conn := net.Conn(c.conn) if c.tlsConn != nil { conn = c.tlsConn } fw, _ := flate.NewWriter(conn, level) c.r = flate.NewReader(conn) c.w = fw frameType, data, err := ReadUnpackedResponse(c) if err != nil { return err } if frameType != FrameTypeResponse || 
!bytes.Equal(data, []byte("OK")) { return errors.New("invalid response from Deflate upgrade") } return nil } func (c *Conn) upgradeSnappy() error { conn := net.Conn(c.conn) if c.tlsConn != nil { conn = c.tlsConn } c.r = snappystream.NewReader(conn, snappystream.SkipVerifyChecksum) c.w = snappystream.NewWriter(conn) frameType, data, err := ReadUnpackedResponse(c) if err != nil { return err } if frameType != FrameTypeResponse || !bytes.Equal(data, []byte("OK")) { return errors.New("invalid response from Snappy upgrade") } return nil } func (c *Conn) auth(secret string) error { cmd, err := Auth(secret) if err != nil { return err } err = c.WriteCommand(cmd) if err != nil { return err } frameType, data, err := ReadUnpackedResponse(c) if err != nil { return err } if frameType == FrameTypeError { return errors.New("Error authenticating " + string(data)) } resp := &AuthResponse{} err = json.Unmarshal(data, resp) if err != nil { return err } c.log(LogLevelInfo, "Auth accepted. Identity: %q %s Permissions: %d", resp.Identity, resp.IdentityUrl, resp.PermissionCount) return nil } func (c *Conn) readLoop() { delegate := &connMessageDelegate{c} for { if atomic.LoadInt32(&c.closeFlag) == 1 { goto exit } frameType, data, err := ReadUnpackedResponse(c) if err != nil { if !strings.Contains(err.Error(), "use of closed network connection") { c.log(LogLevelError, "IO error - %s", err) c.delegate.OnIOError(c, err) } goto exit } if frameType == FrameTypeResponse && bytes.Equal(data, []byte("_heartbeat_")) { c.log(LogLevelDebug, "heartbeat received") c.delegate.OnHeartbeat(c) err := c.WriteCommand(Nop()) if err != nil { c.log(LogLevelError, "IO error - %s", err) c.delegate.OnIOError(c, err) goto exit } continue } switch frameType { case FrameTypeResponse: c.delegate.OnResponse(c, data) case FrameTypeMessage: msg, err := DecodeMessage(data) if err != nil { c.log(LogLevelError, "IO error - %s", err) c.delegate.OnIOError(c, err) goto exit } msg.Delegate = delegate msg.NSQDAddress = 
c.String() atomic.AddInt64(&c.rdyCount, -1) atomic.AddInt64(&c.messagesInFlight, 1) atomic.StoreInt64(&c.lastMsgTimestamp, time.Now().UnixNano()) c.delegate.OnMessage(c, msg) case FrameTypeError: c.log(LogLevelError, "protocol error - %s", data) c.delegate.OnError(c, data) default: c.log(LogLevelError, "IO error - %s", err) c.delegate.OnIOError(c, fmt.Errorf("unknown frame type %d", frameType)) } } exit: atomic.StoreInt32(&c.readLoopRunning, 0) // start the connection close messagesInFlight := atomic.LoadInt64(&c.messagesInFlight) if messagesInFlight == 0 { // if we exited readLoop with no messages in flight // we need to explicitly trigger the close because // writeLoop won't c.close() } else { c.log(LogLevelWarning, "delaying close, %d outstanding messages", messagesInFlight) } c.wg.Done() c.log(LogLevelInfo, "readLoop exiting") } func (c *Conn) writeLoop() { for { select { case <-c.exitChan: c.log(LogLevelInfo, "breaking out of writeLoop") // Indicate drainReady because we will not pull any more off msgResponseChan close(c.drainReady) goto exit case cmd := <-c.cmdChan: err := c.WriteCommand(cmd) if err != nil { c.log(LogLevelError, "error sending command %s - %s", cmd, err) c.close() continue } case resp := <-c.msgResponseChan: // Decrement this here so it is correct even if we can't respond to nsqd msgsInFlight := atomic.AddInt64(&c.messagesInFlight, -1) if resp.success { c.log(LogLevelDebug, "FIN %s", resp.msg.ID) c.delegate.OnMessageFinished(c, resp.msg) c.delegate.OnResume(c) } else { c.log(LogLevelDebug, "REQ %s", resp.msg.ID) c.delegate.OnMessageRequeued(c, resp.msg) if resp.backoff { c.delegate.OnBackoff(c) } else { c.delegate.OnContinue(c) } } err := c.WriteCommand(resp.cmd) if err != nil { c.log(LogLevelError, "error sending command %s - %s", resp.cmd, err) c.close() continue } if msgsInFlight == 0 && atomic.LoadInt32(&c.closeFlag) == 1 { c.close() continue } } } exit: c.wg.Done() c.log(LogLevelInfo, "writeLoop exiting") } func (c *Conn) close() { // a 
"clean" connection close is orchestrated as follows: // // 1. CLOSE cmd sent to nsqd // 2. CLOSE_WAIT response received from nsqd // 3. set c.closeFlag // 4. readLoop() exits // a. if messages-in-flight > 0 delay close() // i. writeLoop() continues receiving on c.msgResponseChan chan // x. when messages-in-flight == 0 call close() // b. else call close() immediately // 5. c.exitChan close // a. writeLoop() exits // i. c.drainReady close // 6a. launch cleanup() goroutine (we're racing with intraprocess // routed messages, see comments below) // a. wait on c.drainReady // b. loop and receive on c.msgResponseChan chan // until messages-in-flight == 0 // i. ensure that readLoop has exited // 6b. launch waitForCleanup() goroutine // b. wait on waitgroup (covers readLoop() and writeLoop() // and cleanup goroutine) // c. underlying TCP connection close // d. trigger Delegate OnClose() // c.stopper.Do(func() { c.log(LogLevelInfo, "beginning close") close(c.exitChan) c.conn.CloseRead() c.wg.Add(1) go c.cleanup() go c.waitForCleanup() }) } func (c *Conn) cleanup() { <-c.drainReady ticker := time.NewTicker(100 * time.Millisecond) lastWarning := time.Now() // writeLoop has exited, drain any remaining in flight messages for { // we're racing with readLoop which potentially has a message // for handling so infinitely loop until messagesInFlight == 0 // and readLoop has exited var msgsInFlight int64 select { case <-c.msgResponseChan: msgsInFlight = atomic.AddInt64(&c.messagesInFlight, -1) case <-ticker.C: msgsInFlight = atomic.LoadInt64(&c.messagesInFlight) } if msgsInFlight > 0 { if time.Now().Sub(lastWarning) > time.Second { c.log(LogLevelWarning, "draining... waiting for %d messages in flight", msgsInFlight) lastWarning = time.Now() } continue } // until the readLoop has exited we cannot be sure that there // still won't be a race if atomic.LoadInt32(&c.readLoopRunning) == 1 { if time.Now().Sub(lastWarning) > time.Second { c.log(LogLevelWarning, "draining... 
readLoop still running") lastWarning = time.Now() } continue } goto exit } exit: ticker.Stop() c.wg.Done() c.log(LogLevelInfo, "finished draining, cleanup exiting") } func (c *Conn) waitForCleanup() { // this blocks until readLoop and writeLoop // (and cleanup goroutine above) have exited c.wg.Wait() c.conn.CloseWrite() c.log(LogLevelInfo, "clean close complete") c.delegate.OnClose(c) } func (c *Conn) onMessageFinish(m *Message) { c.msgResponseChan <- &msgResponse{msg: m, cmd: Finish(m.ID), success: true} } func (c *Conn) onMessageRequeue(m *Message, delay time.Duration, backoff bool) { if delay == -1 { // linear delay delay = c.config.DefaultRequeueDelay * time.Duration(m.Attempts) // bound the requeueDelay to configured max if delay > c.config.MaxRequeueDelay { delay = c.config.MaxRequeueDelay } } c.msgResponseChan <- &msgResponse{msg: m, cmd: Requeue(m.ID, delay), success: false, backoff: backoff} } func (c *Conn) onMessageTouch(m *Message) { select { case c.cmdChan <- Touch(m.ID): case <-c.exitChan: } } func (c *Conn) log(lvl LogLevel, line string, args ...interface{}) { logger, logLvl, logFmt := c.getLogger() if logger == nil { return } if logLvl > lvl { return } logger.Output(2, fmt.Sprintf("%-4s %s %s", lvl, fmt.Sprintf(logFmt, c.String()), fmt.Sprintf(line, args...))) } ================================================ FILE: vendor/github.com/bitly/go-nsq/consumer.go ================================================ package nsq import ( "bytes" "errors" "fmt" "log" "math" "math/rand" "net" "net/url" "os" "strconv" "strings" "sync" "sync/atomic" "time" ) // Handler is the message processing interface for Consumer // // Implement this interface for handlers that return whether or not message // processing completed successfully. // // When the return value is nil Consumer will automatically handle FINishing. // // When the returned value is non-nil Consumer will automatically handle REQueing. 
type Handler interface {
	HandleMessage(message *Message) error
}

// HandlerFunc is a convenience type to avoid having to declare a struct
// to implement the Handler interface, it can be used like this:
//
// 	consumer.AddHandler(nsq.HandlerFunc(func(m *Message) error {
// 		// handle the message
// 	}))
type HandlerFunc func(message *Message) error

// HandleMessage implements the Handler interface
func (h HandlerFunc) HandleMessage(m *Message) error {
	return h(m)
}

// DiscoveryFilter is an interface accepted by `SetBehaviorDelegate()`
// for filtering the nsqds returned from discovery via nsqlookupd
type DiscoveryFilter interface {
	Filter([]string) []string
}

// FailedMessageLogger is an interface that can be implemented by handlers that wish
// to receive a callback when a message is deemed "failed" (i.e. the number of attempts
// exceeded the Consumer specified MaxAttemptCount)
type FailedMessageLogger interface {
	LogFailedMessage(message *Message)
}

// ConsumerStats represents a snapshot of the state of a Consumer's connections and the messages
// it has seen
type ConsumerStats struct {
	MessagesReceived uint64
	MessagesFinished uint64
	MessagesRequeued uint64
	Connections      int
}

// instCount is atomically incremented by NewConsumer to give each
// Consumer a unique id (included in its per-connection log prefix).
var instCount int64

// backoffSignal enumerates the per-message outcomes a connection reports
// to drive the consumer's backoff state machine
// (see startStopContinueBackoff).
type backoffSignal int

const (
	backoffFlag backoffSignal = iota
	continueFlag
	resumeFlag
)

// Consumer is a high-level type to consume from NSQ.
//
// A Consumer instance is supplied a Handler that will be executed
// concurrently via goroutines to handle processing the stream of messages
// consumed from the specified topic/channel. See: Handler/HandlerFunc
// for details on implementing the interface to create handlers.
//
// If configured, it will poll nsqlookupd instances and handle connection (and
// reconnection) to any discovered nsqds.
type Consumer struct {
	// 64bit atomic vars need to be first for proper alignment on 32bit platforms
	messagesReceived uint64
	messagesFinished uint64
	messagesRequeued uint64
	totalRdyCount    int64
	backoffDuration  int64
	backoffCounter   int32
	maxInFlight      int32

	// mtx guards the connection maps and the nsqd/lookupd address lists
	mtx sync.RWMutex

	logger   logger
	logLvl   LogLevel
	logGuard sync.RWMutex

	// optional user-supplied delegate (e.g. DiscoveryFilter); see SetBehaviorDelegate
	behaviorDelegate interface{}

	id      int64
	topic   string
	channel string
	config  Config

	rng *rand.Rand

	needRDYRedistributed int32

	backoffMtx sync.RWMutex

	incomingMessages chan *Message

	rdyRetryMtx    sync.RWMutex
	rdyRetryTimers map[string]*time.Timer

	pendingConnections map[string]*Conn
	connections        map[string]*Conn

	nsqdTCPAddrs []string

	// used at connection close to force a possible reconnect
	lookupdRecheckChan chan int
	lookupdHTTPAddrs   []string
	lookupdQueryIndex  int

	wg              sync.WaitGroup
	runningHandlers int32
	stopFlag        int32
	connectedFlag   int32
	stopHandler     sync.Once
	exitHandler     sync.Once

	// read from this channel to block until consumer is cleanly stopped
	StopChan chan int

	exitChan chan int
}

// NewConsumer creates a new instance of Consumer for the specified topic/channel
//
// The only valid way to create a Config is via NewConfig, using a struct literal will panic.
// After Config is passed into NewConsumer the values are no longer mutable (they are copied).
func NewConsumer(topic string, channel string, config *Config) (*Consumer, error) {
	// a Config that was not created via NewConfig will panic here
	config.assertInitialized()

	if err := config.Validate(); err != nil {
		return nil, err
	}

	if !IsValidTopicName(topic) {
		return nil, errors.New("invalid topic name")
	}

	if !IsValidChannelName(channel) {
		return nil, errors.New("invalid channel name")
	}

	r := &Consumer{
		id: atomic.AddInt64(&instCount, 1),

		topic:   topic,
		channel: channel,
		// config is copied; subsequent mutation by the caller has no effect
		config: *config,

		logger:      log.New(os.Stderr, "", log.Flags()),
		logLvl:      LogLevelInfo,
		maxInFlight: int32(config.MaxInFlight),

		incomingMessages: make(chan *Message),

		rdyRetryTimers:     make(map[string]*time.Timer),
		pendingConnections: make(map[string]*Conn),
		connections:        make(map[string]*Conn),

		// buffered so a close-triggered recheck never blocks the sender
		lookupdRecheckChan: make(chan int, 1),

		rng: rand.New(rand.NewSource(time.Now().UnixNano())),

		StopChan: make(chan int),
		exitChan: make(chan int),
	}
	r.wg.Add(1)
	go r.rdyLoop()
	return r, nil
}

// Stats retrieves the current connection and message statistics for a Consumer
func (r *Consumer) Stats() *ConsumerStats {
	return &ConsumerStats{
		MessagesReceived: atomic.LoadUint64(&r.messagesReceived),
		MessagesFinished: atomic.LoadUint64(&r.messagesFinished),
		MessagesRequeued: atomic.LoadUint64(&r.messagesRequeued),
		Connections:      len(r.conns()),
	}
}

// conns returns a point-in-time snapshot of the active connections
// (callers may iterate without holding r.mtx)
func (r *Consumer) conns() []*Conn {
	r.mtx.RLock()
	conns := make([]*Conn, 0, len(r.connections))
	for _, c := range r.connections {
		conns = append(conns, c)
	}
	r.mtx.RUnlock()
	return conns
}

// SetLogger assigns the logger to use as well as a level
//
// The logger parameter is an interface that requires the following
// method to be implemented (such as the stdlib log.Logger):
//
//    Output(calldepth int, s string)
//
func (r *Consumer) SetLogger(l logger, lvl LogLevel) {
	r.logGuard.Lock()
	defer r.logGuard.Unlock()

	r.logger = l
	r.logLvl = lvl
}

// getLogger returns the logger and level under the read lock so it is
// safe against a concurrent SetLogger
func (r *Consumer) getLogger() (logger, LogLevel) {
	r.logGuard.RLock()
	defer r.logGuard.RUnlock()

	return r.logger, r.logLvl
}

// SetBehaviorDelegate takes a type implementing one or more
// of the following interfaces that modify the behavior
// of the `Consumer`:
//
//    DiscoveryFilter
//
func (r *Consumer) SetBehaviorDelegate(cb interface{}) {
	matched := false

	if _, ok := cb.(DiscoveryFilter); ok {
		matched = true
	}

	// panic (programmer error) rather than silently accept a useless delegate
	if !matched {
		panic("behavior delegate does not have any recognized methods")
	}

	r.behaviorDelegate = cb
}

// perConnMaxInFlight calculates the per-connection max-in-flight count.
//
// This may change dynamically based on the number of connections to nsqd the Consumer
// is responsible for.
func (r *Consumer) perConnMaxInFlight() int64 {
	b := float64(r.getMaxInFlight())
	// NOTE(review): with zero connections this divides by zero; float division
	// yields +Inf so Min clamps the result to b rather than panicking
	s := b / float64(len(r.conns()))
	return int64(math.Min(math.Max(1, s), b))
}

// IsStarved indicates whether any connections for this consumer are blocked on processing
// before being able to receive more messages (ie. RDY count of 0 and not exiting)
func (r *Consumer) IsStarved() bool {
	for _, conn := range r.conns() {
		// "starved" = in-flight has consumed >= 85% of the last RDY grant
		threshold := int64(float64(atomic.LoadInt64(&conn.lastRdyCount)) * 0.85)
		inFlight := atomic.LoadInt64(&conn.messagesInFlight)
		if inFlight >= threshold && inFlight > 0 && !conn.IsClosing() {
			return true
		}
	}
	return false
}

func (r *Consumer) getMaxInFlight() int32 {
	return atomic.LoadInt32(&r.maxInFlight)
}

// ChangeMaxInFlight sets a new maximum number of messages this comsumer instance
// will allow in-flight, and updates all existing connections as appropriate.
//
// For example, ChangeMaxInFlight(0) would pause message flow
//
// If already connected, it updates the reader RDY state for each connection.
func (r *Consumer) ChangeMaxInFlight(maxInFlight int) {
	if r.getMaxInFlight() == int32(maxInFlight) {
		return
	}

	atomic.StoreInt32(&r.maxInFlight, int32(maxInFlight))

	for _, c := range r.conns() {
		r.maybeUpdateRDY(c)
	}
}

// ConnectToNSQLookupd adds an nsqlookupd address to the list for this Consumer instance.
//
// If it is the first to be added, it initiates an HTTP request to discover nsqd
// producers for the configured topic.
//
// A goroutine is spawned to handle continual polling.
func (r *Consumer) ConnectToNSQLookupd(addr string) error {
	if atomic.LoadInt32(&r.stopFlag) == 1 {
		return errors.New("consumer stopped")
	}
	if atomic.LoadInt32(&r.runningHandlers) == 0 {
		return errors.New("no handlers")
	}

	if err := validatedLookupAddr(addr); err != nil {
		return err
	}

	atomic.StoreInt32(&r.connectedFlag, 1)

	r.mtx.Lock()
	// de-dupe: re-adding a known address is a no-op
	for _, x := range r.lookupdHTTPAddrs {
		if x == addr {
			r.mtx.Unlock()
			return nil
		}
	}
	r.lookupdHTTPAddrs = append(r.lookupdHTTPAddrs, addr)
	numLookupd := len(r.lookupdHTTPAddrs)
	r.mtx.Unlock()

	// if this is the first one, kick off the go loop
	if numLookupd == 1 {
		r.queryLookupd()
		r.wg.Add(1)
		go r.lookupdLoop()
	}

	return nil
}

// ConnectToNSQLookupds adds multiple nsqlookupd address to the list for this Consumer instance.
//
// If adding the first address it initiates an HTTP request to discover nsqd
// producers for the configured topic.
//
// A goroutine is spawned to handle continual polling.
func (r *Consumer) ConnectToNSQLookupds(addresses []string) error {
	for _, addr := range addresses {
		err := r.ConnectToNSQLookupd(addr)
		if err != nil {
			return err
		}
	}
	return nil
}

// validatedLookupAddr sanity-checks a nsqlookupd address: anything containing
// a path separator must parse as a URL; a bare host must include a port.
func validatedLookupAddr(addr string) error {
	if strings.Contains(addr, "/") {
		_, err := url.Parse(addr)
		if err != nil {
			return err
		}
		return nil
	}
	if !strings.Contains(addr, ":") {
		return errors.New("missing port")
	}
	return nil
}

// poll all known lookup servers every LookupdPollInterval
func (r *Consumer) lookupdLoop() {
	// add some jitter so that multiple consumers discovering the same topic,
	// when restarted at the same time, dont all connect at once.
	jitter := time.Duration(int64(r.rng.Float64() * r.config.LookupdPollJitter * float64(r.config.LookupdPollInterval)))
	var ticker *time.Ticker

	select {
	case <-time.After(jitter):
	case <-r.exitChan:
		goto exit
	}

	ticker = time.NewTicker(r.config.LookupdPollInterval)

	for {
		select {
		case <-ticker.C:
			r.queryLookupd()
		case <-r.lookupdRecheckChan:
			// a connection close requested an immediate re-poll
			r.queryLookupd()
		case <-r.exitChan:
			goto exit
		}
	}

exit:
	// ticker is nil if we exited during the initial jitter wait
	if ticker != nil {
		ticker.Stop()
	}
	r.log(LogLevelInfo, "exiting lookupdLoop")
	r.wg.Done()
}

// return the next lookupd endpoint to query
// keeping track of which one was last used
func (r *Consumer) nextLookupdEndpoint() string {
	r.mtx.RLock()
	if r.lookupdQueryIndex >= len(r.lookupdHTTPAddrs) {
		r.lookupdQueryIndex = 0
	}
	addr := r.lookupdHTTPAddrs[r.lookupdQueryIndex]
	num := len(r.lookupdHTTPAddrs)
	r.mtx.RUnlock()
	// NOTE(review): lookupdQueryIndex is written here outside any lock (and
	// reset above while holding only the read lock); this looks safe only if
	// queryLookupd never runs concurrently with itself — confirm callers
	// before relying on it.
	r.lookupdQueryIndex = (r.lookupdQueryIndex + 1) % num

	// default to http:// and the /lookup path when the address is bare
	urlString := addr
	if !strings.Contains(urlString, "://") {
		urlString = "http://" + addr
	}

	u, err := url.Parse(urlString)
	if err != nil {
		panic(err)
	}
	if u.Path == "/" || u.Path == "" {
		u.Path = "/lookup"
	}

	// NOTE(review): the ParseQuery error is ignored; presumably this relies
	// on ParseQuery returning a usable Values map even on error — confirm.
	v, err := url.ParseQuery(u.RawQuery)
	v.Add("topic", r.topic)
	u.RawQuery = v.Encode()
	return u.String()
}

// lookupResp models the JSON body returned by a nsqlookupd /lookup query
type lookupResp struct {
	Channels  []string    `json:"channels"`
	Producers []*peerInfo `json:"producers"`
	Timestamp int64       `json:"timestamp"`
}

// peerInfo describes one nsqd producer advertised by nsqlookupd
type peerInfo struct {
	RemoteAddress    string `json:"remote_address"`
	Hostname         string `json:"hostname"`
	BroadcastAddress string `json:"broadcast_address"`
	TCPPort          int    `json:"tcp_port"`
	HTTPPort         int    `json:"http_port"`
	Version          string `json:"version"`
}

// make an HTTP req to one of the configured nsqlookupd instances to discover
// which nsqd's provide the topic we are consuming.
//
// initiate a connection to any new producers that are identified.
func (r *Consumer) queryLookupd() { endpoint := r.nextLookupdEndpoint() r.log(LogLevelInfo, "querying nsqlookupd %s", endpoint) var data lookupResp err := apiRequestNegotiateV1("GET", endpoint, nil, &data) if err != nil { r.log(LogLevelError, "error querying nsqlookupd (%s) - %s", endpoint, err) return } var nsqdAddrs []string for _, producer := range data.Producers { broadcastAddress := producer.BroadcastAddress port := producer.TCPPort joined := net.JoinHostPort(broadcastAddress, strconv.Itoa(port)) nsqdAddrs = append(nsqdAddrs, joined) } // apply filter if discoveryFilter, ok := r.behaviorDelegate.(DiscoveryFilter); ok { nsqdAddrs = discoveryFilter.Filter(nsqdAddrs) } for _, addr := range nsqdAddrs { err = r.ConnectToNSQD(addr) if err != nil && err != ErrAlreadyConnected { r.log(LogLevelError, "(%s) error connecting to nsqd - %s", addr, err) continue } } } // ConnectToNSQDs takes multiple nsqd addresses to connect directly to. // // It is recommended to use ConnectToNSQLookupd so that topics are discovered // automatically. This method is useful when you want to connect to local instance. func (r *Consumer) ConnectToNSQDs(addresses []string) error { for _, addr := range addresses { err := r.ConnectToNSQD(addr) if err != nil { return err } } return nil } // ConnectToNSQD takes a nsqd address to connect directly to. // // It is recommended to use ConnectToNSQLookupd so that topics are discovered // automatically. This method is useful when you want to connect to a single, local, // instance. 
func (r *Consumer) ConnectToNSQD(addr string) error { if atomic.LoadInt32(&r.stopFlag) == 1 { return errors.New("consumer stopped") } if atomic.LoadInt32(&r.runningHandlers) == 0 { return errors.New("no handlers") } atomic.StoreInt32(&r.connectedFlag, 1) logger, logLvl := r.getLogger() conn := NewConn(addr, &r.config, &consumerConnDelegate{r}) conn.SetLogger(logger, logLvl, fmt.Sprintf("%3d [%s/%s] (%%s)", r.id, r.topic, r.channel)) r.mtx.Lock() _, pendingOk := r.pendingConnections[addr] _, ok := r.connections[addr] if ok || pendingOk { r.mtx.Unlock() return ErrAlreadyConnected } r.pendingConnections[addr] = conn if idx := indexOf(addr, r.nsqdTCPAddrs); idx == -1 { r.nsqdTCPAddrs = append(r.nsqdTCPAddrs, addr) } r.mtx.Unlock() r.log(LogLevelInfo, "(%s) connecting to nsqd", addr) cleanupConnection := func() { r.mtx.Lock() delete(r.pendingConnections, addr) r.mtx.Unlock() conn.Close() } resp, err := conn.Connect() if err != nil { cleanupConnection() return err } if resp != nil { if resp.MaxRdyCount < int64(r.getMaxInFlight()) { r.log(LogLevelWarning, "(%s) max RDY count %d < consumer max in flight %d, truncation possible", conn.String(), resp.MaxRdyCount, r.getMaxInFlight()) } } cmd := Subscribe(r.topic, r.channel) err = conn.WriteCommand(cmd) if err != nil { cleanupConnection() return fmt.Errorf("[%s] failed to subscribe to %s:%s - %s", conn, r.topic, r.channel, err.Error()) } r.mtx.Lock() delete(r.pendingConnections, addr) r.connections[addr] = conn r.mtx.Unlock() // pre-emptive signal to existing connections to lower their RDY count for _, c := range r.conns() { r.maybeUpdateRDY(c) } return nil } func indexOf(n string, h []string) int { for i, a := range h { if n == a { return i } } return -1 } // DisconnectFromNSQD closes the connection to and removes the specified // `nsqd` address from the list func (r *Consumer) DisconnectFromNSQD(addr string) error { r.mtx.Lock() defer r.mtx.Unlock() idx := indexOf(addr, r.nsqdTCPAddrs) if idx == -1 { return ErrNotConnected } 
// slice delete r.nsqdTCPAddrs = append(r.nsqdTCPAddrs[:idx], r.nsqdTCPAddrs[idx+1:]...) pendingConn, pendingOk := r.pendingConnections[addr] conn, ok := r.connections[addr] if ok { conn.Close() } else if pendingOk { pendingConn.Close() } return nil } // DisconnectFromNSQLookupd removes the specified `nsqlookupd` address // from the list used for periodic discovery. func (r *Consumer) DisconnectFromNSQLookupd(addr string) error { r.mtx.Lock() defer r.mtx.Unlock() idx := indexOf(addr, r.lookupdHTTPAddrs) if idx == -1 { return ErrNotConnected } if len(r.lookupdHTTPAddrs) == 1 { return fmt.Errorf("cannot disconnect from only remaining nsqlookupd HTTP address %s", addr) } r.lookupdHTTPAddrs = append(r.lookupdHTTPAddrs[:idx], r.lookupdHTTPAddrs[idx+1:]...) return nil } func (r *Consumer) onConnMessage(c *Conn, msg *Message) { atomic.AddInt64(&r.totalRdyCount, -1) atomic.AddUint64(&r.messagesReceived, 1) r.incomingMessages <- msg r.maybeUpdateRDY(c) } func (r *Consumer) onConnMessageFinished(c *Conn, msg *Message) { atomic.AddUint64(&r.messagesFinished, 1) } func (r *Consumer) onConnMessageRequeued(c *Conn, msg *Message) { atomic.AddUint64(&r.messagesRequeued, 1) } func (r *Consumer) onConnBackoff(c *Conn) { r.startStopContinueBackoff(c, backoffFlag) } func (r *Consumer) onConnContinue(c *Conn) { r.startStopContinueBackoff(c, continueFlag) } func (r *Consumer) onConnResume(c *Conn) { r.startStopContinueBackoff(c, resumeFlag) } func (r *Consumer) onConnResponse(c *Conn, data []byte) { switch { case bytes.Equal(data, []byte("CLOSE_WAIT")): // server is ready for us to close (it ack'd our StartClose) // we can assume we will not receive any more messages over this channel // (but we can still write back responses) r.log(LogLevelInfo, "(%s) received CLOSE_WAIT from nsqd", c.String()) c.Close() } } func (r *Consumer) onConnError(c *Conn, data []byte) {} func (r *Consumer) onConnHeartbeat(c *Conn) {} func (r *Consumer) onConnIOError(c *Conn, err error) { c.Close() } func (r 
*Consumer) onConnClose(c *Conn) { var hasRDYRetryTimer bool // remove this connections RDY count from the consumer's total rdyCount := c.RDY() atomic.AddInt64(&r.totalRdyCount, -rdyCount) r.rdyRetryMtx.Lock() if timer, ok := r.rdyRetryTimers[c.String()]; ok { // stop any pending retry of an old RDY update timer.Stop() delete(r.rdyRetryTimers, c.String()) hasRDYRetryTimer = true } r.rdyRetryMtx.Unlock() r.mtx.Lock() delete(r.connections, c.String()) left := len(r.connections) r.mtx.Unlock() r.log(LogLevelWarning, "there are %d connections left alive", left) if (hasRDYRetryTimer || rdyCount > 0) && (int32(left) == r.getMaxInFlight() || r.inBackoff()) { // we're toggling out of (normal) redistribution cases and this conn // had a RDY count... // // trigger RDY redistribution to make sure this RDY is moved // to a new connection atomic.StoreInt32(&r.needRDYRedistributed, 1) } // we were the last one (and stopping) if atomic.LoadInt32(&r.stopFlag) == 1 { if left == 0 { r.stopHandlers() } return } r.mtx.RLock() numLookupd := len(r.lookupdHTTPAddrs) reconnect := indexOf(c.String(), r.nsqdTCPAddrs) >= 0 r.mtx.RUnlock() if numLookupd > 0 { // trigger a poll of the lookupd select { case r.lookupdRecheckChan <- 1: default: } } else if reconnect { // there are no lookupd and we still have this nsqd TCP address in our list... 
// try to reconnect after a bit go func(addr string) { for { r.log(LogLevelInfo, "(%s) re-connecting in %s", addr, r.config.LookupdPollInterval) time.Sleep(r.config.LookupdPollInterval) if atomic.LoadInt32(&r.stopFlag) == 1 { break } r.mtx.RLock() reconnect := indexOf(addr, r.nsqdTCPAddrs) >= 0 r.mtx.RUnlock() if !reconnect { r.log(LogLevelWarning, "(%s) skipped reconnect after removal...", addr) return } err := r.ConnectToNSQD(addr) if err != nil && err != ErrAlreadyConnected { r.log(LogLevelError, "(%s) error connecting to nsqd - %s", addr, err) continue } break } }(c.String()) } } func (r *Consumer) startStopContinueBackoff(conn *Conn, signal backoffSignal) { // prevent many async failures/successes from immediately resulting in // max backoff/normal rate (by ensuring that we dont continually incr/decr // the counter during a backoff period) r.backoffMtx.Lock() if r.inBackoffTimeout() { r.backoffMtx.Unlock() return } defer r.backoffMtx.Unlock() // update backoff state backoffUpdated := false backoffCounter := atomic.LoadInt32(&r.backoffCounter) switch signal { case resumeFlag: if backoffCounter > 0 { backoffCounter-- backoffUpdated = true } case backoffFlag: nextBackoff := r.config.BackoffStrategy.Calculate(int(backoffCounter) + 1) if nextBackoff <= r.config.MaxBackoffDuration { backoffCounter++ backoffUpdated = true } } atomic.StoreInt32(&r.backoffCounter, backoffCounter) if r.backoffCounter == 0 && backoffUpdated { // exit backoff count := r.perConnMaxInFlight() r.log(LogLevelWarning, "exiting backoff, returning all to RDY %d", count) for _, c := range r.conns() { r.updateRDY(c, count) } } else if r.backoffCounter > 0 { // start or continue backoff backoffDuration := r.config.BackoffStrategy.Calculate(int(backoffCounter)) if backoffDuration > r.config.MaxBackoffDuration { backoffDuration = r.config.MaxBackoffDuration } r.log(LogLevelWarning, "backing off for %.04f seconds (backoff level %d), setting all to RDY 0", backoffDuration.Seconds(), backoffCounter) // 
send RDY 0 immediately (to *all* connections) for _, c := range r.conns() { r.updateRDY(c, 0) } r.backoff(backoffDuration) } } func (r *Consumer) backoff(d time.Duration) { atomic.StoreInt64(&r.backoffDuration, d.Nanoseconds()) time.AfterFunc(d, r.resume) } func (r *Consumer) resume() { if atomic.LoadInt32(&r.stopFlag) == 1 { atomic.StoreInt64(&r.backoffDuration, 0) return } // pick a random connection to test the waters conns := r.conns() if len(conns) == 0 { r.log(LogLevelWarning, "no connection available to resume") r.log(LogLevelWarning, "backing off for %.04f seconds", 1) r.backoff(time.Second) return } idx := r.rng.Intn(len(conns)) choice := conns[idx] r.log(LogLevelWarning, "(%s) backoff timeout expired, sending RDY 1", choice.String()) // while in backoff only ever let 1 message at a time through err := r.updateRDY(choice, 1) if err != nil { r.log(LogLevelWarning, "(%s) error resuming RDY 1 - %s", choice.String(), err) r.log(LogLevelWarning, "backing off for %.04f seconds", 1) r.backoff(time.Second) return } atomic.StoreInt64(&r.backoffDuration, 0) } func (r *Consumer) inBackoff() bool { return atomic.LoadInt32(&r.backoffCounter) > 0 } func (r *Consumer) inBackoffTimeout() bool { return atomic.LoadInt64(&r.backoffDuration) > 0 } func (r *Consumer) maybeUpdateRDY(conn *Conn) { inBackoff := r.inBackoff() inBackoffTimeout := r.inBackoffTimeout() if inBackoff || inBackoffTimeout { r.log(LogLevelDebug, "(%s) skip sending RDY inBackoff:%v || inBackoffTimeout:%v", conn, inBackoff, inBackoffTimeout) return } remain := conn.RDY() lastRdyCount := conn.LastRDY() count := r.perConnMaxInFlight() // refill when at 1, or at 25%, or if connections have changed and we're imbalanced if remain <= 1 || remain < (lastRdyCount/4) || (count > 0 && count < remain) { r.log(LogLevelDebug, "(%s) sending RDY %d (%d remain from last RDY %d)", conn, count, remain, lastRdyCount) r.updateRDY(conn, count) } else { r.log(LogLevelDebug, "(%s) skip sending RDY %d (%d remain out of last RDY 
%d)", conn, count, remain, lastRdyCount) } } func (r *Consumer) rdyLoop() { redistributeTicker := time.NewTicker(r.config.RDYRedistributeInterval) for { select { case <-redistributeTicker.C: r.redistributeRDY() case <-r.exitChan: goto exit } } exit: redistributeTicker.Stop() r.log(LogLevelInfo, "rdyLoop exiting") r.wg.Done() } func (r *Consumer) updateRDY(c *Conn, count int64) error { if c.IsClosing() { return ErrClosing } // never exceed the nsqd's configured max RDY count if count > c.MaxRDY() { count = c.MaxRDY() } // stop any pending retry of an old RDY update r.rdyRetryMtx.Lock() if timer, ok := r.rdyRetryTimers[c.String()]; ok { timer.Stop() delete(r.rdyRetryTimers, c.String()) } r.rdyRetryMtx.Unlock() // never exceed our global max in flight. truncate if possible. // this could help a new connection get partial max-in-flight rdyCount := c.RDY() maxPossibleRdy := int64(r.getMaxInFlight()) - atomic.LoadInt64(&r.totalRdyCount) + rdyCount if maxPossibleRdy > 0 && maxPossibleRdy < count { count = maxPossibleRdy } if maxPossibleRdy <= 0 && count > 0 { if rdyCount == 0 { // we wanted to exit a zero RDY count but we couldn't send it... // in order to prevent eternal starvation we reschedule this attempt // (if any other RDY update succeeds this timer will be stopped) r.rdyRetryMtx.Lock() r.rdyRetryTimers[c.String()] = time.AfterFunc(5*time.Second, func() { r.updateRDY(c, count) }) r.rdyRetryMtx.Unlock() } return ErrOverMaxInFlight } return r.sendRDY(c, count) } func (r *Consumer) sendRDY(c *Conn, count int64) error { if count == 0 && c.LastRDY() == 0 { // no need to send. 
It's already that RDY count return nil } atomic.AddInt64(&r.totalRdyCount, -c.RDY()+count) c.SetRDY(count) err := c.WriteCommand(Ready(int(count))) if err != nil { r.log(LogLevelError, "(%s) error sending RDY %d - %s", c.String(), count, err) return err } return nil } func (r *Consumer) redistributeRDY() { if r.inBackoffTimeout() { return } // if an external heuristic set needRDYRedistributed we want to wait // until we can actually redistribute to proceed conns := r.conns() if len(conns) == 0 { return } maxInFlight := r.getMaxInFlight() if len(conns) > int(maxInFlight) { r.log(LogLevelDebug, "redistributing RDY state (%d conns > %d max_in_flight)", len(conns), maxInFlight) atomic.StoreInt32(&r.needRDYRedistributed, 1) } if r.inBackoff() && len(conns) > 1 { r.log(LogLevelDebug, "redistributing RDY state (in backoff and %d conns > 1)", len(conns)) atomic.StoreInt32(&r.needRDYRedistributed, 1) } if !atomic.CompareAndSwapInt32(&r.needRDYRedistributed, 1, 0) { return } possibleConns := make([]*Conn, 0, len(conns)) for _, c := range conns { lastMsgDuration := time.Now().Sub(c.LastMessageTime()) rdyCount := c.RDY() r.log(LogLevelDebug, "(%s) rdy: %d (last message received %s)", c.String(), rdyCount, lastMsgDuration) if rdyCount > 0 && lastMsgDuration > r.config.LowRdyIdleTimeout { r.log(LogLevelDebug, "(%s) idle connection, giving up RDY", c.String()) r.updateRDY(c, 0) } possibleConns = append(possibleConns, c) } availableMaxInFlight := int64(maxInFlight) - atomic.LoadInt64(&r.totalRdyCount) if r.inBackoff() { availableMaxInFlight = 1 - atomic.LoadInt64(&r.totalRdyCount) } for len(possibleConns) > 0 && availableMaxInFlight > 0 { availableMaxInFlight-- i := r.rng.Int() % len(possibleConns) c := possibleConns[i] // delete possibleConns = append(possibleConns[:i], possibleConns[i+1:]...) 
r.log(LogLevelDebug, "(%s) redistributing RDY", c.String()) r.updateRDY(c, 1) } } // Stop will initiate a graceful stop of the Consumer (permanent) // // NOTE: receive on StopChan to block until this process completes func (r *Consumer) Stop() { if !atomic.CompareAndSwapInt32(&r.stopFlag, 0, 1) { return } r.log(LogLevelInfo, "stopping...") if len(r.conns()) == 0 { r.stopHandlers() } else { for _, c := range r.conns() { err := c.WriteCommand(StartClose()) if err != nil { r.log(LogLevelError, "(%s) error sending CLS - %s", c.String(), err) } } time.AfterFunc(time.Second*30, func() { // if we've waited this long handlers are blocked on processing messages // so we can't just stopHandlers (if any adtl. messages were pending processing // we would cause a panic on channel close) // // instead, we just bypass handler closing and skip to the final exit r.exit() }) } } func (r *Consumer) stopHandlers() { r.stopHandler.Do(func() { r.log(LogLevelInfo, "stopping handlers") close(r.incomingMessages) }) } // AddHandler sets the Handler for messages received by this Consumer. This can be called // multiple times to add additional handlers. Handler will have a 1:1 ratio to message handling goroutines. // // This panics if called after connecting to NSQD or NSQ Lookupd // // (see Handler or HandlerFunc for details on implementing this interface) func (r *Consumer) AddHandler(handler Handler) { r.AddConcurrentHandlers(handler, 1) } // AddConcurrentHandlers sets the Handler for messages received by this Consumer. It // takes a second argument which indicates the number of goroutines to spawn for // message handling. 
// // This panics if called after connecting to NSQD or NSQ Lookupd // // (see Handler or HandlerFunc for details on implementing this interface) func (r *Consumer) AddConcurrentHandlers(handler Handler, concurrency int) { if atomic.LoadInt32(&r.connectedFlag) == 1 { panic("already connected") } atomic.AddInt32(&r.runningHandlers, int32(concurrency)) for i := 0; i < concurrency; i++ { go r.handlerLoop(handler) } } func (r *Consumer) handlerLoop(handler Handler) { r.log(LogLevelDebug, "starting Handler") for { message, ok := <-r.incomingMessages if !ok { goto exit } if r.shouldFailMessage(message, handler) { message.Finish() continue } err := handler.HandleMessage(message) if err != nil { r.log(LogLevelError, "Handler returned error (%s) for msg %s", err, message.ID) if !message.IsAutoResponseDisabled() { message.Requeue(-1) } continue } if !message.IsAutoResponseDisabled() { message.Finish() } } exit: r.log(LogLevelDebug, "stopping Handler") if atomic.AddInt32(&r.runningHandlers, -1) == 0 { r.exit() } } func (r *Consumer) shouldFailMessage(message *Message, handler interface{}) bool { // message passed the max number of attempts if r.config.MaxAttempts > 0 && message.Attempts > r.config.MaxAttempts { r.log(LogLevelWarning, "msg %s attempted %d times, giving up", message.ID, message.Attempts) logger, ok := handler.(FailedMessageLogger) if ok { logger.LogFailedMessage(message) } return true } return false } func (r *Consumer) exit() { r.exitHandler.Do(func() { close(r.exitChan) r.wg.Wait() close(r.StopChan) }) } func (r *Consumer) log(lvl LogLevel, line string, args ...interface{}) { logger, logLvl := r.getLogger() if logger == nil { return } if logLvl > lvl { return } logger.Output(2, fmt.Sprintf("%-4s %3d [%s/%s] %s", lvl, r.id, r.topic, r.channel, fmt.Sprintf(line, args...))) } ================================================ FILE: vendor/github.com/bitly/go-nsq/consumer_test.go ================================================ package nsq import ( "bytes" 
"crypto/tls" "encoding/json" "errors" "fmt" "io/ioutil" "log" "net" "net/http" "os" "strconv" "strings" "testing" "time" ) type MyTestHandler struct { t *testing.T q *Consumer messagesSent int messagesReceived int messagesFailed int } var nullLogger = log.New(ioutil.Discard, "", log.LstdFlags) func (h *MyTestHandler) LogFailedMessage(message *Message) { h.messagesFailed++ h.q.Stop() } func (h *MyTestHandler) HandleMessage(message *Message) error { if string(message.Body) == "TOBEFAILED" { h.messagesReceived++ return errors.New("fail this message") } data := struct { Msg string }{} err := json.Unmarshal(message.Body, &data) if err != nil { return err } msg := data.Msg if msg != "single" && msg != "double" { h.t.Error("message 'action' was not correct: ", msg, data) } h.messagesReceived++ return nil } func SendMessage(t *testing.T, port int, topic string, method string, body []byte) { httpclient := &http.Client{} endpoint := fmt.Sprintf("http://127.0.0.1:%d/%s?topic=%s", port, method, topic) req, err := http.NewRequest("POST", endpoint, bytes.NewBuffer(body)) resp, err := httpclient.Do(req) if err != nil { t.Fatalf(err.Error()) return } resp.Body.Close() } func TestConsumer(t *testing.T) { consumerTest(t, nil) } func TestConsumerTLS(t *testing.T) { consumerTest(t, func(c *Config) { c.TlsV1 = true c.TlsConfig = &tls.Config{ InsecureSkipVerify: true, } }) } func TestConsumerDeflate(t *testing.T) { consumerTest(t, func(c *Config) { c.Deflate = true }) } func TestConsumerSnappy(t *testing.T) { consumerTest(t, func(c *Config) { c.Snappy = true }) } func TestConsumerTLSDeflate(t *testing.T) { consumerTest(t, func(c *Config) { c.TlsV1 = true c.TlsConfig = &tls.Config{ InsecureSkipVerify: true, } c.Deflate = true }) } func TestConsumerTLSSnappy(t *testing.T) { consumerTest(t, func(c *Config) { c.TlsV1 = true c.TlsConfig = &tls.Config{ InsecureSkipVerify: true, } c.Snappy = true }) } func TestConsumerTLSClientCert(t *testing.T) { envDl := os.Getenv("NSQ_DOWNLOAD") if 
strings.HasPrefix(envDl, "nsq-0.2.24") || strings.HasPrefix(envDl, "nsq-0.2.27") { t.Log("skipping due to older nsqd") return } cert, _ := tls.LoadX509KeyPair("./test/client.pem", "./test/client.key") consumerTest(t, func(c *Config) { c.TlsV1 = true c.TlsConfig = &tls.Config{ Certificates: []tls.Certificate{cert}, InsecureSkipVerify: true, } }) } func TestConsumerTLSClientCertViaSet(t *testing.T) { envDl := os.Getenv("NSQ_DOWNLOAD") if strings.HasPrefix(envDl, "nsq-0.2.24") || strings.HasPrefix(envDl, "nsq-0.2.27") { t.Log("skipping due to older nsqd") return } consumerTest(t, func(c *Config) { c.Set("tls_v1", true) c.Set("tls_cert", "./test/client.pem") c.Set("tls_key", "./test/client.key") c.Set("tls_insecure_skip_verify", true) }) } func consumerTest(t *testing.T, cb func(c *Config)) { config := NewConfig() laddr := "127.0.0.1" // so that the test can simulate binding consumer to specified address config.LocalAddr, _ = net.ResolveTCPAddr("tcp", laddr+":0") // so that the test can simulate reaching max requeues and a call to LogFailedMessage config.DefaultRequeueDelay = 0 // so that the test wont timeout from backing off config.MaxBackoffDuration = time.Millisecond * 50 if cb != nil { cb(config) } topicName := "rdr_test" if config.Deflate { topicName = topicName + "_deflate" } else if config.Snappy { topicName = topicName + "_snappy" } if config.TlsV1 { topicName = topicName + "_tls" } topicName = topicName + strconv.Itoa(int(time.Now().Unix())) q, _ := NewConsumer(topicName, "ch", config) // q.SetLogger(nullLogger, LogLevelInfo) h := &MyTestHandler{ t: t, q: q, } q.AddHandler(h) SendMessage(t, 4151, topicName, "put", []byte(`{"msg":"single"}`)) SendMessage(t, 4151, topicName, "mput", []byte("{\"msg\":\"double\"}\n{\"msg\":\"double\"}")) SendMessage(t, 4151, topicName, "put", []byte("TOBEFAILED")) h.messagesSent = 4 addr := "127.0.0.1:4150" err := q.ConnectToNSQD(addr) if err != nil { t.Fatal(err) } stats := q.Stats() if stats.Connections == 0 { t.Fatal("stats 
report 0 connections (should be > 0)") } err = q.ConnectToNSQD(addr) if err == nil { t.Fatal("should not be able to connect to the same NSQ twice") } conn := q.conns()[0] if !strings.HasPrefix(conn.conn.LocalAddr().String(), laddr) { t.Fatal("connection should be bound to the specified address:", conn.conn.LocalAddr()) } err = q.DisconnectFromNSQD("1.2.3.4:4150") if err == nil { t.Fatal("should not be able to disconnect from an unknown nsqd") } err = q.ConnectToNSQD("1.2.3.4:4150") if err == nil { t.Fatal("should not be able to connect to non-existent nsqd") } err = q.DisconnectFromNSQD("1.2.3.4:4150") if err != nil { t.Fatal("should be able to disconnect from an nsqd - " + err.Error()) } <-q.StopChan stats = q.Stats() if stats.Connections != 0 { t.Fatalf("stats report %d active connections (should be 0)", stats.Connections) } stats = q.Stats() if stats.MessagesReceived != uint64(h.messagesReceived+h.messagesFailed) { t.Fatalf("stats report %d messages received (should be %d)", stats.MessagesReceived, h.messagesReceived+h.messagesFailed) } if h.messagesReceived != 8 || h.messagesSent != 4 { t.Fatalf("end of test. 
should have handled a diff number of messages (got %d, sent %d)", h.messagesReceived, h.messagesSent) } if h.messagesFailed != 1 { t.Fatal("failed message not done") } } ================================================ FILE: vendor/github.com/bitly/go-nsq/delegates.go ================================================ package nsq import "time" type logger interface { Output(calldepth int, s string) error } // LogLevel specifies the severity of a given log message type LogLevel int // Log levels const ( LogLevelDebug LogLevel = iota LogLevelInfo LogLevelWarning LogLevelError ) // String returns the string form for a given LogLevel func (lvl LogLevel) String() string { switch lvl { case LogLevelInfo: return "INF" case LogLevelWarning: return "WRN" case LogLevelError: return "ERR" } return "DBG" } // MessageDelegate is an interface of methods that are used as // callbacks in Message type MessageDelegate interface { // OnFinish is called when the Finish() method // is triggered on the Message OnFinish(*Message) // OnRequeue is called when the Requeue() method // is triggered on the Message OnRequeue(m *Message, delay time.Duration, backoff bool) // OnTouch is called when the Touch() method // is triggered on the Message OnTouch(*Message) } type connMessageDelegate struct { c *Conn } func (d *connMessageDelegate) OnFinish(m *Message) { d.c.onMessageFinish(m) } func (d *connMessageDelegate) OnRequeue(m *Message, t time.Duration, b bool) { d.c.onMessageRequeue(m, t, b) } func (d *connMessageDelegate) OnTouch(m *Message) { d.c.onMessageTouch(m) } // ConnDelegate is an interface of methods that are used as // callbacks in Conn type ConnDelegate interface { // OnResponse is called when the connection // receives a FrameTypeResponse from nsqd OnResponse(*Conn, []byte) // OnError is called when the connection // receives a FrameTypeError from nsqd OnError(*Conn, []byte) // OnMessage is called when the connection // receives a FrameTypeMessage from nsqd OnMessage(*Conn, *Message) 
// OnMessageFinished is called when the connection // handles a FIN command from a message handler OnMessageFinished(*Conn, *Message) // OnMessageRequeued is called when the connection // handles a REQ command from a message handler OnMessageRequeued(*Conn, *Message) // OnBackoff is called when the connection triggers a backoff state OnBackoff(*Conn) // OnContinue is called when the connection finishes a message without adjusting backoff state OnContinue(*Conn) // OnResume is called when the connection triggers a resume state OnResume(*Conn) // OnIOError is called when the connection experiences // a low-level TCP transport error OnIOError(*Conn, error) // OnHeartbeat is called when the connection // receives a heartbeat from nsqd OnHeartbeat(*Conn) // OnClose is called when the connection // closes, after all cleanup OnClose(*Conn) } // keeps the exported Consumer struct clean of the exported methods // required to implement the ConnDelegate interface type consumerConnDelegate struct { r *Consumer } func (d *consumerConnDelegate) OnResponse(c *Conn, data []byte) { d.r.onConnResponse(c, data) } func (d *consumerConnDelegate) OnError(c *Conn, data []byte) { d.r.onConnError(c, data) } func (d *consumerConnDelegate) OnMessage(c *Conn, m *Message) { d.r.onConnMessage(c, m) } func (d *consumerConnDelegate) OnMessageFinished(c *Conn, m *Message) { d.r.onConnMessageFinished(c, m) } func (d *consumerConnDelegate) OnMessageRequeued(c *Conn, m *Message) { d.r.onConnMessageRequeued(c, m) } func (d *consumerConnDelegate) OnBackoff(c *Conn) { d.r.onConnBackoff(c) } func (d *consumerConnDelegate) OnContinue(c *Conn) { d.r.onConnContinue(c) } func (d *consumerConnDelegate) OnResume(c *Conn) { d.r.onConnResume(c) } func (d *consumerConnDelegate) OnIOError(c *Conn, err error) { d.r.onConnIOError(c, err) } func (d *consumerConnDelegate) OnHeartbeat(c *Conn) { d.r.onConnHeartbeat(c) } func (d *consumerConnDelegate) OnClose(c *Conn) { d.r.onConnClose(c) } // keeps the exported 
Producer struct clean of the exported methods // required to implement the ConnDelegate interface type producerConnDelegate struct { w *Producer } func (d *producerConnDelegate) OnResponse(c *Conn, data []byte) { d.w.onConnResponse(c, data) } func (d *producerConnDelegate) OnError(c *Conn, data []byte) { d.w.onConnError(c, data) } func (d *producerConnDelegate) OnMessage(c *Conn, m *Message) {} func (d *producerConnDelegate) OnMessageFinished(c *Conn, m *Message) {} func (d *producerConnDelegate) OnMessageRequeued(c *Conn, m *Message) {} func (d *producerConnDelegate) OnBackoff(c *Conn) {} func (d *producerConnDelegate) OnContinue(c *Conn) {} func (d *producerConnDelegate) OnResume(c *Conn) {} func (d *producerConnDelegate) OnIOError(c *Conn, err error) { d.w.onConnIOError(c, err) } func (d *producerConnDelegate) OnHeartbeat(c *Conn) { d.w.onConnHeartbeat(c) } func (d *producerConnDelegate) OnClose(c *Conn) { d.w.onConnClose(c) } ================================================ FILE: vendor/github.com/bitly/go-nsq/errors.go ================================================ package nsq import ( "errors" "fmt" ) // ErrNotConnected is returned when a publish command is made // against a Producer that is not connected var ErrNotConnected = errors.New("not connected") // ErrStopped is returned when a publish command is // made against a Producer that has been stopped var ErrStopped = errors.New("stopped") // ErrClosing is returned when a connection is closing var ErrClosing = errors.New("closing") // ErrAlreadyConnected is returned from ConnectToNSQD when already connected var ErrAlreadyConnected = errors.New("already connected") // ErrOverMaxInFlight is returned from Consumer if over max-in-flight var ErrOverMaxInFlight = errors.New("over configure max-inflight") // ErrIdentify is returned from Conn as part of the IDENTIFY handshake type ErrIdentify struct { Reason string } // Error returns a stringified error func (e ErrIdentify) Error() string { return 
fmt.Sprintf("failed to IDENTIFY - %s", e.Reason) } // ErrProtocol is returned from Producer when encountering // an NSQ protocol level error type ErrProtocol struct { Reason string } // Error returns a stringified error func (e ErrProtocol) Error() string { return e.Reason } ================================================ FILE: vendor/github.com/bitly/go-nsq/message.go ================================================ package nsq import ( "bytes" "encoding/binary" "io" "io/ioutil" "sync/atomic" "time" ) // The number of bytes for a Message.ID const MsgIDLength = 16 // MessageID is the ASCII encoded hexadecimal message ID type MessageID [MsgIDLength]byte // Message is the fundamental data type containing // the id, body, and metadata type Message struct { ID MessageID Body []byte Timestamp int64 Attempts uint16 NSQDAddress string Delegate MessageDelegate autoResponseDisabled int32 responded int32 } // NewMessage creates a Message, initializes some metadata, // and returns a pointer func NewMessage(id MessageID, body []byte) *Message { return &Message{ ID: id, Body: body, Timestamp: time.Now().UnixNano(), } } // DisableAutoResponse disables the automatic response that // would normally be sent when a handler.HandleMessage // returns (FIN/REQ based on the error value returned). // // This is useful if you want to batch, buffer, or asynchronously // respond to messages. 
func (m *Message) DisableAutoResponse() { atomic.StoreInt32(&m.autoResponseDisabled, 1) } // IsAutoResponseDisabled indicates whether or not this message // will be responded to automatically func (m *Message) IsAutoResponseDisabled() bool { return atomic.LoadInt32(&m.autoResponseDisabled) == 1 } // HasResponded indicates whether or not this message has been responded to func (m *Message) HasResponded() bool { return atomic.LoadInt32(&m.responded) == 1 } // Finish sends a FIN command to the nsqd which // sent this message func (m *Message) Finish() { if !atomic.CompareAndSwapInt32(&m.responded, 0, 1) { return } m.Delegate.OnFinish(m) } // Touch sends a TOUCH command to the nsqd which // sent this message func (m *Message) Touch() { if m.HasResponded() { return } m.Delegate.OnTouch(m) } // Requeue sends a REQ command to the nsqd which // sent this message, using the supplied delay. // // A delay of -1 will automatically calculate // based on the number of attempts and the // configured default_requeue_delay func (m *Message) Requeue(delay time.Duration) { m.doRequeue(delay, true) } // RequeueWithoutBackoff sends a REQ command to the nsqd which // sent this message, using the supplied delay. // // Notably, using this method to respond does not trigger a backoff // event on the configured Delegate. func (m *Message) RequeueWithoutBackoff(delay time.Duration) { m.doRequeue(delay, false) } func (m *Message) doRequeue(delay time.Duration, backoff bool) { if !atomic.CompareAndSwapInt32(&m.responded, 0, 1) { return } m.Delegate.OnRequeue(m, delay, backoff) } // WriteTo implements the WriterTo interface and serializes // the message into the supplied producer. // // It is suggested that the target Writer is buffered to // avoid performing many system calls. 
func (m *Message) WriteTo(w io.Writer) (int64, error) { var buf [10]byte var total int64 binary.BigEndian.PutUint64(buf[:8], uint64(m.Timestamp)) binary.BigEndian.PutUint16(buf[8:10], uint16(m.Attempts)) n, err := w.Write(buf[:]) total += int64(n) if err != nil { return total, err } n, err = w.Write(m.ID[:]) total += int64(n) if err != nil { return total, err } n, err = w.Write(m.Body) total += int64(n) if err != nil { return total, err } return total, nil } // DecodeMessage deseralizes data (as []byte) and creates a new Message func DecodeMessage(b []byte) (*Message, error) { var msg Message msg.Timestamp = int64(binary.BigEndian.Uint64(b[:8])) msg.Attempts = binary.BigEndian.Uint16(b[8:10]) buf := bytes.NewBuffer(b[10:]) _, err := io.ReadFull(buf, msg.ID[:]) if err != nil { return nil, err } msg.Body, err = ioutil.ReadAll(buf) if err != nil { return nil, err } return &msg, nil } ================================================ FILE: vendor/github.com/bitly/go-nsq/mock_test.go ================================================ package nsq import ( "bufio" "bytes" "encoding/binary" "errors" "fmt" "io" "log" "net" "strconv" "testing" "time" ) type tbLog interface { Log(...interface{}) } type testLogger struct { tbLog } func (tl *testLogger) Output(maxdepth int, s string) error { tl.Log(s) return nil } func newTestLogger(tbl tbLog) logger { return &testLogger{tbl} } type instruction struct { delay time.Duration frameType int32 body []byte } type mockNSQD struct { script []instruction got [][]byte tcpAddr *net.TCPAddr tcpListener net.Listener exitChan chan int } func newMockNSQD(script []instruction, addr string) *mockNSQD { n := &mockNSQD{ script: script, exitChan: make(chan int), } tcpListener, err := net.Listen("tcp", addr) if err != nil { log.Fatalf("FATAL: listen (%s) failed - %s", n.tcpAddr.String(), err) } n.tcpListener = tcpListener n.tcpAddr = tcpListener.Addr().(*net.TCPAddr) go n.listen() return n } func (n *mockNSQD) listen() { log.Printf("TCP: listening on 
%s", n.tcpListener.Addr().String()) for { conn, err := n.tcpListener.Accept() if err != nil { break } go n.handle(conn) } log.Printf("TCP: closing %s", n.tcpListener.Addr().String()) close(n.exitChan) } func (n *mockNSQD) handle(conn net.Conn) { var idx int log.Printf("TCP: new client(%s)", conn.RemoteAddr()) buf := make([]byte, 4) _, err := io.ReadFull(conn, buf) if err != nil { log.Fatalf("ERROR: failed to read protocol version - %s", err) } readChan := make(chan []byte) readDoneChan := make(chan int) scriptTime := time.After(n.script[0].delay) rdr := bufio.NewReader(conn) go func() { for { line, err := rdr.ReadBytes('\n') if err != nil { return } // trim the '\n' line = line[:len(line)-1] readChan <- line <-readDoneChan } }() var rdyCount int for idx < len(n.script) { select { case line := <-readChan: log.Printf("mock: %s", line) n.got = append(n.got, line) params := bytes.Split(line, []byte(" ")) switch { case bytes.Equal(params[0], []byte("IDENTIFY")): l := make([]byte, 4) _, err := io.ReadFull(rdr, l) if err != nil { log.Printf(err.Error()) goto exit } size := int32(binary.BigEndian.Uint32(l)) b := make([]byte, size) _, err = io.ReadFull(rdr, b) if err != nil { log.Printf(err.Error()) goto exit } log.Printf("%s", b) case bytes.Equal(params[0], []byte("RDY")): rdy, _ := strconv.Atoi(string(params[1])) rdyCount = rdy case bytes.Equal(params[0], []byte("FIN")): case bytes.Equal(params[0], []byte("REQ")): } readDoneChan <- 1 case <-scriptTime: inst := n.script[idx] if bytes.Equal(inst.body, []byte("exit")) { goto exit } if inst.frameType == FrameTypeMessage { if rdyCount == 0 { log.Printf("!!! 
RDY == 0") scriptTime = time.After(n.script[idx+1].delay) continue } rdyCount-- } _, err := conn.Write(framedResponse(inst.frameType, inst.body)) if err != nil { log.Printf(err.Error()) goto exit } scriptTime = time.After(n.script[idx+1].delay) idx++ } } exit: n.tcpListener.Close() conn.Close() } func framedResponse(frameType int32, data []byte) []byte { var w bytes.Buffer beBuf := make([]byte, 4) size := uint32(len(data)) + 4 binary.BigEndian.PutUint32(beBuf, size) _, err := w.Write(beBuf) if err != nil { return nil } binary.BigEndian.PutUint32(beBuf, uint32(frameType)) _, err = w.Write(beBuf) if err != nil { return nil } _, err = w.Write(data) return w.Bytes() } type testHandler struct{} func (h *testHandler) HandleMessage(message *Message) error { switch string(message.Body) { case "requeue": message.Requeue(-1) return nil case "requeue_no_backoff_1": if message.Attempts > 1 { return nil } message.RequeueWithoutBackoff(-1) return nil case "bad": return errors.New("bad") } return nil } func frameMessage(m *Message) []byte { var b bytes.Buffer m.WriteTo(&b) return b.Bytes() } func TestConsumerBackoff(t *testing.T) { msgIDGood := MessageID{'1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'a', 's', 'd', 'f', 'g', 'h'} msgGood := NewMessage(msgIDGood, []byte("good")) msgIDBad := MessageID{'z', 'x', 'c', 'v', 'b', '6', '7', '8', '9', '0', 'a', 's', 'd', 'f', 'g', 'h'} msgBad := NewMessage(msgIDBad, []byte("bad")) script := []instruction{ // SUB instruction{0, FrameTypeResponse, []byte("OK")}, // IDENTIFY instruction{0, FrameTypeResponse, []byte("OK")}, instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgGood)}, instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgGood)}, instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgGood)}, instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgBad)}, instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgBad)}, instruction{20 * time.Millisecond, 
FrameTypeMessage, frameMessage(msgGood)}, instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgGood)}, // needed to exit test instruction{200 * time.Millisecond, -1, []byte("exit")}, } addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:0") n := newMockNSQD(script, addr.String()) topicName := "test_consumer_commands" + strconv.Itoa(int(time.Now().Unix())) config := NewConfig() config.MaxInFlight = 5 config.BackoffMultiplier = 10 * time.Millisecond q, _ := NewConsumer(topicName, "ch", config) q.SetLogger(newTestLogger(t), LogLevelDebug) q.AddHandler(&testHandler{}) err := q.ConnectToNSQD(n.tcpAddr.String()) if err != nil { t.Fatalf(err.Error()) } <-n.exitChan for i, r := range n.got { log.Printf("%d: %s", i, r) } expected := []string{ "IDENTIFY", "SUB " + topicName + " ch", "RDY 5", fmt.Sprintf("FIN %s", msgIDGood), fmt.Sprintf("FIN %s", msgIDGood), fmt.Sprintf("FIN %s", msgIDGood), "RDY 5", "RDY 0", fmt.Sprintf("REQ %s 0", msgIDBad), "RDY 1", "RDY 0", fmt.Sprintf("REQ %s 0", msgIDBad), "RDY 1", "RDY 0", fmt.Sprintf("FIN %s", msgIDGood), "RDY 1", "RDY 5", fmt.Sprintf("FIN %s", msgIDGood), } if len(n.got) != len(expected) { t.Fatalf("we got %d commands != %d expected", len(n.got), len(expected)) } for i, r := range n.got { if string(r) != expected[i] { t.Fatalf("cmd %d bad %s != %s", i, r, expected[i]) } } } func TestConsumerRequeueNoBackoff(t *testing.T) { msgIDGood := MessageID{'1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'a', 's', 'd', 'f', 'g', 'h'} msgIDRequeue := MessageID{'r', 'e', 'q', 'v', 'b', '6', '7', '8', '9', '0', 'a', 's', 'd', 'f', 'g', 'h'} msgIDRequeueNoBackoff := MessageID{'r', 'e', 'q', 'n', 'b', 'a', 'c', 'k', '9', '0', 'a', 's', 'd', 'f', 'g', 'h'} msgGood := NewMessage(msgIDGood, []byte("good")) msgRequeue := NewMessage(msgIDRequeue, []byte("requeue")) msgRequeueNoBackoff := NewMessage(msgIDRequeueNoBackoff, []byte("requeue_no_backoff_1")) script := []instruction{ // SUB instruction{0, FrameTypeResponse, []byte("OK")}, // 
IDENTIFY instruction{0, FrameTypeResponse, []byte("OK")}, instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgRequeue)}, instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgRequeueNoBackoff)}, instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgGood)}, // needed to exit test instruction{100 * time.Millisecond, -1, []byte("exit")}, } addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:0") n := newMockNSQD(script, addr.String()) topicName := "test_requeue" + strconv.Itoa(int(time.Now().Unix())) config := NewConfig() config.MaxInFlight = 1 config.BackoffMultiplier = 10 * time.Millisecond q, _ := NewConsumer(topicName, "ch", config) q.SetLogger(newTestLogger(t), LogLevelDebug) q.AddHandler(&testHandler{}) err := q.ConnectToNSQD(n.tcpAddr.String()) if err != nil { t.Fatalf(err.Error()) } select { case <-n.exitChan: log.Printf("clean exit") case <-time.After(500 * time.Millisecond): log.Printf("timeout") } for i, r := range n.got { log.Printf("%d: %s", i, r) } expected := []string{ "IDENTIFY", "SUB " + topicName + " ch", "RDY 1", "RDY 1", "RDY 0", fmt.Sprintf("REQ %s 0", msgIDRequeue), "RDY 1", "RDY 0", fmt.Sprintf("REQ %s 0", msgIDRequeueNoBackoff), "RDY 1", "RDY 1", fmt.Sprintf("FIN %s", msgIDGood), } if len(n.got) != len(expected) { t.Fatalf("we got %d commands != %d expected", len(n.got), len(expected)) } for i, r := range n.got { if string(r) != expected[i] { t.Fatalf("cmd %d bad %s != %s", i, r, expected[i]) } } } func TestConsumerBackoffDisconnect(t *testing.T) { msgIDGood := MessageID{'1', '2', '3', '4', '5', '6', '7', '8', '9', '0', 'a', 's', 'd', 'f', 'g', 'h'} msgIDRequeue := MessageID{'r', 'e', 'q', 'v', 'b', '6', '7', '8', '9', '0', 'a', 's', 'd', 'f', 'g', 'h'} msgGood := NewMessage(msgIDGood, []byte("good")) msgRequeue := NewMessage(msgIDRequeue, []byte("requeue")) script := []instruction{ // SUB instruction{0, FrameTypeResponse, []byte("OK")}, // IDENTIFY instruction{0, FrameTypeResponse, []byte("OK")}, 
instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgGood)}, instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgRequeue)}, instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgRequeue)}, instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgGood)}, // needed to exit test instruction{100 * time.Millisecond, -1, []byte("exit")}, } addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:0") n := newMockNSQD(script, addr.String()) topicName := "test_requeue" + strconv.Itoa(int(time.Now().Unix())) config := NewConfig() config.MaxInFlight = 5 config.BackoffMultiplier = 10 * time.Millisecond config.LookupdPollInterval = 10 * time.Millisecond config.RDYRedistributeInterval = 10 * time.Millisecond q, _ := NewConsumer(topicName, "ch", config) q.SetLogger(newTestLogger(t), LogLevelDebug) q.AddHandler(&testHandler{}) err := q.ConnectToNSQD(n.tcpAddr.String()) if err != nil { t.Fatalf(err.Error()) } select { case <-n.exitChan: log.Printf("clean exit") case <-time.After(500 * time.Millisecond): log.Printf("timeout") } for i, r := range n.got { log.Printf("%d: %s", i, r) } expected := []string{ "IDENTIFY", "SUB " + topicName + " ch", "RDY 5", fmt.Sprintf("FIN %s", msgIDGood), "RDY 0", fmt.Sprintf("REQ %s 0", msgIDRequeue), "RDY 1", "RDY 0", fmt.Sprintf("REQ %s 0", msgIDRequeue), "RDY 1", "RDY 0", fmt.Sprintf("FIN %s", msgIDGood), "RDY 1", } if len(n.got) != len(expected) { t.Fatalf("we got %d commands != %d expected", len(n.got), len(expected)) } for i, r := range n.got { if string(r) != expected[i] { t.Fatalf("cmd %d bad %s != %s", i, r, expected[i]) } } script = []instruction{ // SUB instruction{0, FrameTypeResponse, []byte("OK")}, // IDENTIFY instruction{0, FrameTypeResponse, []byte("OK")}, instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgGood)}, instruction{20 * time.Millisecond, FrameTypeMessage, frameMessage(msgGood)}, // needed to exit test instruction{100 * time.Millisecond, -1, []byte("exit")}, 
} n = newMockNSQD(script, n.tcpAddr.String()) select { case <-n.exitChan: log.Printf("clean exit") case <-time.After(500 * time.Millisecond): log.Printf("timeout") } for i, r := range n.got { log.Printf("%d: %s", i, r) } expected = []string{ "IDENTIFY", "SUB " + topicName + " ch", "RDY 1", "RDY 5", fmt.Sprintf("FIN %s", msgIDGood), fmt.Sprintf("FIN %s", msgIDGood), } if len(n.got) != len(expected) { t.Fatalf("we got %d commands != %d expected", len(n.got), len(expected)) } for i, r := range n.got { if string(r) != expected[i] { t.Fatalf("cmd %d bad %s != %s", i, r, expected[i]) } } } ================================================ FILE: vendor/github.com/bitly/go-nsq/producer.go ================================================ package nsq import ( "fmt" "log" "os" "sync" "sync/atomic" "time" ) type producerConn interface { String() string SetLogger(logger, LogLevel, string) Connect() (*IdentifyResponse, error) Close() error WriteCommand(*Command) error } // Producer is a high-level type to publish to NSQ. // // A Producer instance is 1:1 with a destination `nsqd` // and will lazily connect to that instance (and re-connect) // when Publish commands are executed. type Producer struct { id int64 addr string conn producerConn config Config logger logger logLvl LogLevel logGuard sync.RWMutex responseChan chan []byte errorChan chan []byte closeChan chan int transactionChan chan *ProducerTransaction transactions []*ProducerTransaction state int32 concurrentProducers int32 stopFlag int32 exitChan chan int wg sync.WaitGroup guard sync.Mutex } // ProducerTransaction is returned by the async publish methods // to retrieve metadata about the command after the // response is received. 
type ProducerTransaction struct { cmd *Command doneChan chan *ProducerTransaction Error error // the error (or nil) of the publish command Args []interface{} // the slice of variadic arguments passed to PublishAsync or MultiPublishAsync } func (t *ProducerTransaction) finish() { if t.doneChan != nil { t.doneChan <- t } } // NewProducer returns an instance of Producer for the specified address // // The only valid way to create a Config is via NewConfig, using a struct literal will panic. // After Config is passed into NewProducer the values are no longer mutable (they are copied). func NewProducer(addr string, config *Config) (*Producer, error) { config.assertInitialized() err := config.Validate() if err != nil { return nil, err } p := &Producer{ id: atomic.AddInt64(&instCount, 1), addr: addr, config: *config, logger: log.New(os.Stderr, "", log.Flags()), logLvl: LogLevelInfo, transactionChan: make(chan *ProducerTransaction), exitChan: make(chan int), responseChan: make(chan []byte), errorChan: make(chan []byte), } return p, nil } // Ping causes the Producer to connect to it's configured nsqd (if not already // connected) and send a `Nop` command, returning any error that might occur. // // This method can be used to verify that a newly-created Producer instance is // configured correctly, rather than relying on the lazy "connect on Publish" // behavior of a Producer. 
func (w *Producer) Ping() error { if atomic.LoadInt32(&w.state) != StateConnected { err := w.connect() if err != nil { return err } } return w.conn.WriteCommand(Nop()) } // SetLogger assigns the logger to use as well as a level // // The logger parameter is an interface that requires the following // method to be implemented (such as the the stdlib log.Logger): // // Output(calldepth int, s string) // func (w *Producer) SetLogger(l logger, lvl LogLevel) { w.logGuard.Lock() defer w.logGuard.Unlock() w.logger = l w.logLvl = lvl } func (w *Producer) getLogger() (logger, LogLevel) { w.logGuard.RLock() defer w.logGuard.RUnlock() return w.logger, w.logLvl } // String returns the address of the Producer func (w *Producer) String() string { return w.addr } // Stop initiates a graceful stop of the Producer (permanent) // // NOTE: this blocks until completion func (w *Producer) Stop() { w.guard.Lock() if !atomic.CompareAndSwapInt32(&w.stopFlag, 0, 1) { w.guard.Unlock() return } w.log(LogLevelInfo, "stopping") close(w.exitChan) w.close() w.guard.Unlock() w.wg.Wait() } // PublishAsync publishes a message body to the specified topic // but does not wait for the response from `nsqd`. // // When the Producer eventually receives the response from `nsqd`, // the supplied `doneChan` (if specified) // will receive a `ProducerTransaction` instance with the supplied variadic arguments // and the response error if present func (w *Producer) PublishAsync(topic string, body []byte, doneChan chan *ProducerTransaction, args ...interface{}) error { return w.sendCommandAsync(Publish(topic, body), doneChan, args) } // MultiPublishAsync publishes a slice of message bodies to the specified topic // but does not wait for the response from `nsqd`. 
// // When the Producer eventually receives the response from `nsqd`, // the supplied `doneChan` (if specified) // will receive a `ProducerTransaction` instance with the supplied variadic arguments // and the response error if present func (w *Producer) MultiPublishAsync(topic string, body [][]byte, doneChan chan *ProducerTransaction, args ...interface{}) error { cmd, err := MultiPublish(topic, body) if err != nil { return err } return w.sendCommandAsync(cmd, doneChan, args) } // Publish synchronously publishes a message body to the specified topic, returning // an error if publish failed func (w *Producer) Publish(topic string, body []byte) error { return w.sendCommand(Publish(topic, body)) } // MultiPublish synchronously publishes a slice of message bodies to the specified topic, returning // an error if publish failed func (w *Producer) MultiPublish(topic string, body [][]byte) error { cmd, err := MultiPublish(topic, body) if err != nil { return err } return w.sendCommand(cmd) } func (w *Producer) sendCommand(cmd *Command) error { doneChan := make(chan *ProducerTransaction) err := w.sendCommandAsync(cmd, doneChan, nil) if err != nil { close(doneChan) return err } t := <-doneChan return t.Error } func (w *Producer) sendCommandAsync(cmd *Command, doneChan chan *ProducerTransaction, args []interface{}) error { // keep track of how many outstanding producers we're dealing with // in order to later ensure that we clean them all up... 
	atomic.AddInt32(&w.concurrentProducers, 1)
	defer atomic.AddInt32(&w.concurrentProducers, -1)

	// lazily connect on first use (mirrors Ping)
	if atomic.LoadInt32(&w.state) != StateConnected {
		err := w.connect()
		if err != nil {
			return err
		}
	}

	t := &ProducerTransaction{
		cmd:      cmd,
		doneChan: doneChan,
		Args:     args,
	}

	// hand the transaction to the router goroutine, unless the
	// Producer is being stopped concurrently
	select {
	case w.transactionChan <- t:
	case <-w.exitChan:
		return ErrStopped
	}

	return nil
}

// connect dials the configured nsqd (serialized by w.guard) and, on
// success, marks the Producer connected and starts the router goroutine.
func (w *Producer) connect() error {
	w.guard.Lock()
	defer w.guard.Unlock()

	if atomic.LoadInt32(&w.stopFlag) == 1 {
		return ErrStopped
	}

	// only proceed from a fresh state; an in-flight connect/disconnect
	// (any state other than StateInit/StateConnected) is an error
	switch state := atomic.LoadInt32(&w.state); state {
	case StateInit:
	case StateConnected:
		return nil
	default:
		return ErrNotConnected
	}

	w.log(LogLevelInfo, "(%s) connecting to nsqd", w.addr)

	logger, logLvl := w.getLogger()

	w.conn = NewConn(w.addr, &w.config, &producerConnDelegate{w})
	w.conn.SetLogger(logger, logLvl, fmt.Sprintf("%3d (%%s)", w.id))

	_, err := w.conn.Connect()
	if err != nil {
		w.conn.Close()
		w.log(LogLevelError, "(%s) error connecting to nsqd - %s", w.addr, err)
		return err
	}
	atomic.StoreInt32(&w.state, StateConnected)
	w.closeChan = make(chan int)
	w.wg.Add(1)
	go w.router()

	return nil
}

// close transitions the Producer out of the connected state and closes the
// underlying connection; state returns to StateInit once the router exits.
func (w *Producer) close() {
	if !atomic.CompareAndSwapInt32(&w.state, StateConnected, StateDisconnected) {
		return
	}
	w.conn.Close()
	go func() {
		// we need to handle this in a goroutine so we don't
		// block the caller from making progress
		w.wg.Wait()
		atomic.StoreInt32(&w.state, StateInit)
	}()
}

// router serializes command writes and matches responses/errors to
// in-flight transactions until the connection closes or the Producer exits.
func (w *Producer) router() {
	for {
		select {
		case t := <-w.transactionChan:
			w.transactions = append(w.transactions, t)
			err := w.conn.WriteCommand(t.cmd)
			if err != nil {
				w.log(LogLevelError, "(%s) sending command - %s", w.conn.String(), err)
				w.close()
			}
		case data := <-w.responseChan:
			w.popTransaction(FrameTypeResponse, data)
		case data := <-w.errorChan:
			w.popTransaction(FrameTypeError, data)
		case <-w.closeChan:
			goto exit
		case <-w.exitChan:
			goto exit
		}
	}

exit:
	w.transactionCleanup()
	w.wg.Done()
	w.log(LogLevelInfo, "exiting router")
}

// popTransaction completes the oldest in-flight transaction with the
// received frame (responses arrive in FIFO order per the nsqd protocol).
func (w *Producer) popTransaction(frameType int32, data []byte) {
	t :=
		w.transactions[0]
	w.transactions = w.transactions[1:]
	if frameType == FrameTypeError {
		t.Error = ErrProtocol{string(data)}
	}
	t.finish()
}

// transactionCleanup fails all outstanding transactions with
// ErrNotConnected after the router exits, including any writes that
// raced with shutdown.
func (w *Producer) transactionCleanup() {
	// clean up transactions we can easily account for
	for _, t := range w.transactions {
		t.Error = ErrNotConnected
		t.finish()
	}
	w.transactions = w.transactions[:0]

	// spin and free up any writes that might have raced
	// with the cleanup process (blocked on writing
	// to transactionChan)
	for {
		select {
		case t := <-w.transactionChan:
			t.Error = ErrNotConnected
			t.finish()
		default:
			// keep spinning until there are 0 concurrent producers
			if atomic.LoadInt32(&w.concurrentProducers) == 0 {
				return
			}
			// give the runtime a chance to schedule other racing goroutines
			time.Sleep(5 * time.Millisecond)
		}
	}
}

// log emits a line via the configured logger if lvl passes the
// configured threshold; calldepth 2 attributes the line to the caller.
func (w *Producer) log(lvl LogLevel, line string, args ...interface{}) {
	logger, logLvl := w.getLogger()

	if logger == nil {
		return
	}

	if logLvl > lvl {
		return
	}

	logger.Output(2, fmt.Sprintf("%-4s %3d %s", lvl, w.id, fmt.Sprintf(line, args...)))
}

// ConnDelegate callbacks: forward connection events into the router's channels.
func (w *Producer) onConnResponse(c *Conn, data []byte) { w.responseChan <- data }
func (w *Producer) onConnError(c *Conn, data []byte)    { w.errorChan <- data }
func (w *Producer) onConnHeartbeat(c *Conn)             {}
func (w *Producer) onConnIOError(c *Conn, err error)    { w.close() }
func (w *Producer) onConnClose(c *Conn) {
	w.guard.Lock()
	defer w.guard.Unlock()
	close(w.closeChan)
}

================================================
FILE: vendor/github.com/bitly/go-nsq/producer_test.go
================================================
package nsq

import (
	"bytes"
	"errors"
	"io/ioutil"
	"log"
	"net"
	"os"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
)

// ConsumerHandler is a test handler that counts good and failed messages
// and stops its Consumer when a message is permanently failed.
type ConsumerHandler struct {
	t *testing.T
	q *Consumer

	messagesGood   int
	messagesFailed int
}

func (h *ConsumerHandler) LogFailedMessage(message *Message) {
	h.messagesFailed++
	h.q.Stop()
}

func (h *ConsumerHandler) HandleMessage(message *Message) error {
	msg := string(message.Body)
	if msg == "bad_test_case" {
return errors.New("fail this message") } if msg != "multipublish_test_case" && msg != "publish_test_case" { h.t.Error("message 'action' was not correct:", msg) } h.messagesGood++ return nil } func TestProducerConnection(t *testing.T) { config := NewConfig() laddr := "127.0.0.2" config.LocalAddr, _ = net.ResolveTCPAddr("tcp", laddr+":0") w, _ := NewProducer("127.0.0.1:4150", config) w.SetLogger(nullLogger, LogLevelInfo) err := w.Publish("write_test", []byte("test")) if err != nil { t.Fatalf("should lazily connect - %s", err) } conn := w.conn.(*Conn) if !strings.HasPrefix(conn.conn.LocalAddr().String(), laddr) { t.Fatal("producer connection should be bound to specified address:", conn.conn.LocalAddr()) } w.Stop() err = w.Publish("write_test", []byte("fail test")) if err != ErrStopped { t.Fatalf("should not be able to write after Stop()") } } func TestProducerPing(t *testing.T) { log.SetOutput(ioutil.Discard) defer log.SetOutput(os.Stdout) config := NewConfig() w, _ := NewProducer("127.0.0.1:4150", config) w.SetLogger(nullLogger, LogLevelInfo) err := w.Ping() if err != nil { t.Fatalf("should connect on ping") } w.Stop() err = w.Ping() if err != ErrStopped { t.Fatalf("should not be able to ping after Stop()") } } func TestProducerPublish(t *testing.T) { topicName := "publish" + strconv.Itoa(int(time.Now().Unix())) msgCount := 10 config := NewConfig() w, _ := NewProducer("127.0.0.1:4150", config) w.SetLogger(nullLogger, LogLevelInfo) defer w.Stop() for i := 0; i < msgCount; i++ { err := w.Publish(topicName, []byte("publish_test_case")) if err != nil { t.Fatalf("error %s", err) } } err := w.Publish(topicName, []byte("bad_test_case")) if err != nil { t.Fatalf("error %s", err) } readMessages(topicName, t, msgCount) } func TestProducerMultiPublish(t *testing.T) { topicName := "multi_publish" + strconv.Itoa(int(time.Now().Unix())) msgCount := 10 config := NewConfig() w, _ := NewProducer("127.0.0.1:4150", config) w.SetLogger(nullLogger, LogLevelInfo) defer w.Stop() var 
testData [][]byte for i := 0; i < msgCount; i++ { testData = append(testData, []byte("multipublish_test_case")) } err := w.MultiPublish(topicName, testData) if err != nil { t.Fatalf("error %s", err) } err = w.Publish(topicName, []byte("bad_test_case")) if err != nil { t.Fatalf("error %s", err) } readMessages(topicName, t, msgCount) } func TestProducerPublishAsync(t *testing.T) { topicName := "async_publish" + strconv.Itoa(int(time.Now().Unix())) msgCount := 10 config := NewConfig() w, _ := NewProducer("127.0.0.1:4150", config) w.SetLogger(nullLogger, LogLevelInfo) defer w.Stop() responseChan := make(chan *ProducerTransaction, msgCount) for i := 0; i < msgCount; i++ { err := w.PublishAsync(topicName, []byte("publish_test_case"), responseChan, "test") if err != nil { t.Fatalf(err.Error()) } } for i := 0; i < msgCount; i++ { trans := <-responseChan if trans.Error != nil { t.Fatalf(trans.Error.Error()) } if trans.Args[0].(string) != "test" { t.Fatalf(`proxied arg "%s" != "test"`, trans.Args[0].(string)) } } err := w.Publish(topicName, []byte("bad_test_case")) if err != nil { t.Fatalf("error %s", err) } readMessages(topicName, t, msgCount) } func TestProducerMultiPublishAsync(t *testing.T) { topicName := "multi_publish" + strconv.Itoa(int(time.Now().Unix())) msgCount := 10 config := NewConfig() w, _ := NewProducer("127.0.0.1:4150", config) w.SetLogger(nullLogger, LogLevelInfo) defer w.Stop() var testData [][]byte for i := 0; i < msgCount; i++ { testData = append(testData, []byte("multipublish_test_case")) } responseChan := make(chan *ProducerTransaction) err := w.MultiPublishAsync(topicName, testData, responseChan, "test0", 1) if err != nil { t.Fatalf(err.Error()) } trans := <-responseChan if trans.Error != nil { t.Fatalf(trans.Error.Error()) } if trans.Args[0].(string) != "test0" { t.Fatalf(`proxied arg "%s" != "test0"`, trans.Args[0].(string)) } if trans.Args[1].(int) != 1 { t.Fatalf(`proxied arg %d != 1`, trans.Args[1].(int)) } err = w.Publish(topicName, 
[]byte("bad_test_case")) if err != nil { t.Fatalf("error %s", err) } readMessages(topicName, t, msgCount) } func TestProducerHeartbeat(t *testing.T) { topicName := "heartbeat" + strconv.Itoa(int(time.Now().Unix())) config := NewConfig() config.HeartbeatInterval = 100 * time.Millisecond w, _ := NewProducer("127.0.0.1:4150", config) w.SetLogger(nullLogger, LogLevelInfo) defer w.Stop() err := w.Publish(topicName, []byte("publish_test_case")) if err == nil { t.Fatalf("error should not be nil") } if identifyError, ok := err.(ErrIdentify); !ok || identifyError.Reason != "E_BAD_BODY IDENTIFY heartbeat interval (100) is invalid" { t.Fatalf("wrong error - %s", err) } config = NewConfig() config.HeartbeatInterval = 1000 * time.Millisecond w, _ = NewProducer("127.0.0.1:4150", config) w.SetLogger(nullLogger, LogLevelInfo) defer w.Stop() err = w.Publish(topicName, []byte("publish_test_case")) if err != nil { t.Fatalf(err.Error()) } time.Sleep(1100 * time.Millisecond) msgCount := 10 for i := 0; i < msgCount; i++ { err := w.Publish(topicName, []byte("publish_test_case")) if err != nil { t.Fatalf("error %s", err) } } err = w.Publish(topicName, []byte("bad_test_case")) if err != nil { t.Fatalf("error %s", err) } readMessages(topicName, t, msgCount+1) } func readMessages(topicName string, t *testing.T, msgCount int) { config := NewConfig() config.DefaultRequeueDelay = 0 config.MaxBackoffDuration = 50 * time.Millisecond q, _ := NewConsumer(topicName, "ch", config) q.SetLogger(nullLogger, LogLevelInfo) h := &ConsumerHandler{ t: t, q: q, } q.AddHandler(h) err := q.ConnectToNSQD("127.0.0.1:4150") if err != nil { t.Fatalf(err.Error()) } <-q.StopChan if h.messagesGood != msgCount { t.Fatalf("end of test. 
should have handled a diff number of messages %d != %d", h.messagesGood, msgCount) } if h.messagesFailed != 1 { t.Fatal("failed message not done") } } type mockProducerConn struct { delegate ConnDelegate closeCh chan struct{} pubCh chan struct{} } func newMockProducerConn(delegate ConnDelegate) producerConn { m := &mockProducerConn{ delegate: delegate, closeCh: make(chan struct{}), pubCh: make(chan struct{}, 4), } go m.router() return m } func (m *mockProducerConn) String() string { return "127.0.0.1:0" } func (m *mockProducerConn) SetLogger(logger logger, level LogLevel, prefix string) {} func (m *mockProducerConn) Connect() (*IdentifyResponse, error) { return &IdentifyResponse{}, nil } func (m *mockProducerConn) Close() error { close(m.closeCh) return nil } func (m *mockProducerConn) WriteCommand(cmd *Command) error { if bytes.Equal(cmd.Name, []byte("PUB")) { m.pubCh <- struct{}{} } return nil } func (m *mockProducerConn) router() { for { select { case <-m.closeCh: goto exit case <-m.pubCh: m.delegate.OnResponse(nil, framedResponse(FrameTypeResponse, []byte("OK"))) } } exit: } func BenchmarkProducer(b *testing.B) { b.StopTimer() body := make([]byte, 512) config := NewConfig() p, _ := NewProducer("127.0.0.1:0", config) p.conn = newMockProducerConn(&producerConnDelegate{p}) atomic.StoreInt32(&p.state, StateConnected) p.closeChan = make(chan int) go p.router() startCh := make(chan struct{}) var wg sync.WaitGroup parallel := runtime.GOMAXPROCS(0) for j := 0; j < parallel; j++ { wg.Add(1) go func() { <-startCh for i := 0; i < b.N/parallel; i++ { p.Publish("test", body) } wg.Done() }() } b.StartTimer() close(startCh) wg.Wait() } ================================================ FILE: vendor/github.com/bitly/go-nsq/protocol.go ================================================ package nsq import ( "encoding/binary" "errors" "io" "regexp" ) // MagicV1 is the initial identifier sent when connecting for V1 clients var MagicV1 = []byte(" V1") // MagicV2 is the initial 
identifier sent when connecting for V2 clients var MagicV2 = []byte(" V2") // frame types const ( FrameTypeResponse int32 = 0 FrameTypeError int32 = 1 FrameTypeMessage int32 = 2 ) var validTopicChannelNameRegex = regexp.MustCompile(`^[\.a-zA-Z0-9_-]+(#ephemeral)?$`) // IsValidTopicName checks a topic name for correctness func IsValidTopicName(name string) bool { return isValidName(name) } // IsValidChannelName checks a channel name for correctness func IsValidChannelName(name string) bool { return isValidName(name) } func isValidName(name string) bool { if len(name) > 64 || len(name) < 1 { return false } return validTopicChannelNameRegex.MatchString(name) } // ReadResponse is a client-side utility function to read from the supplied Reader // according to the NSQ protocol spec: // // [x][x][x][x][x][x][x][x]... // | (int32) || (binary) // | 4-byte || N-byte // ------------------------... // size data func ReadResponse(r io.Reader) ([]byte, error) { var msgSize int32 // message size err := binary.Read(r, binary.BigEndian, &msgSize) if err != nil { return nil, err } // message binary data buf := make([]byte, msgSize) _, err = io.ReadFull(r, buf) if err != nil { return nil, err } return buf, nil } // UnpackResponse is a client-side utility function that unpacks serialized data // according to NSQ protocol spec: // // [x][x][x][x][x][x][x][x]... // | (int32) || (binary) // | 4-byte || N-byte // ------------------------... 
// frame ID data // // Returns a triplicate of: frame type, data ([]byte), error func UnpackResponse(response []byte) (int32, []byte, error) { if len(response) < 4 { return -1, nil, errors.New("length of response is too small") } return int32(binary.BigEndian.Uint32(response)), response[4:], nil } // ReadUnpackedResponse reads and parses data from the underlying // TCP connection according to the NSQ TCP protocol spec and // returns the frameType, data or error func ReadUnpackedResponse(r io.Reader) (int32, []byte, error) { resp, err := ReadResponse(r) if err != nil { return -1, nil, err } return UnpackResponse(resp) } ================================================ FILE: vendor/github.com/bitly/go-nsq/states.go ================================================ package nsq // states const ( StateInit = iota StateDisconnected StateConnected StateSubscribed // StateClosing means CLOSE has started... // (responses are ok, but no new messages will be sent) StateClosing ) ================================================ FILE: vendor/github.com/bitly/go-nsq/test/ca.pem ================================================ -----BEGIN CERTIFICATE----- MIID9zCCAt+gAwIBAgIJAPYpAVNDj2lgMA0GCSqGSIb3DQEBBQUAMIGRMQswCQYD VQQGEwJERTEMMAoGA1UECAwDTlJXMQ4wDAYDVQQHDAVFYXJ0aDEXMBUGA1UECgwO UmFuZG9tIENvbXBhbnkxCzAJBgNVBAsMAklUMRcwFQYDVQQDDA53d3cucmFuZG9t LmNvbTElMCMGCSqGSIb3DQEJARYWS3J5cHRvS2luZ3NAcmFuZG9tLmNvbTAeFw0x NDA0MDIyMTE0NTJaFw0xNTA0MDIyMTE0NTJaMIGRMQswCQYDVQQGEwJERTEMMAoG A1UECAwDTlJXMQ4wDAYDVQQHDAVFYXJ0aDEXMBUGA1UECgwOUmFuZG9tIENvbXBh bnkxCzAJBgNVBAsMAklUMRcwFQYDVQQDDA53d3cucmFuZG9tLmNvbTElMCMGCSqG SIb3DQEJARYWS3J5cHRvS2luZ3NAcmFuZG9tLmNvbTCCASIwDQYJKoZIhvcNAQEB BQADggEPADCCAQoCggEBAL/sJU6ODQCsdWAmq3Qyp6vCqVFkSIHwR3oH8vPuwwob IOrx/pXz2LIRekQ4egT8LCH3QDxhEvFhDNXYM4h/mkQ+GpgzynoIqYrw+yF93pik T9Tpel2IuntThlZrO/4APRbVw4Ihf3zp47AY71J+8usJjmfWsId4dhqa1lTYecXK Zwxii8RTH/7LsuwIDOW1QJLGGKNzvVYA42Gh8Cw3uHlmqZ2tA/sp5qg1Z3QU5g7y 
EzzRybotHaRb5XMUWHAlGbIl/TW4KlFqFZ0kCXJXL1uO3uq2nIS3bG7ryjbobRVn dZ6sV34eenIeZWu6zlDxQP/EqxAezz5Ndyt9uYWb/JECAwEAAaNQME4wHQYDVR0O BBYEFI9l/QHE30clqx+1oCR6IhUYEdqLMB8GA1UdIwQYMBaAFI9l/QHE30clqx+1 oCR6IhUYEdqLMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAES6GKxL oeCLZa83TjJXLagcc9mmdQZgfF3/o61+ye7D9BLqBwN5lx4+kIE1LAUI/Id0mCdW 9uXmPhpCJ926krahNc4Ol+wQTmZ3j7Mn3DCkFufjr64cGPU/UzH4yjMg9wEf84qz 5oH+dBifwJM8yoRCxbnMqGBu3xY8WCjPlw8E8lizXFk8wUbLZ/EC5Rjm+KmdT5ud KTEgM+K6RMNo9vLn5ZasrYyhVcHdEKIzo6qLm1ZVIgpi/1WX0m8hACMfEcqee6ot 76LEyM3kwfqRkWGZWHEF9D4emp3quU+0AmjM57LHrYjidpDJkVTUHDoMBFHl9Uiq 0O9+azN48F/bVgU= -----END CERTIFICATE----- ================================================ FILE: vendor/github.com/bitly/go-nsq/test/server.key ================================================ -----BEGIN PRIVATE KEY----- MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDFHWaPfRA5nU/F E8AVoFj2TAgMRISLduWlbAgDnMtFLSGVwgjxU13Txsv0LgwJgo4A5xpd2WNV0dIQ brerxvPVJruKO8KxKFS2U58BCFIG0xGrlQSg5wDGyqxEQY80XlrBtxs81v79GYHy fBhizg7onlmbNZzxPy7idU0a7TpgzakeDrfJHQ7rI3llvR0U0TdOLno82CtPvosY 6TYZAIFYgH05yN7DWKuDUI8Fa2RFVkbHPUlJVKROw/0n1yWy7XcwTmQQyaodFYgg KMCdyR0ElPxLv8dKYFjLvef2DTmuYwbalt5hiQqOpY1wm616Xf4ywz2uEU+ooLW4 /Q6DcRUBAgMBAAECggEBAKDUgVc4YuGvkmOp3sOGhq/Hj5V63m7wvyV/BPb1mwFB drK7lBJbxCXEgaslWxrzVCw2ZFQIyL1AKtbPj7tv5ImZRdHfOtbTonL1vbnY8Ryy YSuPtiwW75JD0dULbO38wq5mWaoFfh5DDr0sNbWAjbeNZG14jCpnNDkAHr6Gq2hJ VzwEwy+W7LXn8s8lYJHi3MsxCJqAFN1u2FOkjBtrcVW06VgV87IX59SOFns80x4E Nn0ZKH7RU2DuJ7Fm4HtaNH+yaDYxUeo2A/2/UoavyYYCgC5gThsNjlp9/R4gtm0h VO+8cN5a3s7zL+aITIusY7H/rwWc6XpRmxQn+jwqF8ECgYEA5PpAz4fhXA9abFZx 0XqCgcwTFY5gTX/JDd1qv0b/PueAR7dY5d37bRbTXpzrHiNFVrq3x432V3+KY0b5 55PEB1YxwBUga5DvTSa5fLfUibvLpdZjganzdTOsG53wMvNwUT8iUzUQDLkyRfIi mV0r4Sa34RrBZdWJ2Aou9by2SlkCgYEA3GCHTP7nAcuHXRTsXH3eK/HsfwxdwjhA G5SG7L7KSoMpzCbe90DuYEr6J/O1nnP0QiSQ2uEeTOARzMfio4E16exWlDDtvPBQ HqSuQKg4M7fMTN1tj95xmk1yGZMyPxgEfCScBeCbYQzOyZ0j93iFjqMnb2mlriq5 MoSPat3BeukCgYEAjSGaFNABnUZxS1k0qhLCodHw6VZqERp0B7Gze9X8uP7jWFCv 
4G6j66cn/KbnXBoNQNmxMLRVY7TezTYQDiZLquH7pBLheqtIc9ssdKyxuXsgmES9 7EueHV0N9a+xPxZA4jLxqyuHivATBn2pybFdvFaq+3oMPgISBjCwpRH9oXECgYAN +n16j8ydW4iZieM4Nq+p/+1tXZ5w3FqMpU4tpCh2s30qOuj3rAGyz+6wLBBAHcDH lUQu7gqa+7eFUsR4dJCz5s7TFYtu6ZtbZjy7UzBFb4og8gaqEoUIMZNkNecBA4f9 S+EtqkKQ1Fwlg7ctUlK+anDs6zmcI4+dubTTJX/JSQKBgQCsu/gCgoOi2GFgebIh URvEMrhaiHxcw5u30nMNjWUGpDQK3lVTK51+7wj4xmVfiomvUW6M/HaR2+5xF1U1 QV08cKeWCGfGUFetTxjdhsVhMIk84ygF2l9K6jiHqvtd5rIoQ9Lf8XXbYaQVicRg qmB2iOzmbQQM/GOSofAeUfE7/A== -----END PRIVATE KEY----- ================================================ FILE: vendor/github.com/bitly/go-nsq/test/server.pem ================================================ -----BEGIN CERTIFICATE----- MIID7zCCAtegAwIBAgIJAMsErP97ZQmgMA0GCSqGSIb3DQEBBQUAMIGNMQswCQYD VQQGEwJERTEMMAoGA1UECAwDTlJXMQ4wDAYDVQQHDAVFYXJ0aDEXMBUGA1UECgwO UmFuZG9tIENvbXBhbnkxCzAJBgNVBAsMAklUMRcwFQYDVQQDDA53d3cucmFuZG9t LmNvbTEhMB8GCSqGSIb3DQEJARYSZm9vYmFyQGV4YW1wbGUuY29tMB4XDTE0MDQw MjIxMTQ1MloXDTI0MDMzMDIxMTQ1MlowgY0xCzAJBgNVBAYTAkRFMQwwCgYDVQQI DANOUlcxDjAMBgNVBAcMBUVhcnRoMRcwFQYDVQQKDA5SYW5kb20gQ29tcGFueTEL MAkGA1UECwwCSVQxFzAVBgNVBAMMDnd3dy5yYW5kb20uY29tMSEwHwYJKoZIhvcN AQkBFhJmb29iYXJAZXhhbXBsZS5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw ggEKAoIBAQDFHWaPfRA5nU/FE8AVoFj2TAgMRISLduWlbAgDnMtFLSGVwgjxU13T xsv0LgwJgo4A5xpd2WNV0dIQbrerxvPVJruKO8KxKFS2U58BCFIG0xGrlQSg5wDG yqxEQY80XlrBtxs81v79GYHyfBhizg7onlmbNZzxPy7idU0a7TpgzakeDrfJHQ7r I3llvR0U0TdOLno82CtPvosY6TYZAIFYgH05yN7DWKuDUI8Fa2RFVkbHPUlJVKRO w/0n1yWy7XcwTmQQyaodFYggKMCdyR0ElPxLv8dKYFjLvef2DTmuYwbalt5hiQqO pY1wm616Xf4ywz2uEU+ooLW4/Q6DcRUBAgMBAAGjUDBOMB0GA1UdDgQWBBTxyT32 Exu5TuortZY8zkVotLDNDTAfBgNVHSMEGDAWgBTxyT32Exu5TuortZY8zkVotLDN DTAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQAu+0B+caaV4HzIHyfX Zc6BUPcRoTEZIWX/7JLeeOVUztOjl9ExjYTzjo0QEt+PVcOzfQL/hxE2SPG6fRF7 YRZU1h9t5Ti9rTg9myAbGGMo6MdWZULFcxIWjxhv6qnFPk/fF47PvGwjygFNnzv8 FYmrAI99kK0CYolvXZ5ue250dpE/TCIAyk09a3WeBbHU/hMR/mBUNsitphelDbNK oohrY9D7QR5Mf/NZgx3a0eDH6zoMYDRPARY3M02EuHHiRKmlyfnPv4ns4/0wCarj 
pKpds+G80+k2fyiMgQ5bPTw8sfNgq1z0IvIuWB36XSNenTgnnjArbWii+x95jjNw XcQg -----END CERTIFICATE----- ================================================ FILE: vendor/github.com/bitly/go-nsq/test.sh ================================================ #!/bin/bash set -e # a helper script to run tests if ! which nsqd >/dev/null; then echo "missing nsqd binary" && exit 1 fi if ! which nsqlookupd >/dev/null; then echo "missing nsqlookupd binary" && exit 1 fi # run nsqlookupd LOOKUP_LOGFILE=$(mktemp -t nsqlookupd.XXXXXXX) echo "starting nsqlookupd" echo " logging to $LOOKUP_LOGFILE" nsqlookupd >$LOOKUP_LOGFILE 2>&1 & LOOKUPD_PID=$! # run nsqd configured to use our lookupd above rm -f *.dat NSQD_LOGFILE=$(mktemp -t nsqlookupd.XXXXXXX) EXTRA_ARGS="--tls-root-ca-file=./test/ca.pem" if [[ $NSQ_DOWNLOAD == nsq-0.2.24* ]] || [[ $NSQ_DOWNLOAD == nsq-0.2.27* ]]; then EXTRA_ARGS="" fi echo "starting nsqd --data-path=/tmp --lookupd-tcp-address=127.0.0.1:4160 --tls-cert=./test/server.pem --tls-key=./test/server.key $EXTRA_ARGS" echo " logging to $NSQD_LOGFILE" nsqd --data-path=/tmp --lookupd-tcp-address=127.0.0.1:4160 --tls-cert=./test/server.pem --tls-key=./test/server.key $EXTRA_ARGS >$NSQD_LOGFILE 2>&1 & NSQD_PID=$! 
sleep 0.3 cleanup() { echo "killing nsqd PID $NSQD_PID" kill -s TERM $NSQD_PID || cat $NSQD_LOGFILE echo "killing nsqlookupd PID $LOOKUPD_PID" kill -s TERM $LOOKUPD_PID || cat $LOOKUP_LOGFILE } trap cleanup INT TERM EXIT go test -v -timeout 60s ================================================ FILE: vendor/github.com/bitly/go-nsq/version.go ================================================ // Package nsq is the official Go package for NSQ (http://nsq.io/) // // It provides high-level Consumer and Producer types as well as low-level // functions to communicate over the NSQ protocol package nsq // VERSION const VERSION = "1.0.5-alpha" ================================================ FILE: vendor/github.com/cihub/seelog/LICENSE.txt ================================================ Copyright (c) 2012, Cloud Instruments Co., Ltd. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Cloud Instruments Co., Ltd. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================ FILE: vendor/github.com/cihub/seelog/README.markdown ================================================ Seelog ======= Seelog is a powerful and easy-to-learn logging framework that provides functionality for flexible dispatching, filtering, and formatting log messages. It is natively written in the [Go](http://golang.org/) programming language. [![Build Status](https://drone.io/github.com/cihub/seelog/status.png)](https://drone.io/github.com/cihub/seelog/latest) Features ------------------ * Xml configuring to be able to change logger parameters without recompilation * Changing configurations on the fly without app restart * Possibility to set different log configurations for different project files and functions * Adjustable message formatting * Simultaneous log output to multiple streams * Choosing logger priority strategy to minimize performance hit * Different output writers * Console writer * File writer * Buffered writer (Chunk writer) * Rolling log writer (Logging with rotation) * SMTP writer * Others... (See [Wiki](https://github.com/cihub/seelog/wiki)) * Log message wrappers (JSON, XML, etc.) 
* Global variables and functions for easy usage in standalone apps * Functions for flexible usage in libraries Quick-start ----------- ```go package main import log "github.com/cihub/seelog" func main() { defer log.Flush() log.Info("Hello from Seelog!") } ``` Installation ------------ If you don't have the Go development environment installed, visit the [Getting Started](http://golang.org/doc/install.html) document and follow the instructions. Once you're ready, execute the following command: ``` go get -u github.com/cihub/seelog ``` *IMPORTANT*: If you are not using the latest release version of Go, check out this [wiki page](https://github.com/cihub/seelog/wiki/Notes-on-'go-get') Documentation --------------- Seelog has github wiki pages, which contain detailed how-tos references: https://github.com/cihub/seelog/wiki Examples --------------- Seelog examples can be found here: [seelog-examples](https://github.com/cihub/seelog-examples) Issues --------------- Feel free to push issues that could make Seelog better: https://github.com/cihub/seelog/issues Changelog --------------- * **v2.5** : Interaction with other systems. Part 2: custom receivers * Finished custom receivers feature. Check [wiki](https://github.com/cihub/seelog/wiki/custom-receivers) * Added 'LoggerFromCustomReceiver' * Added 'LoggerFromWriterWithMinLevelAndFormat' * Added 'LoggerFromCustomReceiver' * Added 'LoggerFromParamConfigAs...' * **v2.4** : Interaction with other systems. 
Part 1: wrapping seelog * Added configurable caller stack skip logic * Added 'SetAdditionalStackDepth' to 'LoggerInterface' * **v2.3** : Rethinking 'rolling' receiver * Reimplemented 'rolling' receiver * Added 'Max rolls' feature for 'rolling' receiver with type='date' * Fixed 'rolling' receiver issue: renaming on Windows * **v2.2** : go1.0 compatibility point [go1.0 tag] * Fixed internal bugs * Added 'ANSI n [;k]' format identifier: %EscN * Made current release go1 compatible * **v2.1** : Some new features * Rolling receiver archiving option. * Added format identifier: %Line * Smtp: added paths to PEM files directories * Added format identifier: %FuncShort * Warn, Error and Critical methods now return an error * **v2.0** : Second major release. BREAKING CHANGES. * Support of binaries with stripped symbols * Added log strategy: adaptive * Critical message now forces Flush() * Added predefined formats: xml-debug, xml-debug-short, xml, xml-short, json-debug, json-debug-short, json, json-short, debug, debug-short, fast * Added receiver: conn (network connection writer) * BREAKING CHANGE: added Tracef, Debugf, Infof, etc. to satisfy the print/printf principle * Bug fixes * **v1.0** : Initial release. Features: * Xml config * Changing configurations on the fly without app restart * Contraints and exceptions * Formatting * Log strategies: sync, async loop, async timer * Receivers: buffered, console, file, rolling, smtp ================================================ FILE: vendor/github.com/cihub/seelog/behavior_adaptive_test.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. 
Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package seelog import ( "bufio" "bytes" "fmt" "io" "io/ioutil" "strconv" "testing" ) func countSequencedRowsInFile(filePath string) (int64, error) { bts, err := ioutil.ReadFile(filePath) if err != nil { return 0, err } bufReader := bufio.NewReader(bytes.NewBuffer(bts)) var gotCounter int64 for { line, _, bufErr := bufReader.ReadLine() if bufErr != nil && bufErr != io.EOF { return 0, bufErr } lineString := string(line) if lineString == "" { break } intVal, atoiErr := strconv.ParseInt(lineString, 10, 64) if atoiErr != nil { return 0, atoiErr } if intVal != gotCounter { return 0, fmt.Errorf("wrong order: %d Expected: %d\n", intVal, gotCounter) } gotCounter++ } return gotCounter, nil } func Test_Adaptive(t *testing.T) { fileName := "beh_test_adaptive.log" count := 100 Current.Close() if e := tryRemoveFile(fileName); e != nil { t.Error(e) return } defer func() { if e := tryRemoveFile(fileName); e != nil { t.Error(e) } }() testConfig := ` ` logger, _ := LoggerFromConfigAsString(testConfig) err 
:= ReplaceLogger(logger) if err != nil { t.Error(err) return } for i := 0; i < count; i++ { Trace(strconv.Itoa(i)) } Flush() gotCount, err := countSequencedRowsInFile(fileName) if err != nil { t.Error(err) return } if int64(count) != gotCount { t.Errorf("wrong count of log messages. Expected: %v, got: %v.", count, gotCount) return } Current.Close() } ================================================ FILE: vendor/github.com/cihub/seelog/behavior_adaptivelogger.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
package seelog import ( "errors" "fmt" "math" "time" ) var ( adaptiveLoggerMaxInterval = time.Minute adaptiveLoggerMaxCriticalMsgCount = uint32(1000) ) // asyncAdaptiveLogger represents asynchronous adaptive logger which acts like // an async timer logger, but its interval depends on the current message count // in the queue. // // Interval = I, minInterval = m, maxInterval = M, criticalMsgCount = C, msgCount = c: // I = m + (C - Min(c, C)) / C * (M - m) type asyncAdaptiveLogger struct { asyncLogger minInterval time.Duration criticalMsgCount uint32 maxInterval time.Duration } // newAsyncLoopLogger creates a new asynchronous adaptive logger func newAsyncAdaptiveLogger( config *logConfig, minInterval time.Duration, maxInterval time.Duration, criticalMsgCount uint32) (*asyncAdaptiveLogger, error) { if minInterval <= 0 { return nil, errors.New("async adaptive logger min interval should be > 0") } if maxInterval > adaptiveLoggerMaxInterval { return nil, fmt.Errorf("async adaptive logger max interval should be <= %s", adaptiveLoggerMaxInterval) } if criticalMsgCount <= 0 { return nil, errors.New("async adaptive logger critical msg count should be > 0") } if criticalMsgCount > adaptiveLoggerMaxCriticalMsgCount { return nil, fmt.Errorf("async adaptive logger critical msg count should be <= %s", adaptiveLoggerMaxInterval) } asnAdaptiveLogger := new(asyncAdaptiveLogger) asnAdaptiveLogger.asyncLogger = *newAsyncLogger(config) asnAdaptiveLogger.minInterval = minInterval asnAdaptiveLogger.maxInterval = maxInterval asnAdaptiveLogger.criticalMsgCount = criticalMsgCount go asnAdaptiveLogger.processQueue() return asnAdaptiveLogger, nil } func (asnAdaptiveLogger *asyncAdaptiveLogger) processItem() (closed bool, itemCount int) { asnAdaptiveLogger.queueHasElements.L.Lock() defer asnAdaptiveLogger.queueHasElements.L.Unlock() for asnAdaptiveLogger.msgQueue.Len() == 0 && !asnAdaptiveLogger.closed { asnAdaptiveLogger.queueHasElements.Wait() } if asnAdaptiveLogger.closed { return true, 
asnAdaptiveLogger.msgQueue.Len() } asnAdaptiveLogger.processQueueElement() return false, asnAdaptiveLogger.msgQueue.Len() - 1 } // I = m + (C - Min(c, C)) / C * (M - m) => // I = m + cDiff * mDiff, // cDiff = (C - Min(c, C)) / C) // mDiff = (M - m) func (asnAdaptiveLogger *asyncAdaptiveLogger) calcAdaptiveInterval(msgCount int) time.Duration { critCountF := float64(asnAdaptiveLogger.criticalMsgCount) cDiff := (critCountF - math.Min(float64(msgCount), critCountF)) / critCountF mDiff := float64(asnAdaptiveLogger.maxInterval - asnAdaptiveLogger.minInterval) return asnAdaptiveLogger.minInterval + time.Duration(cDiff*mDiff) } func (asnAdaptiveLogger *asyncAdaptiveLogger) processQueue() { for !asnAdaptiveLogger.closed { closed, itemCount := asnAdaptiveLogger.processItem() if closed { break } interval := asnAdaptiveLogger.calcAdaptiveInterval(itemCount) <-time.After(interval) } } ================================================ FILE: vendor/github.com/cihub/seelog/behavior_asynclogger.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package seelog import ( "container/list" "fmt" "sync" ) // MaxQueueSize is the critical number of messages in the queue that result in an immediate flush. const ( MaxQueueSize = 10000 ) type msgQueueItem struct { level LogLevel context LogContextInterface message fmt.Stringer } // asyncLogger represents common data for all asynchronous loggers type asyncLogger struct { commonLogger msgQueue *list.List queueHasElements *sync.Cond } // newAsyncLogger creates a new asynchronous logger func newAsyncLogger(config *logConfig) *asyncLogger { asnLogger := new(asyncLogger) asnLogger.msgQueue = list.New() asnLogger.queueHasElements = sync.NewCond(new(sync.Mutex)) asnLogger.commonLogger = *newCommonLogger(config, asnLogger) return asnLogger } func (asnLogger *asyncLogger) innerLog( level LogLevel, context LogContextInterface, message fmt.Stringer) { asnLogger.addMsgToQueue(level, context, message) } func (asnLogger *asyncLogger) Close() { asnLogger.m.Lock() defer asnLogger.m.Unlock() if !asnLogger.closed { asnLogger.flushQueue(true) asnLogger.config.RootDispatcher.Flush() if err := asnLogger.config.RootDispatcher.Close(); err != nil { reportInternalError(err) } asnLogger.queueHasElements.Broadcast() } } func (asnLogger *asyncLogger) Flush() { asnLogger.m.Lock() defer asnLogger.m.Unlock() if !asnLogger.closed { asnLogger.flushQueue(true) asnLogger.config.RootDispatcher.Flush() } } func (asnLogger *asyncLogger) flushQueue(lockNeeded bool) { if lockNeeded { 
asnLogger.queueHasElements.L.Lock() defer asnLogger.queueHasElements.L.Unlock() } for asnLogger.msgQueue.Len() > 0 { asnLogger.processQueueElement() } } func (asnLogger *asyncLogger) processQueueElement() { if asnLogger.msgQueue.Len() > 0 { backElement := asnLogger.msgQueue.Front() msg, _ := backElement.Value.(msgQueueItem) asnLogger.processLogMsg(msg.level, msg.message, msg.context) asnLogger.msgQueue.Remove(backElement) } } func (asnLogger *asyncLogger) addMsgToQueue( level LogLevel, context LogContextInterface, message fmt.Stringer) { if !asnLogger.closed { asnLogger.queueHasElements.L.Lock() defer asnLogger.queueHasElements.L.Unlock() if asnLogger.msgQueue.Len() >= MaxQueueSize { fmt.Printf("Seelog queue overflow: more than %v messages in the queue. Flushing.\n", MaxQueueSize) asnLogger.flushQueue(false) } queueItem := msgQueueItem{level, context, message} asnLogger.msgQueue.PushBack(queueItem) asnLogger.queueHasElements.Broadcast() } else { err := fmt.Errorf("queue closed! Cannot process element: %d %#v", level, message) reportInternalError(err) } } ================================================ FILE: vendor/github.com/cihub/seelog/behavior_asyncloop_test.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package seelog import ( "strconv" "testing" ) func Test_Asyncloop(t *testing.T) { fileName := "beh_test_asyncloop.log" count := 100 Current.Close() if e := tryRemoveFile(fileName); e != nil { t.Error(e) return } defer func() { if e := tryRemoveFile(fileName); e != nil { t.Error(e) } }() testConfig := ` ` logger, _ := LoggerFromConfigAsString(testConfig) err := ReplaceLogger(logger) if err != nil { t.Error(err) return } for i := 0; i < count; i++ { Trace(strconv.Itoa(i)) } Flush() gotCount, err := countSequencedRowsInFile(fileName) if err != nil { t.Error(err) return } if int64(count) != gotCount { t.Errorf("wrong count of log messages. 
Expected: %v, got: %v.", count, gotCount) return } Current.Close() } func Test_AsyncloopOff(t *testing.T) { fileName := "beh_test_asyncloopoff.log" count := 100 Current.Close() if e := tryRemoveFile(fileName); e != nil { t.Error(e) return } testConfig := ` ` logger, _ := LoggerFromConfigAsString(testConfig) err := ReplaceLogger(logger) if err != nil { t.Error(err) return } for i := 0; i < count; i++ { Trace(strconv.Itoa(i)) } Flush() ex, err := fileExists(fileName) if err != nil { t.Error(err) } if ex { t.Errorf("logger at level OFF is not expected to create log file at all.") defer func() { if e := tryRemoveFile(fileName); e != nil { t.Error(e) } }() } Current.Close() } ================================================ FILE: vendor/github.com/cihub/seelog/behavior_asynclooplogger.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package seelog // asyncLoopLogger represents asynchronous logger which processes the log queue in // a 'for' loop type asyncLoopLogger struct { asyncLogger } // newAsyncLoopLogger creates a new asynchronous loop logger func newAsyncLoopLogger(config *logConfig) *asyncLoopLogger { asnLoopLogger := new(asyncLoopLogger) asnLoopLogger.asyncLogger = *newAsyncLogger(config) go asnLoopLogger.processQueue() return asnLoopLogger } func (asnLoopLogger *asyncLoopLogger) processItem() (closed bool) { asnLoopLogger.queueHasElements.L.Lock() defer asnLoopLogger.queueHasElements.L.Unlock() for asnLoopLogger.msgQueue.Len() == 0 && !asnLoopLogger.closed { asnLoopLogger.queueHasElements.Wait() } if asnLoopLogger.closed { return true } asnLoopLogger.processQueueElement() return false } func (asnLoopLogger *asyncLoopLogger) processQueue() { for !asnLoopLogger.closed { closed := asnLoopLogger.processItem() if closed { break } } } ================================================ FILE: vendor/github.com/cihub/seelog/behavior_asynctimer_test.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. 
Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package seelog import ( "strconv" "testing" ) func Test_Asynctimer(t *testing.T) { fileName := "beh_test_asynctimer.log" count := 100 Current.Close() if e := tryRemoveFile(fileName); e != nil { t.Error(e) return } defer func() { if e := tryRemoveFile(fileName); e != nil { t.Error(e) } }() testConfig := ` ` logger, _ := LoggerFromConfigAsString(testConfig) err := ReplaceLogger(logger) if err != nil { t.Error(err) return } for i := 0; i < count; i++ { Trace(strconv.Itoa(i)) } Flush() gotCount, err := countSequencedRowsInFile(fileName) if err != nil { t.Error(err) return } if int64(count) != gotCount { t.Errorf("wrong count of log messages. Expected: %v, got: %v.", count, gotCount) return } Current.Close() } ================================================ FILE: vendor/github.com/cihub/seelog/behavior_asynctimerlogger.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
package seelog import ( "errors" "time" ) // asyncTimerLogger represents asynchronous logger which processes the log queue each // 'duration' nanoseconds type asyncTimerLogger struct { asyncLogger interval time.Duration } // newAsyncLoopLogger creates a new asynchronous loop logger func newAsyncTimerLogger(config *logConfig, interval time.Duration) (*asyncTimerLogger, error) { if interval <= 0 { return nil, errors.New("async logger interval should be > 0") } asnTimerLogger := new(asyncTimerLogger) asnTimerLogger.asyncLogger = *newAsyncLogger(config) asnTimerLogger.interval = interval go asnTimerLogger.processQueue() return asnTimerLogger, nil } func (asnTimerLogger *asyncTimerLogger) processItem() (closed bool) { asnTimerLogger.queueHasElements.L.Lock() defer asnTimerLogger.queueHasElements.L.Unlock() for asnTimerLogger.msgQueue.Len() == 0 && !asnTimerLogger.closed { asnTimerLogger.queueHasElements.Wait() } if asnTimerLogger.closed { return true } asnTimerLogger.processQueueElement() return false } func (asnTimerLogger *asyncTimerLogger) processQueue() { for !asnTimerLogger.closed { closed := asnTimerLogger.processItem() if closed { break } <-time.After(asnTimerLogger.interval) } } ================================================ FILE: vendor/github.com/cihub/seelog/behavior_synclogger.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package seelog import ( "fmt" ) // syncLogger performs logging in the same goroutine where 'Trace/Debug/...' // func was called type syncLogger struct { commonLogger } // newSyncLogger creates a new synchronous logger func newSyncLogger(config *logConfig) *syncLogger { syncLogger := new(syncLogger) syncLogger.commonLogger = *newCommonLogger(config, syncLogger) return syncLogger } func (syncLogger *syncLogger) innerLog( level LogLevel, context LogContextInterface, message fmt.Stringer) { syncLogger.processLogMsg(level, message, context) } func (syncLogger *syncLogger) Close() { syncLogger.m.Lock() defer syncLogger.m.Unlock() if !syncLogger.closed { if err := syncLogger.config.RootDispatcher.Close(); err != nil { reportInternalError(err) } } } func (syncLogger *syncLogger) Flush() { syncLogger.m.Lock() defer syncLogger.m.Unlock() if !syncLogger.closed { syncLogger.config.RootDispatcher.Flush() } } ================================================ FILE: vendor/github.com/cihub/seelog/behavior_synclogger_test.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package seelog import ( "strconv" "testing" ) func Test_Sync(t *testing.T) { fileName := "beh_test_sync.log" count := 100 Current.Close() if e := tryRemoveFile(fileName); e != nil { t.Error(e) return } defer func() { if e := tryRemoveFile(fileName); e != nil { t.Error(e) } }() testConfig := ` ` logger, _ := LoggerFromConfigAsString(testConfig) err := ReplaceLogger(logger) if err != nil { t.Error(err) return } for i := 0; i < count; i++ { Trace(strconv.Itoa(i)) } gotCount, err := countSequencedRowsInFile(fileName) if err != nil { t.Error(err) return } if int64(count) != gotCount { t.Errorf("wrong count of log messages. 
Expected: %v, got: %v.", count, gotCount) return } Current.Close() } ================================================ FILE: vendor/github.com/cihub/seelog/cfg_config.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package seelog import ( "bytes" "encoding/xml" "io" "os" ) // LoggerFromConfigAsFile creates logger with config from file. File should contain valid seelog xml. 
func LoggerFromConfigAsFile(fileName string) (LoggerInterface, error) { file, err := os.Open(fileName) if err != nil { return nil, err } defer file.Close() conf, err := configFromReader(file) if err != nil { return nil, err } return createLoggerFromConfig(conf) } // LoggerFromConfigAsBytes creates a logger with config from bytes stream. Bytes should contain valid seelog xml. func LoggerFromConfigAsBytes(data []byte) (LoggerInterface, error) { conf, err := configFromReader(bytes.NewBuffer(data)) if err != nil { return nil, err } return createLoggerFromConfig(conf) } // LoggerFromConfigAsString creates a logger with config from a string. String should contain valid seelog xml. func LoggerFromConfigAsString(data string) (LoggerInterface, error) { return LoggerFromConfigAsBytes([]byte(data)) } // LoggerFromParamConfigAsFile does the same as LoggerFromConfigAsFile, but includes special parser options. // See 'CfgParseParams' comments. func LoggerFromParamConfigAsFile(fileName string, parserParams *CfgParseParams) (LoggerInterface, error) { file, err := os.Open(fileName) if err != nil { return nil, err } defer file.Close() conf, err := configFromReaderWithConfig(file, parserParams) if err != nil { return nil, err } return createLoggerFromConfig(conf) } // LoggerFromParamConfigAsBytes does the same as LoggerFromConfigAsBytes, but includes special parser options. // See 'CfgParseParams' comments. func LoggerFromParamConfigAsBytes(data []byte, parserParams *CfgParseParams) (LoggerInterface, error) { conf, err := configFromReaderWithConfig(bytes.NewBuffer(data), parserParams) if err != nil { return nil, err } return createLoggerFromConfig(conf) } // LoggerFromParamConfigAsString does the same as LoggerFromConfigAsString, but includes special parser options. // See 'CfgParseParams' comments. 
func LoggerFromParamConfigAsString(data string, parserParams *CfgParseParams) (LoggerInterface, error) {
	return LoggerFromParamConfigAsBytes([]byte(data), parserParams)
}

// LoggerFromWriterWithMinLevel is shortcut for LoggerFromWriterWithMinLevelAndFormat(output, minLevel, DefaultMsgFormat)
func LoggerFromWriterWithMinLevel(output io.Writer, minLevel LogLevel) (LoggerInterface, error) {
	return LoggerFromWriterWithMinLevelAndFormat(output, minLevel, DefaultMsgFormat)
}

// LoggerFromWriterWithMinLevelAndFormat creates a proxy logger that uses io.Writer as the
// receiver with minimal level = minLevel and with specified format.
//
// All messages with level more or equal to minLevel will be written to output and
// formatted using the default seelog format.
//
// Can be called for usage with non-Seelog systems
func LoggerFromWriterWithMinLevelAndFormat(output io.Writer, minLevel LogLevel, format string) (LoggerInterface, error) {
	// Allow everything from minLevel up to (and including) CriticalLvl.
	cons, err := newMinMaxConstraints(minLevel, CriticalLvl)
	if err != nil {
		return nil, err
	}
	fmtr, err := newFormatter(format)
	if err != nil {
		return nil, err
	}
	// Single-receiver dispatcher wrapping the caller-supplied writer.
	disp, err := newSplitDispatcher(fmtr, []interface{}{output})
	if err != nil {
		return nil, err
	}
	// Synchronous logger type: messages are written inline, no background loop.
	c, err := newConfig(cons, make([]*logLevelException, 0), disp, syncloggerTypeFromString, nil, nil)
	if err != nil {
		return nil, err
	}
	return createLoggerFromConfig(c)
}

// LoggerFromXMLDecoder creates logger with config from a XML decoder starting from a specific node.
// It should contain valid seelog xml, except for root node name.
func LoggerFromXMLDecoder(xmlParser *xml.Decoder, rootNode xml.Token) (LoggerInterface, error) {
	c, err := configFromXMLDecoder(xmlParser, rootNode)
	if err != nil {
		return nil, err
	}
	return createLoggerFromConfig(c)
}

// LoggerFromCustomReceiver creates a proxy logger that uses a CustomReceiver as the
// receiver.
// // All messages will be sent to the specified custom receiver without additional // formatting ('%Msg' format is used). // // Check CustomReceiver, RegisterReceiver for additional info. // // NOTE 1: CustomReceiver.AfterParse is only called when a receiver is instantiated // by the config parser while parsing config. So, if you are not planning to use the // same CustomReceiver for both proxying (via LoggerFromCustomReceiver call) and // loading from config, just leave AfterParse implementation empty. // // NOTE 2: Unlike RegisterReceiver, LoggerFromCustomReceiver takes an already initialized // instance that implements CustomReceiver. So, fill it with data and perform any initialization // logic before calling this func and it won't be lost. // // So: // * RegisterReceiver takes value just to get the reflect.Type from it and then // instantiate it as many times as config is reloaded. // // * LoggerFromCustomReceiver takes value and uses it without modification and // reinstantiation, directy passing it to the dispatcher tree. func LoggerFromCustomReceiver(receiver CustomReceiver) (LoggerInterface, error) { constraints, err := newMinMaxConstraints(TraceLvl, CriticalLvl) if err != nil { return nil, err } output, err := newCustomReceiverDispatcherByValue(msgonlyformatter, receiver, "user-proxy", CustomReceiverInitArgs{}) if err != nil { return nil, err } dispatcher, err := newSplitDispatcher(msgonlyformatter, []interface{}{output}) if err != nil { return nil, err } conf, err := newConfig(constraints, make([]*logLevelException, 0), dispatcher, syncloggerTypeFromString, nil, nil) if err != nil { return nil, err } return createLoggerFromConfig(conf) } ================================================ FILE: vendor/github.com/cihub/seelog/cfg_errors.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
package seelog import ( "errors" ) var ( errNodeMustHaveChildren = errors.New("node must have children") errNodeCannotHaveChildren = errors.New("node cannot have children") ) type unexpectedChildElementError struct { baseError } func newUnexpectedChildElementError(msg string) *unexpectedChildElementError { custmsg := "Unexpected child element: " + msg return &unexpectedChildElementError{baseError{message: custmsg}} } type missingArgumentError struct { baseError } func newMissingArgumentError(nodeName, attrName string) *missingArgumentError { custmsg := "Output '" + nodeName + "' has no '" + attrName + "' attribute" return &missingArgumentError{baseError{message: custmsg}} } type unexpectedAttributeError struct { baseError } func newUnexpectedAttributeError(nodeName, attr string) *unexpectedAttributeError { custmsg := nodeName + " has unexpected attribute: " + attr return &unexpectedAttributeError{baseError{message: custmsg}} } ================================================ FILE: vendor/github.com/cihub/seelog/cfg_logconfig.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package seelog import ( "errors" ) type loggerTypeFromString uint8 const ( syncloggerTypeFromString = iota asyncLooploggerTypeFromString asyncTimerloggerTypeFromString adaptiveLoggerTypeFromString defaultloggerTypeFromString = asyncLooploggerTypeFromString ) const ( syncloggerTypeFromStringStr = "sync" asyncloggerTypeFromStringStr = "asyncloop" asyncTimerloggerTypeFromStringStr = "asynctimer" adaptiveLoggerTypeFromStringStr = "adaptive" ) // asyncTimerLoggerData represents specific data for async timer logger type asyncTimerLoggerData struct { AsyncInterval uint32 } // adaptiveLoggerData represents specific data for adaptive timer logger type adaptiveLoggerData struct { MinInterval uint32 MaxInterval uint32 CriticalMsgCount uint32 } var loggerTypeToStringRepresentations = map[loggerTypeFromString]string{ syncloggerTypeFromString: syncloggerTypeFromStringStr, asyncLooploggerTypeFromString: asyncloggerTypeFromStringStr, asyncTimerloggerTypeFromString: asyncTimerloggerTypeFromStringStr, adaptiveLoggerTypeFromString: adaptiveLoggerTypeFromStringStr, } // getLoggerTypeFromString parses a string and returns a corresponding logger type, if successful. func getLoggerTypeFromString(logTypeString string) (level loggerTypeFromString, found bool) { for logType, logTypeStr := range loggerTypeToStringRepresentations { if logTypeStr == logTypeString { return logType, true } } return 0, false } // logConfig stores logging configuration. 
Contains messages dispatcher, allowed log level rules // (general constraints and exceptions), and messages formats (used by nodes of dispatcher tree) type logConfig struct { Constraints logLevelConstraints // General log level rules (>min and ` conf, err := configFromReader(strings.NewReader(testConfig)) if err != nil { t.Errorf("parse error: %s\n", err.Error()) return } context, err := currentContext() if err != nil { t.Errorf("cannot get current context:" + err.Error()) return } firstContext, err := getFirstContext() if err != nil { t.Errorf("cannot get current context:" + err.Error()) return } secondContext, err := getSecondContext() if err != nil { t.Errorf("cannot get current context:" + err.Error()) return } if !conf.IsAllowed(TraceLvl, context) { t.Errorf("error: deny trace in current context") } if conf.IsAllowed(TraceLvl, firstContext) { t.Errorf("error: allow trace in first context") } if conf.IsAllowed(ErrorLvl, context) { t.Errorf("error: allow error in current context") } if !conf.IsAllowed(ErrorLvl, secondContext) { t.Errorf("error: deny error in second context") } // cache test if !conf.IsAllowed(TraceLvl, context) { t.Errorf("error: deny trace in current context") } if conf.IsAllowed(TraceLvl, firstContext) { t.Errorf("error: allow trace in first context") } if conf.IsAllowed(ErrorLvl, context) { t.Errorf("error: allow error in current context") } if !conf.IsAllowed(ErrorLvl, secondContext) { t.Errorf("error: deny error in second context") } } func getFirstContext() (LogContextInterface, error) { return currentContext() } func getSecondContext() (LogContextInterface, error) { return currentContext() } ================================================ FILE: vendor/github.com/cihub/seelog/cfg_parser.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package seelog import ( "crypto/tls" "encoding/xml" "errors" "fmt" "io" "strconv" "strings" "time" ) // Names of elements of seelog config. 
const ( seelogConfigID = "seelog" outputsID = "outputs" formatsID = "formats" minLevelID = "minlevel" maxLevelID = "maxlevel" levelsID = "levels" exceptionsID = "exceptions" exceptionID = "exception" funcPatternID = "funcpattern" filePatternID = "filepattern" formatID = "format" formatAttrID = "format" formatKeyAttrID = "id" outputFormatID = "formatid" pathID = "path" fileWriterID = "file" smtpWriterID = "smtp" senderaddressID = "senderaddress" senderNameID = "sendername" recipientID = "recipient" mailHeaderID = "header" mailHeaderNameID = "name" mailHeaderValueID = "value" addressID = "address" hostNameID = "hostname" hostPortID = "hostport" userNameID = "username" userPassID = "password" cACertDirpathID = "cacertdirpath" subjectID = "subject" splitterDispatcherID = "splitter" consoleWriterID = "console" customReceiverID = "custom" customNameAttrID = "name" customNameDataAttrPrefix = "data-" filterDispatcherID = "filter" filterLevelsAttrID = "levels" rollingfileWriterID = "rollingfile" rollingFileTypeAttr = "type" rollingFilePathAttr = "filename" rollingFileMaxSizeAttr = "maxsize" rollingFileMaxRollsAttr = "maxrolls" rollingFileNameModeAttr = "namemode" rollingFileDataPatternAttr = "datepattern" rollingFileArchiveAttr = "archivetype" rollingFileArchivePathAttr = "archivepath" bufferedWriterID = "buffered" bufferedSizeAttr = "size" bufferedFlushPeriodAttr = "flushperiod" loggerTypeFromStringAttr = "type" asyncLoggerIntervalAttr = "asyncinterval" adaptLoggerMinIntervalAttr = "mininterval" adaptLoggerMaxIntervalAttr = "maxinterval" adaptLoggerCriticalMsgCountAttr = "critmsgcount" predefinedPrefix = "std:" connWriterID = "conn" connWriterAddrAttr = "addr" connWriterNetAttr = "net" connWriterReconnectOnMsgAttr = "reconnectonmsg" connWriterUseTLSAttr = "tls" connWriterInsecureSkipVerifyAttr = "insecureskipverify" ) // CustomReceiverProducer is the signature of the function CfgParseParams needs to create // custom receivers. 
type CustomReceiverProducer func(CustomReceiverInitArgs) (CustomReceiver, error) // CfgParseParams represent specific parse options or flags used by parser. It is used if seelog parser needs // some special directives or additional info to correctly parse a config. type CfgParseParams struct { // CustomReceiverProducers expose the same functionality as RegisterReceiver func // but only in the scope (context) of the config parse func instead of a global package scope. // // It means that if you use custom receivers in your code, you may either register them globally once with // RegisterReceiver or you may call funcs like LoggerFromParamConfigAsFile (with 'ParamConfig') // and use CustomReceiverProducers to provide custom producer funcs. // // A producer func is called when config parser processes a '' element. It takes the 'name' attribute // of the element and tries to find a match in two places: // 1) CfgParseParams.CustomReceiverProducers map // 2) Global type map, filled by RegisterReceiver // // If a match is found in the CustomReceiverProducers map, parser calls the corresponding producer func // passing the init args to it. The func takes exactly the same args as CustomReceiver.AfterParse. // The producer func must return a correct receiver or an error. If case of error, seelog will behave // in the same way as with any other config error. // // You may use this param to set custom producers in case you need to pass some context when instantiating // a custom receiver or if you frequently change custom receivers with different parameters or in any other // situation where package-level registering (RegisterReceiver) is not an option for you. 
CustomReceiverProducers map[string]CustomReceiverProducer } func (cfg *CfgParseParams) String() string { return fmt.Sprintf("CfgParams: {custom_recs=%d}", len(cfg.CustomReceiverProducers)) } type elementMapEntry struct { constructor func(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) } var elementMap map[string]elementMapEntry var predefinedFormats map[string]*formatter func init() { elementMap = map[string]elementMapEntry{ fileWriterID: {createfileWriter}, splitterDispatcherID: {createSplitter}, customReceiverID: {createCustomReceiver}, filterDispatcherID: {createFilter}, consoleWriterID: {createConsoleWriter}, rollingfileWriterID: {createRollingFileWriter}, bufferedWriterID: {createbufferedWriter}, smtpWriterID: {createSMTPWriter}, connWriterID: {createconnWriter}, } err := fillPredefinedFormats() if err != nil { panic(fmt.Sprintf("Seelog couldn't start: predefined formats creation failed. Error: %s", err.Error())) } } func fillPredefinedFormats() error { predefinedFormatsWithoutPrefix := map[string]string{ "xml-debug": `%Lev%Msg%RelFile%Func%Line`, "xml-debug-short": `%Ns%l%Msg

%RelFile

%Func`, "xml": `%Lev%Msg`, "xml-short": `%Ns%l%Msg`, "json-debug": `{"time":%Ns,"lev":"%Lev","msg":"%Msg","path":"%RelFile","func":"%Func","line":"%Line"}`, "json-debug-short": `{"t":%Ns,"l":"%Lev","m":"%Msg","p":"%RelFile","f":"%Func"}`, "json": `{"time":%Ns,"lev":"%Lev","msg":"%Msg"}`, "json-short": `{"t":%Ns,"l":"%Lev","m":"%Msg"}`, "debug": `[%LEVEL] %RelFile:%Func.%Line %Date %Time %Msg%n`, "debug-short": `[%LEVEL] %Date %Time %Msg%n`, "fast": `%Ns %l %Msg%n`, } predefinedFormats = make(map[string]*formatter) for formatKey, format := range predefinedFormatsWithoutPrefix { formatter, err := newFormatter(format) if err != nil { return err } predefinedFormats[predefinedPrefix+formatKey] = formatter } return nil } // configFromXMLDecoder parses data from a given XML decoder. // Returns parsed config which can be used to create logger in case no errors occured. // Returns error if format is incorrect or anything happened. func configFromXMLDecoder(xmlParser *xml.Decoder, rootNode xml.Token) (*logConfig, error) { return configFromXMLDecoderWithConfig(xmlParser, rootNode, nil) } // configFromXMLDecoderWithConfig parses data from a given XML decoder. // Returns parsed config which can be used to create logger in case no errors occured. // Returns error if format is incorrect or anything happened. func configFromXMLDecoderWithConfig(xmlParser *xml.Decoder, rootNode xml.Token, cfg *CfgParseParams) (*logConfig, error) { _, ok := rootNode.(xml.StartElement) if !ok { return nil, errors.New("rootNode must be XML startElement") } config, err := unmarshalNode(xmlParser, rootNode) if err != nil { return nil, err } if config == nil { return nil, errors.New("xml has no content") } return configFromXMLNodeWithConfig(config, cfg) } // configFromReader parses data from a given reader. // Returns parsed config which can be used to create logger in case no errors occured. // Returns error if format is incorrect or anything happened. 
func configFromReader(reader io.Reader) (*logConfig, error) { return configFromReaderWithConfig(reader, nil) } // configFromReaderWithConfig parses data from a given reader. // Returns parsed config which can be used to create logger in case no errors occured. // Returns error if format is incorrect or anything happened. func configFromReaderWithConfig(reader io.Reader, cfg *CfgParseParams) (*logConfig, error) { config, err := unmarshalConfig(reader) if err != nil { return nil, err } if config.name != seelogConfigID { return nil, errors.New("root xml tag must be '" + seelogConfigID + "'") } return configFromXMLNodeWithConfig(config, cfg) } func configFromXMLNodeWithConfig(config *xmlNode, cfg *CfgParseParams) (*logConfig, error) { err := checkUnexpectedAttribute( config, minLevelID, maxLevelID, levelsID, loggerTypeFromStringAttr, asyncLoggerIntervalAttr, adaptLoggerMinIntervalAttr, adaptLoggerMaxIntervalAttr, adaptLoggerCriticalMsgCountAttr, ) if err != nil { return nil, err } err = checkExpectedElements(config, optionalElement(outputsID), optionalElement(formatsID), optionalElement(exceptionsID)) if err != nil { return nil, err } constraints, err := getConstraints(config) if err != nil { return nil, err } exceptions, err := getExceptions(config) if err != nil { return nil, err } err = checkDistinctExceptions(exceptions) if err != nil { return nil, err } formats, err := getFormats(config) if err != nil { return nil, err } dispatcher, err := getOutputsTree(config, formats, cfg) if err != nil { // If we open several files, but then fail to parse the config, we should close // those files before reporting that config is invalid. 
if dispatcher != nil { dispatcher.Close() } return nil, err } loggerType, logData, err := getloggerTypeFromStringData(config) if err != nil { return nil, err } return newConfig(constraints, exceptions, dispatcher, loggerType, logData, cfg) } func getConstraints(node *xmlNode) (logLevelConstraints, error) { minLevelStr, isMinLevel := node.attributes[minLevelID] maxLevelStr, isMaxLevel := node.attributes[maxLevelID] levelsStr, isLevels := node.attributes[levelsID] if isLevels && (isMinLevel && isMaxLevel) { return nil, errors.New("for level declaration use '" + levelsID + "'' OR '" + minLevelID + "', '" + maxLevelID + "'") } offString := LogLevel(Off).String() if (isLevels && strings.TrimSpace(levelsStr) == offString) || (isMinLevel && !isMaxLevel && minLevelStr == offString) { return newOffConstraints() } if isLevels { levels, err := parseLevels(levelsStr) if err != nil { return nil, err } return newListConstraints(levels) } var minLevel = LogLevel(TraceLvl) if isMinLevel { found := true minLevel, found = LogLevelFromString(minLevelStr) if !found { return nil, errors.New("declared " + minLevelID + " not found: " + minLevelStr) } } var maxLevel = LogLevel(CriticalLvl) if isMaxLevel { found := true maxLevel, found = LogLevelFromString(maxLevelStr) if !found { return nil, errors.New("declared " + maxLevelID + " not found: " + maxLevelStr) } } return newMinMaxConstraints(minLevel, maxLevel) } func parseLevels(str string) ([]LogLevel, error) { levelsStrArr := strings.Split(strings.Replace(str, " ", "", -1), ",") var levels []LogLevel for _, levelStr := range levelsStrArr { level, found := LogLevelFromString(levelStr) if !found { return nil, errors.New("declared level not found: " + levelStr) } levels = append(levels, level) } return levels, nil } func getExceptions(config *xmlNode) ([]*logLevelException, error) { var exceptions []*logLevelException var exceptionsNode *xmlNode for _, child := range config.children { if child.name == exceptionsID { exceptionsNode = child 
break } } if exceptionsNode == nil { return exceptions, nil } err := checkUnexpectedAttribute(exceptionsNode) if err != nil { return nil, err } err = checkExpectedElements(exceptionsNode, multipleMandatoryElements("exception")) if err != nil { return nil, err } for _, exceptionNode := range exceptionsNode.children { if exceptionNode.name != exceptionID { return nil, errors.New("incorrect nested element in exceptions section: " + exceptionNode.name) } err := checkUnexpectedAttribute(exceptionNode, minLevelID, maxLevelID, levelsID, funcPatternID, filePatternID) if err != nil { return nil, err } constraints, err := getConstraints(exceptionNode) if err != nil { return nil, errors.New("incorrect " + exceptionsID + " node: " + err.Error()) } funcPattern, isFuncPattern := exceptionNode.attributes[funcPatternID] filePattern, isFilePattern := exceptionNode.attributes[filePatternID] if !isFuncPattern { funcPattern = "*" } if !isFilePattern { filePattern = "*" } exception, err := newLogLevelException(funcPattern, filePattern, constraints) if err != nil { return nil, errors.New("incorrect exception node: " + err.Error()) } exceptions = append(exceptions, exception) } return exceptions, nil } func checkDistinctExceptions(exceptions []*logLevelException) error { for i, exception := range exceptions { for j, exception1 := range exceptions { if i == j { continue } if exception.FuncPattern() == exception1.FuncPattern() && exception.FilePattern() == exception1.FilePattern() { return fmt.Errorf("there are two or more duplicate exceptions. 
Func: %v, file %v", exception.FuncPattern(), exception.FilePattern()) } } } return nil } func getFormats(config *xmlNode) (map[string]*formatter, error) { formats := make(map[string]*formatter, 0) var formatsNode *xmlNode for _, child := range config.children { if child.name == formatsID { formatsNode = child break } } if formatsNode == nil { return formats, nil } err := checkUnexpectedAttribute(formatsNode) if err != nil { return nil, err } err = checkExpectedElements(formatsNode, multipleMandatoryElements("format")) if err != nil { return nil, err } for _, formatNode := range formatsNode.children { if formatNode.name != formatID { return nil, errors.New("incorrect nested element in " + formatsID + " section: " + formatNode.name) } err := checkUnexpectedAttribute(formatNode, formatKeyAttrID, formatID) if err != nil { return nil, err } id, isID := formatNode.attributes[formatKeyAttrID] formatStr, isFormat := formatNode.attributes[formatAttrID] if !isID { return nil, errors.New("format has no '" + formatKeyAttrID + "' attribute") } if !isFormat { return nil, errors.New("format[" + id + "] has no '" + formatAttrID + "' attribute") } formatter, err := newFormatter(formatStr) if err != nil { return nil, err } formats[id] = formatter } return formats, nil } func getloggerTypeFromStringData(config *xmlNode) (logType loggerTypeFromString, logData interface{}, err error) { logTypeStr, loggerTypeExists := config.attributes[loggerTypeFromStringAttr] if !loggerTypeExists { return defaultloggerTypeFromString, nil, nil } logType, found := getLoggerTypeFromString(logTypeStr) if !found { return 0, nil, fmt.Errorf("unknown logger type: %s", logTypeStr) } if logType == asyncTimerloggerTypeFromString { intervalStr, intervalExists := config.attributes[asyncLoggerIntervalAttr] if !intervalExists { return 0, nil, newMissingArgumentError(config.name, asyncLoggerIntervalAttr) } interval, err := strconv.ParseUint(intervalStr, 10, 32) if err != nil { return 0, nil, err } logData = 
asyncTimerLoggerData{uint32(interval)}
	} else if logType == adaptiveLoggerTypeFromString {
		// Adaptive logger needs all three tuning attributes; each is a uint32.
		// Min interval
		minIntStr, minIntExists := config.attributes[adaptLoggerMinIntervalAttr]
		if !minIntExists {
			return 0, nil, newMissingArgumentError(config.name, adaptLoggerMinIntervalAttr)
		}
		minInterval, err := strconv.ParseUint(minIntStr, 10, 32)
		if err != nil {
			return 0, nil, err
		}
		// Max interval
		maxIntStr, maxIntExists := config.attributes[adaptLoggerMaxIntervalAttr]
		if !maxIntExists {
			return 0, nil, newMissingArgumentError(config.name, adaptLoggerMaxIntervalAttr)
		}
		maxInterval, err := strconv.ParseUint(maxIntStr, 10, 32)
		if err != nil {
			return 0, nil, err
		}
		// Critical msg count
		criticalMsgCountStr, criticalMsgCountExists := config.attributes[adaptLoggerCriticalMsgCountAttr]
		if !criticalMsgCountExists {
			return 0, nil, newMissingArgumentError(config.name, adaptLoggerCriticalMsgCountAttr)
		}
		criticalMsgCount, err := strconv.ParseUint(criticalMsgCountStr, 10, 32)
		if err != nil {
			return 0, nil, err
		}
		logData = adaptiveLoggerData{uint32(minInterval), uint32(maxInterval), uint32(criticalMsgCount)}
	}
	return logType, logData, nil
}

// getOutputsTree looks for an <outputs> element among config's children and
// builds the root dispatcher tree from it. If no outputs node exists, or the
// created output is not a dispatcherInterface, it falls back to a console
// writer wrapped in a split dispatcher with the default format.
func getOutputsTree(config *xmlNode, formats map[string]*formatter, cfg *CfgParseParams) (dispatcherInterface, error) {
	var outputsNode *xmlNode
	for _, child := range config.children {
		if child.name == outputsID {
			outputsNode = child
			break
		}
	}
	if outputsNode != nil {
		// The <outputs> node itself may only carry a 'formatid' attribute.
		err := checkUnexpectedAttribute(outputsNode, outputFormatID)
		if err != nil {
			return nil, err
		}
		formatter, err := getCurrentFormat(outputsNode, defaultformatter, formats)
		if err != nil {
			return nil, err
		}
		output, err := createSplitter(outputsNode, formatter, formats, cfg)
		if err != nil {
			return nil, err
		}
		dispatcher, ok := output.(dispatcherInterface)
		if ok {
			return dispatcher, nil
		}
	}
	// Fallback: default console output when no usable <outputs> tree was built.
	console, err := newConsoleWriter()
	if err != nil {
		return nil, err
	}
	return newSplitDispatcher(defaultformatter, []interface{}{console})
}

// getCurrentFormat resolves the effective format for node: if the node has no
// 'formatid' attribute the parent's format is inherited; otherwise the id is
// looked up first in the user-defined formats map, then among the predefined
// formats. An id matching neither is an error.
func getCurrentFormat(node *xmlNode, formatFromParent *formatter,
	formats map[string]*formatter) (*formatter, error) {
	formatID, isFormatID := node.attributes[outputFormatID]
	if !isFormatID {
		return formatFromParent, nil
	}
	format, ok := formats[formatID]
	if ok {
		return format, nil
	}
	// Test for predefined format match
	pdFormat, pdOk := predefinedFormats[formatID]
	if !pdOk {
		return nil, errors.New("formatid = '" + formatID + "' doesn't exist")
	}
	return pdFormat, nil
}

// createInnerReceivers constructs one receiver per child element of node,
// dispatching on the child's tag name through the elementMap constructor
// table. Unknown tags are an error.
// NOTE(review): the error string below misspells "unknown" as "unnknown";
// it is a runtime string so it is left untouched here.
func createInnerReceivers(node *xmlNode, format *formatter, formats map[string]*formatter, cfg *CfgParseParams) ([]interface{}, error) {
	var outputs []interface{}
	for _, childNode := range node.children {
		entry, ok := elementMap[childNode.name]
		if !ok {
			return nil, errors.New("unnknown tag '" + childNode.name + "' in outputs section")
		}
		output, err := entry.constructor(childNode, format, formats, cfg)
		if err != nil {
			return nil, err
		}
		outputs = append(outputs, output)
	}
	return outputs, nil
}

// createSplitter builds a split dispatcher from node's children. The node may
// only carry a 'formatid' attribute and must have at least one child.
func createSplitter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) {
	err := checkUnexpectedAttribute(node, outputFormatID)
	if err != nil {
		return nil, err
	}
	if !node.hasChildren() {
		return nil, errNodeMustHaveChildren
	}
	currentFormat, err := getCurrentFormat(node, formatFromParent, formats)
	if err != nil {
		return nil, err
	}
	receivers, err := createInnerReceivers(node, currentFormat, formats, cfg)
	if err != nil {
		return nil, err
	}
	return newSplitDispatcher(currentFormat, receivers)
}

// createCustomReceiver builds a dispatcher around a user-registered custom
// receiver. 'data-*' attributes are collected (with the prefix stripped) into
// CustomReceiverInitArgs. If the parse params supply a producer for the given
// name, the receiver is created by value through that producer and AfterParse
// is invoked on it; otherwise a by-name dispatcher is created.
func createCustomReceiver(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) {
	dataCustomPrefixes := make(map[string]string)
	// Expecting only 'formatid', 'name' and 'data-' attrs
	for attr, attrval := range node.attributes {
		isExpected := false
		if attr == outputFormatID || attr == customNameAttrID {
			isExpected = true
		}
		if strings.HasPrefix(attr, customNameDataAttrPrefix) {
			// Strip the 'data-' prefix; the remainder becomes the custom attr key.
			dataCustomPrefixes[attr[len(customNameDataAttrPrefix):]] = attrval
			isExpected = true
		}
		if !isExpected {
			return nil, newUnexpectedAttributeError(node.name, attr)
		}
	}
	if node.hasChildren() {
		return nil, errNodeCannotHaveChildren
	}
	customName, hasCustomName := node.attributes[customNameAttrID]
	if !hasCustomName {
		return nil, newMissingArgumentError(node.name, customNameAttrID)
	}
	currentFormat, err := getCurrentFormat(node, formatFromParent, formats)
	if err != nil {
		return nil, err
	}
	args := CustomReceiverInitArgs{
		XmlCustomAttrs: dataCustomPrefixes,
	}
	if cfg != nil && cfg.CustomReceiverProducers != nil {
		if prod, ok := cfg.CustomReceiverProducers[customName]; ok {
			rec, err := prod(args)
			if err != nil {
				return nil, err
			}
			creceiver, err := newCustomReceiverDispatcherByValue(currentFormat, rec, customName, args)
			if err != nil {
				return nil, err
			}
			// Give the receiver a chance to consume its init args after creation.
			err = rec.AfterParse(args)
			if err != nil {
				return nil, err
			}
			return creceiver, nil
		}
	}
	return newCustomReceiverDispatcher(currentFormat, customName, args)
}

// createFilter builds a filter dispatcher that forwards only the log levels
// listed in the mandatory 'levels' attribute to its child receivers.
func createFilter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) {
	err := checkUnexpectedAttribute(node, outputFormatID, filterLevelsAttrID)
	if err != nil {
		return nil, err
	}
	if !node.hasChildren() {
		return nil, errNodeMustHaveChildren
	}
	currentFormat, err := getCurrentFormat(node, formatFromParent, formats)
	if err != nil {
		return nil, err
	}
	levelsStr, isLevels := node.attributes[filterLevelsAttrID]
	if !isLevels {
		return nil, newMissingArgumentError(node.name, filterLevelsAttrID)
	}
	levels, err := parseLevels(levelsStr)
	if err != nil {
		return nil, err
	}
	receivers, err := createInnerReceivers(node, currentFormat, formats, cfg)
	if err != nil {
		return nil, err
	}
	return newFilterDispatcher(currentFormat, receivers, levels...)
}

// createfileWriter builds a formatted plain-file writer from the mandatory
// 'path' attribute. The node may not have children.
func createfileWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) {
	err := checkUnexpectedAttribute(node, outputFormatID, pathID)
	if err != nil {
		return nil, err
	}
	if node.hasChildren() {
		return nil, errNodeCannotHaveChildren
	}
	currentFormat, err := getCurrentFormat(node, formatFromParent, formats)
	if err != nil {
		return nil, err
	}
	path, isPath := node.attributes[pathID]
	if !isPath {
		return nil, newMissingArgumentError(node.name, pathID)
	}
	fileWriter, err := newFileWriter(path)
	if err != nil {
		return nil, err
	}
	return newFormattedWriter(fileWriter, currentFormat)
}

// Creates new SMTP writer if encountered in the config file.
// Sender/host/credential attributes are mandatory; recipients, CA certificate
// directories and extra mail headers come from child elements; 'subject' is
// optional and defaults to DefaultSubjectPhrase.
func createSMTPWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) {
	err := checkUnexpectedAttribute(node, outputFormatID, senderaddressID, senderNameID, hostNameID, hostPortID, userNameID, userPassID, subjectID)
	if err != nil {
		return nil, err
	}
	// Node must have children.
	if !node.hasChildren() {
		return nil, errNodeMustHaveChildren
	}
	currentFormat, err := getCurrentFormat(node, formatFromParent, formats)
	if err != nil {
		return nil, err
	}
	senderAddress, ok := node.attributes[senderaddressID]
	if !ok {
		return nil, newMissingArgumentError(node.name, senderaddressID)
	}
	senderName, ok := node.attributes[senderNameID]
	if !ok {
		return nil, newMissingArgumentError(node.name, senderNameID)
	}
	// Process child nodes scanning for recipient email addresses and/or CA certificate paths.
	var recipientAddresses []string
	var caCertDirPaths []string
	var mailHeaders []string
	for _, childNode := range node.children {
		switch childNode.name {
		// Extract recipient address from child nodes.
		case recipientID:
			address, ok := childNode.attributes[addressID]
			if !ok {
				return nil, newMissingArgumentError(childNode.name, addressID)
			}
			recipientAddresses = append(recipientAddresses, address)
		// Extract CA certificate file path from child nodes.
		case cACertDirpathID:
			path, ok := childNode.attributes[pathID]
			if !ok {
				return nil, newMissingArgumentError(childNode.name, pathID)
			}
			caCertDirPaths = append(caCertDirPaths, path)
		// Extract email headers from child nodes.
		case mailHeaderID:
			headerName, ok := childNode.attributes[mailHeaderNameID]
			if !ok {
				return nil, newMissingArgumentError(childNode.name, mailHeaderNameID)
			}
			headerValue, ok := childNode.attributes[mailHeaderValueID]
			if !ok {
				return nil, newMissingArgumentError(childNode.name, mailHeaderValueID)
			}
			// Build header line
			mailHeaders = append(mailHeaders, fmt.Sprintf("%s: %s", headerName, headerValue))
		default:
			return nil, newUnexpectedChildElementError(childNode.name)
		}
	}
	hostName, ok := node.attributes[hostNameID]
	if !ok {
		return nil, newMissingArgumentError(node.name, hostNameID)
	}
	hostPort, ok := node.attributes[hostPortID]
	if !ok {
		return nil, newMissingArgumentError(node.name, hostPortID)
	}
	// Check if the string can really be converted into int.
	if _, err := strconv.Atoi(hostPort); err != nil {
		return nil, errors.New("invalid host port number")
	}
	userName, ok := node.attributes[userNameID]
	if !ok {
		return nil, newMissingArgumentError(node.name, userNameID)
	}
	userPass, ok := node.attributes[userPassID]
	if !ok {
		return nil, newMissingArgumentError(node.name, userPassID)
	}
	// subject is optionally set by configuration.
	// default value is defined by DefaultSubjectPhrase constant in the writers_smtpwriter.go
	var subjectPhrase = DefaultSubjectPhrase
	subject, ok := node.attributes[subjectID]
	if ok {
		subjectPhrase = subject
	}
	smtpWriter := newSMTPWriter(
		senderAddress,
		senderName,
		recipientAddresses,
		hostName,
		hostPort,
		userName,
		userPass,
		caCertDirPaths,
		subjectPhrase,
		mailHeaders,
	)
	return newFormattedWriter(smtpWriter, currentFormat)
}

// createConsoleWriter builds a formatted console writer. The node takes no
// attributes other than 'formatid' and may not have children.
func createConsoleWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) {
	err := checkUnexpectedAttribute(node, outputFormatID)
	if err != nil {
		return nil, err
	}
	if node.hasChildren() {
		return nil, errNodeCannotHaveChildren
	}
	currentFormat, err := getCurrentFormat(node, formatFromParent, formats)
	if err != nil {
		return nil, err
	}
	consoleWriter, err := newConsoleWriter()
	if err != nil {
		return nil, err
	}
	return newFormattedWriter(consoleWriter, currentFormat)
}

// createconnWriter builds a network connection writer from mandatory 'addr'
// and 'net' attributes, with optional boolean attributes for reconnect-on-
// message and TLS behavior. Boolean attributes accept only the literal
// strings "true" and "false".
func createconnWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) {
	if node.hasChildren() {
		return nil, errNodeCannotHaveChildren
	}
	err := checkUnexpectedAttribute(node, outputFormatID, connWriterAddrAttr, connWriterNetAttr, connWriterReconnectOnMsgAttr, connWriterUseTLSAttr, connWriterInsecureSkipVerifyAttr)
	if err != nil {
		return nil, err
	}
	currentFormat, err := getCurrentFormat(node, formatFromParent, formats)
	if err != nil {
		return nil, err
	}
	addr, isAddr := node.attributes[connWriterAddrAttr]
	if !isAddr {
		return nil, newMissingArgumentError(node.name, connWriterAddrAttr)
	}
	net, isNet := node.attributes[connWriterNetAttr]
	if !isNet {
		return nil, newMissingArgumentError(node.name, connWriterNetAttr)
	}
	reconnectOnMsg := false
	reconnectOnMsgStr, isReconnectOnMsgStr := node.attributes[connWriterReconnectOnMsgAttr]
	if isReconnectOnMsgStr {
		if reconnectOnMsgStr == "true" {
			reconnectOnMsg = true
		} else if reconnectOnMsgStr == "false" {
			reconnectOnMsg =
false
		} else {
			return nil, errors.New("node '" + node.name + "' has incorrect '" + connWriterReconnectOnMsgAttr + "' attribute value")
		}
	}
	useTLS := false
	useTLSStr, isUseTLSStr := node.attributes[connWriterUseTLSAttr]
	if isUseTLSStr {
		if useTLSStr == "true" {
			useTLS = true
		} else if useTLSStr == "false" {
			useTLS = false
		} else {
			return nil, errors.New("node '" + node.name + "' has incorrect '" + connWriterUseTLSAttr + "' attribute value")
		}
		// NOTE(review): 'insecureskipverify' is only consulted when the
		// 'tls' attribute is present and true; otherwise it is ignored.
		if useTLS {
			insecureSkipVerify := false
			insecureSkipVerifyStr, isInsecureSkipVerify := node.attributes[connWriterInsecureSkipVerifyAttr]
			if isInsecureSkipVerify {
				if insecureSkipVerifyStr == "true" {
					insecureSkipVerify = true
				} else if insecureSkipVerifyStr == "false" {
					insecureSkipVerify = false
				} else {
					return nil, errors.New("node '" + node.name + "' has incorrect '" + connWriterInsecureSkipVerifyAttr + "' attribute value")
				}
			}
			config := tls.Config{InsecureSkipVerify: insecureSkipVerify}
			connWriter := newTLSWriter(net, addr, reconnectOnMsg, &config)
			return newFormattedWriter(connWriter, currentFormat)
		}
	}
	connWriter := newConnWriter(net, addr, reconnectOnMsg)
	return newFormattedWriter(connWriter, currentFormat)
}

// createRollingFileWriter builds either a size-based or time-based rolling
// file writer depending on the mandatory 'type' attribute. Archive type and
// path, filename mode, max size/rolls and date pattern are taken from further
// attributes; a missing archive path falls back to the per-type default name.
func createRollingFileWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) {
	if node.hasChildren() {
		return nil, errNodeCannotHaveChildren
	}
	rollingTypeStr, isRollingType := node.attributes[rollingFileTypeAttr]
	if !isRollingType {
		return nil, newMissingArgumentError(node.name, rollingFileTypeAttr)
	}
	rollingType, ok := rollingTypeFromString(rollingTypeStr)
	if !ok {
		return nil, errors.New("unknown rolling file type: " + rollingTypeStr)
	}
	currentFormat, err := getCurrentFormat(node, formatFromParent, formats)
	if err != nil {
		return nil, err
	}
	path, isPath := node.attributes[rollingFilePathAttr]
	if !isPath {
		return nil, newMissingArgumentError(node.name, rollingFilePathAttr)
	}
	rollingArchiveStr, archiveAttrExists := node.attributes[rollingFileArchiveAttr]
	var rArchiveType rollingArchiveType
	var rArchivePath string
	if !archiveAttrExists {
		// No 'archive' attribute: archiving disabled.
		rArchiveType = rollingArchiveNone
		rArchivePath = ""
	} else {
		rArchiveType, ok = rollingArchiveTypeFromString(rollingArchiveStr)
		if !ok {
			return nil, errors.New("unknown rolling archive type: " + rollingArchiveStr)
		}
		if rArchiveType == rollingArchiveNone {
			rArchivePath = ""
		} else {
			// Explicit archive path wins; otherwise use the default
			// archive filename registered for this archive type.
			rArchivePath, ok = node.attributes[rollingFileArchivePathAttr]
			if !ok {
				rArchivePath, ok = rollingArchiveTypesDefaultNames[rArchiveType]
				if !ok {
					return nil, fmt.Errorf("cannot get default filename for archive type = %v", rArchiveType)
				}
			}
		}
	}
	// Filename mode defaults to postfix unless overridden by attribute.
	nameMode := rollingNameMode(rollingNameModePostfix)
	nameModeStr, ok := node.attributes[rollingFileNameModeAttr]
	if ok {
		mode, found := rollingNameModeFromString(nameModeStr)
		if !found {
			return nil, errors.New("unknown rolling filename mode: " + nameModeStr)
		} else {
			nameMode = mode
		}
	}
	if rollingType == rollingTypeSize {
		err := checkUnexpectedAttribute(node, outputFormatID, rollingFileTypeAttr, rollingFilePathAttr, rollingFileMaxSizeAttr, rollingFileMaxRollsAttr, rollingFileArchiveAttr, rollingFileArchivePathAttr, rollingFileNameModeAttr)
		if err != nil {
			return nil, err
		}
		maxSizeStr, ok := node.attributes[rollingFileMaxSizeAttr]
		if !ok {
			return nil, newMissingArgumentError(node.name, rollingFileMaxSizeAttr)
		}
		maxSize, err := strconv.ParseInt(maxSizeStr, 10, 64)
		if err != nil {
			return nil, err
		}
		maxRolls := 0
		maxRollsStr, ok := node.attributes[rollingFileMaxRollsAttr]
		if ok {
			maxRolls, err = strconv.Atoi(maxRollsStr)
			if err != nil {
				return nil, err
			}
		}
		rollingWriter, err := newRollingFileWriterSize(path, rArchiveType, rArchivePath, maxSize, maxRolls, nameMode)
		if err != nil {
			return nil, err
		}
		return newFormattedWriter(rollingWriter, currentFormat)
	} else if rollingType == rollingTypeTime {
		err := checkUnexpectedAttribute(node, outputFormatID, rollingFileTypeAttr, rollingFilePathAttr, rollingFileDataPatternAttr, rollingFileArchiveAttr, rollingFileMaxRollsAttr, rollingFileArchivePathAttr, rollingFileNameModeAttr)
		if err != nil {
			return nil, err
		}
		maxRolls := 0
		maxRollsStr, ok := node.attributes[rollingFileMaxRollsAttr]
		if ok {
			maxRolls, err = strconv.Atoi(maxRollsStr)
			if err != nil {
				return nil, err
			}
		}
		dataPattern, ok := node.attributes[rollingFileDataPatternAttr]
		if !ok {
			return nil, newMissingArgumentError(node.name, rollingFileDataPatternAttr)
		}
		rollingWriter, err := newRollingFileWriterTime(path, rArchiveType, rArchivePath, maxRolls, dataPattern, rollingIntervalAny, nameMode)
		if err != nil {
			return nil, err
		}
		return newFormattedWriter(rollingWriter, currentFormat)
	}
	return nil, errors.New("incorrect rolling writer type " + rollingTypeStr)
}

// createbufferedWriter wraps a single inner writer in a buffered writer with
// the mandatory 'size' and optional 'flushperiod' attributes. The inner
// writer must not declare its own format; it inherits the buffered writer's.
// NOTE(review): only receivers[0] is inspected — presumably a buffered
// writer is expected to have exactly one child; confirm against the schema.
func createbufferedWriter(node *xmlNode, formatFromParent *formatter, formats map[string]*formatter, cfg *CfgParseParams) (interface{}, error) {
	err := checkUnexpectedAttribute(node, outputFormatID, bufferedSizeAttr, bufferedFlushPeriodAttr)
	if err != nil {
		return nil, err
	}
	if !node.hasChildren() {
		return nil, errNodeMustHaveChildren
	}
	currentFormat, err := getCurrentFormat(node, formatFromParent, formats)
	if err != nil {
		return nil, err
	}
	sizeStr, isSize := node.attributes[bufferedSizeAttr]
	if !isSize {
		return nil, newMissingArgumentError(node.name, bufferedSizeAttr)
	}
	size, err := strconv.Atoi(sizeStr)
	if err != nil {
		return nil, err
	}
	flushPeriod := 0
	flushPeriodStr, isFlushPeriod := node.attributes[bufferedFlushPeriodAttr]
	if isFlushPeriod {
		flushPeriod, err = strconv.Atoi(flushPeriodStr)
		if err != nil {
			return nil, err
		}
	}
	// Inner writer couldn't have its own format, so we pass 'currentFormat' as its parent format
	receivers, err := createInnerReceivers(node, currentFormat, formats, cfg)
	if err != nil {
		return nil, err
	}
	formattedWriter, ok := receivers[0].(*formattedWriter)
	if !ok {
		return nil, errors.New("buffered writer's child is not writer")
	}
	// ... and then we check that it hasn't changed
	if formattedWriter.Format() != currentFormat {
		return nil, errors.New("inner writer cannot have his own format")
	}
	bufferedWriter, err := newBufferedWriter(formattedWriter.Writer(), size, time.Duration(flushPeriod))
	if err != nil {
		return nil, err
	}
	return newFormattedWriter(bufferedWriter, currentFormat)
}

// Returns an error if node has any attributes not listed in expectedAttrs.
func checkUnexpectedAttribute(node *xmlNode, expectedAttrs ...string) error {
	for attr := range node.attributes {
		isExpected := false
		for _, expected := range expectedAttrs {
			if attr == expected {
				isExpected = true
				break
			}
		}
		if !isExpected {
			return newUnexpectedAttributeError(node.name, attr)
		}
	}
	return nil
}

// expectedElementInfo describes a child element constraint used by
// checkExpectedElements: whether the element must appear and whether it may
// appear more than once.
type expectedElementInfo struct {
	name      string
	mandatory bool
	multiple  bool
}

// optionalElement: zero-or-one occurrence allowed.
func optionalElement(name string) expectedElementInfo {
	return expectedElementInfo{name, false, false}
}

// mandatoryElement: exactly one occurrence required.
func mandatoryElement(name string) expectedElementInfo {
	return expectedElementInfo{name, true, false}
}

// multipleElements: zero-or-more occurrences allowed.
func multipleElements(name string) expectedElementInfo {
	return expectedElementInfo{name, false, true}
}

// multipleMandatoryElements: one-or-more occurrences required.
func multipleMandatoryElements(name string) expectedElementInfo {
	return expectedElementInfo{name, true, true}
}

// checkExpectedElements validates node's children against the given element
// constraints: mandatory elements must be present, non-multiple elements must
// not repeat, and no child may have a name outside the expected set.
// NOTE(review): the error string below says "more then" rather than
// "more than"; it is a runtime string so it is left untouched here.
func checkExpectedElements(node *xmlNode, elements ...expectedElementInfo) error {
	for _, element := range elements {
		count := 0
		for _, child := range node.children {
			if child.name == element.name {
				count++
			}
		}
		if count == 0 && element.mandatory {
			return errors.New(node.name + " does not have mandatory subnode - " + element.name)
		}
		if count > 1 && !element.multiple {
			return errors.New(node.name + " has more then one subnode - " + element.name)
		}
	}
	for _, child := range node.children {
		isExpected := false
		for _, element := range elements {
			if child.name == element.name {
				isExpected = true
			}
		}
		if !isExpected {
			return errors.New(node.name + " has unexpected child: " + child.name)
		}
	}
	return nil
}
================================================ FILE: 
vendor/github.com/cihub/seelog/cfg_parser_test.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
package seelog import ( "fmt" "path/filepath" "regexp" "strings" "testing" ) type customTestReceiverOutput struct { initCalled bool dataPassed string messageOutput string levelOutput LogLevel closed bool flushed bool } type customTestReceiver struct{ co *customTestReceiverOutput } func (cr *customTestReceiver) ReceiveMessage(message string, level LogLevel, context LogContextInterface) error { cr.co.messageOutput = message cr.co.levelOutput = level return nil } func (cr *customTestReceiver) String() string { return fmt.Sprintf("custom data='%s'", cr.co.dataPassed) } func (cr *customTestReceiver) AfterParse(initArgs CustomReceiverInitArgs) error { cr.co = new(customTestReceiverOutput) cr.co.initCalled = true cr.co.dataPassed = initArgs.XmlCustomAttrs["test"] return nil } func (cr *customTestReceiver) Flush() { cr.co.flushed = true } func (cr *customTestReceiver) Close() error { cr.co.closed = true return nil } var re = regexp.MustCompile(`[^a-zA-Z0-9]+`) func getTestFileName(testName, postfix string) string { if len(postfix) != 0 { return strings.ToLower(re.ReplaceAllString(testName, "_")) + "_" + postfix + "_test.log" } return strings.ToLower(re.ReplaceAllString(testName, "_")) + "_test.log" } var parserTests []parserTest type parserTest struct { testName string config string expected *logConfig //interface{} errorExpected bool parserConfig *CfgParseParams } func getParserTests() []parserTest { if parserTests == nil { parserTests = make([]parserTest, 0) testName := "Simple file output" testLogFileName := getTestFileName(testName, "") testConfig := ` ` testExpected := new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl) testExpected.Exceptions = nil testfileWriter, _ := newFileWriter(testLogFileName) testHeadSplitter, _ := newSplitDispatcher(defaultformatter, []interface{}{testfileWriter}) testExpected.LogType = asyncLooploggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, 
parserTest{testName, testConfig, testExpected, false, nil}) testName = "Filter dispatcher" testLogFileName = getTestFileName(testName, "") testConfig = ` ` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl) testExpected.Exceptions = nil testfileWriter, _ = newFileWriter(testLogFileName) testFilter, _ := newFilterDispatcher(defaultformatter, []interface{}{testfileWriter}, DebugLvl, InfoLvl, CriticalLvl) testHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testFilter}) testExpected.LogType = syncloggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "Console writer" testConfig = ` ` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl) testExpected.Exceptions = nil testconsoleWriter, _ := newConsoleWriter() testHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testconsoleWriter}) testExpected.LogType = syncloggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "SMTP writer" testConfig = ` ` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl) testExpected.Exceptions = nil testSMTPWriter := newSMTPWriter( "sa", "sn", []string{"ra1", "ra2", "ra3"}, "hn", "123", "un", "up", []string{"cacdp1", "cacdp2"}, DefaultSubjectPhrase, nil, ) testHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testSMTPWriter}) testExpected.LogType = asyncLooploggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "SMTP writer custom header and subject configuration" testConfig = `
` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl) testExpected.Exceptions = nil testSMTPWriter = newSMTPWriter( "sa", "sn", []string{"ra1"}, "hn", "123", "un", "up", []string{"cacdp1"}, "ohlala", []string{"Priority: Urgent", "Importance: high", "Sensitivity: Company-Confidential", "Auto-Submitted: auto-generated"}, ) testHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testSMTPWriter}) testExpected.LogType = asyncLooploggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "Default output" testConfig = ` ` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl) testExpected.Exceptions = nil testconsoleWriter, _ = newConsoleWriter() testHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testconsoleWriter}) testExpected.LogType = syncloggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "Asyncloop behavior" testConfig = ` ` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl) testExpected.Exceptions = nil testconsoleWriter, _ = newConsoleWriter() testHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testconsoleWriter}) testExpected.LogType = asyncLooploggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "Asynctimer behavior" testConfig = ` ` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl) testExpected.Exceptions = nil testconsoleWriter, _ = newConsoleWriter() testHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testconsoleWriter}) 
testExpected.LogType = asyncTimerloggerTypeFromString testExpected.LoggerData = asyncTimerLoggerData{101} testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "Rolling file writer size" testLogFileName = getTestFileName(testName, "") testConfig = ` ` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl) testExpected.Exceptions = nil testrollingFileWriter, _ := newRollingFileWriterSize(testLogFileName, rollingArchiveNone, "", 100, 5, rollingNameModePostfix) testHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testrollingFileWriter}) testExpected.LogType = syncloggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "Rolling file writer archive zip" testLogFileName = getTestFileName(testName, "") testConfig = ` ` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl) testExpected.Exceptions = nil testrollingFileWriter, _ = newRollingFileWriterSize(testLogFileName, rollingArchiveZip, "log.zip", 100, 5, rollingNameModePostfix) testHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testrollingFileWriter}) testExpected.LogType = syncloggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "Rolling file writer archive zip with specified path" testLogFileName = getTestFileName(testName, "") testConfig = ` ` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl) testExpected.Exceptions = nil testrollingFileWriter, _ = newRollingFileWriterSize(testLogFileName, rollingArchiveZip, "test.zip", 100, 5, rollingNameModePrefix) testHeadSplitter, _ = 
newSplitDispatcher(defaultformatter, []interface{}{testrollingFileWriter}) testExpected.LogType = syncloggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "Rolling file writer archive none" testLogFileName = getTestFileName(testName, "") testConfig = ` ` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl) testExpected.Exceptions = nil testrollingFileWriter, _ = newRollingFileWriterSize(testLogFileName, rollingArchiveNone, "", 100, 5, rollingNameModePostfix) testHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testrollingFileWriter}) testExpected.LogType = syncloggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "Rolling file writer date" testLogFileName = getTestFileName(testName, "") testConfig = ` ` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl) testExpected.Exceptions = nil testrollingFileWriterTime, _ := newRollingFileWriterTime(testLogFileName, rollingArchiveNone, "", 0, "2006-01-02T15:04:05Z07:00", rollingIntervalAny, rollingNameModePostfix) testHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testrollingFileWriterTime}) testExpected.LogType = syncloggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "Buffered writer" testLogFileName = getTestFileName(testName, "") testConfig = ` ` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl) testExpected.Exceptions = nil testrollingFileWriterTime, _ = newRollingFileWriterTime(testLogFileName, rollingArchiveNone, "", 0, "2006-01-02T15:04:05Z07:00", rollingIntervalDaily, 
rollingNameModePostfix) testbufferedWriter, _ := newBufferedWriter(testrollingFileWriterTime, 100500, 100) testHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testbufferedWriter}) testExpected.LogType = syncloggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "Inner splitter output" testLogFileName1 := getTestFileName(testName, "1") testLogFileName2 := getTestFileName(testName, "2") testLogFileName3 := getTestFileName(testName, "3") testConfig = ` ` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl) testExpected.Exceptions = nil testfileWriter1, _ := newFileWriter(testLogFileName2) testfileWriter2, _ := newFileWriter(testLogFileName3) testInnerSplitter, _ := newSplitDispatcher(defaultformatter, []interface{}{testfileWriter1, testfileWriter2}) testfileWriter, _ = newFileWriter(testLogFileName1) testHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testfileWriter, testInnerSplitter}) testExpected.LogType = syncloggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) RegisterReceiver("custom-name-1", &customTestReceiver{}) testName = "Custom receiver 1" testConfig = ` ` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl) testExpected.Exceptions = nil testCustomReceiver, _ := newCustomReceiverDispatcher(defaultformatter, "custom-name-1", CustomReceiverInitArgs{ XmlCustomAttrs: map[string]string{ "test": "set", }, }) testHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testCustomReceiver}) testExpected.LogType = syncloggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = 
"Custom receiver 2" testConfig = ` ` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl) testExpected.Exceptions = nil crec := &customTestReceiver{} cargs := CustomReceiverInitArgs{ XmlCustomAttrs: map[string]string{ "test": "set2", }, } crec.AfterParse(cargs) testCustomReceiver2, _ := newCustomReceiverDispatcherByValue(defaultformatter, crec, "custom-name-2", cargs) testHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testCustomReceiver2}) testExpected.LogType = syncloggerTypeFromString testExpected.RootDispatcher = testHeadSplitter fnc := func(initArgs CustomReceiverInitArgs) (CustomReceiver, error) { return &customTestReceiver{}, nil } cfg := CfgParseParams{ CustomReceiverProducers: map[string]CustomReceiverProducer{ "custom-name-2": CustomReceiverProducer(fnc), }, } testExpected.Params = &cfg parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, &cfg}) RegisterReceiver("-", &customTestReceiver{}) testName = "Custom receiver 3" testConfig = ` ` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl) testExpected.Exceptions = nil creccustom := &customTestReceiver{} cargs3 := CustomReceiverInitArgs{ XmlCustomAttrs: map[string]string{ "test": "set3", }, } creccustom.AfterParse(cargs3) testCustomReceiver, _ = newCustomReceiverDispatcherByValue(defaultformatter, creccustom, "-", cargs3) testHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testCustomReceiver}) testExpected.LogType = syncloggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "Custom receivers with formats" testConfig = ` ` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl) testExpected.Exceptions = nil testCustomReceivers := make([]*customReceiverDispatcher, 3) 
for i := 0; i < 3; i++ { testCustomReceivers[i], _ = newCustomReceiverDispatcher(defaultformatter, "custom-name-1", CustomReceiverInitArgs{ XmlCustomAttrs: map[string]string{ "test": fmt.Sprintf("set%d", i+1), }, }) } testHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testCustomReceivers[0], testCustomReceivers[1], testCustomReceivers[2]}) testExpected.LogType = syncloggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "Format" testLogFileName = getTestFileName(testName, "") testConfig = ` ` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl) testExpected.Exceptions = nil testfileWriter, _ = newFileWriter(testLogFileName) testFormat, _ := newFormatter("%Level %Msg %File") testHeadSplitter, _ = newSplitDispatcher(testFormat, []interface{}{testfileWriter}) testExpected.LogType = syncloggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "Format2" testLogFileName = getTestFileName(testName, "") testLogFileName1 = getTestFileName(testName, "1") testConfig = ` ` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl) testExpected.Exceptions = nil testfileWriter, _ = newFileWriter(testLogFileName) testfileWriter1, _ = newFileWriter(testLogFileName1) testFormat1, _ := newFormatter("%Level %Msg %File") testFormat2, _ := newFormatter("%l %Msg") formattedWriter, _ := newFormattedWriter(testfileWriter1, testFormat2) testHeadSplitter, _ = newSplitDispatcher(testFormat1, []interface{}{testfileWriter, formattedWriter}) testExpected.LogType = syncloggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "Minlevel = 
warn" testConfig = `` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(WarnLvl, CriticalLvl) testExpected.Exceptions = nil testconsoleWriter, _ = newConsoleWriter() testHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testconsoleWriter}) testExpected.LogType = asyncLooploggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "Maxlevel = trace" testConfig = `` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, TraceLvl) testExpected.Exceptions = nil testconsoleWriter, _ = newConsoleWriter() testHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testconsoleWriter}) testExpected.LogType = asyncLooploggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "Level between info and error" testConfig = `` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(InfoLvl, ErrorLvl) testExpected.Exceptions = nil testconsoleWriter, _ = newConsoleWriter() testHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testconsoleWriter}) testExpected.LogType = asyncLooploggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "Off with minlevel" testConfig = `` testExpected = new(logConfig) testExpected.Constraints, _ = newOffConstraints() testExpected.Exceptions = nil testconsoleWriter, _ = newConsoleWriter() testHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testconsoleWriter}) testExpected.LogType = asyncLooploggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) 
testName = "Off with levels" testConfig = `` parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "Levels list" testConfig = `` testExpected = new(logConfig) testExpected.Constraints, _ = newListConstraints([]LogLevel{ DebugLvl, InfoLvl, CriticalLvl}) testExpected.Exceptions = nil testconsoleWriter, _ = newConsoleWriter() testHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testconsoleWriter}) testExpected.LogType = asyncLooploggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "Errors #1" testConfig = `` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #2" testConfig = `` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #3" testConfig = `` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #4" testConfig = `` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #5" testConfig = `` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #6" testConfig = `` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #7" testConfig = `` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #8" testConfig = `` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #9" testConfig = `` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #10" testConfig = `` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #11" testConfig = `` parserTests = append(parserTests, parserTest{testName, 
testConfig, nil, true, nil}) testName = "Errors #12" testConfig = `` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #13" testConfig = `` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #14" testConfig = `` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #15" testConfig = `` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #16" testConfig = `` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #17" testLogFileName = getTestFileName(testName, "") testConfig = `` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #18" testConfig = `` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #19" testConfig = `` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Exceptions: restricting" testConfig = ` ` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl) listConstraint, _ := newOffConstraints() exception, _ := newLogLevelException("Test*", "someFile.go", listConstraint) testExpected.Exceptions = []*logLevelException{exception} testconsoleWriter, _ = newConsoleWriter() testHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testconsoleWriter}) testExpected.LogType = syncloggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "Exceptions: allowing #1" testConfig = ` ` testExpected = new(logConfig) testExpected.Constraints, _ = newListConstraints([]LogLevel{ErrorLvl}) minMaxConstraint, _ := newMinMaxConstraints(TraceLvl, CriticalLvl) exception, _ = newLogLevelException("*", 
"testfile.go", minMaxConstraint) testExpected.Exceptions = []*logLevelException{exception} testconsoleWriter, _ = newConsoleWriter() testHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testconsoleWriter}) testExpected.LogType = syncloggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "Exceptions: allowing #2" testConfig = ` ` testExpected = new(logConfig) testExpected.Constraints, _ = newOffConstraints() minMaxConstraint, _ = newMinMaxConstraints(WarnLvl, CriticalLvl) exception, _ = newLogLevelException("*", "testfile.go", minMaxConstraint) testExpected.Exceptions = []*logLevelException{exception} testconsoleWriter, _ = newConsoleWriter() testHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testconsoleWriter}) testExpected.LogType = syncloggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "Predefined formats" formatID := predefinedPrefix + "xml-debug-short" testConfig = ` ` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl) testExpected.Exceptions = nil testconsoleWriter, _ = newConsoleWriter() testFormat, _ = predefinedFormats[formatID] testHeadSplitter, _ = newSplitDispatcher(testFormat, []interface{}{testconsoleWriter}) testExpected.LogType = syncloggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "Predefined formats redefine" testLogFileName = getTestFileName(testName, "") formatID = predefinedPrefix + "xml-debug-short" testConfig = ` ` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl) testExpected.Exceptions = nil testfileWriter, _ = 
newFileWriter(testLogFileName) testFormat, _ = newFormatter("%Level %Msg %File") testHeadSplitter, _ = newSplitDispatcher(testFormat, []interface{}{testfileWriter}) testExpected.LogType = syncloggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "Conn writer 1" testConfig = ` ` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl) testExpected.Exceptions = nil testConnWriter := newConnWriter("tcp", ":8888", false) testHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testConnWriter}) testExpected.LogType = syncloggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "Conn writer 2" testConfig = ` ` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl) testExpected.Exceptions = nil testConnWriter = newConnWriter("tcp", ":8888", true) testHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{testConnWriter}) testExpected.LogType = syncloggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) testName = "Errors #11" testConfig = ` ` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #12" testConfig = ` ` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #13" testConfig = ` ` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #14" testConfig = ` ` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #15" testConfig = ` ` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, 
nil}) testName = "Errors #16" testConfig = ` ` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #17" testConfig = ` ` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #18" testConfig = ` ` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #19" testConfig = ` ` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #20" testConfig = ` ` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #21" testConfig = ` ` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #22" testConfig = ` ` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #23" testConfig = ` ` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #24" testLogFileName = getTestFileName(testName, "") testConfig = ` ` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #25" testLogFileName = getTestFileName(testName, "") testConfig = ` ` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Errors #26" testConfig = ` ` parserTests = append(parserTests, parserTest{testName, testConfig, nil, true, nil}) testName = "Buffered writer same formatid override" testLogFileName = getTestFileName(testName, "") testConfig = ` ` testExpected = new(logConfig) testExpected.Constraints, _ = newMinMaxConstraints(TraceLvl, CriticalLvl) testExpected.Exceptions = nil testrollingFileWriterTime, _ = newRollingFileWriterTime(testLogFileName, rollingArchiveNone, "", 0, "2006-01-02T15:04:05Z07:00", rollingIntervalDaily, rollingNameModePrefix) testbufferedWriter, _ = newBufferedWriter(testrollingFileWriterTime, 100500, 100) testFormat, _ = 
newFormatter("%Level %Msg %File 123") formattedWriter, _ = newFormattedWriter(testbufferedWriter, testFormat) testHeadSplitter, _ = newSplitDispatcher(defaultformatter, []interface{}{formattedWriter}) testExpected.LogType = syncloggerTypeFromString testExpected.RootDispatcher = testHeadSplitter parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil}) } return parserTests } // Temporary solution: compare by string identity. Not the best solution in // terms of performance, but a valid one in terms of comparison, because // every seelog dispatcher/receiver must have a valid String() func // that fully represents its internal parameters. func configsAreEqual(conf1 *logConfig, conf2 interface{}) bool { if conf1 == nil { return conf2 == nil } if conf2 == nil { return conf1 == nil } // logConfig, ok := conf2 //.(*logConfig) // if !ok { // return false // } return fmt.Sprintf("%v", conf1) == fmt.Sprintf("%v", conf2) //logConfig) } func testLogFileFilter(fn string) bool { return ".log" == filepath.Ext(fn) } func cleanupAfterCfgTest(t *testing.T) { toDel, err := getDirFilePaths(".", testLogFileFilter, true) if nil != err { t.Fatal("Cannot list files in test directory!") } for _, p := range toDel { err = tryRemoveFile(p) if nil != err { t.Errorf("cannot remove file %s in test directory: %s", p, err.Error()) } } } func parseTest(test parserTest, t *testing.T) { conf, err := configFromReaderWithConfig(strings.NewReader(test.config), test.parserConfig) if /*err != nil &&*/ conf != nil && conf.RootDispatcher != nil { defer func() { if err = conf.RootDispatcher.Close(); err != nil { t.Errorf("\n----ERROR while closing root dispatcher in %s test: %s", test.testName, err) } }() } if (err != nil) != test.errorExpected { t.Errorf("\n----ERROR in %s:\nConfig: %s\n* Expected error:%t. 
Got error: %t\n", test.testName, test.config, test.errorExpected, (err != nil)) if err != nil { t.Logf("%s\n", err.Error()) } return } if err == nil && !configsAreEqual(conf, test.expected) { t.Errorf("\n----ERROR in %s:\nConfig: %s\n* Expected: %v. \n* Got: %v\n", test.testName, test.config, test.expected, conf) } } func TestParser(t *testing.T) { defer cleanupAfterCfgTest(t) for _, test := range getParserTests() { parseTest(test, t) } } ================================================ FILE: vendor/github.com/cihub/seelog/common_closer.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
package seelog


================================================
FILE: vendor/github.com/cihub/seelog/common_constraints.go
================================================
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package seelog

import (
	"errors"
	"fmt"
	"strings"
)

// logLevelConstraints is the general rule used to decide whether a given
// log level is selected for output.
type logLevelConstraints interface {
	IsAllowed(level LogLevel) bool
}

// minMaxConstraints allows every level inside an inclusive [min, max] window.
type minMaxConstraints struct {
	min LogLevel
	max LogLevel
}

// newMinMaxConstraints builds a [min, max] window constraint, validating that
// min <= max and that both bounds lie inside [Trace, Critical].
func newMinMaxConstraints(min LogLevel, max LogLevel) (*minMaxConstraints, error) { if min > max { return nil, fmt.Errorf("min level can't be greater than max. Got min: %d, max: %d", min, max) } if min < TraceLvl || min > CriticalLvl { return nil, fmt.Errorf("min level can't be less than Trace or greater than Critical. Got min: %d", min) } if max < TraceLvl || max > CriticalLvl { return nil, fmt.Errorf("max level can't be less than Trace or greater than Critical. Got max: %d", max) } return &minMaxConstraints{min, max}, nil } // IsAllowed returns true, if log level is in [min, max] range (inclusive). func (minMaxConstr *minMaxConstraints) IsAllowed(level LogLevel) bool { return level >= minMaxConstr.min && level <= minMaxConstr.max } func (minMaxConstr *minMaxConstraints) String() string { return fmt.Sprintf("Min: %s. Max: %s", minMaxConstr.min, minMaxConstr.max) } //======================================================= // A listConstraints represents constraints which use allowed log levels list. type listConstraints struct { allowedLevels map[LogLevel]bool } // newListConstraints creates a new listConstraints struct with the specified allowed levels. 
func newListConstraints(allowList []LogLevel) (*listConstraints, error) { if allowList == nil { return nil, errors.New("list can't be nil") } allowLevels, err := createMapFromList(allowList) if err != nil { return nil, err } err = validateOffLevel(allowLevels) if err != nil { return nil, err } return &listConstraints{allowLevels}, nil } func (listConstr *listConstraints) String() string { allowedList := "List: " listLevel := make([]string, len(listConstr.allowedLevels)) var logLevel LogLevel i := 0 for logLevel = TraceLvl; logLevel <= Off; logLevel++ { if listConstr.allowedLevels[logLevel] { listLevel[i] = logLevel.String() i++ } } allowedList += strings.Join(listLevel, ",") return allowedList } func createMapFromList(allowedList []LogLevel) (map[LogLevel]bool, error) { allowedLevels := make(map[LogLevel]bool, 0) for _, level := range allowedList { if level < TraceLvl || level > Off { return nil, fmt.Errorf("level can't be less than Trace or greater than Critical. Got level: %d", level) } allowedLevels[level] = true } return allowedLevels, nil } func validateOffLevel(allowedLevels map[LogLevel]bool) error { if _, ok := allowedLevels[Off]; ok && len(allowedLevels) > 1 { return errors.New("logLevel Off cant be mixed with other levels") } return nil } // IsAllowed returns true, if log level is in allowed log levels list. // If the list contains the only item 'common.Off' then IsAllowed will always return false for any input values. func (listConstr *listConstraints) IsAllowed(level LogLevel) bool { for l := range listConstr.allowedLevels { if l == level && level != Off { return true } } return false } // AllowedLevels returns allowed levels configuration as a map. 
func (listConstr *listConstraints) AllowedLevels() map[LogLevel]bool { return listConstr.allowedLevels } //======================================================= type offConstraints struct { } func newOffConstraints() (*offConstraints, error) { return &offConstraints{}, nil } func (offConstr *offConstraints) IsAllowed(level LogLevel) bool { return false } func (offConstr *offConstraints) String() string { return "Off constraint" } ================================================ FILE: vendor/github.com/cihub/seelog/common_constraints_test.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
package seelog

import (
	"testing"
)

// A reversed window (min > max) must be rejected.
func TestInvalidminMaxConstraints(t *testing.T) {
	constr, err := newMinMaxConstraints(CriticalLvl, WarnLvl)
	if err == nil || constr != nil {
		t.Errorf("expected an error and a nil value for minmax constraints: min = %d, max = %d. Got: %v, %v",
			CriticalLvl, WarnLvl, err, constr)
		return
	}
}

// Levels outside the valid range must be rejected by both constructors.
func TestInvalidLogLevels(t *testing.T) {
	var invalidMin uint8 = 123
	var invalidMax uint8 = 124
	minMaxConstr, errMinMax := newMinMaxConstraints(LogLevel(invalidMin), LogLevel(invalidMax))
	if errMinMax == nil || minMaxConstr != nil {
		t.Errorf("expected an error and a nil value for minmax constraints: min = %d, max = %d. Got: %v, %v",
			invalidMin, invalidMax, errMinMax, minMaxConstr)
		return
	}

	invalidList := []LogLevel{145}
	listConstr, errList := newListConstraints(invalidList)
	if errList == nil || listConstr != nil {
		t.Errorf("expected an error and a nil value for constraints list: %v. Got: %v, %v",
			invalidList, errList, listConstr)
		return
	}
}

// Duplicate entries must collapse to a single allowed level.
func TestlistConstraintsWithDuplicates(t *testing.T) {
	duplicateList := []LogLevel{TraceLvl, DebugLvl, InfoLvl,
		WarnLvl, ErrorLvl, CriticalLvl, CriticalLvl, CriticalLvl}
	listConstr, errList := newListConstraints(duplicateList)
	if errList != nil || listConstr == nil {
		t.Errorf("expected a valid constraints list struct for: %v, got error: %v, value: %v",
			duplicateList, errList, listConstr)
		return
	}

	listLevels := listConstr.AllowedLevels()
	if listLevels == nil {
		t.Fatalf("listConstr.AllowedLevels() == nil")
		return
	}
	if len(listLevels) != 6 {
		t.Errorf("expected: listConstr.AllowedLevels() length == 6. Got: %d", len(listLevels))
		return
	}
}

// Off mixed with real levels is invalid.
func TestlistConstraintsWithOffInList(t *testing.T) {
	offList := []LogLevel{TraceLvl, DebugLvl, Off}
	listConstr, errList := newListConstraints(offList)
	if errList == nil || listConstr != nil {
		t.Errorf("expected an error and a nil value for constraints list with 'Off': %v. Got: %v, %v",
			offList, errList, listConstr)
		return
	}
}

// logLevelTestCase pairs a level with the expected IsAllowed verdict.
type logLevelTestCase struct {
	level   LogLevel
	allowed bool
}

var minMaxTests = []logLevelTestCase{
	{TraceLvl, false},
	{DebugLvl, false},
	{InfoLvl, true},
	{WarnLvl, true},
	{ErrorLvl, false},
	{CriticalLvl, false},
	{123, false},
	{6, false},
}

func TestValidminMaxConstraints(t *testing.T) {
	constr, err := newMinMaxConstraints(InfoLvl, WarnLvl)
	if err != nil || constr == nil {
		t.Errorf("expected a valid constraints struct for minmax constraints: min = %d, max = %d. Got: %v, %v",
			InfoLvl, WarnLvl, err, constr)
		return
	}

	for _, tc := range minMaxTests {
		allowed := constr.IsAllowed(tc.level)
		if allowed != tc.allowed {
			t.Errorf("expected IsAllowed() = %t for level = %d. Got: %t", tc.allowed, tc.level, allowed)
			return
		}
	}
}

var listTests = []logLevelTestCase{
	{TraceLvl, true},
	{DebugLvl, false},
	{InfoLvl, true},
	{WarnLvl, true},
	{ErrorLvl, false},
	{CriticalLvl, true},
	{123, false},
	{6, false},
}

func TestValidlistConstraints(t *testing.T) {
	validList := []LogLevel{TraceLvl, InfoLvl, WarnLvl, CriticalLvl}
	constr, err := newListConstraints(validList)
	if err != nil || constr == nil {
		t.Errorf("expected a valid constraints list struct for: %v. Got error: %v, value: %v",
			validList, err, constr)
		return
	}

	for _, tc := range listTests {
		allowed := constr.IsAllowed(tc.level)
		if allowed != tc.allowed {
			t.Errorf("expected IsAllowed() = %t for level = %d. Got: %t", tc.allowed, tc.level, allowed)
			return
		}
	}
}

var offTests = []logLevelTestCase{
	{TraceLvl, false},
	{DebugLvl, false},
	{InfoLvl, false},
	{WarnLvl, false},
	{ErrorLvl, false},
	{CriticalLvl, false},
	{123, false},
	{6, false},
}

// A list containing only Off is valid but allows nothing.
func TestValidListoffConstraints(t *testing.T) {
	validList := []LogLevel{Off}
	constr, err := newListConstraints(validList)
	if err != nil || constr == nil {
		t.Errorf("expected a valid constraints list struct for: %v. Got error: %v, value: %v",
			validList, err, constr)
		return
	}

	for _, tc := range offTests {
		allowed := constr.IsAllowed(tc.level)
		if allowed != tc.allowed {
			t.Errorf("expected IsAllowed() = %t for level = %d. Got: %t", tc.allowed, tc.level, allowed)
			return
		}
	}
}


================================================
FILE: vendor/github.com/cihub/seelog/common_context.go
================================================
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog

import (
	"errors"
	"os"
	"path/filepath"
	"runtime"
	"strings"
	"time"
)

// workingDir caches the process working directory (with a trailing separator)
// so caller file paths can be shortened relative to it.
var workingDir = ""

func init() {
	setWorkDir()
}

// setWorkDir initializes workingDir. On failure it falls back to the bare
// path separator, so prefix trimming simply never matches.
func setWorkDir() {
	wd, err := os.Getwd()
	if err != nil {
		workingDir = string(os.PathSeparator)
		return
	}
	workingDir = wd + string(os.PathSeparator)
}

// Represents runtime caller context
type LogContextInterface interface {
	// Caller func name
	Func() string
	// Caller line num
	Line() int
	// Caller file short path
	ShortPath() string
	// Caller file full path
	FullPath() string
	// Caller file name (without path)
	FileName() string
	// True if the context is correct and may be used.
	// If false, then an error in context evaluation occurred and
	// all its other data may be corrupted.
	IsValid() bool
	// Time when log func was called
	CallTime() time.Time
}

// currentContext returns the context of the immediate caller.
func currentContext() (LogContextInterface, error) {
	return specificContext(1)
}

// extractCallerInfo walks `skip` frames up the stack and reports the caller's
// full path, working-dir-relative path, function name and line number.
func extractCallerInfo(skip int) (fullPath string, shortPath string, funcName string, lineNumber int, err error) {
	pc, fullPath, line, ok := runtime.Caller(skip)
	if !ok {
		return "", "", "", 0, errors.New("error during runtime.Caller")
	}
	// TODO: Currently fixes bug in weekly.2012-03-13+: Caller returns incorrect
	// separators. Delete later.
	fullPath = strings.Replace(fullPath, "\\", string(os.PathSeparator), -1)
	fullPath = strings.Replace(fullPath, "/", string(os.PathSeparator), -1)

	shortPath = fullPath
	if strings.HasPrefix(fullPath, workingDir) {
		shortPath = fullPath[len(workingDir):]
	}

	name := runtime.FuncForPC(pc).Name()
	if strings.HasPrefix(name, workingDir) {
		name = name[len(workingDir):]
	}
	return fullPath, shortPath, name, line, nil
}

// specificContext returns the context of the function `skip` stack frames
// above the caller. If skip == 0 it behaves like currentContext.
// A context is returned in every situation, even on error; in the error case
// it is an error context, which contains no paths or names, but states that
// they can't be extracted.
func specificContext(skip int) (LogContextInterface, error) {
	callTime := time.Now()
	if skip < 0 {
		negativeErr := errors.New("can not skip negative stack frames")
		return &errorContext{callTime, negativeErr}, negativeErr
	}
	fullPath, shortPath, function, line, err := extractCallerInfo(skip + 2)
	if err != nil {
		return &errorContext{callTime, err}, err
	}
	_, fileName := filepath.Split(fullPath)
	return &logContext{function, line, shortPath, fullPath, fileName, callTime}, nil
}

// logContext is a normal (successfully captured) runtime caller context.
type logContext struct {
	funcName  string
	line      int
	shortPath string
	fullPath  string
	fileName  string
	callTime  time.Time
}

func (ctx *logContext) IsValid() bool {
	return true
}

func (ctx *logContext) Func() string {
	return ctx.funcName
}

func (ctx *logContext) Line() int {
	return ctx.line
}

func (ctx *logContext) ShortPath() string {
	return ctx.shortPath
}

func (ctx *logContext) FullPath() string {
	return ctx.fullPath
}

func (ctx *logContext) FileName() string {
	return ctx.fileName
}

func (ctx *logContext) CallTime() time.Time {
	return ctx.callTime
}

// Prefixes prepended by errorContext accessors to flag that extraction failed.
const (
	errorContextFunc      = "Func() error:"
	errorContextShortPath = "ShortPath() error:"
	errorContextFullPath  = "FullPath() error:"
	errorContextFileName  = "FileName() error:"
)

// errorContext is returned when caller info could not be extracted.
type errorContext struct {
	errorTime time.Time
	err       error
}

func (ec *errorContext) IsValid() bool {
	return false
}

func (ec *errorContext) Line() int {
	return -1
}

func (ec *errorContext) Func() string {
	return errorContextFunc + ec.err.Error()
}

func (ec *errorContext) ShortPath() string {
	return errorContextShortPath + ec.err.Error()
}

func (ec *errorContext) FullPath() string {
	return errorContextFullPath + ec.err.Error()
}

func (ec *errorContext) FileName() string {
	return errorContextFileName + ec.err.Error()
}

func (ec *errorContext) CallTime() time.Time {
	return ec.errorTime
}


================================================
FILE: vendor/github.com/cihub/seelog/common_context_test.go
================================================
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
//    list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
//    this list of conditions and the following disclaimer in the documentation
//    and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog import ( "os" "path/filepath" "strings" "testing" ) const ( shortPath = "common_context_test.go" ) func init() { // Here we remove the hardcoding of the package name which breaks forks and some CI environments // such as jenkins _, _, funcName, _, _ := extractCallerInfo(1) commonPrefix = funcName[:strings.Index(funcName, "init·")] } var commonPrefix string var testFullPath string func fullPath(t *testing.T) string { if testFullPath == "" { wd, err := os.Getwd() if err != nil { t.Fatalf("Cannot get working directory: %s", err.Error()) } testFullPath = filepath.Join(wd, shortPath) } return testFullPath } func TestContext(t *testing.T) { context, err := currentContext() nameFunc := commonPrefix + "TestContext" if err != nil { t.Fatalf("Unexpected error: %s", err.Error()) } if context == nil { t.Fatalf("Expected: context != nil") } if nf := context.Func(); nf != nameFunc { // Account for a case when the func full path is bigger than commonPrefix but includes it. if !strings.HasSuffix(nf, nameFunc) { t.Errorf("expected context.Func == %s ; got %s", nameFunc, context.Func()) } } if context.ShortPath() != shortPath { t.Errorf("expected context.ShortPath == %s ; got %s", shortPath, context.ShortPath()) } fp := fullPath(t) if context.FullPath() != fp { t.Errorf("expected context.FullPath == %s ; got %s", fp, context.FullPath()) } } func innerContext() (context LogContextInterface, err error) { return currentContext() } func TestInnerContext(t *testing.T) { context, err := innerContext() nameFunc := commonPrefix + "innerContext" if err != nil { t.Fatalf("Unexpected error: %s", err.Error()) } if context == nil { t.Fatalf("Expected: context != nil") } if cf := context.Func(); cf != nameFunc { // Account for a case when the func full path is bigger than commonPrefix but includes it. 
if !strings.HasSuffix(cf, nameFunc) { t.Errorf("expected context.Func == %s ; got %s", nameFunc, context.Func()) } } if context.ShortPath() != shortPath { t.Errorf("expected context.ShortPath == %s ; got %s", shortPath, context.ShortPath()) } fp := fullPath(t) if context.FullPath() != fp { t.Errorf("expected context.FullPath == %s ; got %s", fp, context.FullPath()) } } ================================================ FILE: vendor/github.com/cihub/seelog/common_exception.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
package seelog

import (
	"errors"
	"fmt"
	"regexp"
	"strings"
)

// Validators applied to user-supplied file and func filters when rules are created.
var (
	fileFormatValidator = regexp.MustCompile(`[a-zA-Z0-9\\/ _\*\.]*`)
	funcFormatValidator = regexp.MustCompile(`[a-zA-Z0-9_\*\.]*`)
)

// logLevelException represents an exceptional case used when you need some
// specific files or funcs to override general constraints and to use their own.
type logLevelException struct {
	funcPatternParts []string
	filePatternParts []string
	funcPattern      string
	filePattern      string
	constraints      logLevelConstraints
}

// newLogLevelException creates a new exception for the given func/file
// patterns and level constraints.
func newLogLevelException(funcPattern string, filePattern string, constraints logLevelConstraints) (*logLevelException, error) {
	if constraints == nil {
		return nil, errors.New("constraints can not be nil")
	}
	ex := new(logLevelException)
	if err := ex.initFuncPatternParts(funcPattern); err != nil {
		return nil, err
	}
	ex.funcPattern = strings.Join(ex.funcPatternParts, "")
	if err := ex.initFilePatternParts(filePattern); err != nil {
		return nil, err
	}
	ex.filePattern = strings.Join(ex.filePatternParts, "")
	ex.constraints = constraints
	return ex, nil
}

// MatchesContext returns true if context matches the patterns of this logLevelException.
func (ex *logLevelException) MatchesContext(context LogContextInterface) bool {
	return ex.match(context.Func(), context.FullPath())
}

// IsAllowed returns true if log level is allowed according to the constraints of this logLevelException.
func (ex *logLevelException) IsAllowed(level LogLevel) bool {
	return ex.constraints.IsAllowed(level)
}

// FuncPattern returns the function pattern of an exception.
func (ex *logLevelException) FuncPattern() string {
	return ex.funcPattern
}

// FilePattern returns the file pattern of an exception.
func (ex *logLevelException) FilePattern() string {
	return ex.filePattern
}

// initFuncPatternParts checks whether the func filter has a correct format and
// splits funcPattern on parts.
func (ex *logLevelException) initFuncPatternParts(funcPattern string) (err error) {
	if funcFormatValidator.FindString(funcPattern) != funcPattern {
		return errors.New("func path \"" + funcPattern + "\" contains incorrect symbols. Only a-z A-Z 0-9 _ * . allowed)")
	}
	ex.funcPatternParts = splitPattern(funcPattern)
	return nil
}

// initFilePatternParts checks whether the file filter has a correct format and
// splits file patterns using splitPattern.
func (ex *logLevelException) initFilePatternParts(filePattern string) (err error) {
	if fileFormatValidator.FindString(filePattern) != filePattern {
		return errors.New("file path \"" + filePattern + "\" contains incorrect symbols. Only a-z A-Z 0-9 \\ / _ * . allowed)")
	}
	ex.filePatternParts = splitPattern(filePattern)
	return err
}

// match reports whether both the func path and the file path satisfy
// this exception's patterns.
func (ex *logLevelException) match(funcPath string, filePath string) bool {
	return stringMatchesPattern(ex.funcPatternParts, funcPath) &&
		stringMatchesPattern(ex.filePatternParts, filePath)
}

func (ex *logLevelException) String() string {
	str := fmt.Sprintf("Func: %s File: %s ", ex.funcPattern, ex.filePattern)
	if ex.constraints == nil {
		return str + "nil"
	}
	return str + fmt.Sprintf("Constr: %s", ex.constraints)
}

// splitPattern splits pattern into strings and asterisks.
// Example: "ab*cde**f" -> ["ab", "*", "cde", "*", "f"]
func splitPattern(pattern string) []string {
	var parts []string
	var prev rune
	for _, r := range pattern {
		switch {
		case r == '*':
			// Collapse runs of asterisks into a single "*" part.
			if prev != '*' {
				parts = append(parts, "*")
			}
		case len(parts) != 0 && prev != '*':
			// Extend the current literal part.
			parts[len(parts)-1] += string(r)
		default:
			// Start a new literal part.
			parts = append(parts, string(r))
		}
		prev = r
	}
	return parts
}

// stringMatchesPattern checks whether testString matches pattern with asterisks.
// Standard regexp functionality is not used here because of performance issues.
func stringMatchesPattern(patternparts []string, testString string) bool {
	if len(patternparts) == 0 {
		return len(testString) == 0
	}
	head := patternparts[0]
	if head != "*" {
		// A literal part must match at the very start of the string.
		if strings.HasPrefix(testString, head) {
			return stringMatchesPattern(patternparts[1:], testString[len(head):])
		}
		return false
	}
	// A trailing "*" matches anything that is left.
	if len(patternparts) == 1 {
		return true
	}
	// Try every occurrence of the part following the "*".
	next := patternparts[1]
	rest := testString
	for {
		i := strings.Index(rest, next)
		if i == -1 {
			return false
		}
		rest = rest[i+len(next):]
		if stringMatchesPattern(patternparts[2:], rest) {
			return true
		}
	}
}

// ================================================
// FILE: vendor/github.com/cihub/seelog/common_exception_test.go
// ================================================
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2.
Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package seelog import ( "testing" ) type exceptionTestCase struct { funcPattern string filePattern string funcName string fileName string match bool } var exceptionTestCases = []exceptionTestCase{ {"*", "*", "func", "file", true}, {"func*", "*", "func", "file", true}, {"*func", "*", "func", "file", true}, {"*func", "*", "1func", "file", true}, {"func*", "*", "func1", "file", true}, {"fu*nc", "*", "func", "file", true}, {"fu*nc", "*", "fu1nc", "file", true}, {"fu*nc", "*", "func1nc", "file", true}, {"*fu*nc*", "*", "somefuntonc", "file", true}, {"fu*nc", "*", "f1nc", "file", false}, {"func*", "*", "fun", "file", false}, {"fu*nc", "*", "func1n", "file", false}, {"**f**u**n**c**", "*", "func1n", "file", true}, } func TestMatchingCorrectness(t *testing.T) { constraints, err := newListConstraints([]LogLevel{TraceLvl}) if err != nil { t.Error(err) return } for _, testCase := range exceptionTestCases { rule, ruleError := newLogLevelException(testCase.funcPattern, testCase.filePattern, 
constraints) if ruleError != nil { t.Fatalf("Unexpected error on rule creation: [ %v, %v ]. %v", testCase.funcPattern, testCase.filePattern, ruleError) } match := rule.match(testCase.funcName, testCase.fileName) if match != testCase.match { t.Errorf("incorrect matching for [ %v, %v ] [ %v, %v ] Expected: %t. Got: %t", testCase.funcPattern, testCase.filePattern, testCase.funcName, testCase.fileName, testCase.match, match) } } } func TestAsterisksReducing(t *testing.T) { constraints, err := newListConstraints([]LogLevel{TraceLvl}) if err != nil { t.Error(err) return } rule, err := newLogLevelException("***func**", "fi*****le", constraints) if err != nil { t.Error(err) return } expectFunc := "*func*" if rule.FuncPattern() != expectFunc { t.Errorf("asterisks must be reduced. Expect:%v, Got:%v", expectFunc, rule.FuncPattern()) } expectFile := "fi*le" if rule.FilePattern() != expectFile { t.Errorf("asterisks must be reduced. Expect:%v, Got:%v", expectFile, rule.FilePattern()) } } ================================================ FILE: vendor/github.com/cihub/seelog/common_flusher.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package seelog // flusherInterface represents all objects that have to do cleanup // at certain moments of time (e.g. before app shutdown to avoid data loss) type flusherInterface interface { Flush() } ================================================ FILE: vendor/github.com/cihub/seelog/common_loglevel.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package seelog // Log level type type LogLevel uint8 // Log levels const ( TraceLvl = iota DebugLvl InfoLvl WarnLvl ErrorLvl CriticalLvl Off ) // Log level string representations (used in configuration files) const ( TraceStr = "trace" DebugStr = "debug" InfoStr = "info" WarnStr = "warn" ErrorStr = "error" CriticalStr = "critical" OffStr = "off" ) var levelToStringRepresentations = map[LogLevel]string{ TraceLvl: TraceStr, DebugLvl: DebugStr, InfoLvl: InfoStr, WarnLvl: WarnStr, ErrorLvl: ErrorStr, CriticalLvl: CriticalStr, Off: OffStr, } // LogLevelFromString parses a string and returns a corresponding log level, if sucessfull. func LogLevelFromString(levelStr string) (level LogLevel, found bool) { for lvl, lvlStr := range levelToStringRepresentations { if lvlStr == levelStr { return lvl, true } } return 0, false } // LogLevelToString returns seelog string representation for a specified level. Returns "" for invalid log levels. func (level LogLevel) String() string { levelStr, ok := levelToStringRepresentations[level] if ok { return levelStr } return "" } ================================================ FILE: vendor/github.com/cihub/seelog/dispatch_custom.go ================================================ // Copyright (c) 2013 - Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. 
Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package seelog import ( "errors" "fmt" "reflect" "sort" ) var registeredReceivers = make(map[string]reflect.Type) // RegisterReceiver records a custom receiver type, identified by a value // of that type (second argument), under the specified name. Registered // names can be used in the "name" attribute of config items. // // RegisterReceiver takes the type of the receiver argument, without taking // the value into the account. So do NOT enter any data to the second argument // and only call it like: // RegisterReceiver("somename", &MyReceiverType{}) // // After that, when a '' config tag with this name is used, // a receiver of the specified type would be instantiated. Check // CustomReceiver comments for interface details. // // NOTE 1: RegisterReceiver fails if you attempt to register different types // with the same name. 
// // NOTE 2: RegisterReceiver registers those receivers that must be used in // the configuration files ( items). Basically it is just the way // you tell seelog config parser what should it do when it meets a // tag with a specific name and data attributes. // // But If you are only using seelog as a proxy to an already instantiated // CustomReceiver (via LoggerFromCustomReceiver func), you should not call RegisterReceiver. func RegisterReceiver(name string, receiver CustomReceiver) { newType := reflect.TypeOf(reflect.ValueOf(receiver).Elem().Interface()) if t, ok := registeredReceivers[name]; ok && t != newType { panic(fmt.Sprintf("duplicate types for %s: %s != %s", name, t, newType)) } registeredReceivers[name] = newType } func customReceiverByName(name string) (creceiver CustomReceiver, err error) { rt, ok := registeredReceivers[name] if !ok { return nil, fmt.Errorf("custom receiver name not registered: '%s'", name) } v, ok := reflect.New(rt).Interface().(CustomReceiver) if !ok { return nil, fmt.Errorf("cannot instantiate receiver with name='%s'", name) } return v, nil } // CustomReceiverInitArgs represent arguments passed to the CustomReceiver.Init // func when custom receiver is being initialized. type CustomReceiverInitArgs struct { // XmlCustomAttrs represent '' xml config item attributes that // start with "data-". Map keys will be the attribute names without the "data-". // Map values will the those attribute values. // // E.g. if you have a '' // you will get map with 2 key-value pairs: "attr1"->"a1", "attr2"->"a2" // // Note that in custom items you can only use allowed attributes, like "name" and // your custom attributes, starting with "data-". Any other will lead to a // parsing error. XmlCustomAttrs map[string]string } // CustomReceiver is the interface that external custom seelog message receivers // must implement in order to be able to process seelog messages. Those receivers // are set in the xml config file using the tag. 
Check receivers reference // wiki section on that. // // Use seelog.RegisterReceiver on the receiver type before using it. type CustomReceiver interface { // ReceiveMessage is called when the custom receiver gets seelog message from // a parent dispatcher. // // Message, level and context args represent all data that was included in the seelog // message at the time it was logged. // // The formatting is already applied to the message and depends on the config // like with any other receiver. // // If you would like to inform seelog of an error that happened during the handling of // the message, return a non-nil error. This way you'll end up seeing your error like // any other internal seelog error. ReceiveMessage(message string, level LogLevel, context LogContextInterface) error // AfterParse is called immediately after your custom receiver is instantiated by // the xml config parser. So, if you need to do any startup logic after config parsing, // like opening file or allocating any resources after the receiver is instantiated, do it here. // // If this func returns a non-nil error, then the loading procedure will fail. E.g. // if you are loading a seelog xml config, the parser would not finish the loading // procedure and inform about an error like with any other config error. // // If your custom logger needs some configuration, you can use custom attributes in // your config. Check CustomReceiverInitArgs.XmlCustomAttrs comments. // // IMPORTANT: This func is NOT called when the LoggerFromCustomReceiver func is used // to create seelog proxy logger using the custom receiver. This func is only called when // receiver is instantiated from a config. AfterParse(initArgs CustomReceiverInitArgs) error // Flush is called when the custom receiver gets a 'flush' directive from a // parent receiver. If custom receiver implements some kind of buffering or // queing, then the appropriate reaction on a flush message is synchronous // flushing of all those queues/buffers. 
If custom receiver doesn't have // such mechanisms, then flush implementation may be left empty. Flush() // Close is called when the custom receiver gets a 'close' directive from a // parent receiver. This happens when a top-level seelog dispatcher is sending // 'close' to all child nodes and it means that current seelog logger is being closed. // If you need to do any cleanup after your custom receiver is done, you should do // it here. Close() error } type customReceiverDispatcher struct { formatter *formatter innerReceiver CustomReceiver customReceiverName string usedArgs CustomReceiverInitArgs } // newCustomReceiverDispatcher creates a customReceiverDispatcher which dispatches data to a specific receiver created // using a tag in the config file. func newCustomReceiverDispatcher(formatter *formatter, customReceiverName string, cArgs CustomReceiverInitArgs) (*customReceiverDispatcher, error) { if formatter == nil { return nil, errors.New("formatter cannot be nil") } if len(customReceiverName) == 0 { return nil, errors.New("custom receiver name cannot be empty") } creceiver, err := customReceiverByName(customReceiverName) if err != nil { return nil, err } err = creceiver.AfterParse(cArgs) if err != nil { return nil, err } disp := &customReceiverDispatcher{formatter, creceiver, customReceiverName, cArgs} return disp, nil } // newCustomReceiverDispatcherByValue is basically the same as newCustomReceiverDispatcher, but using // a specific CustomReceiver value instead of instantiating a new one by type. func newCustomReceiverDispatcherByValue(formatter *formatter, customReceiver CustomReceiver, name string, cArgs CustomReceiverInitArgs) (*customReceiverDispatcher, error) { if formatter == nil { return nil, errors.New("formatter cannot be nil") } if customReceiver == nil { return nil, errors.New("customReceiver cannot be nil") } disp := &customReceiverDispatcher{formatter, customReceiver, name, cArgs} return disp, nil } // CustomReceiver implementation. 
Check CustomReceiver comments. func (disp *customReceiverDispatcher) Dispatch( message string, level LogLevel, context LogContextInterface, errorFunc func(err error)) { defer func() { if err := recover(); err != nil { errorFunc(fmt.Errorf("panic in custom receiver '%s'.Dispatch: %s", reflect.TypeOf(disp.innerReceiver), err)) } }() err := disp.innerReceiver.ReceiveMessage(disp.formatter.Format(message, level, context), level, context) if err != nil { errorFunc(err) } } // CustomReceiver implementation. Check CustomReceiver comments. func (disp *customReceiverDispatcher) Flush() { disp.innerReceiver.Flush() } // CustomReceiver implementation. Check CustomReceiver comments. func (disp *customReceiverDispatcher) Close() error { disp.innerReceiver.Flush() err := disp.innerReceiver.Close() if err != nil { return err } return nil } func (disp *customReceiverDispatcher) String() string { datas := "" skeys := make([]string, 0, len(disp.usedArgs.XmlCustomAttrs)) for i := range disp.usedArgs.XmlCustomAttrs { skeys = append(skeys, i) } sort.Strings(skeys) for _, key := range skeys { datas += fmt.Sprintf("<%s, %s> ", key, disp.usedArgs.XmlCustomAttrs[key]) } str := fmt.Sprintf("Custom receiver %s [fmt='%s'],[data='%s'],[inner='%s']\n", disp.customReceiverName, disp.formatter.String(), datas, disp.innerReceiver) return str } ================================================ FILE: vendor/github.com/cihub/seelog/dispatch_customdispatcher_test.go ================================================ // Copyright (c) 2013 - Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. 
Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package seelog import ( "testing" ) type testCustomDispatcherMessageReceiver struct { customTestReceiver } func TestCustomDispatcher_Message(t *testing.T) { recName := "TestCustomDispatcher_Message" RegisterReceiver(recName, &testCustomDispatcherMessageReceiver{}) customDispatcher, err := newCustomReceiverDispatcher(onlyMessageFormatForTest, recName, CustomReceiverInitArgs{ XmlCustomAttrs: map[string]string{ "test": "testdata", }, }) if err != nil { t.Error(err) return } context, err := currentContext() if err != nil { t.Error(err) return } bytes := []byte("Hello") customDispatcher.Dispatch(string(bytes), TraceLvl, context, func(err error) {}) cout := customDispatcher.innerReceiver.(*testCustomDispatcherMessageReceiver).customTestReceiver.co if cout.initCalled != true { t.Error("Init not called") return } if cout.dataPassed != "testdata" { t.Errorf("wrong data passed: '%s'", cout.dataPassed) return } if cout.messageOutput != string(bytes) { t.Errorf("wrong message output: '%s'", 
cout.messageOutput) return } if cout.levelOutput != TraceLvl { t.Errorf("wrong log level: '%s'", cout.levelOutput) return } if cout.flushed { t.Error("Flush was not expected") return } if cout.closed { t.Error("Closing was not expected") return } } type testCustomDispatcherFlushReceiver struct { customTestReceiver } func TestCustomDispatcher_Flush(t *testing.T) { recName := "TestCustomDispatcher_Flush" RegisterReceiver(recName, &testCustomDispatcherFlushReceiver{}) customDispatcher, err := newCustomReceiverDispatcher(onlyMessageFormatForTest, recName, CustomReceiverInitArgs{ XmlCustomAttrs: map[string]string{ "test": "testdata", }, }) if err != nil { t.Error(err) return } customDispatcher.Flush() cout := customDispatcher.innerReceiver.(*testCustomDispatcherFlushReceiver).customTestReceiver.co if cout.initCalled != true { t.Error("Init not called") return } if cout.dataPassed != "testdata" { t.Errorf("wrong data passed: '%s'", cout.dataPassed) return } if cout.messageOutput != "" { t.Errorf("wrong message output: '%s'", cout.messageOutput) return } if cout.levelOutput != TraceLvl { t.Errorf("wrong log level: '%s'", cout.levelOutput) return } if !cout.flushed { t.Error("Flush was expected") return } if cout.closed { t.Error("Closing was not expected") return } } type testCustomDispatcherCloseReceiver struct { customTestReceiver } func TestCustomDispatcher_Close(t *testing.T) { recName := "TestCustomDispatcher_Close" RegisterReceiver(recName, &testCustomDispatcherCloseReceiver{}) customDispatcher, err := newCustomReceiverDispatcher(onlyMessageFormatForTest, recName, CustomReceiverInitArgs{ XmlCustomAttrs: map[string]string{ "test": "testdata", }, }) if err != nil { t.Error(err) return } customDispatcher.Close() cout := customDispatcher.innerReceiver.(*testCustomDispatcherCloseReceiver).customTestReceiver.co if cout.initCalled != true { t.Error("Init not called") return } if cout.dataPassed != "testdata" { t.Errorf("wrong data passed: '%s'", cout.dataPassed) return } 
if cout.messageOutput != "" { t.Errorf("wrong message output: '%s'", cout.messageOutput) return } if cout.levelOutput != TraceLvl { t.Errorf("wrong log level: '%s'", cout.levelOutput) return } if !cout.flushed { t.Error("Flush was expected") return } if !cout.closed { t.Error("Closing was expected") return } } ================================================ FILE: vendor/github.com/cihub/seelog/dispatch_dispatcher.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package seelog import ( "errors" "fmt" "io" ) // A dispatcherInterface is used to dispatch message to all underlying receivers. 
// Dispatch logic depends on given context and log level. Any errors are reported using errorFunc. // Also, as underlying receivers may have a state, dispatcher has a ShuttingDown method which performs // an immediate cleanup of all data that is stored in the receivers type dispatcherInterface interface { flusherInterface io.Closer Dispatch(message string, level LogLevel, context LogContextInterface, errorFunc func(err error)) } type dispatcher struct { formatter *formatter writers []*formattedWriter dispatchers []dispatcherInterface } // Creates a dispatcher which dispatches data to a list of receivers. // Each receiver should be either a Dispatcher or io.Writer, otherwise an error will be returned func createDispatcher(formatter *formatter, receivers []interface{}) (*dispatcher, error) { if formatter == nil { return nil, errors.New("formatter cannot be nil") } if receivers == nil || len(receivers) == 0 { return nil, errors.New("receivers cannot be nil or empty") } disp := &dispatcher{formatter, make([]*formattedWriter, 0), make([]dispatcherInterface, 0)} for _, receiver := range receivers { writer, ok := receiver.(*formattedWriter) if ok { disp.writers = append(disp.writers, writer) continue } ioWriter, ok := receiver.(io.Writer) if ok { writer, err := newFormattedWriter(ioWriter, disp.formatter) if err != nil { return nil, err } disp.writers = append(disp.writers, writer) continue } dispInterface, ok := receiver.(dispatcherInterface) if ok { disp.dispatchers = append(disp.dispatchers, dispInterface) continue } return nil, errors.New("method can receive either io.Writer or dispatcherInterface") } return disp, nil } func (disp *dispatcher) Dispatch( message string, level LogLevel, context LogContextInterface, errorFunc func(err error)) { for _, writer := range disp.writers { err := writer.Write(message, level, context) if err != nil { errorFunc(err) } } for _, dispInterface := range disp.dispatchers { dispInterface.Dispatch(message, level, context, errorFunc) } } 
// Flush goes through all underlying writers which implement flusherInterface interface // and closes them. Recursively performs the same action for underlying dispatchers func (disp *dispatcher) Flush() { for _, disp := range disp.Dispatchers() { disp.Flush() } for _, formatWriter := range disp.Writers() { flusher, ok := formatWriter.Writer().(flusherInterface) if ok { flusher.Flush() } } } // Close goes through all underlying writers which implement io.Closer interface // and closes them. Recursively performs the same action for underlying dispatchers // Before closing, writers are flushed to prevent loss of any buffered data, so // a call to Flush() func before Close() is not necessary func (disp *dispatcher) Close() error { for _, disp := range disp.Dispatchers() { disp.Flush() err := disp.Close() if err != nil { return err } } for _, formatWriter := range disp.Writers() { flusher, ok := formatWriter.Writer().(flusherInterface) if ok { flusher.Flush() } closer, ok := formatWriter.Writer().(io.Closer) if ok { err := closer.Close() if err != nil { return err } } } return nil } func (disp *dispatcher) Writers() []*formattedWriter { return disp.writers } func (disp *dispatcher) Dispatchers() []dispatcherInterface { return disp.dispatchers } func (disp *dispatcher) String() string { str := "formatter: " + disp.formatter.String() + "\n" str += " ->Dispatchers:" if len(disp.dispatchers) == 0 { str += "none\n" } else { str += "\n" for _, disp := range disp.dispatchers { str += fmt.Sprintf(" ->%s", disp) } } str += " ->Writers:" if len(disp.writers) == 0 { str += "none\n" } else { str += "\n" for _, writer := range disp.writers { str += fmt.Sprintf(" ->%s\n", writer) } } return str } ================================================ FILE: vendor/github.com/cihub/seelog/dispatch_filterdispatcher.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package seelog import ( "fmt" ) // A filterDispatcher writes the given message to underlying receivers only if message log level // is in the allowed list. type filterDispatcher struct { *dispatcher allowList map[LogLevel]bool } // newFilterDispatcher creates a new filterDispatcher using a list of allowed levels. 
func newFilterDispatcher(formatter *formatter, receivers []interface{}, allowList ...LogLevel) (*filterDispatcher, error) { disp, err := createDispatcher(formatter, receivers) if err != nil { return nil, err } allows := make(map[LogLevel]bool) for _, allowLevel := range allowList { allows[allowLevel] = true } return &filterDispatcher{disp, allows}, nil } func (filter *filterDispatcher) Dispatch( message string, level LogLevel, context LogContextInterface, errorFunc func(err error)) { isAllowed, ok := filter.allowList[level] if ok && isAllowed { filter.dispatcher.Dispatch(message, level, context, errorFunc) } } func (filter *filterDispatcher) String() string { return fmt.Sprintf("filterDispatcher ->\n%s", filter.dispatcher) } ================================================ FILE: vendor/github.com/cihub/seelog/dispatch_filterdispatcher_test.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package seelog

import (
	"testing"
)

// TestfilterDispatcher_Pass: a message whose level is in the allow list must
// reach the underlying writer.
func TestfilterDispatcher_Pass(t *testing.T) {
	writer, _ := newBytesVerifier(t)
	filter, err := newFilterDispatcher(onlyMessageFormatForTest, []interface{}{writer}, TraceLvl)
	if err != nil {
		t.Error(err)
		return
	}
	context, err := currentContext()
	if err != nil {
		t.Error(err)
		return
	}
	bytes := []byte("Hello")
	writer.ExpectBytes(bytes)
	filter.Dispatch(string(bytes), TraceLvl, context, func(err error) {})
	writer.MustNotExpect()
}

// TestfilterDispatcher_Deny: with an empty allow list no level may reach the
// writer; the bytes verifier fails the test if an unexpected write arrives.
func TestfilterDispatcher_Deny(t *testing.T) {
	writer, _ := newBytesVerifier(t)
	filter, err := newFilterDispatcher(defaultformatter, []interface{}{writer})
	if err != nil {
		t.Error(err)
		return
	}
	context, err := currentContext()
	if err != nil {
		t.Error(err)
		return
	}
	bytes := []byte("Hello")
	filter.Dispatch(string(bytes), TraceLvl, context, func(err error) {})
}

================================================
FILE: vendor/github.com/cihub/seelog/dispatch_splitdispatcher.go
================================================
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2.
Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package seelog import ( "fmt" ) // A splitDispatcher just writes the given message to underlying receivers. (Splits the message stream.) type splitDispatcher struct { *dispatcher } func newSplitDispatcher(formatter *formatter, receivers []interface{}) (*splitDispatcher, error) { disp, err := createDispatcher(formatter, receivers) if err != nil { return nil, err } return &splitDispatcher{disp}, nil } func (splitter *splitDispatcher) String() string { return fmt.Sprintf("splitDispatcher ->\n%s", splitter.dispatcher.String()) } ================================================ FILE: vendor/github.com/cihub/seelog/dispatch_splitdispatcher_test.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. 
Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
package seelog

import (
	"fmt"
	"testing"
)

// onlyMessageFormatForTest is a shared "%Msg"-only formatter used by the
// dispatcher tests in this package.
var onlyMessageFormatForTest *formatter

func init() {
	var err error
	onlyMessageFormatForTest, err = newFormatter("%Msg")
	if err != nil {
		fmt.Println("Can not create only message format: " + err.Error())
	}
}

// TestsplitDispatcher checks that a split dispatcher delivers one copy of the
// message to every underlying receiver.
func TestsplitDispatcher(t *testing.T) {
	first, _ := newBytesVerifier(t)
	second, _ := newBytesVerifier(t)
	spliter, err := newSplitDispatcher(onlyMessageFormatForTest, []interface{}{first, second})
	if err != nil {
		t.Error(err)
		return
	}
	ctx, err := currentContext()
	if err != nil {
		t.Error(err)
		return
	}
	msg := []byte("Hello")
	first.ExpectBytes(msg)
	second.ExpectBytes(msg)
	spliter.Dispatch(string(msg), TraceLvl, ctx, func(err error) {})
	first.MustNotExpect()
	second.MustNotExpect()
}

================================================
FILE: vendor/github.com/cihub/seelog/doc.go
================================================
// Copyright (c) 2014 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /* Package seelog implements logging functionality with flexible dispatching, filtering, and formatting. Creation To create a logger, use one of the following constructors: func LoggerFromConfigAsBytes func LoggerFromConfigAsFile func LoggerFromConfigAsString func LoggerFromWriterWithMinLevel func LoggerFromWriterWithMinLevelAndFormat func LoggerFromCustomReceiver (check https://github.com/cihub/seelog/wiki/Custom-receivers) Example: import log "github.com/cihub/seelog" func main() { logger, err := log.LoggerFromConfigAsFile("seelog.xml") if err != nil { panic(err) } defer logger.Flush() ... use logger ... } The "defer" line is important because if you are using asynchronous logger behavior, without this line you may end up losing some messages when you close your application because they are processed in another non-blocking goroutine. To avoid that you explicitly defer flushing all messages before closing. Usage Logger created using one of the LoggerFrom* funcs can be used directly by calling one of the main log funcs. Example: import log "github.com/cihub/seelog" func main() { logger, err := log.LoggerFromConfigAsFile("seelog.xml") if err != nil { panic(err) } defer logger.Flush() logger.Trace("test") logger.Debugf("var = %s", "abc") } Having loggers as variables is convenient if you are writing your own package with internal logging or if you have several loggers with different options. 
But for most standalone apps it is more convenient to use package level funcs and vars. There is a package level var 'Current' made for it. You can replace it with another logger using 'ReplaceLogger' and then use package level funcs: import log "github.com/cihub/seelog" func main() { logger, err := log.LoggerFromConfigAsFile("seelog.xml") if err != nil { panic(err) } log.ReplaceLogger(logger) defer log.Flush() log.Trace("test") log.Debugf("var = %s", "abc") } Last lines log.Trace("test") log.Debugf("var = %s", "abc") do the same as log.Current.Trace("test") log.Current.Debugf("var = %s", "abc") In this example the 'Current' logger was replaced using a 'ReplaceLogger' call and became equal to 'logger' variable created from config. This way you are able to use package level funcs instead of passing the logger variable. Configuration Main seelog point is to configure logger via config files and not the code. So you can only specify formats and log rules by changing the configuration. The configuration is read by LoggerFrom* funcs. These funcs read xml configuration from different sources and try to create a logger using it. All the configuration features are covered in detail in the official wiki: https://github.com/cihub/seelog/wiki. There are many sections covering different aspects of seelog, but the most important for understanding configs are: https://github.com/cihub/seelog/wiki/Constraints-and-exceptions https://github.com/cihub/seelog/wiki/Dispatchers-and-receivers https://github.com/cihub/seelog/wiki/Formatting https://github.com/cihub/seelog/wiki/Logger-types After you understand these concepts, check the 'Reference' section on the main wiki page to get the up-to-date list of dispatchers, receivers, formats, and logger types. Here is an example config with all these features: This config represents a logger with adaptive timeout between log messages (check logger types reference) which logs to console, all.log, and errors.log depending on the log level. 
Its output formats also depend on log level. This logger will only use log level 'debug' and higher (minlevel is set) for all files with names that don't start with 'test'. For files starting with 'test' this logger prohibits all levels below 'error'. Examples To learn seelog features faster you should check the examples package: https://github.com/cihub/seelog-examples It contains many example configs and usecases. */ package seelog ================================================ FILE: vendor/github.com/cihub/seelog/format.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
package seelog

import (
	"errors"
	"fmt"
	"strconv"
	"strings"
	"unicode"
	"unicode/utf8"
)

// FormatterSymbol is a special symbol used in config files to mark special format aliases.
const (
	FormatterSymbol = '%'
)

const (
	formatterSymbolString   = "%"
	formatterParameterStart = '('
	formatterParameterEnd   = ')'
)

// These are the time and date formats that are used when %Date or %Time format aliases are used.
const (
	DateDefaultFormat = "2006-01-02"
	TimeFormat        = "15:04:05"
)

// DefaultMsgFormat is the format applied when no explicit format is configured.
var DefaultMsgFormat = "%Ns [%Level] %Msg%n"

// defaultformatter renders DefaultMsgFormat; msgonlyformatter renders the raw message only.
var defaultformatter *formatter
var msgonlyformatter *formatter

func init() {
	// Both formats are constants, so failures here indicate a programming error;
	// they are reported to stdout rather than panicking.
	var err error
	defaultformatter, err = newFormatter(DefaultMsgFormat)
	if err != nil {
		fmt.Println("Error during defaultformatter creation: " + err.Error())
	}
	msgonlyformatter, err = newFormatter("%Msg")
	if err != nil {
		fmt.Println("Error during msgonlyformatter creation: " + err.Error())
	}
}

// FormatterFunc represents one formatter object that starts with '%' sign in the 'format' attribute
// of the 'format' config item. These special symbols are replaced with context values or special
// strings when message is written to byte receiver.
//
// Check https://github.com/cihub/seelog/wiki/Formatting for details.
// Full list (with descriptions) of formatters: https://github.com/cihub/seelog/wiki/Format-reference
//
// FormatterFunc takes raw log message, level, log context and returns a string, number (of any type) or any object
// that can be evaluated as string.
type FormatterFunc func(message string, level LogLevel, context LogContextInterface) interface{}

// FormatterFuncCreator is a factory of FormatterFunc objects. It is used to generate parameterized
// formatters (such as %Date or %EscM) and custom user formatters.
type FormatterFuncCreator func(param string) FormatterFunc

// formatterFuncs maps every parameterless format alias name to its implementation.
var formatterFuncs = map[string]FormatterFunc{
	"Level":     formatterLevel,
	"Lev":       formatterLev,
	"LEVEL":     formatterLEVEL,
	"LEV":       formatterLEV,
	"l":         formatterl,
	"Msg":       formatterMsg,
	"FullPath":  formatterFullPath,
	"File":      formatterFile,
	"RelFile":   formatterRelFile,
	"Func":      FormatterFunction,
	"FuncShort": FormatterFunctionShort,
	"Line":      formatterLine,
	"Time":      formatterTime,
	"UTCTime":   formatterUTCTime,
	"Ns":        formatterNs,
	"UTCNs":     formatterUTCNs,
	"n":         formattern,
	"t":         formattert,
}

// formatterFuncsParameterized maps parameterized alias names to their factories;
// RegisterCustomFormatter adds user-defined entries here.
var formatterFuncsParameterized = map[string]FormatterFuncCreator{
	"Date":    createDateTimeFormatterFunc,
	"UTCDate": createUTCDateTimeFormatterFunc,
	"EscM":    createANSIEscapeFunc,
}

// errorAliasReserved reports an attempt to register a custom formatter under a
// name that is already taken.
func errorAliasReserved(name string) error {
	return fmt.Errorf("cannot use '%s' as custom formatter name. Name is reserved", name)
}

// RegisterCustomFormatter registers a new custom formatter factory with a given name. If returned error is nil,
// then this name (prepended by '%' symbol) can be used in 'format' attributes in configuration and
// it will be treated like the standard parameterized formatter identifiers.
//
// RegisterCustomFormatter needs to be called before creating a logger for it to take effect. The general recommendation
// is to call it once in 'init' func of your application or any initializer func.
//
// For usage examples, check https://github.com/cihub/seelog/wiki/Custom-formatters.
//
// Name must only consist of letters (unicode.IsLetter).
//
// Name must not be one of the already registered standard formatter names
// (https://github.com/cihub/seelog/wiki/Format-reference) and previously registered
// custom format names. To avoid any potential name conflicts (in future releases), it is recommended
// to start your custom formatter name with a namespace (e.g. 'MyCompanySomething') or a 'Custom' keyword.
func RegisterCustomFormatter(name string, creator FormatterFuncCreator) error {
	if _, ok := formatterFuncs[name]; ok {
		return errorAliasReserved(name)
	}
	if _, ok := formatterFuncsParameterized[name]; ok {
		return errorAliasReserved(name)
	}
	formatterFuncsParameterized[name] = creator
	return nil
}

// formatter is used to write messages in a specific format, inserting such additional data
// as log level, date/time, etc.
type formatter struct {
	fmtStringOriginal string
	fmtString         string
	formatterFuncs    []FormatterFunc
}

// newFormatter creates a new formatter using a format string
func newFormatter(formatString string) (*formatter, error) {
	newformatter := new(formatter)
	newformatter.fmtStringOriginal = formatString
	err := newformatter.buildFormatterFuncs()
	if err != nil {
		return nil, err
	}
	return newformatter, nil
}

// buildFormatterFuncs parses fmtStringOriginal, replacing every recognized
// %-alias with a '%v' placeholder in fmtString and collecting the matching
// FormatterFunc for use in Format. '%%' escapes a literal '%'.
func (formatter *formatter) buildFormatterFuncs() error {
	formatter.formatterFuncs = make([]FormatterFunc, 0)
	var fmtString string
	for i := 0; i < len(formatter.fmtStringOriginal); i++ {
		char := formatter.fmtStringOriginal[i]
		if char != FormatterSymbol {
			fmtString += string(char)
			continue
		}
		isEndOfStr := i == len(formatter.fmtStringOriginal)-1
		if isEndOfStr {
			return fmt.Errorf("format error: %v - last symbol", formatterSymbolString)
		}
		isDoubledFormatterSymbol := formatter.fmtStringOriginal[i+1] == FormatterSymbol
		if isDoubledFormatterSymbol {
			fmtString += formatterSymbolString
			i++
			continue
		}
		function, nextI, err := formatter.extractFormatterFunc(i + 1)
		if err != nil {
			return err
		}
		fmtString += "%v"
		i = nextI
		formatter.formatterFuncs = append(formatter.formatterFuncs, function)
	}
	formatter.fmtString = fmtString
	return nil
}

// extractFormatterFunc resolves the alias beginning at index (just past a '%'),
// trying parameterless aliases first, then parameterized ones. Returns the func
// and the index of the alias's last consumed byte.
func (formatter *formatter) extractFormatterFunc(index int) (FormatterFunc, int, error) {
	letterSequence := formatter.extractLetterSequence(index)
	if len(letterSequence) == 0 {
		return nil, 0, fmt.Errorf("format error: lack of formatter after %v. At %v", formatterSymbolString, index)
	}
	function, formatterLength, ok := formatter.findFormatterFunc(letterSequence)
	if ok {
		return function, index + formatterLength - 1, nil
	}
	function, formatterLength, ok, err := formatter.findFormatterFuncParametrized(letterSequence, index)
	if err != nil {
		return nil, 0, err
	}
	if ok {
		return function, index + formatterLength - 1, nil
	}
	return nil, 0, errors.New("format error: unrecognized formatter at " + strconv.Itoa(index) + ": " + letterSequence)
}

// extractLetterSequence returns the run of letter runes starting at byte index.
func (formatter *formatter) extractLetterSequence(index int) string {
	letters := ""
	bytesToParse := []byte(formatter.fmtStringOriginal[index:])
	runeCount := utf8.RuneCount(bytesToParse)
	for i := 0; i < runeCount; i++ {
		rune, runeSize := utf8.DecodeRune(bytesToParse)
		bytesToParse = bytesToParse[runeSize:]
		if unicode.IsLetter(rune) {
			letters += string(rune)
		} else {
			break
		}
	}
	return letters
}

// findFormatterFunc performs a longest-prefix match of letters against the
// parameterless alias table, shortening the candidate one byte at a time.
func (formatter *formatter) findFormatterFunc(letters string) (FormatterFunc, int, bool) {
	currentVerb := letters
	for i := 0; i < len(letters); i++ {
		function, ok := formatterFuncs[currentVerb]
		if ok {
			return function, len(currentVerb), ok
		}
		currentVerb = currentVerb[:len(currentVerb)-1]
	}
	return nil, 0, false
}

// findFormatterFuncParametrized performs the same longest-prefix match against
// the parameterized alias table and, on a full match, parses an optional
// '(parameter)' that immediately follows the alias.
func (formatter *formatter) findFormatterFuncParametrized(letters string, lettersStartIndex int) (FormatterFunc, int, bool, error) {
	currentVerb := letters
	for i := 0; i < len(letters); i++ {
		functionCreator, ok := formatterFuncsParameterized[currentVerb]
		if ok {
			parameter := ""
			parameterLen := 0
			isVerbEqualsLetters := i == 0 // if not, then letter goes after formatter, and formatter is parameterless
			if isVerbEqualsLetters {
				userParameter := ""
				var err error
				userParameter, parameterLen, ok, err = formatter.findparameter(lettersStartIndex + len(currentVerb))
				if ok {
					parameter = userParameter
				} else if err != nil {
					return nil, 0, false, err
				}
			}
			return functionCreator(parameter), len(currentVerb) + parameterLen, true, nil
		}
		currentVerb = currentVerb[:len(currentVerb)-1]
	}
	return nil, 0, false, nil
}

// findparameter extracts the text between '(' and ')' starting at startIndex.
// Returns ok=false (no error) when no opening parenthesis is present.
func (formatter *formatter) findparameter(startIndex int) (string, int, bool, error) {
	if len(formatter.fmtStringOriginal) == startIndex || formatter.fmtStringOriginal[startIndex] != formatterParameterStart {
		return "", 0, false, nil
	}
	endIndex := strings.Index(formatter.fmtStringOriginal[startIndex:], string(formatterParameterEnd))
	if endIndex == -1 {
		return "", 0, false, fmt.Errorf("Unmatched parenthesis or invalid parameter at %d: %s", startIndex, formatter.fmtStringOriginal[startIndex:])
	}
	endIndex += startIndex
	length := endIndex - startIndex + 1
	return formatter.fmtStringOriginal[startIndex+1 : endIndex], length, true, nil
}

// Format processes a message with special formatters, log level, and context. Returns formatted string
// with all formatter identifiers changed to appropriate values.
func (formatter *formatter) Format(message string, level LogLevel, context LogContextInterface) string {
	if len(formatter.formatterFuncs) == 0 {
		return formatter.fmtString
	}
	params := make([]interface{}, len(formatter.formatterFuncs))
	for i, function := range formatter.formatterFuncs {
		params[i] = function(message, level, context)
	}
	return fmt.Sprintf(formatter.fmtString, params...)
}

// String returns the original, unparsed format string.
func (formatter *formatter) String() string {
	return formatter.fmtStringOriginal
}

//=====================================================

const (
	wrongLogLevel   = "WRONG_LOGLEVEL"
	wrongEscapeCode = "WRONG_ESCAPE"
)

var levelToString = map[LogLevel]string{
	TraceLvl:    "Trace",
	DebugLvl:    "Debug",
	InfoLvl:     "Info",
	WarnLvl:     "Warn",
	ErrorLvl:    "Error",
	CriticalLvl: "Critical",
	Off:         "Off",
}

var levelToShortString = map[LogLevel]string{
	TraceLvl:    "Trc",
	DebugLvl:    "Dbg",
	InfoLvl:     "Inf",
	WarnLvl:     "Wrn",
	ErrorLvl:    "Err",
	CriticalLvl: "Crt",
	Off:         "Off",
}

var levelToShortestString = map[LogLevel]string{
	TraceLvl:    "t",
	DebugLvl:    "d",
	InfoLvl:     "i",
	WarnLvl:     "w",
	ErrorLvl:    "e",
	CriticalLvl: "c",
	Off:         "o",
}

// Implementations of the parameterless aliases registered in formatterFuncs.

func formatterLevel(message string, level LogLevel, context LogContextInterface) interface{} {
	levelStr, ok := levelToString[level]
	if !ok {
		return wrongLogLevel
	}
	return levelStr
}

func formatterLev(message string, level LogLevel, context LogContextInterface) interface{} {
	levelStr, ok := levelToShortString[level]
	if !ok {
		return wrongLogLevel
	}
	return levelStr
}

func formatterLEVEL(message string, level LogLevel, context LogContextInterface) interface{} {
	return strings.ToTitle(formatterLevel(message, level, context).(string))
}

func formatterLEV(message string, level LogLevel, context LogContextInterface) interface{} {
	return strings.ToTitle(formatterLev(message, level, context).(string))
}

func formatterl(message string, level LogLevel, context LogContextInterface) interface{} {
	levelStr, ok := levelToShortestString[level]
	if !ok {
		return wrongLogLevel
	}
	return levelStr
}

func formatterMsg(message string, level LogLevel, context LogContextInterface) interface{} {
	return message
}

func formatterFullPath(message string, level LogLevel, context LogContextInterface) interface{} {
	return context.FullPath()
}

func formatterFile(message string, level LogLevel, context LogContextInterface) interface{} {
	return context.FileName()
}

func formatterRelFile(message string, level LogLevel, context LogContextInterface) interface{} {
	return context.ShortPath()
}

func FormatterFunction(message string, level LogLevel, context LogContextInterface) interface{} {
	return context.Func()
}

func FormatterFunctionShort(message string, level LogLevel, context LogContextInterface) interface{} {
	f := context.Func()
	spl := strings.Split(f, ".")
	return spl[len(spl)-1]
}

func formatterLine(message string, level LogLevel, context LogContextInterface) interface{} {
	return context.Line()
}

func formatterTime(message string, level LogLevel, context LogContextInterface) interface{} {
	return context.CallTime().Format(TimeFormat)
}

func formatterUTCTime(message string, level LogLevel, context LogContextInterface) interface{} {
	return context.CallTime().UTC().Format(TimeFormat)
}

func formatterNs(message string, level LogLevel, context LogContextInterface) interface{} {
	return context.CallTime().UnixNano()
}

func formatterUTCNs(message string, level LogLevel, context LogContextInterface) interface{} {
	return context.CallTime().UTC().UnixNano()
}

func formattern(message string, level LogLevel, context LogContextInterface) interface{} {
	return "\n"
}

func formattert(message string, level LogLevel, context LogContextInterface) interface{} {
	return "\t"
}

// createDateTimeFormatterFunc builds a %Date formatter; an empty parameter
// falls back to DateDefaultFormat.
func createDateTimeFormatterFunc(dateTimeFormat string) FormatterFunc {
	format := dateTimeFormat
	if format == "" {
		format = DateDefaultFormat
	}
	return func(message string, level LogLevel, context LogContextInterface) interface{} {
		return context.CallTime().Format(format)
	}
}

// createUTCDateTimeFormatterFunc is the UTC variant of createDateTimeFormatterFunc.
func createUTCDateTimeFormatterFunc(dateTimeFormat string) FormatterFunc {
	format := dateTimeFormat
	if format == "" {
		format = DateDefaultFormat
	}
	return func(message string, level LogLevel, context LogContextInterface) interface{} {
		return context.CallTime().UTC().Format(format)
	}
}

// createANSIEscapeFunc builds a %EscM formatter emitting an ANSI escape
// sequence (ESC '[' <codes> 'm'); an empty parameter yields wrongEscapeCode.
func createANSIEscapeFunc(escapeCodeString string) FormatterFunc {
	return func(message string, level LogLevel, context LogContextInterface) interface{} {
		if len(escapeCodeString) == 0 {
			return wrongEscapeCode
		}
		return fmt.Sprintf("%c[%sm", 0x1B, escapeCodeString)
	}
}

================================================
FILE: vendor/github.com/cihub/seelog/format_test.go
================================================
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog import ( "fmt" "strings" "testing" "time" ) const ( TestFuncName = "TestFormats" ) type formatTest struct { formatString string input string inputLogLevel LogLevel expectedOutput string errorExpected bool } var formatTests = []formatTest{ {"test", "abcdef", TraceLvl, "test", false}, {"", "abcdef", TraceLvl, "", false}, {"%Level", "", TraceLvl, "Trace", false}, {"%Level", "", DebugLvl, "Debug", false}, {"%Level", "", InfoLvl, "Info", false}, {"%Level", "", WarnLvl, "Warn", false}, {"%Level", "", ErrorLvl, "Error", false}, {"%Level", "", CriticalLvl, "Critical", false}, {"[%Level]", "", TraceLvl, "[Trace]", false}, {"[%Level]", "abc", DebugLvl, "[Debug]", false}, {"%LevelLevel", "", InfoLvl, "InfoLevel", false}, {"[%Level][%Level]", "", WarnLvl, "[Warn][Warn]", false}, {"[%Level]X[%Level]", "", ErrorLvl, "[Error]X[Error]", false}, {"%Levelll", "", CriticalLvl, "Criticalll", false}, {"%Lvl", "", TraceLvl, "", true}, {"%%Level", "", DebugLvl, "%Level", false}, {"%Level%", "", InfoLvl, "", true}, {"%sevel", "", WarnLvl, "", true}, {"Level", "", ErrorLvl, "Level", false}, {"%LevelLevel", "", CriticalLvl, "CriticalLevel", false}, {"%Lev", "", TraceLvl, "Trc", false}, {"%Lev", "", DebugLvl, "Dbg", false}, {"%Lev", "", InfoLvl, "Inf", false}, {"%Lev", "", WarnLvl, "Wrn", false}, {"%Lev", "", ErrorLvl, "Err", false}, {"%Lev", "", CriticalLvl, "Crt", false}, {"[%Lev]", "", TraceLvl, "[Trc]", false}, {"[%Lev]", "abc", DebugLvl, "[Dbg]", false}, {"%LevLevel", "", InfoLvl, "InfLevel", false}, {"[%Level][%Lev]", "", WarnLvl, "[Warn][Wrn]", false}, {"[%Lev]X[%Lev]", "", ErrorLvl, "[Err]X[Err]", false}, {"%Levll", "", CriticalLvl, "Crtll", false}, {"%LEVEL", "", TraceLvl, "TRACE", false}, {"%LEVEL", "", DebugLvl, "DEBUG", false}, {"%LEVEL", "", InfoLvl, "INFO", false}, {"%LEVEL", "", WarnLvl, "WARN", false}, {"%LEVEL", "", ErrorLvl, "ERROR", false}, {"%LEVEL", "", CriticalLvl, "CRITICAL", false}, {"[%LEVEL]", "", TraceLvl, "[TRACE]", false}, {"[%LEVEL]", "abc", 
DebugLvl, "[DEBUG]", false}, {"%LEVELLEVEL", "", InfoLvl, "INFOLEVEL", false}, {"[%LEVEL][%LEVEL]", "", WarnLvl, "[WARN][WARN]", false}, {"[%LEVEL]X[%Level]", "", ErrorLvl, "[ERROR]X[Error]", false}, {"%LEVELLL", "", CriticalLvl, "CRITICALLL", false}, {"%LEV", "", TraceLvl, "TRC", false}, {"%LEV", "", DebugLvl, "DBG", false}, {"%LEV", "", InfoLvl, "INF", false}, {"%LEV", "", WarnLvl, "WRN", false}, {"%LEV", "", ErrorLvl, "ERR", false}, {"%LEV", "", CriticalLvl, "CRT", false}, {"[%LEV]", "", TraceLvl, "[TRC]", false}, {"[%LEV]", "abc", DebugLvl, "[DBG]", false}, {"%LEVLEVEL", "", InfoLvl, "INFLEVEL", false}, {"[%LEVEL][%LEV]", "", WarnLvl, "[WARN][WRN]", false}, {"[%LEV]X[%LEV]", "", ErrorLvl, "[ERR]X[ERR]", false}, {"%LEVLL", "", CriticalLvl, "CRTLL", false}, {"%l", "", TraceLvl, "t", false}, {"%l", "", DebugLvl, "d", false}, {"%l", "", InfoLvl, "i", false}, {"%l", "", WarnLvl, "w", false}, {"%l", "", ErrorLvl, "e", false}, {"%l", "", CriticalLvl, "c", false}, {"[%l]", "", TraceLvl, "[t]", false}, {"[%l]", "abc", DebugLvl, "[d]", false}, {"%Level%Msg", "", TraceLvl, "Trace", false}, {"%Level%Msg", "A", DebugLvl, "DebugA", false}, {"%Level%Msg", "", InfoLvl, "Info", false}, {"%Level%Msg", "test", WarnLvl, "Warntest", false}, {"%Level%Msg", " ", ErrorLvl, "Error ", false}, {"%Level%Msg", "", CriticalLvl, "Critical", false}, {"[%Level]", "", TraceLvl, "[Trace]", false}, {"[%Level]", "abc", DebugLvl, "[Debug]", false}, {"%Level%MsgLevel", "A", InfoLvl, "InfoALevel", false}, {"[%Level]%Msg[%Level]", "test", WarnLvl, "[Warn]test[Warn]", false}, {"[%Level]%MsgX[%Level]", "test", ErrorLvl, "[Error]testX[Error]", false}, {"%Levell%Msgl", "Test", CriticalLvl, "CriticallTestl", false}, {"%Lev%Msg%LEVEL%LEV%l%Msg", "Test", InfoLvl, "InfTestINFOINFiTest", false}, {"%n", "", CriticalLvl, "\n", false}, {"%t", "", CriticalLvl, "\t", false}, } func TestFormats(t *testing.T) { context, conErr := currentContext() if conErr != nil { t.Fatal("Cannot get current context:" + 
conErr.Error()) return } for _, test := range formatTests { form, err := newFormatter(test.formatString) if (err != nil) != test.errorExpected { t.Errorf("input: %s \nInput LL: %s\n* Expected error:%t Got error: %t\n", test.input, test.inputLogLevel, test.errorExpected, (err != nil)) if err != nil { t.Logf("%s\n", err.Error()) } continue } else if err != nil { continue } msg := form.Format(test.input, test.inputLogLevel, context) if err == nil && msg != test.expectedOutput { t.Errorf("format: %s \nInput: %s \nInput LL: %s\n* Expected: %s \n* Got: %s\n", test.formatString, test.input, test.inputLogLevel, test.expectedOutput, msg) } } } func TestDateFormat(t *testing.T) { _, err := newFormatter("%Date") if err != nil { t.Error("Unexpected error: " + err.Error()) } } func TestDateParameterizedFormat(t *testing.T) { testFormat := "Mon Jan 02 2006 15:04:05" preciseForamt := "Mon Jan 02 2006 15:04:05.000" context, conErr := currentContext() if conErr != nil { t.Fatal("Cannot get current context:" + conErr.Error()) return } form, err := newFormatter("%Date(" + preciseForamt + ")") if err != nil { t.Error("Unexpected error: " + err.Error()) } dateBefore := time.Now().Format(testFormat) msg := form.Format("", TraceLvl, context) dateAfter := time.Now().Format(testFormat) if !strings.HasPrefix(msg, dateBefore) && !strings.HasPrefix(msg, dateAfter) { t.Errorf("incorrect message: %v. 
Expected %v or %v", msg, dateBefore, dateAfter) } _, err = newFormatter("%Date(" + preciseForamt) if err == nil { t.Error("Expected error for invalid format") } } func createTestFormatter(format string) FormatterFunc { return func(message string, level LogLevel, context LogContextInterface) interface{} { return "TEST " + context.Func() + " TEST" } } func TestCustomFormatterRegistration(t *testing.T) { err := RegisterCustomFormatter("Level", createTestFormatter) if err == nil { t.Errorf("expected an error when trying to register a custom formatter with a reserved alias") } err = RegisterCustomFormatter("EscM", createTestFormatter) if err == nil { t.Errorf("expected an error when trying to register a custom formatter with a reserved parameterized alias") } err = RegisterCustomFormatter("TEST", createTestFormatter) if err != nil { t.Fatalf("Registering custom formatter: unexpected error: %s", err) } err = RegisterCustomFormatter("TEST", createTestFormatter) if err == nil { t.Errorf("expected an error when trying to register a custom formatter with duplicate name") } context, conErr := currentContext() if conErr != nil { t.Fatal("Cannot get current context:" + conErr.Error()) return } form, err := newFormatter("%Msg %TEST 123") if err != nil { t.Fatalf("%s\n", err.Error()) } expected := fmt.Sprintf("test TEST %sTestCustomFormatterRegistration TEST 123", commonPrefix) msg := form.Format("test", DebugLvl, context) if msg != expected { t.Fatalf("Custom formatter: invalid output. Expected: '%s'. Got: '%s'", expected, msg) } } ================================================ FILE: vendor/github.com/cihub/seelog/internals_baseerror.go ================================================ package seelog // Base struct for custom errors. 
type baseError struct { message string } func (be baseError) Error() string { return be.message } ================================================ FILE: vendor/github.com/cihub/seelog/internals_byteverifiers_test.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package seelog import ( "errors" "strconv" "testing" ) // bytesVerifier is a byte receiver which is used for correct input testing. // It allows to compare expected result and actual result in context of received bytes. 
type bytesVerifier struct { expectedBytes []byte // bytes that are expected to be written in next Write call waitingForInput bool // true if verifier is waiting for a Write call writtenData []byte // real bytes that actually were received during the last Write call testEnv *testing.T } func newBytesVerifier(t *testing.T) (*bytesVerifier, error) { if t == nil { return nil, errors.New("testing environment param is nil") } verifier := new(bytesVerifier) verifier.testEnv = t return verifier, nil } // Write is used to check whether verifier was waiting for input and whether bytes are the same as expectedBytes. // After Write call, waitingForInput is set to false. func (verifier *bytesVerifier) Write(bytes []byte) (n int, err error) { if !verifier.waitingForInput { verifier.testEnv.Errorf("unexpected input: %v", string(bytes)) return } verifier.waitingForInput = false verifier.writtenData = bytes if verifier.expectedBytes != nil { if bytes == nil { verifier.testEnv.Errorf("incoming 'bytes' is nil") } else { if len(bytes) != len(verifier.expectedBytes) { verifier.testEnv.Errorf("'Bytes' has unexpected len. Expected: %d. Got: %d. . Expected string: %q. Got: %q", len(verifier.expectedBytes), len(bytes), string(verifier.expectedBytes), string(bytes)) } else { for i := 0; i < len(bytes); i++ { if verifier.expectedBytes[i] != bytes[i] { verifier.testEnv.Errorf("incorrect data on position %d. Expected: %d. Got: %d. Expected string: %q. Got: %q", i, verifier.expectedBytes[i], bytes[i], string(verifier.expectedBytes), string(bytes)) break } } } } } return len(bytes), nil } func (verifier *bytesVerifier) ExpectBytes(bytes []byte) { verifier.waitingForInput = true verifier.expectedBytes = bytes } func (verifier *bytesVerifier) MustNotExpect() { if verifier.waitingForInput { errorText := "Unexpected input: " if verifier.expectedBytes != nil { errorText += "len = " + strconv.Itoa(len(verifier.expectedBytes)) errorText += ". 
text = " + string(verifier.expectedBytes) } verifier.testEnv.Errorf(errorText) } } func (verifier *bytesVerifier) Close() error { return nil } // nullWriter implements io.Writer inteface and does nothing, always returning a successful write result type nullWriter struct { } func (writer *nullWriter) Write(bytes []byte) (n int, err error) { return len(bytes), nil } func (writer *nullWriter) Close() error { return nil } ================================================ FILE: vendor/github.com/cihub/seelog/internals_fsutils.go ================================================ package seelog import ( "archive/zip" "bytes" "fmt" "io" "io/ioutil" "os" "path/filepath" "sync" ) // File and directory permitions. const ( defaultFilePermissions = 0666 defaultDirectoryPermissions = 0767 ) const ( // Max number of directories can be read asynchronously. maxDirNumberReadAsync = 1000 ) type cannotOpenFileError struct { baseError } func newCannotOpenFileError(fname string) *cannotOpenFileError { return &cannotOpenFileError{baseError{message: "Cannot open file: " + fname}} } type notDirectoryError struct { baseError } func newNotDirectoryError(dname string) *notDirectoryError { return ¬DirectoryError{baseError{message: dname + " is not directory"}} } // fileFilter is a filtering criteria function for '*os.File'. // Must return 'false' to set aside the given file. type fileFilter func(os.FileInfo, *os.File) bool // filePathFilter is a filtering creteria function for file path. // Must return 'false' to set aside the given file. type filePathFilter func(filePath string) bool // GetSubdirNames returns a list of directories found in // the given one with dirPath. func getSubdirNames(dirPath string) ([]string, error) { fi, err := os.Stat(dirPath) if err != nil { return nil, err } if !fi.IsDir() { return nil, newNotDirectoryError(dirPath) } dd, err := os.Open(dirPath) // Cannot open file. 
if err != nil { if dd != nil { dd.Close() } return nil, err } defer dd.Close() // TODO: Improve performance by buffering reading. allEntities, err := dd.Readdir(-1) if err != nil { return nil, err } subDirs := []string{} for _, entity := range allEntities { if entity.IsDir() { subDirs = append(subDirs, entity.Name()) } } return subDirs, nil } // getSubdirAbsPaths recursively visit all the subdirectories // starting from the given directory and returns absolute paths for them. func getAllSubdirAbsPaths(dirPath string) (res []string, err error) { dps, err := getSubdirAbsPaths(dirPath) if err != nil { res = []string{} return } res = append(res, dps...) for _, dp := range dps { sdps, err := getAllSubdirAbsPaths(dp) if err != nil { return []string{}, err } res = append(res, sdps...) } return } // getSubdirAbsPaths supplies absolute paths for all subdirectiries in a given directory. // Input: (I1) dirPath - absolute path of a directory in question. // Out: (O1) - slice of subdir asbolute paths; (O2) - error of the operation. // Remark: If error (O2) is non-nil then (O1) is nil and vice versa. func getSubdirAbsPaths(dirPath string) ([]string, error) { sdns, err := getSubdirNames(dirPath) if err != nil { return nil, err } rsdns := []string{} for _, sdn := range sdns { rsdns = append(rsdns, filepath.Join(dirPath, sdn)) } return rsdns, nil } // getOpenFilesInDir supplies a slice of os.File pointers to files located in the directory. // Remark: Ignores files for which fileFilter returns false func getOpenFilesInDir(dirPath string, fFilter fileFilter) ([]*os.File, error) { dfi, err := os.Open(dirPath) if err != nil { return nil, newCannotOpenFileError("Cannot open directory " + dirPath) } defer dfi.Close() // Size of read buffer (i.e. chunk of items read at a time). rbs := 64 resFiles := []*os.File{} L: for { // Read directory entities by reasonable chuncks // to prevent overflows on big number of files. fis, e := dfi.Readdir(rbs) switch e { // It's OK. 
case nil: // Do nothing, just continue cycle. case io.EOF: break L // Something went wrong. default: return nil, e } // THINK: Maybe, use async running. for _, fi := range fis { // NB: On Linux this could be a problem as // there are lots of file types available. if !fi.IsDir() { f, e := os.Open(filepath.Join(dirPath, fi.Name())) if e != nil { if f != nil { f.Close() } // THINK: Add nil as indicator that a problem occurred. resFiles = append(resFiles, nil) continue } // Check filter condition. if fFilter != nil && !fFilter(fi, f) { continue } resFiles = append(resFiles, f) } } } return resFiles, nil } func isRegular(m os.FileMode) bool { return m&os.ModeType == 0 } // getDirFilePaths return full paths of the files located in the directory. // Remark: Ignores files for which fileFilter returns false. func getDirFilePaths(dirPath string, fpFilter filePathFilter, pathIsName bool) ([]string, error) { dfi, err := os.Open(dirPath) if err != nil { return nil, newCannotOpenFileError("Cannot open directory " + dirPath) } defer dfi.Close() var absDirPath string if !filepath.IsAbs(dirPath) { absDirPath, err = filepath.Abs(dirPath) if err != nil { return nil, fmt.Errorf("cannot get absolute path of directory: %s", err.Error()) } } else { absDirPath = dirPath } // TODO: check if dirPath is really directory. // Size of read buffer (i.e. chunk of items read at a time). rbs := 2 << 5 filePaths := []string{} var fp string L: for { // Read directory entities by reasonable chuncks // to prevent overflows on big number of files. fis, e := dfi.Readdir(rbs) switch e { // It's OK. case nil: // Do nothing, just continue cycle. case io.EOF: break L // Indicate that something went wrong. default: return nil, e } // THINK: Maybe, use async running. for _, fi := range fis { // NB: Should work on every Windows and non-Windows OS. if isRegular(fi.Mode()) { if pathIsName { fp = fi.Name() } else { // Build full path of a file. fp = filepath.Join(absDirPath, fi.Name()) } // Check filter condition. 
if fpFilter != nil && !fpFilter(fp) { continue } filePaths = append(filePaths, fp) } } } return filePaths, nil } // getOpenFilesByDirectoryAsync runs async reading directories 'dirPaths' and inserts pairs // in map 'filesInDirMap': Key - directory name, value - *os.File slice. func getOpenFilesByDirectoryAsync( dirPaths []string, fFilter fileFilter, filesInDirMap map[string][]*os.File, ) error { n := len(dirPaths) if n > maxDirNumberReadAsync { return fmt.Errorf("number of input directories to be read exceeded max value %d", maxDirNumberReadAsync) } type filesInDirResult struct { DirName string Files []*os.File Error error } dirFilesChan := make(chan *filesInDirResult, n) var wg sync.WaitGroup // Register n goroutines which are going to do work. wg.Add(n) for i := 0; i < n; i++ { // Launch asynchronously the piece of work. go func(dirPath string) { fs, e := getOpenFilesInDir(dirPath, fFilter) dirFilesChan <- &filesInDirResult{filepath.Base(dirPath), fs, e} // Mark the current goroutine as finished (work is done). wg.Done() }(dirPaths[i]) } // Wait for all goroutines to finish their work. wg.Wait() // Close the error channel to let for-range clause // get all the buffered values without blocking and quit in the end. close(dirFilesChan) for fidr := range dirFilesChan { if fidr.Error == nil { // THINK: What will happen if the key is already present? filesInDirMap[fidr.DirName] = fidr.Files } else { return fidr.Error } } return nil } func copyFile(sf *os.File, dst string) (int64, error) { df, err := os.Create(dst) if err != nil { return 0, err } defer df.Close() return io.Copy(df, sf) } // fileExists return flag whether a given file exists // and operation error if an unclassified failure occurs. func fileExists(path string) (bool, error) { _, err := os.Stat(path) if err != nil { if os.IsNotExist(err) { return false, nil } return false, err } return true, nil } // createDirectory makes directory with a given name // making all parent directories if necessary. 
func createDirectory(dirPath string) error { var dPath string var err error if !filepath.IsAbs(dirPath) { dPath, err = filepath.Abs(dirPath) if err != nil { return err } } else { dPath = dirPath } exists, err := fileExists(dPath) if err != nil { return err } if exists { return nil } return os.MkdirAll(dPath, os.ModeDir) } // tryRemoveFile gives a try removing the file // only ignoring an error when the file does not exist. func tryRemoveFile(filePath string) (err error) { err = os.Remove(filePath) if os.IsNotExist(err) { err = nil return } return } // Unzips a specified zip file. Returns filename->filebytes map. func unzip(archiveName string) (map[string][]byte, error) { // Open a zip archive for reading. r, err := zip.OpenReader(archiveName) if err != nil { return nil, err } defer r.Close() // Files to be added to archive // map file name to contents files := make(map[string][]byte) // Iterate through the files in the archive, // printing some of their contents. for _, f := range r.File { rc, err := f.Open() if err != nil { return nil, err } bts, err := ioutil.ReadAll(rc) rcErr := rc.Close() if err != nil { return nil, err } if rcErr != nil { return nil, rcErr } files[f.Name] = bts } return files, nil } // Creates a zip file with the specified file names and byte contents. func createZip(archiveName string, files map[string][]byte) error { // Create a buffer to write our archive to. buf := new(bytes.Buffer) // Create a new zip archive. w := zip.NewWriter(buf) // Write files for fpath, fcont := range files { f, err := w.Create(fpath) if err != nil { return err } _, err = f.Write([]byte(fcont)) if err != nil { return err } } // Make sure to check the error on Close. 
err := w.Close() if err != nil { return err } err = ioutil.WriteFile(archiveName, buf.Bytes(), defaultFilePermissions) if err != nil { return err } return nil } ================================================ FILE: vendor/github.com/cihub/seelog/internals_xmlnode.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
package seelog import ( "encoding/xml" "errors" "fmt" "io" "strings" ) type xmlNode struct { name string attributes map[string]string children []*xmlNode value string } func newNode() *xmlNode { node := new(xmlNode) node.children = make([]*xmlNode, 0) node.attributes = make(map[string]string) return node } func (node *xmlNode) String() string { str := fmt.Sprintf("<%s", node.name) for attrName, attrVal := range node.attributes { str += fmt.Sprintf(" %s=\"%s\"", attrName, attrVal) } str += ">" str += node.value if len(node.children) != 0 { for _, child := range node.children { str += fmt.Sprintf("%s", child) } } str += fmt.Sprintf("", node.name) return str } func (node *xmlNode) unmarshal(startEl xml.StartElement) error { node.name = startEl.Name.Local for _, v := range startEl.Attr { _, alreadyExists := node.attributes[v.Name.Local] if alreadyExists { return errors.New("tag '" + node.name + "' has duplicated attribute: '" + v.Name.Local + "'") } node.attributes[v.Name.Local] = v.Value } return nil } func (node *xmlNode) add(child *xmlNode) { if node.children == nil { node.children = make([]*xmlNode, 0) } node.children = append(node.children, child) } func (node *xmlNode) hasChildren() bool { return node.children != nil && len(node.children) > 0 } //============================================= func unmarshalConfig(reader io.Reader) (*xmlNode, error) { xmlParser := xml.NewDecoder(reader) config, err := unmarshalNode(xmlParser, nil) if err != nil { return nil, err } if config == nil { return nil, errors.New("xml has no content") } nextConfigEntry, err := unmarshalNode(xmlParser, nil) if nextConfigEntry != nil { return nil, errors.New("xml contains more than one root element") } return config, nil } func unmarshalNode(xmlParser *xml.Decoder, curToken xml.Token) (node *xmlNode, err error) { firstLoop := true for { var tok xml.Token if firstLoop && curToken != nil { tok = curToken firstLoop = false } else { tok, err = getNextToken(xmlParser) if err != nil || tok == nil 
{ return } } switch tt := tok.(type) { case xml.SyntaxError: err = errors.New(tt.Error()) return case xml.CharData: value := strings.TrimSpace(string([]byte(tt))) if node != nil { node.value += value } case xml.StartElement: if node == nil { node = newNode() err := node.unmarshal(tt) if err != nil { return nil, err } } else { childNode, childErr := unmarshalNode(xmlParser, tok) if childErr != nil { return nil, childErr } if childNode != nil { node.add(childNode) } else { return } } case xml.EndElement: return } } } func getNextToken(xmlParser *xml.Decoder) (tok xml.Token, err error) { if tok, err = xmlParser.Token(); err != nil { if err == io.EOF { err = nil return } return } return } ================================================ FILE: vendor/github.com/cihub/seelog/internals_xmlnode_test.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package seelog import ( "strings" "testing" //"fmt" "reflect" ) var testEnv *testing.T /*func TestWrapper(t *testing.T) { testEnv = t s := "" reader := strings.NewReader(s) config, err := unmarshalConfig(reader) if err != nil { testEnv.Error(err) return } printXML(config, 0) } func printXML(node *xmlNode, level int) { indent := strings.Repeat("\t", level) fmt.Print(indent + node.name) for key, value := range node.attributes { fmt.Print(" " + key + "/" + value) } fmt.Println() for _, child := range node.children { printXML(child, level+1) } }*/ var xmlNodeTests []xmlNodeTest type xmlNodeTest struct { testName string inputXML string expected interface{} errorExpected bool } func getXMLTests() []xmlNodeTest { if xmlNodeTests == nil { xmlNodeTests = make([]xmlNodeTest, 0) testName := "Simple test" testXML := `` testExpected := newNode() testExpected.name = "a" xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false}) testName = "Multiline test" testXML = ` ` testExpected = newNode() testExpected.name = "a" xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false}) testName = "Multiline test #2" testXML = ` ` testExpected = newNode() testExpected.name = "a" xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false}) testName = "Incorrect names" testXML = `< a >< /a >` xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, nil, true}) testName = 
"Comments" testXML = ` ` testExpected = newNode() testExpected.name = "a" xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false}) testName = "Multiple roots" testXML = `` xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, nil, true}) testName = "Multiple roots + incorrect xml" testXML = `` xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, nil, true}) testName = "Some unicode and data" testXML = `<俄语>данные` testExpected = newNode() testExpected.name = "俄语" testExpected.value = "данные" xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false}) testName = "Values and children" testXML = `<俄语>данные` testExpected = newNode() testExpected.name = "俄语" testExpected.value = "данные" child := newNode() child.name = "and_a_child" testExpected.children = append(testExpected.children, child) xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false}) testName = "Just children" testXML = `<俄语>` testExpected = newNode() testExpected.name = "俄语" child = newNode() child.name = "and_a_child" testExpected.children = append(testExpected.children, child) xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false}) testName = "Mixed test" testXML = `<俄语 a="1" b="2.13" c="abc">` testExpected = newNode() testExpected.name = "俄语" testExpected.attributes["a"] = "1" testExpected.attributes["b"] = "2.13" testExpected.attributes["c"] = "abc" child = newNode() child.name = "child" child.attributes["abc"] = "bca" testExpected.children = append(testExpected.children, child) child = newNode() child.name = "child" child.attributes["abc"] = "def" testExpected.children = append(testExpected.children, child) xmlNodeTests = append(xmlNodeTests, xmlNodeTest{testName, testXML, testExpected, false}) } return xmlNodeTests } func TestXmlNode(t *testing.T) { for _, test := range getXMLTests() { reader := strings.NewReader(test.inputXML) parsedXML, err := 
unmarshalConfig(reader) if (err != nil) != test.errorExpected { t.Errorf("\n%s:\nXML input: %s\nExpected error:%t. Got error: %t\n", test.testName, test.inputXML, test.errorExpected, (err != nil)) if err != nil { t.Logf("%s\n", err.Error()) } continue } if err == nil && !reflect.DeepEqual(parsedXML, test.expected) { t.Errorf("\n%s:\nXML input: %s\nExpected: %s. \nGot: %s\n", test.testName, test.inputXML, test.expected, parsedXML) } } } ================================================ FILE: vendor/github.com/cihub/seelog/log.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
package seelog import ( "errors" "fmt" "sync" "time" ) const ( staticFuncCallDepth = 3 // See 'commonLogger.log' method comments loggerFuncCallDepth = 3 ) // Current is the logger used in all package level convenience funcs like 'Trace', 'Debug', 'Flush', etc. var Current LoggerInterface // Default logger that is created from an empty config: "". It is not closed by a ReplaceLogger call. var Default LoggerInterface // Disabled logger that doesn't produce any output in any circumstances. It is neither closed nor flushed by a ReplaceLogger call. var Disabled LoggerInterface var pkgOperationsMutex *sync.Mutex func init() { pkgOperationsMutex = new(sync.Mutex) var err error if Default == nil { Default, err = LoggerFromConfigAsBytes([]byte("")) } if Disabled == nil { Disabled, err = LoggerFromConfigAsBytes([]byte("")) } if err != nil { panic(fmt.Sprintf("Seelog couldn't start. Error: %s", err.Error())) } Current = Default } func createLoggerFromConfig(config *logConfig) (LoggerInterface, error) { if config.LogType == syncloggerTypeFromString { return newSyncLogger(config), nil } else if config.LogType == asyncLooploggerTypeFromString { return newAsyncLoopLogger(config), nil } else if config.LogType == asyncTimerloggerTypeFromString { logData := config.LoggerData if logData == nil { return nil, errors.New("async timer data not set") } asyncInt, ok := logData.(asyncTimerLoggerData) if !ok { return nil, errors.New("invalid async timer data") } logger, err := newAsyncTimerLogger(config, time.Duration(asyncInt.AsyncInterval)) if !ok { return nil, err } return logger, nil } else if config.LogType == adaptiveLoggerTypeFromString { logData := config.LoggerData if logData == nil { return nil, errors.New("adaptive logger parameters not set") } adaptData, ok := logData.(adaptiveLoggerData) if !ok { return nil, errors.New("invalid adaptive logger parameters") } logger, err := newAsyncAdaptiveLogger( config, time.Duration(adaptData.MinInterval), time.Duration(adaptData.MaxInterval), 
adaptData.CriticalMsgCount, ) if err != nil { return nil, err } return logger, nil } return nil, errors.New("invalid config log type/data") } // UseLogger sets the 'Current' package level logger variable to the specified value. // This variable is used in all Trace/Debug/... package level convenience funcs. // // Example: // // after calling // seelog.UseLogger(somelogger) // the following: // seelog.Debug("abc") // will be equal to // somelogger.Debug("abc") // // IMPORTANT: UseLogger do NOT close the previous logger (only flushes it). So if // you constantly use it to replace loggers and don't close them in other code, you'll // end up having memory leaks. // // To safely replace loggers, use ReplaceLogger. func UseLogger(logger LoggerInterface) error { if logger == nil { return errors.New("logger can not be nil") } pkgOperationsMutex.Lock() defer pkgOperationsMutex.Unlock() oldLogger := Current Current = logger if oldLogger != nil { oldLogger.Flush() } return nil } // ReplaceLogger acts as UseLogger but the logger that was previously // used is disposed (except Default and Disabled loggers). 
// // Example: // import log "github.com/cihub/seelog" // // func main() { // logger, err := log.LoggerFromConfigAsFile("seelog.xml") // // if err != nil { // panic(err) // } // // log.ReplaceLogger(logger) // defer log.Flush() // // log.Trace("test") // log.Debugf("var = %s", "abc") // } func ReplaceLogger(logger LoggerInterface) error { if logger == nil { return errors.New("logger can not be nil") } pkgOperationsMutex.Lock() defer pkgOperationsMutex.Unlock() defer func() { if err := recover(); err != nil { reportInternalError(fmt.Errorf("recovered from panic during ReplaceLogger: %s", err)) } }() if Current == Default { Current.Flush() } else if Current != nil && !Current.Closed() && Current != Disabled { Current.Flush() Current.Close() } Current = logger return nil } // Tracef formats message according to format specifier // and writes to default logger with log level = Trace. func Tracef(format string, params ...interface{}) { pkgOperationsMutex.Lock() defer pkgOperationsMutex.Unlock() Current.traceWithCallDepth(staticFuncCallDepth, newLogFormattedMessage(format, params)) } // Debugf formats message according to format specifier // and writes to default logger with log level = Debug. func Debugf(format string, params ...interface{}) { pkgOperationsMutex.Lock() defer pkgOperationsMutex.Unlock() Current.debugWithCallDepth(staticFuncCallDepth, newLogFormattedMessage(format, params)) } // Infof formats message according to format specifier // and writes to default logger with log level = Info. 
func Infof(format string, params ...interface{}) { pkgOperationsMutex.Lock() defer pkgOperationsMutex.Unlock() Current.infoWithCallDepth(staticFuncCallDepth, newLogFormattedMessage(format, params)) } // Warnf formats message according to format specifier and writes to default logger with log level = Warn func Warnf(format string, params ...interface{}) error { pkgOperationsMutex.Lock() defer pkgOperationsMutex.Unlock() message := newLogFormattedMessage(format, params) Current.warnWithCallDepth(staticFuncCallDepth, message) return errors.New(message.String()) } // Errorf formats message according to format specifier and writes to default logger with log level = Error func Errorf(format string, params ...interface{}) error { pkgOperationsMutex.Lock() defer pkgOperationsMutex.Unlock() message := newLogFormattedMessage(format, params) Current.errorWithCallDepth(staticFuncCallDepth, message) return errors.New(message.String()) } // Criticalf formats message according to format specifier and writes to default logger with log level = Critical func Criticalf(format string, params ...interface{}) error { pkgOperationsMutex.Lock() defer pkgOperationsMutex.Unlock() message := newLogFormattedMessage(format, params) Current.criticalWithCallDepth(staticFuncCallDepth, message) return errors.New(message.String()) } // Trace formats message using the default formats for its operands and writes to default logger with log level = Trace func Trace(v ...interface{}) { pkgOperationsMutex.Lock() defer pkgOperationsMutex.Unlock() Current.traceWithCallDepth(staticFuncCallDepth, newLogMessage(v)) } // Debug formats message using the default formats for its operands and writes to default logger with log level = Debug func Debug(v ...interface{}) { pkgOperationsMutex.Lock() defer pkgOperationsMutex.Unlock() Current.debugWithCallDepth(staticFuncCallDepth, newLogMessage(v)) } // Info formats message using the default formats for its operands and writes to default logger with log level = Info 
func Info(v ...interface{}) { pkgOperationsMutex.Lock() defer pkgOperationsMutex.Unlock() Current.infoWithCallDepth(staticFuncCallDepth, newLogMessage(v)) } // Warn formats message using the default formats for its operands and writes to default logger with log level = Warn func Warn(v ...interface{}) error { pkgOperationsMutex.Lock() defer pkgOperationsMutex.Unlock() message := newLogMessage(v) Current.warnWithCallDepth(staticFuncCallDepth, message) return errors.New(message.String()) } // Error formats message using the default formats for its operands and writes to default logger with log level = Error func Error(v ...interface{}) error { pkgOperationsMutex.Lock() defer pkgOperationsMutex.Unlock() message := newLogMessage(v) Current.errorWithCallDepth(staticFuncCallDepth, message) return errors.New(message.String()) } // Critical formats message using the default formats for its operands and writes to default logger with log level = Critical func Critical(v ...interface{}) error { pkgOperationsMutex.Lock() defer pkgOperationsMutex.Unlock() message := newLogMessage(v) Current.criticalWithCallDepth(staticFuncCallDepth, message) return errors.New(message.String()) } // Flush immediately processes all currently queued messages and all currently buffered messages. // It is a blocking call which returns only after the queue is empty and all the buffers are empty. // // If Flush is called for a synchronous logger (type='sync'), it only flushes buffers (e.g. '' receivers) // , because there is no queue. // // Call this method when your app is going to shut down not to lose any log messages. func Flush() { pkgOperationsMutex.Lock() defer pkgOperationsMutex.Unlock() Current.Flush() } ================================================ FILE: vendor/github.com/cihub/seelog/logger.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package seelog import ( "errors" "fmt" "os" "sync" ) func reportInternalError(err error) { fmt.Fprintln(os.Stderr, "Seelog error: "+err.Error()) } // LoggerInterface represents structs capable of logging Seelog messages type LoggerInterface interface { // Tracef formats message according to format specifier // and writes to log with level = Trace. Tracef(format string, params ...interface{}) // Debugf formats message according to format specifier // and writes to log with level = Debug. Debugf(format string, params ...interface{}) // Infof formats message according to format specifier // and writes to log with level = Info. 
Infof(format string, params ...interface{}) // Warnf formats message according to format specifier // and writes to log with level = Warn. Warnf(format string, params ...interface{}) error // Errorf formats message according to format specifier // and writes to log with level = Error. Errorf(format string, params ...interface{}) error // Criticalf formats message according to format specifier // and writes to log with level = Critical. Criticalf(format string, params ...interface{}) error // Trace formats message using the default formats for its operands // and writes to log with level = Trace Trace(v ...interface{}) // Debug formats message using the default formats for its operands // and writes to log with level = Debug Debug(v ...interface{}) // Info formats message using the default formats for its operands // and writes to log with level = Info Info(v ...interface{}) // Warn formats message using the default formats for its operands // and writes to log with level = Warn Warn(v ...interface{}) error // Error formats message using the default formats for its operands // and writes to log with level = Error Error(v ...interface{}) error // Critical formats message using the default formats for its operands // and writes to log with level = Critical Critical(v ...interface{}) error traceWithCallDepth(callDepth int, message fmt.Stringer) debugWithCallDepth(callDepth int, message fmt.Stringer) infoWithCallDepth(callDepth int, message fmt.Stringer) warnWithCallDepth(callDepth int, message fmt.Stringer) errorWithCallDepth(callDepth int, message fmt.Stringer) criticalWithCallDepth(callDepth int, message fmt.Stringer) // Close flushes all the messages in the logger and closes it. It cannot be used after this operation. Close() // Flush flushes all the messages in the logger. Flush() // Closed returns true if the logger was previously closed. 
Closed() bool // SetAdditionalStackDepth sets the additional number of frames to skip by runtime.Caller // when getting function information needed to print seelog format identifiers such as %Func or %File. // // This func may be used when you wrap seelog funcs and want to print caller info of you own // wrappers instead of seelog func callers. In this case you should set depth = 1. If you then // wrap your wrapper, you should set depth = 2, etc. // // NOTE: Incorrect depth value may lead to errors in runtime.Caller evaluation or incorrect // function/file names in log files. Do not use it if you are not going to wrap seelog funcs. // You may reset the value to default using a SetAdditionalStackDepth(0) call. SetAdditionalStackDepth(depth int) error } // innerLoggerInterface is an internal logging interface type innerLoggerInterface interface { innerLog(level LogLevel, context LogContextInterface, message fmt.Stringer) Flush() } // [file path][func name][level] -> [allowed] type allowedContextCache map[string]map[string]map[LogLevel]bool // commonLogger contains all common data needed for logging and contains methods used to log messages. type commonLogger struct { config *logConfig // Config used for logging contextCache allowedContextCache // Caches whether log is enabled for specific "full path-func name-level" sets closed bool // 'true' when all writers are closed, all data is flushed, logger is unusable. 
m sync.Mutex // Mutex for main operations unusedLevels []bool innerLogger innerLoggerInterface addStackDepth int // Additional stack depth needed for correct seelog caller context detection } func newCommonLogger(config *logConfig, internalLogger innerLoggerInterface) *commonLogger { cLogger := new(commonLogger) cLogger.config = config cLogger.contextCache = make(allowedContextCache) cLogger.unusedLevels = make([]bool, Off) cLogger.fillUnusedLevels() cLogger.innerLogger = internalLogger return cLogger } func (cLogger *commonLogger) SetAdditionalStackDepth(depth int) error { if depth < 0 { return fmt.Errorf("negative depth: %d", depth) } cLogger.m.Lock() cLogger.addStackDepth = depth cLogger.m.Unlock() return nil } func (cLogger *commonLogger) Tracef(format string, params ...interface{}) { cLogger.traceWithCallDepth(loggerFuncCallDepth, newLogFormattedMessage(format, params)) } func (cLogger *commonLogger) Debugf(format string, params ...interface{}) { cLogger.debugWithCallDepth(loggerFuncCallDepth, newLogFormattedMessage(format, params)) } func (cLogger *commonLogger) Infof(format string, params ...interface{}) { cLogger.infoWithCallDepth(loggerFuncCallDepth, newLogFormattedMessage(format, params)) } func (cLogger *commonLogger) Warnf(format string, params ...interface{}) error { message := newLogFormattedMessage(format, params) cLogger.warnWithCallDepth(loggerFuncCallDepth, message) return errors.New(message.String()) } func (cLogger *commonLogger) Errorf(format string, params ...interface{}) error { message := newLogFormattedMessage(format, params) cLogger.errorWithCallDepth(loggerFuncCallDepth, message) return errors.New(message.String()) } func (cLogger *commonLogger) Criticalf(format string, params ...interface{}) error { message := newLogFormattedMessage(format, params) cLogger.criticalWithCallDepth(loggerFuncCallDepth, message) return errors.New(message.String()) } func (cLogger *commonLogger) Trace(v ...interface{}) { 
cLogger.traceWithCallDepth(loggerFuncCallDepth, newLogMessage(v)) } func (cLogger *commonLogger) Debug(v ...interface{}) { cLogger.debugWithCallDepth(loggerFuncCallDepth, newLogMessage(v)) } func (cLogger *commonLogger) Info(v ...interface{}) { cLogger.infoWithCallDepth(loggerFuncCallDepth, newLogMessage(v)) } func (cLogger *commonLogger) Warn(v ...interface{}) error { message := newLogMessage(v) cLogger.warnWithCallDepth(loggerFuncCallDepth, message) return errors.New(message.String()) } func (cLogger *commonLogger) Error(v ...interface{}) error { message := newLogMessage(v) cLogger.errorWithCallDepth(loggerFuncCallDepth, message) return errors.New(message.String()) } func (cLogger *commonLogger) Critical(v ...interface{}) error { message := newLogMessage(v) cLogger.criticalWithCallDepth(loggerFuncCallDepth, message) return errors.New(message.String()) } func (cLogger *commonLogger) traceWithCallDepth(callDepth int, message fmt.Stringer) { cLogger.log(TraceLvl, message, callDepth) } func (cLogger *commonLogger) debugWithCallDepth(callDepth int, message fmt.Stringer) { cLogger.log(DebugLvl, message, callDepth) } func (cLogger *commonLogger) infoWithCallDepth(callDepth int, message fmt.Stringer) { cLogger.log(InfoLvl, message, callDepth) } func (cLogger *commonLogger) warnWithCallDepth(callDepth int, message fmt.Stringer) { cLogger.log(WarnLvl, message, callDepth) } func (cLogger *commonLogger) errorWithCallDepth(callDepth int, message fmt.Stringer) { cLogger.log(ErrorLvl, message, callDepth) } func (cLogger *commonLogger) criticalWithCallDepth(callDepth int, message fmt.Stringer) { cLogger.log(CriticalLvl, message, callDepth) cLogger.innerLogger.Flush() } func (cLogger *commonLogger) Closed() bool { return cLogger.closed } func (cLogger *commonLogger) fillUnusedLevels() { for i := 0; i < len(cLogger.unusedLevels); i++ { cLogger.unusedLevels[i] = true } cLogger.fillUnusedLevelsByContraint(cLogger.config.Constraints) for _, exception := range 
cLogger.config.Exceptions { cLogger.fillUnusedLevelsByContraint(exception) } } func (cLogger *commonLogger) fillUnusedLevelsByContraint(constraint logLevelConstraints) { for i := 0; i < len(cLogger.unusedLevels); i++ { if constraint.IsAllowed(LogLevel(i)) { cLogger.unusedLevels[i] = false } } } // stackCallDepth is used to indicate the call depth of 'log' func. // This depth level is used in the runtime.Caller(...) call. See // common_context.go -> specificContext, extractCallerInfo for details. func (cLogger *commonLogger) log( level LogLevel, message fmt.Stringer, stackCallDepth int) { cLogger.m.Lock() defer cLogger.m.Unlock() if cLogger.Closed() { return } if cLogger.unusedLevels[level] { return } context, _ := specificContext(stackCallDepth + cLogger.addStackDepth) // Context errors are not reported because there are situations // in which context errors are normal Seelog usage cases. For // example in executables with stripped symbols. // Error contexts are returned instead. See common_context.go. 
/*if err != nil { reportInternalError(err) return }*/ cLogger.innerLogger.innerLog(level, context, message) } func (cLogger *commonLogger) processLogMsg( level LogLevel, message fmt.Stringer, context LogContextInterface) { defer func() { if err := recover(); err != nil { reportInternalError(fmt.Errorf("recovered from panic during message processing: %s", err)) } }() if cLogger.config.IsAllowed(level, context) { cLogger.config.RootDispatcher.Dispatch(message.String(), level, context, reportInternalError) } } func (cLogger *commonLogger) isAllowed(level LogLevel, context LogContextInterface) bool { funcMap, ok := cLogger.contextCache[context.FullPath()] if !ok { funcMap = make(map[string]map[LogLevel]bool, 0) cLogger.contextCache[context.FullPath()] = funcMap } levelMap, ok := funcMap[context.Func()] if !ok { levelMap = make(map[LogLevel]bool, 0) funcMap[context.Func()] = levelMap } isAllowValue, ok := levelMap[level] if !ok { isAllowValue = cLogger.config.IsAllowed(level, context) levelMap[level] = isAllowValue } return isAllowValue } type logMessage struct { params []interface{} } type logFormattedMessage struct { format string params []interface{} } func newLogMessage(params []interface{}) fmt.Stringer { message := new(logMessage) message.params = params return message } func newLogFormattedMessage(format string, params []interface{}) *logFormattedMessage { message := new(logFormattedMessage) message.params = params message.format = format return message } func (message *logMessage) String() string { return fmt.Sprint(message.params...) } func (message *logFormattedMessage) String() string { return fmt.Sprintf(message.format, message.params...) } ================================================ FILE: vendor/github.com/cihub/seelog/writers_bufferedwriter.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. 
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package seelog import ( "bufio" "errors" "fmt" "io" "sync" "time" ) // bufferedWriter stores data in memory and flushes it every flushPeriod or when buffer is full type bufferedWriter struct { flushPeriod time.Duration // data flushes interval (in microseconds) bufferMutex *sync.Mutex // mutex for buffer operations syncronization innerWriter io.Writer // inner writer buffer *bufio.Writer // buffered wrapper for inner writer bufferSize int // max size of data chunk in bytes } // newBufferedWriter creates a new buffered writer struct. // bufferSize -- size of memory buffer in bytes // flushPeriod -- period in which data flushes from memory buffer in milliseconds. 
0 - turn off this functionality func newBufferedWriter(innerWriter io.Writer, bufferSize int, flushPeriod time.Duration) (*bufferedWriter, error) { if innerWriter == nil { return nil, errors.New("argument is nil: innerWriter") } if flushPeriod < 0 { return nil, fmt.Errorf("flushPeriod can not be less than 0. Got: %d", flushPeriod) } if bufferSize <= 0 { return nil, fmt.Errorf("bufferSize can not be less or equal to 0. Got: %d", bufferSize) } buffer := bufio.NewWriterSize(innerWriter, bufferSize) /*if err != nil { return nil, err }*/ newWriter := new(bufferedWriter) newWriter.innerWriter = innerWriter newWriter.buffer = buffer newWriter.bufferSize = bufferSize newWriter.flushPeriod = flushPeriod * 1e6 newWriter.bufferMutex = new(sync.Mutex) if flushPeriod != 0 { go newWriter.flushPeriodically() } return newWriter, nil } func (bufWriter *bufferedWriter) writeBigChunk(bytes []byte) (n int, err error) { bufferedLen := bufWriter.buffer.Buffered() n, err = bufWriter.flushInner() if err != nil { return } written, writeErr := bufWriter.innerWriter.Write(bytes) return bufferedLen + written, writeErr } // Sends data to buffer manager. Waits until all buffers are full. 
func (bufWriter *bufferedWriter) Write(bytes []byte) (n int, err error) { bufWriter.bufferMutex.Lock() defer bufWriter.bufferMutex.Unlock() bytesLen := len(bytes) if bytesLen > bufWriter.bufferSize { return bufWriter.writeBigChunk(bytes) } if bytesLen > bufWriter.buffer.Available() { n, err = bufWriter.flushInner() if err != nil { return } } bufWriter.buffer.Write(bytes) return len(bytes), nil } func (bufWriter *bufferedWriter) Close() error { closer, ok := bufWriter.innerWriter.(io.Closer) if ok { return closer.Close() } return nil } func (bufWriter *bufferedWriter) Flush() { bufWriter.bufferMutex.Lock() defer bufWriter.bufferMutex.Unlock() bufWriter.flushInner() } func (bufWriter *bufferedWriter) flushInner() (n int, err error) { bufferedLen := bufWriter.buffer.Buffered() flushErr := bufWriter.buffer.Flush() return bufWriter.buffer.Buffered() - bufferedLen, flushErr } func (bufWriter *bufferedWriter) flushBuffer() { bufWriter.bufferMutex.Lock() defer bufWriter.bufferMutex.Unlock() bufWriter.buffer.Flush() } func (bufWriter *bufferedWriter) flushPeriodically() { if bufWriter.flushPeriod > 0 { ticker := time.NewTicker(bufWriter.flushPeriod) for { <-ticker.C bufWriter.flushBuffer() } } } func (bufWriter *bufferedWriter) String() string { return fmt.Sprintf("bufferedWriter size: %d, flushPeriod: %d", bufWriter.bufferSize, bufWriter.flushPeriod) } ================================================ FILE: vendor/github.com/cihub/seelog/writers_bufferedwriter_test.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. 
Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package seelog import ( "testing" ) func TestChunkWriteOnFilling(t *testing.T) { writer, _ := newBytesVerifier(t) bufferedWriter, err := newBufferedWriter(writer, 1024, 0) if err != nil { t.Fatalf("Unexpected buffered writer creation error: %s", err.Error()) } bytes := make([]byte, 1000) bufferedWriter.Write(bytes) writer.ExpectBytes(bytes) bufferedWriter.Write(bytes) } func TestFlushByTimePeriod(t *testing.T) { writer, _ := newBytesVerifier(t) bufferedWriter, err := newBufferedWriter(writer, 1024, 10) if err != nil { t.Fatalf("Unexpected buffered writer creation error: %s", err.Error()) } bytes := []byte("Hello") for i := 0; i < 2; i++ { writer.ExpectBytes(bytes) bufferedWriter.Write(bytes) } } func TestBigMessageMustPassMemoryBuffer(t *testing.T) { writer, _ := newBytesVerifier(t) bufferedWriter, err := newBufferedWriter(writer, 1024, 0) if err != nil { t.Fatalf("Unexpected buffered writer creation error: %s", err.Error()) } bytes := make([]byte, 5000) for i := 0; i < len(bytes); i++ { 
bytes[i] = uint8(i % 255) } writer.ExpectBytes(bytes) bufferedWriter.Write(bytes) } ================================================ FILE: vendor/github.com/cihub/seelog/writers_connwriter.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package seelog import ( "crypto/tls" "fmt" "io" "net" ) // connWriter is used to write to a stream-oriented network connection. type connWriter struct { innerWriter io.WriteCloser reconnectOnMsg bool reconnect bool net string addr string useTLS bool configTLS *tls.Config } // Creates writer to the address addr on the network netName. 
// Connection will be opened on each write if reconnectOnMsg = true func newConnWriter(netName string, addr string, reconnectOnMsg bool) *connWriter { newWriter := new(connWriter) newWriter.net = netName newWriter.addr = addr newWriter.reconnectOnMsg = reconnectOnMsg return newWriter } // Creates a writer that uses SSL/TLS func newTLSWriter(netName string, addr string, reconnectOnMsg bool, config *tls.Config) *connWriter { newWriter := new(connWriter) newWriter.net = netName newWriter.addr = addr newWriter.reconnectOnMsg = reconnectOnMsg newWriter.useTLS = true newWriter.configTLS = config return newWriter } func (connWriter *connWriter) Close() error { if connWriter.innerWriter == nil { return nil } return connWriter.innerWriter.Close() } func (connWriter *connWriter) Write(bytes []byte) (n int, err error) { if connWriter.neededConnectOnMsg() { err = connWriter.connect() if err != nil { return 0, err } } if connWriter.reconnectOnMsg { defer connWriter.innerWriter.Close() } n, err = connWriter.innerWriter.Write(bytes) if err != nil { connWriter.reconnect = true } return } func (connWriter *connWriter) String() string { return fmt.Sprintf("Conn writer: [%s, %s, %v]", connWriter.net, connWriter.addr, connWriter.reconnectOnMsg) } func (connWriter *connWriter) connect() error { if connWriter.innerWriter != nil { connWriter.innerWriter.Close() connWriter.innerWriter = nil } if connWriter.useTLS { conn, err := tls.Dial(connWriter.net, connWriter.addr, connWriter.configTLS) if err != nil { return err } connWriter.innerWriter = conn return nil } conn, err := net.Dial(connWriter.net, connWriter.addr) if err != nil { return err } tcpConn, ok := conn.(*net.TCPConn) if ok { tcpConn.SetKeepAlive(true) } connWriter.innerWriter = conn return nil } func (connWriter *connWriter) neededConnectOnMsg() bool { if connWriter.reconnect { connWriter.reconnect = false return true } if connWriter.innerWriter == nil { return true } return connWriter.reconnectOnMsg } 
================================================ FILE: vendor/github.com/cihub/seelog/writers_consolewriter.go ================================================
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package seelog

import "fmt"

// consoleWriter is used to write to console
type consoleWriter struct {
}

// Creates a new console writer. Returns error, if the console writer couldn't be created.
func newConsoleWriter() (writer *consoleWriter, err error) {
	newWriter := new(consoleWriter)

	return newWriter, nil
}

// Write prints the message bytes to standard output via fmt.Print.
// NOTE(review): the original comment here ("Create folder and file on
// WriteLog/Write first call") was copy-pasted from fileWriter and did not
// describe this method.
func (console *consoleWriter) Write(bytes []byte) (int, error) {
	return fmt.Print(string(bytes))
}

func (console *consoleWriter) String() string {
	return "Console writer"
}

================================================ FILE: vendor/github.com/cihub/seelog/writers_filewriter.go ================================================
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package seelog

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
)

// fileWriter is used to write to a file.
type fileWriter struct {
	innerWriter io.WriteCloser // nil until the first Write creates/opens the file
	fileName    string
}

// Creates a new file and a corresponding writer. Returns error, if the file couldn't be created.
// Note: the file itself is created lazily, on the first Write call (see createFile).
func newFileWriter(fileName string) (writer *fileWriter, err error) {
	newWriter := new(fileWriter)
	newWriter.fileName = fileName

	return newWriter, nil
}

// Close closes the underlying file, if it has been opened, and clears the
// reference so a later Write would re-create it.
func (fw *fileWriter) Close() error {
	if fw.innerWriter != nil {
		err := fw.innerWriter.Close()
		if err != nil {
			return err
		}
		fw.innerWriter = nil
	}
	return nil
}

// Create folder and file on WriteLog/Write first call
func (fw *fileWriter) Write(bytes []byte) (n int, err error) {
	if fw.innerWriter == nil {
		if err := fw.createFile(); err != nil {
			return 0, err
		}
	}
	return fw.innerWriter.Write(bytes)
}

// createFile creates the parent folder (when the path has one) and opens
// the log file in append mode, creating it when missing.
func (fw *fileWriter) createFile() error {
	folder, _ := filepath.Split(fw.fileName)
	var err error

	if 0 != len(folder) {
		err = os.MkdirAll(folder, defaultDirectoryPermissions)
		if err != nil {
			return err
		}
	}

	// If exists
	fw.innerWriter, err = os.OpenFile(fw.fileName, os.O_WRONLY|os.O_APPEND|os.O_CREATE, defaultFilePermissions)
	if err != nil {
		return err
	}

	return nil
}

func (fw *fileWriter) String() string {
	return fmt.Sprintf("File writer: %s", fw.fileName)
}

================================================ FILE: vendor/github.com/cihub/seelog/writers_filewriter_test.go ================================================
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package seelog

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
	"testing"
)

const (
	messageLen = 10
)

// bytesFileTest is the fixed payload written by every test write.
var bytesFileTest = []byte(strings.Repeat("A", messageLen))

func TestSimpleFileWriter(t *testing.T) {
	t.Logf("Starting file writer tests")
	newFileWriterTester(simplefileWriterTests, simplefileWriterGetter, t).test()
}

//===============================================================

func simplefileWriterGetter(testCase *fileWriterTestCase) (io.WriteCloser, error) {
	return newFileWriter(testCase.fileName)
}

//===============================================================

// fileWriterTestCase describes one writer scenario: pre-existing files,
// the writer configuration, how many writes to perform, and the exact set
// of files expected to exist afterwards (resFiles).
type fileWriterTestCase struct {
	files       []string
	fileName    string
	rollingType rollingType
	fileSize    int64
	maxRolls    int
	datePattern string
	writeCount  int
	resFiles    []string
	nameMode    rollingNameMode
}

func createSimplefileWriterTestCase(fileName string, writeCount int) *fileWriterTestCase {
	return &fileWriterTestCase{[]string{}, fileName, rollingTypeSize, 0, 0, "", writeCount, []string{fileName}, 0}
}

var simplefileWriterTests = []*fileWriterTestCase{
	createSimplefileWriterTestCase("log.testlog", 1),
	createSimplefileWriterTestCase("log.testlog", 50),
	createSimplefileWriterTestCase(filepath.Join("dir", "log.testlog"), 50),
}
//===============================================================

// fileWriterTester runs a set of fileWriterTestCase scenarios against
// writers produced by writerGetter, checking the resulting files on disk.
type fileWriterTester struct {
	testCases    []*fileWriterTestCase
	writerGetter func(*fileWriterTestCase) (io.WriteCloser, error)
	t            *testing.T
}

func newFileWriterTester(
	testCases []*fileWriterTestCase,
	writerGetter func(*fileWriterTestCase) (io.WriteCloser, error),
	t *testing.T) *fileWriterTester {

	return &fileWriterTester{testCases, writerGetter, t}
}

func isWriterTestFile(fn string) bool {
	return strings.Contains(fn, ".testlog")
}

// cleanupWriterTest removes all '.testlog' files and the temp 'dir' tree
// left behind by a previous test case.
func cleanupWriterTest(t *testing.T) {
	toDel, err := getDirFilePaths(".", isWriterTestFile, true)
	if nil != err {
		t.Fatal("Cannot list files in test directory!")
	}

	for _, p := range toDel {
		if err = tryRemoveFile(p); nil != err {
			t.Errorf("cannot remove file %s in test directory: %s", p, err.Error())
		}
	}

	if err = os.RemoveAll("dir"); nil != err {
		t.Errorf("cannot remove temp test directory: %s", err.Error())
	}
}

// getWriterTestResultFiles walks the current directory and returns the
// absolute paths of every '.testlog' file found.
func getWriterTestResultFiles() ([]string, error) {
	var p []string

	visit := func(path string, f os.FileInfo, err error) error {
		if !f.IsDir() && isWriterTestFile(path) {
			abs, err := filepath.Abs(path)
			if err != nil {
				return fmt.Errorf("filepath.Abs failed for %s", path)
			}

			p = append(p, abs)
		}

		return nil
	}

	err := filepath.Walk(".", visit)
	if nil != err {
		return nil, err
	}

	return p, nil
}

// testCase creates any pre-existing files the case declares, performs the
// writes, and verifies that exactly the expected files exist afterwards.
func (tester *fileWriterTester) testCase(testCase *fileWriterTestCase, testNum int) {
	defer cleanupWriterTest(tester.t)

	tester.t.Logf("Start test [%v]\n", testNum)

	for _, filePath := range testCase.files {
		dir, _ := filepath.Split(filePath)

		var err error

		if 0 != len(dir) {
			err = os.MkdirAll(dir, defaultDirectoryPermissions)
			if err != nil {
				tester.t.Error(err)
				return
			}
		}

		fi, err := os.Create(filePath)
		if err != nil {
			tester.t.Error(err)
			return
		}

		err = fi.Close()
		if err != nil {
			tester.t.Error(err)
			return
		}
	}

	fwc, err := tester.writerGetter(testCase)
	if err != nil {
		tester.t.Error(err)
		return
	}
	defer fwc.Close()

	tester.performWrite(fwc, testCase.writeCount)

	files, err := getWriterTestResultFiles()
	if err != nil {
		tester.t.Error(err)
		return
	}

	tester.checkRequiredFilesExist(testCase, files)
	tester.checkJustRequiredFilesExist(testCase, files)
}

func (tester *fileWriterTester) test() {
	for i, tc := range tester.testCases {
		cleanupWriterTest(tester.t)
		tester.testCase(tc, i)
	}
}

func (tester *fileWriterTester) performWrite(fileWriter io.Writer, count int) {
	for i := 0; i < count; i++ {
		_, err := fileWriter.Write(bytesFileTest)

		if err != nil {
			tester.t.Error(err)
			return
		}
	}
}

// checkRequiredFilesExist verifies every expected result file is present.
func (tester *fileWriterTester) checkRequiredFilesExist(testCase *fileWriterTestCase, files []string) {
	var found bool
	for _, expected := range testCase.resFiles {
		found = false
		exAbs, err := filepath.Abs(expected)
		if err != nil {
			tester.t.Errorf("filepath.Abs failed for %s", expected)
			continue
		}

		for _, f := range files {
			if af, e := filepath.Abs(f); e == nil {
				tester.t.Log(af)
				if exAbs == af {
					found = true
					break
				}
			} else {
				tester.t.Errorf("filepath.Abs failed for %s", f)
			}
		}

		if !found {
			tester.t.Errorf("expected file: %s doesn't exist. Got %v\n", exAbs, files)
		}
	}
}

// checkJustRequiredFilesExist verifies no unexpected files were produced.
func (tester *fileWriterTester) checkJustRequiredFilesExist(testCase *fileWriterTestCase, files []string) {
	for _, f := range files {
		found := false
		for _, expected := range testCase.resFiles {
			exAbs, err := filepath.Abs(expected)
			if err != nil {
				tester.t.Errorf("filepath.Abs failed for %s", expected)
			} else {
				if exAbs == f {
					found = true
					break
				}
			}
		}

		if !found {
			tester.t.Errorf("unexpected file: %v", f)
		}
	}
}

================================================ FILE: vendor/github.com/cihub/seelog/writers_formattedwriter.go ================================================
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package seelog import ( "errors" "fmt" "io" ) type formattedWriter struct { writer io.Writer formatter *formatter } func newFormattedWriter(writer io.Writer, formatter *formatter) (*formattedWriter, error) { if formatter == nil { return nil, errors.New("formatter can not be nil") } return &formattedWriter{writer, formatter}, nil } func (formattedWriter *formattedWriter) Write(message string, level LogLevel, context LogContextInterface) error { str := formattedWriter.formatter.Format(message, level, context) _, err := formattedWriter.writer.Write([]byte(str)) return err } func (formattedWriter *formattedWriter) String() string { return fmt.Sprintf("writer: %s, format: %s", formattedWriter.writer, formattedWriter.formatter) } func (formattedWriter *formattedWriter) Writer() io.Writer { return formattedWriter.writer } func (formattedWriter *formattedWriter) Format() *formatter { return formattedWriter.formatter } ================================================ FILE: vendor/github.com/cihub/seelog/writers_formattedwriter_test.go ================================================ // Copyright (c) 2012 - Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package seelog

import (
	"testing"
)

// TestformattedWriter checks that a formattedWriter renders a message with
// the given formatter and writes exactly the rendered bytes to its sink.
func TestformattedWriter(t *testing.T) {
	formatStr := "%Level %LEVEL %Msg"
	message := "message"
	var logLevel = LogLevel(TraceLvl)

	bytesVerifier, err := newBytesVerifier(t)
	if err != nil {
		t.Error(err)
		return
	}

	formatter, err := newFormatter(formatStr)
	if err != nil {
		t.Error(err)
		return
	}

	writer, err := newFormattedWriter(bytesVerifier, formatter)
	if err != nil {
		t.Error(err)
		return
	}

	context, err := currentContext()
	if err != nil {
		t.Error(err)
		return
	}

	// The expected output is whatever the formatter itself produces.
	logMessage := formatter.Format(message, logLevel, context)

	bytesVerifier.ExpectBytes([]byte(logMessage))
	writer.Write(message, logLevel, context)
	bytesVerifier.MustNotExpect()
}

================================================ FILE: vendor/github.com/cihub/seelog/writers_rollingfilewriter.go ================================================
// Copyright (c) 2013 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package seelog

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"time"
)

// Common constants
const (
	rollingLogHistoryDelimiter = "."
)

// Types of the rolling writer: roll by date, by time, etc.
type rollingType uint8

const (
	rollingTypeSize = iota
	rollingTypeTime
)

// Types of the rolled file naming mode: prefix, postfix, etc.
type rollingNameMode uint8

const (
	rollingNameModePostfix = iota
	rollingNameModePrefix
)

var rollingNameModesStringRepresentation = map[rollingNameMode]string{
	rollingNameModePostfix: "postfix",
	rollingNameModePrefix:  "prefix",
}

// rollingNameModeFromString performs a reverse lookup of the config string
// in rollingNameModesStringRepresentation.
func rollingNameModeFromString(rollingNameStr string) (rollingNameMode, bool) {
	for tp, tpStr := range rollingNameModesStringRepresentation {
		if tpStr == rollingNameStr {
			return tp, true
		}
	}

	return 0, false
}

type rollingIntervalType uint8

const (
	rollingIntervalAny = iota
	rollingIntervalDaily
)

var rollingInvervalTypesStringRepresentation = map[rollingIntervalType]string{
	rollingIntervalDaily: "daily",
}

func rollingIntervalTypeFromString(rollingTypeStr string) (rollingIntervalType, bool) {
	for tp, tpStr := range rollingInvervalTypesStringRepresentation {
		if tpStr == rollingTypeStr {
			return tp, true
		}
	}

	return 0, false
}

var rollingTypesStringRepresentation = map[rollingType]string{
	rollingTypeSize: "size",
	rollingTypeTime: "date",
}

func rollingTypeFromString(rollingTypeStr string) (rollingType, bool) {
	for tp, tpStr := range rollingTypesStringRepresentation {
		if tpStr == rollingTypeStr {
			return tp, true
		}
	}

	return 0, false
}

// Old logs archivation type.
type rollingArchiveType uint8

const (
	rollingArchiveNone = iota
	rollingArchiveZip
)

var rollingArchiveTypesStringRepresentation = map[rollingArchiveType]string{
	rollingArchiveNone: "none",
	rollingArchiveZip:  "zip",
}

func rollingArchiveTypeFromString(rollingArchiveTypeStr string) (rollingArchiveType, bool) {
	for tp, tpStr := range rollingArchiveTypesStringRepresentation {
		if tpStr == rollingArchiveTypeStr {
			return tp, true
		}
	}

	return 0, false
}

// Default names for different archivation types
var rollingArchiveTypesDefaultNames = map[rollingArchiveType]string{
	rollingArchiveZip: "log.zip",
}

// rollerVirtual is an interface that represents all virtual funcs that are
// called in different rolling writer subtypes.
type rollerVirtual interface { needsToRoll() (bool, error) // Returns true if needs to switch to another file. isFileRollNameValid(rname string) bool // Returns true if logger roll file name (postfix/prefix/etc.) is ok. sortFileRollNamesAsc(fs []string) ([]string, error) // Sorts logger roll file names in ascending order of their creation by logger. // Creates a new froll history file using the contents of current file and special filename of the latest roll (prefix/ postfix). // If lastRollName is empty (""), then it means that there is no latest roll (current is the first one) getNewHistoryRollFileName(lastRollName string) string getCurrentModifiedFileName(originalFileName string) string // Returns filename modified according to specific logger rules } // rollingFileWriter writes received messages to a file, until time interval passes // or file exceeds a specified limit. After that the current log file is renamed // and writer starts to log into a new file. You can set a limit for such renamed // files count, if you want, and then the rolling writer would delete older ones when // the files count exceed the specified limit. type rollingFileWriter struct { fileName string // current file name. May differ from original in date rolling loggers originalFileName string // original one currentDirPath string currentFile *os.File currentFileSize int64 rollingType rollingType // Rolling mode (Files roll by size/date/...) archiveType rollingArchiveType archivePath string maxRolls int nameMode rollingNameMode self rollerVirtual // Used for virtual calls } func newRollingFileWriter(fpath string, rtype rollingType, atype rollingArchiveType, apath string, maxr int, namemode rollingNameMode) (*rollingFileWriter, error) { rw := new(rollingFileWriter) rw.currentDirPath, rw.fileName = filepath.Split(fpath) if len(rw.currentDirPath) == 0 { rw.currentDirPath = "." 
} rw.originalFileName = rw.fileName rw.rollingType = rtype rw.archiveType = atype rw.archivePath = apath rw.nameMode = namemode rw.maxRolls = maxr return rw, nil } func (rw *rollingFileWriter) hasRollName(file string) bool { switch rw.nameMode { case rollingNameModePostfix: rname := rw.originalFileName + rollingLogHistoryDelimiter return strings.HasPrefix(file, rname) case rollingNameModePrefix: rname := rollingLogHistoryDelimiter + rw.originalFileName return strings.HasSuffix(file, rname) } return false } func (rw *rollingFileWriter) createFullFileName(originalName, rollname string) string { switch rw.nameMode { case rollingNameModePostfix: return originalName + rollingLogHistoryDelimiter + rollname case rollingNameModePrefix: return rollname + rollingLogHistoryDelimiter + originalName } return "" } func (rw *rollingFileWriter) getSortedLogHistory() ([]string, error) { files, err := getDirFilePaths(rw.currentDirPath, nil, true) if err != nil { return nil, err } var validRollNames []string for _, file := range files { if file != rw.fileName && rw.hasRollName(file) { rname := rw.getFileRollName(file) if rw.self.isFileRollNameValid(rname) { validRollNames = append(validRollNames, rname) } } } sortedTails, err := rw.self.sortFileRollNamesAsc(validRollNames) if err != nil { return nil, err } validSortedFiles := make([]string, len(sortedTails)) for i, v := range sortedTails { validSortedFiles[i] = rw.createFullFileName(rw.originalFileName, v) } return validSortedFiles, nil } func (rw *rollingFileWriter) createFileAndFolderIfNeeded() error { var err error if len(rw.currentDirPath) != 0 { err = os.MkdirAll(rw.currentDirPath, defaultDirectoryPermissions) if err != nil { return err } } rw.fileName = rw.self.getCurrentModifiedFileName(rw.originalFileName) filePath := filepath.Join(rw.currentDirPath, rw.fileName) // If exists stat, err := os.Lstat(filePath) if err == nil { rw.currentFile, err = os.OpenFile(filePath, os.O_WRONLY|os.O_APPEND, defaultFilePermissions) stat, err = 
os.Lstat(filePath) if err != nil { return err } rw.currentFileSize = stat.Size() } else { rw.currentFile, err = os.Create(filePath) rw.currentFileSize = 0 } if err != nil { return err } return nil } func (rw *rollingFileWriter) deleteOldRolls(history []string) error { if rw.maxRolls <= 0 { return nil } rollsToDelete := len(history) - rw.maxRolls if rollsToDelete <= 0 { return nil } switch rw.archiveType { case rollingArchiveZip: var files map[string][]byte // If archive exists _, err := os.Lstat(rw.archivePath) if nil == err { // Extract files and content from it files, err = unzip(rw.archivePath) if err != nil { return err } // Remove the original file err = tryRemoveFile(rw.archivePath) if err != nil { return err } } else { files = make(map[string][]byte) } // Add files to the existing files map, filled above for i := 0; i < rollsToDelete; i++ { rollPath := filepath.Join(rw.currentDirPath, history[i]) bts, err := ioutil.ReadFile(rollPath) if err != nil { return err } files[rollPath] = bts } // Put the final file set to zip file. err = createZip(rw.archivePath, files) if err != nil { return err } } // In all cases (archive files or not) the files should be deleted. 
for i := 0; i < rollsToDelete; i++ { rollPath := filepath.Join(rw.currentDirPath, history[i]) err := tryRemoveFile(rollPath) if err != nil { return err } } return nil } func (rw *rollingFileWriter) getFileRollName(fileName string) string { switch rw.nameMode { case rollingNameModePostfix: return fileName[len(rw.originalFileName+rollingLogHistoryDelimiter):] case rollingNameModePrefix: return fileName[:len(fileName)-len(rw.originalFileName+rollingLogHistoryDelimiter)] } return "" } func (rw *rollingFileWriter) Write(bytes []byte) (n int, err error) { if rw.currentFile == nil { err := rw.createFileAndFolderIfNeeded() if err != nil { return 0, err } } // needs to roll if: // * file roller max file size exceeded OR // * time roller interval passed nr, err := rw.self.needsToRoll() if err != nil { return 0, err } if nr { // First, close current file. err = rw.currentFile.Close() if err != nil { return 0, err } // Current history of all previous log files. // For file roller it may be like this: // * ... // * file.log.4 // * file.log.5 // * file.log.6 // // For date roller it may look like this: // * ... // * file.log.11.Aug.13 // * file.log.15.Aug.13 // * file.log.16.Aug.13 // Sorted log history does NOT include current file. history, err := rw.getSortedLogHistory() if err != nil { return 0, err } // Renames current file to create a new roll history entry // For file roller it may be like this: // * ... // * file.log.4 // * file.log.5 // * file.log.6 // n file.log.7 <---- RENAMED (from file.log) // Time rollers that doesn't modify file names (e.g. 'date' roller) skip this logic. 
var newHistoryName string var newRollMarkerName string if len(history) > 0 { // Create new rname name using last history file name newRollMarkerName = rw.self.getNewHistoryRollFileName(rw.getFileRollName(history[len(history)-1])) } else { // Create first rname name newRollMarkerName = rw.self.getNewHistoryRollFileName("") } if len(newRollMarkerName) != 0 { newHistoryName = rw.createFullFileName(rw.fileName, newRollMarkerName) } else { newHistoryName = rw.fileName } if newHistoryName != rw.fileName { err = os.Rename(filepath.Join(rw.currentDirPath, rw.fileName), filepath.Join(rw.currentDirPath, newHistoryName)) if err != nil { return 0, err } } // Finally, add the newly added history file to the history archive // and, if after that the archive exceeds the allowed max limit, older rolls // must the removed/archived. history = append(history, newHistoryName) if len(history) > rw.maxRolls { err = rw.deleteOldRolls(history) if err != nil { return 0, err } } err = rw.createFileAndFolderIfNeeded() if err != nil { return 0, err } } rw.currentFileSize += int64(len(bytes)) return rw.currentFile.Write(bytes) } func (rw *rollingFileWriter) Close() error { if rw.currentFile != nil { e := rw.currentFile.Close() if e != nil { return e } rw.currentFile = nil } return nil } // ============================================================================================= // Different types of rolling writers // ============================================================================================= // -------------------------------------------------- // Rolling writer by SIZE // -------------------------------------------------- // rollingFileWriterSize performs roll when file exceeds a specified limit. 
type rollingFileWriterSize struct {
	*rollingFileWriter
	maxFileSize int64
}

func newRollingFileWriterSize(fpath string, atype rollingArchiveType, apath string, maxSize int64, maxRolls int, namemode rollingNameMode) (*rollingFileWriterSize, error) {
	rw, err := newRollingFileWriter(fpath, rollingTypeSize, atype, apath, maxRolls, namemode)
	if err != nil {
		return nil, err
	}
	rws := &rollingFileWriterSize{rw, maxSize}
	// Wire up the virtual calls so the embedded writer dispatches to this subtype.
	rws.self = rws
	return rws, nil
}

// needsToRoll: roll once the tracked file size reaches the limit.
func (rws *rollingFileWriterSize) needsToRoll() (bool, error) {
	return rws.currentFileSize >= rws.maxFileSize, nil
}

// isFileRollNameValid: size-roll markers are non-empty decimal integers.
func (rws *rollingFileWriterSize) isFileRollNameValid(rname string) bool {
	if len(rname) == 0 {
		return false
	}
	_, err := strconv.Atoi(rname)
	return err == nil
}

// rollSizeFileTailsSlice sorts numeric roll markers by integer value.
type rollSizeFileTailsSlice []string

func (p rollSizeFileTailsSlice) Len() int { return len(p) }
func (p rollSizeFileTailsSlice) Less(i, j int) bool {
	v1, _ := strconv.Atoi(p[i])
	v2, _ := strconv.Atoi(p[j])
	return v1 < v2
}
func (p rollSizeFileTailsSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }

func (rws *rollingFileWriterSize) sortFileRollNamesAsc(fs []string) ([]string, error) {
	ss := rollSizeFileTailsSlice(fs)
	sort.Sort(ss)
	return ss, nil
}

// getNewHistoryRollFileName: next roll marker is last marker + 1 (or "1"
// when there is no history yet).
func (rws *rollingFileWriterSize) getNewHistoryRollFileName(lastRollName string) string {
	v := 0
	if len(lastRollName) != 0 {
		v, _ = strconv.Atoi(lastRollName)
	}
	return fmt.Sprintf("%d", v+1)
}

// The size roller logs to the original file name unchanged.
func (rws *rollingFileWriterSize) getCurrentModifiedFileName(originalFileName string) string {
	return originalFileName
}

func (rws *rollingFileWriterSize) String() string {
	return fmt.Sprintf("Rolling file writer (By SIZE): filename: %s, archive: %s, archivefile: %s, maxFileSize: %v, maxRolls: %v",
		rws.fileName,
		rollingArchiveTypesStringRepresentation[rws.archiveType],
		rws.archivePath,
		rws.maxFileSize,
		rws.maxRolls)
}

// --------------------------------------------------
//      Rolling writer by TIME
// --------------------------------------------------

// rollingFileWriterTime performs roll when a specified time interval has
// passed.
type rollingFileWriterTime struct {
	*rollingFileWriter
	timePattern         string
	interval            rollingIntervalType
	currentTimeFileName string
}

func newRollingFileWriterTime(fpath string, atype rollingArchiveType, apath string, maxr int,
	timePattern string, interval rollingIntervalType, namemode rollingNameMode) (*rollingFileWriterTime, error) {

	rw, err := newRollingFileWriter(fpath, rollingTypeTime, atype, apath, maxr, namemode)
	if err != nil {
		return nil, err
	}
	rws := &rollingFileWriterTime{rw, timePattern, interval, ""}
	// Wire up the virtual calls so the embedded writer dispatches to this subtype.
	rws.self = rws
	return rws, nil
}

// needsToRoll: no roll while the current file name still matches today's
// formatted time. For 'daily', a roll additionally requires >= 24h since
// the time parsed from the current file's roll marker.
// NOTE(review): 'daily' here means a 24-hour span, not a calendar-day
// boundary — confirm that is the intended semantics.
func (rwt *rollingFileWriterTime) needsToRoll() (bool, error) {
	switch rwt.nameMode {
	case rollingNameModePostfix:
		if rwt.originalFileName+rollingLogHistoryDelimiter+time.Now().Format(rwt.timePattern) == rwt.fileName {
			return false, nil
		}
	case rollingNameModePrefix:
		if time.Now().Format(rwt.timePattern)+rollingLogHistoryDelimiter+rwt.originalFileName == rwt.fileName {
			return false, nil
		}
	}
	if rwt.interval == rollingIntervalAny {
		return true, nil
	}

	tprev, err := time.ParseInLocation(rwt.timePattern, rwt.getFileRollName(rwt.fileName), time.Local)
	if err != nil {
		return false, err
	}

	diff := time.Now().Sub(tprev)
	switch rwt.interval {
	case rollingIntervalDaily:
		return diff >= 24*time.Hour, nil
	}
	return false, fmt.Errorf("unknown interval type: %d", rwt.interval)
}

// isFileRollNameValid: time-roll markers must parse with the configured pattern.
func (rwt *rollingFileWriterTime) isFileRollNameValid(rname string) bool {
	if len(rname) == 0 {
		return false
	}
	_, err := time.ParseInLocation(rwt.timePattern, rname, time.Local)
	return err == nil
}

// rollTimeFileTailsSlice sorts time-based roll markers chronologically.
type rollTimeFileTailsSlice struct {
	data    []string
	pattern string
}

func (p rollTimeFileTailsSlice) Len() int { return len(p.data) }
func (p rollTimeFileTailsSlice) Less(i, j int) bool {
	t1, _ := time.ParseInLocation(p.pattern, p.data[i], time.Local)
	t2, _ := time.ParseInLocation(p.pattern, p.data[j], time.Local)
	return t1.Before(t2)
}
func (p rollTimeFileTailsSlice) Swap(i, j int) { p.data[i], p.data[j] = p.data[j], p.data[i] }

func (rwt *rollingFileWriterTime) sortFileRollNamesAsc(fs []string) ([]string, error) {
	ss := rollTimeFileTailsSlice{data: fs, pattern: rwt.timePattern}
	sort.Sort(ss)
	return ss.data, nil
}

// The date roller never renames the current file on roll (the file name
// already carries its time marker), so it has no history roll marker.
func (rwt *rollingFileWriterTime) getNewHistoryRollFileName(lastRollName string) string {
	return ""
}

// getCurrentModifiedFileName embeds the current formatted time into the
// log file name according to the naming mode.
func (rwt *rollingFileWriterTime) getCurrentModifiedFileName(originalFileName string) string {
	switch rwt.nameMode {
	case rollingNameModePostfix:
		return originalFileName + rollingLogHistoryDelimiter + time.Now().Format(rwt.timePattern)
	case rollingNameModePrefix:
		return time.Now().Format(rwt.timePattern) + rollingLogHistoryDelimiter + originalFileName
	}
	return ""
}

func (rwt *rollingFileWriterTime) String() string {
	return fmt.Sprintf("Rolling file writer (By TIME): filename: %s, archive: %s, archivefile: %s, maxInterval: %v, pattern: %s, maxRolls: %v",
		rwt.fileName,
		rollingArchiveTypesStringRepresentation[rwt.archiveType],
		rwt.archivePath,
		rwt.interval,
		rwt.timePattern,
		rwt.maxRolls)
}

================================================ FILE: vendor/github.com/cihub/seelog/writers_rollingfilewriter_test.go ================================================
// Copyright (c) 2012 - Cloud Instruments Co., Ltd.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package seelog

import (
	"fmt"
	"io"
	"testing"
)

// fileWriterTestCase is declared in writers_filewriter_test.go

// createRollingSizeFileWriterTestCase builds a test case for the size roller.
func createRollingSizeFileWriterTestCase(
	files []string,
	fileName string,
	fileSize int64,
	maxRolls int,
	writeCount int,
	resFiles []string,
	nameMode rollingNameMode) *fileWriterTestCase {

	return &fileWriterTestCase{files, fileName, rollingTypeSize, fileSize, maxRolls, "", writeCount, resFiles, nameMode}
}

// createRollingDatefileWriterTestCase builds a test case for the date roller.
func createRollingDatefileWriterTestCase(
	files []string,
	fileName string,
	datePattern string,
	writeCount int,
	resFiles []string,
	nameMode rollingNameMode) *fileWriterTestCase {

	return &fileWriterTestCase{files, fileName, rollingTypeTime, 0, 0, datePattern, writeCount, resFiles, nameMode}
}

func TestRollingFileWriter(t *testing.T) {
	t.Logf("Starting rolling file writer tests")
	newFileWriterTester(rollingfileWriterTests, rollingFileWriterGetter, t).test()
}

//===============================================================

// rollingFileWriterGetter constructs the writer under test for a case,
// dispatching on its rollingType.
func rollingFileWriterGetter(testCase *fileWriterTestCase) (io.WriteCloser, error) {
	if testCase.rollingType == rollingTypeSize {
		return newRollingFileWriterSize(testCase.fileName, rollingArchiveNone, "", testCase.fileSize, testCase.maxRolls, testCase.nameMode)
	} else if testCase.rollingType == rollingTypeTime {
		return newRollingFileWriterTime(testCase.fileName, rollingArchiveNone, "", -1, testCase.datePattern, rollingIntervalDaily, testCase.nameMode)
	}

	return nil, fmt.Errorf("incorrect rollingType")
}
//=============================================================== var rollingfileWriterTests = []*fileWriterTestCase{ createRollingSizeFileWriterTestCase([]string{}, "log.testlog", 10, 10, 1, []string{"log.testlog"}, rollingNameModePostfix), createRollingSizeFileWriterTestCase([]string{}, "log.testlog", 10, 10, 2, []string{"log.testlog", "log.testlog.1"}, rollingNameModePostfix), createRollingSizeFileWriterTestCase([]string{"1.log.testlog"}, "log.testlog", 10, 10, 2, []string{"log.testlog", "1.log.testlog", "2.log.testlog"}, rollingNameModePrefix), createRollingSizeFileWriterTestCase([]string{"log.testlog.1"}, "log.testlog", 10, 1, 2, []string{"log.testlog", "log.testlog.2"}, rollingNameModePostfix), createRollingSizeFileWriterTestCase([]string{}, "log.testlog", 10, 1, 2, []string{"log.testlog", "log.testlog.1"}, rollingNameModePostfix), createRollingSizeFileWriterTestCase([]string{"log.testlog.9"}, "log.testlog", 10, 1, 2, []string{"log.testlog", "log.testlog.10"}, rollingNameModePostfix), createRollingSizeFileWriterTestCase([]string{"log.testlog.a", "log.testlog.1b"}, "log.testlog", 10, 1, 2, []string{"log.testlog", "log.testlog.1", "log.testlog.a", "log.testlog.1b"}, rollingNameModePostfix), createRollingSizeFileWriterTestCase([]string{}, `dir/log.testlog`, 10, 10, 1, []string{`dir/log.testlog`}, rollingNameModePostfix), createRollingSizeFileWriterTestCase([]string{}, `dir/log.testlog`, 10, 10, 2, []string{`dir/log.testlog`, `dir/1.log.testlog`}, rollingNameModePrefix), createRollingSizeFileWriterTestCase([]string{`dir/dir/log.testlog.1`}, `dir/dir/log.testlog`, 10, 10, 2, []string{`dir/dir/log.testlog`, `dir/dir/log.testlog.1`, `dir/dir/log.testlog.2`}, rollingNameModePostfix), createRollingSizeFileWriterTestCase([]string{`dir/dir/dir/log.testlog.1`}, `dir/dir/dir/log.testlog`, 10, 1, 2, []string{`dir/dir/dir/log.testlog`, `dir/dir/dir/log.testlog.2`}, rollingNameModePostfix), createRollingSizeFileWriterTestCase([]string{}, `./log.testlog`, 10, 1, 2, 
[]string{`log.testlog`, `log.testlog.1`}, rollingNameModePostfix), createRollingSizeFileWriterTestCase([]string{`././././log.testlog.9`}, `log.testlog`, 10, 1, 2, []string{`log.testlog`, `log.testlog.10`}, rollingNameModePostfix), createRollingSizeFileWriterTestCase([]string{"dir/dir/log.testlog.a", "dir/dir/log.testlog.1b"}, "dir/dir/log.testlog", 10, 1, 2, []string{"dir/dir/log.testlog", "dir/dir/log.testlog.1", "dir/dir/log.testlog.a", "dir/dir/log.testlog.1b"}, rollingNameModePostfix), createRollingSizeFileWriterTestCase([]string{}, `././dir/log.testlog`, 10, 10, 1, []string{`dir/log.testlog`}, rollingNameModePostfix), createRollingSizeFileWriterTestCase([]string{}, `././dir/log.testlog`, 10, 10, 2, []string{`dir/log.testlog`, `dir/log.testlog.1`}, rollingNameModePostfix), createRollingSizeFileWriterTestCase([]string{`././dir/dir/log.testlog.1`}, `dir/dir/log.testlog`, 10, 10, 2, []string{`dir/dir/log.testlog`, `dir/dir/log.testlog.1`, `dir/dir/log.testlog.2`}, rollingNameModePostfix), createRollingSizeFileWriterTestCase([]string{`././dir/dir/dir/log.testlog.1`}, `dir/dir/dir/log.testlog`, 10, 1, 2, []string{`dir/dir/dir/log.testlog`, `dir/dir/dir/log.testlog.2`}, rollingNameModePostfix), createRollingSizeFileWriterTestCase([]string{}, `././log.testlog`, 10, 1, 2, []string{`log.testlog`, `log.testlog.1`}, rollingNameModePostfix), createRollingSizeFileWriterTestCase([]string{`././././log.testlog.9`}, `log.testlog`, 10, 1, 2, []string{`log.testlog`, `log.testlog.10`}, rollingNameModePostfix), createRollingSizeFileWriterTestCase([]string{"././dir/dir/log.testlog.a", "././dir/dir/log.testlog.1b"}, "dir/dir/log.testlog", 10, 1, 2, []string{"dir/dir/log.testlog", "dir/dir/log.testlog.1", "dir/dir/log.testlog.a", "dir/dir/log.testlog.1b"}, rollingNameModePostfix), // ==================== } ================================================ FILE: vendor/github.com/cihub/seelog/writers_smtpwriter.go ================================================ // Copyright (c) 2012 - 
Cloud Instruments Co., Ltd. // // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR // ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package seelog import ( "crypto/tls" "crypto/x509" "errors" "fmt" "io/ioutil" "net/smtp" "path/filepath" "strings" ) const ( // Default subject phrase for sending emails. DefaultSubjectPhrase = "Diagnostic message from server: " // Message subject pattern composed according to RFC 5321. rfc5321SubjectPattern = "From: %s <%s>\nSubject: %s\n" ) // smtpWriter is used to send emails via given SMTP-server. 
type smtpWriter struct { auth smtp.Auth hostName string hostPort string hostNameWithPort string senderAddress string senderName string recipientAddresses []string caCertDirPaths []string mailHeaders []string subject string } // newSMTPWriter returns a new SMTP-writer. func newSMTPWriter(sa, sn string, ras []string, hn, hp, un, pwd string, cacdps []string, subj string, headers []string) *smtpWriter { return &smtpWriter{ auth: smtp.PlainAuth("", un, pwd, hn), hostName: hn, hostPort: hp, hostNameWithPort: fmt.Sprintf("%s:%s", hn, hp), senderAddress: sa, senderName: sn, recipientAddresses: ras, caCertDirPaths: cacdps, subject: subj, mailHeaders: headers, } } func prepareMessage(senderAddr, senderName, subject string, body []byte, headers []string) []byte { headerLines := fmt.Sprintf(rfc5321SubjectPattern, senderName, senderAddr, subject); // Build header lines if configured. if headers != nil && len(headers) > 0 { headerLines += strings.Join(headers, "\n") headerLines += "\n" } return append([]byte(headerLines), body...) } // getTLSConfig gets paths of PEM files with certificates, // host server name and tries to create an appropriate TLS.Config. func getTLSConfig(pemFileDirPaths []string, hostName string) (config *tls.Config, err error) { if pemFileDirPaths == nil || len(pemFileDirPaths) == 0 { err = errors.New("invalid PEM file paths") return } pemEncodedContent := []byte{} var ( e error bytes []byte ) // Create a file-filter-by-extension, set aside non-pem files. pemFilePathFilter := func(fp string) bool { if filepath.Ext(fp) == ".pem" { return true } return false } for _, pemFileDirPath := range pemFileDirPaths { pemFilePaths, err := getDirFilePaths(pemFileDirPath, pemFilePathFilter, false) if err != nil { return nil, err } // Put together all the PEM files to decode them as a whole byte slice. for _, pfp := range pemFilePaths { if bytes, e = ioutil.ReadFile(pfp); e == nil { pemEncodedContent = append(pemEncodedContent, bytes...) 
} else { return nil, fmt.Errorf("cannot read file: %s: %s", pfp, e.Error()) } } } config = &tls.Config{RootCAs: x509.NewCertPool(), ServerName: hostName} isAppended := config.RootCAs.AppendCertsFromPEM(pemEncodedContent) if !isAppended { // Extract this into a separate error. err = errors.New("invalid PEM content") return } return } // SendMail accepts TLS configuration, connects to the server at addr, // switches to TLS if possible, authenticates with mechanism a if possible, // and then sends an email from address from, to addresses to, with message msg. func sendMailWithTLSConfig(config *tls.Config, addr string, a smtp.Auth, from string, to []string, msg []byte) error { c, err := smtp.Dial(addr) if err != nil { return err } // Check if the server supports STARTTLS extension. if ok, _ := c.Extension("STARTTLS"); ok { if err = c.StartTLS(config); err != nil { return err } } // Check if the server supports AUTH extension and use given smtp.Auth. if a != nil { if isSupported, _ := c.Extension("AUTH"); isSupported { if err = c.Auth(a); err != nil { return err } } } // Portion of code from the official smtp.SendMail function, // see http://golang.org/src/pkg/net/smtp/smtp.go. if err = c.Mail(from); err != nil { return err } for _, addr := range to { if err = c.Rcpt(addr); err != nil { return err } } w, err := c.Data() if err != nil { return err } _, err = w.Write(msg) if err != nil { return err } err = w.Close() if err != nil { return err } return c.Quit() } // Write pushes a text message properly composed according to RFC 5321 // to a post server, which sends it to the recipients. 
func (smtpw *smtpWriter) Write(data []byte) (int, error) {
	var err error

	if smtpw.caCertDirPaths == nil {
		// No CA certificate directories configured: send via the
		// standard-library helper with its default TLS handling.
		err = smtp.SendMail(
			smtpw.hostNameWithPort,
			smtpw.auth,
			smtpw.senderAddress,
			smtpw.recipientAddresses,
			prepareMessage(smtpw.senderAddress, smtpw.senderName, smtpw.subject, data, smtpw.mailHeaders),
		)
	} else {
		// CA cert dirs are set: build an explicit tls.Config from the PEM
		// files in those directories and send through our own TLS-aware path.
		config, e := getTLSConfig(smtpw.caCertDirPaths, smtpw.hostName)
		if e != nil {
			return 0, e
		}
		err = sendMailWithTLSConfig(
			config,
			smtpw.hostNameWithPort,
			smtpw.auth,
			smtpw.senderAddress,
			smtpw.recipientAddresses,
			prepareMessage(smtpw.senderAddress, smtpw.senderName, smtpw.subject, data, smtpw.mailHeaders),
		)
	}
	if err != nil {
		return 0, err
	}
	// On success, report the full input length as written (io.Writer contract).
	return len(data), nil
}

// Close closes down SMTP-connection.
func (smtpw *smtpWriter) Close() error {
	// Do nothing as Write method opens and closes connection automatically
	return nil
}


================================================
FILE: vendor/github.com/golang/protobuf/proto/Makefile
================================================
# Go support for Protocol Buffers - Google's data interchange format
#
# Copyright 2010 The Go Authors.  All rights reserved.
# https://github.com/golang/protobuf
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. install: go install test: install generate-test-pbs go test generate-test-pbs: make install make -C testdata protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. proto3_proto/proto3.proto make ================================================ FILE: vendor/github.com/golang/protobuf/proto/all_test.go ================================================ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. 
nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto_test import ( "bytes" "encoding/json" "errors" "fmt" "math" "math/rand" "reflect" "runtime/debug" "strings" "testing" "time" . "github.com/golang/protobuf/proto" . "github.com/golang/protobuf/proto/testdata" ) var globalO *Buffer func old() *Buffer { if globalO == nil { globalO = NewBuffer(nil) } globalO.Reset() return globalO } func equalbytes(b1, b2 []byte, t *testing.T) { if len(b1) != len(b2) { t.Errorf("wrong lengths: 2*%d != %d", len(b1), len(b2)) return } for i := 0; i < len(b1); i++ { if b1[i] != b2[i] { t.Errorf("bad byte[%d]:%x %x: %s %s", i, b1[i], b2[i], b1, b2) } } } func initGoTestField() *GoTestField { f := new(GoTestField) f.Label = String("label") f.Type = String("type") return f } // These are all structurally equivalent but the tag numbers differ. // (It's remarkable that required, optional, and repeated all have // 8 letters.) 
func initGoTest_RequiredGroup() *GoTest_RequiredGroup { return &GoTest_RequiredGroup{ RequiredField: String("required"), } } func initGoTest_OptionalGroup() *GoTest_OptionalGroup { return &GoTest_OptionalGroup{ RequiredField: String("optional"), } } func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup { return &GoTest_RepeatedGroup{ RequiredField: String("repeated"), } } func initGoTest(setdefaults bool) *GoTest { pb := new(GoTest) if setdefaults { pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted) pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted) pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted) pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted) pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted) pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted) pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted) pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted) pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted) pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted) pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted) pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted) } pb.Kind = GoTest_TIME.Enum() pb.RequiredField = initGoTestField() pb.F_BoolRequired = Bool(true) pb.F_Int32Required = Int32(3) pb.F_Int64Required = Int64(6) pb.F_Fixed32Required = Uint32(32) pb.F_Fixed64Required = Uint64(64) pb.F_Uint32Required = Uint32(3232) pb.F_Uint64Required = Uint64(6464) pb.F_FloatRequired = Float32(3232) pb.F_DoubleRequired = Float64(6464) pb.F_StringRequired = String("string") pb.F_BytesRequired = []byte("bytes") pb.F_Sint32Required = Int32(-32) pb.F_Sint64Required = Int64(-64) pb.Requiredgroup = initGoTest_RequiredGroup() return pb } func fail(msg string, b *bytes.Buffer, s string, t *testing.T) { data := b.Bytes() ld := len(data) ls := len(s) / 2 fmt.Printf("fail %s ld=%d ls=%d\n", msg, ld, ls) 
// find the interesting spot - n n := ls if ld < ls { n = ld } j := 0 for i := 0; i < n; i++ { bs := hex(s[j])*16 + hex(s[j+1]) j += 2 if data[i] == bs { continue } n = i break } l := n - 10 if l < 0 { l = 0 } h := n + 10 // find the interesting spot - n fmt.Printf("is[%d]:", l) for i := l; i < h; i++ { if i >= ld { fmt.Printf(" --") continue } fmt.Printf(" %.2x", data[i]) } fmt.Printf("\n") fmt.Printf("sb[%d]:", l) for i := l; i < h; i++ { if i >= ls { fmt.Printf(" --") continue } bs := hex(s[j])*16 + hex(s[j+1]) j += 2 fmt.Printf(" %.2x", bs) } fmt.Printf("\n") t.Fail() // t.Errorf("%s: \ngood: %s\nbad: %x", msg, s, b.Bytes()) // Print the output in a partially-decoded format; can // be helpful when updating the test. It produces the output // that is pasted, with minor edits, into the argument to verify(). // data := b.Bytes() // nesting := 0 // for b.Len() > 0 { // start := len(data) - b.Len() // var u uint64 // u, err := DecodeVarint(b) // if err != nil { // fmt.Printf("decode error on varint:", err) // return // } // wire := u & 0x7 // tag := u >> 3 // switch wire { // case WireVarint: // v, err := DecodeVarint(b) // if err != nil { // fmt.Printf("decode error on varint:", err) // return // } // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", // data[start:len(data)-b.Len()], tag, wire, v) // case WireFixed32: // v, err := DecodeFixed32(b) // if err != nil { // fmt.Printf("decode error on fixed32:", err) // return // } // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", // data[start:len(data)-b.Len()], tag, wire, v) // case WireFixed64: // v, err := DecodeFixed64(b) // if err != nil { // fmt.Printf("decode error on fixed64:", err) // return // } // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n", // data[start:len(data)-b.Len()], tag, wire, v) // case WireBytes: // nb, err := DecodeVarint(b) // if err != nil { // fmt.Printf("decode error on bytes:", err) // return // } // after_tag := len(data) - b.Len() // str := 
make([]byte, nb) // _, err = b.Read(str) // if err != nil { // fmt.Printf("decode error on bytes:", err) // return // } // fmt.Printf("\t\t\"%x\" \"%x\" // field %d, encoding %d (FIELD)\n", // data[start:after_tag], str, tag, wire) // case WireStartGroup: // nesting++ // fmt.Printf("\t\t\"%x\"\t\t// start group field %d level %d\n", // data[start:len(data)-b.Len()], tag, nesting) // case WireEndGroup: // fmt.Printf("\t\t\"%x\"\t\t// end group field %d level %d\n", // data[start:len(data)-b.Len()], tag, nesting) // nesting-- // default: // fmt.Printf("unrecognized wire type %d\n", wire) // return // } // } } func hex(c uint8) uint8 { if '0' <= c && c <= '9' { return c - '0' } if 'a' <= c && c <= 'f' { return 10 + c - 'a' } if 'A' <= c && c <= 'F' { return 10 + c - 'A' } return 0 } func equal(b []byte, s string, t *testing.T) bool { if 2*len(b) != len(s) { // fail(fmt.Sprintf("wrong lengths: 2*%d != %d", len(b), len(s)), b, s, t) fmt.Printf("wrong lengths: 2*%d != %d\n", len(b), len(s)) return false } for i, j := 0, 0; i < len(b); i, j = i+1, j+2 { x := hex(s[j])*16 + hex(s[j+1]) if b[i] != x { // fail(fmt.Sprintf("bad byte[%d]:%x %x", i, b[i], x), b, s, t) fmt.Printf("bad byte[%d]:%x %x", i, b[i], x) return false } } return true } func overify(t *testing.T, pb *GoTest, expected string) { o := old() err := o.Marshal(pb) if err != nil { fmt.Printf("overify marshal-1 err = %v", err) o.DebugPrint("", o.Bytes()) t.Fatalf("expected = %s", expected) } if !equal(o.Bytes(), expected, t) { o.DebugPrint("overify neq 1", o.Bytes()) t.Fatalf("expected = %s", expected) } // Now test Unmarshal by recreating the original buffer. 
pbd := new(GoTest) err = o.Unmarshal(pbd) if err != nil { t.Fatalf("overify unmarshal err = %v", err) o.DebugPrint("", o.Bytes()) t.Fatalf("string = %s", expected) } o.Reset() err = o.Marshal(pbd) if err != nil { t.Errorf("overify marshal-2 err = %v", err) o.DebugPrint("", o.Bytes()) t.Fatalf("string = %s", expected) } if !equal(o.Bytes(), expected, t) { o.DebugPrint("overify neq 2", o.Bytes()) t.Fatalf("string = %s", expected) } } // Simple tests for numeric encode/decode primitives (varint, etc.) func TestNumericPrimitives(t *testing.T) { for i := uint64(0); i < 1e6; i += 111 { o := old() if o.EncodeVarint(i) != nil { t.Error("EncodeVarint") break } x, e := o.DecodeVarint() if e != nil { t.Fatal("DecodeVarint") } if x != i { t.Fatal("varint decode fail:", i, x) } o = old() if o.EncodeFixed32(i) != nil { t.Fatal("encFixed32") } x, e = o.DecodeFixed32() if e != nil { t.Fatal("decFixed32") } if x != i { t.Fatal("fixed32 decode fail:", i, x) } o = old() if o.EncodeFixed64(i*1234567) != nil { t.Error("encFixed64") break } x, e = o.DecodeFixed64() if e != nil { t.Error("decFixed64") break } if x != i*1234567 { t.Error("fixed64 decode fail:", i*1234567, x) break } o = old() i32 := int32(i - 12345) if o.EncodeZigzag32(uint64(i32)) != nil { t.Fatal("EncodeZigzag32") } x, e = o.DecodeZigzag32() if e != nil { t.Fatal("DecodeZigzag32") } if x != uint64(uint32(i32)) { t.Fatal("zigzag32 decode fail:", i32, x) } o = old() i64 := int64(i - 12345) if o.EncodeZigzag64(uint64(i64)) != nil { t.Fatal("EncodeZigzag64") } x, e = o.DecodeZigzag64() if e != nil { t.Fatal("DecodeZigzag64") } if x != uint64(i64) { t.Fatal("zigzag64 decode fail:", i64, x) } } } // fakeMarshaler is a simple struct implementing Marshaler and Message interfaces. 
// fakeMarshaler carries a canned byte slice and error so tests can force
// Marshal to succeed or fail deterministically.
type fakeMarshaler struct {
	b   []byte
	err error
}

// Marshal returns the canned bytes/error verbatim.
func (f fakeMarshaler) Marshal() ([]byte, error) {
	return f.b, f.err
}

func (f fakeMarshaler) String() string {
	return fmt.Sprintf("Bytes: %v Error: %v", f.b, f.err)
}

// ProtoMessage and Reset are no-ops; they exist only to satisfy the
// Message interface.
func (f fakeMarshaler) ProtoMessage() {}

func (f fakeMarshaler) Reset() {}

// Simple tests for proto messages that implement the Marshaler interface.
func TestMarshalerEncoding(t *testing.T) {
	tests := []struct {
		name    string
		m       Message
		want    []byte
		wantErr error
	}{
		{
			name: "Marshaler that fails",
			m: fakeMarshaler{
				err: errors.New("some marshal err"),
				b:   []byte{5, 6, 7},
			},
			// Since there's an error, nothing should be written to buffer.
			want:    nil,
			wantErr: errors.New("some marshal err"),
		},
		{
			name: "Marshaler that succeeds",
			m: fakeMarshaler{
				b: []byte{0, 1, 2, 3, 4, 127, 255},
			},
			want:    []byte{0, 1, 2, 3, 4, 127, 255},
			wantErr: nil,
		},
	}
	for _, test := range tests {
		b := NewBuffer(nil)
		err := b.Marshal(test.m)
		// Errors are compared structurally (DeepEqual), not by identity.
		if !reflect.DeepEqual(test.wantErr, err) {
			t.Errorf("%s: got err %v wanted %v", test.name, err, test.wantErr)
		}
		if !reflect.DeepEqual(test.want, b.Bytes()) {
			t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want)
		}
	}
}

// Simple tests for bytes
func TestBytesPrimitives(t *testing.T) {
	o := old()
	bytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'}
	if o.EncodeRawBytes(bytes) != nil {
		t.Error("EncodeRawBytes")
	}
	decb, e := o.DecodeRawBytes(false)
	if e != nil {
		t.Error("DecodeRawBytes")
	}
	// Round trip: decoded bytes must equal the encoded input.
	equalbytes(bytes, decb, t)
}

// Simple tests for strings
func TestStringPrimitives(t *testing.T) {
	o := old()
	s := "now is the time"
	if o.EncodeStringBytes(s) != nil {
		t.Error("enc_string")
	}
	decs, e := o.DecodeStringBytes()
	if e != nil {
		t.Error("dec_string")
	}
	if s != decs {
		t.Error("string encode/decode fail:", s, decs)
	}
}

// Do we catch the "required bit not set" case?
// TestRequiredBit marshals a zero-value GoTest and expects an error whose
// message names the first missing required field ("Kind").
func TestRequiredBit(t *testing.T) {
	o := old()
	pb := new(GoTest)
	err := o.Marshal(pb)
	if err == nil {
		t.Error("did not catch missing required fields")
	} else if strings.Index(err.Error(), "Kind") < 0 {
		// NOTE(review): strings.Contains would read better; kept as-is in vendored code.
		t.Error("wrong error type:", err)
	}
}

// Check that all fields are nil.
// Clearly silly, and a residue from a more interesting test with an earlier,
// different initialization property, but it once caught a compiler bug so
// it lives.
func checkInitialized(pb *GoTest, t *testing.T) {
	if pb.F_BoolDefaulted != nil {
		t.Error("New or Reset did not set boolean:", *pb.F_BoolDefaulted)
	}
	if pb.F_Int32Defaulted != nil {
		t.Error("New or Reset did not set int32:", *pb.F_Int32Defaulted)
	}
	if pb.F_Int64Defaulted != nil {
		t.Error("New or Reset did not set int64:", *pb.F_Int64Defaulted)
	}
	if pb.F_Fixed32Defaulted != nil {
		t.Error("New or Reset did not set fixed32:", *pb.F_Fixed32Defaulted)
	}
	if pb.F_Fixed64Defaulted != nil {
		t.Error("New or Reset did not set fixed64:", *pb.F_Fixed64Defaulted)
	}
	if pb.F_Uint32Defaulted != nil {
		t.Error("New or Reset did not set uint32:", *pb.F_Uint32Defaulted)
	}
	if pb.F_Uint64Defaulted != nil {
		t.Error("New or Reset did not set uint64:", *pb.F_Uint64Defaulted)
	}
	if pb.F_FloatDefaulted != nil {
		t.Error("New or Reset did not set float:", *pb.F_FloatDefaulted)
	}
	if pb.F_DoubleDefaulted != nil {
		t.Error("New or Reset did not set double:", *pb.F_DoubleDefaulted)
	}
	if pb.F_StringDefaulted != nil {
		t.Error("New or Reset did not set string:", *pb.F_StringDefaulted)
	}
	if pb.F_BytesDefaulted != nil {
		t.Error("New or Reset did not set bytes:", string(pb.F_BytesDefaulted))
	}
	if pb.F_Sint32Defaulted != nil {
		t.Error("New or Reset did not set int32:", *pb.F_Sint32Defaulted)
	}
	if pb.F_Sint64Defaulted != nil {
		t.Error("New or Reset did not set int64:", *pb.F_Sint64Defaulted)
	}
}

// Does Reset() reset?
// TestReset mutates every defaulted field, then verifies Reset() returns
// each one to nil via checkInitialized.
func TestReset(t *testing.T) {
	pb := initGoTest(true)
	// muck with some values
	pb.F_BoolDefaulted = Bool(false)
	pb.F_Int32Defaulted = Int32(237)
	pb.F_Int64Defaulted = Int64(12346)
	pb.F_Fixed32Defaulted = Uint32(32000)
	pb.F_Fixed64Defaulted = Uint64(666)
	pb.F_Uint32Defaulted = Uint32(323232)
	pb.F_Uint64Defaulted = nil
	pb.F_FloatDefaulted = nil
	pb.F_DoubleDefaulted = Float64(0)
	pb.F_StringDefaulted = String("gotcha")
	pb.F_BytesDefaulted = []byte("asdfasdf")
	pb.F_Sint32Defaulted = Int32(123)
	pb.F_Sint64Defaulted = Int64(789)
	pb.Reset()
	checkInitialized(pb, t)
}

// All required fields set, no defaults provided.
// The hex string below is the expected wire encoding; each fragment is
// annotated with the field number and wire type it encodes.
func TestEncodeDecode1(t *testing.T) {
	pb := initGoTest(false)
	overify(t, pb,
		"0807"+ // field 1, encoding 0, value 7
			"220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
			"5001"+ // field 10, encoding 0, value 1
			"5803"+ // field 11, encoding 0, value 3
			"6006"+ // field 12, encoding 0, value 6
			"6d20000000"+ // field 13, encoding 5, value 0x20
			"714000000000000000"+ // field 14, encoding 1, value 0x40
			"78a019"+ // field 15, encoding 0, value 0xca0 = 3232
			"8001c032"+ // field 16, encoding 0, value 0x1940 = 6464
			"8d0100004a45"+ // field 17, encoding 5, value 3232.0
			"9101000000000040b940"+ // field 18, encoding 1, value 6464.0
			"9a0106"+"737472696e67"+ // field 19, encoding 2, string "string"
			"b304"+ // field 70, encoding 3, start group
			"ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
			"b404"+ // field 70, encoding 4, end group
			"aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes"
			"b0063f"+ // field 102, encoding 0, 0x3f zigzag32
			"b8067f") // field 103, encoding 0, 0x7f zigzag64
}

// All required fields set, defaults provided.
func TestEncodeDecode2(t *testing.T) { pb := initGoTest(true) overify(t, pb, "0807"+ // field 1, encoding 0, value 7 "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) "5001"+ // field 10, encoding 0, value 1 "5803"+ // field 11, encoding 0, value 3 "6006"+ // field 12, encoding 0, value 6 "6d20000000"+ // field 13, encoding 5, value 32 "714000000000000000"+ // field 14, encoding 1, value 64 "78a019"+ // field 15, encoding 0, value 3232 "8001c032"+ // field 16, encoding 0, value 6464 "8d0100004a45"+ // field 17, encoding 5, value 3232.0 "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" "c00201"+ // field 40, encoding 0, value 1 "c80220"+ // field 41, encoding 0, value 32 "d00240"+ // field 42, encoding 0, value 64 "dd0240010000"+ // field 43, encoding 5, value 320 "e1028002000000000000"+ // field 44, encoding 1, value 640 "e8028019"+ // field 45, encoding 0, value 3200 "f0028032"+ // field 46, encoding 0, value 6400 "fd02e0659948"+ // field 47, encoding 5, value 314159.0 "81030000000050971041"+ // field 48, encoding 1, value 271828.0 "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" "b304"+ // start group field 70 level 1 "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" "b404"+ // end group field 70 level 1 "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" "90193f"+ // field 402, encoding 0, value 63 "98197f") // field 403, encoding 0, value 127 } // All default fields set to their default value by hand func TestEncodeDecode3(t *testing.T) { pb := initGoTest(false) pb.F_BoolDefaulted = Bool(true) pb.F_Int32Defaulted = Int32(32) pb.F_Int64Defaulted = Int64(64) pb.F_Fixed32Defaulted = Uint32(320) pb.F_Fixed64Defaulted = 
Uint64(640) pb.F_Uint32Defaulted = Uint32(3200) pb.F_Uint64Defaulted = Uint64(6400) pb.F_FloatDefaulted = Float32(314159) pb.F_DoubleDefaulted = Float64(271828) pb.F_StringDefaulted = String("hello, \"world!\"\n") pb.F_BytesDefaulted = []byte("Bignose") pb.F_Sint32Defaulted = Int32(-32) pb.F_Sint64Defaulted = Int64(-64) overify(t, pb, "0807"+ // field 1, encoding 0, value 7 "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) "5001"+ // field 10, encoding 0, value 1 "5803"+ // field 11, encoding 0, value 3 "6006"+ // field 12, encoding 0, value 6 "6d20000000"+ // field 13, encoding 5, value 32 "714000000000000000"+ // field 14, encoding 1, value 64 "78a019"+ // field 15, encoding 0, value 3232 "8001c032"+ // field 16, encoding 0, value 6464 "8d0100004a45"+ // field 17, encoding 5, value 3232.0 "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" "c00201"+ // field 40, encoding 0, value 1 "c80220"+ // field 41, encoding 0, value 32 "d00240"+ // field 42, encoding 0, value 64 "dd0240010000"+ // field 43, encoding 5, value 320 "e1028002000000000000"+ // field 44, encoding 1, value 640 "e8028019"+ // field 45, encoding 0, value 3200 "f0028032"+ // field 46, encoding 0, value 6400 "fd02e0659948"+ // field 47, encoding 5, value 314159.0 "81030000000050971041"+ // field 48, encoding 1, value 271828.0 "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" "b304"+ // start group field 70 level 1 "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" "b404"+ // end group field 70 level 1 "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" "90193f"+ // field 402, encoding 0, value 63 "98197f") // field 403, encoding 0, value 127 } // All required 
fields set, defaults provided, all non-defaulted optional fields have values. func TestEncodeDecode4(t *testing.T) { pb := initGoTest(true) pb.Table = String("hello") pb.Param = Int32(7) pb.OptionalField = initGoTestField() pb.F_BoolOptional = Bool(true) pb.F_Int32Optional = Int32(32) pb.F_Int64Optional = Int64(64) pb.F_Fixed32Optional = Uint32(3232) pb.F_Fixed64Optional = Uint64(6464) pb.F_Uint32Optional = Uint32(323232) pb.F_Uint64Optional = Uint64(646464) pb.F_FloatOptional = Float32(32.) pb.F_DoubleOptional = Float64(64.) pb.F_StringOptional = String("hello") pb.F_BytesOptional = []byte("Bignose") pb.F_Sint32Optional = Int32(-32) pb.F_Sint64Optional = Int64(-64) pb.Optionalgroup = initGoTest_OptionalGroup() overify(t, pb, "0807"+ // field 1, encoding 0, value 7 "1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello" "1807"+ // field 3, encoding 0, value 7 "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) "320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField) "5001"+ // field 10, encoding 0, value 1 "5803"+ // field 11, encoding 0, value 3 "6006"+ // field 12, encoding 0, value 6 "6d20000000"+ // field 13, encoding 5, value 32 "714000000000000000"+ // field 14, encoding 1, value 64 "78a019"+ // field 15, encoding 0, value 3232 "8001c032"+ // field 16, encoding 0, value 6464 "8d0100004a45"+ // field 17, encoding 5, value 3232.0 "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" "f00101"+ // field 30, encoding 0, value 1 "f80120"+ // field 31, encoding 0, value 32 "800240"+ // field 32, encoding 0, value 64 "8d02a00c0000"+ // field 33, encoding 5, value 3232 "91024019000000000000"+ // field 34, encoding 1, value 6464 "9802a0dd13"+ // field 35, encoding 0, value 323232 "a002c0ba27"+ // field 36, encoding 0, value 646464 "ad0200000042"+ // field 37, encoding 5, value 32.0 "b1020000000000005040"+ // field 38, encoding 1, value 64.0 
"ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello" "c00201"+ // field 40, encoding 0, value 1 "c80220"+ // field 41, encoding 0, value 32 "d00240"+ // field 42, encoding 0, value 64 "dd0240010000"+ // field 43, encoding 5, value 320 "e1028002000000000000"+ // field 44, encoding 1, value 640 "e8028019"+ // field 45, encoding 0, value 3200 "f0028032"+ // field 46, encoding 0, value 6400 "fd02e0659948"+ // field 47, encoding 5, value 314159.0 "81030000000050971041"+ // field 48, encoding 1, value 271828.0 "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" "b304"+ // start group field 70 level 1 "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" "b404"+ // end group field 70 level 1 "d305"+ // start group field 90 level 1 "da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional" "d405"+ // end group field 90 level 1 "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 "ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose" "f0123f"+ // field 302, encoding 0, value 63 "f8127f"+ // field 303, encoding 0, value 127 "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" "90193f"+ // field 402, encoding 0, value 63 "98197f") // field 403, encoding 0, value 127 } // All required fields set, defaults provided, all repeated fields given two values. 
func TestEncodeDecode5(t *testing.T) { pb := initGoTest(true) pb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()} pb.F_BoolRepeated = []bool{false, true} pb.F_Int32Repeated = []int32{32, 33} pb.F_Int64Repeated = []int64{64, 65} pb.F_Fixed32Repeated = []uint32{3232, 3333} pb.F_Fixed64Repeated = []uint64{6464, 6565} pb.F_Uint32Repeated = []uint32{323232, 333333} pb.F_Uint64Repeated = []uint64{646464, 656565} pb.F_FloatRepeated = []float32{32., 33.} pb.F_DoubleRepeated = []float64{64., 65.} pb.F_StringRepeated = []string{"hello", "sailor"} pb.F_BytesRepeated = [][]byte{[]byte("big"), []byte("nose")} pb.F_Sint32Repeated = []int32{32, -32} pb.F_Sint64Repeated = []int64{64, -64} pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()} overify(t, pb, "0807"+ // field 1, encoding 0, value 7 "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField) "5001"+ // field 10, encoding 0, value 1 "5803"+ // field 11, encoding 0, value 3 "6006"+ // field 12, encoding 0, value 6 "6d20000000"+ // field 13, encoding 5, value 32 "714000000000000000"+ // field 14, encoding 1, value 64 "78a019"+ // field 15, encoding 0, value 3232 "8001c032"+ // field 16, encoding 0, value 6464 "8d0100004a45"+ // field 17, encoding 5, value 3232.0 "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" "a00100"+ // field 20, encoding 0, value 0 "a00101"+ // field 20, encoding 0, value 1 "a80120"+ // field 21, encoding 0, value 32 "a80121"+ // field 21, encoding 0, value 33 "b00140"+ // field 22, encoding 0, value 64 "b00141"+ // field 22, encoding 0, value 65 "bd01a00c0000"+ // field 23, encoding 5, value 3232 "bd01050d0000"+ // field 23, encoding 5, value 3333 "c1014019000000000000"+ // field 24, encoding 1, 
value 6464 "c101a519000000000000"+ // field 24, encoding 1, value 6565 "c801a0dd13"+ // field 25, encoding 0, value 323232 "c80195ac14"+ // field 25, encoding 0, value 333333 "d001c0ba27"+ // field 26, encoding 0, value 646464 "d001b58928"+ // field 26, encoding 0, value 656565 "dd0100000042"+ // field 27, encoding 5, value 32.0 "dd0100000442"+ // field 27, encoding 5, value 33.0 "e1010000000000005040"+ // field 28, encoding 1, value 64.0 "e1010000000000405040"+ // field 28, encoding 1, value 65.0 "ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello" "ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor" "c00201"+ // field 40, encoding 0, value 1 "c80220"+ // field 41, encoding 0, value 32 "d00240"+ // field 42, encoding 0, value 64 "dd0240010000"+ // field 43, encoding 5, value 320 "e1028002000000000000"+ // field 44, encoding 1, value 640 "e8028019"+ // field 45, encoding 0, value 3200 "f0028032"+ // field 46, encoding 0, value 6400 "fd02e0659948"+ // field 47, encoding 5, value 314159.0 "81030000000050971041"+ // field 48, encoding 1, value 271828.0 "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n" "b304"+ // start group field 70 level 1 "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" "b404"+ // end group field 70 level 1 "8305"+ // start group field 80 level 1 "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" "8405"+ // end group field 80 level 1 "8305"+ // start group field 80 level 1 "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated" "8405"+ // end group field 80 level 1 "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 "ca0c03"+"626967"+ // field 201, encoding 2, string "big" "ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose" "d00c40"+ // field 202, encoding 0, value 32 "d00c3f"+ // field 202, encoding 0, 
value -32 "d80c8001"+ // field 203, encoding 0, value 64 "d80c7f"+ // field 203, encoding 0, value -64 "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose" "90193f"+ // field 402, encoding 0, value 63 "98197f") // field 403, encoding 0, value 127 } // All required fields set, all packed repeated fields given two values. func TestEncodeDecode6(t *testing.T) { pb := initGoTest(false) pb.F_BoolRepeatedPacked = []bool{false, true} pb.F_Int32RepeatedPacked = []int32{32, 33} pb.F_Int64RepeatedPacked = []int64{64, 65} pb.F_Fixed32RepeatedPacked = []uint32{3232, 3333} pb.F_Fixed64RepeatedPacked = []uint64{6464, 6565} pb.F_Uint32RepeatedPacked = []uint32{323232, 333333} pb.F_Uint64RepeatedPacked = []uint64{646464, 656565} pb.F_FloatRepeatedPacked = []float32{32., 33.} pb.F_DoubleRepeatedPacked = []float64{64., 65.} pb.F_Sint32RepeatedPacked = []int32{32, -32} pb.F_Sint64RepeatedPacked = []int64{64, -64} overify(t, pb, "0807"+ // field 1, encoding 0, value 7 "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField) "5001"+ // field 10, encoding 0, value 1 "5803"+ // field 11, encoding 0, value 3 "6006"+ // field 12, encoding 0, value 6 "6d20000000"+ // field 13, encoding 5, value 32 "714000000000000000"+ // field 14, encoding 1, value 64 "78a019"+ // field 15, encoding 0, value 3232 "8001c032"+ // field 16, encoding 0, value 6464 "8d0100004a45"+ // field 17, encoding 5, value 3232.0 "9101000000000040b940"+ // field 18, encoding 1, value 6464.0 "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string" "9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1 "9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33 "a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65 "aa0308"+ // field 53, encoding 2, 8 bytes "a00c0000050d0000"+ // value 3232, value 3333 "b20310"+ // field 54, encoding 2, 16 bytes "4019000000000000a519000000000000"+ // value 6464, value 6565 "ba0306"+ // field 55, encoding 2, 6 bytes 
"a0dd1395ac14"+ // value 323232, value 333333 "c20306"+ // field 56, encoding 2, 6 bytes "c0ba27b58928"+ // value 646464, value 656565 "ca0308"+ // field 57, encoding 2, 8 bytes "0000004200000442"+ // value 32.0, value 33.0 "d20310"+ // field 58, encoding 2, 16 bytes "00000000000050400000000000405040"+ // value 64.0, value 65.0 "b304"+ // start group field 70 level 1 "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required" "b404"+ // end group field 70 level 1 "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes" "b0063f"+ // field 102, encoding 0, 0x3f zigzag32 "b8067f"+ // field 103, encoding 0, 0x7f zigzag64 "b21f02"+ // field 502, encoding 2, 2 bytes "403f"+ // value 32, value -32 "ba1f03"+ // field 503, encoding 2, 3 bytes "80017f") // value 64, value -64 } // Test that we can encode empty bytes fields. func TestEncodeDecodeBytes1(t *testing.T) { pb := initGoTest(false) // Create our bytes pb.F_BytesRequired = []byte{} pb.F_BytesRepeated = [][]byte{{}} pb.F_BytesOptional = []byte{} d, err := Marshal(pb) if err != nil { t.Error(err) } pbd := new(GoTest) if err := Unmarshal(d, pbd); err != nil { t.Error(err) } if pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 { t.Error("required empty bytes field is incorrect") } if pbd.F_BytesRepeated == nil || len(pbd.F_BytesRepeated) == 1 && pbd.F_BytesRepeated[0] == nil { t.Error("repeated empty bytes field is incorrect") } if pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 { t.Error("optional empty bytes field is incorrect") } } // Test that we encode nil-valued fields of a repeated bytes field correctly. // Since entries in a repeated field cannot be nil, nil must mean empty value. 
func TestEncodeDecodeBytes2(t *testing.T) {
	pb := initGoTest(false)

	// Create our bytes
	pb.F_BytesRepeated = [][]byte{nil}

	d, err := Marshal(pb)
	if err != nil {
		t.Error(err)
	}

	pbd := new(GoTest)
	if err := Unmarshal(d, pbd); err != nil {
		t.Error(err)
	}

	// The nil entry must come back as exactly one non-nil element:
	// the wire format cannot represent nil, so decoding materialises
	// an (empty) value.
	if len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil {
		t.Error("Unexpected value for repeated bytes field")
	}
}

// Check that fields the decoder does not recognize are preserved in
// XXX_unrecognized and can themselves be decoded afterwards.
func TestSkippingUnrecognizedFields(t *testing.T) {
	o := old()
	pb := initGoTestField()

	// Marshal it normally.
	o.Marshal(pb)

	// Now new a GoSkipTest record.
	skip := &GoSkipTest{
		SkipInt32:   Int32(32),
		SkipFixed32: Uint32(3232),
		SkipFixed64: Uint64(6464),
		SkipString:  String("skipper"),
		Skipgroup: &GoSkipTest_SkipGroup{
			GroupInt32:  Int32(75),
			GroupString: String("wxyz"),
		},
	}

	// Marshal it into same buffer.
	o.Marshal(skip)

	// Decode as a GoTestField: the GoSkipTest bytes are unknown fields
	// to this type, so they should land in pbd.XXX_unrecognized.
	pbd := new(GoTestField)
	o.Unmarshal(pbd)

	// The __unrecognized field should be a marshaling of GoSkipTest
	skipd := new(GoSkipTest)

	o.SetBuf(pbd.XXX_unrecognized)
	o.Unmarshal(skipd)

	if *skipd.SkipInt32 != *skip.SkipInt32 {
		t.Error("skip int32", skipd.SkipInt32)
	}
	if *skipd.SkipFixed32 != *skip.SkipFixed32 {
		t.Error("skip fixed32", skipd.SkipFixed32)
	}
	if *skipd.SkipFixed64 != *skip.SkipFixed64 {
		t.Error("skip fixed64", skipd.SkipFixed64)
	}
	if *skipd.SkipString != *skip.SkipString {
		t.Error("skip string", *skipd.SkipString)
	}
	if *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 {
		t.Error("skip group int32", skipd.Skipgroup.GroupInt32)
	}
	if *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString {
		t.Error("skip group string", *skipd.Skipgroup.GroupString)
	}
}

// Check that unrecognized fields of a submessage are preserved.
func TestSubmessageUnrecognizedFields(t *testing.T) {
	nm := &NewMessage{
		Nested: &NewMessage_Nested{
			Name:      String("Nigel"),
			FoodGroup: String("carbs"),
		},
	}
	b, err := Marshal(nm)
	if err != nil {
		t.Fatalf("Marshal of NewMessage: %v", err)
	}
	// Unmarshal into an OldMessage.
	om := new(OldMessage)
	if err := Unmarshal(b, om); err != nil {
		t.Fatalf("Unmarshal to OldMessage: %v", err)
	}
	exp := &OldMessage{
		Nested: &OldMessage_Nested{
			Name: String("Nigel"),
			// normal protocol buffer users should not do this
			XXX_unrecognized: []byte("\x12\x05carbs"),
		},
	}
	if !Equal(om, exp) {
		t.Errorf("om = %v, want %v", om, exp)
	}

	// Clone the OldMessage.
	om = Clone(om).(*OldMessage)
	if !Equal(om, exp) {
		t.Errorf("Clone(om) = %v, want %v", om, exp)
	}

	// Marshal the OldMessage, then unmarshal it into an empty NewMessage.
	if b, err = Marshal(om); err != nil {
		t.Fatalf("Marshal of OldMessage: %v", err)
	}
	t.Logf("Marshal(%v) -> %q", om, b)
	nm2 := new(NewMessage)
	if err := Unmarshal(b, nm2); err != nil {
		t.Fatalf("Unmarshal to NewMessage: %v", err)
	}
	if !Equal(nm, nm2) {
		t.Errorf("NewMessage round-trip: %v => %v", nm, nm2)
	}
}

// Check that an int32 field can be upgraded to an int64 field.
func TestNegativeInt32(t *testing.T) {
	om := &OldMessage{
		Num: Int32(-1),
	}
	b, err := Marshal(om)
	if err != nil {
		t.Fatalf("Marshal of OldMessage: %v", err)
	}

	// Check the size. It should be 11 bytes;
	// 1 for the field/wire type, and 10 for the negative number.
	if len(b) != 11 {
		t.Errorf("%v marshaled as %q, wanted 11 bytes", om, b)
	}

	// Unmarshal into a NewMessage.
	nm := new(NewMessage)
	if err := Unmarshal(b, nm); err != nil {
		t.Fatalf("Unmarshal to NewMessage: %v", err)
	}
	want := &NewMessage{
		Num: Int64(-1),
	}
	if !Equal(nm, want) {
		t.Errorf("nm = %v, want %v", nm, want)
	}
}

// Check that we can grow an array (repeated field) to have many elements.
// This test doesn't depend only on our encoding; for variety, it makes sure
// we create, encode, and decode the correct contents explicitly. It's therefore
// a bit messier.
// This test also uses (and hence tests) the Marshal/Unmarshal functions
// instead of the methods.
func TestBigRepeated(t *testing.T) {
	pb := initGoTest(true)

	// Create the arrays
	const N = 50 // Internally the library starts much smaller.
	pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N)
	pb.F_Sint64Repeated = make([]int64, N)
	pb.F_Sint32Repeated = make([]int32, N)
	pb.F_BytesRepeated = make([][]byte, N)
	pb.F_StringRepeated = make([]string, N)
	pb.F_DoubleRepeated = make([]float64, N)
	pb.F_FloatRepeated = make([]float32, N)
	pb.F_Uint64Repeated = make([]uint64, N)
	pb.F_Uint32Repeated = make([]uint32, N)
	pb.F_Fixed64Repeated = make([]uint64, N)
	pb.F_Fixed32Repeated = make([]uint32, N)
	pb.F_Int64Repeated = make([]int64, N)
	pb.F_Int32Repeated = make([]int32, N)
	pb.F_BoolRepeated = make([]bool, N)
	pb.RepeatedField = make([]*GoTestField, N)

	// Fill in the arrays with checkable values.
	igtf := initGoTestField()
	igtrg := initGoTest_RepeatedGroup()
	for i := 0; i < N; i++ {
		pb.Repeatedgroup[i] = igtrg
		pb.F_Sint64Repeated[i] = int64(i)
		pb.F_Sint32Repeated[i] = int32(i)
		s := fmt.Sprint(i)
		pb.F_BytesRepeated[i] = []byte(s)
		pb.F_StringRepeated[i] = s
		pb.F_DoubleRepeated[i] = float64(i)
		pb.F_FloatRepeated[i] = float32(i)
		pb.F_Uint64Repeated[i] = uint64(i)
		pb.F_Uint32Repeated[i] = uint32(i)
		pb.F_Fixed64Repeated[i] = uint64(i)
		pb.F_Fixed32Repeated[i] = uint32(i)
		pb.F_Int64Repeated[i] = int64(i)
		pb.F_Int32Repeated[i] = int32(i)
		pb.F_BoolRepeated[i] = i%2 == 0
		pb.RepeatedField[i] = igtf
	}

	// Marshal.
	buf, _ := Marshal(pb)

	// Now test Unmarshal by recreating the original buffer.
	pbd := new(GoTest)
	Unmarshal(buf, pbd)

	// Check the checkable values
	for i := uint64(0); i < N; i++ {
		if pbd.Repeatedgroup[i] == nil { // TODO: more checking?
			t.Error("pbd.Repeatedgroup bad")
		}
		var x uint64
		x = uint64(pbd.F_Sint64Repeated[i])
		if x != i {
			t.Error("pbd.F_Sint64Repeated bad", x, i)
		}
		x = uint64(pbd.F_Sint32Repeated[i])
		if x != i {
			t.Error("pbd.F_Sint32Repeated bad", x, i)
		}
		s := fmt.Sprint(i)
		equalbytes(pbd.F_BytesRepeated[i], []byte(s), t)
		if pbd.F_StringRepeated[i] != s {
			// NOTE(review): message text says F_Sint32Repeated but this
			// branch checks F_StringRepeated — stale copy/paste upstream.
			t.Error("pbd.F_Sint32Repeated bad", pbd.F_StringRepeated[i], i)
		}
		x = uint64(pbd.F_DoubleRepeated[i])
		if x != i {
			t.Error("pbd.F_DoubleRepeated bad", x, i)
		}
		x = uint64(pbd.F_FloatRepeated[i])
		if x != i {
			t.Error("pbd.F_FloatRepeated bad", x, i)
		}
		x = pbd.F_Uint64Repeated[i]
		if x != i {
			t.Error("pbd.F_Uint64Repeated bad", x, i)
		}
		x = uint64(pbd.F_Uint32Repeated[i])
		if x != i {
			t.Error("pbd.F_Uint32Repeated bad", x, i)
		}
		x = pbd.F_Fixed64Repeated[i]
		if x != i {
			t.Error("pbd.F_Fixed64Repeated bad", x, i)
		}
		x = uint64(pbd.F_Fixed32Repeated[i])
		if x != i {
			t.Error("pbd.F_Fixed32Repeated bad", x, i)
		}
		x = uint64(pbd.F_Int64Repeated[i])
		if x != i {
			t.Error("pbd.F_Int64Repeated bad", x, i)
		}
		x = uint64(pbd.F_Int32Repeated[i])
		if x != i {
			t.Error("pbd.F_Int32Repeated bad", x, i)
		}
		if pbd.F_BoolRepeated[i] != (i%2 == 0) {
			t.Error("pbd.F_BoolRepeated bad", x, i)
		}
		if pbd.RepeatedField[i] == nil { // TODO: more checking?
			t.Error("pbd.RepeatedField bad")
		}
	}
}

// Verify we give a useful message when decoding to the wrong structure type.
func TestTypeMismatch(t *testing.T) {
	pb1 := initGoTest(true)

	// Marshal
	o := old()
	o.Marshal(pb1)

	// Now Unmarshal it to the wrong type.
	pb2 := initGoTestField()
	err := o.Unmarshal(pb2)
	if err == nil {
		t.Error("expected error, got no error")
	} else if !strings.Contains(err.Error(), "bad wiretype") {
		t.Error("expected bad wiretype error, got", err)
	}
}

// encodeDecode marshals in and unmarshals the result into out,
// failing the test (with msg for context) on either error.
func encodeDecode(t *testing.T, in, out Message, msg string) {
	buf, err := Marshal(in)
	if err != nil {
		t.Fatalf("failed marshaling %v: %v", msg, err)
	}
	if err := Unmarshal(buf, out); err != nil {
		t.Fatalf("failed unmarshaling %v: %v", msg, err)
	}
}

// Check the decoder accepts both packed and non-packed encodings of a
// repeated field regardless of how the target field is declared.
func TestPackedNonPackedDecoderSwitching(t *testing.T) {
	np, p := new(NonPackedTest), new(PackedTest)

	// non-packed -> packed
	np.A = []int32{0, 1, 1, 2, 3, 5}
	encodeDecode(t, np, p, "non-packed -> packed")
	if !reflect.DeepEqual(np.A, p.B) {
		t.Errorf("failed non-packed -> packed; np.A=%+v, p.B=%+v", np.A, p.B)
	}

	// packed -> non-packed
	np.Reset()
	p.B = []int32{3, 1, 4, 1, 5, 9}
	encodeDecode(t, p, np, "packed -> non-packed")
	if !reflect.DeepEqual(p.B, np.A) {
		t.Errorf("failed packed -> non-packed; p.B=%+v, np.A=%+v", p.B, np.A)
	}
}

// Check that marshaling a repeated group containing a nil element
// reports an error rather than encoding silently.
func TestProto1RepeatedGroup(t *testing.T) {
	pb := &MessageList{
		Message: []*MessageList_Message{
			{
				Name:  String("blah"),
				Count: Int32(7),
			},
			// NOTE: pb.Message[1] is a nil
			nil,
		},
	}

	o := old()
	err := o.Marshal(pb)
	if err == nil || !strings.Contains(err.Error(), "repeated field Message has nil") {
		t.Fatalf("unexpected or no error when marshaling: %v", err)
	}
}

// Test that enums work.  Checks for a bug introduced by making enums
// named types instead of int32: newInt32FromUint64 would crash with
// a type mismatch in reflect.PointTo.
func TestEnum(t *testing.T) {
	pb := new(GoEnum)
	pb.Foo = FOO_FOO1.Enum()
	o := old()
	if err := o.Marshal(pb); err != nil {
		t.Fatal("error encoding enum:", err)
	}
	pb1 := new(GoEnum)
	if err := o.Unmarshal(pb1); err != nil {
		t.Fatal("error decoding enum:", err)
	}
	if *pb1.Foo != FOO_FOO1 {
		t.Error("expected 7 but got ", *pb1.Foo)
	}
}

// Enum types have String methods. Check that enum fields can be printed.
// We don't care what the value actually is, just as long as it doesn't crash. func TestPrintingNilEnumFields(t *testing.T) { pb := new(GoEnum) fmt.Sprintf("%+v", pb) } // Verify that absent required fields cause Marshal/Unmarshal to return errors. func TestRequiredFieldEnforcement(t *testing.T) { pb := new(GoTestField) _, err := Marshal(pb) if err == nil { t.Error("marshal: expected error, got nil") } else if strings.Index(err.Error(), "Label") < 0 { t.Errorf("marshal: bad error type: %v", err) } // A slightly sneaky, yet valid, proto. It encodes the same required field twice, // so simply counting the required fields is insufficient. // field 1, encoding 2, value "hi" buf := []byte("\x0A\x02hi\x0A\x02hi") err = Unmarshal(buf, pb) if err == nil { t.Error("unmarshal: expected error, got nil") } else if strings.Index(err.Error(), "{Unknown}") < 0 { t.Errorf("unmarshal: bad error type: %v", err) } } func TestTypedNilMarshal(t *testing.T) { // A typed nil should return ErrNil and not crash. _, err := Marshal((*GoEnum)(nil)) if err != ErrNil { t.Errorf("Marshal: got err %v, want ErrNil", err) } } // A type that implements the Marshaler interface, but is not nillable. type nonNillableInt uint64 func (nni nonNillableInt) Marshal() ([]byte, error) { return EncodeVarint(uint64(nni)), nil } type NNIMessage struct { nni nonNillableInt } func (*NNIMessage) Reset() {} func (*NNIMessage) String() string { return "" } func (*NNIMessage) ProtoMessage() {} // A type that implements the Marshaler interface and is nillable. type nillableMessage struct { x uint64 } func (nm *nillableMessage) Marshal() ([]byte, error) { return EncodeVarint(nm.x), nil } type NMMessage struct { nm *nillableMessage } func (*NMMessage) Reset() {} func (*NMMessage) String() string { return "" } func (*NMMessage) ProtoMessage() {} // Verify a type that uses the Marshaler interface, but has a nil pointer. func TestNilMarshaler(t *testing.T) { // Try a struct with a Marshaler field that is nil. 
// It should be directly marshable. nmm := new(NMMessage) if _, err := Marshal(nmm); err != nil { t.Error("unexpected error marshaling nmm: ", err) } // Try a struct with a Marshaler field that is not nillable. nnim := new(NNIMessage) nnim.nni = 7 var _ Marshaler = nnim.nni // verify it is truly a Marshaler if _, err := Marshal(nnim); err != nil { t.Error("unexpected error marshaling nnim: ", err) } } func TestAllSetDefaults(t *testing.T) { // Exercise SetDefaults with all scalar field types. m := &Defaults{ // NaN != NaN, so override that here. F_Nan: Float32(1.7), } expected := &Defaults{ F_Bool: Bool(true), F_Int32: Int32(32), F_Int64: Int64(64), F_Fixed32: Uint32(320), F_Fixed64: Uint64(640), F_Uint32: Uint32(3200), F_Uint64: Uint64(6400), F_Float: Float32(314159), F_Double: Float64(271828), F_String: String(`hello, "world!"` + "\n"), F_Bytes: []byte("Bignose"), F_Sint32: Int32(-32), F_Sint64: Int64(-64), F_Enum: Defaults_GREEN.Enum(), F_Pinf: Float32(float32(math.Inf(1))), F_Ninf: Float32(float32(math.Inf(-1))), F_Nan: Float32(1.7), StrZero: String(""), } SetDefaults(m) if !Equal(m, expected) { t.Errorf("SetDefaults failed\n got %v\nwant %v", m, expected) } } func TestSetDefaultsWithSetField(t *testing.T) { // Check that a set value is not overridden. 
m := &Defaults{ F_Int32: Int32(12), } SetDefaults(m) if v := m.GetF_Int32(); v != 12 { t.Errorf("m.FInt32 = %v, want 12", v) } } func TestSetDefaultsWithSubMessage(t *testing.T) { m := &OtherMessage{ Key: Int64(123), Inner: &InnerMessage{ Host: String("gopher"), }, } expected := &OtherMessage{ Key: Int64(123), Inner: &InnerMessage{ Host: String("gopher"), Port: Int32(4000), }, } SetDefaults(m) if !Equal(m, expected) { t.Errorf("\n got %v\nwant %v", m, expected) } } func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) { m := &MyMessage{ RepInner: []*InnerMessage{{}}, } expected := &MyMessage{ RepInner: []*InnerMessage{{ Port: Int32(4000), }}, } SetDefaults(m) if !Equal(m, expected) { t.Errorf("\n got %v\nwant %v", m, expected) } } func TestSetDefaultWithRepeatedNonMessage(t *testing.T) { m := &MyMessage{ Pet: []string{"turtle", "wombat"}, } expected := Clone(m) SetDefaults(m) if !Equal(m, expected) { t.Errorf("\n got %v\nwant %v", m, expected) } } func TestMaximumTagNumber(t *testing.T) { m := &MaxTag{ LastField: String("natural goat essence"), } buf, err := Marshal(m) if err != nil { t.Fatalf("proto.Marshal failed: %v", err) } m2 := new(MaxTag) if err := Unmarshal(buf, m2); err != nil { t.Fatalf("proto.Unmarshal failed: %v", err) } if got, want := m2.GetLastField(), *m.LastField; got != want { t.Errorf("got %q, want %q", got, want) } } func TestJSON(t *testing.T) { m := &MyMessage{ Count: Int32(4), Pet: []string{"bunny", "kitty"}, Inner: &InnerMessage{ Host: String("cauchy"), }, Bikeshed: MyMessage_GREEN.Enum(), } const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}` b, err := json.Marshal(m) if err != nil { t.Fatalf("json.Marshal failed: %v", err) } s := string(b) if s != expected { t.Errorf("got %s\nwant %s", s, expected) } received := new(MyMessage) if err := json.Unmarshal(b, received); err != nil { t.Fatalf("json.Unmarshal failed: %v", err) } if !Equal(received, m) { t.Fatalf("got %s, want %s", received, m) } // Test 
unmarshalling of JSON with symbolic enum name. const old = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":"GREEN"}` received.Reset() if err := json.Unmarshal([]byte(old), received); err != nil { t.Fatalf("json.Unmarshal failed: %v", err) } if !Equal(received, m) { t.Fatalf("got %s, want %s", received, m) } } func TestBadWireType(t *testing.T) { b := []byte{7<<3 | 6} // field 7, wire type 6 pb := new(OtherMessage) if err := Unmarshal(b, pb); err == nil { t.Errorf("Unmarshal did not fail") } else if !strings.Contains(err.Error(), "unknown wire type") { t.Errorf("wrong error: %v", err) } } func TestBytesWithInvalidLength(t *testing.T) { // If a byte sequence has an invalid (negative) length, Unmarshal should not panic. b := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0} Unmarshal(b, new(MyMessage)) } func TestLengthOverflow(t *testing.T) { // Overflowing a length should not panic. b := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01} Unmarshal(b, new(MyMessage)) } func TestVarintOverflow(t *testing.T) { // Overflowing a 64-bit length should not be allowed. 
b := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01} if err := Unmarshal(b, new(MyMessage)); err == nil { t.Fatalf("Overflowed uint64 length without error") } } func TestUnmarshalFuzz(t *testing.T) { const N = 1000 seed := time.Now().UnixNano() t.Logf("RNG seed is %d", seed) rng := rand.New(rand.NewSource(seed)) buf := make([]byte, 20) for i := 0; i < N; i++ { for j := range buf { buf[j] = byte(rng.Intn(256)) } fuzzUnmarshal(t, buf) } } func TestMergeMessages(t *testing.T) { pb := &MessageList{Message: []*MessageList_Message{{Name: String("x"), Count: Int32(1)}}} data, err := Marshal(pb) if err != nil { t.Fatalf("Marshal: %v", err) } pb1 := new(MessageList) if err := Unmarshal(data, pb1); err != nil { t.Fatalf("first Unmarshal: %v", err) } if err := Unmarshal(data, pb1); err != nil { t.Fatalf("second Unmarshal: %v", err) } if len(pb1.Message) != 1 { t.Errorf("two Unmarshals produced %d Messages, want 1", len(pb1.Message)) } pb2 := new(MessageList) if err := UnmarshalMerge(data, pb2); err != nil { t.Fatalf("first UnmarshalMerge: %v", err) } if err := UnmarshalMerge(data, pb2); err != nil { t.Fatalf("second UnmarshalMerge: %v", err) } if len(pb2.Message) != 2 { t.Errorf("two UnmarshalMerges produced %d Messages, want 2", len(pb2.Message)) } } func TestExtensionMarshalOrder(t *testing.T) { m := &MyMessage{Count: Int(123)} if err := SetExtension(m, E_Ext_More, &Ext{Data: String("alpha")}); err != nil { t.Fatalf("SetExtension: %v", err) } if err := SetExtension(m, E_Ext_Text, String("aleph")); err != nil { t.Fatalf("SetExtension: %v", err) } if err := SetExtension(m, E_Ext_Number, Int32(1)); err != nil { t.Fatalf("SetExtension: %v", err) } // Serialize m several times, and check we get the same bytes each time. 
var orig []byte for i := 0; i < 100; i++ { b, err := Marshal(m) if err != nil { t.Fatalf("Marshal: %v", err) } if i == 0 { orig = b continue } if !bytes.Equal(b, orig) { t.Errorf("Bytes differ on attempt #%d", i) } } } // Many extensions, because small maps might not iterate differently on each iteration. var exts = []*ExtensionDesc{ E_X201, E_X202, E_X203, E_X204, E_X205, E_X206, E_X207, E_X208, E_X209, E_X210, E_X211, E_X212, E_X213, E_X214, E_X215, E_X216, E_X217, E_X218, E_X219, E_X220, E_X221, E_X222, E_X223, E_X224, E_X225, E_X226, E_X227, E_X228, E_X229, E_X230, E_X231, E_X232, E_X233, E_X234, E_X235, E_X236, E_X237, E_X238, E_X239, E_X240, E_X241, E_X242, E_X243, E_X244, E_X245, E_X246, E_X247, E_X248, E_X249, E_X250, } func TestMessageSetMarshalOrder(t *testing.T) { m := &MyMessageSet{} for _, x := range exts { if err := SetExtension(m, x, &Empty{}); err != nil { t.Fatalf("SetExtension: %v", err) } } buf, err := Marshal(m) if err != nil { t.Fatalf("Marshal: %v", err) } // Serialize m several times, and check we get the same bytes each time. for i := 0; i < 10; i++ { b1, err := Marshal(m) if err != nil { t.Fatalf("Marshal: %v", err) } if !bytes.Equal(b1, buf) { t.Errorf("Bytes differ on re-Marshal #%d", i) } m2 := &MyMessageSet{} if err := Unmarshal(buf, m2); err != nil { t.Errorf("Unmarshal: %v", err) } b2, err := Marshal(m2) if err != nil { t.Errorf("re-Marshal: %v", err) } if !bytes.Equal(b2, buf) { t.Errorf("Bytes differ on round-trip #%d", i) } } } func TestUnmarshalMergesMessages(t *testing.T) { // If a nested message occurs twice in the input, // the fields should be merged when decoding. 
a := &OtherMessage{ Key: Int64(123), Inner: &InnerMessage{ Host: String("polhode"), Port: Int32(1234), }, } aData, err := Marshal(a) if err != nil { t.Fatalf("Marshal(a): %v", err) } b := &OtherMessage{ Weight: Float32(1.2), Inner: &InnerMessage{ Host: String("herpolhode"), Connected: Bool(true), }, } bData, err := Marshal(b) if err != nil { t.Fatalf("Marshal(b): %v", err) } want := &OtherMessage{ Key: Int64(123), Weight: Float32(1.2), Inner: &InnerMessage{ Host: String("herpolhode"), Port: Int32(1234), Connected: Bool(true), }, } got := new(OtherMessage) if err := Unmarshal(append(aData, bData...), got); err != nil { t.Fatalf("Unmarshal: %v", err) } if !Equal(got, want) { t.Errorf("\n got %v\nwant %v", got, want) } } func TestEncodingSizes(t *testing.T) { tests := []struct { m Message n int }{ {&Defaults{F_Int32: Int32(math.MaxInt32)}, 6}, {&Defaults{F_Int32: Int32(math.MinInt32)}, 11}, {&Defaults{F_Uint32: Uint32(uint32(math.MaxInt32) + 1)}, 6}, {&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6}, } for _, test := range tests { b, err := Marshal(test.m) if err != nil { t.Errorf("Marshal(%v): %v", test.m, err) continue } if len(b) != test.n { t.Errorf("Marshal(%v) yielded %d bytes, want %d bytes", test.m, len(b), test.n) } } } func TestRequiredNotSetError(t *testing.T) { pb := initGoTest(false) pb.RequiredField.Label = nil pb.F_Int32Required = nil pb.F_Int64Required = nil expected := "0807" + // field 1, encoding 0, value 7 "2206" + "120474797065" + // field 4, encoding 2 (GoTestField) "5001" + // field 10, encoding 0, value 1 "6d20000000" + // field 13, encoding 5, value 0x20 "714000000000000000" + // field 14, encoding 1, value 0x40 "78a019" + // field 15, encoding 0, value 0xca0 = 3232 "8001c032" + // field 16, encoding 0, value 0x1940 = 6464 "8d0100004a45" + // field 17, encoding 5, value 3232.0 "9101000000000040b940" + // field 18, encoding 1, value 6464.0 "9a0106" + "737472696e67" + // field 19, encoding 2, string "string" "b304" + // field 70, encoding 3, 
start group "ba0408" + "7265717569726564" + // field 71, encoding 2, string "required" "b404" + // field 70, encoding 4, end group "aa0605" + "6279746573" + // field 101, encoding 2, string "bytes" "b0063f" + // field 102, encoding 0, 0x3f zigzag32 "b8067f" // field 103, encoding 0, 0x7f zigzag64 o := old() bytes, err := Marshal(pb) if _, ok := err.(*RequiredNotSetError); !ok { fmt.Printf("marshal-1 err = %v, want *RequiredNotSetError", err) o.DebugPrint("", bytes) t.Fatalf("expected = %s", expected) } if strings.Index(err.Error(), "RequiredField.Label") < 0 { t.Errorf("marshal-1 wrong err msg: %v", err) } if !equal(bytes, expected, t) { o.DebugPrint("neq 1", bytes) t.Fatalf("expected = %s", expected) } // Now test Unmarshal by recreating the original buffer. pbd := new(GoTest) err = Unmarshal(bytes, pbd) if _, ok := err.(*RequiredNotSetError); !ok { t.Fatalf("unmarshal err = %v, want *RequiredNotSetError", err) o.DebugPrint("", bytes) t.Fatalf("string = %s", expected) } if strings.Index(err.Error(), "RequiredField.{Unknown}") < 0 { t.Errorf("unmarshal wrong err msg: %v", err) } bytes, err = Marshal(pbd) if _, ok := err.(*RequiredNotSetError); !ok { t.Errorf("marshal-2 err = %v, want *RequiredNotSetError", err) o.DebugPrint("", bytes) t.Fatalf("string = %s", expected) } if strings.Index(err.Error(), "RequiredField.Label") < 0 { t.Errorf("marshal-2 wrong err msg: %v", err) } if !equal(bytes, expected, t) { o.DebugPrint("neq 2", bytes) t.Fatalf("string = %s", expected) } } func fuzzUnmarshal(t *testing.T, data []byte) { defer func() { if e := recover(); e != nil { t.Errorf("These bytes caused a panic: %+v", data) t.Logf("Stack:\n%s", debug.Stack()) t.FailNow() } }() pb := new(MyMessage) Unmarshal(data, pb) } func TestMapFieldMarshal(t *testing.T) { m := &MessageWithMap{ NameMapping: map[int32]string{ 1: "Rob", 4: "Ian", 8: "Dave", }, } b, err := Marshal(m) if err != nil { t.Fatalf("Marshal: %v", err) } // b should be the concatenation of these three byte sequences in 
some order. parts := []string{ "\n\a\b\x01\x12\x03Rob", "\n\a\b\x04\x12\x03Ian", "\n\b\b\x08\x12\x04Dave", } ok := false for i := range parts { for j := range parts { if j == i { continue } for k := range parts { if k == i || k == j { continue } try := parts[i] + parts[j] + parts[k] if bytes.Equal(b, []byte(try)) { ok = true break } } } } if !ok { t.Fatalf("Incorrect Marshal output.\n got %q\nwant %q (or a permutation of that)", b, parts[0]+parts[1]+parts[2]) } t.Logf("FYI b: %q", b) (new(Buffer)).DebugPrint("Dump of b", b) } func TestMapFieldRoundTrips(t *testing.T) { m := &MessageWithMap{ NameMapping: map[int32]string{ 1: "Rob", 4: "Ian", 8: "Dave", }, MsgMapping: map[int64]*FloatingPoint{ 0x7001: &FloatingPoint{F: Float64(2.0)}, }, ByteMapping: map[bool][]byte{ false: []byte("that's not right!"), true: []byte("aye, 'tis true!"), }, } b, err := Marshal(m) if err != nil { t.Fatalf("Marshal: %v", err) } t.Logf("FYI b: %q", b) m2 := new(MessageWithMap) if err := Unmarshal(b, m2); err != nil { t.Fatalf("Unmarshal: %v", err) } for _, pair := range [][2]interface{}{ {m.NameMapping, m2.NameMapping}, {m.MsgMapping, m2.MsgMapping}, {m.ByteMapping, m2.ByteMapping}, } { if !reflect.DeepEqual(pair[0], pair[1]) { t.Errorf("Map did not survive a round trip.\ninitial: %v\n final: %v", pair[0], pair[1]) } } } // Benchmarks func testMsg() *GoTest { pb := initGoTest(true) const N = 1000 // Internally the library starts much smaller. 
pb.F_Int32Repeated = make([]int32, N) pb.F_DoubleRepeated = make([]float64, N) for i := 0; i < N; i++ { pb.F_Int32Repeated[i] = int32(i) pb.F_DoubleRepeated[i] = float64(i) } return pb } func bytesMsg() *GoTest { pb := initGoTest(true) buf := make([]byte, 4000) for i := range buf { buf[i] = byte(i) } pb.F_BytesDefaulted = buf return pb } func benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) { d, _ := marshal(pb) b.SetBytes(int64(len(d))) b.ResetTimer() for i := 0; i < b.N; i++ { marshal(pb) } } func benchmarkBufferMarshal(b *testing.B, pb Message) { p := NewBuffer(nil) benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { p.Reset() err := p.Marshal(pb0) return p.Bytes(), err }) } func benchmarkSize(b *testing.B, pb Message) { benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) { Size(pb) return nil, nil }) } func newOf(pb Message) Message { in := reflect.ValueOf(pb) if in.IsNil() { return pb } return reflect.New(in.Type().Elem()).Interface().(Message) } func benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) { d, _ := Marshal(pb) b.SetBytes(int64(len(d))) pbd := newOf(pb) b.ResetTimer() for i := 0; i < b.N; i++ { unmarshal(d, pbd) } } func benchmarkBufferUnmarshal(b *testing.B, pb Message) { p := NewBuffer(nil) benchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error { p.SetBuf(d) return p.Unmarshal(pb0) }) } // Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes} func BenchmarkMarshal(b *testing.B) { benchmarkMarshal(b, testMsg(), Marshal) } func BenchmarkBufferMarshal(b *testing.B) { benchmarkBufferMarshal(b, testMsg()) } func BenchmarkSize(b *testing.B) { benchmarkSize(b, testMsg()) } func BenchmarkUnmarshal(b *testing.B) { benchmarkUnmarshal(b, testMsg(), Unmarshal) } func BenchmarkBufferUnmarshal(b *testing.B) { benchmarkBufferUnmarshal(b, testMsg()) } func BenchmarkMarshalBytes(b *testing.B) { benchmarkMarshal(b, bytesMsg(), Marshal) } func 
BenchmarkBufferMarshalBytes(b *testing.B) { benchmarkBufferMarshal(b, bytesMsg()) } func BenchmarkSizeBytes(b *testing.B) { benchmarkSize(b, bytesMsg()) } func BenchmarkUnmarshalBytes(b *testing.B) { benchmarkUnmarshal(b, bytesMsg(), Unmarshal) } func BenchmarkBufferUnmarshalBytes(b *testing.B) { benchmarkBufferUnmarshal(b, bytesMsg()) } func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) { b.StopTimer() pb := initGoTestField() skip := &GoSkipTest{ SkipInt32: Int32(32), SkipFixed32: Uint32(3232), SkipFixed64: Uint64(6464), SkipString: String("skipper"), Skipgroup: &GoSkipTest_SkipGroup{ GroupInt32: Int32(75), GroupString: String("wxyz"), }, } pbd := new(GoTestField) p := NewBuffer(nil) p.Marshal(pb) p.Marshal(skip) p2 := NewBuffer(nil) b.StartTimer() for i := 0; i < b.N; i++ { p2.SetBuf(p.Bytes()) p2.Unmarshal(pbd) } } ================================================ FILE: vendor/github.com/golang/protobuf/proto/clone.go ================================================ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2011 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Protocol buffer deep copy and merge. // TODO: MessageSet and RawMessage. package proto import ( "log" "reflect" "strings" ) // Clone returns a deep copy of a protocol buffer. func Clone(pb Message) Message { in := reflect.ValueOf(pb) if in.IsNil() { return pb } out := reflect.New(in.Type().Elem()) // out is empty so a merge is a deep copy. mergeStruct(out.Elem(), in.Elem()) return out.Interface().(Message) } // Merge merges src into dst. // Required and optional fields that are set in src will be set to that value in dst. // Elements of repeated fields will be appended. // Merge panics if src and dst are not the same type, or if dst is nil. 
func Merge(dst, src Message) {
	in := reflect.ValueOf(src)
	out := reflect.ValueOf(dst)
	if out.IsNil() {
		panic("proto: nil destination")
	}
	if in.Type() != out.Type() {
		// Explicit test prior to mergeStruct so that mistyped nils will fail
		panic("proto: type mismatch")
	}
	if in.IsNil() {
		// Merging nil into non-nil is a quiet no-op
		return
	}
	mergeStruct(out.Elem(), in.Elem())
}

// mergeStruct merges every non-internal field of the struct in into out,
// then merges extensions and copies any unrecognized (unknown-field) bytes.
// Both values must be addressable struct values of the same type.
func mergeStruct(out, in reflect.Value) {
	for i := 0; i < in.NumField(); i++ {
		f := in.Type().Field(i)
		// Fields prefixed XXX_ are generated bookkeeping (sizecache,
		// unrecognized, extensions); they are handled separately below.
		if strings.HasPrefix(f.Name, "XXX_") {
			continue
		}
		mergeAny(out.Field(i), in.Field(i))
	}
	if emIn, ok := in.Addr().Interface().(extendableProto); ok {
		emOut := out.Addr().Interface().(extendableProto)
		mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
	}
	uf := in.FieldByName("XXX_unrecognized")
	if !uf.IsValid() {
		return
	}
	uin := uf.Bytes()
	if len(uin) > 0 {
		// Deep-copy the unknown bytes so dst does not alias src's buffer.
		out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
	}
}

// mergeAny merges the value in into out, dispatching on kind.
// Scalars overwrite; maps, pointers, slices and structs merge recursively,
// deep-copying anything reference-like so out never aliases in.
func mergeAny(out, in reflect.Value) {
	if in.Type() == protoMessageType {
		if !in.IsNil() {
			if out.IsNil() {
				out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
			} else {
				Merge(out.Interface().(Message), in.Interface().(Message))
			}
		}
		return
	}
	switch in.Kind() {
	case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
		reflect.String, reflect.Uint32, reflect.Uint64:
		out.Set(in)
	case reflect.Map:
		if in.Len() == 0 {
			return
		}
		if out.IsNil() {
			out.Set(reflect.MakeMap(in.Type()))
		}
		// For maps with value types of *T or []byte we need to deep copy each value.
		elemKind := in.Type().Elem().Kind()
		for _, key := range in.MapKeys() {
			var val reflect.Value
			switch elemKind {
			case reflect.Ptr:
				val = reflect.New(in.Type().Elem().Elem())
				mergeAny(val, in.MapIndex(key))
			case reflect.Slice:
				val = in.MapIndex(key)
				val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
			default:
				val = in.MapIndex(key)
			}
			out.SetMapIndex(key, val)
		}
	case reflect.Ptr:
		if in.IsNil() {
			return
		}
		if out.IsNil() {
			out.Set(reflect.New(in.Elem().Type()))
		}
		mergeAny(out.Elem(), in.Elem())
	case reflect.Slice:
		if in.IsNil() {
			return
		}
		if in.Type().Elem().Kind() == reflect.Uint8 {
			// []byte is a scalar bytes field, not a repeated field.
			// Make a deep copy.
			// Append to []byte{} instead of []byte(nil) so that we never end up
			// with a nil result.
			out.SetBytes(append([]byte{}, in.Bytes()...))
			return
		}
		n := in.Len()
		if out.IsNil() {
			out.Set(reflect.MakeSlice(in.Type(), 0, n))
		}
		switch in.Type().Elem().Kind() {
		case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
			reflect.String, reflect.Uint32, reflect.Uint64:
			// Repeated scalars can be appended wholesale.
			out.Set(reflect.AppendSlice(out, in))
		default:
			// Repeated messages/groups: deep-copy each element.
			for i := 0; i < n; i++ {
				x := reflect.Indirect(reflect.New(in.Type().Elem()))
				mergeAny(x, in.Index(i))
				out.Set(reflect.Append(out, x))
			}
		}
	case reflect.Struct:
		mergeStruct(out, in)
	default:
		// unknown type, so not a protocol buffer
		log.Printf("proto: don't know how to copy %v", in)
	}
}

// mergeExtension deep-copies each extension in in into out, duplicating
// both the decoded value (via mergeAny) and the raw encoded bytes.
func mergeExtension(out, in map[int32]Extension) {
	for extNum, eIn := range in {
		eOut := Extension{desc: eIn.desc}
		if eIn.value != nil {
			v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
			mergeAny(v, reflect.ValueOf(eIn.value))
			eOut.value = v.Interface()
		}
		if eIn.enc != nil {
			eOut.enc = make([]byte, len(eIn.enc))
			copy(eOut.enc, eIn.enc)
		}
		out[extNum] = eOut
	}
}

================================================ FILE: vendor/github.com/golang/protobuf/proto/clone_test.go ================================================
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright
2011 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
package proto_test import ( "testing" "github.com/golang/protobuf/proto" pb "github.com/golang/protobuf/proto/testdata" ) var cloneTestMessage = &pb.MyMessage{ Count: proto.Int32(42), Name: proto.String("Dave"), Pet: []string{"bunny", "kitty", "horsey"}, Inner: &pb.InnerMessage{ Host: proto.String("niles"), Port: proto.Int32(9099), Connected: proto.Bool(true), }, Others: []*pb.OtherMessage{ { Value: []byte("some bytes"), }, }, Somegroup: &pb.MyMessage_SomeGroup{ GroupField: proto.Int32(6), }, RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, } func init() { ext := &pb.Ext{ Data: proto.String("extension"), } if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil { panic("SetExtension: " + err.Error()) } } func TestClone(t *testing.T) { m := proto.Clone(cloneTestMessage).(*pb.MyMessage) if !proto.Equal(m, cloneTestMessage) { t.Errorf("Clone(%v) = %v", cloneTestMessage, m) } // Verify it was a deep copy. *m.Inner.Port++ if proto.Equal(m, cloneTestMessage) { t.Error("Mutating clone changed the original") } // Byte fields and repeated fields should be copied. 
if &m.Pet[0] == &cloneTestMessage.Pet[0] { t.Error("Pet: repeated field not copied") } if &m.Others[0] == &cloneTestMessage.Others[0] { t.Error("Others: repeated field not copied") } if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] { t.Error("Others[0].Value: bytes field not copied") } if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] { t.Error("RepBytes: repeated field not copied") } if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] { t.Error("RepBytes[0]: bytes field not copied") } } func TestCloneNil(t *testing.T) { var m *pb.MyMessage if c := proto.Clone(m); !proto.Equal(m, c) { t.Errorf("Clone(%v) = %v", m, c) } } var mergeTests = []struct { src, dst, want proto.Message }{ { src: &pb.MyMessage{ Count: proto.Int32(42), }, dst: &pb.MyMessage{ Name: proto.String("Dave"), }, want: &pb.MyMessage{ Count: proto.Int32(42), Name: proto.String("Dave"), }, }, { src: &pb.MyMessage{ Inner: &pb.InnerMessage{ Host: proto.String("hey"), Connected: proto.Bool(true), }, Pet: []string{"horsey"}, Others: []*pb.OtherMessage{ { Value: []byte("some bytes"), }, }, }, dst: &pb.MyMessage{ Inner: &pb.InnerMessage{ Host: proto.String("niles"), Port: proto.Int32(9099), }, Pet: []string{"bunny", "kitty"}, Others: []*pb.OtherMessage{ { Key: proto.Int64(31415926535), }, { // Explicitly test a src=nil field Inner: nil, }, }, }, want: &pb.MyMessage{ Inner: &pb.InnerMessage{ Host: proto.String("hey"), Connected: proto.Bool(true), Port: proto.Int32(9099), }, Pet: []string{"bunny", "kitty", "horsey"}, Others: []*pb.OtherMessage{ { Key: proto.Int64(31415926535), }, {}, { Value: []byte("some bytes"), }, }, }, }, { src: &pb.MyMessage{ RepBytes: [][]byte{[]byte("wow")}, }, dst: &pb.MyMessage{ Somegroup: &pb.MyMessage_SomeGroup{ GroupField: proto.Int32(6), }, RepBytes: [][]byte{[]byte("sham")}, }, want: &pb.MyMessage{ Somegroup: &pb.MyMessage_SomeGroup{ GroupField: proto.Int32(6), }, RepBytes: [][]byte{[]byte("sham"), []byte("wow")}, }, }, // Check that a scalar bytes field 
replaces rather than appends. { src: &pb.OtherMessage{Value: []byte("foo")}, dst: &pb.OtherMessage{Value: []byte("bar")}, want: &pb.OtherMessage{Value: []byte("foo")}, }, { src: &pb.MessageWithMap{ NameMapping: map[int32]string{6: "Nigel"}, MsgMapping: map[int64]*pb.FloatingPoint{ 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, }, ByteMapping: map[bool][]byte{true: []byte("wowsa")}, }, dst: &pb.MessageWithMap{ NameMapping: map[int32]string{ 6: "Bruce", // should be overwritten 7: "Andrew", }, }, want: &pb.MessageWithMap{ NameMapping: map[int32]string{ 6: "Nigel", 7: "Andrew", }, MsgMapping: map[int64]*pb.FloatingPoint{ 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)}, }, ByteMapping: map[bool][]byte{true: []byte("wowsa")}, }, }, } func TestMerge(t *testing.T) { for _, m := range mergeTests { got := proto.Clone(m.dst) proto.Merge(got, m.src) if !proto.Equal(got, m.want) { t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want) } } } ================================================ FILE: vendor/github.com/golang/protobuf/proto/decode.go ================================================ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto /* * Routines for decoding protocol buffer data to construct in-memory representations. */ import ( "errors" "fmt" "io" "os" "reflect" ) // errOverflow is returned when an integer is too large to be represented. var errOverflow = errors.New("proto: integer overflow") // The fundamental decoders that interpret bytes on the wire. // Those that take integer types all return uint64 and are // therefore of type valueDecoder. // DecodeVarint reads a varint-encoded integer from the slice. // It returns the integer and the number of bytes consumed, or // zero if there is not enough. // This is the format for the // int32, int64, uint32, uint64, bool, and enum // protocol buffer types. func DecodeVarint(buf []byte) (x uint64, n int) { // x, n already 0 for shift := uint(0); shift < 64; shift += 7 { if n >= len(buf) { return 0, 0 } b := uint64(buf[n]) n++ x |= (b & 0x7F) << shift if (b & 0x80) == 0 { return x, n } } // The number is too large to represent in a 64-bit value. return 0, 0 } // DecodeVarint reads a varint-encoded integer from the Buffer. // This is the format for the // int32, int64, uint32, uint64, bool, and enum // protocol buffer types. 
func (p *Buffer) DecodeVarint() (x uint64, err error) {
	// x, err already 0
	i := p.index
	l := len(p.buf)
	for shift := uint(0); shift < 64; shift += 7 {
		if i >= l {
			err = io.ErrUnexpectedEOF
			return
		}
		b := p.buf[i]
		i++
		x |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			// High (continuation) bit clear: final byte of the varint.
			p.index = i
			return
		}
	}
	// The number is too large to represent in a 64-bit value.
	err = errOverflow
	return
}

// DecodeFixed64 reads a 64-bit integer from the Buffer.
// This is the format for the
// fixed64, sfixed64, and double protocol buffer types.
func (p *Buffer) DecodeFixed64() (x uint64, err error) {
	// x, err already 0
	i := p.index + 8
	// i < 0 guards against integer overflow of p.index + 8.
	if i < 0 || i > len(p.buf) {
		err = io.ErrUnexpectedEOF
		return
	}
	p.index = i
	// Assemble the value little-endian, low byte first.
	x = uint64(p.buf[i-8])
	x |= uint64(p.buf[i-7]) << 8
	x |= uint64(p.buf[i-6]) << 16
	x |= uint64(p.buf[i-5]) << 24
	x |= uint64(p.buf[i-4]) << 32
	x |= uint64(p.buf[i-3]) << 40
	x |= uint64(p.buf[i-2]) << 48
	x |= uint64(p.buf[i-1]) << 56
	return
}

// DecodeFixed32 reads a 32-bit integer from the Buffer.
// This is the format for the
// fixed32, sfixed32, and float protocol buffer types.
func (p *Buffer) DecodeFixed32() (x uint64, err error) {
	// x, err already 0
	i := p.index + 4
	// i < 0 guards against integer overflow of p.index + 4.
	if i < 0 || i > len(p.buf) {
		err = io.ErrUnexpectedEOF
		return
	}
	p.index = i
	// Assemble the value little-endian, low byte first.
	x = uint64(p.buf[i-4])
	x |= uint64(p.buf[i-3]) << 8
	x |= uint64(p.buf[i-2]) << 16
	x |= uint64(p.buf[i-1]) << 24
	return
}

// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
// from the Buffer.
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
	x, err = p.DecodeVarint()
	if err != nil {
		return
	}
	// Undo zigzag: low bit is the sign; the arithmetic shift spreads it.
	x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
	return
}

// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
// from the Buffer.
// This is the format used for the sint32 protocol buffer type.
func (p *Buffer) DecodeZigzag32() (x uint64, err error) { x, err = p.DecodeVarint() if err != nil { return } x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) return } // These are not ValueDecoders: they produce an array of bytes or a string. // bytes, embedded messages // DecodeRawBytes reads a count-delimited byte buffer from the Buffer. // This is the format used for the bytes protocol buffer // type and for embedded messages. func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { n, err := p.DecodeVarint() if err != nil { return nil, err } nb := int(n) if nb < 0 { return nil, fmt.Errorf("proto: bad byte length %d", nb) } end := p.index + nb if end < p.index || end > len(p.buf) { return nil, io.ErrUnexpectedEOF } if !alloc { // todo: check if can get more uses of alloc=false buf = p.buf[p.index:end] p.index += nb return } buf = make([]byte, nb) copy(buf, p.buf[p.index:]) p.index += nb return } // DecodeStringBytes reads an encoded string from the Buffer. // This is the format used for the proto2 string type. func (p *Buffer) DecodeStringBytes() (s string, err error) { buf, err := p.DecodeRawBytes(false) if err != nil { return } return string(buf), nil } // Skip the next item in the buffer. Its wire type is decoded and presented as an argument. // If the protocol buffer has extensions, and the field matches, add it as an extension. // Otherwise, if the XXX_unrecognized field exists, append the skipped data there. func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { oi := o.index err := o.skip(t, tag, wire) if err != nil { return err } if !unrecField.IsValid() { return nil } ptr := structPointer_Bytes(base, unrecField) // Add the skipped field to struct field obuf := o.buf o.buf = *ptr o.EncodeVarint(uint64(tag<<3 | wire)) *ptr = append(o.buf, obuf[oi:o.index]...) o.buf = obuf return nil } // Skip the next item in the buffer. Its wire type is decoded and presented as an argument. 
func (o *Buffer) skip(t reflect.Type, tag, wire int) error { var u uint64 var err error switch wire { case WireVarint: _, err = o.DecodeVarint() case WireFixed64: _, err = o.DecodeFixed64() case WireBytes: _, err = o.DecodeRawBytes(false) case WireFixed32: _, err = o.DecodeFixed32() case WireStartGroup: for { u, err = o.DecodeVarint() if err != nil { break } fwire := int(u & 0x7) if fwire == WireEndGroup { break } ftag := int(u >> 3) err = o.skip(t, ftag, fwire) if err != nil { break } } default: err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) } return err } // Unmarshaler is the interface representing objects that can // unmarshal themselves. The method should reset the receiver before // decoding starts. The argument points to data that may be // overwritten, so implementations should not keep references to the // buffer. type Unmarshaler interface { Unmarshal([]byte) error } // Unmarshal parses the protocol buffer representation in buf and places the // decoded result in pb. If the struct underlying pb does not match // the data in buf, the results can be unpredictable. // // Unmarshal resets pb before starting to unmarshal, so any // existing data in pb is always removed. Use UnmarshalMerge // to preserve and append to existing data. func Unmarshal(buf []byte, pb Message) error { pb.Reset() return UnmarshalMerge(buf, pb) } // UnmarshalMerge parses the protocol buffer representation in buf and // writes the decoded result to pb. If the struct underlying pb does not match // the data in buf, the results can be unpredictable. // // UnmarshalMerge merges into existing data in pb. // Most code should use Unmarshal instead. func UnmarshalMerge(buf []byte, pb Message) error { // If the object can unmarshal itself, let it. if u, ok := pb.(Unmarshaler); ok { return u.Unmarshal(buf) } return NewBuffer(buf).Unmarshal(pb) } // Unmarshal parses the protocol buffer representation in the // Buffer and places the decoded result in pb. 
If the struct // underlying pb does not match the data in the buffer, the results can be // unpredictable. func (p *Buffer) Unmarshal(pb Message) error { // If the object can unmarshal itself, let it. if u, ok := pb.(Unmarshaler); ok { err := u.Unmarshal(p.buf[p.index:]) p.index = len(p.buf) return err } typ, base, err := getbase(pb) if err != nil { return err } err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) if collectStats { stats.Decode++ } return err } // unmarshalType does the work of unmarshaling a structure. func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { var state errorState required, reqFields := prop.reqCount, uint64(0) var err error for err == nil && o.index < len(o.buf) { oi := o.index var u uint64 u, err = o.DecodeVarint() if err != nil { break } wire := int(u & 0x7) if wire == WireEndGroup { if is_group { return nil // input is satisfied } return fmt.Errorf("proto: %s: wiretype end group for non-group", st) } tag := int(u >> 3) if tag <= 0 { return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) } fieldnum, ok := prop.decoderTags.get(tag) if !ok { // Maybe it's an extension? if prop.extendable { if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) { if err = o.skip(st, tag, wire); err == nil { ext := e.ExtensionMap()[int32(tag)] // may be missing ext.enc = append(ext.enc, o.buf[oi:o.index]...) 
e.ExtensionMap()[int32(tag)] = ext } continue } } err = o.skipAndSave(st, tag, wire, base, prop.unrecField) continue } p := prop.Prop[fieldnum] if p.dec == nil { fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) continue } dec := p.dec if wire != WireStartGroup && wire != p.WireType { if wire == WireBytes && p.packedDec != nil { // a packable field dec = p.packedDec } else { err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) continue } } decErr := dec(o, p, base) if decErr != nil && !state.shouldContinue(decErr, p) { err = decErr } if err == nil && p.Required { // Successfully decoded a required field. if tag <= 64 { // use bitmap for fields 1-64 to catch field reuse. var mask uint64 = 1 << uint64(tag-1) if reqFields&mask == 0 { // new required field reqFields |= mask required-- } } else { // This is imprecise. It can be fooled by a required field // with a tag > 64 that is encoded twice; that's very rare. // A fully correct implementation would require allocating // a data structure, which we would like to avoid. required-- } } } if err == nil { if is_group { return io.ErrUnexpectedEOF } if state.err != nil { return state.err } if required > 0 { // Not enough information to determine the exact field. If we use extra // CPU, we could determine the field only if the missing required field // has a tag <= 64 and we check reqFields. return &RequiredNotSetError{"{Unknown}"} } } return err } // Individual type decoders // For each, // u is the decoded value, // v is a pointer to the field (pointer) in the struct // Sizes of the pools to allocate inside the Buffer. // The goal is modest amortization and allocation // on at least 16-byte boundaries. const ( boolPoolSize = 16 uint32PoolSize = 8 uint64PoolSize = 4 ) // Decode a bool. 
func (o *Buffer) dec_bool(p *Properties, base structPointer) error { u, err := p.valDec(o) if err != nil { return err } if len(o.bools) == 0 { o.bools = make([]bool, boolPoolSize) } o.bools[0] = u != 0 *structPointer_Bool(base, p.field) = &o.bools[0] o.bools = o.bools[1:] return nil } func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { u, err := p.valDec(o) if err != nil { return err } *structPointer_BoolVal(base, p.field) = u != 0 return nil } // Decode an int32. func (o *Buffer) dec_int32(p *Properties, base structPointer) error { u, err := p.valDec(o) if err != nil { return err } word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) return nil } func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { u, err := p.valDec(o) if err != nil { return err } word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) return nil } // Decode an int64. func (o *Buffer) dec_int64(p *Properties, base structPointer) error { u, err := p.valDec(o) if err != nil { return err } word64_Set(structPointer_Word64(base, p.field), o, u) return nil } func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { u, err := p.valDec(o) if err != nil { return err } word64Val_Set(structPointer_Word64Val(base, p.field), o, u) return nil } // Decode a string. func (o *Buffer) dec_string(p *Properties, base structPointer) error { s, err := o.DecodeStringBytes() if err != nil { return err } *structPointer_String(base, p.field) = &s return nil } func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { s, err := o.DecodeStringBytes() if err != nil { return err } *structPointer_StringVal(base, p.field) = s return nil } // Decode a slice of bytes ([]byte). func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { b, err := o.DecodeRawBytes(true) if err != nil { return err } *structPointer_Bytes(base, p.field) = b return nil } // Decode a slice of bools ([]bool). 
func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { u, err := p.valDec(o) if err != nil { return err } v := structPointer_BoolSlice(base, p.field) *v = append(*v, u != 0) return nil } // Decode a slice of bools ([]bool) in packed format. func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { v := structPointer_BoolSlice(base, p.field) nn, err := o.DecodeVarint() if err != nil { return err } nb := int(nn) // number of bytes of encoded bools y := *v for i := 0; i < nb; i++ { u, err := p.valDec(o) if err != nil { return err } y = append(y, u != 0) } *v = y return nil } // Decode a slice of int32s ([]int32). func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { u, err := p.valDec(o) if err != nil { return err } structPointer_Word32Slice(base, p.field).Append(uint32(u)) return nil } // Decode a slice of int32s ([]int32) in packed format. func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { v := structPointer_Word32Slice(base, p.field) nn, err := o.DecodeVarint() if err != nil { return err } nb := int(nn) // number of bytes of encoded int32s fin := o.index + nb if fin < o.index { return errOverflow } for o.index < fin { u, err := p.valDec(o) if err != nil { return err } v.Append(uint32(u)) } return nil } // Decode a slice of int64s ([]int64). func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { u, err := p.valDec(o) if err != nil { return err } structPointer_Word64Slice(base, p.field).Append(u) return nil } // Decode a slice of int64s ([]int64) in packed format. 
func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { v := structPointer_Word64Slice(base, p.field) nn, err := o.DecodeVarint() if err != nil { return err } nb := int(nn) // number of bytes of encoded int64s fin := o.index + nb if fin < o.index { return errOverflow } for o.index < fin { u, err := p.valDec(o) if err != nil { return err } v.Append(u) } return nil } // Decode a slice of strings ([]string). func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { s, err := o.DecodeStringBytes() if err != nil { return err } v := structPointer_StringSlice(base, p.field) *v = append(*v, s) return nil } // Decode a slice of slice of bytes ([][]byte). func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { b, err := o.DecodeRawBytes(true) if err != nil { return err } v := structPointer_BytesSlice(base, p.field) *v = append(*v, b) return nil } // Decode a map field. func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { raw, err := o.DecodeRawBytes(false) if err != nil { return err } oi := o.index // index at the end of this map entry o.index -= len(raw) // move buffer back to start of map entry mptr := structPointer_Map(base, p.field, p.mtype) // *map[K]V if mptr.Elem().IsNil() { mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) } v := mptr.Elem() // map[K]V // Prepare addressable doubly-indirect placeholders for the key and value types. // See enc_new_map for why. 
keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K keybase := toStructPointer(keyptr.Addr()) // **K var valbase structPointer var valptr reflect.Value switch p.mtype.Elem().Kind() { case reflect.Slice: // []byte var dummy []byte valptr = reflect.ValueOf(&dummy) // *[]byte valbase = toStructPointer(valptr) // *[]byte case reflect.Ptr: // message; valptr is **Msg; need to allocate the intermediate pointer valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V valptr.Set(reflect.New(valptr.Type().Elem())) valbase = toStructPointer(valptr) default: // everything else valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V valbase = toStructPointer(valptr.Addr()) // **V } // Decode. // This parses a restricted wire format, namely the encoding of a message // with two fields. See enc_new_map for the format. for o.index < oi { // tagcode for key and value properties are always a single byte // because they have tags 1 and 2. tagcode := o.buf[o.index] o.index++ switch tagcode { case p.mkeyprop.tagcode[0]: if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { return err } case p.mvalprop.tagcode[0]: if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { return err } default: // TODO: Should we silently skip this instead? return fmt.Errorf("proto: bad map data tag %d", raw[0]) } } keyelem, valelem := keyptr.Elem(), valptr.Elem() if !keyelem.IsValid() || !valelem.IsValid() { // We did not decode the key or the value in the map entry. // Either way, it's an invalid map entry. return fmt.Errorf("proto: bad map data: missing key/val") } v.SetMapIndex(keyelem, valelem) return nil } // Decode a group. 
func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { bas := structPointer_GetStructPointer(base, p.field) if structPointer_IsNil(bas) { // allocate new nested message bas = toStructPointer(reflect.New(p.stype)) structPointer_SetStructPointer(base, p.field, bas) } return o.unmarshalType(p.stype, p.sprop, true, bas) } // Decode an embedded message. func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { raw, e := o.DecodeRawBytes(false) if e != nil { return e } bas := structPointer_GetStructPointer(base, p.field) if structPointer_IsNil(bas) { // allocate new nested message bas = toStructPointer(reflect.New(p.stype)) structPointer_SetStructPointer(base, p.field, bas) } // If the object can unmarshal itself, let it. if p.isUnmarshaler { iv := structPointer_Interface(bas, p.stype) return iv.(Unmarshaler).Unmarshal(raw) } obuf := o.buf oi := o.index o.buf = raw o.index = 0 err = o.unmarshalType(p.stype, p.sprop, false, bas) o.buf = obuf o.index = oi return err } // Decode a slice of embedded messages. func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { return o.dec_slice_struct(p, false, base) } // Decode a slice of embedded groups. func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { return o.dec_slice_struct(p, true, base) } // Decode a slice of structs ([]*struct). func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { v := reflect.New(p.stype) bas := toStructPointer(v) structPointer_StructPointerSlice(base, p.field).Append(bas) if is_group { err := o.unmarshalType(p.stype, p.sprop, is_group, bas) return err } raw, err := o.DecodeRawBytes(false) if err != nil { return err } // If the object can unmarshal itself, let it. 
if p.isUnmarshaler { iv := v.Interface() return iv.(Unmarshaler).Unmarshal(raw) } obuf := o.buf oi := o.index o.buf = raw o.index = 0 err = o.unmarshalType(p.stype, p.sprop, is_group, bas) o.buf = obuf o.index = oi return err } ================================================ FILE: vendor/github.com/golang/protobuf/proto/encode.go ================================================ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto /* * Routines for encoding data into the wire format for protocol buffers. */ import ( "errors" "fmt" "reflect" "sort" ) // RequiredNotSetError is the error returned if Marshal is called with // a protocol buffer struct whose required fields have not // all been initialized. It is also the error returned if Unmarshal is // called with an encoded protocol buffer that does not include all the // required fields. // // When printed, RequiredNotSetError reports the first unset required field in a // message. If the field cannot be precisely determined, it is reported as // "{Unknown}". type RequiredNotSetError struct { field string } func (e *RequiredNotSetError) Error() string { return fmt.Sprintf("proto: required field %q not set", e.field) } var ( // errRepeatedHasNil is the error returned if Marshal is called with // a struct with a repeated field containing a nil element. errRepeatedHasNil = errors.New("proto: repeated field has nil element") // ErrNil is the error returned if Marshal is called with nil. ErrNil = errors.New("proto: Marshal called with nil") ) // The fundamental encoders that put bytes on the wire. // Those that take integer types all accept uint64 and are // therefore of type valueEncoder. const maxVarintBytes = 10 // maximum length of a varint // EncodeVarint returns the varint encoding of x. // This is the format for the // int32, int64, uint32, uint64, bool, and enum // protocol buffer types. 
// Not used by the package itself, but helpful to clients
// wishing to use the same encoding.
func EncodeVarint(x uint64) []byte {
	var buf [maxVarintBytes]byte
	var n int
	// Emit 7 bits per byte, low group first, with the high bit set on
	// every byte except the last.
	for n = 0; x > 127; n++ {
		buf[n] = 0x80 | uint8(x&0x7F)
		x >>= 7
	}
	buf[n] = uint8(x)
	n++
	return buf[0:n]
}

// EncodeVarint writes a varint-encoded integer to the Buffer.
// This is the format for the
// int32, int64, uint32, uint64, bool, and enum
// protocol buffer types.
func (p *Buffer) EncodeVarint(x uint64) error {
	for x >= 1<<7 {
		p.buf = append(p.buf, uint8(x&0x7f|0x80))
		x >>= 7
	}
	p.buf = append(p.buf, uint8(x))
	return nil
}

// sizeVarint returns the number of bytes EncodeVarint would emit for x.
func sizeVarint(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			break
		}
	}
	return n
}

// EncodeFixed64 writes a 64-bit integer to the Buffer.
// This is the format for the
// fixed64, sfixed64, and double protocol buffer types.
func (p *Buffer) EncodeFixed64(x uint64) error {
	// Little-endian, always exactly 8 bytes.
	p.buf = append(p.buf,
		uint8(x),
		uint8(x>>8),
		uint8(x>>16),
		uint8(x>>24),
		uint8(x>>32),
		uint8(x>>40),
		uint8(x>>48),
		uint8(x>>56))
	return nil
}

func sizeFixed64(x uint64) int {
	return 8
}

// EncodeFixed32 writes a 32-bit integer to the Buffer.
// This is the format for the
// fixed32, sfixed32, and float protocol buffer types.
func (p *Buffer) EncodeFixed32(x uint64) error {
	// Little-endian, always exactly 4 bytes.
	p.buf = append(p.buf,
		uint8(x),
		uint8(x>>8),
		uint8(x>>16),
		uint8(x>>24))
	return nil
}

func sizeFixed32(x uint64) int {
	return 4
}

// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
// to the Buffer.
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) EncodeZigzag64(x uint64) error {
	// use signed number to get arithmetic right shift.
	return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}

func sizeZigzag64(x uint64) int {
	return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}

// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
// to the Buffer.
// This is the format used for the sint32 protocol buffer type.
func (p *Buffer) EncodeZigzag32(x uint64) error {
	// use signed number to get arithmetic right shift.
	return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
}

func sizeZigzag32(x uint64) int {
	return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
}

// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
// This is the format used for the bytes protocol buffer
// type and for embedded messages.
func (p *Buffer) EncodeRawBytes(b []byte) error {
	p.EncodeVarint(uint64(len(b)))
	p.buf = append(p.buf, b...)
	return nil
}

func sizeRawBytes(b []byte) int {
	return sizeVarint(uint64(len(b))) +
		len(b)
}

// EncodeStringBytes writes an encoded string to the Buffer.
// This is the format used for the proto2 string type.
func (p *Buffer) EncodeStringBytes(s string) error {
	p.EncodeVarint(uint64(len(s)))
	p.buf = append(p.buf, s...)
	return nil
}

func sizeStringBytes(s string) int {
	return sizeVarint(uint64(len(s))) +
		len(s)
}

// Marshaler is the interface representing objects that can marshal themselves.
type Marshaler interface {
	Marshal() ([]byte, error)
}

// Marshal takes the protocol buffer
// and encodes it into the wire format, returning the data.
func Marshal(pb Message) ([]byte, error) {
	// Can the object marshal itself?
	if m, ok := pb.(Marshaler); ok {
		return m.Marshal()
	}
	p := NewBuffer(nil)
	err := p.Marshal(pb)
	var state errorState
	// RequiredNotSetError is tolerated here: the partial encoding is
	// still returned alongside the error.
	if err != nil && !state.shouldContinue(err, nil) {
		return nil, err
	}
	if p.buf == nil && err == nil {
		// Return a non-nil slice on success.
		return []byte{}, nil
	}
	return p.buf, err
}

// Marshal takes the protocol buffer
// and encodes it into the wire format, writing the result to the
// Buffer.
func (p *Buffer) Marshal(pb Message) error {
	// Can the object marshal itself?
	if m, ok := pb.(Marshaler); ok {
		data, err := m.Marshal()
		if err != nil {
			return err
		}
		p.buf = append(p.buf, data...)
		return nil
	}

	t, base, err := getbase(pb)
	if structPointer_IsNil(base) {
		return ErrNil
	}
	if err == nil {
		err = p.enc_struct(GetProperties(t.Elem()), base)
	}

	if collectStats {
		stats.Encode++
	}

	return err
}

// Size returns the encoded size of a protocol buffer.
func Size(pb Message) (n int) {
	// Can the object marshal itself?  If so, Size is slow.
	// TODO: add Size to Marshaler, or add a Sizer interface.
	if m, ok := pb.(Marshaler); ok {
		b, _ := m.Marshal()
		return len(b)
	}

	t, base, err := getbase(pb)
	if structPointer_IsNil(base) {
		return 0
	}
	if err == nil {
		n = size_struct(GetProperties(t.Elem()), base)
	}

	if collectStats {
		stats.Size++
	}

	return
}

// Individual type encoders.
// Each enc_* writes one field (tag bytes then value) and returns ErrNil
// when the field is unset (or zero, for the proto3 variants) so the
// caller can skip it; each matching size_* returns the byte count the
// encoder would produce.

// Encode a bool.
func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
	v := *structPointer_Bool(base, p.field)
	if v == nil {
		return ErrNil
	}
	x := 0
	if *v {
		x = 1
	}
	o.buf = append(o.buf, p.tagcode...)
	p.valEnc(o, uint64(x))
	return nil
}

func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
	v := *structPointer_BoolVal(base, p.field)
	if !v {
		return ErrNil
	}
	o.buf = append(o.buf, p.tagcode...)
	p.valEnc(o, 1)
	return nil
}

func size_bool(p *Properties, base structPointer) int {
	v := *structPointer_Bool(base, p.field)
	if v == nil {
		return 0
	}
	return len(p.tagcode) + 1 // each bool takes exactly one byte
}

func size_proto3_bool(p *Properties, base structPointer) int {
	v := *structPointer_BoolVal(base, p.field)
	if !v {
		return 0
	}
	return len(p.tagcode) + 1 // each bool takes exactly one byte
}

// Encode an int32.
func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
	v := structPointer_Word32(base, p.field)
	if word32_IsNil(v) {
		return ErrNil
	}
	x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
	o.buf = append(o.buf, p.tagcode...)
	p.valEnc(o, uint64(x))
	return nil
}

func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
	v := structPointer_Word32Val(base, p.field)
	x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
	if x == 0 {
		return ErrNil
	}
	o.buf = append(o.buf, p.tagcode...)
	p.valEnc(o, uint64(x))
	return nil
}

func size_int32(p *Properties, base structPointer) (n int) {
	v := structPointer_Word32(base, p.field)
	if word32_IsNil(v) {
		return 0
	}
	x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
	n += len(p.tagcode)
	n += p.valSize(uint64(x))
	return
}

func size_proto3_int32(p *Properties, base structPointer) (n int) {
	v := structPointer_Word32Val(base, p.field)
	x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
	if x == 0 {
		return 0
	}
	n += len(p.tagcode)
	n += p.valSize(uint64(x))
	return
}

// Encode a uint32.
// Exactly the same as int32, except for no sign extension.
func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
	v := structPointer_Word32(base, p.field)
	if word32_IsNil(v) {
		return ErrNil
	}
	x := word32_Get(v)
	o.buf = append(o.buf, p.tagcode...)
	p.valEnc(o, uint64(x))
	return nil
}

func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
	v := structPointer_Word32Val(base, p.field)
	x := word32Val_Get(v)
	if x == 0 {
		return ErrNil
	}
	o.buf = append(o.buf, p.tagcode...)
	p.valEnc(o, uint64(x))
	return nil
}

func size_uint32(p *Properties, base structPointer) (n int) {
	v := structPointer_Word32(base, p.field)
	if word32_IsNil(v) {
		return 0
	}
	x := word32_Get(v)
	n += len(p.tagcode)
	n += p.valSize(uint64(x))
	return
}

func size_proto3_uint32(p *Properties, base structPointer) (n int) {
	v := structPointer_Word32Val(base, p.field)
	x := word32Val_Get(v)
	if x == 0 {
		return 0
	}
	n += len(p.tagcode)
	n += p.valSize(uint64(x))
	return
}

// Encode an int64.
func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
	v := structPointer_Word64(base, p.field)
	if word64_IsNil(v) {
		return ErrNil
	}
	x := word64_Get(v)
	o.buf = append(o.buf, p.tagcode...)
	p.valEnc(o, x)
	return nil
}

func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
	v := structPointer_Word64Val(base, p.field)
	x := word64Val_Get(v)
	if x == 0 {
		return ErrNil
	}
	o.buf = append(o.buf, p.tagcode...)
	p.valEnc(o, x)
	return nil
}

func size_int64(p *Properties, base structPointer) (n int) {
	v := structPointer_Word64(base, p.field)
	if word64_IsNil(v) {
		return 0
	}
	x := word64_Get(v)
	n += len(p.tagcode)
	n += p.valSize(x)
	return
}

func size_proto3_int64(p *Properties, base structPointer) (n int) {
	v := structPointer_Word64Val(base, p.field)
	x := word64Val_Get(v)
	if x == 0 {
		return 0
	}
	n += len(p.tagcode)
	n += p.valSize(x)
	return
}

// Encode a string.
func (o *Buffer) enc_string(p *Properties, base structPointer) error {
	v := *structPointer_String(base, p.field)
	if v == nil {
		return ErrNil
	}
	x := *v
	o.buf = append(o.buf, p.tagcode...)
	o.EncodeStringBytes(x)
	return nil
}

func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
	v := *structPointer_StringVal(base, p.field)
	if v == "" {
		return ErrNil
	}
	o.buf = append(o.buf, p.tagcode...)
	o.EncodeStringBytes(v)
	return nil
}

func size_string(p *Properties, base structPointer) (n int) {
	v := *structPointer_String(base, p.field)
	if v == nil {
		return 0
	}
	x := *v
	n += len(p.tagcode)
	n += sizeStringBytes(x)
	return
}

func size_proto3_string(p *Properties, base structPointer) (n int) {
	v := *structPointer_StringVal(base, p.field)
	if v == "" {
		return 0
	}
	n += len(p.tagcode)
	n += sizeStringBytes(v)
	return
}

// All protocol buffer fields are nillable, but be careful.
func isNil(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
		return v.IsNil()
	}
	return false
}

// Encode a message struct.
func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
	var state errorState
	structp := structPointer_GetStructPointer(base, p.field)
	if structPointer_IsNil(structp) {
		return ErrNil
	}

	// Can the object marshal itself?
	if p.isMarshaler {
		m := structPointer_Interface(structp, p.stype).(Marshaler)
		data, err := m.Marshal()
		if err != nil && !state.shouldContinue(err, nil) {
			return err
		}
		o.buf = append(o.buf, p.tagcode...)
		o.EncodeRawBytes(data)
		return nil
	}

	o.buf = append(o.buf, p.tagcode...)
	return o.enc_len_struct(p.sprop, structp, &state)
}

func size_struct_message(p *Properties, base structPointer) int {
	structp := structPointer_GetStructPointer(base, p.field)
	if structPointer_IsNil(structp) {
		return 0
	}

	// Can the object marshal itself?
	if p.isMarshaler {
		m := structPointer_Interface(structp, p.stype).(Marshaler)
		data, _ := m.Marshal()
		n0 := len(p.tagcode)
		n1 := sizeRawBytes(data)
		return n0 + n1
	}

	n0 := len(p.tagcode)
	n1 := size_struct(p.sprop, structp)
	n2 := sizeVarint(uint64(n1)) // size of encoded length
	return n0 + n1 + n2
}

// Encode a group struct.
// Groups are bracketed by start-group/end-group tags rather than being
// length-delimited.
func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
	var state errorState
	b := structPointer_GetStructPointer(base, p.field)
	if structPointer_IsNil(b) {
		return ErrNil
	}

	o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
	err := o.enc_struct(p.sprop, b)
	if err != nil && !state.shouldContinue(err, nil) {
		return err
	}
	o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
	return state.err
}

func size_struct_group(p *Properties, base structPointer) (n int) {
	b := structPointer_GetStructPointer(base, p.field)
	if structPointer_IsNil(b) {
		return 0
	}

	n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
	n += size_struct(p.sprop, b)
	n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
	return
}

// Encode a slice of bools ([]bool).
func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
	s := *structPointer_BoolSlice(base, p.field)
	l := len(s)
	if l == 0 {
		return ErrNil
	}
	for _, x := range s {
		o.buf = append(o.buf, p.tagcode...)
		v := uint64(0)
		if x {
			v = 1
		}
		p.valEnc(o, v)
	}
	return nil
}

func size_slice_bool(p *Properties, base structPointer) int {
	s := *structPointer_BoolSlice(base, p.field)
	l := len(s)
	if l == 0 {
		return 0
	}
	return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
}

// Encode a slice of bools ([]bool) in packed format.
func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
	s := *structPointer_BoolSlice(base, p.field)
	l := len(s)
	if l == 0 {
		return ErrNil
	}
	o.buf = append(o.buf, p.tagcode...)
	o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
	for _, x := range s {
		v := uint64(0)
		if x {
			v = 1
		}
		p.valEnc(o, v)
	}
	return nil
}

func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
	s := *structPointer_BoolSlice(base, p.field)
	l := len(s)
	if l == 0 {
		return 0
	}
	n += len(p.tagcode)
	n += sizeVarint(uint64(l))
	n += l // each bool takes exactly one byte
	return
}

// Encode a slice of bytes ([]byte).
func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
	s := *structPointer_Bytes(base, p.field)
	if s == nil {
		return ErrNil
	}
	o.buf = append(o.buf, p.tagcode...)
	o.EncodeRawBytes(s)
	return nil
}

// proto3 variant: skips empty (not just nil) byte slices.
func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
	s := *structPointer_Bytes(base, p.field)
	if len(s) == 0 {
		return ErrNil
	}
	o.buf = append(o.buf, p.tagcode...)
	o.EncodeRawBytes(s)
	return nil
}

func size_slice_byte(p *Properties, base structPointer) (n int) {
	s := *structPointer_Bytes(base, p.field)
	if s == nil {
		return 0
	}
	n += len(p.tagcode)
	n += sizeRawBytes(s)
	return
}

func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
	s := *structPointer_Bytes(base, p.field)
	if len(s) == 0 {
		return 0
	}
	n += len(p.tagcode)
	n += sizeRawBytes(s)
	return
}

// Encode a slice of int32s ([]int32).
func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
	s := structPointer_Word32Slice(base, p.field)
	l := s.Len()
	if l == 0 {
		return ErrNil
	}
	for i := 0; i < l; i++ {
		o.buf = append(o.buf, p.tagcode...)
		x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
		p.valEnc(o, uint64(x))
	}
	return nil
}

func size_slice_int32(p *Properties, base structPointer) (n int) {
	s := structPointer_Word32Slice(base, p.field)
	l := s.Len()
	if l == 0 {
		return 0
	}
	for i := 0; i < l; i++ {
		n += len(p.tagcode)
		x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
		n += p.valSize(uint64(x))
	}
	return
}

// Encode a slice of int32s ([]int32) in packed format.
// Packed: one tag, then a length-delimited run of the encoded values.
func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
	s := structPointer_Word32Slice(base, p.field)
	l := s.Len()
	if l == 0 {
		return ErrNil
	}
	// TODO: Reuse a Buffer.
	buf := NewBuffer(nil)
	for i := 0; i < l; i++ {
		x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
		p.valEnc(buf, uint64(x))
	}

	o.buf = append(o.buf, p.tagcode...)
	o.EncodeVarint(uint64(len(buf.buf)))
	o.buf = append(o.buf, buf.buf...)
	return nil
}

func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
	s := structPointer_Word32Slice(base, p.field)
	l := s.Len()
	if l == 0 {
		return 0
	}
	var bufSize int
	for i := 0; i < l; i++ {
		x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
		bufSize += p.valSize(uint64(x))
	}

	n += len(p.tagcode)
	n += sizeVarint(uint64(bufSize))
	n += bufSize
	return
}

// Encode a slice of uint32s ([]uint32).
// Exactly the same as int32, except for no sign extension.
func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
	s := structPointer_Word32Slice(base, p.field)
	l := s.Len()
	if l == 0 {
		return ErrNil
	}
	for i := 0; i < l; i++ {
		o.buf = append(o.buf, p.tagcode...)
		x := s.Index(i)
		p.valEnc(o, uint64(x))
	}
	return nil
}

func size_slice_uint32(p *Properties, base structPointer) (n int) {
	s := structPointer_Word32Slice(base, p.field)
	l := s.Len()
	if l == 0 {
		return 0
	}
	for i := 0; i < l; i++ {
		n += len(p.tagcode)
		x := s.Index(i)
		n += p.valSize(uint64(x))
	}
	return
}

// Encode a slice of uint32s ([]uint32) in packed format.
// Exactly the same as int32, except for no sign extension.
func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
	s := structPointer_Word32Slice(base, p.field)
	l := s.Len()
	if l == 0 {
		return ErrNil
	}
	// TODO: Reuse a Buffer.
	buf := NewBuffer(nil)
	for i := 0; i < l; i++ {
		p.valEnc(buf, uint64(s.Index(i)))
	}

	o.buf = append(o.buf, p.tagcode...)
	o.EncodeVarint(uint64(len(buf.buf)))
	o.buf = append(o.buf, buf.buf...)
	return nil
}

func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
	s := structPointer_Word32Slice(base, p.field)
	l := s.Len()
	if l == 0 {
		return 0
	}
	var bufSize int
	for i := 0; i < l; i++ {
		bufSize += p.valSize(uint64(s.Index(i)))
	}

	n += len(p.tagcode)
	n += sizeVarint(uint64(bufSize))
	n += bufSize
	return
}

// Encode a slice of int64s ([]int64).
func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
	s := structPointer_Word64Slice(base, p.field)
	l := s.Len()
	if l == 0 {
		return ErrNil
	}
	for i := 0; i < l; i++ {
		o.buf = append(o.buf, p.tagcode...)
		p.valEnc(o, s.Index(i))
	}
	return nil
}

func size_slice_int64(p *Properties, base structPointer) (n int) {
	s := structPointer_Word64Slice(base, p.field)
	l := s.Len()
	if l == 0 {
		return 0
	}
	for i := 0; i < l; i++ {
		n += len(p.tagcode)
		n += p.valSize(s.Index(i))
	}
	return
}

// Encode a slice of int64s ([]int64) in packed format.
func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
	s := structPointer_Word64Slice(base, p.field)
	l := s.Len()
	if l == 0 {
		return ErrNil
	}
	// TODO: Reuse a Buffer.
	buf := NewBuffer(nil)
	for i := 0; i < l; i++ {
		p.valEnc(buf, s.Index(i))
	}

	o.buf = append(o.buf, p.tagcode...)
	o.EncodeVarint(uint64(len(buf.buf)))
	o.buf = append(o.buf, buf.buf...)
	return nil
}

func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
	s := structPointer_Word64Slice(base, p.field)
	l := s.Len()
	if l == 0 {
		return 0
	}
	var bufSize int
	for i := 0; i < l; i++ {
		bufSize += p.valSize(s.Index(i))
	}

	n += len(p.tagcode)
	n += sizeVarint(uint64(bufSize))
	n += bufSize
	return
}

// Encode a slice of slice of bytes ([][]byte).
func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
	ss := *structPointer_BytesSlice(base, p.field)
	l := len(ss)
	if l == 0 {
		return ErrNil
	}
	for i := 0; i < l; i++ {
		o.buf = append(o.buf, p.tagcode...)
		o.EncodeRawBytes(ss[i])
	}
	return nil
}

func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
	ss := *structPointer_BytesSlice(base, p.field)
	l := len(ss)
	if l == 0 {
		return 0
	}
	n += l * len(p.tagcode)
	for i := 0; i < l; i++ {
		n += sizeRawBytes(ss[i])
	}
	return
}

// Encode a slice of strings ([]string).
func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
	ss := *structPointer_StringSlice(base, p.field)
	l := len(ss)
	// Note: unlike the other slice encoders, an empty slice emits nothing
	// but does not return ErrNil.
	for i := 0; i < l; i++ {
		o.buf = append(o.buf, p.tagcode...)
		o.EncodeStringBytes(ss[i])
	}
	return nil
}

func size_slice_string(p *Properties, base structPointer) (n int) {
	ss := *structPointer_StringSlice(base, p.field)
	l := len(ss)
	n += l * len(p.tagcode)
	for i := 0; i < l; i++ {
		n += sizeStringBytes(ss[i])
	}
	return
}

// Encode a slice of message structs ([]*struct).
func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
	var state errorState
	s := structPointer_StructPointerSlice(base, p.field)
	l := s.Len()

	for i := 0; i < l; i++ {
		structp := s.Index(i)
		if structPointer_IsNil(structp) {
			return errRepeatedHasNil
		}

		// Can the object marshal itself?
		if p.isMarshaler {
			m := structPointer_Interface(structp, p.stype).(Marshaler)
			data, err := m.Marshal()
			if err != nil && !state.shouldContinue(err, nil) {
				return err
			}
			o.buf = append(o.buf, p.tagcode...)
			o.EncodeRawBytes(data)
			continue
		}

		o.buf = append(o.buf, p.tagcode...)
		err := o.enc_len_struct(p.sprop, structp, &state)
		if err != nil && !state.shouldContinue(err, nil) {
			if err == ErrNil {
				return errRepeatedHasNil
			}
			return err
		}
	}
	return state.err
}

func size_slice_struct_message(p *Properties, base structPointer) (n int) {
	s := structPointer_StructPointerSlice(base, p.field)
	l := s.Len()
	n += l * len(p.tagcode)
	for i := 0; i < l; i++ {
		structp := s.Index(i)
		if structPointer_IsNil(structp) {
			return // return the size up to this point
		}

		// Can the object marshal itself?
		if p.isMarshaler {
			m := structPointer_Interface(structp, p.stype).(Marshaler)
			data, _ := m.Marshal()
			n += len(p.tagcode)
			n += sizeRawBytes(data)
			continue
		}

		n0 := size_struct(p.sprop, structp)
		n1 := sizeVarint(uint64(n0)) // size of encoded length
		n += n0 + n1
	}
	return
}

// Encode a slice of group structs ([]*struct).
func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
	var state errorState
	s := structPointer_StructPointerSlice(base, p.field)
	l := s.Len()

	for i := 0; i < l; i++ {
		b := s.Index(i)
		if structPointer_IsNil(b) {
			return errRepeatedHasNil
		}

		o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))

		err := o.enc_struct(p.sprop, b)

		if err != nil && !state.shouldContinue(err, nil) {
			if err == ErrNil {
				return errRepeatedHasNil
			}
			return err
		}

		o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
	}
	return state.err
}

func size_slice_struct_group(p *Properties, base structPointer) (n int) {
	s := structPointer_StructPointerSlice(base, p.field)
	l := s.Len()

	n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
	n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
	for i := 0; i < l; i++ {
		b := s.Index(i)
		if structPointer_IsNil(b) {
			return // return size up to this point
		}

		n += size_struct(p.sprop, b)
	}
	return
}

// Encode an extension map.
func (o *Buffer) enc_map(p *Properties, base structPointer) error {
	v := *structPointer_ExtMap(base, p.field)
	if err := encodeExtensionMap(v); err != nil {
		return err
	}
	// Fast-path for common cases: zero or one extensions.
	if len(v) <= 1 {
		for _, e := range v {
			o.buf = append(o.buf, e.enc...)
		}
		return nil
	}

	// Sort keys to provide a deterministic encoding.
	keys := make([]int, 0, len(v))
	for k := range v {
		keys = append(keys, int(k))
	}
	sort.Ints(keys)

	for _, k := range keys {
		o.buf = append(o.buf, v[int32(k)].enc...)
	}
	return nil
}

func size_map(p *Properties, base structPointer) int {
	v := *structPointer_ExtMap(base, p.field)
	return sizeExtensionMap(v)
}

// Encode a map field.
func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
	var state errorState // XXX: or do we need to plumb this through?

	/*
		A map defined as
			map<key_type, value_type> map_field = N;
		is encoded in the same way as
			message MapFieldEntry {
				key_type key = 1;
				value_type value = 2;
			}
			repeated MapFieldEntry map_field = N;
	*/

	v := structPointer_Map(base, p.field, p.mtype).Elem() // map[K]V
	if v.Len() == 0 {
		return nil
	}

	keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)

	// enc writes one key/value pair using the scratch storage; the
	// caller fills keycopy/valcopy before each invocation.
	enc := func() error {
		if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
			return err
		}
		if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil {
			return err
		}
		return nil
	}

	// Sort keys for a deterministic encoding.
	keys := v.MapKeys()
	sort.Sort(mapKeys(keys))

	for _, key := range keys {
		val := v.MapIndex(key)

		keycopy.Set(key)
		valcopy.Set(val)

		o.buf = append(o.buf, p.tagcode...)
		if err := o.enc_len_thing(enc, &state); err != nil {
			return err
		}
	}
	return nil
}

func size_new_map(p *Properties, base structPointer) int {
	v := structPointer_Map(base, p.field, p.mtype).Elem() // map[K]V

	keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)

	n := 0
	for _, key := range v.MapKeys() {
		val := v.MapIndex(key)
		keycopy.Set(key)
		valcopy.Set(val)

		// Tag codes for key and val are the responsibility of the sub-sizer.
		keysize := p.mkeyprop.size(p.mkeyprop, keybase)
		valsize := p.mvalprop.size(p.mvalprop, valbase)
		entry := keysize + valsize
		// Add on tag code and length of map entry itself.
		n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
	}
	return n
}

// mapEncodeScratch returns a new reflect.Value matching the map's value type,
// and a structPointer suitable for passing to an encoder or sizer.
func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
	// Prepare addressable doubly-indirect placeholders for the key and value types.
	// This is needed because the element-type encoders expect **T, but the map iteration produces T.

	keycopy = reflect.New(mapType.Key()).Elem()                 // addressable K
	keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
	keyptr.Set(keycopy.Addr())                                  //
	keybase = toStructPointer(keyptr.Addr())                    // **K

	// Value types are more varied and require special handling.
	switch mapType.Elem().Kind() {
	case reflect.Slice:
		// []byte
		var dummy []byte
		valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
		valbase = toStructPointer(valcopy.Addr())
	case reflect.Ptr:
		// message; the generated field type is map[K]*Msg (so V is *Msg),
		// so we only need one level of indirection.
		valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
		valbase = toStructPointer(valcopy.Addr())
	default:
		// everything else
		valcopy = reflect.New(mapType.Elem()).Elem()                // addressable V
		valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
		valptr.Set(valcopy.Addr())                                  //
		valbase = toStructPointer(valptr.Addr())                    // **V
	}
	return
}

// Encode a struct.
func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
	var state errorState
	// Encode fields in tag order so that decoders may use optimizations
	// that depend on the ordering.
	// https://developers.google.com/protocol-buffers/docs/encoding#order
	for _, i := range prop.order {
		p := prop.Prop[i]
		if p.enc != nil {
			err := p.enc(o, p, base)
			if err != nil {
				if err == ErrNil {
					if p.Required && state.err == nil {
						state.err = &RequiredNotSetError{p.Name}
					}
				} else if err == errRepeatedHasNil {
					// Give more context to nil values in repeated fields.
					return errors.New("repeated field " + p.OrigName + " has nil element")
				} else if !state.shouldContinue(err, p) {
					return err
				}
			}
		}
	}

	// Add unrecognized fields at the end.
	if prop.unrecField.IsValid() {
		v := *structPointer_Bytes(base, prop.unrecField)
		if len(v) > 0 {
			o.buf = append(o.buf, v...)
		}
	}

	return state.err
}

func size_struct(prop *StructProperties, base structPointer) (n int) {
	for _, i := range prop.order {
		p := prop.Prop[i]
		if p.size != nil {
			n += p.size(p, base)
		}
	}

	// Add unrecognized fields at the end.
	if prop.unrecField.IsValid() {
		v := *structPointer_Bytes(base, prop.unrecField)
		n += len(v)
	}

	return
}

var zeroes [20]byte // longer than any conceivable sizeVarint

// Encode a struct, preceded by its encoded length (as a varint).
func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
	return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
}

// Encode something, preceded by its encoded length (as a varint).
// Four bytes are reserved up front for the length; once the payload is
// written the buffer is shifted to fit the actual varint size.
func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
	iLen := len(o.buf)
	o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
	iMsg := len(o.buf)
	err := enc()
	if err != nil && !state.shouldContinue(err, nil) {
		return err
	}
	lMsg := len(o.buf) - iMsg
	lLen := sizeVarint(uint64(lMsg))
	switch x := lLen - (iMsg - iLen); {
	case x > 0: // actual length is x bytes larger than the space we reserved
		// Move msg x bytes right.
		o.buf = append(o.buf, zeroes[:x]...)
		copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
	case x < 0: // actual length is x bytes smaller than the space we reserved
		// Move msg x bytes left.
		copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
		o.buf = o.buf[:len(o.buf)+x] // x is negative
	}
	// Encode the length in the reserved space.
	o.buf = o.buf[:iLen]
	o.EncodeVarint(uint64(lMsg))
	o.buf = o.buf[:len(o.buf)+lMsg]
	return state.err
}

// errorState maintains the first error that occurs and updates that error
// with additional context.
type errorState struct {
	err error
}

// shouldContinue reports whether encoding should continue upon encountering the
// given error. If the error is RequiredNotSetError, shouldContinue returns true
// and, if this is the first appearance of that error, remembers it for future
// reporting.
//
// If prop is not nil, it may update any error with additional context about the
// field with the error.
func (s *errorState) shouldContinue(err error, prop *Properties) bool {
	// Ignore unset required fields.
	reqNotSet, ok := err.(*RequiredNotSetError)
	if !ok {
		return false
	}
	if s.err == nil {
		if prop != nil {
			// Qualify the field name with the enclosing property name.
			err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field}
		}
		s.err = err
	}
	return true
}

================================================
FILE: vendor/github.com/golang/protobuf/proto/equal.go
================================================
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2011 The Go Authors.  All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // Protocol buffer comparison. // TODO: MessageSet. package proto import ( "bytes" "log" "reflect" "strings" ) /* Equal returns true iff protocol buffers a and b are equal. The arguments must both be pointers to protocol buffer structs. Equality is defined in this way: - Two messages are equal iff they are the same type, corresponding fields are equal, unknown field sets are equal, and extensions sets are equal. - Two set scalar fields are equal iff their values are equal. If the fields are of a floating-point type, remember that NaN != x for all x, including NaN. - Two repeated fields are equal iff their lengths are the same, and their corresponding elements are equal (a "bytes" field, although represented by []byte, is not a repeated field) - Two unset fields are equal. - Two unknown field sets are equal if their current encoded state is equal. - Two extension sets are equal iff they have corresponding elements that are pairwise equal. - Every other combination of things are not equal. The return value is undefined if a and b are not protocol buffers. 
*/ func Equal(a, b Message) bool { if a == nil || b == nil { return a == b } v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) if v1.Type() != v2.Type() { return false } if v1.Kind() == reflect.Ptr { if v1.IsNil() { return v2.IsNil() } if v2.IsNil() { return false } v1, v2 = v1.Elem(), v2.Elem() } if v1.Kind() != reflect.Struct { return false } return equalStruct(v1, v2) } // v1 and v2 are known to have the same type. func equalStruct(v1, v2 reflect.Value) bool { for i := 0; i < v1.NumField(); i++ { f := v1.Type().Field(i) if strings.HasPrefix(f.Name, "XXX_") { continue } f1, f2 := v1.Field(i), v2.Field(i) if f.Type.Kind() == reflect.Ptr { if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { // both unset continue } else if n1 != n2 { // set/unset mismatch return false } b1, ok := f1.Interface().(raw) if ok { b2 := f2.Interface().(raw) // RawMessage if !bytes.Equal(b1.Bytes(), b2.Bytes()) { return false } continue } f1, f2 = f1.Elem(), f2.Elem() } if !equalAny(f1, f2) { return false } } if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { em2 := v2.FieldByName("XXX_extensions") if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { return false } } uf := v1.FieldByName("XXX_unrecognized") if !uf.IsValid() { return true } u1 := uf.Bytes() u2 := v2.FieldByName("XXX_unrecognized").Bytes() if !bytes.Equal(u1, u2) { return false } return true } // v1 and v2 are known to have the same type. 
// equalAny compares two values of any supported protobuf field kind.
// v1 and v2 are known to have the same type.
func equalAny(v1, v2 reflect.Value) bool {
	if v1.Type() == protoMessageType {
		// Interface-typed message values: recurse through Equal so the
		// nil/typed-nil rules apply.
		m1, _ := v1.Interface().(Message)
		m2, _ := v2.Interface().(Message)
		return Equal(m1, m2)
	}
	switch v1.Kind() {
	case reflect.Bool:
		return v1.Bool() == v2.Bool()
	case reflect.Float32, reflect.Float64:
		// NOTE: uses ==, so NaN != NaN, per the contract documented on Equal.
		return v1.Float() == v2.Float()
	case reflect.Int32, reflect.Int64:
		return v1.Int() == v2.Int()
	case reflect.Map:
		if v1.Len() != v2.Len() {
			return false
		}
		for _, key := range v1.MapKeys() {
			val2 := v2.MapIndex(key)
			if !val2.IsValid() {
				// This key was not found in the second map.
				return false
			}
			if !equalAny(v1.MapIndex(key), val2) {
				return false
			}
		}
		return true
	case reflect.Ptr:
		return equalAny(v1.Elem(), v2.Elem())
	case reflect.Slice:
		if v1.Type().Elem().Kind() == reflect.Uint8 {
			// short circuit: []byte
			// A bytes field is a scalar, not a repeated field: nil and
			// empty are distinguished here, unlike repeated fields below.
			if v1.IsNil() != v2.IsNil() {
				return false
			}
			return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
		}

		if v1.Len() != v2.Len() {
			return false
		}
		for i := 0; i < v1.Len(); i++ {
			if !equalAny(v1.Index(i), v2.Index(i)) {
				return false
			}
		}
		return true
	case reflect.String:
		return v1.Interface().(string) == v2.Interface().(string)
	case reflect.Struct:
		return equalStruct(v1, v2)
	case reflect.Uint32, reflect.Uint64:
		return v1.Uint() == v2.Uint()
	}

	// unknown type, so not a protocol buffer
	log.Printf("proto: don't know how to compare %v", v1)
	return false
}

// equalExtensions compares two extension maps semantically.
// base is the struct type that the extensions are based on.
// em1 and em2 are extension maps.
func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
	if len(em1) != len(em2) {
		return false
	}

	for extNum, e1 := range em1 {
		e2, ok := em2[extNum]
		if !ok {
			return false
		}

		m1, m2 := e1.value, e2.value

		if m1 != nil && m2 != nil {
			// Both are unencoded.
			if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) {
				return false
			}
			continue
		}

		// At least one is encoded. To do a semantically correct comparison
		// we need to unmarshal them first.
var desc *ExtensionDesc if m := extensionMaps[base]; m != nil { desc = m[extNum] } if desc == nil { log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) continue } var err error if m1 == nil { m1, err = decodeExtension(e1.enc, desc) } if m2 == nil && err == nil { m2, err = decodeExtension(e2.enc, desc) } if err != nil { // The encoded form is invalid. log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) return false } if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) { return false } } return true } ================================================ FILE: vendor/github.com/golang/protobuf/proto/equal_test.go ================================================ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2011 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto_test import ( "testing" . "github.com/golang/protobuf/proto" pb "github.com/golang/protobuf/proto/testdata" ) // Four identical base messages. // The init function adds extensions to some of them. var messageWithoutExtension = &pb.MyMessage{Count: Int32(7)} var messageWithExtension1a = &pb.MyMessage{Count: Int32(7)} var messageWithExtension1b = &pb.MyMessage{Count: Int32(7)} var messageWithExtension2 = &pb.MyMessage{Count: Int32(7)} // Two messages with non-message extensions. var messageWithInt32Extension1 = &pb.MyMessage{Count: Int32(8)} var messageWithInt32Extension2 = &pb.MyMessage{Count: Int32(8)} func init() { ext1 := &pb.Ext{Data: String("Kirk")} ext2 := &pb.Ext{Data: String("Picard")} // messageWithExtension1a has ext1, but never marshals it. if err := SetExtension(messageWithExtension1a, pb.E_Ext_More, ext1); err != nil { panic("SetExtension on 1a failed: " + err.Error()) } // messageWithExtension1b is the unmarshaled form of messageWithExtension1a. if err := SetExtension(messageWithExtension1b, pb.E_Ext_More, ext1); err != nil { panic("SetExtension on 1b failed: " + err.Error()) } buf, err := Marshal(messageWithExtension1b) if err != nil { panic("Marshal of 1b failed: " + err.Error()) } messageWithExtension1b.Reset() if err := Unmarshal(buf, messageWithExtension1b); err != nil { panic("Unmarshal of 1b failed: " + err.Error()) } // messageWithExtension2 has ext2. 
if err := SetExtension(messageWithExtension2, pb.E_Ext_More, ext2); err != nil { panic("SetExtension on 2 failed: " + err.Error()) } if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(23)); err != nil { panic("SetExtension on Int32-1 failed: " + err.Error()) } if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(24)); err != nil { panic("SetExtension on Int32-2 failed: " + err.Error()) } } var EqualTests = []struct { desc string a, b Message exp bool }{ {"different types", &pb.GoEnum{}, &pb.GoTestField{}, false}, {"equal empty", &pb.GoEnum{}, &pb.GoEnum{}, true}, {"nil vs nil", nil, nil, true}, {"typed nil vs typed nil", (*pb.GoEnum)(nil), (*pb.GoEnum)(nil), true}, {"typed nil vs empty", (*pb.GoEnum)(nil), &pb.GoEnum{}, false}, {"different typed nil", (*pb.GoEnum)(nil), (*pb.GoTestField)(nil), false}, {"one set field, one unset field", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{}, false}, {"one set field zero, one unset field", &pb.GoTest{Param: Int32(0)}, &pb.GoTest{}, false}, {"different set fields", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("bar")}, false}, {"equal set", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("foo")}, true}, {"repeated, one set", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{}, false}, {"repeated, different length", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{F_Int32Repeated: []int32{2}}, false}, {"repeated, different value", &pb.GoTest{F_Int32Repeated: []int32{2}}, &pb.GoTest{F_Int32Repeated: []int32{3}}, false}, {"repeated, equal", &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, true}, {"repeated, nil equal nil", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: nil}, true}, {"repeated, nil equal empty", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: []int32{}}, true}, {"repeated, empty equal nil", &pb.GoTest{F_Int32Repeated: []int32{}}, 
&pb.GoTest{F_Int32Repeated: nil}, true}, { "nested, different", &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("foo")}}, &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("bar")}}, false, }, { "nested, equal", &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}}, &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}}, true, }, {"bytes", &pb.OtherMessage{Value: []byte("foo")}, &pb.OtherMessage{Value: []byte("foo")}, true}, {"bytes, empty", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: []byte{}}, true}, {"bytes, empty vs nil", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: nil}, false}, { "repeated bytes", &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}}, &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}}, true, }, {"extension vs. no extension", messageWithoutExtension, messageWithExtension1a, false}, {"extension vs. same extension", messageWithExtension1a, messageWithExtension1b, true}, {"extension vs. different extension", messageWithExtension1a, messageWithExtension2, false}, {"int32 extension vs. itself", messageWithInt32Extension1, messageWithInt32Extension1, true}, {"int32 extension vs. 
a different int32", messageWithInt32Extension1, messageWithInt32Extension2, false}, { "message with group", &pb.MyMessage{ Count: Int32(1), Somegroup: &pb.MyMessage_SomeGroup{ GroupField: Int32(5), }, }, &pb.MyMessage{ Count: Int32(1), Somegroup: &pb.MyMessage_SomeGroup{ GroupField: Int32(5), }, }, true, }, { "map same", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, true, }, { "map different entry", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, &pb.MessageWithMap{NameMapping: map[int32]string{2: "Rob"}}, false, }, { "map different key only", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, &pb.MessageWithMap{NameMapping: map[int32]string{2: "Ken"}}, false, }, { "map different value only", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}}, &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob"}}, false, }, } func TestEqual(t *testing.T) { for _, tc := range EqualTests { if res := Equal(tc.a, tc.b); res != tc.exp { t.Errorf("%v: Equal(%v, %v) = %v, want %v", tc.desc, tc.a, tc.b, res, tc.exp) } } } ================================================ FILE: vendor/github.com/golang/protobuf/proto/extensions.go ================================================ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. 
nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto /* * Types and routines for supporting protocol buffer extensions. */ import ( "errors" "fmt" "reflect" "strconv" "sync" ) // ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. var ErrMissingExtension = errors.New("proto: missing extension") // ExtensionRange represents a range of message extensions for a protocol buffer. // Used in code generated by the protocol compiler. type ExtensionRange struct { Start, End int32 // both inclusive } // extendableProto is an interface implemented by any protocol buffer that may be extended. type extendableProto interface { Message ExtensionRangeArray() []ExtensionRange ExtensionMap() map[int32]Extension } var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() // ExtensionDesc represents an extension specification. // Used in generated code from the protocol compiler. 
type ExtensionDesc struct {
	ExtendedType  Message     // nil pointer to the type that is being extended
	ExtensionType interface{} // nil pointer to the extension type
	Field         int32       // field number
	Name          string      // fully-qualified name of extension, for text formatting
	Tag           string      // protobuf tag style
}

// repeated reports whether the extension is a repeated field.
// A []byte extension type is a scalar bytes field, not a repeated field.
func (ed *ExtensionDesc) repeated() bool {
	t := reflect.TypeOf(ed.ExtensionType)
	return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
}

// Extension represents an extension in a message.
type Extension struct {
	// When an extension is stored in a message using SetExtension
	// only desc and value are set. When the message is marshaled
	// enc will be set to the encoded form of the message.
	//
	// When a message is unmarshaled and contains extensions, each
	// extension will have only enc set. When such an extension is
	// accessed using GetExtension (or GetExtensions) desc and value
	// will be set.
	desc  *ExtensionDesc // descriptor, set once the extension is decoded
	value interface{}    // decoded value, nil while only the encoded form exists
	enc   []byte         // encoded (wire-format) form, dropped after decoding
}

// SetRawExtension is for testing only.
// It installs raw encoded bytes as the extension with the given field number.
func SetRawExtension(base extendableProto, id int32, b []byte) {
	base.ExtensionMap()[id] = Extension{enc: b}
}

// isExtensionField returns true iff the given field number is in an extension range.
func isExtensionField(pb extendableProto, field int32) bool {
	for _, er := range pb.ExtensionRangeArray() {
		// Both Start and End are inclusive.
		if er.Start <= field && field <= er.End {
			return true
		}
	}
	return false
}

// checkExtensionTypes checks that the given extension is valid for pb:
// pb must be the type the descriptor extends, and the field number must
// fall within one of pb's declared extension ranges.
func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
	// Check the extended type.
	if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
		return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
	}
	// Check the range.
	if !isExtensionField(pb, extension.Field) {
		return errors.New("proto: bad extension number; not in declared ranges")
	}
	return nil
}

// extPropKey is sufficient to uniquely identify an extension.
type extPropKey struct {
	base  reflect.Type // type being extended
	field int32        // extension field number
}

// extProp caches Properties for extension descriptors, guarded by an RWMutex.
var extProp = struct {
	sync.RWMutex
	m map[extPropKey]*Properties
}{
	m: make(map[extPropKey]*Properties),
}

// extensionProperties returns the (cached) Properties for the given
// extension descriptor, initializing and caching them on first use.
func extensionProperties(ed *ExtensionDesc) *Properties {
	key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}

	// Fast path: read lock for the common cache-hit case.
	extProp.RLock()
	if prop, ok := extProp.m[key]; ok {
		extProp.RUnlock()
		return prop
	}
	extProp.RUnlock()

	extProp.Lock()
	defer extProp.Unlock()
	// Check again.
	// (Another goroutine may have populated the entry between the
	// RUnlock above and the Lock here.)
	if prop, ok := extProp.m[key]; ok {
		return prop
	}

	prop := new(Properties)
	prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
	extProp.m[key] = prop
	return prop
}

// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.
func encodeExtensionMap(m map[int32]Extension) error {
	for k, e := range m {
		if e.value == nil || e.desc == nil {
			// Extension is only in its encoded form.
			continue
		}

		// We don't skip extensions that have an encoded form set,
		// because the extension value may have been mutated after
		// the last time this function was called.

		et := reflect.TypeOf(e.desc.ExtensionType)
		props := extensionProperties(e.desc)

		p := NewBuffer(nil)
		// If e.value has type T, the encoder expects a *struct{ X T }.
		// Pass a *T with a zero field and hope it all works out.
		x := reflect.New(et)
		x.Elem().Set(reflect.ValueOf(e.value))
		if err := props.enc(p, props, toStructPointer(x)); err != nil {
			return err
		}
		// Store the freshly encoded bytes back; e is a copy, so write
		// the updated Extension into the map explicitly.
		e.enc = p.buf
		m[k] = e
	}
	return nil
}

// sizeExtensionMap returns the encoded size of all extensions in m.
func sizeExtensionMap(m map[int32]Extension) (n int) {
	for _, e := range m {
		if e.value == nil || e.desc == nil {
			// Extension is only in its encoded form.
			n += len(e.enc)
			continue
		}

		// We don't skip extensions that have an encoded form set,
		// because the extension value may have been mutated after
		// the last time this function was called.

		et := reflect.TypeOf(e.desc.ExtensionType)
		props := extensionProperties(e.desc)

		// If e.value has type T, the encoder expects a *struct{ X T }.
		// Pass a *T with a zero field and hope it all works out.
		x := reflect.New(et)
		x.Elem().Set(reflect.ValueOf(e.value))
		n += props.size(props, toStructPointer(x))
	}
	return
}

// HasExtension returns whether the given extension is present in pb.
func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
	// TODO: Check types, field numbers, etc.?
	_, ok := pb.ExtensionMap()[extension.Field]
	return ok
}

// ClearExtension removes the given extension from pb.
func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
	// TODO: Check types, field numbers, etc.?
	delete(pb.ExtensionMap(), extension.Field)
}

// GetExtension parses and returns the given extension of pb.
// If the extension is not present it returns ErrMissingExtension.
func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
	if err := checkExtensionTypes(pb, extension); err != nil {
		return nil, err
	}

	emap := pb.ExtensionMap()
	e, ok := emap[extension.Field]
	if !ok {
		return nil, ErrMissingExtension
	}
	if e.value != nil {
		// Already decoded. Check the descriptor, though.
		if e.desc != extension {
			// This shouldn't happen. If it does, it means that
			// GetExtension was called twice with two different
			// descriptors with the same field number.
			return nil, errors.New("proto: descriptor conflict")
		}
		return e.value, nil
	}

	v, err := decodeExtension(e.enc, extension)
	if err != nil {
		return nil, err
	}

	// Remember the decoded version and drop the encoded version.
	// That way it is safe to mutate what we return.
	e.value = v
	e.desc = extension
	e.enc = nil
	emap[extension.Field] = e
	return e.value, nil
}

// decodeExtension decodes an extension encoded in b.
func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
	o := NewBuffer(b)

	t := reflect.TypeOf(extension.ExtensionType)
	rep := extension.repeated()

	props := extensionProperties(extension)

	// t is a pointer to a struct, pointer to basic type or a slice.
	// Allocate a "field" to store the pointer/slice itself; the
	// pointer/slice will be stored here. We pass
	// the address of this field to props.dec.
	// This passes a zero field and a *t and lets props.dec
	// interpret it as a *struct{ x t }.
	value := reflect.New(t).Elem()

	for {
		// Discard wire type and field number varint. It isn't needed.
		if _, err := o.DecodeVarint(); err != nil {
			return nil, err
		}

		if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
			return nil, err
		}

		// Repeated fields keep consuming elements until the buffer is
		// exhausted; scalar fields decode exactly one value.
		if !rep || o.index >= len(o.buf) {
			break
		}
	}
	return value.Interface(), nil
}

// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
// The returned slice has the same length as es; missing extensions will appear as nil elements.
func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
	epb, ok := pb.(extendableProto)
	if !ok {
		err = errors.New("proto: not an extendable proto")
		return
	}
	extensions = make([]interface{}, len(es))
	for i, e := range es {
		extensions[i], err = GetExtension(epb, e)
		if err == ErrMissingExtension {
			// Missing extensions are reported as nil elements, not errors.
			err = nil
		}
		if err != nil {
			return
		}
	}
	return
}

// SetExtension sets the specified extension of pb to the specified value.
func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
	if err := checkExtensionTypes(pb, extension); err != nil {
		return err
	}
	typ := reflect.TypeOf(extension.ExtensionType)
	if typ != reflect.TypeOf(value) {
		return errors.New("proto: bad extension value type")
	}
	// nil extension values need to be caught early, because the
	// encoder can't distinguish an ErrNil due to a nil extension
	// from an ErrNil due to a missing field. Extensions are
	// always optional, so the encoder would just swallow the error
	// and drop all the extensions from the encoded message.
	if reflect.ValueOf(value).IsNil() {
		return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
	}

	pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
	return nil
}

// A global registry of extensions.
// The generated code will register the generated descriptors by calling RegisterExtension. var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) // RegisterExtension is called from the generated code. func RegisterExtension(desc *ExtensionDesc) { st := reflect.TypeOf(desc.ExtendedType).Elem() m := extensionMaps[st] if m == nil { m = make(map[int32]*ExtensionDesc) extensionMaps[st] = m } if _, ok := m[desc.Field]; ok { panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) } m[desc.Field] = desc } // RegisteredExtensions returns a map of the registered extensions of a // protocol buffer struct, indexed by the extension number. // The argument pb should be a nil pointer to the struct type. func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { return extensionMaps[reflect.TypeOf(pb).Elem()] } ================================================ FILE: vendor/github.com/golang/protobuf/proto/extensions_test.go ================================================ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2014 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto_test import ( "testing" "github.com/golang/protobuf/proto" pb "github.com/golang/protobuf/proto/testdata" ) func TestGetExtensionsWithMissingExtensions(t *testing.T) { msg := &pb.MyMessage{} ext1 := &pb.Ext{} if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { t.Fatalf("Could not set ext1: %s", ext1) } exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{ pb.E_Ext_More, pb.E_Ext_Text, }) if err != nil { t.Fatalf("GetExtensions() failed: %s", err) } if exts[0] != ext1 { t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0]) } if exts[1] != nil { t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1]) } } func TestGetExtensionStability(t *testing.T) { check := func(m *pb.MyMessage) bool { ext1, err := proto.GetExtension(m, pb.E_Ext_More) if err != nil { t.Fatalf("GetExtension() failed: %s", err) } ext2, err := proto.GetExtension(m, pb.E_Ext_More) if err != nil { t.Fatalf("GetExtension() failed: %s", err) } return ext1 == ext2 } msg := &pb.MyMessage{Count: proto.Int32(4)} ext0 := &pb.Ext{} if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil { t.Fatalf("Could not set ext1: %s", ext0) } if !check(msg) { 
t.Errorf("GetExtension() not stable before marshaling") } bb, err := proto.Marshal(msg) if err != nil { t.Fatalf("Marshal() failed: %s", err) } msg1 := &pb.MyMessage{} err = proto.Unmarshal(bb, msg1) if err != nil { t.Fatalf("Unmarshal() failed: %s", err) } if !check(msg1) { t.Errorf("GetExtension() not stable after unmarshaling") } } func TestExtensionsRoundTrip(t *testing.T) { msg := &pb.MyMessage{} ext1 := &pb.Ext{ Data: proto.String("hi"), } ext2 := &pb.Ext{ Data: proto.String("there"), } exists := proto.HasExtension(msg, pb.E_Ext_More) if exists { t.Error("Extension More present unexpectedly") } if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil { t.Error(err) } if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil { t.Error(err) } e, err := proto.GetExtension(msg, pb.E_Ext_More) if err != nil { t.Error(err) } x, ok := e.(*pb.Ext) if !ok { t.Errorf("e has type %T, expected testdata.Ext", e) } else if *x.Data != "there" { t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x) } proto.ClearExtension(msg, pb.E_Ext_More) if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension { t.Errorf("got %v, expected ErrMissingExtension", e) } if _, err := proto.GetExtension(msg, pb.E_X215); err == nil { t.Error("expected bad extension error, got nil") } if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil { t.Error("expected extension err") } if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil { t.Error("expected some sort of type mismatch error, got nil") } } func TestNilExtension(t *testing.T) { msg := &pb.MyMessage{ Count: proto.Int32(1), } if err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String("hello")); err != nil { t.Fatal(err) } if err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil { t.Error("expected SetExtension to fail due to a nil extension") } else if want := "proto: SetExtension called with nil value of type *testdata.Ext"; err.Error() != 
want { t.Errorf("expected error %v, got %v", want, err) } // Note: if the behavior of Marshal is ever changed to ignore nil extensions, update // this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal. } ================================================ FILE: vendor/github.com/golang/protobuf/proto/lib.go ================================================ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. /* Package proto converts data structures to and from the wire format of protocol buffers. It works in concert with the Go source code generated for .proto files by the protocol compiler. A summary of the properties of the protocol buffer interface for a protocol buffer variable v: - Names are turned from camel_case to CamelCase for export. - There are no methods on v to set fields; just treat them as structure fields. - There are getters that return a field's value if set, and return the field's default value if unset. The getters work even if the receiver is a nil message. - The zero value for a struct is its correct initialization state. All desired fields must be set before marshaling. - A Reset() method will restore a protobuf struct to its zero state. - Non-repeated fields are pointers to the values; nil means unset. That is, optional or required field int32 f becomes F *int32. - Repeated fields are slices. - Helper functions are available to aid the setting of fields. msg.Foo = proto.String("hello") // set field - Constants are defined to hold the default values of all fields that have them. They have the form Default_StructName_FieldName. Because the getter methods handle defaulted values, direct use of these constants should be rare. - Enums are given type names and maps from names to values. Enum values are prefixed by the enclosing message's name, or by the enum's type name if it is a top-level enum. 
Enum types have a String method, and a Enum method to assist in message construction. - Nested messages, groups and enums have type names prefixed with the name of the surrounding message type. - Extensions are given descriptor names that start with E_, followed by an underscore-delimited list of the nested messages that contain it (if any) followed by the CamelCased name of the extension field itself. HasExtension, ClearExtension, GetExtension and SetExtension are functions for manipulating extensions. - Marshal and Unmarshal are functions to encode and decode the wire format. The simplest way to describe this is to see an example. Given file test.proto, containing package example; enum FOO { X = 17; } message Test { required string label = 1; optional int32 type = 2 [default=77]; repeated int64 reps = 3; optional group OptionalGroup = 4 { required string RequiredField = 5; } } The resulting file, test.pb.go, is: package example import proto "github.com/golang/protobuf/proto" import math "math" type FOO int32 const ( FOO_X FOO = 17 ) var FOO_name = map[int32]string{ 17: "X", } var FOO_value = map[string]int32{ "X": 17, } func (x FOO) Enum() *FOO { p := new(FOO) *p = x return p } func (x FOO) String() string { return proto.EnumName(FOO_name, int32(x)) } func (x *FOO) UnmarshalJSON(data []byte) error { value, err := proto.UnmarshalJSONEnum(FOO_value, data) if err != nil { return err } *x = FOO(value) return nil } type Test struct { Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` XXX_unrecognized []byte `json:"-"` } func (m *Test) Reset() { *m = Test{} } func (m *Test) String() string { return proto.CompactTextString(m) } func (*Test) ProtoMessage() {} const Default_Test_Type int32 = 77 
func (m *Test) GetLabel() string { if m != nil && m.Label != nil { return *m.Label } return "" } func (m *Test) GetType() int32 { if m != nil && m.Type != nil { return *m.Type } return Default_Test_Type } func (m *Test) GetOptionalgroup() *Test_OptionalGroup { if m != nil { return m.Optionalgroup } return nil } type Test_OptionalGroup struct { RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` } func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } func (m *Test_OptionalGroup) GetRequiredField() string { if m != nil && m.RequiredField != nil { return *m.RequiredField } return "" } func init() { proto.RegisterEnum("example.FOO", FOO_name, FOO_value) } To create and play with a Test object: package main import ( "log" "github.com/golang/protobuf/proto" pb "./example.pb" ) func main() { test := &pb.Test{ Label: proto.String("hello"), Type: proto.Int32(17), Optionalgroup: &pb.Test_OptionalGroup{ RequiredField: proto.String("good bye"), }, } data, err := proto.Marshal(test) if err != nil { log.Fatal("marshaling error: ", err) } newTest := &pb.Test{} err = proto.Unmarshal(data, newTest) if err != nil { log.Fatal("unmarshaling error: ", err) } // Now test and newTest contain the same data. if test.GetLabel() != newTest.GetLabel() { log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) } // etc. } */ package proto import ( "encoding/json" "fmt" "log" "reflect" "strconv" "sync" ) // Message is implemented by generated protocol buffer messages. type Message interface { Reset() String() string ProtoMessage() } // Stats records allocation details about the protocol buffer encoders // and decoders. Useful for tuning the library itself. 
type Stats struct {
	Emalloc uint64 // mallocs in encode
	Dmalloc uint64 // mallocs in decode
	Encode  uint64 // number of encodes
	Decode  uint64 // number of decodes
	Chit    uint64 // number of cache hits
	Cmiss   uint64 // number of cache misses
	Size    uint64 // number of sizes
}

// Set to true to enable stats collection.
const collectStats = false

// stats is the package-wide counter bundle; only updated when collectStats
// is enabled at build time.
var stats Stats

// GetStats returns a copy of the global Stats structure.
func GetStats() Stats {
	return stats
}

// A Buffer is a buffer manager for marshaling and unmarshaling
// protocol buffers. It may be reused between invocations to
// reduce memory usage. It is not necessary to use a Buffer;
// the global functions Marshal and Unmarshal create a
// temporary Buffer and are fine for most applications.
type Buffer struct {
	buf   []byte // encode/decode byte stream
	index int    // write point

	// pools of basic types to amortize allocation.
	bools   []bool
	uint32s []uint32
	uint64s []uint64

	// extra pools, only used with pointer_reflect.go
	int32s   []int32
	int64s   []int64
	float32s []float32
	float64s []float64
}

// NewBuffer allocates a new Buffer and initializes its internal data to
// the contents of the argument slice.
func NewBuffer(e []byte) *Buffer {
	return &Buffer{buf: e}
}

// Reset resets the Buffer, ready for marshaling a new protocol buffer.
// The backing storage is retained so it can be reused.
func (p *Buffer) Reset() {
	p.buf = p.buf[:0] // for reading/writing
	p.index = 0       // for reading
}

// SetBuf replaces the internal buffer with the slice,
// ready for unmarshaling the contents of the slice.
func (p *Buffer) SetBuf(s []byte) {
	p.buf = s
	p.index = 0
}

// Bytes returns the contents of the Buffer.
func (p *Buffer) Bytes() []byte {
	return p.buf
}

/*
 * Helper routines for simplifying the creation of optional fields of basic type.
 */

// Bool is a helper routine that allocates a new bool value
// to store v and returns a pointer to it.
func Bool(v bool) *bool {
	return &v
}

// Int32 is a helper routine that allocates a new int32 value
// to store v and returns a pointer to it.
func Int32(v int32) *int32 {
	return &v
}

// Int is a helper routine that allocates a new int32 value
// to store v and returns a pointer to it, but unlike Int32
// its argument value is an int.
func Int(v int) *int32 {
	p := new(int32)
	*p = int32(v)
	return p
}

// Int64 is a helper routine that allocates a new int64 value
// to store v and returns a pointer to it.
func Int64(v int64) *int64 {
	return &v
}

// Float32 is a helper routine that allocates a new float32 value
// to store v and returns a pointer to it.
func Float32(v float32) *float32 {
	return &v
}

// Float64 is a helper routine that allocates a new float64 value
// to store v and returns a pointer to it.
func Float64(v float64) *float64 {
	return &v
}

// Uint32 is a helper routine that allocates a new uint32 value
// to store v and returns a pointer to it.
func Uint32(v uint32) *uint32 {
	return &v
}

// Uint64 is a helper routine that allocates a new uint64 value
// to store v and returns a pointer to it.
func Uint64(v uint64) *uint64 {
	return &v
}

// String is a helper routine that allocates a new string value
// to store v and returns a pointer to it.
func String(v string) *string {
	return &v
}

// EnumName is a helper function to simplify printing protocol buffer enums
// by name. Given an enum map and a value, it returns a useful string.
func EnumName(m map[int32]string, v int32) string {
	s, ok := m[v]
	if ok {
		return s
	}
	// Unknown value: fall back to its decimal representation.
	return strconv.Itoa(int(v))
}

// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
// from their JSON-encoded representation. Given a map from the enum's symbolic
// names to its int values, and a byte buffer containing the JSON-encoded
// value, it returns an int32 that can be cast to the enum type by the caller.
//
// The function can deal with both JSON representations, numeric and symbolic.
//
// NOTE(review): data must be non-empty — data[0] panics on an empty slice.
// Callers (generated UnmarshalJSON methods) appear to guarantee this; confirm.
func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
	if data[0] == '"' {
		// New style: enums are strings.
		var repr string
		if err := json.Unmarshal(data, &repr); err != nil {
			return -1, err
		}
		val, ok := m[repr]
		if !ok {
			return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
		}
		return val, nil
	}
	// Old style: enums are ints.
	var val int32
	if err := json.Unmarshal(data, &val); err != nil {
		return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
	}
	return val, nil
}

// DebugPrint dumps the encoded data in b in a debugging format with a header
// including the string s. Used in testing but made available for general debugging.
func (p *Buffer) DebugPrint(s string, b []byte) {
	var u uint64

	// Save the Buffer's state so it can be restored on exit; b is decoded
	// using p's own decode helpers.
	obuf := p.buf
	index := p.index
	p.buf = b
	p.index = 0
	depth := 0

	fmt.Printf("\n--- %s ---\n", s)

out:
	for {
		for i := 0; i < depth; i++ {
			fmt.Print(" ")
		}

		// Shadows the saved index above; records this field's start offset.
		index := p.index
		if index == len(p.buf) {
			break
		}

		op, err := p.DecodeVarint()
		if err != nil {
			fmt.Printf("%3d: fetching op err %v\n", index, err)
			break out
		}
		tag := op >> 3
		wire := op & 7

		switch wire {
		default:
			fmt.Printf("%3d: t=%3d unknown wire=%d\n", index, tag, wire)
			break out

		case WireBytes:
			var r []byte

			r, err = p.DecodeRawBytes(false)
			if err != nil {
				break out
			}
			fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
			if len(r) <= 6 {
				for i := 0; i < len(r); i++ {
					fmt.Printf(" %.2x", r[i])
				}
			} else {
				// Long payload: show only the first and last three bytes.
				for i := 0; i < 3; i++ {
					fmt.Printf(" %.2x", r[i])
				}
				fmt.Printf(" ..")
				for i := len(r) - 3; i < len(r); i++ {
					fmt.Printf(" %.2x", r[i])
				}
			}
			fmt.Printf("\n")

		case WireFixed32:
			u, err = p.DecodeFixed32()
			if err != nil {
				fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
				break out
			}
			fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)

		case WireFixed64:
			u, err = p.DecodeFixed64()
			if err != nil {
				fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
				break out
			}
			fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
			break // redundant: Go switch cases do not fall through

		case WireVarint:
			u, err = p.DecodeVarint()
			if err != nil {
				fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
				break out
			}
			fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)

		case WireStartGroup:
			// NOTE(review): err is already known nil here (checked after
			// DecodeVarint above), so this check is dead code kept from upstream.
			if err != nil {
				fmt.Printf("%3d: t=%3d start err %v\n", index, tag, err)
				break out
			}
			fmt.Printf("%3d: t=%3d start\n", index, tag)
			depth++

		case WireEndGroup:
			depth--
			// NOTE(review): same dead err check as WireStartGroup.
			if err != nil {
				fmt.Printf("%3d: t=%3d end err %v\n", index, tag, err)
				break out
			}
			fmt.Printf("%3d: t=%3d end\n", index, tag)
		}
	}

	if depth != 0 {
		fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
	}
	fmt.Printf("\n")

	// Restore the state saved on entry (the outer, unshadowed index).
	p.buf = obuf
	p.index = index
}

// SetDefaults sets unset protocol buffer fields to their default values.
// It only modifies fields that are both unset and have defined defaults.
// It recursively sets default values in any non-nil sub-messages.
func SetDefaults(pb Message) {
	setDefaults(reflect.ValueOf(pb), true, false)
}

// v is a pointer to a struct.
func setDefaults(v reflect.Value, recur, zeros bool) {
	v = v.Elem()

	// Look up — or lazily build and cache — the default-value metadata
	// for this message type.
	defaultMu.RLock()
	dm, ok := defaults[v.Type()]
	defaultMu.RUnlock()
	if !ok {
		dm = buildDefaultMessage(v.Type())
		defaultMu.Lock()
		defaults[v.Type()] = dm
		defaultMu.Unlock()
	}

	for _, sf := range dm.scalars {
		f := v.Field(sf.index)
		if !f.IsNil() {
			// field already set
			continue
		}
		dv := sf.value
		if dv == nil && !zeros {
			// no explicit default, and don't want to set zeros
			continue
		}
		fptr := f.Addr().Interface() // **T
		// TODO: Consider batching the allocations we do here.
		switch sf.kind {
		case reflect.Bool:
			b := new(bool)
			if dv != nil {
				*b = dv.(bool)
			}
			*(fptr.(**bool)) = b
		case reflect.Float32:
			f := new(float32)
			if dv != nil {
				*f = dv.(float32)
			}
			*(fptr.(**float32)) = f
		case reflect.Float64:
			f := new(float64)
			if dv != nil {
				*f = dv.(float64)
			}
			*(fptr.(**float64)) = f
		case reflect.Int32:
			// might be an enum
			if ft := f.Type(); ft != int32PtrType {
				// enum
				f.Set(reflect.New(ft.Elem()))
				if dv != nil {
					f.Elem().SetInt(int64(dv.(int32)))
				}
			} else {
				// int32 field
				i := new(int32)
				if dv != nil {
					*i = dv.(int32)
				}
				*(fptr.(**int32)) = i
			}
		case reflect.Int64:
			i := new(int64)
			if dv != nil {
				*i = dv.(int64)
			}
			*(fptr.(**int64)) = i
		case reflect.String:
			s := new(string)
			if dv != nil {
				*s = dv.(string)
			}
			*(fptr.(**string)) = s
		case reflect.Uint8:
			// exceptional case: []byte
			var b []byte
			if dv != nil {
				db := dv.([]byte)
				b = make([]byte, len(db))
				copy(b, db)
			} else {
				b = []byte{}
			}
			*(fptr.(*[]byte)) = b
		case reflect.Uint32:
			u := new(uint32)
			if dv != nil {
				*u = dv.(uint32)
			}
			*(fptr.(**uint32)) = u
		case reflect.Uint64:
			u := new(uint64)
			if dv != nil {
				*u = dv.(uint64)
			}
			*(fptr.(**uint64)) = u
		default:
			log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
		}
	}

	// Recurse into nested messages (pointer, repeated, and map-valued fields).
	for _, ni := range dm.nested {
		f := v.Field(ni)
		// f is *T or []*T or map[T]*T
		switch f.Kind() {
		case reflect.Ptr:
			if f.IsNil() {
				continue
			}
			setDefaults(f, recur, zeros)

		case reflect.Slice:
			for i := 0; i < f.Len(); i++ {
				e := f.Index(i)
				if e.IsNil() {
					continue
				}
				setDefaults(e, recur, zeros)
			}

		case reflect.Map:
			for _, k := range f.MapKeys() {
				e := f.MapIndex(k)
				if e.IsNil() {
					continue
				}
				setDefaults(e, recur, zeros)
			}
		}
	}
}

var (
	// defaults maps a protocol buffer struct type to a slice of the fields,
	// with its scalar fields set to their proto-declared non-zero default values.
	defaultMu sync.RWMutex
	defaults  = make(map[reflect.Type]defaultMessage)

	int32PtrType = reflect.TypeOf((*int32)(nil))
)

// defaultMessage represents information about the default values of a message.
type defaultMessage struct {
	scalars []scalarField
	nested  []int // struct field index of nested messages
}

type scalarField struct {
	index int          // struct field index
	kind  reflect.Kind // element type (the T in *T or []T)
	value interface{}  // the proto-declared default value, or nil
}

// t is a struct type.
func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
	sprop := GetProperties(t)
	for _, prop := range sprop.Prop {
		fi, ok := sprop.decoderTags.get(prop.Tag)
		if !ok {
			// XXX_unrecognized
			continue
		}
		ft := t.Field(fi).Type

		// Classify the field: only pointer-to-scalar and []byte fields can
		// carry proto2 defaults; message-typed fields are recorded for recursion.
		var canHaveDefault, nestedMessage bool
		switch ft.Kind() {
		case reflect.Ptr:
			if ft.Elem().Kind() == reflect.Struct {
				nestedMessage = true
			} else {
				canHaveDefault = true // proto2 scalar field
			}

		case reflect.Slice:
			switch ft.Elem().Kind() {
			case reflect.Ptr:
				nestedMessage = true // repeated message
			case reflect.Uint8:
				canHaveDefault = true // bytes field
			}

		case reflect.Map:
			if ft.Elem().Kind() == reflect.Ptr {
				nestedMessage = true // map with message values
			}
		}

		if !canHaveDefault {
			if nestedMessage {
				dm.nested = append(dm.nested, fi)
			}
			continue
		}

		sf := scalarField{
			index: fi,
			kind:  ft.Elem().Kind(),
		}

		// scalar fields without defaults
		if !prop.HasDefault {
			dm.scalars = append(dm.scalars, sf)
			continue
		}

		// a scalar field: either *T or []byte
		switch ft.Elem().Kind() {
		case reflect.Bool:
			x, err := strconv.ParseBool(prop.Default)
			if err != nil {
				log.Printf("proto: bad default bool %q: %v", prop.Default, err)
				continue
			}
			sf.value = x
		case reflect.Float32:
			x, err := strconv.ParseFloat(prop.Default, 32)
			if err != nil {
				log.Printf("proto: bad default float32 %q: %v", prop.Default, err)
				continue
			}
			sf.value = float32(x)
		case reflect.Float64:
			x, err := strconv.ParseFloat(prop.Default, 64)
			if err != nil {
				log.Printf("proto: bad default float64 %q: %v", prop.Default, err)
				continue
			}
			sf.value = x
		case reflect.Int32:
			x, err := strconv.ParseInt(prop.Default, 10, 32)
			if err != nil {
				log.Printf("proto: bad default int32 %q: %v", prop.Default, err)
				continue
			}
			sf.value = int32(x)
		case reflect.Int64:
			x, err := strconv.ParseInt(prop.Default, 10, 64)
			if err != nil {
				log.Printf("proto: bad default int64 %q: %v", prop.Default, err)
				continue
			}
			sf.value = x
		case reflect.String:
			sf.value = prop.Default
		case reflect.Uint8:
			// []byte (not *uint8)
			sf.value = []byte(prop.Default)
		case reflect.Uint32:
			x, err := strconv.ParseUint(prop.Default, 10, 32)
			if err != nil {
				log.Printf("proto: bad default uint32 %q: %v", prop.Default, err)
				continue
			}
			sf.value = uint32(x)
		case reflect.Uint64:
			x, err := strconv.ParseUint(prop.Default, 10, 64)
			if err != nil {
				log.Printf("proto: bad default uint64 %q: %v", prop.Default, err)
				continue
			}
			sf.value = x
		default:
			log.Printf("proto: unhandled def kind %v", ft.Elem().Kind())
			continue
		}

		dm.scalars = append(dm.scalars, sf)
	}

	return dm
}

// Map fields may have key types of non-float scalars, strings and enums.
// The easiest way to sort them in some deterministic order is to use fmt.
// If this turns out to be inefficient we can always consider other options,
// such as doing a Schwartzian transform.

type mapKeys []reflect.Value

func (s mapKeys) Len() int      { return len(s) }
func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s mapKeys) Less(i, j int) bool {
	return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface())
}

================================================
FILE: vendor/github.com/golang/protobuf/proto/message_set.go
================================================
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors.  All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package proto

/*
 * Support for message sets.
 */

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"reflect"
	"sort"
)

// ErrNoMessageTypeId occurs when a protocol buffer does not have a message type ID.
// A message type ID is required for storing a protocol buffer in a message set.
var ErrNoMessageTypeId = errors.New("proto does not have a message type ID")

// The first two types (_MessageSet_Item and MessageSet)
// model what the protocol compiler produces for the following protocol message:
//   message MessageSet {
//     repeated group Item = 1 {
//       required int32 type_id = 2;
//       required string message = 3;
//     };
//   }
// That is the MessageSet wire format. We can't use a proto to generate these
// because that would introduce a circular dependency between it and this package.
// // When a proto1 proto has a field that looks like: // optional message info = 3; // the protocol compiler produces a field in the generated struct that looks like: // Info *_proto_.MessageSet `protobuf:"bytes,3,opt,name=info"` // The package is automatically inserted so there is no need for that proto file to // import this package. type _MessageSet_Item struct { TypeId *int32 `protobuf:"varint,2,req,name=type_id"` Message []byte `protobuf:"bytes,3,req,name=message"` } type MessageSet struct { Item []*_MessageSet_Item `protobuf:"group,1,rep"` XXX_unrecognized []byte // TODO: caching? } // Make sure MessageSet is a Message. var _ Message = (*MessageSet)(nil) // messageTypeIder is an interface satisfied by a protocol buffer type // that may be stored in a MessageSet. type messageTypeIder interface { MessageTypeId() int32 } func (ms *MessageSet) find(pb Message) *_MessageSet_Item { mti, ok := pb.(messageTypeIder) if !ok { return nil } id := mti.MessageTypeId() for _, item := range ms.Item { if *item.TypeId == id { return item } } return nil } func (ms *MessageSet) Has(pb Message) bool { if ms.find(pb) != nil { return true } return false } func (ms *MessageSet) Unmarshal(pb Message) error { if item := ms.find(pb); item != nil { return Unmarshal(item.Message, pb) } if _, ok := pb.(messageTypeIder); !ok { return ErrNoMessageTypeId } return nil // TODO: return error instead? } func (ms *MessageSet) Marshal(pb Message) error { msg, err := Marshal(pb) if err != nil { return err } if item := ms.find(pb); item != nil { // reuse existing item item.Message = msg return nil } mti, ok := pb.(messageTypeIder) if !ok { return ErrNoMessageTypeId } mtid := mti.MessageTypeId() ms.Item = append(ms.Item, &_MessageSet_Item{ TypeId: &mtid, Message: msg, }) return nil } func (ms *MessageSet) Reset() { *ms = MessageSet{} } func (ms *MessageSet) String() string { return CompactTextString(ms) } func (*MessageSet) ProtoMessage() {} // Support for the message_set_wire_format message option. 
// skipVarint returns the contents of buf following its leading varint.
// NOTE(review): assumes buf begins with a complete varint — a truncated
// varint would index past the end of the slice and panic.
func skipVarint(buf []byte) []byte {
	i := 0
	for ; buf[i]&0x80 != 0; i++ {
	}
	return buf[i+1:]
}

// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
	if err := encodeExtensionMap(m); err != nil {
		return nil, err
	}

	// Sort extension IDs to provide a deterministic encoding.
	// See also enc_map in encode.go.
	ids := make([]int, 0, len(m))
	for id := range m {
		ids = append(ids, int(id))
	}
	sort.Ints(ids)

	ms := &MessageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
	for _, id := range ids {
		e := m[int32(id)]
		// Remove the wire type and field number varint, as well as the length varint.
		msg := skipVarint(skipVarint(e.enc))

		ms.Item = append(ms.Item, &_MessageSet_Item{
			TypeId:  Int32(int32(id)),
			Message: msg,
		})
	}
	return Marshal(ms)
}

// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
	ms := new(MessageSet)
	if err := Unmarshal(buf, ms); err != nil {
		return err
	}
	for _, item := range ms.Item {
		id := *item.TypeId
		msg := item.Message

		// Restore wire type and field number varint, plus length varint.
		// Be careful to preserve duplicate items.
		b := EncodeVarint(uint64(id)<<3 | WireBytes)
		if ext, ok := m[id]; ok {
			// Existing data; rip off the tag and length varint
			// so we join the new data correctly.
			// We can assume that ext.enc is set because we are unmarshaling.
			o := ext.enc[len(b):]   // skip wire type and field number
			_, n := DecodeVarint(o) // calculate length of length varint
			o = o[n:]               // skip length varint
			msg = append(o, msg...) // join old data and new data
		}
		b = append(b, EncodeVarint(uint64(len(msg)))...)
		b = append(b, msg...)

		m[id] = Extension{enc: b}
	}
	return nil
}

// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
	var b bytes.Buffer
	b.WriteByte('{')

	// Process the map in key order for deterministic output.
	ids := make([]int32, 0, len(m))
	for id := range m {
		ids = append(ids, id)
	}
	sort.Sort(int32Slice(ids)) // int32Slice defined in text.go

	for i, id := range ids {
		ext := m[id]
		if i > 0 {
			b.WriteByte(',')
		}

		msd, ok := messageSetMap[id]
		if !ok {
			// Unknown type; we can't render it, so skip it.
			continue
		}
		fmt.Fprintf(&b, `"[%s]":`, msd.name)

		// Decode the extension into a concrete message if it has not
		// already been materialized.
		x := ext.value
		if x == nil {
			x = reflect.New(msd.t.Elem()).Interface()
			if err := Unmarshal(ext.enc, x.(Message)); err != nil {
				return nil, err
			}
		}
		d, err := json.Marshal(x)
		if err != nil {
			return nil, err
		}
		b.Write(d)
	}
	b.WriteByte('}')
	return b.Bytes(), nil
}

// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
	// Common-case fast path.
	if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
		return nil
	}

	// This is fairly tricky, and it's not clear that it is needed.
	return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
}

// A global registry of types that can be used in a MessageSet.

var messageSetMap = make(map[int32]messageSetDesc)

type messageSetDesc struct {
	t    reflect.Type // pointer to struct
	name string
}

// RegisterMessageSetType is called from the generated code.
// Registers the concrete type and JSON name for a MessageSet field number.
func RegisterMessageSetType(m Message, fieldNum int32, name string) {
	messageSetMap[fieldNum] = messageSetDesc{
		t:    reflect.TypeOf(m),
		name: name,
	}
}

================================================
FILE: vendor/github.com/golang/protobuf/proto/message_set_test.go
================================================
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2014 The Go Authors.  All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto

import (
	"bytes"
	"testing"
)

// Verifies that duplicate message-set entries with the same type ID are
// concatenated on unmarshal, matching protobuf merge semantics.
func TestUnmarshalMessageSetWithDuplicate(t *testing.T) {
	// Check that a repeated message set entry will be concatenated.
	in := &MessageSet{
		Item: []*_MessageSet_Item{
			{TypeId: Int32(12345), Message: []byte("hoo")},
			{TypeId: Int32(12345), Message: []byte("hah")},
		},
	}
	b, err := Marshal(in)
	if err != nil {
		t.Fatalf("Marshal: %v", err)
	}
	t.Logf("Marshaled bytes: %q", b)

	m := make(map[int32]Extension)
	if err := UnmarshalMessageSet(b, m); err != nil {
		t.Fatalf("UnmarshalMessageSet: %v", err)
	}
	ext, ok := m[12345]
	if !ok {
		t.Fatalf("Didn't retrieve extension 12345; map is %v", m)
	}
	// Skip wire type/field number and length varints.
	got := skipVarint(skipVarint(ext.enc))
	if want := []byte("hoohah"); !bytes.Equal(got, want) {
		t.Errorf("Combined extension is %q, want %q", got, want)
	}
}

================================================
FILE: vendor/github.com/golang/protobuf/proto/pointer_reflect.go
================================================
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors.  All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// +build appengine

// This file contains an implementation of proto field accesses using package reflect.
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
// be used on App Engine.

package proto

import (
	"math"
	"reflect"
)

// A structPointer is a pointer to a struct.
type structPointer struct {
	v reflect.Value
}

// toStructPointer returns a structPointer equivalent to the given reflect value.
// The reflect value must itself be a pointer to a struct.
func toStructPointer(v reflect.Value) structPointer {
	return structPointer{v}
}

// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
	return p.v.IsNil()
}

// Interface returns the struct pointer as an interface value.
func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
	return p.v.Interface()
}

// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by the sequence of field indices
// passed to reflect's FieldByIndex.
type field []int

// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
	return f.Index
}

// invalidField is an invalid field identifier.
var invalidField = field(nil)

// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool { return f != nil }

// field returns the given field in the struct as a reflect value.
func structPointer_field(p structPointer, f field) reflect.Value {
	// Special case: an extension map entry with a value of type T
	// passes a *T to the struct-handling code with a zero field,
	// expecting that it will be treated as equivalent to *struct{ X T },
	// which has the same memory layout. We have to handle that case
	// specially, because reflect will panic if we call FieldByIndex on a
	// non-struct.
	if f == nil {
		return p.v.Elem()
	}
	return p.v.Elem().FieldByIndex(f)
}

// ifield returns the given field in the struct as an interface value.
func structPointer_ifield(p structPointer, f field) interface{} {
	return structPointer_field(p, f).Addr().Interface()
}

// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
	return structPointer_ifield(p, f).(*[]byte)
}

// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
	return structPointer_ifield(p, f).(*[][]byte)
}

// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
	return structPointer_ifield(p, f).(**bool)
}

// BoolVal returns the address of a bool field in the struct.
func structPointer_BoolVal(p structPointer, f field) *bool {
	return structPointer_ifield(p, f).(*bool)
}

// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
	return structPointer_ifield(p, f).(*[]bool)
}

// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
	return structPointer_ifield(p, f).(**string)
}

// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
	return structPointer_ifield(p, f).(*string)
}

// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
	return structPointer_ifield(p, f).(*[]string)
}

// ExtMap returns the address of an extension map field in the struct.
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
	return structPointer_ifield(p, f).(*map[int32]Extension)
}

// Map returns the reflect.Value for the address of a map field in the struct.
// The typ argument is unused in this reflect-based implementation.
func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value {
	return structPointer_field(p, f).Addr()
}

// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
	structPointer_field(p, f).Set(q.v)
}

// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
	return structPointer{structPointer_field(p, f)}
}

// StructPointerSlice the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
	return structPointerSlice{structPointer_field(p, f)}
}

// A structPointerSlice represents the address of a slice of pointers to structs
// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
type structPointerSlice struct {
	v reflect.Value
}

// Len returns the number of elements in the slice.
func (p structPointerSlice) Len() int { return p.v.Len() }

// Index returns the i'th element as a structPointer.
func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }

// Append appends q to the slice.
func (p structPointerSlice) Append(q structPointer) {
	p.v.Set(reflect.Append(p.v, q.v))
}

var (
	int32Type   = reflect.TypeOf(int32(0))
	uint32Type  = reflect.TypeOf(uint32(0))
	float32Type = reflect.TypeOf(float32(0))
	int64Type   = reflect.TypeOf(int64(0))
	uint64Type  = reflect.TypeOf(uint64(0))
	float64Type = reflect.TypeOf(float64(0))
)

// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
type word32 struct {
	v reflect.Value
}

// IsNil reports whether p is nil.
func word32_IsNil(p word32) bool {
	return p.v.IsNil()
}

// Set sets p to point at a newly allocated word with bits set to x.
// Allocations are amortized by carving pointers out of the Buffer's pools.
func word32_Set(p word32, o *Buffer, x uint32) {
	t := p.v.Type().Elem()
	switch t {
	case int32Type:
		if len(o.int32s) == 0 {
			o.int32s = make([]int32, uint32PoolSize)
		}
		o.int32s[0] = int32(x)
		p.v.Set(reflect.ValueOf(&o.int32s[0]))
		o.int32s = o.int32s[1:]
		return
	case uint32Type:
		if len(o.uint32s) == 0 {
			o.uint32s = make([]uint32, uint32PoolSize)
		}
		o.uint32s[0] = x
		p.v.Set(reflect.ValueOf(&o.uint32s[0]))
		o.uint32s = o.uint32s[1:]
		return
	case float32Type:
		if len(o.float32s) == 0 {
			o.float32s = make([]float32, uint32PoolSize)
		}
		o.float32s[0] = math.Float32frombits(x)
		p.v.Set(reflect.ValueOf(&o.float32s[0]))
		o.float32s = o.float32s[1:]
		return
	}

	// must be enum
	p.v.Set(reflect.New(t))
	p.v.Elem().SetInt(int64(int32(x)))
}

// Get gets the bits pointed at by p, as a uint32.
func word32_Get(p word32) uint32 {
	elem := p.v.Elem()
	switch elem.Kind() {
	case reflect.Int32:
		return uint32(elem.Int())
	case reflect.Uint32:
		return uint32(elem.Uint())
	case reflect.Float32:
		return math.Float32bits(float32(elem.Float()))
	}
	// Callers only construct word32 over the kinds handled above.
	panic("unreachable")
}

// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
	return word32{structPointer_field(p, f)}
}

// A word32Val represents a field of type int32, uint32, float32, or enum.
// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
type word32Val struct {
	v reflect.Value
}

// Set sets *p to x.
func word32Val_Set(p word32Val, x uint32) {
	switch p.v.Type() {
	case int32Type:
		p.v.SetInt(int64(x))
		return
	case uint32Type:
		p.v.SetUint(uint64(x))
		return
	case float32Type:
		p.v.SetFloat(float64(math.Float32frombits(x)))
		return
	}

	// must be enum
	p.v.SetInt(int64(int32(x)))
}

// Get gets the bits pointed at by p, as a uint32.
func word32Val_Get(p word32Val) uint32 {
	elem := p.v
	switch elem.Kind() {
	case reflect.Int32:
		return uint32(elem.Int())
	case reflect.Uint32:
		return uint32(elem.Uint())
	case reflect.Float32:
		return math.Float32bits(float32(elem.Float()))
	}
	// Callers only construct word32Val over the kinds handled above.
	panic("unreachable")
}

// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
	return word32Val{structPointer_field(p, f)}
}

// A word32Slice is a slice of 32-bit values.
// That is, v.Type() is []int32, []uint32, []float32, or []enum.
type word32Slice struct {
	v reflect.Value
}

// Append appends x (reinterpreted per the slice's element kind) to the slice.
// If capacity allows, the slice is extended in place; otherwise a zero element
// is appended and then overwritten.
func (p word32Slice) Append(x uint32) {
	n, m := p.v.Len(), p.v.Cap()
	if n < m {
		p.v.SetLen(n + 1)
	} else {
		t := p.v.Type().Elem()
		p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
	}
	elem := p.v.Index(n)
	switch elem.Kind() {
	case reflect.Int32:
		elem.SetInt(int64(int32(x)))
	case reflect.Uint32:
		elem.SetUint(uint64(x))
	case reflect.Float32:
		elem.SetFloat(float64(math.Float32frombits(x)))
	}
}

func (p word32Slice) Len() int {
	return p.v.Len()
}

// Index returns the i'th element's bits as a uint32.
func (p word32Slice) Index(i int) uint32 {
	elem := p.v.Index(i)
	switch elem.Kind() {
	case reflect.Int32:
		return uint32(elem.Int())
	case reflect.Uint32:
		return uint32(elem.Uint())
	case reflect.Float32:
		return math.Float32bits(float32(elem.Float()))
	}
	panic("unreachable")
}

// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) word32Slice {
	return word32Slice{structPointer_field(p, f)}
}

// word64 is like word32 but for 64-bit values.
type word64 struct {
	v reflect.Value
}

// word64_Set sets p to point at a newly allocated 64-bit word holding x,
// carved from the Buffer's pools (see word32_Set). Unlike word32_Set there is
// no enum fallback: 64-bit fields are always one of the three pooled types.
func word64_Set(p word64, o *Buffer, x uint64) {
	t := p.v.Type().Elem()
	switch t {
	case int64Type:
		if len(o.int64s) == 0 {
			o.int64s = make([]int64, uint64PoolSize)
		}
		o.int64s[0] = int64(x)
		p.v.Set(reflect.ValueOf(&o.int64s[0]))
		o.int64s = o.int64s[1:]
		return
	case uint64Type:
		if len(o.uint64s) == 0 {
			o.uint64s = make([]uint64, uint64PoolSize)
		}
		o.uint64s[0] = x
		p.v.Set(reflect.ValueOf(&o.uint64s[0]))
		o.uint64s = o.uint64s[1:]
		return
	case float64Type:
		if len(o.float64s) == 0 {
			o.float64s = make([]float64, uint64PoolSize)
		}
		o.float64s[0] = math.Float64frombits(x)
		p.v.Set(reflect.ValueOf(&o.float64s[0]))
		o.float64s = o.float64s[1:]
		return
	}
	panic("unreachable")
}

// word64_IsNil reports whether the pointer field p wraps is nil.
func word64_IsNil(p word64) bool {
	return p.v.IsNil()
}

// word64_Get returns the bits of the word p points at, reinterpreted as uint64.
func word64_Get(p word64) uint64 {
	elem := p.v.Elem()
	switch elem.Kind() {
	case reflect.Int64:
		return uint64(elem.Int())
	case reflect.Uint64:
		return elem.Uint()
	case reflect.Float64:
		return math.Float64bits(elem.Float())
	}
	panic("unreachable")
}

// structPointer_Word64 returns a reference to a *int64, *uint64, or *float64 field in the struct.
func structPointer_Word64(p structPointer, f field) word64 {
	return word64{structPointer_field(p, f)}
}

// word64Val is like word32Val but for 64-bit values.
type word64Val struct {
	v reflect.Value
}

// word64Val_Set stores x into the non-pointer 64-bit field wrapped by p.
// The Buffer argument is unused here; it is kept for signature symmetry with word64_Set.
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
	switch p.v.Type() {
	case int64Type:
		p.v.SetInt(int64(x))
		return
	case uint64Type:
		p.v.SetUint(x)
		return
	case float64Type:
		p.v.SetFloat(math.Float64frombits(x))
		return
	}
	panic("unreachable")
}

// word64Val_Get returns the field's bits reinterpreted as uint64.
func word64Val_Get(p word64Val) uint64 {
	elem := p.v
	switch elem.Kind() {
	case reflect.Int64:
		return uint64(elem.Int())
	case reflect.Uint64:
		return elem.Uint()
	case reflect.Float64:
		return math.Float64bits(elem.Float())
	}
	panic("unreachable")
}

// structPointer_Word64Val returns a reference to an int64, uint64, or float64 field in the struct.
func structPointer_Word64Val(p structPointer, f field) word64Val {
	return word64Val{structPointer_field(p, f)}
}

// word64Slice is like word32Slice but for 64-bit values.
type word64Slice struct {
	v reflect.Value
}

// Append appends x (reinterpreted per the slice's element kind), extending
// in place when capacity allows.
func (p word64Slice) Append(x uint64) {
	n, m := p.v.Len(), p.v.Cap()
	if n < m {
		p.v.SetLen(n + 1)
	} else {
		t := p.v.Type().Elem()
		p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
	}
	elem := p.v.Index(n)
	switch elem.Kind() {
	case reflect.Int64:
		elem.SetInt(int64(int64(x)))
	case reflect.Uint64:
		elem.SetUint(uint64(x))
	case reflect.Float64:
		elem.SetFloat(float64(math.Float64frombits(x)))
	}
}

func (p word64Slice) Len() int {
	return p.v.Len()
}

// Index returns the i'th element's bits as a uint64.
func (p word64Slice) Index(i int) uint64 {
	elem := p.v.Index(i)
	switch elem.Kind() {
	case reflect.Int64:
		return uint64(elem.Int())
	case reflect.Uint64:
		return uint64(elem.Uint())
	case reflect.Float64:
		return math.Float64bits(float64(elem.Float()))
	}
	panic("unreachable")
}

// structPointer_Word64Slice returns a reference to a []int64, []uint64, or []float64 field in the struct.
func structPointer_Word64Slice(p structPointer, f field) word64Slice {
	return word64Slice{structPointer_field(p, f)}
}

================================================
FILE: vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
================================================
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // +build !appengine // This file contains the implementation of the proto field accesses using package unsafe. package proto import ( "reflect" "unsafe" ) // NOTE: These type_Foo functions would more idiomatically be methods, // but Go does not allow methods on pointer types, and we must preserve // some pointer type for the garbage collector. We use these // funcs with clunky names as our poor approximation to methods. 
//
// An alternative would be
//	type structPointer struct { p unsafe.Pointer }
// but that does not registerize as well.

// A structPointer is a pointer to a struct.
type structPointer unsafe.Pointer

// toStructPointer returns a structPointer equivalent to the given reflect value.
func toStructPointer(v reflect.Value) structPointer {
	return structPointer(unsafe.Pointer(v.Pointer()))
}

// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
	return p == nil
}

// Interface returns the struct pointer, assumed to have element type t,
// as an interface value.
func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
	return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
}

// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by its byte offset from the start of the struct.
type field uintptr

// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
	return field(f.Offset)
}

// invalidField is an invalid field identifier.
const invalidField = ^field(0)

// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool {
	return f != ^field(0)
}

// The accessors below all compute the field's address as base pointer plus
// byte offset and cast it to the appropriate pointer type.

// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
	return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
	return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
	return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// BoolVal returns the address of a bool field in the struct.
func structPointer_BoolVal(p structPointer, f field) *bool {
	return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
	return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
	return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
	return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
	return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// ExtMap returns the address of an extension map field in the struct.
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
	return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// Map returns the reflect.Value for the address of a map field in the struct.
func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value {
	return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
}

// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
	*(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
}

// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
	return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// StructPointerSlice the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
	return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
type structPointerSlice []structPointer

func (v *structPointerSlice) Len() int                  { return len(*v) }
func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
func (v *structPointerSlice) Append(p structPointer)    { *v = append(*v, p) }

// A word32 is the address of a "pointer to 32-bit value" field.
// In this unsafe implementation all 32-bit kinds (int32, uint32, float32, enum)
// share one in-memory representation, so no per-kind switching is needed.
type word32 **uint32

// IsNil reports whether *v is nil.
func word32_IsNil(p word32) bool {
	return *p == nil
}

// Set sets *v to point at a newly allocated word set to x.
// Words are carved out of the Buffer's uint32 pool to amortize allocations.
func word32_Set(p word32, o *Buffer, x uint32) {
	if len(o.uint32s) == 0 {
		o.uint32s = make([]uint32, uint32PoolSize)
	}
	o.uint32s[0] = x
	*p = &o.uint32s[0]
	o.uint32s = o.uint32s[1:]
}

// Get gets the value pointed at by *v.
func word32_Get(p word32) uint32 {
	return **p
}

// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
	return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}

// A word32Val is the address of a 32-bit value field.
type word32Val *uint32

// Set sets *p to x.
func word32Val_Set(p word32Val, x uint32) {
	*p = x
}

// Get gets the value pointed at by p.
func word32Val_Get(p word32Val) uint32 {
	return *p
}

// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
	return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}

// A word32Slice is a slice of 32-bit values.
type word32Slice []uint32

func (v *word32Slice) Append(x uint32)    { *v = append(*v, x) }
func (v *word32Slice) Len() int           { return len(*v) }
func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }

// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
	return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// word64 is like word32 but for 64-bit values.
type word64 **uint64

// word64_Set sets *p to point at a pooled word holding x.
func word64_Set(p word64, o *Buffer, x uint64) {
	if len(o.uint64s) == 0 {
		o.uint64s = make([]uint64, uint64PoolSize)
	}
	o.uint64s[0] = x
	*p = &o.uint64s[0]
	o.uint64s = o.uint64s[1:]
}

// word64_IsNil reports whether *p is nil.
func word64_IsNil(p word64) bool {
	return *p == nil
}

// word64_Get returns the value pointed at by *p.
func word64_Get(p word64) uint64 {
	return **p
}

// structPointer_Word64 returns the address of a pointer-to-64-bit-value field in the struct.
func structPointer_Word64(p structPointer, f field) word64 {
	return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}

// word64Val is like word32Val but for 64-bit values.
type word64Val *uint64

// word64Val_Set stores x directly into the field. The Buffer argument is
// unused; it is kept for signature symmetry with the reflect implementation.
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
	*p = x
}

// word64Val_Get returns the value pointed at by p.
func word64Val_Get(p word64Val) uint64 {
	return *p
}

// structPointer_Word64Val returns the address of a 64-bit value field in the struct.
func structPointer_Word64Val(p structPointer, f field) word64Val {
	return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}

// word64Slice is like word32Slice but for 64-bit values.
type word64Slice []uint64

func (v *word64Slice) Append(x uint64)    { *v = append(*v, x) }
func (v *word64Slice) Len() int           { return len(*v) }
func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }

func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
	return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

================================================
FILE: vendor/github.com/golang/protobuf/proto/properties.go
================================================
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2010 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto /* * Routines for encoding data into the wire format for protocol buffers. */ import ( "fmt" "os" "reflect" "sort" "strconv" "strings" "sync" ) const debug bool = false // Constants that identify the encoding of a value on the wire. 
const (
	WireVarint     = 0
	WireFixed64    = 1
	WireBytes      = 2
	WireStartGroup = 3
	WireEndGroup   = 4
	WireFixed32    = 5
)

const startSize = 10 // initial slice/string sizes

// Encoders are defined in encode.go
// An encoder outputs the full representation of a field, including its
// tag and encoder type.
type encoder func(p *Buffer, prop *Properties, base structPointer) error

// A valueEncoder encodes a single integer in a particular encoding.
type valueEncoder func(o *Buffer, x uint64) error

// Sizers are defined in encode.go
// A sizer returns the encoded size of a field, including its tag and encoder
// type.
type sizer func(prop *Properties, base structPointer) int

// A valueSizer returns the encoded size of a single integer in a particular
// encoding.
type valueSizer func(x uint64) int

// Decoders are defined in decode.go
// A decoder creates a value from its wire representation.
// Unrecognized subelements are saved in unrec.
type decoder func(p *Buffer, prop *Properties, base structPointer) error

// A valueDecoder decodes a single integer in a particular encoding.
type valueDecoder func(o *Buffer) (x uint64, err error)

// tagMap is an optimization over map[int]int for typical protocol buffer
// use-cases. Encoded protocol buffers are often in tag order with small tag
// numbers.
type tagMap struct {
	fastTags []int
	slowTags map[int]int
}

// tagMapFastLimit is the upper bound on the tag number that will be stored in
// the tagMap slice rather than its map.
const tagMapFastLimit = 1024

// get looks up the field index stored for tag t.
// Tags below tagMapFastLimit are indexed directly into fastTags, where -1
// marks an absent entry; larger tags fall back to the slowTags map.
func (p *tagMap) get(t int) (int, bool) {
	if t > 0 && t < tagMapFastLimit {
		if t >= len(p.fastTags) {
			return 0, false
		}
		fi := p.fastTags[t]
		return fi, fi >= 0
	}
	fi, ok := p.slowTags[t]
	return fi, ok
}

// put records field index fi for tag t, growing fastTags with -1 sentinels
// for any skipped tag numbers, or lazily allocating slowTags for large tags.
func (p *tagMap) put(t int, fi int) {
	if t > 0 && t < tagMapFastLimit {
		for len(p.fastTags) < t+1 {
			p.fastTags = append(p.fastTags, -1)
		}
		p.fastTags[t] = fi
		return
	}
	if p.slowTags == nil {
		p.slowTags = make(map[int]int)
	}
	p.slowTags[t] = fi
}

// StructProperties represents properties for all the fields of a struct.
// decoderTags and decoderOrigNames should only be used by the decoder.
type StructProperties struct {
	Prop             []*Properties  // properties for each field
	reqCount         int            // required count
	decoderTags      tagMap         // map from proto tag to struct field number
	decoderOrigNames map[string]int // map from original name to struct field number
	order            []int          // list of struct field numbers in tag order
	unrecField       field          // field id of the XXX_unrecognized []byte field
	extendable       bool           // is this an extendable proto
}

// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
// See encode.go, (*Buffer).enc_struct.

func (sp *StructProperties) Len() int { return len(sp.order) }
func (sp *StructProperties) Less(i, j int) bool {
	return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
}
func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }

// Properties represents the protocol-specific behavior of a single struct field.
type Properties struct { Name string // name of the field, for error messages OrigName string // original name before protocol compiler (always set) Wire string WireType int Tag int Required bool Optional bool Repeated bool Packed bool // relevant for repeated primitives only Enum string // set for enum types only proto3 bool // whether this is known to be a proto3 field; set for []byte only Default string // default value HasDefault bool // whether an explicit default was provided def_uint64 uint64 enc encoder valEnc valueEncoder // set for bool and numeric types only field field tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) tagbuf [8]byte stype reflect.Type // set for struct types only sprop *StructProperties // set for struct types only isMarshaler bool isUnmarshaler bool mtype reflect.Type // set for map types only mkeyprop *Properties // set for map types only mvalprop *Properties // set for map types only size sizer valSize valueSizer // set for bool and numeric types only dec decoder valDec valueDecoder // set for bool and numeric types only // If this is a packable field, this will be the decoder for the packed version of the field. packedDec decoder } // String formats the properties in the protobuf struct field tag style. func (p *Properties) String() string { s := p.Wire s = "," s += strconv.Itoa(p.Tag) if p.Required { s += ",req" } if p.Optional { s += ",opt" } if p.Repeated { s += ",rep" } if p.Packed { s += ",packed" } if p.OrigName != p.Name { s += ",name=" + p.OrigName } if p.proto3 { s += ",proto3" } if len(p.Enum) > 0 { s += ",enum=" + p.Enum } if p.HasDefault { s += ",def=" + p.Default } return s } // Parse populates p by parsing a string in the protobuf struct field tag style. func (p *Properties) Parse(s string) { // "bytes,49,opt,name=foo,def=hello!" fields := strings.Split(s, ",") // breaks def=, but handled below. 
if len(fields) < 2 { fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) return } p.Wire = fields[0] switch p.Wire { case "varint": p.WireType = WireVarint p.valEnc = (*Buffer).EncodeVarint p.valDec = (*Buffer).DecodeVarint p.valSize = sizeVarint case "fixed32": p.WireType = WireFixed32 p.valEnc = (*Buffer).EncodeFixed32 p.valDec = (*Buffer).DecodeFixed32 p.valSize = sizeFixed32 case "fixed64": p.WireType = WireFixed64 p.valEnc = (*Buffer).EncodeFixed64 p.valDec = (*Buffer).DecodeFixed64 p.valSize = sizeFixed64 case "zigzag32": p.WireType = WireVarint p.valEnc = (*Buffer).EncodeZigzag32 p.valDec = (*Buffer).DecodeZigzag32 p.valSize = sizeZigzag32 case "zigzag64": p.WireType = WireVarint p.valEnc = (*Buffer).EncodeZigzag64 p.valDec = (*Buffer).DecodeZigzag64 p.valSize = sizeZigzag64 case "bytes", "group": p.WireType = WireBytes // no numeric converter for non-numeric types default: fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) return } var err error p.Tag, err = strconv.Atoi(fields[1]) if err != nil { return } for i := 2; i < len(fields); i++ { f := fields[i] switch { case f == "req": p.Required = true case f == "opt": p.Optional = true case f == "rep": p.Repeated = true case f == "packed": p.Packed = true case strings.HasPrefix(f, "name="): p.OrigName = f[5:] case strings.HasPrefix(f, "enum="): p.Enum = f[5:] case f == "proto3": p.proto3 = true case strings.HasPrefix(f, "def="): p.HasDefault = true p.Default = f[4:] // rest of string if i+1 < len(fields) { // Commas aren't escaped, and def is always last. p.Default += "," + strings.Join(fields[i+1:], ",") break } } } } func logNoSliceEnc(t1, t2 reflect.Type) { fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) } var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() // Initialize the fields for encoding and decoding. 
// setEncAndDec selects the encoder, decoder and sizer functions for the field
// based on its Go type (typ) and the already-parsed tag options on p.
func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
	p.enc = nil
	p.dec = nil
	p.size = nil

	switch t1 := typ; t1.Kind() {
	default:
		fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)

	// proto3 scalar types

	case reflect.Bool:
		p.enc = (*Buffer).enc_proto3_bool
		p.dec = (*Buffer).dec_proto3_bool
		p.size = size_proto3_bool
	case reflect.Int32:
		p.enc = (*Buffer).enc_proto3_int32
		p.dec = (*Buffer).dec_proto3_int32
		p.size = size_proto3_int32
	case reflect.Uint32:
		p.enc = (*Buffer).enc_proto3_uint32
		p.dec = (*Buffer).dec_proto3_int32 // can reuse
		p.size = size_proto3_uint32
	case reflect.Int64, reflect.Uint64:
		p.enc = (*Buffer).enc_proto3_int64
		p.dec = (*Buffer).dec_proto3_int64
		p.size = size_proto3_int64
	case reflect.Float32:
		p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
		p.dec = (*Buffer).dec_proto3_int32
		p.size = size_proto3_uint32
	case reflect.Float64:
		p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
		p.dec = (*Buffer).dec_proto3_int64
		p.size = size_proto3_int64
	case reflect.String:
		p.enc = (*Buffer).enc_proto3_string
		p.dec = (*Buffer).dec_proto3_string
		p.size = size_proto3_string

	// proto2 optional scalars are pointers to the value.
	case reflect.Ptr:
		switch t2 := t1.Elem(); t2.Kind() {
		default:
			fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
			break
		case reflect.Bool:
			p.enc = (*Buffer).enc_bool
			p.dec = (*Buffer).dec_bool
			p.size = size_bool
		case reflect.Int32:
			p.enc = (*Buffer).enc_int32
			p.dec = (*Buffer).dec_int32
			p.size = size_int32
		case reflect.Uint32:
			p.enc = (*Buffer).enc_uint32
			p.dec = (*Buffer).dec_int32 // can reuse
			p.size = size_uint32
		case reflect.Int64, reflect.Uint64:
			p.enc = (*Buffer).enc_int64
			p.dec = (*Buffer).dec_int64
			p.size = size_int64
		case reflect.Float32:
			p.enc = (*Buffer).enc_uint32 // can just treat them as bits
			p.dec = (*Buffer).dec_int32
			p.size = size_uint32
		case reflect.Float64:
			p.enc = (*Buffer).enc_int64 // can just treat them as bits
			p.dec = (*Buffer).dec_int64
			p.size = size_int64
		case reflect.String:
			p.enc = (*Buffer).enc_string
			p.dec = (*Buffer).dec_string
			p.size = size_string
		case reflect.Struct:
			// *T where T is a message struct: message vs. group is decided
			// by the wire type parsed from the tag.
			p.stype = t1.Elem()
			p.isMarshaler = isMarshaler(t1)
			p.isUnmarshaler = isUnmarshaler(t1)
			if p.Wire == "bytes" {
				p.enc = (*Buffer).enc_struct_message
				p.dec = (*Buffer).dec_struct_message
				p.size = size_struct_message
			} else {
				p.enc = (*Buffer).enc_struct_group
				p.dec = (*Buffer).dec_struct_group
				p.size = size_struct_group
			}
		}

	// repeated fields
	case reflect.Slice:
		switch t2 := t1.Elem(); t2.Kind() {
		default:
			logNoSliceEnc(t1, t2)
			break
		case reflect.Bool:
			if p.Packed {
				p.enc = (*Buffer).enc_slice_packed_bool
				p.size = size_slice_packed_bool
			} else {
				p.enc = (*Buffer).enc_slice_bool
				p.size = size_slice_bool
			}
			p.dec = (*Buffer).dec_slice_bool
			p.packedDec = (*Buffer).dec_slice_packed_bool
		case reflect.Int32:
			if p.Packed {
				p.enc = (*Buffer).enc_slice_packed_int32
				p.size = size_slice_packed_int32
			} else {
				p.enc = (*Buffer).enc_slice_int32
				p.size = size_slice_int32
			}
			p.dec = (*Buffer).dec_slice_int32
			p.packedDec = (*Buffer).dec_slice_packed_int32
		case reflect.Uint32:
			if p.Packed {
				p.enc = (*Buffer).enc_slice_packed_uint32
				p.size = size_slice_packed_uint32
			} else {
				p.enc = (*Buffer).enc_slice_uint32
				p.size = size_slice_uint32
			}
			p.dec = (*Buffer).dec_slice_int32
			p.packedDec = (*Buffer).dec_slice_packed_int32
		case reflect.Int64, reflect.Uint64:
			if p.Packed {
				p.enc = (*Buffer).enc_slice_packed_int64
				p.size = size_slice_packed_int64
			} else {
				p.enc = (*Buffer).enc_slice_int64
				p.size = size_slice_int64
			}
			p.dec = (*Buffer).dec_slice_int64
			p.packedDec = (*Buffer).dec_slice_packed_int64
		case reflect.Uint8:
			p.enc = (*Buffer).enc_slice_byte
			p.dec = (*Buffer).dec_slice_byte
			p.size = size_slice_byte
			// This is a []byte, which is either a bytes field,
			// or the value of a map field. In the latter case,
			// we always encode an empty []byte, so we should not
			// use the proto3 enc/size funcs.
			// f == nil iff this is the key/value of a map field.
			if p.proto3 && f != nil {
				p.enc = (*Buffer).enc_proto3_slice_byte
				p.size = size_proto3_slice_byte
			}
		case reflect.Float32, reflect.Float64:
			switch t2.Bits() {
			case 32:
				// can just treat them as bits
				if p.Packed {
					p.enc = (*Buffer).enc_slice_packed_uint32
					p.size = size_slice_packed_uint32
				} else {
					p.enc = (*Buffer).enc_slice_uint32
					p.size = size_slice_uint32
				}
				p.dec = (*Buffer).dec_slice_int32
				p.packedDec = (*Buffer).dec_slice_packed_int32
			case 64:
				// can just treat them as bits
				if p.Packed {
					p.enc = (*Buffer).enc_slice_packed_int64
					p.size = size_slice_packed_int64
				} else {
					p.enc = (*Buffer).enc_slice_int64
					p.size = size_slice_int64
				}
				p.dec = (*Buffer).dec_slice_int64
				p.packedDec = (*Buffer).dec_slice_packed_int64
			default:
				logNoSliceEnc(t1, t2)
				break
			}
		case reflect.String:
			p.enc = (*Buffer).enc_slice_string
			p.dec = (*Buffer).dec_slice_string
			p.size = size_slice_string
		case reflect.Ptr:
			switch t3 := t2.Elem(); t3.Kind() {
			default:
				fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
				break
			case reflect.Struct:
				// []*T: repeated message or group field.
				p.stype = t2.Elem()
				p.isMarshaler = isMarshaler(t2)
				p.isUnmarshaler = isUnmarshaler(t2)
				if p.Wire == "bytes" {
					p.enc = (*Buffer).enc_slice_struct_message
					p.dec = (*Buffer).dec_slice_struct_message
					p.size = size_slice_struct_message
				} else {
					p.enc = (*Buffer).enc_slice_struct_group
					p.dec = (*Buffer).dec_slice_struct_group
					p.size = size_slice_struct_group
				}
			}
		case reflect.Slice:
			switch t2.Elem().Kind() {
			default:
				fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
				break
			case reflect.Uint8:
				// [][]byte: repeated bytes field.
				p.enc = (*Buffer).enc_slice_slice_byte
				p.dec = (*Buffer).dec_slice_slice_byte
				p.size = size_slice_slice_byte
			}
		}

	case reflect.Map:
		p.enc = (*Buffer).enc_new_map
		p.dec = (*Buffer).dec_new_map
		p.size = size_new_map

		p.mtype = t1
		p.mkeyprop = &Properties{}
		p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
		p.mvalprop = &Properties{}
		vtype := p.mtype.Elem()
		if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
			// The value type is not a message (*T) or bytes ([]byte),
			// so we need encoders for the pointer to this type.
			vtype = reflect.PtrTo(vtype)
		}
		p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
	}

	// precalculate tag code: the varint encoding of (Tag<<3)|wire.
	wire := p.WireType
	if p.Packed {
		wire = WireBytes
	}
	x := uint32(p.Tag)<<3 | uint32(wire)
	i := 0
	for i = 0; x > 127; i++ {
		p.tagbuf[i] = 0x80 | uint8(x&0x7F)
		x >>= 7
	}
	p.tagbuf[i] = uint8(x)
	p.tagcode = p.tagbuf[0 : i+1]

	if p.stype != nil {
		if lockGetProp {
			p.sprop = GetProperties(p.stype)
		} else {
			p.sprop = getPropertiesLocked(p.stype)
		}
	}
}

var (
	marshalerType   = reflect.TypeOf((*Marshaler)(nil)).Elem()
	unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
)

// isMarshaler reports whether type t implements Marshaler.
func isMarshaler(t reflect.Type) bool {
	// We're checking for (likely) pointer-receiver methods
	// so if t is not a pointer, something is very wrong.
	// The calls above only invoke isMarshaler on pointer types.
	if t.Kind() != reflect.Ptr {
		panic("proto: misuse of isMarshaler")
	}
	return t.Implements(marshalerType)
}

// isUnmarshaler reports whether type t implements Unmarshaler.
func isUnmarshaler(t reflect.Type) bool {
	// We're checking for (likely) pointer-receiver methods
	// so if t is not a pointer, something is very wrong.
	// The calls above only invoke isUnmarshaler on pointer types.
	if t.Kind() != reflect.Ptr {
		panic("proto: misuse of isUnmarshaler")
	}
	return t.Implements(unmarshalerType)
}

// Init populates the properties from a protocol buffer struct tag.
func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
	p.init(typ, name, tag, f, true)
}

func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
	// "bytes,49,opt,def=hello!"
	p.Name = name
	p.OrigName = name
	if f != nil {
		p.field = toField(f)
	}
	if tag == "" {
		return
	}
	p.Parse(tag)
	p.setEncAndDec(typ, f, lockGetProp)
}

var (
	propertiesMu  sync.RWMutex
	propertiesMap = make(map[reflect.Type]*StructProperties)
)

// GetProperties returns the list of properties for the type represented by t.
// t must represent a generated struct type of a protocol message.
func GetProperties(t reflect.Type) *StructProperties {
	if t.Kind() != reflect.Struct {
		panic("proto: type must have kind struct")
	}

	// Most calls to GetProperties in a long-running program will be
	// retrieving details for types we have seen before.
	propertiesMu.RLock()
	sprop, ok := propertiesMap[t]
	propertiesMu.RUnlock()
	if ok {
		if collectStats {
			stats.Chit++
		}
		return sprop
	}

	propertiesMu.Lock()
	sprop = getPropertiesLocked(t)
	propertiesMu.Unlock()
	return sprop
}

// getPropertiesLocked requires that propertiesMu is held.
func getPropertiesLocked(t reflect.Type) *StructProperties {
	if prop, ok := propertiesMap[t]; ok {
		if collectStats {
			stats.Chit++
		}
		return prop
	}
	if collectStats {
		stats.Cmiss++
	}

	prop := new(StructProperties)
	// in case of recursive protos, fill this in now.
	propertiesMap[t] = prop

	// build properties
	prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
	prop.unrecField = invalidField
	prop.Prop = make([]*Properties, t.NumField())
	prop.order = make([]int, t.NumField())

	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		p := new(Properties)
		name := f.Name
		p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)

		if f.Name == "XXX_extensions" { // special case
			p.enc = (*Buffer).enc_map
			p.dec = nil // not needed
			p.size = size_map
		}
		if f.Name == "XXX_unrecognized" { // special case
			prop.unrecField = toField(&f)
		}
		prop.Prop[i] = p
		prop.order[i] = i
		if debug {
			print(i, " ", f.Name, " ", t.String(), " ")
			if p.Tag > 0 {
				print(p.String())
			}
			print("\n")
		}
		if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") {
			fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
		}
	}

	// Re-order prop.order.
	sort.Sort(prop)

	// build required counts
	// build tags
	reqCount := 0
	prop.decoderOrigNames = make(map[string]int)
	for i, p := range prop.Prop {
		if strings.HasPrefix(p.Name, "XXX_") {
			// Internal fields should not appear in tags/origNames maps.
			// They are handled specially when encoding and decoding.
			continue
		}
		if p.Required {
			reqCount++
		}
		prop.decoderTags.put(p.Tag, i)
		prop.decoderOrigNames[p.OrigName] = i
	}
	prop.reqCount = reqCount

	return prop
}

// Return the Properties object for the x[0]'th field of the structure.
func propByIndex(t reflect.Type, x []int) *Properties {
	if len(x) != 1 {
		fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
		return nil
	}
	prop := GetProperties(t)
	return prop.Prop[x[0]]
}

// Get the address and type of a pointer to a struct from an interface.
func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
	if pb == nil {
		err = ErrNil
		return
	}
	// get the reflect type of the pointer to the struct.
	t = reflect.TypeOf(pb)
	// get the address of the struct.
	value := reflect.ValueOf(pb)
	b = toStructPointer(value)
	return
}

// A global registry of enum types.
// The generated code will register the generated maps by calling RegisterEnum.

var enumValueMaps = make(map[string]map[string]int32)

// RegisterEnum is called from the generated code to install the enum descriptor
// maps into the global table to aid parsing text format protocol buffers.
func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
	if _, ok := enumValueMaps[typeName]; ok {
		panic("proto: duplicate enum registered: " + typeName)
	}
	enumValueMaps[typeName] = valueMap
}

================================================
FILE: vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.pb.go
================================================
// Code generated by protoc-gen-go.
// source: proto3_proto/proto3.proto
// DO NOT EDIT!

/*
Package proto3_proto is a generated protocol buffer package.

It is generated from these files:
	proto3_proto/proto3.proto

It has these top-level messages:
	Message
	Nested
	MessageWithMap
*/
package proto3_proto

import proto "github.com/golang/protobuf/proto"
import testdata "github.com/golang/protobuf/proto/testdata"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal type Message_Humour int32 const ( Message_UNKNOWN Message_Humour = 0 Message_PUNS Message_Humour = 1 Message_SLAPSTICK Message_Humour = 2 Message_BILL_BAILEY Message_Humour = 3 ) var Message_Humour_name = map[int32]string{ 0: "UNKNOWN", 1: "PUNS", 2: "SLAPSTICK", 3: "BILL_BAILEY", } var Message_Humour_value = map[string]int32{ "UNKNOWN": 0, "PUNS": 1, "SLAPSTICK": 2, "BILL_BAILEY": 3, } func (x Message_Humour) String() string { return proto.EnumName(Message_Humour_name, int32(x)) } type Message struct { Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` Hilarity Message_Humour `protobuf:"varint,2,opt,name=hilarity,enum=proto3_proto.Message_Humour" json:"hilarity,omitempty"` HeightInCm uint32 `protobuf:"varint,3,opt,name=height_in_cm" json:"height_in_cm,omitempty"` Data []byte `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"` ResultCount int64 `protobuf:"varint,7,opt,name=result_count" json:"result_count,omitempty"` TrueScotsman bool `protobuf:"varint,8,opt,name=true_scotsman" json:"true_scotsman,omitempty"` Score float32 `protobuf:"fixed32,9,opt,name=score" json:"score,omitempty"` Key []uint64 `protobuf:"varint,5,rep,name=key" json:"key,omitempty"` Nested *Nested `protobuf:"bytes,6,opt,name=nested" json:"nested,omitempty"` Terrain map[string]*Nested `protobuf:"bytes,10,rep,name=terrain" json:"terrain,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` Proto2Field *testdata.SubDefaults `protobuf:"bytes,11,opt,name=proto2_field" json:"proto2_field,omitempty"` Proto2Value map[string]*testdata.SubDefaults `protobuf:"bytes,13,rep,name=proto2_value" json:"proto2_value,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` } func (m *Message) Reset() { *m = Message{} } func (m *Message) String() string { return proto.CompactTextString(m) } func (*Message) ProtoMessage() {} func (m *Message) GetNested() *Nested { if m != nil { return m.Nested } return nil 
} func (m *Message) GetTerrain() map[string]*Nested { if m != nil { return m.Terrain } return nil } func (m *Message) GetProto2Field() *testdata.SubDefaults { if m != nil { return m.Proto2Field } return nil } func (m *Message) GetProto2Value() map[string]*testdata.SubDefaults { if m != nil { return m.Proto2Value } return nil } type Nested struct { Bunny string `protobuf:"bytes,1,opt,name=bunny" json:"bunny,omitempty"` } func (m *Nested) Reset() { *m = Nested{} } func (m *Nested) String() string { return proto.CompactTextString(m) } func (*Nested) ProtoMessage() {} type MessageWithMap struct { ByteMapping map[bool][]byte `protobuf:"bytes,1,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (m *MessageWithMap) Reset() { *m = MessageWithMap{} } func (m *MessageWithMap) String() string { return proto.CompactTextString(m) } func (*MessageWithMap) ProtoMessage() {} func (m *MessageWithMap) GetByteMapping() map[bool][]byte { if m != nil { return m.ByteMapping } return nil } func init() { proto.RegisterEnum("proto3_proto.Message_Humour", Message_Humour_name, Message_Humour_value) } ================================================ FILE: vendor/github.com/golang/protobuf/proto/proto3_proto/proto3.proto ================================================ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2014 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. 
// * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. syntax = "proto3"; import "testdata/test.proto"; package proto3_proto; message Message { enum Humour { UNKNOWN = 0; PUNS = 1; SLAPSTICK = 2; BILL_BAILEY = 3; } string name = 1; Humour hilarity = 2; uint32 height_in_cm = 3; bytes data = 4; int64 result_count = 7; bool true_scotsman = 8; float score = 9; repeated uint64 key = 5; Nested nested = 6; map terrain = 10; testdata.SubDefaults proto2_field = 11; map proto2_value = 13; } message Nested { string bunny = 1; } message MessageWithMap { map byte_mapping = 1; } ================================================ FILE: vendor/github.com/golang/protobuf/proto/proto3_test.go ================================================ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2014 The Go Authors. 
All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
package proto_test

import (
	"testing"

	"github.com/golang/protobuf/proto"
	pb "github.com/golang/protobuf/proto/proto3_proto"
	tpb "github.com/golang/protobuf/proto/testdata"
)

// TestProto3ZeroValues checks that proto3 messages containing only
// zero-valued fields — including an explicitly non-nil but empty bytes
// field — marshal to an empty encoding, since proto3 omits zero values
// on the wire.
func TestProto3ZeroValues(t *testing.T) {
	tests := []struct {
		desc string
		m    proto.Message
	}{
		{"zero message", &pb.Message{}},
		{"empty bytes field", &pb.Message{Data: []byte{}}},
	}
	for _, test := range tests {
		b, err := proto.Marshal(test.m)
		if err != nil {
			t.Errorf("%s: proto.Marshal: %v", test.desc, err)
			continue
		}
		if len(b) > 0 {
			t.Errorf("%s: Encoding is non-empty: %q", test.desc, b)
		}
	}
}

// TestRoundTripProto3 marshals a proto3 message with a representative
// field of each scalar kind (plus a repeated field and a nested message),
// unmarshals the bytes into a fresh message, and asserts equality.
// The inline comments give the expected wire encoding (wire type | tag<<3)
// for each field.
func TestRoundTripProto3(t *testing.T) {
	m := &pb.Message{
		Name:         "David",          // (2 | 1<<3): 0x0a 0x05 "David"
		Hilarity:     pb.Message_PUNS,  // (0 | 2<<3): 0x10 0x01
		HeightInCm:   178,              // (0 | 3<<3): 0x18 0xb2 0x01
		Data:         []byte("roboto"), // (2 | 4<<3): 0x20 0x06 "roboto"
		ResultCount:  47,               // (0 | 7<<3): 0x38 0x2f
		TrueScotsman: true,             // (0 | 8<<3): 0x40 0x01
		Score:        8.1,              // (5 | 9<<3): 0x4d <8.1>

		Key: []uint64{1, 0xdeadbeef},
		Nested: &pb.Nested{
			Bunny: "Monty",
		},
	}
	t.Logf(" m: %v", m)

	b, err := proto.Marshal(m)
	if err != nil {
		t.Fatalf("proto.Marshal: %v", err)
	}
	t.Logf(" b: %q", b)

	m2 := new(pb.Message)
	if err := proto.Unmarshal(b, m2); err != nil {
		t.Fatalf("proto.Unmarshal: %v", err)
	}
	t.Logf("m2: %v", m2)

	if !proto.Equal(m, m2) {
		t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2)
	}
}

// TestProto3SetDefaults verifies that SetDefaults leaves proto3 fields at
// their zero values but still applies proto2 defaults (here, N defaults to
// 7 per testdata.SubDefaults) to proto2 sub-messages reachable through
// singular fields and map values of a proto3 message.
func TestProto3SetDefaults(t *testing.T) {
	in := &pb.Message{
		Terrain: map[string]*pb.Nested{
			"meadow": new(pb.Nested),
		},
		Proto2Field: new(tpb.SubDefaults),
		Proto2Value: map[string]*tpb.SubDefaults{
			"badlands": new(tpb.SubDefaults),
		},
	}

	got := proto.Clone(in).(*pb.Message)
	proto.SetDefaults(got)

	// There are no defaults in proto3. Everything should be the zero value, but
	// we need to remember to set defaults for nested proto2 messages.
	want := &pb.Message{
		Terrain: map[string]*pb.Nested{
			"meadow": new(pb.Nested),
		},
		Proto2Field: &tpb.SubDefaults{N: proto.Int64(7)},
		Proto2Value: map[string]*tpb.SubDefaults{
			"badlands": &tpb.SubDefaults{N: proto.Int64(7)},
		},
	}

	if !proto.Equal(got, want) {
		t.Errorf("with in = %v\nproto.SetDefaults(in) =>\ngot %v\nwant %v", in, got, want)
	}
}

================================================
FILE: vendor/github.com/golang/protobuf/proto/size2_test.go
================================================
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors.  All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED.
// IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package proto

import (
	"testing"
)

// This is a separate file and package from size_test.go because that one uses
// generated messages and thus may not be in package proto without having a circular
// dependency, whereas this file tests unexported details of size.go.

// TestVarintSize checks sizeVarint against the boundary values where the
// varint encoding grows by one byte: each 7-bit threshold (127/128,
// 16383/16384) and the extremes 0, 1<<63-1 (9 bytes) and 1<<63 (10 bytes).
func TestVarintSize(t *testing.T) {
	// Check the edge cases carefully.
	testCases := []struct {
		n    uint64
		size int
	}{
		{0, 1},
		{1, 1},
		{127, 1},
		{128, 2},
		{16383, 2},
		{16384, 3},
		{1<<63 - 1, 9},
		{1 << 63, 10},
	}
	for _, tc := range testCases {
		size := sizeVarint(tc.n)
		if size != tc.size {
			t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size)
		}
	}
}

================================================
FILE: vendor/github.com/golang/protobuf/proto/size_test.go
================================================
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors.  All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto_test import ( "log" "strings" "testing" . "github.com/golang/protobuf/proto" proto3pb "github.com/golang/protobuf/proto/proto3_proto" pb "github.com/golang/protobuf/proto/testdata" ) var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)} // messageWithExtension2 is in equal_test.go. var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)} func init() { if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil { log.Panicf("SetExtension: %v", err) } if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil { log.Panicf("SetExtension: %v", err) } // Force messageWithExtension3 to have the extension encoded. Marshal(messageWithExtension3) } var SizeTests = []struct { desc string pb Message }{ {"empty", &pb.OtherMessage{}}, // Basic types. 
{"bool", &pb.Defaults{F_Bool: Bool(true)}}, {"int32", &pb.Defaults{F_Int32: Int32(12)}}, {"negative int32", &pb.Defaults{F_Int32: Int32(-1)}}, {"small int64", &pb.Defaults{F_Int64: Int64(1)}}, {"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}}, {"negative int64", &pb.Defaults{F_Int64: Int64(-1)}}, {"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}}, {"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}}, {"uint32", &pb.Defaults{F_Uint32: Uint32(123)}}, {"uint64", &pb.Defaults{F_Uint64: Uint64(124)}}, {"float", &pb.Defaults{F_Float: Float32(12.6)}}, {"double", &pb.Defaults{F_Double: Float64(13.9)}}, {"string", &pb.Defaults{F_String: String("niles")}}, {"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}}, {"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}}, {"sint32", &pb.Defaults{F_Sint32: Int32(65)}}, {"sint64", &pb.Defaults{F_Sint64: Int64(67)}}, {"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}}, // Repeated. {"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}}, {"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}}, {"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}}, {"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}}, {"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}}, {"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{ // Need enough large numbers to verify that the header is counting the number of bytes // for the field, not the number of elements. 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, }}}, {"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}}, {"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}}, // Nested. 
{"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}}, {"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}}, // Other things. {"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}}, {"extension (unencoded)", messageWithExtension1}, {"extension (encoded)", messageWithExtension3}, // proto3 message {"proto3 empty", &proto3pb.Message{}}, {"proto3 bool", &proto3pb.Message{TrueScotsman: true}}, {"proto3 int64", &proto3pb.Message{ResultCount: 1}}, {"proto3 uint32", &proto3pb.Message{HeightInCm: 123}}, {"proto3 float", &proto3pb.Message{Score: 12.6}}, {"proto3 string", &proto3pb.Message{Name: "Snezana"}}, {"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}}, {"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}}, {"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}}, {"proto3 map field with empty bytes", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: []byte{}}}}, {"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}}, {"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}}, {"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}}, {"map field with empty bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte{}}}}, {"map field with big entry", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat("x", 125)}}}, {"map field with big key and val", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat("x", 70): strings.Repeat("y", 70)}}}, {"map field with big numeric key", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: "om nom nom"}}}, } func TestSize(t *testing.T) { for _, tc := range SizeTests { size := Size(tc.pb) b, err := Marshal(tc.pb) if err != nil { t.Errorf("%v: Marshal failed: %v", tc.desc, err) continue } if size != len(b) { t.Errorf("%v: Size(%v) = %d, want %d", 
tc.desc, tc.pb, size, len(b)) t.Logf("%v: bytes: %#v", tc.desc, b) } } } ================================================ FILE: vendor/github.com/golang/protobuf/proto/text.go ================================================ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto // Functions for writing the text protocol buffer format. 
import (
	"bufio"
	"bytes"
	"encoding"
	"fmt"
	"io"
	"log"
	"math"
	"os"
	"reflect"
	"sort"
	"strings"
)

// Byte sequences reused by the text marshaler to avoid per-call allocations.
var (
	newline = []byte("\n")
	// NOTE(review): extraction may have collapsed a longer run of spaces in
	// this literal (upstream text.go uses a multi-space indent buffer) —
	// verify against the upstream source before relying on it.
	spaces          = []byte(" ")
	gtNewline       = []byte(">\n")
	endBraceNewline = []byte("}\n")
	backslashN      = []byte{'\\', 'n'}
	backslashR      = []byte{'\\', 'r'}
	backslashT      = []byte{'\\', 't'}
	backslashDQ     = []byte{'\\', '"'}
	backslashBS     = []byte{'\\', '\\'}
	posInf          = []byte("inf")
	negInf          = []byte("-inf")
	nan             = []byte("nan")
)

// writer is the minimal sink the text marshaler needs: an io.Writer that
// can also write single bytes (satisfied by e.g. *bufio.Writer).
type writer interface {
	io.Writer
	WriteByte(byte) error
}

// textWriter is an io.Writer that tracks its indentation level.
type textWriter struct {
	ind      int
	complete bool // if the current position is a complete line
	compact  bool // whether to write out as a one-liner
	w        writer
}

// WriteString writes s, inserting indentation at the start of a fresh line
// when not in compact mode. Strings containing newlines take the slower
// Write path so each line fragment is indented correctly.
func (w *textWriter) WriteString(s string) (n int, err error) {
	if !strings.Contains(s, "\n") {
		if !w.compact && w.complete {
			w.writeIndent() // writeIndent is defined elsewhere in this file
		}
		w.complete = false
		return io.WriteString(w.w, s)
	}
	// WriteString is typically called without newlines, so this
	// codepath and its copy are rare. We copy to avoid
	// duplicating all of Write's logic here.
	return w.Write([]byte(s))
}

// Write writes p, handling three cases: no newlines (fast path with a
// single indent check), compact mode (newlines become single spaces), and
// multi-line output (each fragment indented, newlines re-inserted between
// fragments). It tracks whether the output currently ends at a line start
// via w.complete.
func (w *textWriter) Write(p []byte) (n int, err error) {
	newlines := bytes.Count(p, newline)
	if newlines == 0 {
		// Fast path: no line breaks, at most one indent needed.
		if !w.compact && w.complete {
			w.writeIndent()
		}
		n, err = w.w.Write(p)
		w.complete = false
		return n, err
	}
	frags := bytes.SplitN(p, newline, newlines+1)
	if w.compact {
		// Compact mode: join fragments with single spaces instead of newlines.
		for i, frag := range frags {
			if i > 0 {
				if err := w.w.WriteByte(' '); err != nil {
					return n, err
				}
				n++
			}
			nn, err := w.w.Write(frag)
			n += nn
			if err != nil {
				return n, err
			}
		}
		return n, nil
	}
	for i, frag := range frags {
		if w.complete {
			w.writeIndent()
		}
		nn, err := w.w.Write(frag)
		n += nn
		if err != nil {
			return n, err
		}
		// Re-insert the newline that SplitN consumed, except after the
		// final fragment.
		if i+1 < len(frags) {
			if err := w.w.WriteByte('\n'); err != nil {
				return n, err
			}
			n++
		}
	}
	// The position is at a line start iff p ended with a newline
	// (i.e. the last fragment is empty).
	w.complete = len(frags[len(frags)-1]) == 0
	return n, nil
}

// WriteByte writes a single byte, converting '\n' to ' ' in compact mode
// and indenting first when at the start of a line.
func (w *textWriter) WriteByte(c byte) error {
	if w.compact && c == '\n' {
		c = ' '
	}
	if !w.compact && w.complete {
		w.writeIndent()
	}
	err := w.w.WriteByte(c)
	w.complete = c == '\n'
	return err
}

// indent increases the indentation level by one.
func (w *textWriter) indent() { w.ind++ }

// unindent decreases the indentation level, logging (rather than
// panicking) if it would go negative — this can happen on malformed
// unknown-group data.
func (w *textWriter) unindent() {
	if w.ind == 0 {
		log.Printf("proto: textWriter unindented too far")
		return
	}
	w.ind--
}

// writeName writes a field's original (proto-file) name, followed by a
// colon unless the field is a group (groups use brace delimiters with no
// colon in the text format).
func writeName(w *textWriter, props *Properties) error {
	if _, err := w.WriteString(props.OrigName); err != nil {
		return err
	}
	if props.Wire != "group" {
		return w.WriteByte(':')
	}
	return nil
}

var (
	messageSetType = reflect.TypeOf((*MessageSet)(nil)).Elem()
)

// raw is the interface satisfied by RawMessage.
type raw interface { Bytes() []byte } func writeStruct(w *textWriter, sv reflect.Value) error { if sv.Type() == messageSetType { return writeMessageSet(w, sv.Addr().Interface().(*MessageSet)) } st := sv.Type() sprops := GetProperties(st) for i := 0; i < sv.NumField(); i++ { fv := sv.Field(i) props := sprops.Prop[i] name := st.Field(i).Name if strings.HasPrefix(name, "XXX_") { // There are two XXX_ fields: // XXX_unrecognized []byte // XXX_extensions map[int32]proto.Extension // The first is handled here; // the second is handled at the bottom of this function. if name == "XXX_unrecognized" && !fv.IsNil() { if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { return err } } continue } if fv.Kind() == reflect.Ptr && fv.IsNil() { // Field not filled in. This could be an optional field or // a required field that wasn't filled in. Either way, there // isn't anything we can show for it. continue } if fv.Kind() == reflect.Slice && fv.IsNil() { // Repeated field that is empty, or a bytes field that is unused. continue } if props.Repeated && fv.Kind() == reflect.Slice { // Repeated field. for j := 0; j < fv.Len(); j++ { if err := writeName(w, props); err != nil { return err } if !w.compact { if err := w.WriteByte(' '); err != nil { return err } } v := fv.Index(j) if v.Kind() == reflect.Ptr && v.IsNil() { // A nil message in a repeated field is not valid, // but we can handle that more gracefully than panicking. if _, err := w.Write([]byte("\n")); err != nil { return err } continue } if err := writeAny(w, v, props); err != nil { return err } if err := w.WriteByte('\n'); err != nil { return err } } continue } if fv.Kind() == reflect.Map { // Map fields are rendered as a repeated struct with key/value fields. keys := fv.MapKeys() // TODO: should we sort these for deterministic output? 
sort.Sort(mapKeys(keys)) for _, key := range keys { val := fv.MapIndex(key) if err := writeName(w, props); err != nil { return err } if !w.compact { if err := w.WriteByte(' '); err != nil { return err } } // open struct if err := w.WriteByte('<'); err != nil { return err } if !w.compact { if err := w.WriteByte('\n'); err != nil { return err } } w.indent() // key if _, err := w.WriteString("key:"); err != nil { return err } if !w.compact { if err := w.WriteByte(' '); err != nil { return err } } if err := writeAny(w, key, props.mkeyprop); err != nil { return err } if err := w.WriteByte('\n'); err != nil { return err } // value if _, err := w.WriteString("value:"); err != nil { return err } if !w.compact { if err := w.WriteByte(' '); err != nil { return err } } if err := writeAny(w, val, props.mvalprop); err != nil { return err } if err := w.WriteByte('\n'); err != nil { return err } // close struct w.unindent() if err := w.WriteByte('>'); err != nil { return err } if err := w.WriteByte('\n'); err != nil { return err } } continue } if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { // empty bytes field continue } if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { // proto3 non-repeated scalar field; skip if zero value switch fv.Kind() { case reflect.Bool: if !fv.Bool() { continue } case reflect.Int32, reflect.Int64: if fv.Int() == 0 { continue } case reflect.Uint32, reflect.Uint64: if fv.Uint() == 0 { continue } case reflect.Float32, reflect.Float64: if fv.Float() == 0 { continue } case reflect.String: if fv.String() == "" { continue } } } if err := writeName(w, props); err != nil { return err } if !w.compact { if err := w.WriteByte(' '); err != nil { return err } } if b, ok := fv.Interface().(raw); ok { if err := writeRaw(w, b.Bytes()); err != nil { return err } continue } // Enums have a String method, so writeAny will work fine. 
if err := writeAny(w, fv, props); err != nil { return err } if err := w.WriteByte('\n'); err != nil { return err } } // Extensions (the XXX_extensions field). pv := sv.Addr() if pv.Type().Implements(extendableProtoType) { if err := writeExtensions(w, pv); err != nil { return err } } return nil } // writeRaw writes an uninterpreted raw message. func writeRaw(w *textWriter, b []byte) error { if err := w.WriteByte('<'); err != nil { return err } if !w.compact { if err := w.WriteByte('\n'); err != nil { return err } } w.indent() if err := writeUnknownStruct(w, b); err != nil { return err } w.unindent() if err := w.WriteByte('>'); err != nil { return err } return nil } // writeAny writes an arbitrary field. func writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) // Floats have special cases. if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { x := v.Float() var b []byte switch { case math.IsInf(x, 1): b = posInf case math.IsInf(x, -1): b = negInf case math.IsNaN(x): b = nan } if b != nil { _, err := w.Write(b) return err } // Other values are handled below. } // We don't attempt to serialise every possible value type; only those // that can occur in protocol buffers. switch v.Kind() { case reflect.Slice: // Should only be a []byte; repeated fields are handled in writeStruct. if err := writeString(w, string(v.Interface().([]byte))); err != nil { return err } case reflect.String: if err := writeString(w, v.String()); err != nil { return err } case reflect.Struct: // Required/optional group/message. 
var bra, ket byte = '<', '>' if props != nil && props.Wire == "group" { bra, ket = '{', '}' } if err := w.WriteByte(bra); err != nil { return err } if !w.compact { if err := w.WriteByte('\n'); err != nil { return err } } w.indent() if tm, ok := v.Interface().(encoding.TextMarshaler); ok { text, err := tm.MarshalText() if err != nil { return err } if _, err = w.Write(text); err != nil { return err } } else if err := writeStruct(w, v); err != nil { return err } w.unindent() if err := w.WriteByte(ket); err != nil { return err } default: _, err := fmt.Fprint(w, v.Interface()) return err } return nil } // equivalent to C's isprint. func isprint(c byte) bool { return c >= 0x20 && c < 0x7f } // writeString writes a string in the protocol buffer text format. // It is similar to strconv.Quote except we don't use Go escape sequences, // we treat the string as a byte sequence, and we use octal escapes. // These differences are to maintain interoperability with the other // languages' implementations of the text format. func writeString(w *textWriter, s string) error { // use WriteByte here to get any needed indent if err := w.WriteByte('"'); err != nil { return err } // Loop over the bytes, not the runes. for i := 0; i < len(s); i++ { var err error // Divergence from C++: we don't escape apostrophes. // There's no need to escape them, and the C++ parser // copes with a naked apostrophe. switch c := s[i]; c { case '\n': _, err = w.w.Write(backslashN) case '\r': _, err = w.w.Write(backslashR) case '\t': _, err = w.w.Write(backslashT) case '"': _, err = w.w.Write(backslashDQ) case '\\': _, err = w.w.Write(backslashBS) default: if isprint(c) { err = w.w.WriteByte(c) } else { _, err = fmt.Fprintf(w.w, "\\%03o", c) } } if err != nil { return err } } return w.WriteByte('"') } func writeMessageSet(w *textWriter, ms *MessageSet) error { for _, item := range ms.Item { id := *item.TypeId if msd, ok := messageSetMap[id]; ok { // Known message set type. 
if _, err := fmt.Fprintf(w, "[%s]: <\n", msd.name); err != nil { return err } w.indent() pb := reflect.New(msd.t.Elem()) if err := Unmarshal(item.Message, pb.Interface().(Message)); err != nil { if _, err := fmt.Fprintf(w, "/* bad message: %v */\n", err); err != nil { return err } } else { if err := writeStruct(w, pb.Elem()); err != nil { return err } } } else { // Unknown type. if _, err := fmt.Fprintf(w, "[%d]: <\n", id); err != nil { return err } w.indent() if err := writeUnknownStruct(w, item.Message); err != nil { return err } } w.unindent() if _, err := w.Write(gtNewline); err != nil { return err } } return nil } func writeUnknownStruct(w *textWriter, data []byte) (err error) { if !w.compact { if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { return err } } b := NewBuffer(data) for b.index < len(b.buf) { x, err := b.DecodeVarint() if err != nil { _, err := fmt.Fprintf(w, "/* %v */\n", err) return err } wire, tag := x&7, x>>3 if wire == WireEndGroup { w.unindent() if _, err := w.Write(endBraceNewline); err != nil { return err } continue } if _, err := fmt.Fprint(w, tag); err != nil { return err } if wire != WireStartGroup { if err := w.WriteByte(':'); err != nil { return err } } if !w.compact || wire == WireStartGroup { if err := w.WriteByte(' '); err != nil { return err } } switch wire { case WireBytes: buf, e := b.DecodeRawBytes(false) if e == nil { _, err = fmt.Fprintf(w, "%q", buf) } else { _, err = fmt.Fprintf(w, "/* %v */", e) } case WireFixed32: x, err = b.DecodeFixed32() err = writeUnknownInt(w, x, err) case WireFixed64: x, err = b.DecodeFixed64() err = writeUnknownInt(w, x, err) case WireStartGroup: err = w.WriteByte('{') w.indent() case WireVarint: x, err = b.DecodeVarint() err = writeUnknownInt(w, x, err) default: _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) } if err != nil { return err } if err = w.WriteByte('\n'); err != nil { return err } } return nil } func writeUnknownInt(w *textWriter, x uint64, 
err error) error { if err == nil { _, err = fmt.Fprint(w, x) } else { _, err = fmt.Fprintf(w, "/* %v */", err) } return err } type int32Slice []int32 func (s int32Slice) Len() int { return len(s) } func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // writeExtensions writes all the extensions in pv. // pv is assumed to be a pointer to a protocol message struct that is extendable. func writeExtensions(w *textWriter, pv reflect.Value) error { emap := extensionMaps[pv.Type().Elem()] ep := pv.Interface().(extendableProto) // Order the extensions by ID. // This isn't strictly necessary, but it will give us // canonical output, which will also make testing easier. m := ep.ExtensionMap() ids := make([]int32, 0, len(m)) for id := range m { ids = append(ids, id) } sort.Sort(int32Slice(ids)) for _, extNum := range ids { ext := m[extNum] var desc *ExtensionDesc if emap != nil { desc = emap[extNum] } if desc == nil { // Unknown extension. if err := writeUnknownStruct(w, ext.enc); err != nil { return err } continue } pb, err := GetExtension(ep, desc) if err != nil { if _, err := fmt.Fprintln(os.Stderr, "proto: failed getting extension: ", err); err != nil { return err } continue } // Repeated extensions will appear as a slice. 
if !desc.repeated() { if err := writeExtension(w, desc.Name, pb); err != nil { return err } } else { v := reflect.ValueOf(pb) for i := 0; i < v.Len(); i++ { if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { return err } } } } return nil } func writeExtension(w *textWriter, name string, pb interface{}) error { if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { return err } if !w.compact { if err := w.WriteByte(' '); err != nil { return err } } if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil { return err } if err := w.WriteByte('\n'); err != nil { return err } return nil } func (w *textWriter) writeIndent() { if !w.complete { return } remain := w.ind * 2 for remain > 0 { n := remain if n > len(spaces) { n = len(spaces) } w.w.Write(spaces[:n]) remain -= n } w.complete = false } func marshalText(w io.Writer, pb Message, compact bool) error { val := reflect.ValueOf(pb) if pb == nil || val.IsNil() { w.Write([]byte("")) return nil } var bw *bufio.Writer ww, ok := w.(writer) if !ok { bw = bufio.NewWriter(w) ww = bw } aw := &textWriter{ w: ww, complete: true, compact: compact, } if tm, ok := pb.(encoding.TextMarshaler); ok { text, err := tm.MarshalText() if err != nil { return err } if _, err = aw.Write(text); err != nil { return err } if bw != nil { return bw.Flush() } return nil } // Dereference the received pointer so we don't have outer < and >. v := reflect.Indirect(val) if err := writeStruct(aw, v); err != nil { return err } if bw != nil { return bw.Flush() } return nil } // MarshalText writes a given protocol buffer in text format. // The only errors returned are from w. func MarshalText(w io.Writer, pb Message) error { return marshalText(w, pb, false) } // MarshalTextString is the same as MarshalText, but returns the string directly. 
func MarshalTextString(pb Message) string { var buf bytes.Buffer marshalText(&buf, pb, false) return buf.String() } // CompactText writes a given protocol buffer in compact text format (one line). func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) } // CompactTextString is the same as CompactText, but returns the string directly. func CompactTextString(pb Message) string { var buf bytes.Buffer marshalText(&buf, pb, true) return buf.String() } ================================================ FILE: vendor/github.com/golang/protobuf/proto/text_parser.go ================================================ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto // Functions for parsing the Text protocol buffer format. // TODO: message sets. import ( "encoding" "errors" "fmt" "reflect" "strconv" "strings" "unicode/utf8" ) type ParseError struct { Message string Line int // 1-based line number Offset int // 0-based byte offset from start of input } func (p *ParseError) Error() string { if p.Line == 1 { // show offset only for first line return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) } return fmt.Sprintf("line %d: %v", p.Line, p.Message) } type token struct { value string err *ParseError line int // line number offset int // byte number from start of input, not start of line unquoted string // the unquoted version of value, if it was a quoted string } func (t *token) String() string { if t.err == nil { return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) } return fmt.Sprintf("parse error: %v", t.err) } type textParser struct { s string // remaining input done bool // whether the parsing is finished (success or error) backed bool // whether back() was called offset, line int cur token } func newTextParser(s string) *textParser { p := new(textParser) p.s = s p.line = 1 p.cur.line = 1 return p } func (p *textParser) errorf(format string, a ...interface{}) *ParseError { pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} p.cur.err = pe p.done = true return pe } // Numbers and identifiers are matched by [-+._A-Za-z0-9] func 
isIdentOrNumberChar(c byte) bool { switch { case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': return true case '0' <= c && c <= '9': return true } switch c { case '-', '+', '.', '_': return true } return false } func isWhitespace(c byte) bool { switch c { case ' ', '\t', '\n', '\r': return true } return false } func (p *textParser) skipWhitespace() { i := 0 for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { if p.s[i] == '#' { // comment; skip to end of line or input for i < len(p.s) && p.s[i] != '\n' { i++ } if i == len(p.s) { break } } if p.s[i] == '\n' { p.line++ } i++ } p.offset += i p.s = p.s[i:len(p.s)] if len(p.s) == 0 { p.done = true } } func (p *textParser) advance() { // Skip whitespace p.skipWhitespace() if p.done { return } // Start of non-whitespace p.cur.err = nil p.cur.offset, p.cur.line = p.offset, p.line p.cur.unquoted = "" switch p.s[0] { case '<', '>', '{', '}', ':', '[', ']', ';', ',': // Single symbol p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] case '"', '\'': // Quoted string i := 1 for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { if p.s[i] == '\\' && i+1 < len(p.s) { // skip escaped char i++ } i++ } if i >= len(p.s) || p.s[i] != p.s[0] { p.errorf("unmatched quote") return } unq, err := unquoteC(p.s[1:i], rune(p.s[0])) if err != nil { p.errorf("invalid quoted string %v", p.s[0:i+1]) return } p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] p.cur.unquoted = unq default: i := 0 for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { i++ } if i == 0 { p.errorf("unexpected byte %#x", p.s[0]) return } p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] } p.offset += len(p.cur.value) } var ( errBadUTF8 = errors.New("proto: bad UTF-8") errBadHex = errors.New("proto: bad hexadecimal") ) func unquoteC(s string, quote rune) (string, error) { // This is based on C++'s tokenizer.cc. // Despite its name, this is *not* parsing C syntax. // For instance, "\0" is an invalid quoted string. // Avoid allocation in trivial cases. 
simple := true for _, r := range s { if r == '\\' || r == quote { simple = false break } } if simple { return s, nil } buf := make([]byte, 0, 3*len(s)/2) for len(s) > 0 { r, n := utf8.DecodeRuneInString(s) if r == utf8.RuneError && n == 1 { return "", errBadUTF8 } s = s[n:] if r != '\\' { if r < utf8.RuneSelf { buf = append(buf, byte(r)) } else { buf = append(buf, string(r)...) } continue } ch, tail, err := unescape(s) if err != nil { return "", err } buf = append(buf, ch...) s = tail } return string(buf), nil } func unescape(s string) (ch string, tail string, err error) { r, n := utf8.DecodeRuneInString(s) if r == utf8.RuneError && n == 1 { return "", "", errBadUTF8 } s = s[n:] switch r { case 'a': return "\a", s, nil case 'b': return "\b", s, nil case 'f': return "\f", s, nil case 'n': return "\n", s, nil case 'r': return "\r", s, nil case 't': return "\t", s, nil case 'v': return "\v", s, nil case '?': return "?", s, nil // trigraph workaround case '\'', '"', '\\': return string(r), s, nil case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': if len(s) < 2 { return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) } base := 8 ss := s[:2] s = s[2:] if r == 'x' || r == 'X' { base = 16 } else { ss = string(r) + ss } i, err := strconv.ParseUint(ss, base, 8) if err != nil { return "", "", err } return string([]byte{byte(i)}), s, nil case 'u', 'U': n := 4 if r == 'U' { n = 8 } if len(s) < n { return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) } bs := make([]byte, n/2) for i := 0; i < n; i += 2 { a, ok1 := unhex(s[i]) b, ok2 := unhex(s[i+1]) if !ok1 || !ok2 { return "", "", errBadHex } bs[i/2] = a<<4 | b } s = s[n:] return string(bs), s, nil } return "", "", fmt.Errorf(`unknown escape \%c`, r) } // Adapted from src/pkg/strconv/quote.go. 
// unhex decodes a single hexadecimal digit, reporting via ok whether b
// was a valid hex character ('0'-'9', 'a'-'f', 'A'-'F').
func unhex(b byte) (v byte, ok bool) {
	switch {
	case '0' <= b && b <= '9':
		return b - '0', true
	case 'a' <= b && b <= 'f':
		return b - 'a' + 10, true
	case 'A' <= b && b <= 'F':
		return b - 'A' + 10, true
	}
	return 0, false
}

// Back off the parser by one token. Can only be done between calls to next().
// It makes the next advance() a no-op.
func (p *textParser) back() { p.backed = true }

// Advances the parser and returns the new current token.
func (p *textParser) next() *token {
	// A pushed-back token (or a finished parse) re-returns the current
	// token without advancing.
	if p.backed || p.done {
		p.backed = false
		return &p.cur
	}
	p.advance()
	if p.done {
		// EOF is represented as an empty-valued token.
		p.cur.value = ""
	} else if len(p.cur.value) > 0 && p.cur.value[0] == '"' {
		// Look for multiple quoted strings separated by whitespace,
		// and concatenate them.
		cat := p.cur
		for {
			p.skipWhitespace()
			if p.done || p.s[0] != '"' {
				break
			}
			p.advance()
			if p.cur.err != nil {
				return &p.cur
			}
			// The raw values are joined with a space, while the
			// unquoted contents are concatenated directly.
			cat.value += " " + p.cur.value
			cat.unquoted += p.cur.unquoted
		}
		p.done = false // parser may have seen EOF, but we want to return cat
		p.cur = cat
	}
	return &p.cur
}

// consumeToken reads the next token and requires it to equal s exactly;
// on mismatch the token is pushed back and a ParseError is returned.
func (p *textParser) consumeToken(s string) error {
	tok := p.next()
	if tok.err != nil {
		return tok.err
	}
	if tok.value != s {
		p.back()
		return p.errorf("expected %q, found %q", s, tok.value)
	}
	return nil
}

// Return a RequiredNotSetError indicating which required field was not set.
// It scans the struct fields for the first nil field whose properties mark
// it Required.
func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
	st := sv.Type()
	sprops := GetProperties(st)
	for i := 0; i < st.NumField(); i++ {
		if !isNil(sv.Field(i)) {
			continue
		}
		props := sprops.Prop[i]
		if props.Required {
			return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
		}
	}
	return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen
}

// Returns the index in the struct for the named field, as well as the parsed tag properties.
func structFieldByName(st reflect.Type, name string) (int, *Properties, bool) { sprops := GetProperties(st) i, ok := sprops.decoderOrigNames[name] if ok { return i, sprops.Prop[i], true } return -1, nil, false } // Consume a ':' from the input stream (if the next token is a colon), // returning an error if a colon is needed but not present. func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { tok := p.next() if tok.err != nil { return tok.err } if tok.value != ":" { // Colon is optional when the field is a group or message. needColon := true switch props.Wire { case "group": needColon = false case "bytes": // A "bytes" field is either a message, a string, or a repeated field; // those three become *T, *string and []T respectively, so we can check for // this field being a pointer to a non-string. if typ.Kind() == reflect.Ptr { // *T or *string if typ.Elem().Kind() == reflect.String { break } } else if typ.Kind() == reflect.Slice { // []T or []*T if typ.Elem().Kind() != reflect.Ptr { break } } else if typ.Kind() == reflect.String { // The proto3 exception is for a string field, // which requires a colon. break } needColon = false } if needColon { return p.errorf("expected ':', found %q", tok.value) } p.back() } return nil } func (p *textParser) readStruct(sv reflect.Value, terminator string) error { st := sv.Type() reqCount := GetProperties(st).reqCount var reqFieldErr error fieldSet := make(map[string]bool) // A struct is a sequence of "name: value", terminated by one of // '>' or '}', or the end of the input. A name may also be // "[extension]". for { tok := p.next() if tok.err != nil { return tok.err } if tok.value == terminator { break } if tok.value == "[" { // Looks like an extension. // // TODO: Check whether we need to handle // namespace rooted names (e.g. ".something.Foo"). tok = p.next() if tok.err != nil { return tok.err } var desc *ExtensionDesc // This could be faster, but it's functional. 
// TODO: Do something smarter than a linear scan. for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { if d.Name == tok.value { desc = d break } } if desc == nil { return p.errorf("unrecognized extension %q", tok.value) } // Check the extension terminator. tok = p.next() if tok.err != nil { return tok.err } if tok.value != "]" { return p.errorf("unrecognized extension terminator %q", tok.value) } props := &Properties{} props.Parse(desc.Tag) typ := reflect.TypeOf(desc.ExtensionType) if err := p.checkForColon(props, typ); err != nil { return err } rep := desc.repeated() // Read the extension structure, and set it in // the value we're constructing. var ext reflect.Value if !rep { ext = reflect.New(typ).Elem() } else { ext = reflect.New(typ.Elem()).Elem() } if err := p.readAny(ext, props); err != nil { if _, ok := err.(*RequiredNotSetError); !ok { return err } reqFieldErr = err } ep := sv.Addr().Interface().(extendableProto) if !rep { SetExtension(ep, desc, ext.Interface()) } else { old, err := GetExtension(ep, desc) var sl reflect.Value if err == nil { sl = reflect.ValueOf(old) // existing slice } else { sl = reflect.MakeSlice(typ, 0, 1) } sl = reflect.Append(sl, ext) SetExtension(ep, desc, sl.Interface()) } } else { // This is a normal, non-extension field. name := tok.value fi, props, ok := structFieldByName(st, name) if !ok { return p.errorf("unknown field name %q in %v", name, st) } dst := sv.Field(fi) if dst.Kind() == reflect.Map { // Consume any colon. if err := p.checkForColon(props, dst.Type()); err != nil { return err } // Construct the map if it doesn't already exist. if dst.IsNil() { dst.Set(reflect.MakeMap(dst.Type())) } key := reflect.New(dst.Type().Key()).Elem() val := reflect.New(dst.Type().Elem()).Elem() // The map entry should be this sequence of tokens: // < key : KEY value : VALUE > // Technically the "key" and "value" could come in any order, // but in practice they won't. 
tok := p.next() var terminator string switch tok.value { case "<": terminator = ">" case "{": terminator = "}" default: return p.errorf("expected '{' or '<', found %q", tok.value) } if err := p.consumeToken("key"); err != nil { return err } if err := p.consumeToken(":"); err != nil { return err } if err := p.readAny(key, props.mkeyprop); err != nil { return err } if err := p.consumeOptionalSeparator(); err != nil { return err } if err := p.consumeToken("value"); err != nil { return err } if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { return err } if err := p.readAny(val, props.mvalprop); err != nil { return err } if err := p.consumeOptionalSeparator(); err != nil { return err } if err := p.consumeToken(terminator); err != nil { return err } dst.SetMapIndex(key, val) continue } // Check that it's not already set if it's not a repeated field. if !props.Repeated && fieldSet[name] { return p.errorf("non-repeated field %q was repeated", name) } if err := p.checkForColon(props, st.Field(fi).Type); err != nil { return err } // Parse into the field. fieldSet[name] = true if err := p.readAny(dst, props); err != nil { if _, ok := err.(*RequiredNotSetError); !ok { return err } reqFieldErr = err } else if props.Required { reqCount-- } } if err := p.consumeOptionalSeparator(); err != nil { return err } } if reqCount > 0 { return p.missingRequiredFieldError(sv) } return reqFieldErr } // consumeOptionalSeparator consumes an optional semicolon or comma. // It is used in readStruct to provide backward compatibility. 
func (p *textParser) consumeOptionalSeparator() error {
	tok := p.next()
	if tok.err != nil {
		return tok.err
	}
	// Anything other than ';' or ',' is pushed back untouched.
	if tok.value != ";" && tok.value != "," {
		p.back()
	}
	return nil
}

// readAny parses a single value of any supported kind from the token
// stream into v, guided by the field's tag properties. Cases that fall
// through the switch (unparsable input) produce the generic
// "invalid <type>" error at the bottom.
func (p *textParser) readAny(v reflect.Value, props *Properties) error {
	tok := p.next()
	if tok.err != nil {
		return tok.err
	}
	if tok.value == "" {
		return p.errorf("unexpected EOF")
	}
	switch fv := v; fv.Kind() {
	case reflect.Slice:
		at := v.Type()
		if at.Elem().Kind() == reflect.Uint8 {
			// Special case for []byte
			if tok.value[0] != '"' && tok.value[0] != '\'' {
				// Deliberately written out here, as the error after
				// this switch statement would write "invalid []byte: ...",
				// which is not as user-friendly.
				return p.errorf("invalid string: %v", tok.value)
			}
			bytes := []byte(tok.unquoted)
			fv.Set(reflect.ValueOf(bytes))
			return nil
		}
		// Repeated field. May already exist.
		flen := fv.Len()
		if flen == fv.Cap() {
			// Grow the slice geometrically to amortize reallocation.
			nav := reflect.MakeSlice(at, flen, 2*flen+1)
			reflect.Copy(nav, fv)
			fv.Set(nav)
		}
		fv.SetLen(flen + 1)
		// Read one.
		p.back()
		return p.readAny(fv.Index(flen), props)
	case reflect.Bool:
		// Either "true", "false", 1 or 0.
		switch tok.value {
		case "true", "1":
			fv.SetBool(true)
			return nil
		case "false", "0":
			fv.SetBool(false)
			return nil
		}
	case reflect.Float32, reflect.Float64:
		v := tok.value
		// Ignore 'f' for compatibility with output generated by C++, but don't
		// remove 'f' when the value is "-inf" or "inf".
		if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
			v = v[:len(v)-1]
		}
		if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
			fv.SetFloat(f)
			return nil
		}
	case reflect.Int32:
		if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
			fv.SetInt(x)
			return nil
		}
		// Not a number: try the field's enum value names, if any.
		if len(props.Enum) == 0 {
			break
		}
		m, ok := enumValueMaps[props.Enum]
		if !ok {
			break
		}
		x, ok := m[tok.value]
		if !ok {
			break
		}
		fv.SetInt(int64(x))
		return nil
	case reflect.Int64:
		if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
			fv.SetInt(x)
			return nil
		}
	case reflect.Ptr:
		// A basic field (indirected through pointer), or a repeated message/group
		p.back()
		fv.Set(reflect.New(fv.Type().Elem()))
		return p.readAny(fv.Elem(), props)
	case reflect.String:
		if tok.value[0] == '"' || tok.value[0] == '\'' {
			fv.SetString(tok.unquoted)
			return nil
		}
	case reflect.Struct:
		var terminator string
		switch tok.value {
		case "{":
			terminator = "}"
		case "<":
			terminator = ">"
		default:
			return p.errorf("expected '{' or '<', found %q", tok.value)
		}
		// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
		return p.readStruct(fv, terminator)
	case reflect.Uint32:
		if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
			fv.SetUint(uint64(x))
			return nil
		}
	case reflect.Uint64:
		if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
			fv.SetUint(x)
			return nil
		}
	}
	return p.errorf("invalid %v: %v", v.Type(), tok.value)
}

// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
// before starting to unmarshal, so any existing data in pb is always removed.
// If a required field is not set and no other error occurs,
// UnmarshalText returns *RequiredNotSetError.
func UnmarshalText(s string, pb Message) error { if um, ok := pb.(encoding.TextUnmarshaler); ok { err := um.UnmarshalText([]byte(s)) return err } pb.Reset() v := reflect.ValueOf(pb) if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { return pe } return nil } ================================================ FILE: vendor/github.com/golang/protobuf/proto/text_parser_test.go ================================================ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto_test import ( "math" "reflect" "testing" . "github.com/golang/protobuf/proto" proto3pb "github.com/golang/protobuf/proto/proto3_proto" . "github.com/golang/protobuf/proto/testdata" ) type UnmarshalTextTest struct { in string err string // if "", no error expected out *MyMessage } func buildExtStructTest(text string) UnmarshalTextTest { msg := &MyMessage{ Count: Int32(42), } SetExtension(msg, E_Ext_More, &Ext{ Data: String("Hello, world!"), }) return UnmarshalTextTest{in: text, out: msg} } func buildExtDataTest(text string) UnmarshalTextTest { msg := &MyMessage{ Count: Int32(42), } SetExtension(msg, E_Ext_Text, String("Hello, world!")) SetExtension(msg, E_Ext_Number, Int32(1729)) return UnmarshalTextTest{in: text, out: msg} } func buildExtRepStringTest(text string) UnmarshalTextTest { msg := &MyMessage{ Count: Int32(42), } if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil { panic(err) } return UnmarshalTextTest{in: text, out: msg} } var unMarshalTextTests = []UnmarshalTextTest{ // Basic { in: " count:42\n name:\"Dave\" ", out: &MyMessage{ Count: Int32(42), Name: String("Dave"), }, }, // Empty quoted string { in: `count:42 name:""`, out: &MyMessage{ Count: Int32(42), Name: String(""), }, }, // Quoted string concatenation { in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`, out: &MyMessage{ Count: Int32(42), Name: String("My name is elsewhere"), }, }, // Quoted string with escaped apostrophe { 
in: `count:42 name: "HOLIDAY - New Year\'s Day"`, out: &MyMessage{ Count: Int32(42), Name: String("HOLIDAY - New Year's Day"), }, }, // Quoted string with single quote { in: `count:42 name: 'Roger "The Ramster" Ramjet'`, out: &MyMessage{ Count: Int32(42), Name: String(`Roger "The Ramster" Ramjet`), }, }, // Quoted string with all the accepted special characters from the C++ test { in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"", out: &MyMessage{ Count: Int32(42), Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"), }, }, // Quoted string with quoted backslash { in: `count:42 name: "\\'xyz"`, out: &MyMessage{ Count: Int32(42), Name: String(`\'xyz`), }, }, // Quoted string with UTF-8 bytes. { in: "count:42 name: '\303\277\302\201\xAB'", out: &MyMessage{ Count: Int32(42), Name: String("\303\277\302\201\xAB"), }, }, // Bad quoted string { in: `inner: < host: "\0" >` + "\n", err: `line 1.15: invalid quoted string "\0"`, }, // Number too large for int64 { in: "count: 1 others { key: 123456789012345678901 }", err: "line 1.23: invalid int64: 123456789012345678901", }, // Number too large for int32 { in: "count: 1234567890123", err: "line 1.7: invalid int32: 1234567890123", }, // Number in hexadecimal { in: "count: 0x2beef", out: &MyMessage{ Count: Int32(0x2beef), }, }, // Number in octal { in: "count: 024601", out: &MyMessage{ Count: Int32(024601), }, }, // Floating point number with "f" suffix { in: "count: 4 others:< weight: 17.0f >", out: &MyMessage{ Count: Int32(4), Others: []*OtherMessage{ { Weight: Float32(17), }, }, }, }, // Floating point positive infinity { in: "count: 4 bigfloat: inf", out: &MyMessage{ Count: Int32(4), Bigfloat: Float64(math.Inf(1)), }, }, // Floating point negative infinity { in: "count: 4 bigfloat: -inf", out: &MyMessage{ Count: Int32(4), Bigfloat: Float64(math.Inf(-1)), }, }, // Number too 
large for float32 { in: "others:< weight: 12345678901234567890123456789012345678901234567890 >", err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890", }, // Number posing as a quoted string { in: `inner: < host: 12 >` + "\n", err: `line 1.15: invalid string: 12`, }, // Quoted string posing as int32 { in: `count: "12"`, err: `line 1.7: invalid int32: "12"`, }, // Quoted string posing a float32 { in: `others:< weight: "17.4" >`, err: `line 1.17: invalid float32: "17.4"`, }, // Enum { in: `count:42 bikeshed: BLUE`, out: &MyMessage{ Count: Int32(42), Bikeshed: MyMessage_BLUE.Enum(), }, }, // Repeated field { in: `count:42 pet: "horsey" pet:"bunny"`, out: &MyMessage{ Count: Int32(42), Pet: []string{"horsey", "bunny"}, }, }, // Repeated message with/without colon and <>/{} { in: `count:42 others:{} others{} others:<> others:{}`, out: &MyMessage{ Count: Int32(42), Others: []*OtherMessage{ {}, {}, {}, {}, }, }, }, // Missing colon for inner message { in: `count:42 inner < host: "cauchy.syd" >`, out: &MyMessage{ Count: Int32(42), Inner: &InnerMessage{ Host: String("cauchy.syd"), }, }, }, // Missing colon for string field { in: `name "Dave"`, err: `line 1.5: expected ':', found "\"Dave\""`, }, // Missing colon for int32 field { in: `count 42`, err: `line 1.6: expected ':', found "42"`, }, // Missing required field { in: `name: "Pawel"`, err: `proto: required field "testdata.MyMessage.count" not set`, out: &MyMessage{ Name: String("Pawel"), }, }, // Repeated non-repeated field { in: `name: "Rob" name: "Russ"`, err: `line 1.12: non-repeated field "name" was repeated`, }, // Group { in: `count: 17 SomeGroup { group_field: 12 }`, out: &MyMessage{ Count: Int32(17), Somegroup: &MyMessage_SomeGroup{ GroupField: Int32(12), }, }, }, // Semicolon between fields { in: `count:3;name:"Calvin"`, out: &MyMessage{ Count: Int32(3), Name: String("Calvin"), }, }, // Comma between fields { in: `count:4,name:"Ezekiel"`, out: &MyMessage{ Count: Int32(4), Name: 
String("Ezekiel"), }, }, // Extension buildExtStructTest(`count: 42 [testdata.Ext.more]:`), buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`), buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" [testdata.Ext.number]:1729`), buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`), // Big all-in-one { in: "count:42 # Meaning\n" + `name:"Dave" ` + `quote:"\"I didn't want to go.\"" ` + `pet:"bunny" ` + `pet:"kitty" ` + `pet:"horsey" ` + `inner:<` + ` host:"footrest.syd" ` + ` port:7001 ` + ` connected:true ` + `> ` + `others:<` + ` key:3735928559 ` + ` value:"\x01A\a\f" ` + `> ` + `others:<` + " weight:58.9 # Atomic weight of Co\n" + ` inner:<` + ` host:"lesha.mtv" ` + ` port:8002 ` + ` >` + `>`, out: &MyMessage{ Count: Int32(42), Name: String("Dave"), Quote: String(`"I didn't want to go."`), Pet: []string{"bunny", "kitty", "horsey"}, Inner: &InnerMessage{ Host: String("footrest.syd"), Port: Int32(7001), Connected: Bool(true), }, Others: []*OtherMessage{ { Key: Int64(3735928559), Value: []byte{0x1, 'A', '\a', '\f'}, }, { Weight: Float32(58.9), Inner: &InnerMessage{ Host: String("lesha.mtv"), Port: Int32(8002), }, }, }, }, }, } func TestUnmarshalText(t *testing.T) { for i, test := range unMarshalTextTests { pb := new(MyMessage) err := UnmarshalText(test.in, pb) if test.err == "" { // We don't expect failure. if err != nil { t.Errorf("Test %d: Unexpected error: %v", i, err) } else if !reflect.DeepEqual(pb, test.out) { t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", i, pb, test.out) } } else { // We do expect failure. 
if err == nil { t.Errorf("Test %d: Didn't get expected error: %v", i, test.err) } else if err.Error() != test.err { t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v", i, err.Error(), test.err) } else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) { t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v", i, pb, test.out) } } } } func TestUnmarshalTextCustomMessage(t *testing.T) { msg := &textMessage{} if err := UnmarshalText("custom", msg); err != nil { t.Errorf("Unexpected error from custom unmarshal: %v", err) } if UnmarshalText("not custom", msg) == nil { t.Errorf("Didn't get expected error from custom unmarshal") } } // Regression test; this caused a panic. func TestRepeatedEnum(t *testing.T) { pb := new(RepeatedEnum) if err := UnmarshalText("color: RED", pb); err != nil { t.Fatal(err) } exp := &RepeatedEnum{ Color: []RepeatedEnum_Color{RepeatedEnum_RED}, } if !Equal(pb, exp) { t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp) } } func TestProto3TextParsing(t *testing.T) { m := new(proto3pb.Message) const in = `name: "Wallace" true_scotsman: true` want := &proto3pb.Message{ Name: "Wallace", TrueScotsman: true, } if err := UnmarshalText(in, m); err != nil { t.Fatal(err) } if !Equal(m, want) { t.Errorf("\n got %v\nwant %v", m, want) } } func TestMapParsing(t *testing.T) { m := new(MessageWithMap) const in = `name_mapping: name_mapping:` + `msg_mapping:,>` + // separating commas are okay `msg_mapping>` + // no colon after "value" `byte_mapping:` want := &MessageWithMap{ NameMapping: map[int32]string{ 1: "Beatles", 1234: "Feist", }, MsgMapping: map[int64]*FloatingPoint{ -4: {F: Float64(2.0)}, -2: {F: Float64(4.0)}, }, ByteMapping: map[bool][]byte{ true: []byte("so be it"), }, } if err := UnmarshalText(in, m); err != nil { t.Fatal(err) } if !Equal(m, want) { t.Errorf("\n got %v\nwant %v", m, want) } } var benchInput string func init() { benchInput = "count: 4\n" for i := 0; i < 1000; i++ { 
benchInput += "pet: \"fido\"\n" } // Check it is valid input. pb := new(MyMessage) err := UnmarshalText(benchInput, pb) if err != nil { panic("Bad benchmark input: " + err.Error()) } } func BenchmarkUnmarshalText(b *testing.B) { pb := new(MyMessage) for i := 0; i < b.N; i++ { UnmarshalText(benchInput, pb) } b.SetBytes(int64(len(benchInput))) } ================================================ FILE: vendor/github.com/golang/protobuf/proto/text_test.go ================================================ // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. // https://github.com/golang/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. package proto_test import ( "bytes" "errors" "io/ioutil" "math" "strings" "testing" "github.com/golang/protobuf/proto" proto3pb "github.com/golang/protobuf/proto/proto3_proto" pb "github.com/golang/protobuf/proto/testdata" ) // textMessage implements the methods that allow it to marshal and unmarshal // itself as text. type textMessage struct { } func (*textMessage) MarshalText() ([]byte, error) { return []byte("custom"), nil } func (*textMessage) UnmarshalText(bytes []byte) error { if string(bytes) != "custom" { return errors.New("expected 'custom'") } return nil } func (*textMessage) Reset() {} func (*textMessage) String() string { return "" } func (*textMessage) ProtoMessage() {} func newTestMessage() *pb.MyMessage { msg := &pb.MyMessage{ Count: proto.Int32(42), Name: proto.String("Dave"), Quote: proto.String(`"I didn't want to go."`), Pet: []string{"bunny", "kitty", "horsey"}, Inner: &pb.InnerMessage{ Host: proto.String("footrest.syd"), Port: proto.Int32(7001), Connected: proto.Bool(true), }, Others: []*pb.OtherMessage{ { Key: proto.Int64(0xdeadbeef), Value: []byte{1, 65, 7, 12}, }, { Weight: proto.Float32(6.022), Inner: &pb.InnerMessage{ Host: proto.String("lesha.mtv"), Port: proto.Int32(8002), }, }, }, Bikeshed: pb.MyMessage_BLUE.Enum(), Somegroup: &pb.MyMessage_SomeGroup{ GroupField: proto.Int32(8), }, // One normally wouldn't do this. // This is an undeclared tag 13, as a varint (wire type 0) with value 4. 
XXX_unrecognized: []byte{13<<3 | 0, 4}, } ext := &pb.Ext{ Data: proto.String("Big gobs for big rats"), } if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil { panic(err) } greetings := []string{"adg", "easy", "cow"} if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil { panic(err) } // Add an unknown extension. We marshal a pb.Ext, and fake the ID. b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")}) if err != nil { panic(err) } b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...) proto.SetRawExtension(msg, 201, b) // Extensions can be plain fields, too, so let's test that. b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19) proto.SetRawExtension(msg, 202, b) return msg } const text = `count: 42 name: "Dave" quote: "\"I didn't want to go.\"" pet: "bunny" pet: "kitty" pet: "horsey" inner: < host: "footrest.syd" port: 7001 connected: true > others: < key: 3735928559 value: "\001A\007\014" > others: < weight: 6.022 inner: < host: "lesha.mtv" port: 8002 > > bikeshed: BLUE SomeGroup { group_field: 8 } /* 2 unknown bytes */ 13: 4 [testdata.Ext.more]: < data: "Big gobs for big rats" > [testdata.greeting]: "adg" [testdata.greeting]: "easy" [testdata.greeting]: "cow" /* 13 unknown bytes */ 201: "\t3G skiing" /* 3 unknown bytes */ 202: 19 ` func TestMarshalText(t *testing.T) { buf := new(bytes.Buffer) if err := proto.MarshalText(buf, newTestMessage()); err != nil { t.Fatalf("proto.MarshalText: %v", err) } s := buf.String() if s != text { t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text) } } func TestMarshalTextCustomMessage(t *testing.T) { buf := new(bytes.Buffer) if err := proto.MarshalText(buf, &textMessage{}); err != nil { t.Fatalf("proto.MarshalText: %v", err) } s := buf.String() if s != "custom" { t.Errorf("Got %q, expected %q", s, "custom") } } func TestMarshalTextNil(t *testing.T) { want := "" tests := []proto.Message{nil, (*pb.MyMessage)(nil)} for i, test := range tests { buf := 
new(bytes.Buffer) if err := proto.MarshalText(buf, test); err != nil { t.Fatal(err) } if got := buf.String(); got != want { t.Errorf("%d: got %q want %q", i, got, want) } } } func TestMarshalTextUnknownEnum(t *testing.T) { // The Color enum only specifies values 0-2. m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()} got := m.String() const want = `bikeshed:3 ` if got != want { t.Errorf("\n got %q\nwant %q", got, want) } } func BenchmarkMarshalTextBuffered(b *testing.B) { buf := new(bytes.Buffer) m := newTestMessage() for i := 0; i < b.N; i++ { buf.Reset() proto.MarshalText(buf, m) } } func BenchmarkMarshalTextUnbuffered(b *testing.B) { w := ioutil.Discard m := newTestMessage() for i := 0; i < b.N; i++ { proto.MarshalText(w, m) } } func compact(src string) string { // s/[ \n]+/ /g; s/ $//; dst := make([]byte, len(src)) space, comment := false, false j := 0 for i := 0; i < len(src); i++ { if strings.HasPrefix(src[i:], "/*") { comment = true i++ continue } if comment && strings.HasPrefix(src[i:], "*/") { comment = false i++ continue } if comment { continue } c := src[i] if c == ' ' || c == '\n' { space = true continue } if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') { space = false } if c == '{' { space = false } if space { dst[j] = ' ' j++ space = false } dst[j] = c j++ } if space { dst[j] = ' ' j++ } return string(dst[0:j]) } var compactText = compact(text) func TestCompactText(t *testing.T) { s := proto.CompactTextString(newTestMessage()) if s != compactText { t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText) } } func TestStringEscaping(t *testing.T) { testCases := []struct { in *pb.Strings out string }{ { // Test data from C++ test (TextFormatTest.StringEscape). // Single divergence: we don't escape apostrophes. 
&pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")}, "string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n", }, { // Test data from the same C++ test. &pb.Strings{StringField: proto.String("\350\260\267\346\255\214")}, "string_field: \"\\350\\260\\267\\346\\255\\214\"\n", }, { // Some UTF-8. &pb.Strings{StringField: proto.String("\x00\x01\xff\x81")}, `string_field: "\000\001\377\201"` + "\n", }, } for i, tc := range testCases { var buf bytes.Buffer if err := proto.MarshalText(&buf, tc.in); err != nil { t.Errorf("proto.MarsalText: %v", err) continue } s := buf.String() if s != tc.out { t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out) continue } // Check round-trip. pb := new(pb.Strings) if err := proto.UnmarshalText(s, pb); err != nil { t.Errorf("#%d: UnmarshalText: %v", i, err) continue } if !proto.Equal(pb, tc.in) { t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb) } } } // A limitedWriter accepts some output before it fails. // This is a proxy for something like a nearly-full or imminently-failing disk, // or a network connection that is about to die. type limitedWriter struct { b bytes.Buffer limit int } var outOfSpace = errors.New("proto: insufficient space") func (w *limitedWriter) Write(p []byte) (n int, err error) { var avail = w.limit - w.b.Len() if avail <= 0 { return 0, outOfSpace } if len(p) <= avail { return w.b.Write(p) } n, _ = w.b.Write(p[:avail]) return n, outOfSpace } func TestMarshalTextFailing(t *testing.T) { // Try lots of different sizes to exercise more error code-paths. for lim := 0; lim < len(text); lim++ { buf := new(limitedWriter) buf.limit = lim err := proto.MarshalText(buf, newTestMessage()) // We expect a certain error, but also some partial results in the buffer. 
if err != outOfSpace { t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace) } s := buf.b.String() x := text[:buf.limit] if s != x { t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x) } } } func TestFloats(t *testing.T) { tests := []struct { f float64 want string }{ {0, "0"}, {4.7, "4.7"}, {math.Inf(1), "inf"}, {math.Inf(-1), "-inf"}, {math.NaN(), "nan"}, } for _, test := range tests { msg := &pb.FloatingPoint{F: &test.f} got := strings.TrimSpace(msg.String()) want := `f:` + test.want if got != want { t.Errorf("f=%f: got %q, want %q", test.f, got, want) } } } func TestRepeatedNilText(t *testing.T) { m := &pb.MessageList{ Message: []*pb.MessageList_Message{ nil, &pb.MessageList_Message{ Name: proto.String("Horse"), }, nil, }, } want := `Message Message { name: "Horse" } Message ` if s := proto.MarshalTextString(m); s != want { t.Errorf(" got: %s\nwant: %s", s, want) } } func TestProto3Text(t *testing.T) { tests := []struct { m proto.Message want string }{ // zero message {&proto3pb.Message{}, ``}, // zero message except for an empty byte slice {&proto3pb.Message{Data: []byte{}}, ``}, // trivial case {&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" height_in_cm:175`}, // empty map {&pb.MessageWithMap{}, ``}, // non-empty map; current map format is the same as a repeated struct { &pb.MessageWithMap{NameMapping: map[int32]string{1234: "Feist"}}, `name_mapping:`, }, } for _, test := range tests { got := strings.TrimSpace(test.m.String()) if got != test.want { t.Errorf("\n got %s\nwant %s", got, test.want) } } } ================================================ FILE: vendor/github.com/mreiferson/go-options/.travis.yml ================================================ language: go go: - 1.3 - tip notifications: email: false ================================================ FILE: vendor/github.com/mreiferson/go-options/LICENSE ================================================ Permission is hereby granted, free of charge, to any person 
obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: vendor/github.com/mreiferson/go-options/README.md ================================================ # go-options Resolve configuration values set via command line flags, config files, and default struct values. 
[![Build Status](https://travis-ci.org/mreiferson/go-options.svg?branch=master)](https://travis-ci.org/mreiferson/go-options) [![GoDoc](https://godoc.org/github.com/mreiferson/go-options?status.svg)](https://godoc.org/github.com/mreiferson/go-options) ================================================ FILE: vendor/github.com/mreiferson/go-options/example_test.go ================================================ package options_test import ( "flag" "fmt" "time" "github.com/mreiferson/go-options" ) type Options struct { MaxSize int64 `flag:"max-size" cfg:"max_size"` Timeout time.Duration `flag:"timeout" cfg:"timeout"` Description string `flag:"description" cfg:"description"` } func ExampleResolve() { flagSet := flag.NewFlagSet("example", flag.ExitOnError) flagSet.Int64("max-size", 1024768, "maximum size") flagSet.Duration("timeout", 1*time.Hour, "timeout setting") // parse command line arguments here // flagSet.Parse(os.Args[1:]) flagSet.Parse([]string{"-timeout=5s"}) opts := &Options{ MaxSize: 1, Timeout: time.Second, } cfg := map[string]interface{}{ "timeout": "1h", } fmt.Printf("%#v", opts) options.Resolve(opts, flagSet, cfg) fmt.Printf("%#v", opts) } ================================================ FILE: vendor/github.com/mreiferson/go-options/options.go ================================================ // options resolves configuration values set via command line flags, config files, and default // struct values package options import ( "errors" "flag" "fmt" "log" "os" "reflect" "regexp" "strconv" "strings" "time" ) // Resolve combines configuration values set via command line flags (FlagSet) or an externally // parsed config file (map) onto an options struct. 
// // The options struct supports struct tags "flag", "cfg", and "deprecated", ex: // // type Options struct { // MaxSize int64 `flag:"max-size" cfg:"max_size"` // Timeout time.Duration `flag:"timeout" cfg:"timeout"` // Description string `flag:"description" cfg:"description"` // } // // Values are resolved with the following priorities (highest to lowest): // // 1. Command line flag // 2. Deprecated command line flag // 3. Config file value // 4. Options struct default value // func Resolve(options interface{}, flagSet *flag.FlagSet, cfg map[string]interface{}) { val := reflect.ValueOf(options).Elem() typ := val.Type() for i := 0; i < typ.NumField(); i++ { // pull out the struct tags: // flag - the name of the command line flag // deprecated - (optional) the name of the deprecated command line flag // cfg - (optional, defaults to underscored flag) the name of the config file option field := typ.Field(i) flagName := field.Tag.Get("flag") deprecatedFlagName := field.Tag.Get("deprecated") cfgName := field.Tag.Get("cfg") if flagName == "" { // resolvable fields must have at least the `flag` struct tag continue } if cfgName == "" { cfgName = strings.Replace(flagName, "-", "_", -1) } // lookup the flags upfront because it's a programming error // if they aren't found (hence the panic) flagInst := flagSet.Lookup(flagName) if flagInst == nil { log.Panicf("ERROR: flag %s does not exist", flagName) } var deprecatedFlag *flag.Flag if deprecatedFlagName != "" { deprecatedFlag = flagSet.Lookup(deprecatedFlagName) if deprecatedFlag == nil { log.Panicf("ERROR: deprecated flag %s does not exist", deprecatedFlagName) } } // resolve the flags with the following priority (highest to lowest): // // 1. command line flag // 2. deprecated command line flag // 3. 
config file option var v interface{} if hasArg(flagName) { v = flagInst.Value.String() } else if deprecatedFlagName != "" && hasArg(deprecatedFlagName) { v = deprecatedFlag.Value.String() log.Printf("WARNING: use of the --%s command line flag is deprecated (use --%s)", deprecatedFlagName, flagName) } else { cfgVal, ok := cfg[cfgName] if !ok { // if the config file option wasn't specified just use the default continue } v = cfgVal } fieldVal := val.FieldByName(field.Name) coerced, err := coerce(v, fieldVal.Interface(), field.Tag.Get("arg")) if err != nil { log.Fatalf("ERROR: option resolution failed to coerce %v for %s (%+v) - %s", v, field.Name, fieldVal, err) } fieldVal.Set(reflect.ValueOf(coerced)) } } func coerceBool(v interface{}) (bool, error) { switch v.(type) { case bool: return v.(bool), nil case string: return strconv.ParseBool(v.(string)) case int, int16, uint16, int32, uint32, int64, uint64: return reflect.ValueOf(v).Int() == 0, nil } return false, errors.New("invalid value type") } func coerceInt64(v interface{}) (int64, error) { switch v.(type) { case string: return strconv.ParseInt(v.(string), 10, 64) case int, int16, uint16, int32, uint32, int64, uint64: return reflect.ValueOf(v).Int(), nil } return 0, errors.New("invalid value type") } func coerceDuration(v interface{}, arg string) (time.Duration, error) { switch v.(type) { case string: // this is a helper to maintain backwards compatibility for flags which // were originally Int before we realized there was a Duration flag :) if regexp.MustCompile(`^[0-9]+$`).MatchString(v.(string)) { intVal, err := strconv.Atoi(v.(string)) if err != nil { return 0, err } mult, err := time.ParseDuration(arg) if err != nil { return 0, err } return time.Duration(intVal) * mult, nil } return time.ParseDuration(v.(string)) case int, int16, uint16, int32, uint32, int64, uint64: // treat like ms return time.Duration(reflect.ValueOf(v).Int()) * time.Millisecond, nil case time.Duration: return v.(time.Duration), nil } 
return 0, errors.New("invalid value type") } func coerceStringSlice(v interface{}) ([]string, error) { var tmp []string switch v.(type) { case string: for _, s := range strings.Split(v.(string), ",") { tmp = append(tmp, s) } case []interface{}: for _, si := range v.([]interface{}) { tmp = append(tmp, si.(string)) } case []string: tmp = v.([]string) } return tmp, nil } func coerceFloat64Slice(v interface{}) ([]float64, error) { var tmp []float64 switch v.(type) { case string: for _, s := range strings.Split(v.(string), ",") { f, err := strconv.ParseFloat(strings.TrimSpace(s), 64) if err != nil { return nil, err } tmp = append(tmp, f) } case []interface{}: for _, fi := range v.([]interface{}) { tmp = append(tmp, fi.(float64)) } case []string: for _, s := range v.([]string) { f, err := strconv.ParseFloat(strings.TrimSpace(s), 64) if err != nil { return nil, err } tmp = append(tmp, f) } case []float64: log.Printf("%+v", v) tmp = v.([]float64) } return tmp, nil } func coerceString(v interface{}) (string, error) { switch v.(type) { case string: return v.(string), nil } return fmt.Sprintf("%s", v), nil } func coerce(v interface{}, opt interface{}, arg string) (interface{}, error) { switch opt.(type) { case bool: return coerceBool(v) case int: i, err := coerceInt64(v) if err != nil { return nil, err } return int(i), nil case int16: i, err := coerceInt64(v) if err != nil { return nil, err } return int16(i), nil case uint16: i, err := coerceInt64(v) if err != nil { return nil, err } return uint16(i), nil case int32: i, err := coerceInt64(v) if err != nil { return nil, err } return int32(i), nil case uint32: i, err := coerceInt64(v) if err != nil { return nil, err } return uint32(i), nil case int64: return coerceInt64(v) case uint64: i, err := coerceInt64(v) if err != nil { return nil, err } return uint64(i), nil case string: return coerceString(v) case time.Duration: return coerceDuration(v, arg) case []string: return coerceStringSlice(v) case []float64: return 
coerceFloat64Slice(v) } return nil, errors.New("invalid type") } func hasArg(s string) bool { for _, arg := range os.Args { if strings.Contains(arg, s) { return true } } return false } ================================================ FILE: vendor/github.com/mreiferson/go-snappystream/.travis.yml ================================================ language: go go: - 1.2.2 - 1.3.1 env: - GOARCH=amd64 - GOARCH=386 install: - go get code.google.com/p/snappy-go/snappy script: - go test -v notifications: email: false ================================================ FILE: vendor/github.com/mreiferson/go-snappystream/LICENSE ================================================ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: vendor/github.com/mreiferson/go-snappystream/README.md ================================================ ## go-snappystream a Go package for framed snappy streams. 
[![Build Status](https://secure.travis-ci.org/mreiferson/go-snappystream.png?branch=master)](http://travis-ci.org/mreiferson/go-snappystream) [![GoDoc](https://godoc.org/github.com/mreiferson/go-snappystream?status.svg)](https://godoc.org/github.com/mreiferson/go-snappystream) This package wraps [snappy-go][1] and supplies a `Reader` and `Writer` for the snappy [framed stream format][2]. [1]: https://code.google.com/p/snappy-go/ [2]: https://snappy.googlecode.com/svn/trunk/framing_format.txt ================================================ FILE: vendor/github.com/mreiferson/go-snappystream/fixturedata_test.go ================================================ package snappystream var testDataMan = []byte(` .TH XARGS 1L \" -*- nroff -*- .SH NAME xargs \- build and execute command lines from standard input .SH SYNOPSIS .B xargs [\-0prtx] [\-e[eof-str]] [\-i[replace-str]] [\-l[max-lines]] [\-n max-args] [\-s max-chars] [\-P max-procs] [\-\-null] [\-\-eof[=eof-str]] [\-\-replace[=replace-str]] [\-\-max-lines[=max-lines]] [\-\-interactive] [\-\-max-chars=max-chars] [\-\-verbose] [\-\-exit] [\-\-max-procs=max-procs] [\-\-max-args=max-args] [\-\-no-run-if-empty] [\-\-version] [\-\-help] [command [initial-arguments]] .SH DESCRIPTION This manual page documents the GNU version of .BR xargs . .B xargs reads arguments from the standard input, delimited by blanks (which can be protected with double or single quotes or a backslash) or newlines, and executes the .I command (default is /bin/echo) one or more times with any .I initial-arguments followed by arguments read from standard input. Blank lines on the standard input are ignored. .P .B xargs exits with the following status: .nf 0 if it succeeds 123 if any invocation of the command exited with status 1-125 124 if the command exited with status 255 125 if the command is killed by a signal 126 if the command cannot be run 127 if the command is not found 1 if some other error occurred. 
.fi .SS OPTIONS .TP .I "\-\-null, \-0" Input filenames are terminated by a null character instead of by whitespace, and the quotes and backslash are not special (every character is taken literally). Disables the end of file string, which is treated like any other argument. Useful when arguments might contain white space, quote marks, or backslashes. The GNU find \-print0 option produces input suitable for this mode. .TP .I "\-\-eof[=eof-str], \-e[eof-str]" Set the end of file string to \fIeof-str\fR. If the end of file string occurs as a line of input, the rest of the input is ignored. If \fIeof-str\fR is omitted, there is no end of file string. If this option is not given, the end of file string defaults to "_". .TP .I "\-\-help" Print a summary of the options to .B xargs and exit. .TP .I "\-\-replace[=replace-str], \-i[replace-str]" Replace occurences of \fIreplace-str\fR in the initial arguments with names read from standard input. Also, unquoted blanks do not terminate arguments. If \fIreplace-str\fR is omitted, it defaults to "{}" (like for 'find \-exec'). Implies \fI\-x\fP and \fI\-l 1\fP. .TP .I "\-\-max-lines[=max-lines], -l[max-lines]" Use at most \fImax-lines\fR nonblank input lines per command line; \fImax-lines\fR defaults to 1 if omitted. Trailing blanks cause an input line to be logically continued on the next input line. Implies \fI\-x\fR. .TP .I "\-\-max-args=max-args, \-n max-args" Use at most \fImax-args\fR arguments per command line. Fewer than \fImax-args\fR arguments will be used if the size (see the \-s option) is exceeded, unless the \-x option is given, in which case \fBxargs\fR will exit. .TP .I "\-\-interactive, \-p" Prompt the user about whether to run each command line and read a line from the terminal. Only run the command line if the response starts with 'y' or 'Y'. Implies \fI\-t\fR. .TP .I "\-\-no-run-if-empty, \-r" If the standard input does not contain any nonblanks, do not run the command. 
Normally, the command is run once even if there is no input. .TP .I "\-\-max-chars=max-chars, \-s max-chars" Use at most \fImax-chars\fR characters per command line, including the command and initial arguments and the terminating nulls at the ends of the argument strings. The default is as large as possible, up to 20k characters. .TP .I "\-\-verbose, \-t" Print the command line on the standard error output before executing it. .TP .I "\-\-version" Print the version number of .B xargs and exit. .TP .I "\-\-exit, \-x" Exit if the size (see the \fI\-s\fR option) is exceeded. .TP .I "\-\-max-procs=max-procs, \-P max-procs" Run up to \fImax-procs\fR processes at a time; the default is 1. If \fImax-procs\fR is 0, \fBxargs\fR will run as many processes as possible at a time. Use the \fI\-n\fR option with \fI\-P\fR; otherwise chances are that only one exec will be done. .SH "SEE ALSO" \fBfind\fP(1L), \fBlocate\fP(1L), \fBlocatedb\fP(5L), \fBupdatedb\fP(1) \fBFinding Files\fP (on-line in Info, or printed)`) // curl -s https://api.github.com/users/mreiferson/repos var testDataJSON = []byte(` [ { "id": 19041094, "name": "2014-talks", "full_name": "mreiferson/2014-talks", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": "https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": 
"https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": "https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": "https://github.com/mreiferson/2014-talks", "description": "This is the official repository for slides and talks from GopherCon 2014", "fork": true, "url": "https://api.github.com/repos/mreiferson/2014-talks", "forks_url": "https://api.github.com/repos/mreiferson/2014-talks/forks", "keys_url": "https://api.github.com/repos/mreiferson/2014-talks/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/2014-talks/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/2014-talks/teams", "hooks_url": "https://api.github.com/repos/mreiferson/2014-talks/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/2014-talks/issues/events{/number}", "events_url": "https://api.github.com/repos/mreiferson/2014-talks/events", "assignees_url": "https://api.github.com/repos/mreiferson/2014-talks/assignees{/user}", "branches_url": "https://api.github.com/repos/mreiferson/2014-talks/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/2014-talks/tags", "blobs_url": "https://api.github.com/repos/mreiferson/2014-talks/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/mreiferson/2014-talks/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/2014-talks/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/2014-talks/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/2014-talks/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/2014-talks/languages", "stargazers_url": "https://api.github.com/repos/mreiferson/2014-talks/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/2014-talks/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/2014-talks/subscribers", 
"subscription_url": "https://api.github.com/repos/mreiferson/2014-talks/subscription", "commits_url": "https://api.github.com/repos/mreiferson/2014-talks/commits{/sha}", "git_commits_url": "https://api.github.com/repos/mreiferson/2014-talks/git/commits{/sha}", "comments_url": "https://api.github.com/repos/mreiferson/2014-talks/comments{/number}", "issue_comment_url": "https://api.github.com/repos/mreiferson/2014-talks/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/2014-talks/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/2014-talks/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/mreiferson/2014-talks/merges", "archive_url": "https://api.github.com/repos/mreiferson/2014-talks/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/2014-talks/downloads", "issues_url": "https://api.github.com/repos/mreiferson/2014-talks/issues{/number}", "pulls_url": "https://api.github.com/repos/mreiferson/2014-talks/pulls{/number}", "milestones_url": "https://api.github.com/repos/mreiferson/2014-talks/milestones{/number}", "notifications_url": "https://api.github.com/repos/mreiferson/2014-talks/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/2014-talks/labels{/name}", "releases_url": "https://api.github.com/repos/mreiferson/2014-talks/releases{/id}", "created_at": "2014-04-22T18:28:59Z", "updated_at": "2014-04-26T03:10:39Z", "pushed_at": "2014-04-25T14:46:35Z", "git_url": "git://github.com/mreiferson/2014-talks.git", "ssh_url": "git@github.com:mreiferson/2014-talks.git", "clone_url": "https://github.com/mreiferson/2014-talks.git", "svn_url": "https://github.com/mreiferson/2014-talks", "homepage": null, "size": 3596, "stargazers_count": 0, "watchers_count": 0, "language": null, "has_issues": false, "has_downloads": true, "has_wiki": true, "forks_count": 0, "mirror_url": null, "open_issues_count": 0, "forks": 0, 
"open_issues": 0, "watchers": 0, "default_branch": "master" }, { "id": 3329246, "name": "asyncdynamo", "full_name": "mreiferson/asyncdynamo", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": "https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": "https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": "https://github.com/mreiferson/asyncdynamo", "description": "async Amazon DynamoDB library for Tornado", "fork": true, "url": "https://api.github.com/repos/mreiferson/asyncdynamo", "forks_url": "https://api.github.com/repos/mreiferson/asyncdynamo/forks", "keys_url": "https://api.github.com/repos/mreiferson/asyncdynamo/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/asyncdynamo/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/asyncdynamo/teams", "hooks_url": "https://api.github.com/repos/mreiferson/asyncdynamo/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/asyncdynamo/issues/events{/number}", "events_url": "https://api.github.com/repos/mreiferson/asyncdynamo/events", "assignees_url": "https://api.github.com/repos/mreiferson/asyncdynamo/assignees{/user}", 
"branches_url": "https://api.github.com/repos/mreiferson/asyncdynamo/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/asyncdynamo/tags", "blobs_url": "https://api.github.com/repos/mreiferson/asyncdynamo/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/mreiferson/asyncdynamo/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/asyncdynamo/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/asyncdynamo/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/asyncdynamo/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/asyncdynamo/languages", "stargazers_url": "https://api.github.com/repos/mreiferson/asyncdynamo/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/asyncdynamo/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/asyncdynamo/subscribers", "subscription_url": "https://api.github.com/repos/mreiferson/asyncdynamo/subscription", "commits_url": "https://api.github.com/repos/mreiferson/asyncdynamo/commits{/sha}", "git_commits_url": "https://api.github.com/repos/mreiferson/asyncdynamo/git/commits{/sha}", "comments_url": "https://api.github.com/repos/mreiferson/asyncdynamo/comments{/number}", "issue_comment_url": "https://api.github.com/repos/mreiferson/asyncdynamo/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/asyncdynamo/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/asyncdynamo/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/mreiferson/asyncdynamo/merges", "archive_url": "https://api.github.com/repos/mreiferson/asyncdynamo/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/asyncdynamo/downloads", "issues_url": "https://api.github.com/repos/mreiferson/asyncdynamo/issues{/number}", "pulls_url": "https://api.github.com/repos/mreiferson/asyncdynamo/pulls{/number}", "milestones_url": 
"https://api.github.com/repos/mreiferson/asyncdynamo/milestones{/number}", "notifications_url": "https://api.github.com/repos/mreiferson/asyncdynamo/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/asyncdynamo/labels{/name}", "releases_url": "https://api.github.com/repos/mreiferson/asyncdynamo/releases{/id}", "created_at": "2012-02-01T21:32:54Z", "updated_at": "2014-04-03T21:58:44Z", "pushed_at": "2012-02-01T21:06:23Z", "git_url": "git://github.com/mreiferson/asyncdynamo.git", "ssh_url": "git@github.com:mreiferson/asyncdynamo.git", "clone_url": "https://github.com/mreiferson/asyncdynamo.git", "svn_url": "https://github.com/mreiferson/asyncdynamo", "homepage": "", "size": 73, "stargazers_count": 1, "watchers_count": 1, "language": "Python", "has_issues": false, "has_downloads": true, "has_wiki": true, "forks_count": 0, "mirror_url": null, "open_issues_count": 0, "forks": 0, "open_issues": 0, "watchers": 1, "default_branch": "master" }, { "id": 2622445, "name": "asyncmongo", "full_name": "mreiferson/asyncmongo", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": "https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": 
"https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": "https://github.com/mreiferson/asyncmongo", "description": "An asynchronous library for accessing mongo with tornado.ioloop", "fork": true, "url": "https://api.github.com/repos/mreiferson/asyncmongo", "forks_url": "https://api.github.com/repos/mreiferson/asyncmongo/forks", "keys_url": "https://api.github.com/repos/mreiferson/asyncmongo/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/asyncmongo/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/asyncmongo/teams", "hooks_url": "https://api.github.com/repos/mreiferson/asyncmongo/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/asyncmongo/issues/events{/number}", "events_url": "https://api.github.com/repos/mreiferson/asyncmongo/events", "assignees_url": "https://api.github.com/repos/mreiferson/asyncmongo/assignees{/user}", "branches_url": "https://api.github.com/repos/mreiferson/asyncmongo/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/asyncmongo/tags", "blobs_url": "https://api.github.com/repos/mreiferson/asyncmongo/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/mreiferson/asyncmongo/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/asyncmongo/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/asyncmongo/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/asyncmongo/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/asyncmongo/languages", "stargazers_url": "https://api.github.com/repos/mreiferson/asyncmongo/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/asyncmongo/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/asyncmongo/subscribers", "subscription_url": "https://api.github.com/repos/mreiferson/asyncmongo/subscription", 
"commits_url": "https://api.github.com/repos/mreiferson/asyncmongo/commits{/sha}", "git_commits_url": "https://api.github.com/repos/mreiferson/asyncmongo/git/commits{/sha}", "comments_url": "https://api.github.com/repos/mreiferson/asyncmongo/comments{/number}", "issue_comment_url": "https://api.github.com/repos/mreiferson/asyncmongo/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/asyncmongo/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/asyncmongo/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/mreiferson/asyncmongo/merges", "archive_url": "https://api.github.com/repos/mreiferson/asyncmongo/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/asyncmongo/downloads", "issues_url": "https://api.github.com/repos/mreiferson/asyncmongo/issues{/number}", "pulls_url": "https://api.github.com/repos/mreiferson/asyncmongo/pulls{/number}", "milestones_url": "https://api.github.com/repos/mreiferson/asyncmongo/milestones{/number}", "notifications_url": "https://api.github.com/repos/mreiferson/asyncmongo/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/asyncmongo/labels{/name}", "releases_url": "https://api.github.com/repos/mreiferson/asyncmongo/releases{/id}", "created_at": "2011-10-21T19:01:05Z", "updated_at": "2013-01-04T11:58:26Z", "pushed_at": "2011-10-21T19:02:46Z", "git_url": "git://github.com/mreiferson/asyncmongo.git", "ssh_url": "git@github.com:mreiferson/asyncmongo.git", "clone_url": "https://github.com/mreiferson/asyncmongo.git", "svn_url": "https://github.com/mreiferson/asyncmongo", "homepage": "http://github.com/bitly/asyncmongo", "size": 563, "stargazers_count": 1, "watchers_count": 1, "language": "Python", "has_issues": false, "has_downloads": true, "has_wiki": true, "forks_count": 0, "mirror_url": null, "open_issues_count": 0, "forks": 0, "open_issues": 0, "watchers": 1, "default_branch": "master" }, { 
"id": 4554560, "name": "blog.perplexedlabs.com", "full_name": "mreiferson/blog.perplexedlabs.com", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": "https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": "https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": "https://github.com/mreiferson/blog.perplexedlabs.com", "description": "archive of posts", "fork": false, "url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com", "forks_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/forks", "keys_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/teams", "hooks_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/issues/events{/number}", "events_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/events", "assignees_url": 
"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/assignees{/user}", "branches_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/tags", "blobs_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/languages", "stargazers_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/subscribers", "subscription_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/subscription", "commits_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/commits{/sha}", "git_commits_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/git/commits{/sha}", "comments_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/comments{/number}", "issue_comment_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/merges", "archive_url": 
"https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/downloads", "issues_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/issues{/number}", "pulls_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/pulls{/number}", "milestones_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/milestones{/number}", "notifications_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/labels{/name}", "releases_url": "https://api.github.com/repos/mreiferson/blog.perplexedlabs.com/releases{/id}", "created_at": "2012-06-05T01:38:40Z", "updated_at": "2014-04-27T23:44:56Z", "pushed_at": "2014-04-27T23:44:56Z", "git_url": "git://github.com/mreiferson/blog.perplexedlabs.com.git", "ssh_url": "git@github.com:mreiferson/blog.perplexedlabs.com.git", "clone_url": "https://github.com/mreiferson/blog.perplexedlabs.com.git", "svn_url": "https://github.com/mreiferson/blog.perplexedlabs.com", "homepage": "http://blog.perplexedlabs.com/", "size": 668, "stargazers_count": 1, "watchers_count": 1, "language": null, "has_issues": true, "has_downloads": true, "has_wiki": true, "forks_count": 0, "mirror_url": null, "open_issues_count": 0, "forks": 0, "open_issues": 0, "watchers": 1, "default_branch": "master" }, { "id": 2861903, "name": "btpath", "full_name": "mreiferson/btpath", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": "https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": 
"https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": "https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": "https://github.com/mreiferson/btpath", "description": "A* implementation/test app (1997)", "fork": false, "url": "https://api.github.com/repos/mreiferson/btpath", "forks_url": "https://api.github.com/repos/mreiferson/btpath/forks", "keys_url": "https://api.github.com/repos/mreiferson/btpath/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/btpath/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/btpath/teams", "hooks_url": "https://api.github.com/repos/mreiferson/btpath/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/btpath/issues/events{/number}", "events_url": "https://api.github.com/repos/mreiferson/btpath/events", "assignees_url": "https://api.github.com/repos/mreiferson/btpath/assignees{/user}", "branches_url": "https://api.github.com/repos/mreiferson/btpath/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/btpath/tags", "blobs_url": "https://api.github.com/repos/mreiferson/btpath/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/mreiferson/btpath/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/btpath/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/btpath/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/btpath/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/btpath/languages", 
"stargazers_url": "https://api.github.com/repos/mreiferson/btpath/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/btpath/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/btpath/subscribers", "subscription_url": "https://api.github.com/repos/mreiferson/btpath/subscription", "commits_url": "https://api.github.com/repos/mreiferson/btpath/commits{/sha}", "git_commits_url": "https://api.github.com/repos/mreiferson/btpath/git/commits{/sha}", "comments_url": "https://api.github.com/repos/mreiferson/btpath/comments{/number}", "issue_comment_url": "https://api.github.com/repos/mreiferson/btpath/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/btpath/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/btpath/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/mreiferson/btpath/merges", "archive_url": "https://api.github.com/repos/mreiferson/btpath/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/btpath/downloads", "issues_url": "https://api.github.com/repos/mreiferson/btpath/issues{/number}", "pulls_url": "https://api.github.com/repos/mreiferson/btpath/pulls{/number}", "milestones_url": "https://api.github.com/repos/mreiferson/btpath/milestones{/number}", "notifications_url": "https://api.github.com/repos/mreiferson/btpath/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/btpath/labels{/name}", "releases_url": "https://api.github.com/repos/mreiferson/btpath/releases{/id}", "created_at": "2011-11-27T17:23:02Z", "updated_at": "2013-01-04T17:58:42Z", "pushed_at": "2011-11-29T01:36:49Z", "git_url": "git://github.com/mreiferson/btpath.git", "ssh_url": "git@github.com:mreiferson/btpath.git", "clone_url": "https://github.com/mreiferson/btpath.git", "svn_url": "https://github.com/mreiferson/btpath", "homepage": "", "size": 88, "stargazers_count": 1, "watchers_count": 1, 
"language": "C++", "has_issues": true, "has_downloads": true, "has_wiki": true, "forks_count": 0, "mirror_url": null, "open_issues_count": 0, "forks": 0, "open_issues": 0, "watchers": 1, "default_branch": "master" }, { "id": 15747148, "name": "chef-nsq", "full_name": "mreiferson/chef-nsq", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": "https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": "https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": "https://github.com/mreiferson/chef-nsq", "description": "Chef Cookbook for NSQ", "fork": true, "url": "https://api.github.com/repos/mreiferson/chef-nsq", "forks_url": "https://api.github.com/repos/mreiferson/chef-nsq/forks", "keys_url": "https://api.github.com/repos/mreiferson/chef-nsq/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/chef-nsq/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/chef-nsq/teams", "hooks_url": "https://api.github.com/repos/mreiferson/chef-nsq/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/chef-nsq/issues/events{/number}", "events_url": 
"https://api.github.com/repos/mreiferson/chef-nsq/events", "assignees_url": "https://api.github.com/repos/mreiferson/chef-nsq/assignees{/user}", "branches_url": "https://api.github.com/repos/mreiferson/chef-nsq/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/chef-nsq/tags", "blobs_url": "https://api.github.com/repos/mreiferson/chef-nsq/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/mreiferson/chef-nsq/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/chef-nsq/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/chef-nsq/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/chef-nsq/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/chef-nsq/languages", "stargazers_url": "https://api.github.com/repos/mreiferson/chef-nsq/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/chef-nsq/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/chef-nsq/subscribers", "subscription_url": "https://api.github.com/repos/mreiferson/chef-nsq/subscription", "commits_url": "https://api.github.com/repos/mreiferson/chef-nsq/commits{/sha}", "git_commits_url": "https://api.github.com/repos/mreiferson/chef-nsq/git/commits{/sha}", "comments_url": "https://api.github.com/repos/mreiferson/chef-nsq/comments{/number}", "issue_comment_url": "https://api.github.com/repos/mreiferson/chef-nsq/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/chef-nsq/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/chef-nsq/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/mreiferson/chef-nsq/merges", "archive_url": "https://api.github.com/repos/mreiferson/chef-nsq/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/chef-nsq/downloads", "issues_url": "https://api.github.com/repos/mreiferson/chef-nsq/issues{/number}", "pulls_url": 
"https://api.github.com/repos/mreiferson/chef-nsq/pulls{/number}", "milestones_url": "https://api.github.com/repos/mreiferson/chef-nsq/milestones{/number}", "notifications_url": "https://api.github.com/repos/mreiferson/chef-nsq/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/chef-nsq/labels{/name}", "releases_url": "https://api.github.com/repos/mreiferson/chef-nsq/releases{/id}", "created_at": "2014-01-08T20:27:41Z", "updated_at": "2014-04-28T14:15:50Z", "pushed_at": "2014-04-28T04:31:58Z", "git_url": "git://github.com/mreiferson/chef-nsq.git", "ssh_url": "git@github.com:mreiferson/chef-nsq.git", "clone_url": "https://github.com/mreiferson/chef-nsq.git", "svn_url": "https://github.com/mreiferson/chef-nsq", "homepage": null, "size": 132, "stargazers_count": 0, "watchers_count": 0, "language": "Ruby", "has_issues": false, "has_downloads": true, "has_wiki": false, "forks_count": 0, "mirror_url": null, "open_issues_count": 0, "forks": 0, "open_issues": 0, "watchers": 0, "default_branch": "master" }, { "id": 5287337, "name": "dablooms", "full_name": "mreiferson/dablooms", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": "https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", 
"received_events_url": "https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": "https://github.com/mreiferson/dablooms", "description": "scaling, counting, bloom filter library", "fork": true, "url": "https://api.github.com/repos/mreiferson/dablooms", "forks_url": "https://api.github.com/repos/mreiferson/dablooms/forks", "keys_url": "https://api.github.com/repos/mreiferson/dablooms/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/dablooms/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/dablooms/teams", "hooks_url": "https://api.github.com/repos/mreiferson/dablooms/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/dablooms/issues/events{/number}", "events_url": "https://api.github.com/repos/mreiferson/dablooms/events", "assignees_url": "https://api.github.com/repos/mreiferson/dablooms/assignees{/user}", "branches_url": "https://api.github.com/repos/mreiferson/dablooms/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/dablooms/tags", "blobs_url": "https://api.github.com/repos/mreiferson/dablooms/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/mreiferson/dablooms/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/dablooms/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/dablooms/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/dablooms/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/dablooms/languages", "stargazers_url": "https://api.github.com/repos/mreiferson/dablooms/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/dablooms/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/dablooms/subscribers", "subscription_url": "https://api.github.com/repos/mreiferson/dablooms/subscription", "commits_url": 
"https://api.github.com/repos/mreiferson/dablooms/commits{/sha}", "git_commits_url": "https://api.github.com/repos/mreiferson/dablooms/git/commits{/sha}", "comments_url": "https://api.github.com/repos/mreiferson/dablooms/comments{/number}", "issue_comment_url": "https://api.github.com/repos/mreiferson/dablooms/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/dablooms/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/dablooms/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/mreiferson/dablooms/merges", "archive_url": "https://api.github.com/repos/mreiferson/dablooms/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/dablooms/downloads", "issues_url": "https://api.github.com/repos/mreiferson/dablooms/issues{/number}", "pulls_url": "https://api.github.com/repos/mreiferson/dablooms/pulls{/number}", "milestones_url": "https://api.github.com/repos/mreiferson/dablooms/milestones{/number}", "notifications_url": "https://api.github.com/repos/mreiferson/dablooms/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/dablooms/labels{/name}", "releases_url": "https://api.github.com/repos/mreiferson/dablooms/releases{/id}", "created_at": "2012-08-03T16:03:50Z", "updated_at": "2013-03-08T15:37:44Z", "pushed_at": "2013-03-08T15:37:44Z", "git_url": "git://github.com/mreiferson/dablooms.git", "ssh_url": "git@github.com:mreiferson/dablooms.git", "clone_url": "https://github.com/mreiferson/dablooms.git", "svn_url": "https://github.com/mreiferson/dablooms", "homepage": "", "size": 186, "stargazers_count": 1, "watchers_count": 1, "language": "C", "has_issues": false, "has_downloads": true, "has_wiki": true, "forks_count": 0, "mirror_url": null, "open_issues_count": 0, "forks": 0, "open_issues": 0, "watchers": 1, "default_branch": "master" }, { "id": 2861959, "name": "dod", "full_name": "mreiferson/dod", "owner": { "login": 
"mreiferson", "id": 187441, "avatar_url": "https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": "https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": "https://github.com/mreiferson/dod", "description": "Do or Die - an incomplete real-time strategy game inspired by Warcraft (1997)", "fork": false, "url": "https://api.github.com/repos/mreiferson/dod", "forks_url": "https://api.github.com/repos/mreiferson/dod/forks", "keys_url": "https://api.github.com/repos/mreiferson/dod/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/dod/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/dod/teams", "hooks_url": "https://api.github.com/repos/mreiferson/dod/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/dod/issues/events{/number}", "events_url": "https://api.github.com/repos/mreiferson/dod/events", "assignees_url": "https://api.github.com/repos/mreiferson/dod/assignees{/user}", "branches_url": "https://api.github.com/repos/mreiferson/dod/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/dod/tags", "blobs_url": 
"https://api.github.com/repos/mreiferson/dod/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/mreiferson/dod/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/dod/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/dod/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/dod/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/dod/languages", "stargazers_url": "https://api.github.com/repos/mreiferson/dod/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/dod/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/dod/subscribers", "subscription_url": "https://api.github.com/repos/mreiferson/dod/subscription", "commits_url": "https://api.github.com/repos/mreiferson/dod/commits{/sha}", "git_commits_url": "https://api.github.com/repos/mreiferson/dod/git/commits{/sha}", "comments_url": "https://api.github.com/repos/mreiferson/dod/comments{/number}", "issue_comment_url": "https://api.github.com/repos/mreiferson/dod/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/dod/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/dod/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/mreiferson/dod/merges", "archive_url": "https://api.github.com/repos/mreiferson/dod/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/dod/downloads", "issues_url": "https://api.github.com/repos/mreiferson/dod/issues{/number}", "pulls_url": "https://api.github.com/repos/mreiferson/dod/pulls{/number}", "milestones_url": "https://api.github.com/repos/mreiferson/dod/milestones{/number}", "notifications_url": "https://api.github.com/repos/mreiferson/dod/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/dod/labels{/name}", "releases_url": "https://api.github.com/repos/mreiferson/dod/releases{/id}", "created_at": 
"2011-11-27T17:33:19Z", "updated_at": "2014-05-13T00:56:53Z", "pushed_at": "2011-11-29T02:08:57Z", "git_url": "git://github.com/mreiferson/dod.git", "ssh_url": "git@github.com:mreiferson/dod.git", "clone_url": "https://github.com/mreiferson/dod.git", "svn_url": "https://github.com/mreiferson/dod", "homepage": "", "size": 2044, "stargazers_count": 1, "watchers_count": 1, "language": "C++", "has_issues": true, "has_downloads": true, "has_wiki": true, "forks_count": 0, "mirror_url": null, "open_issues_count": 0, "forks": 0, "open_issues": 0, "watchers": 1, "default_branch": "master" }, { "id": 4515792, "name": "doozer", "full_name": "mreiferson/doozer", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": "https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": "https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": "https://github.com/mreiferson/doozer", "description": "Go client driver for doozerd, a consistent, distributed data store", "fork": true, "url": "https://api.github.com/repos/mreiferson/doozer", "forks_url": "https://api.github.com/repos/mreiferson/doozer/forks", "keys_url": "https://api.github.com/repos/mreiferson/doozer/keys{/key_id}", 
"collaborators_url": "https://api.github.com/repos/mreiferson/doozer/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/doozer/teams", "hooks_url": "https://api.github.com/repos/mreiferson/doozer/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/doozer/issues/events{/number}", "events_url": "https://api.github.com/repos/mreiferson/doozer/events", "assignees_url": "https://api.github.com/repos/mreiferson/doozer/assignees{/user}", "branches_url": "https://api.github.com/repos/mreiferson/doozer/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/doozer/tags", "blobs_url": "https://api.github.com/repos/mreiferson/doozer/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/mreiferson/doozer/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/doozer/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/doozer/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/doozer/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/doozer/languages", "stargazers_url": "https://api.github.com/repos/mreiferson/doozer/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/doozer/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/doozer/subscribers", "subscription_url": "https://api.github.com/repos/mreiferson/doozer/subscription", "commits_url": "https://api.github.com/repos/mreiferson/doozer/commits{/sha}", "git_commits_url": "https://api.github.com/repos/mreiferson/doozer/git/commits{/sha}", "comments_url": "https://api.github.com/repos/mreiferson/doozer/comments{/number}", "issue_comment_url": "https://api.github.com/repos/mreiferson/doozer/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/doozer/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/doozer/compare/{base}...{head}", "merges_url": 
"https://api.github.com/repos/mreiferson/doozer/merges", "archive_url": "https://api.github.com/repos/mreiferson/doozer/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/doozer/downloads", "issues_url": "https://api.github.com/repos/mreiferson/doozer/issues{/number}", "pulls_url": "https://api.github.com/repos/mreiferson/doozer/pulls{/number}", "milestones_url": "https://api.github.com/repos/mreiferson/doozer/milestones{/number}", "notifications_url": "https://api.github.com/repos/mreiferson/doozer/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/doozer/labels{/name}", "releases_url": "https://api.github.com/repos/mreiferson/doozer/releases{/id}", "created_at": "2012-06-01T03:41:14Z", "updated_at": "2013-03-16T15:23:56Z", "pushed_at": "2013-03-16T15:23:55Z", "git_url": "git://github.com/mreiferson/doozer.git", "ssh_url": "git@github.com:mreiferson/doozer.git", "clone_url": "https://github.com/mreiferson/doozer.git", "svn_url": "https://github.com/mreiferson/doozer", "homepage": "https://github.com/ha/doozerd", "size": 2584, "stargazers_count": 0, "watchers_count": 0, "language": "Go", "has_issues": false, "has_downloads": true, "has_wiki": true, "forks_count": 0, "mirror_url": null, "open_issues_count": 0, "forks": 0, "open_issues": 0, "watchers": 0, "default_branch": "master" }, { "id": 3391437, "name": "doozer-c", "full_name": "mreiferson/doozer-c", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": "https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": 
"https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": "https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": "https://github.com/mreiferson/doozer-c", "description": "async C client library for doozerd", "fork": true, "url": "https://api.github.com/repos/mreiferson/doozer-c", "forks_url": "https://api.github.com/repos/mreiferson/doozer-c/forks", "keys_url": "https://api.github.com/repos/mreiferson/doozer-c/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/doozer-c/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/doozer-c/teams", "hooks_url": "https://api.github.com/repos/mreiferson/doozer-c/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/doozer-c/issues/events{/number}", "events_url": "https://api.github.com/repos/mreiferson/doozer-c/events", "assignees_url": "https://api.github.com/repos/mreiferson/doozer-c/assignees{/user}", "branches_url": "https://api.github.com/repos/mreiferson/doozer-c/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/doozer-c/tags", "blobs_url": "https://api.github.com/repos/mreiferson/doozer-c/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/mreiferson/doozer-c/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/doozer-c/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/doozer-c/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/doozer-c/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/doozer-c/languages", "stargazers_url": 
"https://api.github.com/repos/mreiferson/doozer-c/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/doozer-c/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/doozer-c/subscribers", "subscription_url": "https://api.github.com/repos/mreiferson/doozer-c/subscription", "commits_url": "https://api.github.com/repos/mreiferson/doozer-c/commits{/sha}", "git_commits_url": "https://api.github.com/repos/mreiferson/doozer-c/git/commits{/sha}", "comments_url": "https://api.github.com/repos/mreiferson/doozer-c/comments{/number}", "issue_comment_url": "https://api.github.com/repos/mreiferson/doozer-c/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/doozer-c/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/doozer-c/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/mreiferson/doozer-c/merges", "archive_url": "https://api.github.com/repos/mreiferson/doozer-c/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/doozer-c/downloads", "issues_url": "https://api.github.com/repos/mreiferson/doozer-c/issues{/number}", "pulls_url": "https://api.github.com/repos/mreiferson/doozer-c/pulls{/number}", "milestones_url": "https://api.github.com/repos/mreiferson/doozer-c/milestones{/number}", "notifications_url": "https://api.github.com/repos/mreiferson/doozer-c/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/doozer-c/labels{/name}", "releases_url": "https://api.github.com/repos/mreiferson/doozer-c/releases{/id}", "created_at": "2012-02-08T21:15:33Z", "updated_at": "2014-04-03T21:58:49Z", "pushed_at": "2012-11-21T16:46:25Z", "git_url": "git://github.com/mreiferson/doozer-c.git", "ssh_url": "git@github.com:mreiferson/doozer-c.git", "clone_url": "https://github.com/mreiferson/doozer-c.git", "svn_url": "https://github.com/mreiferson/doozer-c", "homepage": "", "size": 158, "stargazers_count": 0, 
"watchers_count": 0, "language": "C", "has_issues": false, "has_downloads": true, "has_wiki": true, "forks_count": 0, "mirror_url": null, "open_issues_count": 0, "forks": 0, "open_issues": 0, "watchers": 0, "default_branch": "master" }, { "id": 4515795, "name": "doozerd", "full_name": "mreiferson/doozerd", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": "https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": "https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": "https://github.com/mreiferson/doozerd", "description": "A consistent distributed data store.", "fork": true, "url": "https://api.github.com/repos/mreiferson/doozerd", "forks_url": "https://api.github.com/repos/mreiferson/doozerd/forks", "keys_url": "https://api.github.com/repos/mreiferson/doozerd/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/doozerd/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/doozerd/teams", "hooks_url": "https://api.github.com/repos/mreiferson/doozerd/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/doozerd/issues/events{/number}", "events_url": 
"https://api.github.com/repos/mreiferson/doozerd/events", "assignees_url": "https://api.github.com/repos/mreiferson/doozerd/assignees{/user}", "branches_url": "https://api.github.com/repos/mreiferson/doozerd/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/doozerd/tags", "blobs_url": "https://api.github.com/repos/mreiferson/doozerd/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/mreiferson/doozerd/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/doozerd/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/doozerd/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/doozerd/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/doozerd/languages", "stargazers_url": "https://api.github.com/repos/mreiferson/doozerd/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/doozerd/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/doozerd/subscribers", "subscription_url": "https://api.github.com/repos/mreiferson/doozerd/subscription", "commits_url": "https://api.github.com/repos/mreiferson/doozerd/commits{/sha}", "git_commits_url": "https://api.github.com/repos/mreiferson/doozerd/git/commits{/sha}", "comments_url": "https://api.github.com/repos/mreiferson/doozerd/comments{/number}", "issue_comment_url": "https://api.github.com/repos/mreiferson/doozerd/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/doozerd/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/doozerd/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/mreiferson/doozerd/merges", "archive_url": "https://api.github.com/repos/mreiferson/doozerd/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/doozerd/downloads", "issues_url": "https://api.github.com/repos/mreiferson/doozerd/issues{/number}", "pulls_url": 
"https://api.github.com/repos/mreiferson/doozerd/pulls{/number}", "milestones_url": "https://api.github.com/repos/mreiferson/doozerd/milestones{/number}", "notifications_url": "https://api.github.com/repos/mreiferson/doozerd/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/doozerd/labels{/name}", "releases_url": "https://api.github.com/repos/mreiferson/doozerd/releases{/id}", "created_at": "2012-06-01T03:41:32Z", "updated_at": "2013-12-28T19:22:30Z", "pushed_at": "2013-12-28T19:22:30Z", "git_url": "git://github.com/mreiferson/doozerd.git", "ssh_url": "git@github.com:mreiferson/doozerd.git", "clone_url": "https://github.com/mreiferson/doozerd.git", "svn_url": "https://github.com/mreiferson/doozerd", "homepage": "", "size": 3135, "stargazers_count": 0, "watchers_count": 0, "language": "Go", "has_issues": false, "has_downloads": true, "has_wiki": true, "forks_count": 0, "mirror_url": null, "open_issues_count": 0, "forks": 0, "open_issues": 0, "watchers": 0, "default_branch": "master" }, { "id": 8172002, "name": "e", "full_name": "mreiferson/e", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": "https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": 
"https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": "https://github.com/mreiferson/e", "description": "Library containing high-performance datastructures and utilities for C++", "fork": true, "url": "https://api.github.com/repos/mreiferson/e", "forks_url": "https://api.github.com/repos/mreiferson/e/forks", "keys_url": "https://api.github.com/repos/mreiferson/e/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/e/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/e/teams", "hooks_url": "https://api.github.com/repos/mreiferson/e/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/e/issues/events{/number}", "events_url": "https://api.github.com/repos/mreiferson/e/events", "assignees_url": "https://api.github.com/repos/mreiferson/e/assignees{/user}", "branches_url": "https://api.github.com/repos/mreiferson/e/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/e/tags", "blobs_url": "https://api.github.com/repos/mreiferson/e/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/mreiferson/e/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/e/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/e/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/e/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/e/languages", "stargazers_url": "https://api.github.com/repos/mreiferson/e/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/e/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/e/subscribers", "subscription_url": "https://api.github.com/repos/mreiferson/e/subscription", "commits_url": "https://api.github.com/repos/mreiferson/e/commits{/sha}", "git_commits_url": "https://api.github.com/repos/mreiferson/e/git/commits{/sha}", "comments_url": 
"https://api.github.com/repos/mreiferson/e/comments{/number}", "issue_comment_url": "https://api.github.com/repos/mreiferson/e/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/e/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/e/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/mreiferson/e/merges", "archive_url": "https://api.github.com/repos/mreiferson/e/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/e/downloads", "issues_url": "https://api.github.com/repos/mreiferson/e/issues{/number}", "pulls_url": "https://api.github.com/repos/mreiferson/e/pulls{/number}", "milestones_url": "https://api.github.com/repos/mreiferson/e/milestones{/number}", "notifications_url": "https://api.github.com/repos/mreiferson/e/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/e/labels{/name}", "releases_url": "https://api.github.com/repos/mreiferson/e/releases{/id}", "created_at": "2013-02-13T02:42:55Z", "updated_at": "2013-02-18T21:10:07Z", "pushed_at": "2013-02-13T02:45:16Z", "git_url": "git://github.com/mreiferson/e.git", "ssh_url": "git@github.com:mreiferson/e.git", "clone_url": "https://github.com/mreiferson/e.git", "svn_url": "https://github.com/mreiferson/e", "homepage": "", "size": 437, "stargazers_count": 0, "watchers_count": 0, "language": "C++", "has_issues": false, "has_downloads": true, "has_wiki": true, "forks_count": 0, "mirror_url": null, "open_issues_count": 0, "forks": 0, "open_issues": 0, "watchers": 0, "default_branch": "master" }, { "id": 2792604, "name": "encfs-macfusion2", "full_name": "mreiferson/encfs-macfusion2", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": "https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": 
"https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": "https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": "https://github.com/mreiferson/encfs-macfusion2", "description": "enhanced version of encfs-macfusion2 plugin http://code.google.com/p/encfs-macfusion2/", "fork": false, "url": "https://api.github.com/repos/mreiferson/encfs-macfusion2", "forks_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/forks", "keys_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/teams", "hooks_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/issues/events{/number}", "events_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/events", "assignees_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/assignees{/user}", "branches_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/tags", "blobs_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/git/blobs{/sha}", "git_tags_url": 
"https://api.github.com/repos/mreiferson/encfs-macfusion2/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/languages", "stargazers_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/subscribers", "subscription_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/subscription", "commits_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/commits{/sha}", "git_commits_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/git/commits{/sha}", "comments_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/comments{/number}", "issue_comment_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/merges", "archive_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/downloads", "issues_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/issues{/number}", "pulls_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/pulls{/number}", "milestones_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/milestones{/number}", "notifications_url": 
"https://api.github.com/repos/mreiferson/encfs-macfusion2/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/labels{/name}", "releases_url": "https://api.github.com/repos/mreiferson/encfs-macfusion2/releases{/id}", "created_at": "2011-11-17T01:58:01Z", "updated_at": "2013-10-22T06:29:03Z", "pushed_at": "2011-11-17T02:13:15Z", "git_url": "git://github.com/mreiferson/encfs-macfusion2.git", "ssh_url": "git@github.com:mreiferson/encfs-macfusion2.git", "clone_url": "https://github.com/mreiferson/encfs-macfusion2.git", "svn_url": "https://github.com/mreiferson/encfs-macfusion2", "homepage": "", "size": 195, "stargazers_count": 4, "watchers_count": 4, "language": "Objective-C", "has_issues": true, "has_downloads": true, "has_wiki": true, "forks_count": 1, "mirror_url": null, "open_issues_count": 0, "forks": 1, "open_issues": 0, "watchers": 4, "default_branch": "master" }, { "id": 5263991, "name": "file2http", "full_name": "mreiferson/file2http", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": "https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": "https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": 
false, "html_url": "https://github.com/mreiferson/file2http", "description": "spray a line-oriented file at an HTTP endpoint", "fork": true, "url": "https://api.github.com/repos/mreiferson/file2http", "forks_url": "https://api.github.com/repos/mreiferson/file2http/forks", "keys_url": "https://api.github.com/repos/mreiferson/file2http/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/file2http/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/file2http/teams", "hooks_url": "https://api.github.com/repos/mreiferson/file2http/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/file2http/issues/events{/number}", "events_url": "https://api.github.com/repos/mreiferson/file2http/events", "assignees_url": "https://api.github.com/repos/mreiferson/file2http/assignees{/user}", "branches_url": "https://api.github.com/repos/mreiferson/file2http/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/file2http/tags", "blobs_url": "https://api.github.com/repos/mreiferson/file2http/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/mreiferson/file2http/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/file2http/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/file2http/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/file2http/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/file2http/languages", "stargazers_url": "https://api.github.com/repos/mreiferson/file2http/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/file2http/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/file2http/subscribers", "subscription_url": "https://api.github.com/repos/mreiferson/file2http/subscription", "commits_url": "https://api.github.com/repos/mreiferson/file2http/commits{/sha}", "git_commits_url": 
"https://api.github.com/repos/mreiferson/file2http/git/commits{/sha}", "comments_url": "https://api.github.com/repos/mreiferson/file2http/comments{/number}", "issue_comment_url": "https://api.github.com/repos/mreiferson/file2http/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/file2http/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/file2http/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/mreiferson/file2http/merges", "archive_url": "https://api.github.com/repos/mreiferson/file2http/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/file2http/downloads", "issues_url": "https://api.github.com/repos/mreiferson/file2http/issues{/number}", "pulls_url": "https://api.github.com/repos/mreiferson/file2http/pulls{/number}", "milestones_url": "https://api.github.com/repos/mreiferson/file2http/milestones{/number}", "notifications_url": "https://api.github.com/repos/mreiferson/file2http/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/file2http/labels{/name}", "releases_url": "https://api.github.com/repos/mreiferson/file2http/releases{/id}", "created_at": "2012-08-01T19:56:16Z", "updated_at": "2013-01-11T13:21:40Z", "pushed_at": "2012-12-21T15:44:32Z", "git_url": "git://github.com/mreiferson/file2http.git", "ssh_url": "git@github.com:mreiferson/file2http.git", "clone_url": "https://github.com/mreiferson/file2http.git", "svn_url": "https://github.com/mreiferson/file2http", "homepage": "", "size": 96, "stargazers_count": 1, "watchers_count": 1, "language": "Go", "has_issues": false, "has_downloads": true, "has_wiki": true, "forks_count": 0, "mirror_url": null, "open_issues_count": 0, "forks": 0, "open_issues": 0, "watchers": 1, "default_branch": "master" }, { "id": 15291117, "name": "gablog", "full_name": "mreiferson/gablog", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": 
"https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": "https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": "https://github.com/mreiferson/gablog", "description": "Gopher Academy Blog -- fork of go.blog", "fork": true, "url": "https://api.github.com/repos/mreiferson/gablog", "forks_url": "https://api.github.com/repos/mreiferson/gablog/forks", "keys_url": "https://api.github.com/repos/mreiferson/gablog/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/gablog/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/gablog/teams", "hooks_url": "https://api.github.com/repos/mreiferson/gablog/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/gablog/issues/events{/number}", "events_url": "https://api.github.com/repos/mreiferson/gablog/events", "assignees_url": "https://api.github.com/repos/mreiferson/gablog/assignees{/user}", "branches_url": "https://api.github.com/repos/mreiferson/gablog/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/gablog/tags", "blobs_url": "https://api.github.com/repos/mreiferson/gablog/git/blobs{/sha}", "git_tags_url": 
"https://api.github.com/repos/mreiferson/gablog/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/gablog/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/gablog/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/gablog/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/gablog/languages", "stargazers_url": "https://api.github.com/repos/mreiferson/gablog/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/gablog/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/gablog/subscribers", "subscription_url": "https://api.github.com/repos/mreiferson/gablog/subscription", "commits_url": "https://api.github.com/repos/mreiferson/gablog/commits{/sha}", "git_commits_url": "https://api.github.com/repos/mreiferson/gablog/git/commits{/sha}", "comments_url": "https://api.github.com/repos/mreiferson/gablog/comments{/number}", "issue_comment_url": "https://api.github.com/repos/mreiferson/gablog/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/gablog/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/gablog/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/mreiferson/gablog/merges", "archive_url": "https://api.github.com/repos/mreiferson/gablog/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/gablog/downloads", "issues_url": "https://api.github.com/repos/mreiferson/gablog/issues{/number}", "pulls_url": "https://api.github.com/repos/mreiferson/gablog/pulls{/number}", "milestones_url": "https://api.github.com/repos/mreiferson/gablog/milestones{/number}", "notifications_url": "https://api.github.com/repos/mreiferson/gablog/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/gablog/labels{/name}", "releases_url": "https://api.github.com/repos/mreiferson/gablog/releases{/id}", "created_at": 
"2013-12-18T18:38:37Z", "updated_at": "2013-12-20T22:39:33Z", "pushed_at": "2013-12-20T22:21:52Z", "git_url": "git://github.com/mreiferson/gablog.git", "ssh_url": "git@github.com:mreiferson/gablog.git", "clone_url": "https://github.com/mreiferson/gablog.git", "svn_url": "https://github.com/mreiferson/gablog", "homepage": "http://blog.gopheracademy.com", "size": 7911, "stargazers_count": 0, "watchers_count": 0, "language": "CSS", "has_issues": false, "has_downloads": true, "has_wiki": true, "forks_count": 0, "mirror_url": null, "open_issues_count": 0, "forks": 0, "open_issues": 0, "watchers": 0, "default_branch": "master" }, { "id": 12223286, "name": "git-open-pull", "full_name": "mreiferson/git-open-pull", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": "https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": "https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": "https://github.com/mreiferson/git-open-pull", "description": "convert a github issue into a pull request", "fork": true, "url": "https://api.github.com/repos/mreiferson/git-open-pull", "forks_url": "https://api.github.com/repos/mreiferson/git-open-pull/forks", "keys_url": 
"https://api.github.com/repos/mreiferson/git-open-pull/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/git-open-pull/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/git-open-pull/teams", "hooks_url": "https://api.github.com/repos/mreiferson/git-open-pull/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/git-open-pull/issues/events{/number}", "events_url": "https://api.github.com/repos/mreiferson/git-open-pull/events", "assignees_url": "https://api.github.com/repos/mreiferson/git-open-pull/assignees{/user}", "branches_url": "https://api.github.com/repos/mreiferson/git-open-pull/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/git-open-pull/tags", "blobs_url": "https://api.github.com/repos/mreiferson/git-open-pull/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/mreiferson/git-open-pull/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/git-open-pull/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/git-open-pull/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/git-open-pull/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/git-open-pull/languages", "stargazers_url": "https://api.github.com/repos/mreiferson/git-open-pull/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/git-open-pull/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/git-open-pull/subscribers", "subscription_url": "https://api.github.com/repos/mreiferson/git-open-pull/subscription", "commits_url": "https://api.github.com/repos/mreiferson/git-open-pull/commits{/sha}", "git_commits_url": "https://api.github.com/repos/mreiferson/git-open-pull/git/commits{/sha}", "comments_url": "https://api.github.com/repos/mreiferson/git-open-pull/comments{/number}", "issue_comment_url": 
"https://api.github.com/repos/mreiferson/git-open-pull/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/git-open-pull/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/git-open-pull/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/mreiferson/git-open-pull/merges", "archive_url": "https://api.github.com/repos/mreiferson/git-open-pull/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/git-open-pull/downloads", "issues_url": "https://api.github.com/repos/mreiferson/git-open-pull/issues{/number}", "pulls_url": "https://api.github.com/repos/mreiferson/git-open-pull/pulls{/number}", "milestones_url": "https://api.github.com/repos/mreiferson/git-open-pull/milestones{/number}", "notifications_url": "https://api.github.com/repos/mreiferson/git-open-pull/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/git-open-pull/labels{/name}", "releases_url": "https://api.github.com/repos/mreiferson/git-open-pull/releases{/id}", "created_at": "2013-08-19T17:45:47Z", "updated_at": "2014-03-03T19:50:11Z", "pushed_at": "2014-03-03T19:50:09Z", "git_url": "git://github.com/mreiferson/git-open-pull.git", "ssh_url": "git@github.com:mreiferson/git-open-pull.git", "clone_url": "https://github.com/mreiferson/git-open-pull.git", "svn_url": "https://github.com/mreiferson/git-open-pull", "homepage": "https://github.com/jehiah/git-open-pull", "size": 155, "stargazers_count": 0, "watchers_count": 0, "language": "Shell", "has_issues": false, "has_downloads": true, "has_wiki": true, "forks_count": 0, "mirror_url": null, "open_issues_count": 0, "forks": 0, "open_issues": 0, "watchers": 0, "default_branch": "master" }, { "id": 9547968, "name": "go-hostpool", "full_name": "mreiferson/go-hostpool", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": "https://avatars.githubusercontent.com/u/187441?", "gravatar_id": 
"dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": "https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": "https://github.com/mreiferson/go-hostpool", "description": "Intelligently and flexibly pool among multiple hosts from your Go application", "fork": true, "url": "https://api.github.com/repos/mreiferson/go-hostpool", "forks_url": "https://api.github.com/repos/mreiferson/go-hostpool/forks", "keys_url": "https://api.github.com/repos/mreiferson/go-hostpool/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/go-hostpool/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/go-hostpool/teams", "hooks_url": "https://api.github.com/repos/mreiferson/go-hostpool/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/go-hostpool/issues/events{/number}", "events_url": "https://api.github.com/repos/mreiferson/go-hostpool/events", "assignees_url": "https://api.github.com/repos/mreiferson/go-hostpool/assignees{/user}", "branches_url": "https://api.github.com/repos/mreiferson/go-hostpool/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/go-hostpool/tags", "blobs_url": 
"https://api.github.com/repos/mreiferson/go-hostpool/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/mreiferson/go-hostpool/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/go-hostpool/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/go-hostpool/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/go-hostpool/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/go-hostpool/languages", "stargazers_url": "https://api.github.com/repos/mreiferson/go-hostpool/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/go-hostpool/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/go-hostpool/subscribers", "subscription_url": "https://api.github.com/repos/mreiferson/go-hostpool/subscription", "commits_url": "https://api.github.com/repos/mreiferson/go-hostpool/commits{/sha}", "git_commits_url": "https://api.github.com/repos/mreiferson/go-hostpool/git/commits{/sha}", "comments_url": "https://api.github.com/repos/mreiferson/go-hostpool/comments{/number}", "issue_comment_url": "https://api.github.com/repos/mreiferson/go-hostpool/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/go-hostpool/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/go-hostpool/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/mreiferson/go-hostpool/merges", "archive_url": "https://api.github.com/repos/mreiferson/go-hostpool/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/go-hostpool/downloads", "issues_url": "https://api.github.com/repos/mreiferson/go-hostpool/issues{/number}", "pulls_url": "https://api.github.com/repos/mreiferson/go-hostpool/pulls{/number}", "milestones_url": "https://api.github.com/repos/mreiferson/go-hostpool/milestones{/number}", "notifications_url": 
"https://api.github.com/repos/mreiferson/go-hostpool/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/go-hostpool/labels{/name}", "releases_url": "https://api.github.com/repos/mreiferson/go-hostpool/releases{/id}", "created_at": "2013-04-19T15:06:04Z", "updated_at": "2013-04-30T14:17:45Z", "pushed_at": "2013-04-30T14:17:44Z", "git_url": "git://github.com/mreiferson/go-hostpool.git", "ssh_url": "git@github.com:mreiferson/go-hostpool.git", "clone_url": "https://github.com/mreiferson/go-hostpool.git", "svn_url": "https://github.com/mreiferson/go-hostpool", "homepage": null, "size": 98, "stargazers_count": 0, "watchers_count": 0, "language": "Go", "has_issues": false, "has_downloads": true, "has_wiki": true, "forks_count": 0, "mirror_url": null, "open_issues_count": 0, "forks": 0, "open_issues": 0, "watchers": 0, "default_branch": "master" }, { "id": 3488675, "name": "go-httpclient", "full_name": "mreiferson/go-httpclient", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": "https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": "https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": 
"https://github.com/mreiferson/go-httpclient", "description": "a Go HTTP client with timeouts", "fork": false, "url": "https://api.github.com/repos/mreiferson/go-httpclient", "forks_url": "https://api.github.com/repos/mreiferson/go-httpclient/forks", "keys_url": "https://api.github.com/repos/mreiferson/go-httpclient/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/go-httpclient/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/go-httpclient/teams", "hooks_url": "https://api.github.com/repos/mreiferson/go-httpclient/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/go-httpclient/issues/events{/number}", "events_url": "https://api.github.com/repos/mreiferson/go-httpclient/events", "assignees_url": "https://api.github.com/repos/mreiferson/go-httpclient/assignees{/user}", "branches_url": "https://api.github.com/repos/mreiferson/go-httpclient/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/go-httpclient/tags", "blobs_url": "https://api.github.com/repos/mreiferson/go-httpclient/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/mreiferson/go-httpclient/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/go-httpclient/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/go-httpclient/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/go-httpclient/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/go-httpclient/languages", "stargazers_url": "https://api.github.com/repos/mreiferson/go-httpclient/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/go-httpclient/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/go-httpclient/subscribers", "subscription_url": "https://api.github.com/repos/mreiferson/go-httpclient/subscription", "commits_url": "https://api.github.com/repos/mreiferson/go-httpclient/commits{/sha}", 
"git_commits_url": "https://api.github.com/repos/mreiferson/go-httpclient/git/commits{/sha}", "comments_url": "https://api.github.com/repos/mreiferson/go-httpclient/comments{/number}", "issue_comment_url": "https://api.github.com/repos/mreiferson/go-httpclient/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/go-httpclient/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/go-httpclient/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/mreiferson/go-httpclient/merges", "archive_url": "https://api.github.com/repos/mreiferson/go-httpclient/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/go-httpclient/downloads", "issues_url": "https://api.github.com/repos/mreiferson/go-httpclient/issues{/number}", "pulls_url": "https://api.github.com/repos/mreiferson/go-httpclient/pulls{/number}", "milestones_url": "https://api.github.com/repos/mreiferson/go-httpclient/milestones{/number}", "notifications_url": "https://api.github.com/repos/mreiferson/go-httpclient/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/go-httpclient/labels{/name}", "releases_url": "https://api.github.com/repos/mreiferson/go-httpclient/releases{/id}", "created_at": "2012-02-19T21:51:42Z", "updated_at": "2014-07-19T16:41:18Z", "pushed_at": "2014-04-25T16:53:03Z", "git_url": "git://github.com/mreiferson/go-httpclient.git", "ssh_url": "git@github.com:mreiferson/go-httpclient.git", "clone_url": "https://github.com/mreiferson/go-httpclient.git", "svn_url": "https://github.com/mreiferson/go-httpclient", "homepage": "", "size": 362, "stargazers_count": 167, "watchers_count": 167, "language": "Go", "has_issues": true, "has_downloads": true, "has_wiki": false, "forks_count": 21, "mirror_url": null, "open_issues_count": 0, "forks": 21, "open_issues": 0, "watchers": 167, "default_branch": "master" }, { "id": 3924124, "name": "go-install-as", "full_name": 
"mreiferson/go-install-as", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": "https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": "https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": "https://github.com/mreiferson/go-install-as", "description": "a Go tool to install a package with a specific import path", "fork": false, "url": "https://api.github.com/repos/mreiferson/go-install-as", "forks_url": "https://api.github.com/repos/mreiferson/go-install-as/forks", "keys_url": "https://api.github.com/repos/mreiferson/go-install-as/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/go-install-as/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/go-install-as/teams", "hooks_url": "https://api.github.com/repos/mreiferson/go-install-as/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/go-install-as/issues/events{/number}", "events_url": "https://api.github.com/repos/mreiferson/go-install-as/events", "assignees_url": "https://api.github.com/repos/mreiferson/go-install-as/assignees{/user}", "branches_url": 
"https://api.github.com/repos/mreiferson/go-install-as/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/go-install-as/tags", "blobs_url": "https://api.github.com/repos/mreiferson/go-install-as/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/mreiferson/go-install-as/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/go-install-as/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/go-install-as/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/go-install-as/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/go-install-as/languages", "stargazers_url": "https://api.github.com/repos/mreiferson/go-install-as/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/go-install-as/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/go-install-as/subscribers", "subscription_url": "https://api.github.com/repos/mreiferson/go-install-as/subscription", "commits_url": "https://api.github.com/repos/mreiferson/go-install-as/commits{/sha}", "git_commits_url": "https://api.github.com/repos/mreiferson/go-install-as/git/commits{/sha}", "comments_url": "https://api.github.com/repos/mreiferson/go-install-as/comments{/number}", "issue_comment_url": "https://api.github.com/repos/mreiferson/go-install-as/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/go-install-as/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/go-install-as/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/mreiferson/go-install-as/merges", "archive_url": "https://api.github.com/repos/mreiferson/go-install-as/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/go-install-as/downloads", "issues_url": "https://api.github.com/repos/mreiferson/go-install-as/issues{/number}", "pulls_url": 
"https://api.github.com/repos/mreiferson/go-install-as/pulls{/number}", "milestones_url": "https://api.github.com/repos/mreiferson/go-install-as/milestones{/number}", "notifications_url": "https://api.github.com/repos/mreiferson/go-install-as/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/go-install-as/labels{/name}", "releases_url": "https://api.github.com/repos/mreiferson/go-install-as/releases{/id}", "created_at": "2012-04-04T00:17:37Z", "updated_at": "2014-06-29T20:11:46Z", "pushed_at": "2012-09-24T16:08:50Z", "git_url": "git://github.com/mreiferson/go-install-as.git", "ssh_url": "git@github.com:mreiferson/go-install-as.git", "clone_url": "https://github.com/mreiferson/go-install-as.git", "svn_url": "https://github.com/mreiferson/go-install-as", "homepage": "", "size": 107, "stargazers_count": 53, "watchers_count": 53, "language": "Shell", "has_issues": true, "has_downloads": true, "has_wiki": false, "forks_count": 2, "mirror_url": null, "open_issues_count": 0, "forks": 2, "open_issues": 0, "watchers": 53, "default_branch": "master" }, { "id": 4744067, "name": "go-notify", "full_name": "mreiferson/go-notify", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": "https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": 
"https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": "https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": "https://github.com/mreiferson/go-notify", "description": "a Go package to observe notable events in a decoupled fashion", "fork": true, "url": "https://api.github.com/repos/mreiferson/go-notify", "forks_url": "https://api.github.com/repos/mreiferson/go-notify/forks", "keys_url": "https://api.github.com/repos/mreiferson/go-notify/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/go-notify/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/go-notify/teams", "hooks_url": "https://api.github.com/repos/mreiferson/go-notify/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/go-notify/issues/events{/number}", "events_url": "https://api.github.com/repos/mreiferson/go-notify/events", "assignees_url": "https://api.github.com/repos/mreiferson/go-notify/assignees{/user}", "branches_url": "https://api.github.com/repos/mreiferson/go-notify/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/go-notify/tags", "blobs_url": "https://api.github.com/repos/mreiferson/go-notify/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/mreiferson/go-notify/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/go-notify/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/go-notify/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/go-notify/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/go-notify/languages", "stargazers_url": "https://api.github.com/repos/mreiferson/go-notify/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/go-notify/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/go-notify/subscribers", "subscription_url": 
"https://api.github.com/repos/mreiferson/go-notify/subscription", "commits_url": "https://api.github.com/repos/mreiferson/go-notify/commits{/sha}", "git_commits_url": "https://api.github.com/repos/mreiferson/go-notify/git/commits{/sha}", "comments_url": "https://api.github.com/repos/mreiferson/go-notify/comments{/number}", "issue_comment_url": "https://api.github.com/repos/mreiferson/go-notify/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/go-notify/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/go-notify/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/mreiferson/go-notify/merges", "archive_url": "https://api.github.com/repos/mreiferson/go-notify/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/go-notify/downloads", "issues_url": "https://api.github.com/repos/mreiferson/go-notify/issues{/number}", "pulls_url": "https://api.github.com/repos/mreiferson/go-notify/pulls{/number}", "milestones_url": "https://api.github.com/repos/mreiferson/go-notify/milestones{/number}", "notifications_url": "https://api.github.com/repos/mreiferson/go-notify/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/go-notify/labels{/name}", "releases_url": "https://api.github.com/repos/mreiferson/go-notify/releases{/id}", "created_at": "2012-06-21T20:30:43Z", "updated_at": "2013-01-10T18:07:58Z", "pushed_at": "2012-06-21T20:30:22Z", "git_url": "git://github.com/mreiferson/go-notify.git", "ssh_url": "git@github.com:mreiferson/go-notify.git", "clone_url": "https://github.com/mreiferson/go-notify.git", "svn_url": "https://github.com/mreiferson/go-notify", "homepage": null, "size": 68, "stargazers_count": 1, "watchers_count": 1, "language": "Go", "has_issues": false, "has_downloads": true, "has_wiki": true, "forks_count": 0, "mirror_url": null, "open_issues_count": 0, "forks": 0, "open_issues": 0, "watchers": 1, "default_branch": 
"master" }, { "id": 12449360, "name": "go-nsq", "full_name": "mreiferson/go-nsq", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": "https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": "https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": "https://github.com/mreiferson/go-nsq", "description": "the official Go package for NSQ", "fork": true, "url": "https://api.github.com/repos/mreiferson/go-nsq", "forks_url": "https://api.github.com/repos/mreiferson/go-nsq/forks", "keys_url": "https://api.github.com/repos/mreiferson/go-nsq/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/go-nsq/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/go-nsq/teams", "hooks_url": "https://api.github.com/repos/mreiferson/go-nsq/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/go-nsq/issues/events{/number}", "events_url": "https://api.github.com/repos/mreiferson/go-nsq/events", "assignees_url": "https://api.github.com/repos/mreiferson/go-nsq/assignees{/user}", "branches_url": "https://api.github.com/repos/mreiferson/go-nsq/branches{/branch}", "tags_url": 
"https://api.github.com/repos/mreiferson/go-nsq/tags", "blobs_url": "https://api.github.com/repos/mreiferson/go-nsq/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/mreiferson/go-nsq/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/go-nsq/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/go-nsq/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/go-nsq/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/go-nsq/languages", "stargazers_url": "https://api.github.com/repos/mreiferson/go-nsq/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/go-nsq/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/go-nsq/subscribers", "subscription_url": "https://api.github.com/repos/mreiferson/go-nsq/subscription", "commits_url": "https://api.github.com/repos/mreiferson/go-nsq/commits{/sha}", "git_commits_url": "https://api.github.com/repos/mreiferson/go-nsq/git/commits{/sha}", "comments_url": "https://api.github.com/repos/mreiferson/go-nsq/comments{/number}", "issue_comment_url": "https://api.github.com/repos/mreiferson/go-nsq/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/go-nsq/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/go-nsq/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/mreiferson/go-nsq/merges", "archive_url": "https://api.github.com/repos/mreiferson/go-nsq/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/go-nsq/downloads", "issues_url": "https://api.github.com/repos/mreiferson/go-nsq/issues{/number}", "pulls_url": "https://api.github.com/repos/mreiferson/go-nsq/pulls{/number}", "milestones_url": "https://api.github.com/repos/mreiferson/go-nsq/milestones{/number}", "notifications_url": "https://api.github.com/repos/mreiferson/go-nsq/notifications{?since,all,participating}", "labels_url": 
"https://api.github.com/repos/mreiferson/go-nsq/labels{/name}", "releases_url": "https://api.github.com/repos/mreiferson/go-nsq/releases{/id}", "created_at": "2013-08-29T02:07:54Z", "updated_at": "2014-06-29T13:56:36Z", "pushed_at": "2014-07-20T16:44:32Z", "git_url": "git://github.com/mreiferson/go-nsq.git", "ssh_url": "git@github.com:mreiferson/go-nsq.git", "clone_url": "https://github.com/mreiferson/go-nsq.git", "svn_url": "https://github.com/mreiferson/go-nsq", "homepage": "", "size": 1783, "stargazers_count": 0, "watchers_count": 0, "language": "Go", "has_issues": false, "has_downloads": true, "has_wiki": false, "forks_count": 1, "mirror_url": null, "open_issues_count": 0, "forks": 1, "open_issues": 0, "watchers": 0, "default_branch": "master" }, { "id": 16654468, "name": "go-options", "full_name": "mreiferson/go-options", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": "https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": "https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": "https://github.com/mreiferson/go-options", "description": "a Go package to structure and resolve options", "fork": false, "url": 
"https://api.github.com/repos/mreiferson/go-options", "forks_url": "https://api.github.com/repos/mreiferson/go-options/forks", "keys_url": "https://api.github.com/repos/mreiferson/go-options/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/go-options/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/go-options/teams", "hooks_url": "https://api.github.com/repos/mreiferson/go-options/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/go-options/issues/events{/number}", "events_url": "https://api.github.com/repos/mreiferson/go-options/events", "assignees_url": "https://api.github.com/repos/mreiferson/go-options/assignees{/user}", "branches_url": "https://api.github.com/repos/mreiferson/go-options/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/go-options/tags", "blobs_url": "https://api.github.com/repos/mreiferson/go-options/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/mreiferson/go-options/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/go-options/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/go-options/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/go-options/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/go-options/languages", "stargazers_url": "https://api.github.com/repos/mreiferson/go-options/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/go-options/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/go-options/subscribers", "subscription_url": "https://api.github.com/repos/mreiferson/go-options/subscription", "commits_url": "https://api.github.com/repos/mreiferson/go-options/commits{/sha}", "git_commits_url": "https://api.github.com/repos/mreiferson/go-options/git/commits{/sha}", "comments_url": "https://api.github.com/repos/mreiferson/go-options/comments{/number}", 
"issue_comment_url": "https://api.github.com/repos/mreiferson/go-options/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/go-options/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/go-options/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/mreiferson/go-options/merges", "archive_url": "https://api.github.com/repos/mreiferson/go-options/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/go-options/downloads", "issues_url": "https://api.github.com/repos/mreiferson/go-options/issues{/number}", "pulls_url": "https://api.github.com/repos/mreiferson/go-options/pulls{/number}", "milestones_url": "https://api.github.com/repos/mreiferson/go-options/milestones{/number}", "notifications_url": "https://api.github.com/repos/mreiferson/go-options/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/go-options/labels{/name}", "releases_url": "https://api.github.com/repos/mreiferson/go-options/releases{/id}", "created_at": "2014-02-08T22:19:33Z", "updated_at": "2014-02-16T00:39:59Z", "pushed_at": "2014-02-16T00:39:58Z", "git_url": "git://github.com/mreiferson/go-options.git", "ssh_url": "git@github.com:mreiferson/go-options.git", "clone_url": "https://github.com/mreiferson/go-options.git", "svn_url": "https://github.com/mreiferson/go-options", "homepage": null, "size": 128, "stargazers_count": 1, "watchers_count": 1, "language": "Go", "has_issues": true, "has_downloads": true, "has_wiki": true, "forks_count": 0, "mirror_url": null, "open_issues_count": 0, "forks": 0, "open_issues": 0, "watchers": 1, "default_branch": "master" }, { "id": 3924909, "name": "go-simplejson", "full_name": "mreiferson/go-simplejson", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": "https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", 
"html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": "https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": "https://github.com/mreiferson/go-simplejson", "description": "a Go package to interact with arbitrary JSON", "fork": true, "url": "https://api.github.com/repos/mreiferson/go-simplejson", "forks_url": "https://api.github.com/repos/mreiferson/go-simplejson/forks", "keys_url": "https://api.github.com/repos/mreiferson/go-simplejson/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/go-simplejson/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/go-simplejson/teams", "hooks_url": "https://api.github.com/repos/mreiferson/go-simplejson/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/go-simplejson/issues/events{/number}", "events_url": "https://api.github.com/repos/mreiferson/go-simplejson/events", "assignees_url": "https://api.github.com/repos/mreiferson/go-simplejson/assignees{/user}", "branches_url": "https://api.github.com/repos/mreiferson/go-simplejson/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/go-simplejson/tags", "blobs_url": "https://api.github.com/repos/mreiferson/go-simplejson/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/mreiferson/go-simplejson/git/tags{/sha}", 
"git_refs_url": "https://api.github.com/repos/mreiferson/go-simplejson/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/go-simplejson/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/go-simplejson/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/go-simplejson/languages", "stargazers_url": "https://api.github.com/repos/mreiferson/go-simplejson/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/go-simplejson/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/go-simplejson/subscribers", "subscription_url": "https://api.github.com/repos/mreiferson/go-simplejson/subscription", "commits_url": "https://api.github.com/repos/mreiferson/go-simplejson/commits{/sha}", "git_commits_url": "https://api.github.com/repos/mreiferson/go-simplejson/git/commits{/sha}", "comments_url": "https://api.github.com/repos/mreiferson/go-simplejson/comments{/number}", "issue_comment_url": "https://api.github.com/repos/mreiferson/go-simplejson/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/go-simplejson/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/go-simplejson/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/mreiferson/go-simplejson/merges", "archive_url": "https://api.github.com/repos/mreiferson/go-simplejson/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/go-simplejson/downloads", "issues_url": "https://api.github.com/repos/mreiferson/go-simplejson/issues{/number}", "pulls_url": "https://api.github.com/repos/mreiferson/go-simplejson/pulls{/number}", "milestones_url": "https://api.github.com/repos/mreiferson/go-simplejson/milestones{/number}", "notifications_url": "https://api.github.com/repos/mreiferson/go-simplejson/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/go-simplejson/labels{/name}", 
"releases_url": "https://api.github.com/repos/mreiferson/go-simplejson/releases{/id}", "created_at": "2012-04-04T02:36:33Z", "updated_at": "2014-06-25T01:24:01Z", "pushed_at": "2014-06-30T15:13:50Z", "git_url": "git://github.com/mreiferson/go-simplejson.git", "ssh_url": "git@github.com:mreiferson/go-simplejson.git", "clone_url": "https://github.com/mreiferson/go-simplejson.git", "svn_url": "https://github.com/mreiferson/go-simplejson", "homepage": "", "size": 210, "stargazers_count": 1, "watchers_count": 1, "language": "Go", "has_issues": false, "has_downloads": true, "has_wiki": false, "forks_count": 0, "mirror_url": null, "open_issues_count": 0, "forks": 0, "open_issues": 0, "watchers": 1, "default_branch": "master" }, { "id": 8614089, "name": "go-simplelog", "full_name": "mreiferson/go-simplelog", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": "https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": "https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": "https://github.com/mreiferson/go-simplelog", "description": "a simple logging package for Go (inspired by Tornado)", "fork": false, "url": 
"https://api.github.com/repos/mreiferson/go-simplelog", "forks_url": "https://api.github.com/repos/mreiferson/go-simplelog/forks", "keys_url": "https://api.github.com/repos/mreiferson/go-simplelog/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/go-simplelog/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/go-simplelog/teams", "hooks_url": "https://api.github.com/repos/mreiferson/go-simplelog/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/go-simplelog/issues/events{/number}", "events_url": "https://api.github.com/repos/mreiferson/go-simplelog/events", "assignees_url": "https://api.github.com/repos/mreiferson/go-simplelog/assignees{/user}", "branches_url": "https://api.github.com/repos/mreiferson/go-simplelog/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/go-simplelog/tags", "blobs_url": "https://api.github.com/repos/mreiferson/go-simplelog/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/mreiferson/go-simplelog/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/go-simplelog/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/go-simplelog/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/go-simplelog/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/go-simplelog/languages", "stargazers_url": "https://api.github.com/repos/mreiferson/go-simplelog/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/go-simplelog/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/go-simplelog/subscribers", "subscription_url": "https://api.github.com/repos/mreiferson/go-simplelog/subscription", "commits_url": "https://api.github.com/repos/mreiferson/go-simplelog/commits{/sha}", "git_commits_url": "https://api.github.com/repos/mreiferson/go-simplelog/git/commits{/sha}", "comments_url": 
"https://api.github.com/repos/mreiferson/go-simplelog/comments{/number}", "issue_comment_url": "https://api.github.com/repos/mreiferson/go-simplelog/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/go-simplelog/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/go-simplelog/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/mreiferson/go-simplelog/merges", "archive_url": "https://api.github.com/repos/mreiferson/go-simplelog/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/go-simplelog/downloads", "issues_url": "https://api.github.com/repos/mreiferson/go-simplelog/issues{/number}", "pulls_url": "https://api.github.com/repos/mreiferson/go-simplelog/pulls{/number}", "milestones_url": "https://api.github.com/repos/mreiferson/go-simplelog/milestones{/number}", "notifications_url": "https://api.github.com/repos/mreiferson/go-simplelog/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/go-simplelog/labels{/name}", "releases_url": "https://api.github.com/repos/mreiferson/go-simplelog/releases{/id}", "created_at": "2013-03-06T21:53:48Z", "updated_at": "2013-10-11T22:49:05Z", "pushed_at": "2013-03-31T23:20:11Z", "git_url": "git://github.com/mreiferson/go-simplelog.git", "ssh_url": "git@github.com:mreiferson/go-simplelog.git", "clone_url": "https://github.com/mreiferson/go-simplelog.git", "svn_url": "https://github.com/mreiferson/go-simplelog", "homepage": null, "size": 140, "stargazers_count": 1, "watchers_count": 1, "language": "Go", "has_issues": true, "has_downloads": true, "has_wiki": false, "forks_count": 1, "mirror_url": null, "open_issues_count": 0, "forks": 1, "open_issues": 0, "watchers": 1, "default_branch": "master" }, { "id": 12498288, "name": "go-snappystream", "full_name": "mreiferson/go-snappystream", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": 
"https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": "https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": "https://github.com/mreiferson/go-snappystream", "description": "a Go package for framed snappy streams", "fork": false, "url": "https://api.github.com/repos/mreiferson/go-snappystream", "forks_url": "https://api.github.com/repos/mreiferson/go-snappystream/forks", "keys_url": "https://api.github.com/repos/mreiferson/go-snappystream/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/go-snappystream/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/go-snappystream/teams", "hooks_url": "https://api.github.com/repos/mreiferson/go-snappystream/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/go-snappystream/issues/events{/number}", "events_url": "https://api.github.com/repos/mreiferson/go-snappystream/events", "assignees_url": "https://api.github.com/repos/mreiferson/go-snappystream/assignees{/user}", "branches_url": "https://api.github.com/repos/mreiferson/go-snappystream/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/go-snappystream/tags", 
"blobs_url": "https://api.github.com/repos/mreiferson/go-snappystream/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/mreiferson/go-snappystream/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/go-snappystream/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/go-snappystream/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/go-snappystream/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/go-snappystream/languages", "stargazers_url": "https://api.github.com/repos/mreiferson/go-snappystream/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/go-snappystream/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/go-snappystream/subscribers", "subscription_url": "https://api.github.com/repos/mreiferson/go-snappystream/subscription", "commits_url": "https://api.github.com/repos/mreiferson/go-snappystream/commits{/sha}", "git_commits_url": "https://api.github.com/repos/mreiferson/go-snappystream/git/commits{/sha}", "comments_url": "https://api.github.com/repos/mreiferson/go-snappystream/comments{/number}", "issue_comment_url": "https://api.github.com/repos/mreiferson/go-snappystream/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/go-snappystream/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/go-snappystream/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/mreiferson/go-snappystream/merges", "archive_url": "https://api.github.com/repos/mreiferson/go-snappystream/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/go-snappystream/downloads", "issues_url": "https://api.github.com/repos/mreiferson/go-snappystream/issues{/number}", "pulls_url": "https://api.github.com/repos/mreiferson/go-snappystream/pulls{/number}", "milestones_url": 
"https://api.github.com/repos/mreiferson/go-snappystream/milestones{/number}", "notifications_url": "https://api.github.com/repos/mreiferson/go-snappystream/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/go-snappystream/labels{/name}", "releases_url": "https://api.github.com/repos/mreiferson/go-snappystream/releases{/id}", "created_at": "2013-08-31T00:41:11Z", "updated_at": "2014-07-20T07:52:45Z", "pushed_at": "2013-09-17T21:00:14Z", "git_url": "git://github.com/mreiferson/go-snappystream.git", "ssh_url": "git@github.com:mreiferson/go-snappystream.git", "clone_url": "https://github.com/mreiferson/go-snappystream.git", "svn_url": "https://github.com/mreiferson/go-snappystream", "homepage": null, "size": 184, "stargazers_count": 21, "watchers_count": 21, "language": "Go", "has_issues": true, "has_downloads": true, "has_wiki": true, "forks_count": 1, "mirror_url": null, "open_issues_count": 0, "forks": 1, "open_issues": 0, "watchers": 21, "default_branch": "master" }, { "id": 5183238, "name": "go-stat", "full_name": "mreiferson/go-stat", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": "https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": 
"https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": "https://github.com/mreiferson/go-stat", "description": "performant instrumentation/profiling for Go", "fork": false, "url": "https://api.github.com/repos/mreiferson/go-stat", "forks_url": "https://api.github.com/repos/mreiferson/go-stat/forks", "keys_url": "https://api.github.com/repos/mreiferson/go-stat/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/go-stat/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/go-stat/teams", "hooks_url": "https://api.github.com/repos/mreiferson/go-stat/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/go-stat/issues/events{/number}", "events_url": "https://api.github.com/repos/mreiferson/go-stat/events", "assignees_url": "https://api.github.com/repos/mreiferson/go-stat/assignees{/user}", "branches_url": "https://api.github.com/repos/mreiferson/go-stat/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/go-stat/tags", "blobs_url": "https://api.github.com/repos/mreiferson/go-stat/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/mreiferson/go-stat/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/go-stat/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/go-stat/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/go-stat/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/go-stat/languages", "stargazers_url": "https://api.github.com/repos/mreiferson/go-stat/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/go-stat/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/go-stat/subscribers", "subscription_url": "https://api.github.com/repos/mreiferson/go-stat/subscription", "commits_url": "https://api.github.com/repos/mreiferson/go-stat/commits{/sha}", 
"git_commits_url": "https://api.github.com/repos/mreiferson/go-stat/git/commits{/sha}", "comments_url": "https://api.github.com/repos/mreiferson/go-stat/comments{/number}", "issue_comment_url": "https://api.github.com/repos/mreiferson/go-stat/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/go-stat/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/go-stat/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/mreiferson/go-stat/merges", "archive_url": "https://api.github.com/repos/mreiferson/go-stat/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/go-stat/downloads", "issues_url": "https://api.github.com/repos/mreiferson/go-stat/issues{/number}", "pulls_url": "https://api.github.com/repos/mreiferson/go-stat/pulls{/number}", "milestones_url": "https://api.github.com/repos/mreiferson/go-stat/milestones{/number}", "notifications_url": "https://api.github.com/repos/mreiferson/go-stat/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/go-stat/labels{/name}", "releases_url": "https://api.github.com/repos/mreiferson/go-stat/releases{/id}", "created_at": "2012-07-25T19:03:42Z", "updated_at": "2014-01-10T04:39:14Z", "pushed_at": "2012-07-25T19:04:37Z", "git_url": "git://github.com/mreiferson/go-stat.git", "ssh_url": "git@github.com:mreiferson/go-stat.git", "clone_url": "https://github.com/mreiferson/go-stat.git", "svn_url": "https://github.com/mreiferson/go-stat", "homepage": null, "size": 96, "stargazers_count": 1, "watchers_count": 1, "language": "Go", "has_issues": true, "has_downloads": true, "has_wiki": true, "forks_count": 0, "mirror_url": null, "open_issues_count": 0, "forks": 0, "open_issues": 0, "watchers": 1, "default_branch": "master" }, { "id": 8662365, "name": "go-ujson", "full_name": "mreiferson/go-ujson", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": 
"https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": "https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": "https://github.com/mreiferson/go-ujson", "description": "a pure Go port of ultrajson", "fork": false, "url": "https://api.github.com/repos/mreiferson/go-ujson", "forks_url": "https://api.github.com/repos/mreiferson/go-ujson/forks", "keys_url": "https://api.github.com/repos/mreiferson/go-ujson/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/go-ujson/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/go-ujson/teams", "hooks_url": "https://api.github.com/repos/mreiferson/go-ujson/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/go-ujson/issues/events{/number}", "events_url": "https://api.github.com/repos/mreiferson/go-ujson/events", "assignees_url": "https://api.github.com/repos/mreiferson/go-ujson/assignees{/user}", "branches_url": "https://api.github.com/repos/mreiferson/go-ujson/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/go-ujson/tags", "blobs_url": "https://api.github.com/repos/mreiferson/go-ujson/git/blobs{/sha}", "git_tags_url": 
"https://api.github.com/repos/mreiferson/go-ujson/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/go-ujson/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/go-ujson/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/go-ujson/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/go-ujson/languages", "stargazers_url": "https://api.github.com/repos/mreiferson/go-ujson/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/go-ujson/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/go-ujson/subscribers", "subscription_url": "https://api.github.com/repos/mreiferson/go-ujson/subscription", "commits_url": "https://api.github.com/repos/mreiferson/go-ujson/commits{/sha}", "git_commits_url": "https://api.github.com/repos/mreiferson/go-ujson/git/commits{/sha}", "comments_url": "https://api.github.com/repos/mreiferson/go-ujson/comments{/number}", "issue_comment_url": "https://api.github.com/repos/mreiferson/go-ujson/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/go-ujson/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/go-ujson/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/mreiferson/go-ujson/merges", "archive_url": "https://api.github.com/repos/mreiferson/go-ujson/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/go-ujson/downloads", "issues_url": "https://api.github.com/repos/mreiferson/go-ujson/issues{/number}", "pulls_url": "https://api.github.com/repos/mreiferson/go-ujson/pulls{/number}", "milestones_url": "https://api.github.com/repos/mreiferson/go-ujson/milestones{/number}", "notifications_url": "https://api.github.com/repos/mreiferson/go-ujson/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/go-ujson/labels{/name}", "releases_url": 
"https://api.github.com/repos/mreiferson/go-ujson/releases{/id}", "created_at": "2013-03-08T23:57:54Z", "updated_at": "2014-06-26T01:50:40Z", "pushed_at": "2013-11-10T19:49:16Z", "git_url": "git://github.com/mreiferson/go-ujson.git", "ssh_url": "git@github.com:mreiferson/go-ujson.git", "clone_url": "https://github.com/mreiferson/go-ujson.git", "svn_url": "https://github.com/mreiferson/go-ujson", "homepage": "", "size": 140, "stargazers_count": 31, "watchers_count": 31, "language": "Go", "has_issues": true, "has_downloads": true, "has_wiki": true, "forks_count": 8, "mirror_url": null, "open_issues_count": 0, "forks": 8, "open_issues": 0, "watchers": 31, "default_branch": "master" }, { "id": 12815437, "name": "godep", "full_name": "mreiferson/godep", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": "https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": "https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": "https://github.com/mreiferson/godep", "description": "dependency tool for go", "fork": true, "url": "https://api.github.com/repos/mreiferson/godep", "forks_url": "https://api.github.com/repos/mreiferson/godep/forks", "keys_url": 
"https://api.github.com/repos/mreiferson/godep/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/godep/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/godep/teams", "hooks_url": "https://api.github.com/repos/mreiferson/godep/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/godep/issues/events{/number}", "events_url": "https://api.github.com/repos/mreiferson/godep/events", "assignees_url": "https://api.github.com/repos/mreiferson/godep/assignees{/user}", "branches_url": "https://api.github.com/repos/mreiferson/godep/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/godep/tags", "blobs_url": "https://api.github.com/repos/mreiferson/godep/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/mreiferson/godep/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/godep/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/godep/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/godep/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/godep/languages", "stargazers_url": "https://api.github.com/repos/mreiferson/godep/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/godep/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/godep/subscribers", "subscription_url": "https://api.github.com/repos/mreiferson/godep/subscription", "commits_url": "https://api.github.com/repos/mreiferson/godep/commits{/sha}", "git_commits_url": "https://api.github.com/repos/mreiferson/godep/git/commits{/sha}", "comments_url": "https://api.github.com/repos/mreiferson/godep/comments{/number}", "issue_comment_url": "https://api.github.com/repos/mreiferson/godep/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/godep/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/godep/compare/{base}...{head}", 
"merges_url": "https://api.github.com/repos/mreiferson/godep/merges", "archive_url": "https://api.github.com/repos/mreiferson/godep/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/godep/downloads", "issues_url": "https://api.github.com/repos/mreiferson/godep/issues{/number}", "pulls_url": "https://api.github.com/repos/mreiferson/godep/pulls{/number}", "milestones_url": "https://api.github.com/repos/mreiferson/godep/milestones{/number}", "notifications_url": "https://api.github.com/repos/mreiferson/godep/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/godep/labels{/name}", "releases_url": "https://api.github.com/repos/mreiferson/godep/releases{/id}", "created_at": "2013-09-13T17:36:10Z", "updated_at": "2014-03-21T02:53:20Z", "pushed_at": "2014-01-05T18:07:02Z", "git_url": "git://github.com/mreiferson/godep.git", "ssh_url": "git@github.com:mreiferson/godep.git", "clone_url": "https://github.com/mreiferson/godep.git", "svn_url": "https://github.com/mreiferson/godep", "homepage": "http://godoc.org/github.com/kr/godep", "size": 196, "stargazers_count": 0, "watchers_count": 0, "language": "Go", "has_issues": false, "has_downloads": true, "has_wiki": true, "forks_count": 0, "mirror_url": null, "open_issues_count": 0, "forks": 0, "open_issues": 0, "watchers": 0, "default_branch": "master" }, { "id": 2862096, "name": "hajiworld", "full_name": "mreiferson/hajiworld", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": "https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": 
"https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": "https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": "https://github.com/mreiferson/hajiworld", "description": "super mario clone (1999)", "fork": false, "url": "https://api.github.com/repos/mreiferson/hajiworld", "forks_url": "https://api.github.com/repos/mreiferson/hajiworld/forks", "keys_url": "https://api.github.com/repos/mreiferson/hajiworld/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/hajiworld/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/hajiworld/teams", "hooks_url": "https://api.github.com/repos/mreiferson/hajiworld/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/hajiworld/issues/events{/number}", "events_url": "https://api.github.com/repos/mreiferson/hajiworld/events", "assignees_url": "https://api.github.com/repos/mreiferson/hajiworld/assignees{/user}", "branches_url": "https://api.github.com/repos/mreiferson/hajiworld/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/hajiworld/tags", "blobs_url": "https://api.github.com/repos/mreiferson/hajiworld/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/mreiferson/hajiworld/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/hajiworld/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/hajiworld/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/hajiworld/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/hajiworld/languages", "stargazers_url": 
"https://api.github.com/repos/mreiferson/hajiworld/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/hajiworld/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/hajiworld/subscribers", "subscription_url": "https://api.github.com/repos/mreiferson/hajiworld/subscription", "commits_url": "https://api.github.com/repos/mreiferson/hajiworld/commits{/sha}", "git_commits_url": "https://api.github.com/repos/mreiferson/hajiworld/git/commits{/sha}", "comments_url": "https://api.github.com/repos/mreiferson/hajiworld/comments{/number}", "issue_comment_url": "https://api.github.com/repos/mreiferson/hajiworld/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/hajiworld/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/hajiworld/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/mreiferson/hajiworld/merges", "archive_url": "https://api.github.com/repos/mreiferson/hajiworld/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/hajiworld/downloads", "issues_url": "https://api.github.com/repos/mreiferson/hajiworld/issues{/number}", "pulls_url": "https://api.github.com/repos/mreiferson/hajiworld/pulls{/number}", "milestones_url": "https://api.github.com/repos/mreiferson/hajiworld/milestones{/number}", "notifications_url": "https://api.github.com/repos/mreiferson/hajiworld/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/hajiworld/labels{/name}", "releases_url": "https://api.github.com/repos/mreiferson/hajiworld/releases{/id}", "created_at": "2011-11-27T18:05:02Z", "updated_at": "2014-01-08T14:10:43Z", "pushed_at": "2011-11-29T02:49:49Z", "git_url": "git://github.com/mreiferson/hajiworld.git", "ssh_url": "git@github.com:mreiferson/hajiworld.git", "clone_url": "https://github.com/mreiferson/hajiworld.git", "svn_url": "https://github.com/mreiferson/hajiworld", "homepage": "", "size": 
27872, "stargazers_count": 1, "watchers_count": 1, "language": "C++", "has_issues": true, "has_downloads": true, "has_wiki": true, "forks_count": 0, "mirror_url": null, "open_issues_count": 0, "forks": 0, "open_issues": 0, "watchers": 1, "default_branch": "master" }, { "id": 14853562, "name": "homebrew", "full_name": "mreiferson/homebrew", "owner": { "login": "mreiferson", "id": 187441, "avatar_url": "https://avatars.githubusercontent.com/u/187441?", "gravatar_id": "dd56a8e1de66aeedb987397511f830e7", "url": "https://api.github.com/users/mreiferson", "html_url": "https://github.com/mreiferson", "followers_url": "https://api.github.com/users/mreiferson/followers", "following_url": "https://api.github.com/users/mreiferson/following{/other_user}", "gists_url": "https://api.github.com/users/mreiferson/gists{/gist_id}", "starred_url": "https://api.github.com/users/mreiferson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mreiferson/subscriptions", "organizations_url": "https://api.github.com/users/mreiferson/orgs", "repos_url": "https://api.github.com/users/mreiferson/repos", "events_url": "https://api.github.com/users/mreiferson/events{/privacy}", "received_events_url": "https://api.github.com/users/mreiferson/received_events", "type": "User", "site_admin": false }, "private": false, "html_url": "https://github.com/mreiferson/homebrew", "description": "The missing package manager for OS X.", "fork": true, "url": "https://api.github.com/repos/mreiferson/homebrew", "forks_url": "https://api.github.com/repos/mreiferson/homebrew/forks", "keys_url": "https://api.github.com/repos/mreiferson/homebrew/keys{/key_id}", "collaborators_url": "https://api.github.com/repos/mreiferson/homebrew/collaborators{/collaborator}", "teams_url": "https://api.github.com/repos/mreiferson/homebrew/teams", "hooks_url": "https://api.github.com/repos/mreiferson/homebrew/hooks", "issue_events_url": "https://api.github.com/repos/mreiferson/homebrew/issues/events{/number}", 
"events_url": "https://api.github.com/repos/mreiferson/homebrew/events", "assignees_url": "https://api.github.com/repos/mreiferson/homebrew/assignees{/user}", "branches_url": "https://api.github.com/repos/mreiferson/homebrew/branches{/branch}", "tags_url": "https://api.github.com/repos/mreiferson/homebrew/tags", "blobs_url": "https://api.github.com/repos/mreiferson/homebrew/git/blobs{/sha}", "git_tags_url": "https://api.github.com/repos/mreiferson/homebrew/git/tags{/sha}", "git_refs_url": "https://api.github.com/repos/mreiferson/homebrew/git/refs{/sha}", "trees_url": "https://api.github.com/repos/mreiferson/homebrew/git/trees{/sha}", "statuses_url": "https://api.github.com/repos/mreiferson/homebrew/statuses/{sha}", "languages_url": "https://api.github.com/repos/mreiferson/homebrew/languages", "stargazers_url": "https://api.github.com/repos/mreiferson/homebrew/stargazers", "contributors_url": "https://api.github.com/repos/mreiferson/homebrew/contributors", "subscribers_url": "https://api.github.com/repos/mreiferson/homebrew/subscribers", "subscription_url": "https://api.github.com/repos/mreiferson/homebrew/subscription", "commits_url": "https://api.github.com/repos/mreiferson/homebrew/commits{/sha}", "git_commits_url": "https://api.github.com/repos/mreiferson/homebrew/git/commits{/sha}", "comments_url": "https://api.github.com/repos/mreiferson/homebrew/comments{/number}", "issue_comment_url": "https://api.github.com/repos/mreiferson/homebrew/issues/comments/{number}", "contents_url": "https://api.github.com/repos/mreiferson/homebrew/contents/{+path}", "compare_url": "https://api.github.com/repos/mreiferson/homebrew/compare/{base}...{head}", "merges_url": "https://api.github.com/repos/mreiferson/homebrew/merges", "archive_url": "https://api.github.com/repos/mreiferson/homebrew/{archive_format}{/ref}", "downloads_url": "https://api.github.com/repos/mreiferson/homebrew/downloads", "issues_url": "https://api.github.com/repos/mreiferson/homebrew/issues{/number}", 
"pulls_url": "https://api.github.com/repos/mreiferson/homebrew/pulls{/number}", "milestones_url": "https://api.github.com/repos/mreiferson/homebrew/milestones{/number}", "notifications_url": "https://api.github.com/repos/mreiferson/homebrew/notifications{?since,all,participating}", "labels_url": "https://api.github.com/repos/mreiferson/homebrew/labels{/name}", "releases_url": "https://api.github.com/repos/mreiferson/homebrew/releases{/id}", "created_at": "2013-12-02T05:20:40Z", "updated_at": "2014-02-17T17:19:19Z", "pushed_at": "2014-02-17T17:06:03Z", "git_url": "git://github.com/mreiferson/homebrew.git", "ssh_url": "git@github.com:mreiferson/homebrew.git", "clone_url": "https://github.com/mreiferson/homebrew.git", "svn_url": "https://github.com/mreiferson/homebrew", "homepage": "http://brew.sh", "size": 29725, "stargazers_count": 0, "watchers_count": 0, "language": "Ruby", "has_issues": false, "has_downloads": false, "has_wiki": true, "forks_count": 0, "mirror_url": null, "open_issues_count": 0, "forks": 0, "open_issues": 0, "watchers": 0, "default_branch": "master" } ] `) ================================================ FILE: vendor/github.com/mreiferson/go-snappystream/reader.go ================================================ package snappystream import ( "bytes" "fmt" "hash/crc32" "io" "io/ioutil" "code.google.com/p/snappy-go/snappy" ) // errMssingStreamID is returned from a reader when the source stream does not // begin with a stream identifier block (4.1 Stream identifier). Its occurance // signifies that the source byte stream is not snappy framed. var errMissingStreamID = fmt.Errorf("missing stream identifier") type reader struct { reader io.Reader err error seenStreamID bool verifyChecksum bool buf bytes.Buffer hdr []byte src []byte dst []byte } // NewReader returns an io.Reader interface to the snappy framed stream format. 
//
// It transparently handles reading the stream identifier (but does not proxy this
// to the caller), decompresses blocks, and (optionally) validates checksums.
//
// Internally, three buffers are maintained. The first two are for reading
// off the wrapped io.Reader and for holding the decompressed block (both are grown
// automatically and re-used and will never exceed the largest block size, 65536). The
// last buffer contains the *unread* decompressed bytes (and can grow indefinitely).
//
// The second param determines whether or not the reader will verify block
// checksums and can be enabled/disabled with the constants VerifyChecksum and SkipVerifyChecksum
//
// For each Read, the returned length will be up to the lesser of len(b) or 65536
// decompressed bytes, regardless of the length of *compressed* bytes read
// from the wrapped io.Reader.
func NewReader(r io.Reader, verifyChecksum bool) io.Reader {
	return &reader{
		reader: r,

		verifyChecksum: verifyChecksum,

		hdr: make([]byte, 4),
		src: make([]byte, 4096),
		dst: make([]byte, 4096),
	}
}

// WriteTo implements the io.WriterTo interface used by io.Copy. It writes
// decoded data from the underlying reader to w. WriteTo returns the number of
// bytes written along with any error encountered.
func (r *reader) WriteTo(w io.Writer) (int64, error) {
	if r.err != nil {
		return 0, r.err
	}

	// drain any decoded bytes already buffered from previous Reads before
	// decoding new frames.
	n, err := r.buf.WriteTo(w)
	if err != nil {
		// r.err doesn't need to be set because a write error occurred and the
		// stream hasn't been corrupted.
		return n, err
	}

	// pass a bufferFallbackWriter to nextFrame so that write errors may be
	// recovered from, allowing the unwritten stream to be read successfully.
	wfallback := &bufferFallbackWriter{
		w:   w,
		buf: &r.buf,
	}
	for {
		var m int
		m, err = r.nextFrame(wfallback)
		if wfallback.writerErr != nil && err == nil {
			// a partial write was made before an error occurred and not all m
			// bytes were written to w. but decoded bytes were successfully
			// buffered and reading can resume later.
			n += wfallback.n
			return n, wfallback.writerErr
		}
		n += int64(m)
		if err == io.EOF {
			return n, nil
		}
		if err != nil {
			r.err = err
			return n, err
		}
	}
	panic("unreachable")
}

// bufferFallbackWriter writes to an underlying io.Writer until an error
// occurs.  If an error occurs in the underlying io.Writer the value is saved
// for later inspection while the bufferFallbackWriter silently starts
// buffering all data written to it. From the caller's perspective
// bufferFallbackWriter has the same Write behavior as a bytes.Buffer.
//
// bufferFallbackWriter is useful for the reader.WriteTo method because it
// allows internal decoding routines to avoid interruption (and subsequent
// stream corruption) due to writing errors.
type bufferFallbackWriter struct {
	w         io.Writer
	buf       *bytes.Buffer
	n         int64 // number of bytes successfully written to w
	writerErr error // any error that occurred writing to w
}

// Write attempts to write b to the underlying io.Writer.  If the underlying
// writer fails or has failed previously unwritten bytes are buffered
// internally.  Write never returns an error but may panic with
// bytes.ErrTooLarge if the buffer grows too large.
func (w *bufferFallbackWriter) Write(b []byte) (int, error) {
	if w.writerErr != nil {
		return w.buf.Write(b)
	}
	n, err := w.w.Write(b)
	w.n += int64(n)
	if err != nil {
		// begin buffering input. bytes.Buffer does not return errors and so we
		// do not need complex error handling here.
		w.writerErr = err
		w.Write(b[n:])
		return len(b), nil
	}
	return n, nil
}

// read drains buffered decoded bytes into b, latching any error (including
// io.EOF) into r.err so subsequent calls fail fast.
func (r *reader) read(b []byte) (int, error) {
	n, err := r.buf.Read(b)
	r.err = err
	return n, err
}

// Read decodes frames from the underlying stream as needed and fills b with
// up to len(b) decompressed bytes.
func (r *reader) Read(b []byte) (int, error) {
	if r.err != nil {
		return 0, r.err
	}
	if r.buf.Len() < len(b) {
		_, r.err = r.nextFrame(&r.buf)
		if r.err == io.EOF {
			// fill b with any remaining bytes in the buffer.
			return r.read(b)
		}
		if r.err != nil {
			return 0, r.err
		}
	}
	return r.read(b)
}

// nextFrame reads frames from the underlying stream until it decodes one data
// block, writing the decoded bytes to w.  Stream identifiers, padding, and
// reserved skippable chunks are consumed transparently.
func (r *reader) nextFrame(w io.Writer) (int, error) {
	for {
		// read the 4-byte snappy frame header
		_, err := io.ReadFull(r.reader, r.hdr)
		if err != nil {
			return 0, err
		}

		// a stream identifier may appear anywhere and contains no information.
		// it must appear at the beginning of the stream. when found, validate
		// it and continue to the next block.
		if r.hdr[0] == blockStreamIdentifier {
			err := r.readStreamID()
			if err != nil {
				return 0, err
			}
			r.seenStreamID = true
			continue
		}
		if !r.seenStreamID {
			return 0, errMissingStreamID
		}

		switch typ := r.hdr[0]; {
		case typ == blockCompressed || typ == blockUncompressed:
			return r.decodeBlock(w)
		case typ == blockPadding || (0x80 <= typ && typ <= 0xfd):
			// skip blocks whose data must not be inspected (4.4 Padding, and 4.6
			// Reserved skippable chunks).
			err := r.discardBlock()
			if err != nil {
				return 0, err
			}
			continue
		default:
			// typ must be unskippable range 0x02-0x7f. Read the block in full
			// and return an error (4.5 Reserved unskippable chunks).
			err = r.discardBlock()
			if err != nil {
				return 0, err
			}
			return 0, fmt.Errorf("unrecognized unskippable frame %#x", r.hdr[0])
		}
	}
	panic("unreachable")
}

// decodeBlock assumes r.hdr[0] to be either blockCompressed or
// blockUncompressed.  It reads the block, validates its size (and checksum,
// when enabled), and writes the decoded payload to w.
func (r *reader) decodeBlock(w io.Writer) (int, error) {
	// read compressed block data and determine if uncompressed data is too
	// large.
	buf, err := r.readBlock()
	if err != nil {
		return 0, err
	}
	declen := len(buf[4:])
	if r.hdr[0] == blockCompressed {
		declen, err = snappy.DecodedLen(buf[4:])
		if err != nil {
			return 0, err
		}
	}
	if declen > MaxBlockSize {
		return 0, fmt.Errorf("decoded block data too large %d > %d", declen, MaxBlockSize)
	}

	// decode data and verify its integrity using the little-endian crc32
	// preceding encoded data
	crc32le, blockdata := buf[:4], buf[4:]
	if r.hdr[0] == blockCompressed {
		r.dst, err = snappy.Decode(r.dst, blockdata)
		if err != nil {
			return 0, err
		}
		blockdata = r.dst
	}
	if r.verifyChecksum {
		checksum := unmaskChecksum(uint32(crc32le[0]) | uint32(crc32le[1])<<8 | uint32(crc32le[2])<<16 | uint32(crc32le[3])<<24)
		actualChecksum := crc32.Checksum(blockdata, crcTable)
		if checksum != actualChecksum {
			return 0, fmt.Errorf("checksum does not match %x != %x", checksum, actualChecksum)
		}
	}
	return w.Write(blockdata)
}

// readStreamID validates a stream identifier block (4.1) whose header has
// already been read into r.hdr.
func (r *reader) readStreamID() error {
	// the length of the block is fixed so don't decode it from the header.
	if !bytes.Equal(r.hdr, streamID[:4]) {
		return fmt.Errorf("invalid stream identifier length")
	}

	// read the identifier block data "sNaPpY"
	block := r.src[:6]
	_, err := noeof(io.ReadFull(r.reader, block))
	if err != nil {
		return err
	}
	if !bytes.Equal(block, streamID[4:]) {
		return fmt.Errorf("invalid stream identifier block")
	}
	return nil
}

// discardBlock consumes and throws away the body of the block whose header is
// in r.hdr.
func (r *reader) discardBlock() error {
	length := uint64(decodeLength(r.hdr[1:]))
	_, err := noeof64(io.CopyN(ioutil.Discard, r.reader, int64(length)))
	return err
}

// readBlock reads the body of the block whose header is in r.hdr into r.src
// (growing it as needed) and returns the filled slice.
func (r *reader) readBlock() ([]byte, error) {
	// check bounds on encoded length (+4 for checksum)
	length := decodeLength(r.hdr[1:])
	if length > (maxEncodedBlockSize + 4) {
		return nil, fmt.Errorf("encoded block data too large %d > %d", length, (maxEncodedBlockSize + 4))
	}

	if int(length) > len(r.src) {
		r.src = make([]byte, length)
	}

	buf := r.src[:length]
	_, err := noeof(io.ReadFull(r.reader, buf))
	if err != nil {
		return nil, err
	}
	return buf, nil
}

// decodeLength decodes a 24-bit (3-byte) little-endian length from b.
func decodeLength(b []byte) uint32 {
	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16
}

// unmaskChecksum reverses the checksum masking defined by the snappy framing
// format (section 3).
func unmaskChecksum(c uint32) uint32 {
	x := c - 0xa282ead8
	return ((x >> 17) | (x << 15))
}

// noeof is used after reads in situations where EOF signifies invalid
// formatting or corruption.
func noeof(n int, err error) (int, error) {
	if err == io.EOF {
		return n, io.ErrUnexpectedEOF
	}
	return n, err
}

// noeof64 is used after long reads (e.g. io.Copy) in situations where io.EOF
// signifies invalid formatting or corruption.
func noeof64(n int64, err error) (int64, error) { if err == io.EOF { return n, io.ErrUnexpectedEOF } return n, err } ================================================ FILE: vendor/github.com/mreiferson/go-snappystream/reader_test.go ================================================ package snappystream import ( "bytes" "crypto/rand" "fmt" "io" "io/ioutil" "strings" "testing" "code.google.com/p/snappy-go/snappy" ) // This test checks that padding and reserved skippable blocks are ignored by // the reader. func TestReader_skippable(t *testing.T) { var buf bytes.Buffer // write some blocks with injected padding/skippable blocks w := NewWriter(&buf) write := func(p []byte) (int, error) { return w.Write(p) } writepad := func(b byte, n int) (int, error) { return buf.Write(opaqueChunk(b, n)) } _, err := write([]byte("hello")) if err != nil { t.Fatalf("write error: %v", err) } _, err = writepad(0xfe, 100) // normal padding if err != nil { t.Fatalf("write error: %v", err) } _, err = write([]byte(" ")) if err != nil { t.Fatalf("write error: %v", err) } _, err = writepad(0xa0, 100) // reserved skippable block if err != nil { t.Fatalf("write error: %v", err) } _, err = writepad(0xfe, MaxBlockSize) // normal padding if err != nil { t.Fatalf("write error: %v", err) } _, err = write([]byte("padding")) if err != nil { t.Fatalf("write error: %v", err) } p, err := ioutil.ReadAll(NewReader(&buf, true)) if err != nil { t.Fatalf("read error: %v", err) } if string(p) != "hello padding" { t.Fatalf("read: unexpected content %q", string(p)) } } // This test checks that reserved unskippable blocks are cause decoder errors. 
func TestReader_unskippable(t *testing.T) {
	var buf bytes.Buffer

	// write some blocks with injected padding/skippable blocks
	w := NewWriter(&buf)
	write := func(p []byte) (int, error) {
		return w.Write(p)
	}
	writepad := func(b byte, n int) (int, error) {
		return buf.Write(opaqueChunk(b, n))
	}
	_, err := write([]byte("unskippable"))
	if err != nil {
		t.Fatalf("write error: %v", err)
	}
	_, err = writepad(0x50, 100) // unskippable reserved block
	if err != nil {
		t.Fatalf("write error: %v", err)
	}
	_, err = write([]byte(" blocks"))
	if err != nil {
		t.Fatalf("write error: %v", err)
	}

	// the unskippable chunk must surface as a decode error.
	_, err = ioutil.ReadAll(NewReader(&buf, true))
	if err == nil {
		t.Fatalf("read success")
	}
}

func TestReaderStreamID(t *testing.T) {
	data := []byte("a snappy-framed data stream")
	var buf bytes.Buffer
	w := NewWriter(&buf)
	_, err := w.Write(data)
	if err != nil {
		t.Fatal(err)
	}
	stream := buf.Bytes()

	// sanity check: the stream can be decoded and starts with streamID
	r := NewReader(bytes.NewReader(stream), true)
	_, err = ioutil.ReadAll(r)
	if err != nil {
		t.Fatalf("read: %v", err)
	}
	if !bytes.HasPrefix(stream, streamID) {
		t.Fatal("missing stream id")
	}

	// streamNoID is valid except for a missing streamID block
	streamNoID := bytes.TrimPrefix(stream, streamID)
	r = NewReader(bytes.NewReader(streamNoID), true)
	n, err := r.Read(make([]byte, 1))
	if err == nil {
		t.Fatalf("read: expected an error reading input missing a stream identifier block")
	}
	if n != 0 {
		t.Fatalf("read: read non-zero number of bytes %d", n)
	}
	// the error must be sticky: a second read fails the same way.
	n, err = r.Read(make([]byte, 1))
	if err == nil {
		t.Fatalf("read: successful read after missing stream id error")
	}
	if n != 0 {
		t.Fatalf("read: read non-zero number of bytes %d after missing stream id error", n)
	}
}

// This test validates the reader successfully decodes a padding of maximal
// size, 2^24 - 1.
func TestReader_maxPad(t *testing.T) {
	buf := bytes.NewReader(bytes.Join([][]byte{
		streamID,
		compressedChunk(t, []byte("a maximal padding chunk")),
		opaqueChunk(0xfe, (1<<24)-1), // normal padding
		compressedChunk(t, []byte(" is decoded successfully")),
	}, nil))
	r := NewReader(buf, true)
	p, err := ioutil.ReadAll(r)
	if err != nil {
		t.Fatalf("read error: %v", err)
	}
	if string(p) != "a maximal padding chunk is decoded successfully" {
		t.Fatalf("read: unexpected content %q", string(p))
	}
}

// This test validates the reader successfully decodes a skippable chunk of
// maximal size, 2^24 - 1.
func TestReader_maxSkippable(t *testing.T) {
	buf := bytes.NewReader(bytes.Join([][]byte{
		streamID,
		compressedChunk(t, []byte("a maximal skippable chunk")),
		opaqueChunk(0xce, (1<<24)-1), // reserved skippable chunk
		compressedChunk(t, []byte(" is decoded successfully")),
	}, nil))
	r := NewReader(buf, true)
	p, err := ioutil.ReadAll(r)
	if err != nil {
		t.Fatalf("read error: %v", err)
	}
	if string(p) != "a maximal skippable chunk is decoded successfully" {
		t.Fatalf("read: unexpected content %q", string(p))
	}
}

// TestReader_maxBlock validates bounds checking on encoded and decoded data
// (4.2 Compressed Data).
func TestReader_maxBlock(t *testing.T) {
	// decompressing a block with compressed length greater than MaxBlockSize
	// should succeed.
	buf := bytes.NewReader(bytes.Join([][]byte{
		streamID,
		compressedChunkGreaterN(t, MaxBlockSize),
	}, nil))
	r := NewReader(buf, true)
	b, err := ioutil.ReadAll(r)
	if err != nil {
		t.Fatal(err)
	}
	if len(b) != MaxBlockSize {
		t.Fatalf("bad read (%d bytes)", len(b))
	}

	// decompressing should fail if the block with decompressed length greater
	// than MaxBlockSize.
	buf = bytes.NewReader(bytes.Join([][]byte{
		streamID,
		compressedChunk(t, make([]byte, MaxBlockSize+1)),
	}, nil))
	r = NewReader(buf, true)
	b, err = ioutil.ReadAll(r)
	if err == nil {
		t.Fatal("unexpected success")
	}
	if len(b) > 0 {
		t.Fatalf("unexpected read %q", b)
	}
}

// This test validates the reader's behavior encountering unskippable chunks of
// maximal size, 2^24 - 1. The desired error in this case is one reporting
// an unskippable chunk, not a length error.
func TestReader_maxUnskippable(t *testing.T) {
	// the first block should be decoded successfully.
	prefix := "a maximal unskippable chunk"
	buf := bytes.NewReader(bytes.Join([][]byte{
		streamID,
		compressedChunk(t, []byte(prefix)),
		opaqueChunk(0x03, (1<<24)-1), // low end of the unskippable range
		compressedChunk(t, []byte(" failure must be reported as such")),
	}, nil))
	p := make([]byte, len(prefix))
	r := NewReader(buf, true)
	n, err := r.Read(p)
	if err != nil {
		t.Fatalf("read error: %v", err)
	}
	if n != len(p) {
		t.Fatalf("read: short read %d", n)
	}
	if string(p) != prefix {
		t.Fatalf("read: bad value %q", p)
	}

	n, err = r.Read(p)
	if err == nil {
		t.Fatalf("read: expected error")
	}
	if n > 0 {
		t.Fatalf("read: read %d more bytes than expected", n)
	}
	if !strings.Contains(err.Error(), "unskippable") {
		t.Fatalf("read error: %v", err)
	}
}

// This test validates errors returned when data blocks exceed size limits.
func TestReader_blockTooLarge(t *testing.T) {
	// the compressed chunk size is within the allowed encoding size
	// (maxEncodedBlockSize). but the uncompressed data is larger than allowed.
	badstream := bytes.Join([][]byte{
		streamID,
		compressedChunk(t, make([]byte, (1<<24)-5)),
	}, nil)
	r := NewReader(bytes.NewBuffer(badstream), true)
	p := make([]byte, 1)
	n, err := r.Read(p)
	if err == nil {
		t.Fatalf("read: expected error")
	}
	if n != 0 {
		t.Fatalf("read: read data from the stream")
	}

	// the uncompressed chunk size is within the allowed encoding size
	// (maxEncodedBlockSize). but the uncompressed data is larger than allowed.
	badstream = bytes.Join([][]byte{
		streamID,
		uncompressedChunk(t, make([]byte, (1<<24)-5)),
	}, nil)
	r = NewReader(bytes.NewBuffer(badstream), true)
	p = make([]byte, 1)
	n, err = r.Read(p)
	if err == nil {
		t.Fatalf("read: expected error")
	}
	if n != 0 {
		t.Fatalf("read: read data from the stream")
	}
}

// This test validates the reader's handling of corrupt chunks.
func TestReader_corruption(t *testing.T) {
	// corruptID is a corrupt stream identifier
	corruptID := append([]byte(nil), streamID...)
	corruptID = bytes.Replace(streamID, []byte("p"), []byte("P"), -1) // corrupt "sNaPpY" data
	badstream := corruptID

	r := NewReader(bytes.NewBuffer(badstream), true)
	p := make([]byte, 1)
	n, err := r.Read(p)
	if err == nil {
		t.Fatalf("read: expected error")
	}
	if err == io.EOF {
		t.Fatalf("read: unexpected eof")
	}
	if n != 0 {
		t.Fatalf("read: read data from the stream")
	}

	corruptID = append([]byte(nil), streamID...)
	// corrupt the length
	corruptID[1] = 0x00
	badstream = corruptID

	r = NewReader(bytes.NewBuffer(badstream), true)
	p = make([]byte, 1)
	n, err = r.Read(p)
	if err == nil {
		t.Fatalf("read: expected error")
	}
	if err == io.EOF {
		t.Fatalf("read: unexpected eof")
	}
	if n != 0 {
		t.Fatalf("read: read data from the stream")
	}

	// chunk is a valid compressed block
	chunk := compressedChunk(t, []byte("a data block"))
	// corrupt is a corrupt chunk
	corrupt := append([]byte(nil), chunk...)
	copy(corrupt[8:], make([]byte, 10)) // corrupt snappy-encoded data
	badstream = bytes.Join([][]byte{
		streamID,
		corrupt,
	}, nil)

	r = NewReader(bytes.NewBuffer(badstream), true)
	p = make([]byte, 1)
	n, err = r.Read(p)
	if err == nil {
		t.Fatalf("read: expected error")
	}
	if err == io.EOF {
		t.Fatalf("read: unexpected eof")
	}
	if n != 0 {
		t.Fatalf("read: read data from the stream")
	}

	corrupt = append([]byte(nil), chunk...)
	copy(corrupt[4:8], make([]byte, 4)) // crc checksum failure
	badstream = bytes.Join([][]byte{
		streamID,
		corrupt,
	}, nil)

	r = NewReader(bytes.NewBuffer(badstream), true)
	p = make([]byte, 1)
	n, err = r.Read(p)
	if err == nil {
		t.Fatalf("read: expected error")
	}
	if err == io.EOF {
		t.Fatalf("read: unexpected eof")
	}
	if n != 0 {
		t.Fatalf("read: read data from the stream")
	}
}

// This test ensures that reader returns io.ErrUnexpectedEOF at the appropriate
// times. io.EOF must be reserved for the case when all data has been
// successfully decoded.
func TestReader_unexpectedEOF(t *testing.T) {
	var decodeBuffer [64 << 10]byte
	for _, test := range [][]byte{
		// truncated streamIDs
		streamID[:4],
		streamID[:len(streamID)-1],
		// truncated data blocks
		bytes.Join([][]byte{
			streamID,
			compressedChunk(t, bytes.Repeat([]byte("abc"), 100))[:2],
		}, nil),
		bytes.Join([][]byte{
			streamID,
			compressedChunk(t, bytes.Repeat([]byte("abc"), 100))[:7],
		}, nil),
		// truncated padding
		bytes.Join([][]byte{
			streamID,
			opaqueChunk(0xfe, 100)[:1],
		}, nil),
		bytes.Join([][]byte{
			streamID,
			opaqueChunk(0xfe, 100)[:8],
		}, nil),
		// truncated skippable chunk
		bytes.Join([][]byte{
			streamID,
			opaqueChunk(0xcf, 100)[:3],
		}, nil),
		bytes.Join([][]byte{
			streamID,
			opaqueChunk(0xcf, 100)[:7],
		}, nil),
		// truncated unskippable chunk
		bytes.Join([][]byte{
			streamID,
			opaqueChunk(0x03, 100)[:3],
		}, nil),
		bytes.Join([][]byte{
			streamID,
			opaqueChunk(0x03, 100)[:5],
		}, nil),
	} {
		r := NewReader(bytes.NewReader(test), true)
		n, err := r.Read(decodeBuffer[:])
		if err == nil {
			t.Errorf("read bad streamID: expected error")
		}
		if err != io.ErrUnexpectedEOF {
			t.Errorf("read bad streamID: %v", err)
		}
		if n != 0 {
			t.Errorf("read bad streamID: expected read length %d", n)
		}
	}
}

var errNotEnoughEntropy = fmt.Errorf("inadequate entropy in PRNG")

// compressedChunkGreaterN like compressedChunk produces a single, compressed,
// snappy-framed block. The returned block will have decoded length at most n
// and encoded length greater than n.
func compressedChunkGreaterN(t *testing.T, n int) []byte { decoded := make([]byte, n) var numTries int var encoded []byte for len(encoded) <= n && numTries < 3 { numTries++ nrd, err := io.ReadFull(rand.Reader, decoded) if err != nil { t.Errorf("crypto/rand: %v", err) return nil } if nrd != n { t.Errorf("crypto/rand: bad read (%d bytes)", nrd) return nil } encoded, err = snappy.Encode(encoded[:cap(encoded)], decoded) if err != nil { t.Errorf("snappy: %v", err) return nil } } if len(encoded) <= n { t.Error(errNotEnoughEntropy) return nil } return compressedChunk(t, decoded) } // compressedChunk encodes b returning a single, compressed, snappy-framed // block. compressedChunk can encode source data larger than allowed in the // specification. func compressedChunk(t *testing.T, src []byte) []byte { encoded, err := snappy.Encode(nil, src) if err != nil { t.Errorf("snappy: %v", err) return nil } if len(encoded) > (1<<24)-5 { // account for the 4-byte checksum t.Errorf("block data too large %d", len(src)) return nil } chunk := make([]byte, len(encoded)+8) writeHeader(chunk[:8], blockCompressed, encoded, src) copy(chunk[8:], encoded) return chunk } // uncompressedChunk encodes b returning a single, uncompressed, snappy-framed // block. uncompressedChunk can encode chunks larger than allowed by the // specification. func uncompressedChunk(t *testing.T, src []byte) []byte { if len(src) > (1<<24)-5 { // account for the 4-byte checksum t.Errorf("block data too large %d", len(src)) return nil } chunk := make([]byte, len(src)+8) writeHeader(chunk[:8], blockUncompressed, src, src) copy(chunk[8:], src) return chunk } // opaqueChunk returns an opaque b chunk (e.g. padding 0xfe) with length n // (total length, n+4 bytes). practically useless but good enough for testing. // the first 4-bytes of data are random to ensure checksums are not being // verified. 
func opaqueChunk(b byte, n int) []byte {
	// default to a padding chunk when no type byte is given.
	if b == 0 {
		b = 0xfe
	}

	// 24-bit little-endian chunk length.
	length := uint32(n)
	lengthle := []byte{byte(length), byte(length >> 8), byte(length >> 16)}
	// a random (invalid) checksum followed by random filler bytes.
	checksum := make([]byte, 4)
	_, err := rand.Read(checksum)
	if err != nil {
		panic(err)
	}
	padbytes := make([]byte, n-4) // let this panic if n < 4
	_, err = rand.Read(padbytes)
	if err != nil {
		panic(err)
	}

	var h []byte
	h = append(h, b)
	h = append(h, lengthle...)
	h = append(h, checksum...)
	h = append(h, padbytes...)

	return h
}

func TestReaderWriteTo(t *testing.T) {
	var encbuf bytes.Buffer
	var decbuf bytes.Buffer
	msg := "hello copy interface"

	w := NewWriter(&encbuf)
	n, err := io.WriteString(w, msg)
	if err != nil {
		t.Fatalf("encode: %v", err)
	}
	if n != len(msg) {
		t.Fatalf("encode: %v", err)
	}

	r := NewReader(&encbuf, true)
	n64, err := r.(*reader).WriteTo(&decbuf)
	if err != nil {
		t.Fatalf("decode: %v", err)
	}
	if n64 != int64(len(msg)) {
		t.Fatalf("decode: decoded %d bytes %q", n64, decbuf.Bytes())
	}

	decmsg := decbuf.String()
	if decmsg != msg {
		t.Fatalf("decode: %q", decmsg)
	}
}

func TestReaderWriteToPreviousError(t *testing.T) {
	// construct an io.Reader that returns an error on the first read and a
	// valid snappy-framed stream on subsequent reads.
	var stream io.Reader
	stream = encodedString("hello")
	stream = readErrorFirst(stream, fmt.Errorf("one time error"))
	stream = NewReader(stream, true)

	var buf bytes.Buffer

	// attempt the first read from the stream.
	n, err := stream.(*reader).WriteTo(&buf)
	if err == nil {
		t.Fatalf("error expected")
	}
	if n != 0 {
		t.Fatalf("bytes written to buffer: %q", buf.Bytes())
	}

	// attempt a second read from the stream. the read error is sticky, so no
	// bytes may be produced even though the underlying stream is now valid.
	n, err = stream.(*reader).WriteTo(&buf)
	if err == nil {
		t.Fatalf("error expected")
	}
	if n != 0 {
		t.Fatalf("bytes written to buffer: %q", buf.Bytes())
	}
}

// readerErrorFirst is an io.Reader that returns an error on the first read.
// readerErrorFirst is used to test that a reader does not attempt to read
// after a read error occurs.
type readerErrorFirst struct {
	r     io.Reader
	err   error
	count int // number of Read calls made so far
}

func readErrorFirst(r io.Reader, err error) io.Reader {
	return &readerErrorFirst{
		r:   r,
		err: err,
	}
}

// Read returns the configured error on the first call and delegates to the
// wrapped reader on all subsequent calls.
func (r *readerErrorFirst) Read(b []byte) (int, error) {
	r.count++
	if r.count == 1 {
		return 0, r.err
	}
	return r.r.Read(b)
}

// TestReaderWriteToWriteError checks that a destination-write failure inside
// WriteTo does not corrupt the reader's state: the stream remains decodable.
func TestReaderWriteToWriteError(t *testing.T) {
	origmsg := "hello"
	stream := NewReader(encodedString(origmsg), true)

	// attempt to write the stream to an io.Writer that will not accept input.
	n, err := stream.(*reader).WriteTo(unwritable(fmt.Errorf("cannot write to this writer")))
	if err == nil {
		t.Fatalf("error expected")
	}
	if n != 0 {
		t.Fatalf("reported %d written to an unwritable writer", n)
	}

	// the decoded message can still be read successfully because the encoded
	// stream was not corrupt/broken.
	var buf bytes.Buffer
	n, err = stream.(*reader).WriteTo(&buf)
	if err != nil {
		t.Errorf("unexpected error: %v", err)
	}
	if n != int64(len(origmsg)) {
		t.Errorf("read %d bytes", n)
	}
	if buf.String() != origmsg {
		t.Errorf("read %q", buf)
	}
}

// writerUnwritable is an io.Writer that always returns an error.
type writerUnwritable struct {
	err error
}

func (w *writerUnwritable) Write([]byte) (int, error) {
	return 0, w.err
}

func unwritable(err error) io.Writer {
	return &writerUnwritable{err}
}

// encodedString returns a reader over s encoded as a snappy framed stream.
func encodedString(s string) io.Reader {
	var buf bytes.Buffer
	w := NewWriter(&buf)
	io.WriteString(w, s)
	return &buf
}

================================================
FILE: vendor/github.com/mreiferson/go-snappystream/readwrite_test.go
================================================
package snappystream

import (
	"bytes"
	"crypto/rand"
	"io"
	"io/ioutil"
	"testing"
)

const TestFileSize = 10 << 20 // 10MB

// dummyBytesReader returns an io.Reader that avoids buffering optimizations
// in io.Copy. This can be considered a 'worst-case' io.Reader as far as writer
// frame alignment goes.
//
// Note: io.Copy uses a 32KB buffer internally as of Go 1.3, but that isn't
// part of its public API (undocumented).
func dummyBytesReader(p []byte) io.Reader { return ioutil.NopCloser(bytes.NewReader(p)) } func testWriteThenRead(t *testing.T, name string, bs []byte) { var buf bytes.Buffer w := NewWriter(&buf) n, err := io.Copy(w, dummyBytesReader(bs)) if err != nil { t.Errorf("write %v: %v", name, err) return } if n != int64(len(bs)) { t.Errorf("write %v: wrote %d bytes (!= %d)", name, n, len(bs)) return } enclen := buf.Len() r := NewReader(&buf, true) gotbs, err := ioutil.ReadAll(r) if err != nil { t.Errorf("read %v: %v", name, err) return } n = int64(len(gotbs)) if n != int64(len(bs)) { t.Errorf("read %v: read %d bytes (!= %d)", name, n, len(bs)) return } if !bytes.Equal(gotbs, bs) { t.Errorf("%v: unequal decompressed content", name) return } c := float64(len(bs)) / float64(enclen) t.Logf("%v compression ratio %.03g (%d byte reduction)", name, c, len(bs)-enclen) } func testBufferedWriteThenRead(t *testing.T, name string, bs []byte) { var buf bytes.Buffer w := NewBufferedWriter(&buf) n, err := io.Copy(w, dummyBytesReader(bs)) if err != nil { t.Errorf("write %v: %v", name, err) return } if n != int64(len(bs)) { t.Errorf("write %v: wrote %d bytes (!= %d)", name, n, len(bs)) return } err = w.Close() if err != nil { t.Errorf("close %v: %v", name, err) return } enclen := buf.Len() r := NewReader(&buf, true) gotbs, err := ioutil.ReadAll(r) if err != nil { t.Errorf("read %v: %v", name, err) return } n = int64(len(gotbs)) if n != int64(len(bs)) { t.Errorf("read %v: read %d bytes (!= %d)", name, n, len(bs)) return } if !bytes.Equal(gotbs, bs) { t.Errorf("%v: unequal decompressed content", name) return } c := float64(len(bs)) / float64(enclen) t.Logf("%v compression ratio %.03g (%d byte reduction)", name, c, len(bs)-enclen) } func TestWriterReader(t *testing.T) { testWriteThenRead(t, "simple", []byte("test")) testWriteThenRead(t, "manpage", testDataMan) testWriteThenRead(t, "json", testDataJSON) p := make([]byte, TestFileSize) testWriteThenRead(t, "constant", p) _, err := rand.Read(p) if 
err != nil { t.Fatal(err) } testWriteThenRead(t, "random", p) } func TestBufferedWriterReader(t *testing.T) { testBufferedWriteThenRead(t, "simple", []byte("test")) testBufferedWriteThenRead(t, "manpage", testDataMan) testBufferedWriteThenRead(t, "json", testDataJSON) p := make([]byte, TestFileSize) testBufferedWriteThenRead(t, "constant", p) _, err := rand.Read(p) if err != nil { t.Fatal(err) } testBufferedWriteThenRead(t, "random", p) } func TestWriterChunk(t *testing.T) { var buf bytes.Buffer in := make([]byte, 128000) w := NewWriter(&buf) r := NewReader(&buf, VerifyChecksum) n, err := w.Write(in) if err != nil { t.Fatalf(err.Error()) } if n != len(in) { t.Fatalf("wrote wrong amount %d != %d", n, len(in)) } out := make([]byte, len(in)) n, err = io.ReadFull(r, out) if err != nil { t.Fatal(err) } if n != len(in) { t.Fatalf("read wrong amount %d != %d", n, len(in)) } if !bytes.Equal(out, in) { t.Fatalf("bytes not equal %v != %v", out, in) } } func BenchmarkWriterManpage(b *testing.B) { benchmarkWriterBytes(b, testDataMan) } func BenchmarkBufferedWriterManpage(b *testing.B) { benchmarkBufferedWriterBytes(b, testDataMan) } func BenchmarkBufferedWriterManpageNoCopy(b *testing.B) { benchmarkBufferedWriterBytesNoCopy(b, testDataMan) } func BenchmarkWriterJSON(b *testing.B) { benchmarkWriterBytes(b, testDataJSON) } func BenchmarkBufferedWriterJSON(b *testing.B) { benchmarkBufferedWriterBytes(b, testDataJSON) } func BenchmarkBufferedWriterJSONNoCopy(b *testing.B) { benchmarkBufferedWriterBytesNoCopy(b, testDataJSON) } // BenchmarkWriterRandom tests performance encoding effectively uncompressable // data. 
func BenchmarkWriterRandom(b *testing.B) {
	benchmarkWriterBytes(b, randBytes(b, TestFileSize))
}
func BenchmarkBufferedWriterRandom(b *testing.B) {
	benchmarkBufferedWriterBytes(b, randBytes(b, TestFileSize))
}
func BenchmarkBufferedWriterRandomNoCopy(b *testing.B) {
	benchmarkBufferedWriterBytesNoCopy(b, randBytes(b, TestFileSize))
}

// BenchmarkWriterConstant tests performance encoding maximally compressible
// data.
func BenchmarkWriterConstant(b *testing.B) {
	benchmarkWriterBytes(b, make([]byte, TestFileSize))
}
func BenchmarkBufferedWriterConstant(b *testing.B) {
	benchmarkBufferedWriterBytes(b, make([]byte, TestFileSize))
}
func BenchmarkBufferedWriterConstantNoCopy(b *testing.B) {
	benchmarkBufferedWriterBytesNoCopy(b, make([]byte, TestFileSize))
}

func benchmarkWriterBytes(b *testing.B, p []byte) {
	enc := func() io.WriteCloser {
		// wrap the normal writer so that it has a noop Close method. writer
		// does not implement ReaderFrom so this does not impact performance.
		return &nopWriteCloser{NewWriter(ioutil.Discard)}
	}
	benchmarkEncode(b, enc, p)
}
func benchmarkBufferedWriterBytes(b *testing.B, p []byte) {
	enc := func() io.WriteCloser {
		// the writer's ReaderFrom implementation will be used in the benchmark.
		return NewBufferedWriter(ioutil.Discard)
	}
	benchmarkEncode(b, enc, p)
}
func benchmarkBufferedWriterBytesNoCopy(b *testing.B, p []byte) {
	enc := func() io.WriteCloser {
		// the writer is wrapped so as to hide its ReaderFrom implementation.
		return &writeCloserNoCopy{NewBufferedWriter(ioutil.Discard)}
	}
	benchmarkEncode(b, enc, p)
}

// benchmarkEncode benchmarks the speed at which bytes can be copied from
// bs into writers created by enc.
func benchmarkEncode(b *testing.B, enc func() io.WriteCloser, bs []byte) {
	size := int64(len(bs))
	b.SetBytes(size)
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		w := enc()
		n, err := io.Copy(w, dummyBytesReader(bs))
		if err != nil {
			b.Fatal(err)
		}
		if n != size {
			b.Fatalf("wrote wrong amount %d != %d", n, size)
		}
		// Close flushes any buffered data; its cost is part of the benchmark.
		err = w.Close()
		if err != nil {
			b.Fatalf("close: %v", err)
		}
	}
	b.StopTimer()
}

func BenchmarkReaderManpage(b *testing.B) {
	encodeAndBenchmarkReader(b, testDataMan)
}
func BenchmarkReaderManpage_buffered(b *testing.B) {
	encodeAndBenchmarkReader_buffered(b, testDataMan)
}
func BenchmarkReaderManpageNoCopy(b *testing.B) {
	encodeAndBenchmarkReaderNoCopy(b, testDataMan)
}

func BenchmarkReaderJSON(b *testing.B) {
	encodeAndBenchmarkReader(b, testDataJSON)
}
func BenchmarkReaderJSON_buffered(b *testing.B) {
	encodeAndBenchmarkReader_buffered(b, testDataJSON)
}
func BenchmarkReaderJSONNoCopy(b *testing.B) {
	encodeAndBenchmarkReaderNoCopy(b, testDataJSON)
}

// BenchmarkReaderRandom tests decoding of effectively uncompressable data.
func BenchmarkReaderRandom(b *testing.B) {
	encodeAndBenchmarkReader(b, randBytes(b, TestFileSize))
}
func BenchmarkReaderRandom_buffered(b *testing.B) {
	encodeAndBenchmarkReader_buffered(b, randBytes(b, TestFileSize))
}
func BenchmarkReaderRandomNoCopy(b *testing.B) {
	encodeAndBenchmarkReaderNoCopy(b, randBytes(b, TestFileSize))
}

// BenchmarkReaderConstant tests decoding of maximally compressible data.
func BenchmarkReaderConstant(b *testing.B) {
	encodeAndBenchmarkReader(b, make([]byte, TestFileSize))
}
func BenchmarkReaderConstant_buffered(b *testing.B) {
	encodeAndBenchmarkReader_buffered(b, make([]byte, TestFileSize))
}
func BenchmarkReaderConstantNoCopy(b *testing.B) {
	encodeAndBenchmarkReaderNoCopy(b, make([]byte, TestFileSize))
}

// encodeAndBenchmarkReader is a helper that benchmarks the package
// reader's performance given p encoded as a snappy framed stream.
//
// encodeAndBenchmarkReader benchmarks decoding of streams containing
// (multiple) short frames.
func encodeAndBenchmarkReader(b *testing.B, p []byte) {
	enc, err := encodeStreamBytes(p, false)
	if err != nil {
		b.Fatalf("pre-benchmark compression: %v", err)
	}
	dec := func(r io.Reader) io.Reader {
		return NewReader(r, VerifyChecksum)
	}
	benchmarkDecode(b, dec, int64(len(p)), enc)
}

// encodeAndBenchmarkReader_buffered is a helper that benchmarks the
// package reader's performance given p encoded as a snappy framed stream.
//
// encodeAndBenchmarkReader_buffered benchmarks decoding of streams that
// contain at most one short frame (at the end).
func encodeAndBenchmarkReader_buffered(b *testing.B, p []byte) {
	enc, err := encodeStreamBytes(p, true)
	if err != nil {
		b.Fatalf("pre-benchmark compression: %v", err)
	}
	dec := func(r io.Reader) io.Reader {
		return NewReader(r, VerifyChecksum)
	}
	benchmarkDecode(b, dec, int64(len(p)), enc)
}

// encodeAndBenchmarkReaderNoCopy is a helper that benchmarks the
// package reader's performance given p encoded as a snappy framed stream.
// encodeAndBenchmarReaderNoCopy avoids use of the reader's io.WriterTo
// interface.
//
// encodeAndBenchmarkReaderNoCopy benchmarks decoding of streams that
// contain at most one short frame (at the end).
func encodeAndBenchmarkReaderNoCopy(b *testing.B, p []byte) {
	enc, err := encodeStreamBytes(p, true)
	if err != nil {
		b.Fatalf("pre-benchmark compression: %v", err)
	}
	dec := func(r io.Reader) io.Reader {
		// NopCloser hides the reader's WriterTo implementation from io.Copy.
		return ioutil.NopCloser(NewReader(r, VerifyChecksum))
	}
	benchmarkDecode(b, dec, int64(len(p)), enc)
}

// benchmarkDecode runs a benchmark that repeatedly decoded snappy
// framed bytes enc. The length of the decoded result in each iteration must
// equal size.
func benchmarkDecode(b *testing.B, dec func(io.Reader) io.Reader, size int64, enc []byte) { b.SetBytes(int64(len(enc))) // BUG this is probably wrong b.ResetTimer() for i := 0; i < b.N; i++ { r := dec(bytes.NewReader(enc)) n, err := io.Copy(ioutil.Discard, r) if err != nil { b.Fatalf(err.Error()) } if n != size { b.Fatalf("read wrong amount %d != %d", n, size) } } b.StopTimer() } // encodeStreamBytes is like encodeStream but operates on a byte slice. // encodeStreamBytes ensures that long streams are not maximally compressed if // buffer is false. func encodeStreamBytes(b []byte, buffer bool) ([]byte, error) { return encodeStream(dummyBytesReader(b), buffer) } // encodeStream encodes data read from r as a snappy framed stream and returns // the result as a byte slice. if buffer is true the bytes from r are buffered // to improve the resulting slice's compression ratio. func encodeStream(r io.Reader, buffer bool) ([]byte, error) { var buf bytes.Buffer if !buffer { w := NewWriter(&buf) _, err := io.Copy(w, r) if err != nil { return nil, err } return buf.Bytes(), nil } w := NewBufferedWriter(&buf) _, err := io.Copy(w, r) if err != nil { return nil, err } err = w.Close() if err != nil { return nil, err } return buf.Bytes(), nil } // randBytes reads size bytes from the computer's cryptographic random source. // the resulting bytes have approximately maximal entropy and are effectively // uncompressible with any algorithm. func randBytes(b *testing.B, size int) []byte { randp := make([]byte, size) _, err := io.ReadFull(rand.Reader, randp) if err != nil { b.Fatal(err) } return randp } // writeCloserNoCopy is an io.WriteCloser that simply wraps another // io.WriteCloser. This is useful for masking implementations for interfaces // like ReaderFrom which may be opted into use inside functions like io.Copy. type writeCloserNoCopy struct { io.WriteCloser } // nopWriteCloser is an io.WriteCloser that has a noop Close method. 
// This type has the effect of masking the underlying writer's Close
// implementation if it has one, or satisfying interface implementations for
// writers that do not need to be closing.
type nopWriteCloser struct {
	io.Writer
}

func (w *nopWriteCloser) Close() error {
	return nil
}

================================================
FILE: vendor/github.com/mreiferson/go-snappystream/snappystream.go
================================================
// snappystream wraps snappy-go and supplies a Reader and Writer
// for the snappy framed stream format:
// https://snappy.googlecode.com/svn/trunk/framing_format.txt
package snappystream

import (
	"hash/crc32"

	"code.google.com/p/snappy-go/snappy"
)

// Ext is the file extension for files whose content is a snappy framed stream.
const Ext = ".sz"

// MediaType is the MIME type used to represent snappy framed content.
const MediaType = "application/x-snappy-framed"

// ContentEncoding is the appropriate HTTP Content-Encoding header value for
// requests containing a snappy framed entity body.
const ContentEncoding = "x-snappy-framed"

// MaxBlockSize is the maximum number of decoded bytes allowed to be
// represented in a snappy framed block (sections 4.2 and 4.3).
const MaxBlockSize = 65536

// maxEncodedBlockSize is the maximum number of encoded bytes in a framed
// block.
var maxEncodedBlockSize = uint32(snappy.MaxEncodedLen(MaxBlockSize))

// VerifyChecksum and SkipVerifyChecksum are named boolean values for the
// checksum-verification argument accepted by NewReader.
const VerifyChecksum = true
const SkipVerifyChecksum = false

// Block types defined by the snappy framed format specification.
const (
	blockCompressed       = 0x00
	blockUncompressed     = 0x01
	blockPadding          = 0xfe
	blockStreamIdentifier = 0xff
)

// streamID is the stream identifier block that begins a valid snappy framed
// stream.
var streamID = []byte{0xff, 0x06, 0x00, 0x00, 0x73, 0x4e, 0x61, 0x50, 0x70, 0x59}

// maskChecksum implements the checksum masking algorithm described by the spec.
func maskChecksum(c uint32) uint32 {
	return ((c >> 15) | (c << 17)) + 0xa282ead8
}

// crcTable is the CRC32 (Castagnoli polynomial) table used for block checksums.
var crcTable *crc32.Table

func init() {
	crcTable = crc32.MakeTable(crc32.Castagnoli)
}

================================================
FILE: vendor/github.com/mreiferson/go-snappystream/writer.go
================================================
package snappystream

import (
	"bufio"
	"errors"
	"fmt"
	"hash/crc32"
	"io"

	"code.google.com/p/snappy-go/snappy"
)

var errClosed = fmt.Errorf("closed")

// BufferedWriter is an io.WriteCloser with behavior similar to writers
// returned by NewWriter but it buffers written data, maximizing block size (to
// improve the output compression ratio) at the cost of speed. Benefits over
// NewWriter are most noticeable when individual writes are small and when
// streams are long.
//
// Failure to call a BufferedWriter's Close or Flush methods after it is done
// being written to will likely result in missing data frames which will be
// undetectable in the decoding process.
//
// NOTE: BufferedWriter cannot be instantiated via struct literal and must
// use NewBufferedWriter (i.e. its zero value is not usable).
type BufferedWriter struct {
	err error
	w   *writer
	bw  *bufio.Writer
}

// NewBufferedWriter allocates and returns a BufferedWriter with an internal
// buffer of MaxBlockSize bytes. If an error occurs writing a block to w, all
// future writes will fail with the same error. After all data has been
// written, the client should call the Flush method to guarantee all data has
// been forwarded to the underlying io.Writer.
func NewBufferedWriter(w io.Writer) *BufferedWriter {
	_w := NewWriter(w).(*writer)
	return &BufferedWriter{
		w:  _w,
		bw: bufio.NewWriterSize(_w, MaxBlockSize),
	}
}

// ReadFrom implements the io.ReaderFrom interface used by io.Copy. It encodes
// data read from r as a snappy framed stream that is written to the underlying
// writer. ReadFrom returns the number of bytes read, along with any
// error encountered (other than io.EOF).
func (w *BufferedWriter) ReadFrom(r io.Reader) (int64, error) {
	if w.err != nil {
		return 0, w.err
	}

	var n int64
	n, w.err = w.bw.ReadFrom(r)
	return n, w.err
}

// Write buffers p internally, encoding and writing a block to the underlying
// buffer if the buffer grows beyond MaxBlockSize bytes. The returned int
// will be 0 if there was an error and len(p) otherwise.
func (w *BufferedWriter) Write(p []byte) (int, error) {
	if w.err != nil {
		return 0, w.err
	}

	_, w.err = w.bw.Write(p)
	if w.err != nil {
		return 0, w.err
	}

	return len(p), nil
}

// Flush encodes and writes a block with the contents of w's internal buffer to
// the underlying writer even if the buffer does not contain a full block of
// data (MaxBlockSize bytes).
func (w *BufferedWriter) Flush() error {
	if w.err == nil {
		w.err = w.bw.Flush()
	}

	return w.err
}

// Close flushes w's internal buffer and tears down internal data structures.
// After a successful call to Close method calls on w return an error. Close
// makes no attempt to close the underlying writer.
func (w *BufferedWriter) Close() error {
	if w.err != nil {
		return w.err
	}

	w.err = w.bw.Flush()
	w.w = nil
	w.bw = nil

	if w.err != nil {
		return w.err
	}

	// poison w.err so every subsequent Write/Flush/Close reports errClosed.
	w.err = errClosed
	return nil
}

// writer is the io.Writer implementation returned by NewWriter. It frames
// each block and writes it directly to the wrapped io.Writer.
type writer struct {
	writer io.Writer
	err    error

	hdr []byte // scratch space for the 8-byte block header
	dst []byte // reusable destination buffer for snappy encoding

	sentStreamID bool // whether the stream identifier block has been emitted
}

// NewWriter returns an io.Writer that writes its input to an underlying
// io.Writer encoded as a snappy framed stream. A stream identifier block is
// written to w preceding the first data block. The returned writer will never
// emit a block with length in bytes greater than MaxBlockSize+4 nor one
// containing more than MaxBlockSize bytes of (uncompressed) data.
//
// For each Write, the returned length will only ever be len(p) or 0,
// regardless of the length of *compressed* bytes written to the wrapped
// io.Writer. If the returned length is 0 then error will be non-nil.
// If
// len(p) exceeds 65536, the slice will be automatically chunked into smaller
// blocks which are all emitted before the call returns.
func NewWriter(w io.Writer) io.Writer {
	return &writer{
		writer: w,

		hdr: make([]byte, 8),
		dst: make([]byte, 4096),
	}
}

// Write splits p into chunks of at most MaxBlockSize bytes and emits each
// chunk as its own framed block. It returns len(p), or 0 with a non-nil
// error.
func (w *writer) Write(p []byte) (int, error) {
	if w.err != nil {
		return 0, w.err
	}

	total := 0
	sz := MaxBlockSize
	var n int
	for i := 0; i < len(p); i += n {
		if i+sz > len(p) {
			sz = len(p) - i
		}

		n, w.err = w.write(p[i : i+sz])
		if w.err != nil {
			return 0, w.err
		}
		total += n
	}
	return total, nil
}

// write attempts to encode p as a block and write it to the underlying writer.
// The returned int may not equal p's length if compression below
// MaxBlockSize-4 could not be achieved.
func (w *writer) write(p []byte) (int, error) {
	var err error

	if len(p) > MaxBlockSize {
		return 0, errors.New(fmt.Sprintf("block too large %d > %d", len(p), MaxBlockSize))
	}

	w.dst = w.dst[:cap(w.dst)] // Encode does dumb resize w/o context. reslice avoids alloc.
	w.dst, err = snappy.Encode(w.dst, p)
	if err != nil {
		return 0, err
	}
	block := w.dst
	n := len(p)
	compressed := true

	// check for data which is better left uncompressed. this is determined if
	// the encoded content is longer than the source.
	if len(w.dst) >= len(p) {
		compressed = false
		block = p[:n]
	}

	// lazily emit the stream identifier before the first data block.
	if !w.sentStreamID {
		_, err := w.writer.Write(streamID)
		if err != nil {
			return 0, err
		}
		w.sentStreamID = true
	}

	// set the block type
	if compressed {
		writeHeader(w.hdr, blockCompressed, block, p[:n])
	} else {
		writeHeader(w.hdr, blockUncompressed, block, p[:n])
	}

	_, err = w.writer.Write(w.hdr)
	if err != nil {
		return 0, err
	}

	_, err = w.writer.Write(block)
	if err != nil {
		return 0, err
	}

	return n, nil
}

// writeHeader panics if len(hdr) is less than 8.
func writeHeader(hdr []byte, btype byte, enc, dec []byte) { hdr[0] = btype // 3 byte little endian length of encoded content length := uint32(len(enc)) + 4 // +4 for checksum hdr[1] = byte(length) hdr[2] = byte(length >> 8) hdr[3] = byte(length >> 16) // 4 byte little endian CRC32 checksum of decoded content checksum := maskChecksum(crc32.Checksum(dec, crcTable)) hdr[4] = byte(checksum) hdr[5] = byte(checksum >> 8) hdr[6] = byte(checksum >> 16) hdr[7] = byte(checksum >> 24) } ================================================ FILE: vendor/github.com/mreiferson/go-snappystream/writer_test.go ================================================ package snappystream import ( "bytes" "io/ioutil" "log" "testing" ) // This test ensures that all BufferedWriter methods fail after Close has been // called. func TestBufferedWriterClose(t *testing.T) { w := NewBufferedWriter(ioutil.Discard) err := w.Close() if err != nil { log.Fatalf("closing empty BufferedWriter: %v", err) } err = w.Close() if err == nil { log.Fatalf("successful close after close") } err = w.Flush() if err == nil { log.Fatalf("successful flush after close") } _, err = w.Write([]byte("abc")) if err == nil { log.Fatalf("successful write after close") } } // This test simply checks that buffering has an effect in a situation where it // is know it should. 
func TestBufferedWriter_compression(t *testing.T) {
	p := []byte("hello snappystream!")
	n := 10

	// encode n copies of p with the plain (unbuffered) writer.
	var shortbuf bytes.Buffer
	w := NewWriter(&shortbuf)
	for i := 0; i < n; i++ {
		n, err := w.Write(p)
		if err != nil {
			t.Fatalf("writer error: %v", err)
		}
		if n != len(p) {
			t.Fatalf("short write: %d", n)
		}
	}

	// encode the same data with a BufferedWriter.
	var buf bytes.Buffer
	bw := NewBufferedWriter(&buf)
	for i := 0; i < n; i++ {
		n, err := bw.Write(p)
		if err != nil {
			t.Fatalf("buffered writer error: %v", err)
		}
		if n != len(p) {
			t.Fatalf("short write: %d", n)
		}
	}
	err := bw.Close()
	if err != nil {
		t.Fatalf("closing buffer: %v", err)
	}

	uncompressed := int64(n) * int64(len(p))
	compressed := shortbuf.Len()
	bufcompressed := buf.Len()

	// buffering must produce strictly smaller output for this input.
	if compressed <= bufcompressed {
		t.Fatalf("no benefit from buffering (%d <= %d)", shortbuf.Len(), buf.Len())
	}

	c := float64(uncompressed) / float64(compressed)
	bufc := float64(uncompressed) / float64(bufcompressed)
	improved := bufc / c

	t.Logf("BufferedWriter compression ratio %g (%.03g factor improvement over %g)", bufc, improved, c)
}

// This test ensures flushing after every write is equivalent to using
// NewWriter directly.
func TestBufferedWriterFlush(t *testing.T) {
	p := []byte("hello snappystream!")
	n := 10

	// reference output produced by the plain (unbuffered) writer.
	var shortbuf bytes.Buffer
	w := NewWriter(&shortbuf)
	for i := 0; i < n; i++ {
		n, err := w.Write(p)
		if err != nil {
			t.Fatalf("writer error: %v", err)
		}
		if n != len(p) {
			t.Fatalf("short write: %d", n)
		}
	}

	// a BufferedWriter flushed after every write must produce identical bytes.
	var buf bytes.Buffer
	bw := NewBufferedWriter(&buf)
	for i := 0; i < n; i++ {
		n, err := bw.Write(p)
		if err != nil {
			t.Fatalf("buffered writer error: %v", err)
		}
		if n != len(p) {
			t.Fatalf("short write: %d", n)
		}
		err = bw.Flush()
		if err != nil {
			t.Fatalf("flush: %v", err)
		}
	}
	err := bw.Close()
	if err != nil {
		t.Fatalf("closing buffer: %v", err)
	}

	if shortbuf.Len() != buf.Len() {
		t.Fatalf("unexpected size: %d != %d", shortbuf.Len(), buf.Len())
	}
	if !bytes.Equal(shortbuf.Bytes(), buf.Bytes()) {
		t.Fatalf("unexpected bytes")
	}
}

================================================
FILE: vendor/golang.org/x/net/context/context.go
================================================
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package context defines the Context type, which carries deadlines,
// cancelation signals, and other request-scoped values across API boundaries
// and between processes.
//
// Incoming requests to a server should create a Context, and outgoing calls to
// servers should accept a Context. The chain of function calls between must
// propagate the Context, optionally replacing it with a modified copy created
// using WithDeadline, WithTimeout, WithCancel, or WithValue.
//
// Programs that use Contexts should follow these rules to keep interfaces
// consistent across packages and enable static analysis tools to check context
// propagation:
//
// Do not store Contexts inside a struct type; instead, pass a Context
// explicitly to each function that needs it.
// The Context should be the first
// parameter, typically named ctx:
//
// 	func DoSomething(ctx context.Context, arg Arg) error {
// 		// ... use ctx ...
// 	}
//
// Do not pass a nil Context, even if a function permits it. Pass context.TODO
// if you are unsure about which Context to use.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
//
// The same Context may be passed to functions running in different goroutines;
// Contexts are safe for simultaneous use by multiple goroutines.
//
// See http://blog.golang.org/context for example code for a server that uses
// Contexts.
package context

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context interface {
	// Deadline returns the time when work done on behalf of this context
	// should be canceled. Deadline returns ok==false when no deadline is
	// set. Successive calls to Deadline return the same results.
	Deadline() (deadline time.Time, ok bool)

	// Done returns a channel that's closed when work done on behalf of this
	// context should be canceled. Done may return nil if this context can
	// never be canceled. Successive calls to Done return the same value.
	//
	// WithCancel arranges for Done to be closed when cancel is called;
	// WithDeadline arranges for Done to be closed when the deadline
	// expires; WithTimeout arranges for Done to be closed when the timeout
	// elapses.
	//
	// Done is provided for use in select statements:
	//
	// 	// Stream generates values with DoSomething and sends them to out
	// 	// until DoSomething returns an error or ctx.Done is closed.
	// 	func Stream(ctx context.Context, out <-chan Value) error {
	// 		for {
	// 			v, err := DoSomething(ctx)
	// 			if err != nil {
	// 				return err
	// 			}
	// 			select {
	// 			case <-ctx.Done():
	// 				return ctx.Err()
	// 			case out <- v:
	// 			}
	// 		}
	// 	}
	//
	// See http://blog.golang.org/pipelines for more examples of how to use
	// a Done channel for cancelation.
	Done() <-chan struct{}

	// Err returns a non-nil error value after Done is closed. Err returns
	// Canceled if the context was canceled or DeadlineExceeded if the
	// context's deadline passed. No other values for Err are defined.
	// After Done is closed, successive calls to Err return the same value.
	Err() error

	// Value returns the value associated with this context for key, or nil
	// if no value is associated with key. Successive calls to Value with
	// the same key returns the same result.
	//
	// Use context values only for request-scoped data that transits
	// processes and API boundaries, not for passing optional parameters to
	// functions.
	//
	// A key identifies a specific value in a Context. Functions that wish
	// to store values in Context typically allocate a key in a global
	// variable then use that key as the argument to context.WithValue and
	// Context.Value. A key can be any type that supports equality;
	// packages should define keys as an unexported type to avoid
	// collisions.
	//
	// Packages that define a Context key should provide type-safe accessors
	// for the values stores using that key:
	//
	// 	// Package user defines a User type that's stored in Contexts.
	// 	package user
	//
	// 	import "golang.org/x/net/context"
	//
	// 	// User is the type of value stored in the Contexts.
	// 	type User struct {...}
	//
	// 	// key is an unexported type for keys defined in this package.
	// 	// This prevents collisions with keys defined in other packages.
	// 	type key int
	//
	// 	// userKey is the key for user.User values in Contexts. It is
	// 	// unexported; clients use user.NewContext and user.FromContext
	// 	// instead of using this key directly.
	// 	var userKey key = 0
	//
	// 	// NewContext returns a new Context that carries value u.
	// 	func NewContext(ctx context.Context, u *User) context.Context {
	// 		return context.WithValue(ctx, userKey, u)
	// 	}
	//
	// 	// FromContext returns the User value stored in ctx, if any.
	// 	func FromContext(ctx context.Context) (*User, bool) {
	// 		u, ok := ctx.Value(userKey).(*User)
	// 		return u, ok
	// 	}
	Value(key interface{}) interface{}
}

// Canceled is the error returned by Context.Err when the context is canceled.
var Canceled = errors.New("context canceled")

// DeadlineExceeded is the error returned by Context.Err when the context's
// deadline passes.
var DeadlineExceeded = errors.New("context deadline exceeded")

// An emptyCtx is never canceled, has no values, and has no deadline. It is not
// struct{}, since vars of this type must have distinct addresses.
type emptyCtx int

func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
	return
}

func (*emptyCtx) Done() <-chan struct{} {
	return nil
}

func (*emptyCtx) Err() error {
	return nil
}

func (*emptyCtx) Value(key interface{}) interface{} {
	return nil
}

func (e *emptyCtx) String() string {
	switch e {
	case background:
		return "context.Background"
	case todo:
		return "context.TODO"
	}
	return "unknown empty Context"
}

var (
	background = new(emptyCtx)
	todo       = new(emptyCtx)
)

// Background returns a non-nil, empty Context. It is never canceled, has no
// values, and has no deadline. It is typically used by the main function,
// initialization, and tests, and as the top-level Context for incoming
// requests.
func Background() Context {
	return background
}

// TODO returns a non-nil, empty Context. Code should use context.TODO when
// it's unclear which Context to use or it's is not yet available (because the
// surrounding function has not yet been extended to accept a Context
// parameter). TODO is recognized by static analysis tools that determine
// whether Contexts are propagated correctly in a program.
func TODO() Context {
	return todo
}

// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc func()

// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
	c := newCancelCtx(parent)
	propagateCancel(parent, &c)
	return &c, func() { c.cancel(true, Canceled) }
}

// newCancelCtx returns an initialized cancelCtx.
func newCancelCtx(parent Context) cancelCtx {
	return cancelCtx{
		Context: parent,
		done:    make(chan struct{}),
	}
}

// propagateCancel arranges for child to be canceled when parent is.
func propagateCancel(parent Context, child canceler) {
	if parent.Done() == nil {
		return // parent is never canceled
	}
	if p, ok := parentCancelCtx(parent); ok {
		// fast path: register child directly on the nearest *cancelCtx.
		p.mu.Lock()
		if p.err != nil {
			// parent has already been canceled
			child.cancel(false, p.err)
		} else {
			if p.children == nil {
				p.children = make(map[canceler]bool)
			}
			p.children[child] = true
		}
		p.mu.Unlock()
	} else {
		// slow path: the parent is an unknown Context implementation; watch
		// it in a goroutine until either parent or child is done.
		go func() {
			select {
			case <-parent.Done():
				child.cancel(false, parent.Err())
			case <-child.Done():
			}
		}()
	}
}

// parentCancelCtx follows a chain of parent references until it finds a
// *cancelCtx. This function understands how each of the concrete types in this
// package represents its parent.
func parentCancelCtx(parent Context) (*cancelCtx, bool) {
	for {
		switch c := parent.(type) {
		case *cancelCtx:
			return c, true
		case *timerCtx:
			return &c.cancelCtx, true
		case *valueCtx:
			parent = c.Context
		default:
			return nil, false
		}
	}
}

// removeChild removes a context from its parent.
func removeChild(parent Context, child canceler) {
	p, ok := parentCancelCtx(parent)
	if !ok {
		return
	}
	p.mu.Lock()
	if p.children != nil {
		delete(p.children, child)
	}
	p.mu.Unlock()
}

// A canceler is a context type that can be canceled directly. The
// implementations are *cancelCtx and *timerCtx.
type canceler interface {
	cancel(removeFromParent bool, err error)
	Done() <-chan struct{}
}

// A cancelCtx can be canceled. When canceled, it also cancels any children
// that implement canceler.
type cancelCtx struct {
	Context

	done chan struct{} // closed by the first cancel call.

	mu       sync.Mutex
	children map[canceler]bool // set to nil by the first cancel call
	err      error             // set to non-nil by the first cancel call
}

func (c *cancelCtx) Done() <-chan struct{} {
	return c.done
}

func (c *cancelCtx) Err() error {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.err
}

func (c *cancelCtx) String() string {
	return fmt.Sprintf("%v.WithCancel", c.Context)
}

// cancel closes c.done, cancels each of c's children, and, if
// removeFromParent is true, removes c from its parent's children.
// The first call wins; later calls return without effect.
func (c *cancelCtx) cancel(removeFromParent bool, err error) {
	if err == nil {
		panic("context: internal error: missing cancel error")
	}
	c.mu.Lock()
	if c.err != nil {
		c.mu.Unlock()
		return // already canceled
	}
	c.err = err
	close(c.done)
	for child := range c.children {
		// NOTE: acquiring the child's lock while holding parent's lock.
		child.cancel(false, err)
	}
	c.children = nil
	c.mu.Unlock()

	if removeFromParent {
		removeChild(c.Context, c)
	}
}

// WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
	if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
		// The current deadline is already sooner than the new one.
		return WithCancel(parent)
	}
	c := &timerCtx{
		cancelCtx: newCancelCtx(parent),
		deadline:  deadline,
	}
	propagateCancel(parent, c)
	d := deadline.Sub(time.Now())
	if d <= 0 {
		c.cancel(true, DeadlineExceeded) // deadline has already passed
		return c, func() { c.cancel(true, Canceled) }
	}
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.err == nil {
		// Only arm the timer if nothing (e.g. a canceled parent via
		// propagateCancel above) has already canceled this context.
		c.timer = time.AfterFunc(d, func() {
			c.cancel(true, DeadlineExceeded)
		})
	}
	return c, func() { c.cancel(true, Canceled) }
}

// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
// implement Done and Err. It implements cancel by stopping its timer then
// delegating to cancelCtx.cancel.
type timerCtx struct {
	cancelCtx
	timer *time.Timer // Under cancelCtx.mu.

	deadline time.Time
}

func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
	return c.deadline, true
}

func (c *timerCtx) String() string {
	return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
}

func (c *timerCtx) cancel(removeFromParent bool, err error) {
	c.cancelCtx.cancel(false, err)
	if removeFromParent {
		// Remove this timerCtx from its parent cancelCtx's children.
		removeChild(c.cancelCtx.Context, c)
	}
	c.mu.Lock()
	if c.timer != nil {
		c.timer.Stop()
		c.timer = nil
	}
	c.mu.Unlock()
}

// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
//	func slowOperationWithTimeout(ctx context.Context) (Result, error) {
//		ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
//		defer cancel() // releases resources if slowOperation completes before timeout elapses
//		return slowOperation(ctx)
//	}
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
	return WithDeadline(parent, time.Now().Add(timeout))
}

// WithValue returns a copy of parent in which the value associated with key is
// val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key interface{}, val interface{}) Context {
	return &valueCtx{parent, key, val}
}

// A valueCtx carries a key-value pair. It implements Value for that key and
// delegates all other calls to the embedded Context.
type valueCtx struct {
	Context
	key, val interface{}
}

func (c *valueCtx) String() string {
	return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
}

// Value walks up the context chain: each valueCtx answers only for its own
// key and otherwise defers to its parent.
func (c *valueCtx) Value(key interface{}) interface{} {
	if c.key == key {
		return c.val
	}
	return c.Context.Value(key)
}

================================================
FILE: vendor/golang.org/x/net/context/context_test.go
================================================
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package context

import (
	"fmt"
	"math/rand"
	"runtime"
	"strings"
	"sync"
	"testing"
	"time"
)

// otherContext is a Context that's not one of the types defined in context.go.
// This lets us test code paths that differ based on the underlying type of the
// Context.
type otherContext struct {
	Context
}

func TestBackground(t *testing.T) {
	c := Background()
	if c == nil {
		t.Fatalf("Background returned nil")
	}
	select {
	case x := <-c.Done():
		t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
	default:
	}
	if got, want := fmt.Sprint(c), "context.Background"; got != want {
		t.Errorf("Background().String() = %q want %q", got, want)
	}
}

func TestTODO(t *testing.T) {
	c := TODO()
	if c == nil {
		t.Fatalf("TODO returned nil")
	}
	select {
	case x := <-c.Done():
		t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
	default:
	}
	if got, want := fmt.Sprint(c), "context.TODO"; got != want {
		t.Errorf("TODO().String() = %q want %q", got, want)
	}
}

func TestWithCancel(t *testing.T) {
	c1, cancel := WithCancel(Background())
	if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want {
		t.Errorf("c1.String() = %q want %q", got, want)
	}
	o := otherContext{c1}
	c2, _ := WithCancel(o)
	contexts := []Context{c1, o, c2}
	for i, c := range contexts {
		if d := c.Done(); d == nil {
			t.Errorf("c[%d].Done() == %v want non-nil", i, d)
		}
		if e := c.Err(); e != nil {
			t.Errorf("c[%d].Err() == %v want nil", i, e)
		}
		select {
		case x := <-c.Done():
			t.Errorf("<-c.Done() == %v want nothing (it should block)", x)
		default:
		}
	}
	cancel()
	time.Sleep(100 * time.Millisecond) // let cancelation propagate
	for i, c := range contexts {
		select {
		case <-c.Done():
		default:
			t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i)
		}
		if e := c.Err(); e != Canceled {
			t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled)
		}
	}
}

func TestParentFinishesChild(t *testing.T) {
	// Context tree:
	// parent -> cancelChild
	// parent -> valueChild -> timerChild
	parent, cancel := WithCancel(Background())
	cancelChild, stop := WithCancel(parent)
	defer stop()
	valueChild := WithValue(parent, "key", "value")
	timerChild, stop := WithTimeout(valueChild, 10000*time.Hour)
	defer stop()
	select {
	case x := <-parent.Done():
		t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
	case x := <-cancelChild.Done():
		t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x)
	case x := <-timerChild.Done():
		t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x)
	case x := <-valueChild.Done():
		t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x)
	default:
	}
	// The parent's children should contain the two cancelable children.
	pc := parent.(*cancelCtx)
	cc := cancelChild.(*cancelCtx)
	tc := timerChild.(*timerCtx)
	pc.mu.Lock()
	if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] {
		t.Errorf("bad linkage: pc.children = %v, want %v and %v", pc.children, cc, tc)
	}
	pc.mu.Unlock()
	if p, ok := parentCancelCtx(cc.Context); !ok || p != pc {
		t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc)
	}
	if p, ok := parentCancelCtx(tc.Context); !ok || p != pc {
		t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc)
	}
	cancel()
	pc.mu.Lock()
	if len(pc.children) != 0 {
		t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children)
	}
	pc.mu.Unlock()
	// parent and children should all be finished.
	check := func(ctx Context, name string) {
		select {
		case <-ctx.Done():
		default:
			t.Errorf("<-%s.Done() blocked, but shouldn't have", name)
		}
		if e := ctx.Err(); e != Canceled {
			t.Errorf("%s.Err() == %v want %v", name, e, Canceled)
		}
	}
	check(parent, "parent")
	check(cancelChild, "cancelChild")
	check(valueChild, "valueChild")
	check(timerChild, "timerChild")
	// WithCancel should return a canceled context on a canceled parent.
	precanceledChild := WithValue(parent, "key", "value")
	select {
	case <-precanceledChild.Done():
	default:
		t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have")
	}
	if e := precanceledChild.Err(); e != Canceled {
		t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled)
	}
}

func TestChildFinishesFirst(t *testing.T) {
	cancelable, stop := WithCancel(Background())
	defer stop()
	for _, parent := range []Context{Background(), cancelable} {
		child, cancel := WithCancel(parent)
		select {
		case x := <-parent.Done():
			t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
		case x := <-child.Done():
			t.Errorf("<-child.Done() == %v want nothing (it should block)", x)
		default:
		}
		cc := child.(*cancelCtx)
		pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background()
		if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) {
			t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok)
		}
		if pcok {
			pc.mu.Lock()
			if len(pc.children) != 1 || !pc.children[cc] {
				t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc)
			}
			pc.mu.Unlock()
		}
		cancel()
		if pcok {
			pc.mu.Lock()
			if len(pc.children) != 0 {
				t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children)
			}
			pc.mu.Unlock()
		}
		// child should be finished.
		select {
		case <-child.Done():
		default:
			t.Errorf("<-child.Done() blocked, but shouldn't have")
		}
		if e := child.Err(); e != Canceled {
			t.Errorf("child.Err() == %v want %v", e, Canceled)
		}
		// parent should not be finished.
		select {
		case x := <-parent.Done():
			t.Errorf("<-parent.Done() == %v want nothing (it should block)", x)
		default:
		}
		if e := parent.Err(); e != nil {
			t.Errorf("parent.Err() == %v want nil", e)
		}
	}
}

// testDeadline asserts that c's Done channel closes with DeadlineExceeded
// within wait.
func testDeadline(c Context, wait time.Duration, t *testing.T) {
	select {
	case <-time.After(wait):
		t.Fatalf("context should have timed out")
	case <-c.Done():
	}
	if e := c.Err(); e != DeadlineExceeded {
		t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded)
	}
}

func TestDeadline(t *testing.T) {
	c, _ := WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
	if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) {
		t.Errorf("c.String() = %q want prefix %q", got, prefix)
	}
	testDeadline(c, 200*time.Millisecond, t)

	c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
	o := otherContext{c}
	testDeadline(o, 200*time.Millisecond, t)

	c, _ = WithDeadline(Background(), time.Now().Add(100*time.Millisecond))
	o = otherContext{c}
	c, _ = WithDeadline(o, time.Now().Add(300*time.Millisecond))
	testDeadline(c, 200*time.Millisecond, t)
}

func TestTimeout(t *testing.T) {
	c, _ := WithTimeout(Background(), 100*time.Millisecond)
	if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) {
		t.Errorf("c.String() = %q want prefix %q", got, prefix)
	}
	testDeadline(c, 200*time.Millisecond, t)

	c, _ = WithTimeout(Background(), 100*time.Millisecond)
	o := otherContext{c}
	testDeadline(o, 200*time.Millisecond, t)

	c, _ = WithTimeout(Background(), 100*time.Millisecond)
	o = otherContext{c}
	c, _ = WithTimeout(o, 300*time.Millisecond)
	testDeadline(c, 200*time.Millisecond, t)
}

func TestCanceledTimeout(t *testing.T) {
	c, _ := WithTimeout(Background(), 200*time.Millisecond)
	o := otherContext{c}
	c, cancel := WithTimeout(o, 400*time.Millisecond)
	cancel()
	time.Sleep(100 * time.Millisecond) // let cancelation propagate
	select {
	case <-c.Done():
	default:
		t.Errorf("<-c.Done() blocked, but shouldn't have")
	}
	if e := c.Err(); e != Canceled {
		t.Errorf("c.Err() == %v want %v", e, Canceled)
	}
}

type key1 int
type key2 int

var k1 = key1(1)
var k2 = key2(1) // same int as k1, different type
var k3 = key2(3) // same type as k2, different int

func TestValues(t *testing.T) {
	check := func(c Context, nm, v1, v2, v3 string) {
		if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 {
			t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0)
		}
		if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 {
			t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0)
		}
		if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 {
			t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0)
		}
	}

	c0 := Background()
	check(c0, "c0", "", "", "")

	c1 := WithValue(Background(), k1, "c1k1")
	check(c1, "c1", "c1k1", "", "")

	if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want {
		t.Errorf("c.String() = %q want %q", got, want)
	}

	c2 := WithValue(c1, k2, "c2k2")
	check(c2, "c2", "c1k1", "c2k2", "")

	c3 := WithValue(c2, k3, "c3k3")
	check(c3, "c2", "c1k1", "c2k2", "c3k3")

	c4 := WithValue(c3, k1, nil)
	check(c4, "c4", "", "c2k2", "c3k3")

	o0 := otherContext{Background()}
	check(o0, "o0", "", "", "")

	o1 := otherContext{WithValue(Background(), k1, "c1k1")}
	check(o1, "o1", "c1k1", "", "")

	o2 := WithValue(o1, k2, "o2k2")
	check(o2, "o2", "c1k1", "o2k2", "")

	o3 := otherContext{c4}
	check(o3, "o3", "", "c2k2", "c3k3")

	o4 := WithValue(o3, k3, nil)
	check(o4, "o4", "", "c2k2", "")
}

func TestAllocs(t *testing.T) {
	bg := Background()
	for _, test := range []struct {
		desc       string
		f          func()
		limit      float64
		gccgoLimit float64
	}{
		{
			desc:       "Background()",
			f:          func() { Background() },
			limit:      0,
			gccgoLimit: 0,
		},
		{
			desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1),
			f: func() {
				c := WithValue(bg, k1, nil)
				c.Value(k1)
			},
			limit:      3,
			gccgoLimit: 3,
		},
		{
			desc: "WithTimeout(bg, 15*time.Millisecond)",
			f: func() {
				c, _ := WithTimeout(bg, 15*time.Millisecond)
				<-c.Done()
			},
			limit:      8,
			gccgoLimit: 15,
		},
		{
			desc: "WithCancel(bg)",
			f: func() {
				c, cancel := WithCancel(bg)
				cancel()
				<-c.Done()
			},
			limit:      5,
			gccgoLimit: 8,
		},
		{
			desc: "WithTimeout(bg, 100*time.Millisecond)",
			f: func() {
				c, cancel := WithTimeout(bg, 100*time.Millisecond)
				cancel()
				<-c.Done()
			},
			limit:      8,
			gccgoLimit: 25,
		},
	} {
		limit := test.limit
		if runtime.Compiler == "gccgo" {
			// gccgo does not yet do escape analysis.
			// TODO(iant): Remove this when gccgo does do escape analysis.
			limit = test.gccgoLimit
		}
		if n := testing.AllocsPerRun(100, test.f); n > limit {
			t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit))
		}
	}
}

func TestSimultaneousCancels(t *testing.T) {
	root, cancel := WithCancel(Background())
	m := map[Context]CancelFunc{root: cancel}
	q := []Context{root}
	// Create a tree of contexts.
	for len(q) != 0 && len(m) < 100 {
		parent := q[0]
		q = q[1:]
		for i := 0; i < 4; i++ {
			ctx, cancel := WithCancel(parent)
			m[ctx] = cancel
			q = append(q, ctx)
		}
	}
	// Start all the cancels in a random order.
	var wg sync.WaitGroup
	wg.Add(len(m))
	for _, cancel := range m {
		go func(cancel CancelFunc) {
			cancel()
			wg.Done()
		}(cancel)
	}
	// Wait on all the contexts in a random order.
	for ctx := range m {
		select {
		case <-ctx.Done():
		case <-time.After(1 * time.Second):
			buf := make([]byte, 10<<10)
			n := runtime.Stack(buf, true)
			t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n])
		}
	}
	// Wait for all the cancel functions to return.
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()
	select {
	case <-done:
	case <-time.After(1 * time.Second):
		buf := make([]byte, 10<<10)
		n := runtime.Stack(buf, true)
		t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n])
	}
}

func TestInterlockedCancels(t *testing.T) {
	parent, cancelParent := WithCancel(Background())
	child, cancelChild := WithCancel(parent)
	go func() {
		parent.Done()
		cancelChild()
	}()
	cancelParent()
	select {
	case <-child.Done():
	case <-time.After(1 * time.Second):
		buf := make([]byte, 10<<10)
		n := runtime.Stack(buf, true)
		t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n])
	}
}

func TestLayersCancel(t *testing.T) {
	testLayers(t, time.Now().UnixNano(), false)
}

func TestLayersTimeout(t *testing.T) {
	testLayers(t, time.Now().UnixNano(), true)
}

// testLayers builds a randomized stack of value/cancel/timeout contexts and
// verifies values survive and cancelation (or timeout) propagates to the top.
func testLayers(t *testing.T, seed int64, testTimeout bool) {
	rand.Seed(seed)
	errorf := func(format string, a ...interface{}) {
		t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...)
	}
	const (
		timeout   = 200 * time.Millisecond
		minLayers = 30
	)
	type value int
	var (
		vals      []*value
		cancels   []CancelFunc
		numTimers int
		ctx       = Background()
	)
	for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ {
		switch rand.Intn(3) {
		case 0:
			v := new(value)
			ctx = WithValue(ctx, v, v)
			vals = append(vals, v)
		case 1:
			var cancel CancelFunc
			ctx, cancel = WithCancel(ctx)
			cancels = append(cancels, cancel)
		case 2:
			var cancel CancelFunc
			ctx, cancel = WithTimeout(ctx, timeout)
			cancels = append(cancels, cancel)
			numTimers++
		}
	}
	checkValues := func(when string) {
		for _, key := range vals {
			if val := ctx.Value(key).(*value); key != val {
				errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key)
			}
		}
	}
	select {
	case <-ctx.Done():
		errorf("ctx should not be canceled yet")
	default:
	}
	if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) {
		t.Errorf("ctx.String() = %q want prefix %q", s, prefix)
	}
	t.Log(ctx)
	checkValues("before cancel")
	if testTimeout {
		select {
		case <-ctx.Done():
		case <-time.After(timeout + timeout/10):
			errorf("ctx should have timed out")
		}
		checkValues("after timeout")
	} else {
		cancel := cancels[rand.Intn(len(cancels))]
		cancel()
		select {
		case <-ctx.Done():
		default:
			errorf("ctx should be canceled")
		}
		checkValues("after cancel")
	}
}

func TestCancelRemoves(t *testing.T) {
	checkChildren := func(when string, ctx Context, want int) {
		if got := len(ctx.(*cancelCtx).children); got != want {
			t.Errorf("%s: context has %d children, want %d", when, got, want)
		}
	}

	ctx, _ := WithCancel(Background())
	checkChildren("after creation", ctx, 0)
	_, cancel := WithCancel(ctx)
	checkChildren("with WithCancel child ", ctx, 1)
	cancel()
	checkChildren("after cancelling WithCancel child", ctx, 0)

	ctx, _ = WithCancel(Background())
	checkChildren("after creation", ctx, 0)
	_, cancel = WithTimeout(ctx, 60*time.Minute)
	checkChildren("with WithTimeout child ", ctx, 1)
	cancel()
	checkChildren("after cancelling WithTimeout child", ctx, 0)
}

================================================
FILE: vendor/golang.org/x/net/context/ctxhttp/cancelreq.go
================================================
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.5

package ctxhttp

import "net/http"

// canceler returns a function that cancels req using the Go 1.5+
// Request.Cancel channel mechanism.
func canceler(client *http.Client, req *http.Request) func() {
	ch := make(chan struct{})
	req.Cancel = ch

	return func() {
		close(ch)
	}
}

================================================
FILE: vendor/golang.org/x/net/context/ctxhttp/cancelreq_go14.go
================================================
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.5

package ctxhttp

import "net/http"

type requestCanceler interface {
	CancelRequest(*http.Request)
}

// canceler returns a function that cancels req on pre-1.5 Go by calling the
// transport's CancelRequest, if the transport supports it; otherwise the
// returned function is a no-op.
func canceler(client *http.Client, req *http.Request) func() {
	rc, ok := client.Transport.(requestCanceler)
	if !ok {
		return func() {}
	}
	return func() {
		rc.CancelRequest(req)
	}
}

================================================
FILE: vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go
================================================
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package ctxhttp provides helper functions for performing context-aware HTTP requests.
package ctxhttp

import (
	"io"
	"net/http"
	"net/url"
	"strings"

	"golang.org/x/net/context"
)

// Do sends an HTTP request with the provided http.Client and returns an HTTP response.
// If the client is nil, http.DefaultClient is used.
// If the context is canceled or times out, ctx.Err() will be returned.
func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) {
	if client == nil {
		client = http.DefaultClient
	}

	// Request cancelation changed in Go 1.5, see cancelreq.go and cancelreq_go14.go.
	cancel := canceler(client, req)

	type responseAndError struct {
		resp *http.Response
		err  error
	}
	result := make(chan responseAndError, 1)

	go func() {
		resp, err := client.Do(req)
		result <- responseAndError{resp, err}
	}()

	// NOTE(review): if ctx fires after client.Do has already returned a
	// response but before we receive from result, that response's Body is
	// never closed here; later upstream versions of ctxhttp close it on this
	// path — confirm whether this vendored copy should be bumped.
	select {
	case <-ctx.Done():
		cancel()
		return nil, ctx.Err()
	case r := <-result:
		return r.resp, r.err
	}
}

// Get issues a GET request via the Do function.
func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	return Do(ctx, client, req)
}

// Head issues a HEAD request via the Do function.
func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) {
	req, err := http.NewRequest("HEAD", url, nil)
	if err != nil {
		return nil, err
	}
	return Do(ctx, client, req)
}

// Post issues a POST request via the Do function.
func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) {
	req, err := http.NewRequest("POST", url, body)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", bodyType)
	return Do(ctx, client, req)
}

// PostForm issues a POST request via the Do function.
func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) {
	return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode()))
}

================================================
FILE: vendor/golang.org/x/net/context/ctxhttp/ctxhttp_test.go
================================================
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ctxhttp

import (
	"io/ioutil"
	"net/http"
	"net/http/httptest"
	"testing"
	"time"

	"golang.org/x/net/context"
)

const (
	requestDuration = 100 * time.Millisecond
	requestBody     = "ok"
)

func TestNoTimeout(t *testing.T) {
	ctx := context.Background()
	resp, err := doRequest(ctx)

	if resp == nil || err != nil {
		t.Fatalf("error received from client: %v %v", err, resp)
	}
}

func TestCancel(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		time.Sleep(requestDuration / 2)
		cancel()
	}()

	resp, err := doRequest(ctx)

	if resp != nil || err == nil {
		t.Fatalf("expected error, didn't get one. resp: %v", resp)
	}
	if err != ctx.Err() {
		t.Fatalf("expected error from context but got: %v", err)
	}
}

func TestCancelAfterRequest(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())

	// NOTE(review): the error returned by doRequest is not checked before
	// resp.Body is used below; a failed request would nil-panic here.
	resp, err := doRequest(ctx)

	// Cancel before reading the body.
	// Request.Body should still be readable after the context is canceled.
	cancel()

	b, err := ioutil.ReadAll(resp.Body)
	if err != nil || string(b) != requestBody {
		t.Fatalf("could not read body: %q %v", b, err)
	}
}

// doRequest spins up a test server that sleeps requestDuration before
// replying with requestBody, then issues a GET against it via ctxhttp.Get.
func doRequest(ctx context.Context) (*http.Response, error) {
	var okHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		time.Sleep(requestDuration)
		w.Write([]byte(requestBody))
	})

	serv := httptest.NewServer(okHandler)
	defer serv.Close()

	return Get(ctx, nil, serv.URL)
}

================================================
FILE: vendor/golang.org/x/net/context/withtimeout_test.go
================================================
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package context_test

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

func ExampleWithTimeout() {
	// Pass a context with a timeout to tell a blocking function that it
	// should abandon its work after the timeout elapses.
	ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond)
	select {
	case <-time.After(200 * time.Millisecond):
		fmt.Println("overslept")
	case <-ctx.Done():
		fmt.Println(ctx.Err()) // prints "context deadline exceeded"
	}
	// Output:
	// context deadline exceeded
}