[
  {
    "path": ".gitignore",
    "content": ""
  },
  {
    "path": ".gitmodules",
    "content": "[submodule \"docs\"]\n\tpath = docs\n\turl = https://github.com/nvcook42/morgoth.git\n\tbranch = gh-pages\n"
  },
  {
    "path": ".travis.yml",
    "content": "language: go\nsudo: false\ngo:\n  - 1.7\n\ninstall: true\nscript:\n  - go test -v $(go list ./... | grep -v vendor/)\n\nbefore_deploy:\n  # Build binary\n  - go get -u github.com/mitchellh/gox\n  - CGO_ENABLED=0 gox ./cmd/morgoth\n\ndeploy:\n  provider: releases\n  api_key:\n    secure: cK0/w0ggcVawInO+b++4qqwMGrVoP7PzNkTxaNq+tbFc9cu8CSUS2+PlgRkdoRYfD+fohri3lqdzHUH0rcC7gOfx/Dvtw7mcs14gzfvuNL+/xMlKn/mBoPkOGPXQeh86qsmGbsrgDzv/BwGNgxqqEimxdiev2ZDlssTD16RQZR8=\n  file:\n    - morgoth_darwin_386\n    - morgoth_darwin_amd64\n    - morgoth_freebsd_386\n    - morgoth_freebsd_amd64\n    - morgoth_freebsd_arm\n    - morgoth_linux_386\n    - morgoth_linux_amd64\n    - morgoth_linux_arm\n    - morgoth_netbsd_386\n    - morgoth_netbsd_amd64\n    - morgoth_netbsd_arm\n    - morgoth_openbsd_386\n    - morgoth_openbsd_amd64\n    - morgoth_windows_386.exe\n    - morgoth_windows_amd64.exe\n  skip_cleanup: true\n  on:\n    tags: true\n"
  },
  {
    "path": "Gopkg.toml",
    "content": "\n[[constraint]]\n  branch = \"master\"\n  name = \"github.com/influxdata/kapacitor\"\n\n[[constraint]]\n  branch = \"master\"\n  name = \"github.com/prometheus/client_golang\"\n\n[[constraint]]\n  branch = \"master\"\n  name = \"github.com/prometheus/client_model\"\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. 
For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. 
Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n"
  },
  {
    "path": "README.md",
    "content": "\nMorgoth [![Build Status](https://travis-ci.org/nathanielc/morgoth.svg?branch=master)](https://travis-ci.org/nathanielc/morgoth)\n=======\n\nMorgoth is a framework for flexible anomaly detection algorithms packaged to be used with [Kapacitor](https://github.com/influxdata/kapacitor/)\n\nMorgoth provides a framework for implementing the smaller pieces of an anomaly detection problem.\n\nThe basic framework is that Morgoth maintains a dictionary of normal behaviors and compares new windows of data to the normal dictionary.\nIf the new window of data is not found in the dictionary then it is considered anomalous.\n\nMorgoth uses algorithms, called fingerprinters, to compare windows of data to determine if they are similar.\nThe [Lossy Counting Algorithm](http://www.vldb.org/conf/2002/S10P03.pdf)(LCA) is used to maintain the dictionary of normal windows.\nThe LCA is a space efficient algorithm that can account for drift in the normal dictionary, more on LCA below.\n\nMorgoth uses a consensus model where each fingerprinter votes for whether it thinks the current window is anomalous.\nIf the total votes percentage is greater than a consensus threshold then the window is considered anomalous.\n\n## Getting started\n\n### Install\n\nMorgoth can be installed via go:\n\n```sh\ngo get github.com/nathanielc/morgoth/cmd/morgoth\n```\n\n### Configuring\n\nMorgoth can run as either a child process of Kapacitor or as a standalone daemon that listens on a socket.\n\n#### Child Process\n\nMorgoth is a UDF for [Kapacitor](https://github.com/influxdata/kapacitor).\nAdd this configuration to Kapacitor in order to enable using Morgoth.\n\n```\n[udf]\n  [udf.functions]\n    [udf.functions.morgoth]\n      prog = \"/path/to/bin/morgoth\"\n      timeout = \"10s\"\n```\n\nRestart Kapacitor and you are ready to start using Morgoth within Kapacitor.\n\n#### Socket\n\nTo use Morgoth as a socket UDF start the morgoth process with the `-socket` option.\n\n```\n   morgoth -socket /path/to/morgoth/socket\n```\n\nNext you will need to configure Kapacitor to use the morgoth socket.\n\n```\n[udf]\n  [udf.functions]\n    [udf.functions.morgoth]\n      socket = \"/path/to/morgoth/socket\"\n      timeout = \"10s\"\n```\n\nRestart Kapacitor and you are ready to start using Morgoth within Kapacitor.\n\n\n### TICKscript\n\nHere is an example TICKscript for detecting anomalies in cpu data from [Telegraf](https://github.com/influxdata/telegraf).\n\n```javascript\nstream\n    |from()\n        .measurement('cpu')\n        .where(lambda: \"cpu\" == 'cpu-total')\n        .groupBy(*)\n    |window()\n        .period(1m)\n        .every(1m)\n    @morgoth()\n        // track the 'usage_idle' field\n        .field('usage_idle')\n        // label output data as anomalous using the 'anomalous' boolean field.\n        .anomalousField('anomalous')\n        .errorTolerance(0.01)\n        // The window is anomalous if it occurs less the 5% of the time.\n        .minSupport(0.05)\n        // Use the sigma fingerprinter\n        .sigma(3.0)\n        // Multiple fingerprinters can be defined...\n    |alert()\n        // Trigger a critical alert when the window is marked as anomalous.\n        .crit(lambda: \"anomalous\")\n```\n\n\n## Fingerprinters\n\nA fingerprinter is a method that can determine if a window of data is similar to a previous window of data.\nIn effect the fingerprinters take fingerprints of the incoming data and can compare fingerprints of new data to see if they match.\nThese fingerprinting algorithms provide the 
\n## Lossy Counting Algorithm\n\nThe LCA counts frequent items in a stream of data.\nIt is *lossy* because, to conserve space, it will drop less frequent items.\nThe result is that the algorithm will find frequent items but may lose track of less frequent items.\nMore on the specific mathematical properties of the algorithm can be found below.\n\nThere are two parameters to the algorithm: error tolerance (e) and minimum support (m).\nFirst, e is in the range [0, 1] and is an error bound, interpreted as a percentage value.\nFor example, given e = 0.01 (1%), items less than 1% frequent in the data set can be dropped.\nDecreasing e will require more space but will keep track of less frequent items.\nIncreasing e will require less space but will lose track of less frequent items.\nSecond, m is in the range [0, 1] and is a minimum support such that items considered frequent have a frequency of at least m.\nFor example, if m = 0.05 (5%) then an item with a support less than 5% is not considered frequent, and hence not normal.\nThe minimum support becomes the threshold for when items are considered anomalous.\n\nNotice that m > e; this is so that we reduce the number of false positives.\nFor example, say we set e = 5% and m = 5% (violating m > e).\nIf a *normal* behavior X has a true frequency of 6%, then based on variations in the true frequency, X might fall below 5% for a small interval and be dropped.\nThis will cause X's frequency to be underestimated, which will cause it to be flagged as an anomaly, triggering a false positive.\nBy setting e < m we have a buffer to help mitigate creating false positives.\n\n### Properties\n\nThe Lossy Counting algorithm has three properties:\n\n1. there are no false negatives,\n2. false positives are guaranteed to have a count of at least (m - e)*N,\n3. the count of an item can be underestimated by at most e*N,\n\nwhere N is the number of items encountered.\n\nThe space requirements for the algorithm are at most (1 / e) * log(e*N).\nIt has also been shown that if the items with low frequency are uniformly random, then the space requirements are no more than 7 / e.\nThis means that as Morgoth continues to process windows of data its memory usage will grow as the log of the number of windows and can reach a stable upper bound.\n
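\nA minimal sketch of the counter in action, mirroring the setup used in `counter/lossy_counter_test.go` (the `idFP` type here is purely illustrative):\n\n```go\npackage main\n\nimport (\n    \"fmt\"\n\n    \"github.com/nathanielc/morgoth/counter\"\n    \"github.com/prometheus/client_golang/prometheus\"\n)\n\n// idFP is an illustrative Countable: two fingerprints match when their IDs are equal.\ntype idFP struct{ id int }\n\nfunc (f idFP) IsMatch(other counter.Countable) bool {\n    o, ok := other.(idFP)\n    return ok && f.id == o.id\n}\n\nfunc main() {\n    metrics := &counter.Metrics{\n        UniqueFingerprints: prometheus.NewGauge(prometheus.GaugeOpts{Name: \"unique\", Help: \"help\"}),\n        Distribution:       prometheus.NewGaugeVec(prometheus.GaugeOpts{Name: \"distribution\", Help: \"help\"}, []string{\"fp\"}),\n    }\n\n    // e = 0.10: items less than 10% frequent may be dropped.\n    lc := counter.NewLossyCounter(metrics, 0.10)\n\n    fmt.Println(lc.Count(idFP{1})) // support 1/1 = 1.0\n    fmt.Println(lc.Count(idFP{1})) // support 2/2 = 1.0\n    fmt.Println(lc.Count(idFP{2})) // support 1/3 = 0.333...\n}\n```\n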
\n## Metrics\n\nMorgoth exposes metrics about each detector and fingerprinter.\nThe metrics are exposed as a Prometheus `/metrics` endpoint over HTTP.\nBy default the metrics HTTP endpoint binds to `:6767`.\n\n>NOTE: Using the metrics HTTP endpoint only makes sense if you are using Morgoth in socket mode, as otherwise each new process would collide on the bind port.\n\nMetrics will have some or all of these labels:\n\n* task - the Kapacitor task ID.\n* node - the ID of the morgoth node within the Kapacitor task.\n* group - the Kapacitor group ID.\n* fingerprinter - the unique name for the specific fingerprinter, e.g. `sigma-0`.\n\nThe most useful metric for debugging why Morgoth is not behaving as expected is likely to be the `morgoth_unique_fingerprints` gauge.\nThe metric reports the number of unique fingerprints each fingerprinter is tracking.\nThis is useful because if the number is large or growing with each new window, it is likely that the fingerprinter is erroneously marking every window as anomalous.\nThis visibility into each fingerprinter makes it possible to tune Morgoth as needed.\n\nUsing Kapacitor's scraping service you can scrape the Morgoth UDF process for these metrics and consume them within Kapacitor.\nSee this [tutorial](https://docs.influxdata.com/kapacitor/latest/pull_metrics/scraping-and-discovery/) for more information.\n"
  },
  {
    "path": "cmd/morgoth/main.go",
    "content": "package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net/http\"\n\t\"os\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"github.com/influxdata/kapacitor/udf/agent\"\n\t\"github.com/influxdata/wlog\"\n\t\"github.com/nathanielc/morgoth\"\n\t\"github.com/nathanielc/morgoth/counter\"\n\t\"github.com/nathanielc/morgoth/fingerprinters/jsdiv\"\n\t\"github.com/nathanielc/morgoth/fingerprinters/kstest\"\n\t\"github.com/nathanielc/morgoth/fingerprinters/sigma\"\n\t\"github.com/pkg/errors\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/client_golang/prometheus/promhttp\"\n)\n\nconst (\n\tdefaultMinSupport      = 0.05\n\tdefaultErrorTolerance  = 0.01\n\tdefaultConsensus       = 0.5\n\tdefaultMetricsBindAddr = \":6767\"\n\tdefaultAnomalousField  = \"anomalous\"\n)\n\nvar socket = flag.String(\"socket\", \"\", \"Optional listen socket. If set then Morgoth will run in UDF socket mode, otherwise it will expect communication over STDIN/STDOUT.\")\nvar logLevel = flag.String(\"log-level\", \"info\", \"Default log level, one of debug, info, warn or error.\")\nvar metricsBind = flag.String(\"metrics-bind\", defaultMetricsBindAddr, \"Bind address of the metrics HTTP server. The metrics server will only start if also using the socket mode of operation.\")\n\nvar detectorGauge = prometheus.NewGauge(prometheus.GaugeOpts{\n\tName: \"morgoth_detectors\",\n\tHelp: \"Current number of active detectors.\",\n})\n\nfunc init() {\n\tprometheus.MustRegister(detectorGauge)\n}\n\nfunc main() {\n\t// Parse flags\n\tflag.Parse()\n\n\t// Setup logging\n\tlog.SetOutput(wlog.NewWriter(os.Stderr))\n\tif err := wlog.SetLevelFromName(*logLevel); err != nil {\n\t\tlog.Fatal(\"E! \", err)\n\t}\n\n\t// Create error channels\n\tmetricsErr := make(chan error, 1)\n\terrC := make(chan error, 1)\n\tif *socket == \"\" {\n\t\ta := agent.New(os.Stdin, os.Stdout)\n\t\th := newHandler(a)\n\t\ta.Handler = h\n\t\tdefer h.Stop()\n\n\t\tlog.Println(\"I! Starting agent using STDIN/STDOUT\")\n\t\ta.Start()\n\n\t\tgo func() {\n\t\t\terrC <- a.Wait()\n\t\t}()\n\t} else {\n\t\t// Start the metrics server.\n\t\t// Only start the metrics server in socket mode or the bind address would conflict with each new process.\n\t\tgo func() {\n\t\t\tlog.Println(\"I! Starting metrics HTTP server on\", *metricsBind)\n\t\t\thttp.Handle(\"/metrics\", promhttp.Handler())\n\t\t\tmetricsErr <- http.ListenAndServe(*metricsBind, nil)\n\t\t}()\n\n\t\t// Create unix socket\n\t\taddr, err := net.ResolveUnixAddr(\"unix\", *socket)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"E! \", err)\n\t\t}\n\t\tl, err := net.ListenUnix(\"unix\", addr)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"E! \", err)\n\t\t}\n\n\t\t// Create server that listens on the socket\n\t\ts := agent.NewServer(l, &accepter{})\n\t\tdefer s.Stop() // this closes the listener\n\n\t\t// Setup signal handler to stop Server on various signals\n\t\ts.StopOnSignals(os.Interrupt, syscall.SIGTERM)\n\n\t\tgo func() {\n\t\t\tlog.Println(\"I! Starting socket server on\", addr.String())\n\t\t\terrC <- s.Serve()\n\t\t}()\n\t}\n\n\tselect {\n\tcase err := <-metricsErr:\n\t\tif err != nil {\n\t\t\tlog.Println(\"E!\", err)\n\t\t}\n\tcase err := <-errC:\n\t\tif err != nil {\n\t\t\tlog.Println(\"E!\", err)\n\t\t}\n\t}\n\tlog.Println(\"I! 
Stopping\")\n}\n\n// Simple connection accepter\ntype accepter struct {\n\tcount int64\n}\n\n// Create a new agent/handler for each new connection.\n// Count and log each new connection and termination.\nfunc (acc *accepter) Accept(conn net.Conn) {\n\tcount := acc.count\n\tacc.count++\n\ta := agent.New(conn, conn)\n\th := newHandler(a)\n\ta.Handler = h\n\n\tlog.Println(\"I! Starting agent for connection\", count)\n\ta.Start()\n\tgo func() {\n\t\terr := a.Wait()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"E! Agent for connection %d terminated with error: %s\", count, err)\n\t\t} else {\n\t\t\tlog.Printf(\"I! Agent for connection %d finished\", count)\n\t\t}\n\t\th.Close()\n\t}()\n}\n\ntype fingerprinterInfo struct {\n\tinit    initFingerprinterFunc\n\toptions *agent.OptionInfo\n}\n\n// Function that creates a new instance of a fingerprinter\ntype createFingerprinterFunc func() morgoth.Fingerprinter\n\n// Init createFingerprinterFunc from agent.OptionValues\ntype initFingerprinterFunc func(opts []*agent.OptionValue) (createFingerprinterFunc, error)\n\nvar fingerprinters = map[string]fingerprinterInfo{\n\t\"sigma\": {\n\t\toptions: &agent.OptionInfo{ValueTypes: []agent.ValueType{agent.ValueType_DOUBLE}},\n\t\tinit: func(args []*agent.OptionValue) (createFingerprinterFunc, error) {\n\t\t\tdeviations := args[0].Value.(*agent.OptionValue_DoubleValue).DoubleValue\n\t\t\tif deviations <= 0 {\n\t\t\t\treturn nil, fmt.Errorf(\"sigma: deviations must be > 0, got %f\", deviations)\n\t\t\t}\n\t\t\treturn func() morgoth.Fingerprinter {\n\t\t\t\treturn sigma.New(deviations)\n\t\t\t}, nil\n\t\t},\n\t},\n\t\"kstest\": {\n\t\toptions: &agent.OptionInfo{ValueTypes: []agent.ValueType{agent.ValueType_INT}},\n\t\tinit: func(args []*agent.OptionValue) (createFingerprinterFunc, error) {\n\t\t\tconfidence := args[0].Value.(*agent.OptionValue_IntValue).IntValue\n\t\t\tif confidence < 0 || confidence > 5 {\n\t\t\t\treturn nil, fmt.Errorf(\"kstest: confidence must be in range [0,5], got %d\", confidence)\n\t\t\t}\n\t\t\treturn func() morgoth.Fingerprinter {\n\t\t\t\treturn kstest.New(uint(confidence))\n\t\t\t}, nil\n\t\t},\n\t},\n\t\"jsdiv\": {\n\t\toptions: &agent.OptionInfo{ValueTypes: []agent.ValueType{\n\t\t\tagent.ValueType_DOUBLE,\n\t\t\tagent.ValueType_DOUBLE,\n\t\t\tagent.ValueType_DOUBLE,\n\t\t\tagent.ValueType_DOUBLE,\n\t\t}},\n\t\tinit: func(args []*agent.OptionValue) (createFingerprinterFunc, error) {\n\t\t\tmin := args[0].Value.(*agent.OptionValue_DoubleValue).DoubleValue\n\t\t\tmax := args[1].Value.(*agent.OptionValue_DoubleValue).DoubleValue\n\t\t\tbinWidth := args[2].Value.(*agent.OptionValue_DoubleValue).DoubleValue\n\t\t\tpValue := args[3].Value.(*agent.OptionValue_DoubleValue).DoubleValue\n\n\t\t\tif binWidth <= 0 {\n\t\t\t\treturn nil, fmt.Errorf(\"jsdiv: binWidth, arg 3, must be > 0, got %f\", binWidth)\n\t\t\t}\n\t\t\tif pValue <= 0 || pValue > 1 {\n\t\t\t\treturn nil, fmt.Errorf(\"jsdiv: pValue, arg 4, must be in range (0,1], got %f\", pValue)\n\t\t\t}\n\t\t\tif (max-min)/binWidth < 3 {\n\t\t\t\treturn nil, fmt.Errorf(\"jsdiv: more than 3 bins should fit in the range [min,max]\")\n\t\t\t}\n\n\t\t\treturn func() morgoth.Fingerprinter {\n\t\t\t\treturn jsdiv.New(min, max, binWidth, pValue)\n\t\t\t}, nil\n\t\t},\n\t},\n}\n\n// A Kapacitor UDF Handler\ntype Handler struct {\n\ttaskID string\n\tnodeID string\n\n\tfield          string\n\tscoreField     string\n\tanomalousField string\n\tminSupport     float64\n\terrorTolerance float64\n\tconsensus      float64\n\tagent          
*agent.Agent\n\n\tcurrentWindow *morgoth.Window\n\tbeginBatch    *agent.BeginBatch\n\tbatchPoints   []*agent.Point\n\tdetectors     map[string]*morgoth.Detector\n\n\tfingerprinters []fingerprinterCreator\n}\n\ntype fingerprinterCreator struct {\n\tKind   string\n\tCreate createFingerprinterFunc\n}\n\nfunc newHandler(a *agent.Agent) *Handler {\n\treturn &Handler{\n\t\tagent:          a,\n\t\tminSupport:     defaultMinSupport,\n\t\terrorTolerance: defaultErrorTolerance,\n\t\tconsensus:      defaultConsensus,\n\t\tdetectors:      make(map[string]*morgoth.Detector),\n\t\tanomalousField: defaultAnomalousField,\n\t}\n}\n\nfunc (h *Handler) Close() {\n\tfor _, d := range h.detectors {\n\t\tdetectorGauge.Dec()\n\t\td.Close()\n\t}\n}\n\nfunc (h *Handler) detectorName(group string) string {\n\treturn fmt.Sprintf(\"%s:%s,group=%s\", h.taskID, h.nodeID, group)\n}\n\n// Return the InfoResponse. Describing the properties of this Handler\nfunc (h *Handler) Info() (*agent.InfoResponse, error) {\n\toptions := map[string]*agent.OptionInfo{\n\t\t\"field\":          {ValueTypes: []agent.ValueType{agent.ValueType_STRING}},\n\t\t\"scoreField\":     {ValueTypes: []agent.ValueType{agent.ValueType_STRING}},\n\t\t\"anomalousField\": {ValueTypes: []agent.ValueType{agent.ValueType_STRING}},\n\t\t\"minSupport\":     {ValueTypes: []agent.ValueType{agent.ValueType_DOUBLE}},\n\t\t\"errorTolerance\": {ValueTypes: []agent.ValueType{agent.ValueType_DOUBLE}},\n\t\t\"consensus\":      {ValueTypes: []agent.ValueType{agent.ValueType_DOUBLE}},\n\t\t\"logLevel\":       {ValueTypes: []agent.ValueType{agent.ValueType_STRING}},\n\t}\n\t// Add in options from fingerprinters\n\tfor name, info := range fingerprinters {\n\t\toptions[name] = info.options\n\t}\n\tinfo := &agent.InfoResponse{\n\t\tWants:    agent.EdgeType_BATCH,\n\t\tProvides: agent.EdgeType_BATCH,\n\t\tOptions:  options,\n\t}\n\treturn info, nil\n\n}\n\n// Initialize the Handler with the provided options.\nfunc (h *Handler) Init(r *agent.InitRequest) (*agent.InitResponse, error) {\n\th.taskID = r.TaskID\n\th.nodeID = r.NodeID\n\n\tinit := &agent.InitResponse{\n\t\tSuccess: true,\n\t}\n\tvar errors []string\n\tfor _, opt := range r.Options {\n\t\tswitch opt.Name {\n\t\tcase \"field\":\n\t\t\th.field = opt.Values[0].Value.(*agent.OptionValue_StringValue).StringValue\n\t\tcase \"scoreField\":\n\t\t\th.scoreField = opt.Values[0].Value.(*agent.OptionValue_StringValue).StringValue\n\t\tcase \"anomalousField\":\n\t\t\th.anomalousField = opt.Values[0].Value.(*agent.OptionValue_StringValue).StringValue\n\t\tcase \"minSupport\":\n\t\t\th.minSupport = opt.Values[0].Value.(*agent.OptionValue_DoubleValue).DoubleValue\n\t\tcase \"errorTolerance\":\n\t\t\th.errorTolerance = opt.Values[0].Value.(*agent.OptionValue_DoubleValue).DoubleValue\n\t\tcase \"consensus\":\n\t\t\th.consensus = opt.Values[0].Value.(*agent.OptionValue_DoubleValue).DoubleValue\n\t\tcase \"logLevel\":\n\t\t\tlevel := opt.Values[0].Value.(*agent.OptionValue_StringValue).StringValue\n\t\t\terr := wlog.SetLevelFromName(level)\n\t\t\tif err != nil {\n\t\t\t\tinit.Success = false\n\t\t\t\terrors = append(errors, err.Error())\n\t\t\t}\n\t\tdefault:\n\t\t\tif info, ok := fingerprinters[opt.Name]; ok {\n\t\t\t\tcreateFn, err := info.init(opt.Values)\n\t\t\t\tif err != nil {\n\t\t\t\t\tinit.Success = false\n\t\t\t\t\terrors = append(errors, err.Error())\n\t\t\t\t} else {\n\t\t\t\t\th.fingerprinters = append(h.fingerprinters, fingerprinterCreator{\n\t\t\t\t\t\tKind:   opt.Name,\n\t\t\t\t\t\tCreate: 
createFn,\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\treturn nil, fmt.Errorf(\"received unknown init option %q\", opt.Name)\n\t\t\t}\n\t\t}\n\t}\n\n\tif h.field == \"\" {\n\t\terrors = append(errors, \"field must not be empty\")\n\t}\n\tif h.anomalousField == \"\" {\n\t\terrors = append(errors, \"anomalousField must not be empty\")\n\t}\n\tif h.minSupport < 0 || h.minSupport > 1 {\n\t\terrors = append(errors, \"minSupport must be in the range [0,1)\")\n\t}\n\tif h.errorTolerance < 0 || h.errorTolerance > 1 {\n\t\terrors = append(errors, \"errorTolerance must be in the range [0,1)\")\n\t}\n\tif (h.consensus != -1 && h.consensus < 0) || h.consensus > 1 {\n\t\terrors = append(errors, \"consensus must be in the range [0,1) or equal to -1\")\n\t}\n\tif h.minSupport <= h.errorTolerance {\n\t\terrors = append(errors, \"invalid minSupport or errorTolerance: minSupport must be greater than errorTolerance\")\n\t}\n\tinit.Success = len(errors) == 0\n\tinit.Error = strings.Join(errors, \"\\n\")\n\n\treturn init, nil\n}\n\n// Create a snapshot of the running state of the handler.\nfunc (h *Handler) Snapshot() (*agent.SnapshotResponse, error) {\n\treturn &agent.SnapshotResponse{}, nil\n}\n\n// Restore a previous snapshot.\nfunc (h *Handler) Restore(*agent.RestoreRequest) (*agent.RestoreResponse, error) {\n\treturn &agent.RestoreResponse{}, nil\n}\n\n// A batch has begun.\nfunc (h *Handler) BeginBatch(b *agent.BeginBatch) error {\n\th.currentWindow = &morgoth.Window{}\n\th.beginBatch = b\n\th.batchPoints = h.batchPoints[0:0]\n\treturn nil\n}\n\n// A point has arrived.\nfunc (h *Handler) Point(p *agent.Point) error {\n\t// Keep point around\n\th.batchPoints = append(h.batchPoints, p)\n\tvar value float64\n\tif f, ok := p.FieldsDouble[h.field]; ok {\n\t\tvalue = f\n\t} else {\n\t\tif i, ok := p.FieldsInt[h.field]; ok {\n\t\t\tvalue = float64(i)\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"field %q is not a float or int\", h.field)\n\t\t}\n\t}\n\th.currentWindow.Data = append(h.currentWindow.Data, value)\n\treturn nil\n}\n\n// The batch is complete.\nfunc (h *Handler) EndBatch(b *agent.EndBatch) error {\n\tdetector, ok := h.detectors[b.Group]\n\tif !ok {\n\t\tmetrics := h.createDetectorMetrics(b.Group)\n\t\tif err := metrics.Register(); err != nil {\n\t\t\treturn errors.Wrapf(err, \"failed to register metrics for group: %q\", b.Group)\n\t\t}\n\t\t// We validated the args ourselves, ignore the error here\n\t\tdetector, _ = morgoth.NewDetector(\n\t\t\tmetrics,\n\t\t\th.consensus,\n\t\t\th.minSupport,\n\t\t\th.errorTolerance,\n\t\t\th.newFingerprinters(),\n\t\t)\n\t\th.detectors[b.Group] = detector\n\t\tdetectorGauge.Inc()\n\t}\n\tanomalous, avgSupport := detector.IsAnomalous(h.currentWindow)\n\n\t// Send batch back to Kapacitor\n\th.agent.Responses <- &agent.Response{\n\t\tMessage: &agent.Response_Begin{\n\t\t\tBegin: h.beginBatch,\n\t\t},\n\t}\n\tfor _, p := range h.batchPoints {\n\t\tif p.FieldsBool == nil {\n\t\t\tp.FieldsBool = make(map[string]bool, 1)\n\t\t}\n\t\tp.FieldsBool[h.anomalousField] = anomalous\n\n\t\tif h.scoreField != \"\" {\n\t\t\tif p.FieldsDouble == nil {\n\t\t\t\tp.FieldsDouble = make(map[string]float64, 1)\n\t\t\t}\n\t\t\tp.FieldsDouble[h.scoreField] = 1 - avgSupport\n\t\t}\n\t\th.agent.Responses <- &agent.Response{\n\t\t\tMessage: &agent.Response_Point{\n\t\t\t\tPoint: p,\n\t\t\t},\n\t\t}\n\t}\n\th.agent.Responses <- &agent.Response{\n\t\tMessage: &agent.Response_End{\n\t\t\tEnd: b,\n\t\t},\n\t}\n\treturn nil\n}\n\nfunc (h *Handler) createDetectorMetrics(group string) 
*morgoth.DetectorMetrics {\n\tlabels := prometheus.Labels{\n\t\t\"task\":  h.taskID,\n\t\t\"node\":  h.nodeID,\n\t\t\"group\": group,\n\t}\n\tmetrics := &morgoth.DetectorMetrics{\n\t\tWindowCount: prometheus.NewCounter(\n\t\t\tprometheus.CounterOpts{\n\t\t\t\tName:        \"morgoth_windows_total\",\n\t\t\t\tHelp:        \"Number of windows processed.\",\n\t\t\t\tConstLabels: labels,\n\t\t\t},\n\t\t),\n\t\tPointCount: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tName:        \"morgoth_points_total\",\n\t\t\tHelp:        \"Number of points processed.\",\n\t\t\tConstLabels: labels,\n\t\t}),\n\t\tAnomalousCount: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tName:        \"morgoth_anomalies_total\",\n\t\t\tHelp:        \"Number of anomalies detected.\",\n\t\t\tConstLabels: labels,\n\t\t}),\n\t\tFingerprinterMetrics: make([]*counter.Metrics, len(h.fingerprinters)),\n\t}\n\tfor i, creator := range h.fingerprinters {\n\t\tfingerprinterLabel := fmt.Sprintf(\"%s-%d\", creator.Kind, i)\n\t\tfLabels := prometheus.Labels{\n\t\t\t\"task\":          h.taskID,\n\t\t\t\"node\":          h.nodeID,\n\t\t\t\"group\":         group,\n\t\t\t\"fingerprinter\": fingerprinterLabel,\n\t\t}\n\t\tmetrics.FingerprinterMetrics[i] = &counter.Metrics{\n\t\t\tUniqueFingerprints: prometheus.NewGauge(prometheus.GaugeOpts{\n\t\t\t\tName:        \"morgoth_unique_fingerprints\",\n\t\t\t\tHelp:        \"Current number of unique fingerprints.\",\n\t\t\t\tConstLabels: fLabels,\n\t\t\t}),\n\t\t\tDistribution: prometheus.NewGaugeVec(\n\t\t\t\tprometheus.GaugeOpts{\n\t\t\t\t\tName:        \"morgoth_fingerprints_distribution\",\n\t\t\t\t\tHelp:        \"Distribution of counts per unique fingerprint. The label \\\"fp\\\" is an arbitrary index to identify the fingerprint and it may change.\",\n\t\t\t\t\tConstLabels: fLabels,\n\t\t\t\t},\n\t\t\t\t[]string{\"fp\"},\n\t\t\t),\n\t\t}\n\t\t// Unregistering a metric does not forget the last value.\n\t\t// We need to explicitly reset the value.\n\t\tmetrics.FingerprinterMetrics[i].UniqueFingerprints.Set(0)\n\t}\n\treturn metrics\n}\n\n// Gracefully stop the Handler.\n// No other methods will be called.\nfunc (h *Handler) Stop() {\n\tclose(h.agent.Responses)\n}\n\nfunc (h *Handler) newFingerprinters() []morgoth.Fingerprinter {\n\tf := make([]morgoth.Fingerprinter, len(h.fingerprinters))\n\tfor i, creator := range h.fingerprinters {\n\t\tf[i] = creator.Create()\n\t}\n\treturn f\n}\n"
  },
  {
    "path": "counter/counter.go",
    "content": "package counter\n\nimport (\n\t\"github.com/pkg/errors\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\ntype Counter interface {\n\t// Count a fingerprint and return the support for the item.\n\t// support = count / total\n\tCount(Countable) float64\n}\n\ntype Countable interface {\n\tIsMatch(other Countable) bool\n}\n\ntype Metrics struct {\n\tUniqueFingerprints prometheus.Gauge\n\tDistribution       *prometheus.GaugeVec\n}\n\nfunc (m *Metrics) Register() error {\n\tif err := prometheus.Register(m.UniqueFingerprints); err != nil {\n\t\treturn errors.Wrap(err, \"unique fingerprints metric\")\n\t}\n\tif err := prometheus.Register(m.Distribution); err != nil {\n\t\treturn errors.Wrap(err, \"distribution metric\")\n\t}\n\treturn nil\n}\n\nfunc (m *Metrics) Unregister() {\n\tprometheus.Unregister(m.UniqueFingerprints)\n\tprometheus.Unregister(m.Distribution)\n}\n"
  },
  {
    "path": "counter/lossy_counter.go",
    "content": "package counter\n\nimport (\n\t\"math\"\n\t\"strconv\"\n\t\"sync\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\ntype lossyCounter struct {\n\tmu                 sync.RWMutex\n\terrorTolerance     float64\n\tfrequencies        []*entry\n\tdistributionGauges []prometheus.Gauge\n\twidth              int\n\ttotal              int\n\tbucket             int\n\n\tmetrics *Metrics\n}\n\ntype entry struct {\n\tcountable Countable\n\tcount     int\n\tdelta     int\n}\n\n//Create a new lossycounter with specified errorTolerance\nfunc NewLossyCounter(metrics *Metrics, errorTolerance float64) *lossyCounter {\n\treturn &lossyCounter{\n\t\tmetrics:        metrics,\n\t\terrorTolerance: errorTolerance,\n\t\twidth:          int(math.Ceil(1.0 / errorTolerance)),\n\t\ttotal:          0,\n\t\tbucket:         1,\n\t}\n}\n\n// Count a countable and return the support for the countable.\nfunc (self *lossyCounter) Count(countable Countable) float64 {\n\tself.mu.Lock()\n\tdefer self.mu.Unlock()\n\tself.total++\n\n\tcount := 0\n\tfor i, existing := range self.frequencies {\n\t\tif existing.countable.IsMatch(countable) {\n\t\t\t//Found match, count it\n\t\t\texisting.count++\n\t\t\tcount = existing.count\n\t\t\t// Keep new countable to allow for drift\n\t\t\tself.frequencies[i].countable = countable\n\n\t\t\tself.distributionGauges[i].Set(float64(count))\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif count == 0 {\n\t\t// No matches create new entry\n\t\tcount = 1\n\n\t\t// Create new gauge\n\t\tg := self.metrics.Distribution.WithLabelValues(strconv.Itoa(len(self.distributionGauges)))\n\t\tg.Set(float64(count))\n\n\t\t// Count new unique fingerprint\n\t\tself.metrics.UniqueFingerprints.Inc()\n\n\t\t// append\n\t\tself.frequencies = append(self.frequencies, &entry{\n\t\t\tcountable: countable,\n\t\t\tcount:     count,\n\t\t\tdelta:     self.bucket - 1,\n\t\t})\n\t\tself.distributionGauges = append(self.distributionGauges, g)\n\t}\n\n\tif self.total%self.width == 0 {\n\t\tself.prune()\n\t\tself.bucket++\n\t}\n\n\treturn float64(count) / float64(self.total)\n}\n\n//Remove infrequent items from the list\nfunc (self *lossyCounter) prune() {\n\tfilteredFreqs := self.frequencies[0:0]\n\tfilteredGauges := self.distributionGauges[0:0]\n\tself.metrics.Distribution.Reset()\n\tfor i, entry := range self.frequencies {\n\t\tif entry.count+entry.delta > self.bucket {\n\t\t\tg := self.metrics.Distribution.WithLabelValues(strconv.Itoa(i))\n\t\t\tg.Set(float64(entry.count))\n\n\t\t\tfilteredFreqs = append(filteredFreqs, entry)\n\t\t\tfilteredGauges = append(filteredGauges, g)\n\t\t}\n\t}\n\n\tself.frequencies = filteredFreqs\n\tself.distributionGauges = filteredGauges\n\n\tself.metrics.UniqueFingerprints.Set(float64(len(self.frequencies)))\n}\n"
  },
  {
    "path": "counter/lossy_counter_test.go",
    "content": "package counter\n\nimport (\n\t\"math\"\n\t\"testing\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\n//Simple fingerprint implementation\ntype fp struct {\n\tid int\n}\n\nfunc (self *fp) IsMatch(other Countable) bool {\n\tfp, ok := other.(*fp)\n\treturn ok && self.id == fp.id\n}\n\nvar metrics = &Metrics{\n\tUniqueFingerprints: prometheus.NewGauge(\n\t\tprometheus.GaugeOpts{\n\t\t\tName: \"unique\",\n\t\t\tHelp: \"help\",\n\t\t},\n\t),\n\tDistribution: prometheus.NewGaugeVec(\n\t\tprometheus.GaugeOpts{\n\t\t\tName: \"distribution\",\n\t\t\tHelp: \"help\",\n\t\t},\n\t\t[]string{\"fp\"},\n\t),\n}\n\nfunc TestLossyCounterShouldCountAllItems(t *testing.T) {\n\tassert := assert.New(t)\n\n\tlc := NewLossyCounter(metrics, 0.01)\n\n\tfp1 := &fp{1}\n\tfp2 := &fp{2}\n\n\tassert.NotEqual(fp1, fp2)\n\n\tassert.Equal(1.0/1.0, lc.Count(fp1))\n\tassert.Equal(2.0/2.0, lc.Count(fp1))\n\tassert.Equal(1.0/3.0, lc.Count(fp2))\n\tassert.Equal(2.0/4.0, lc.Count(fp2))\n\tassert.Equal(3.0/5.0, lc.Count(fp1))\n\tassert.Equal(4.0/6.0, lc.Count(fp1))\n}\n\nfunc TestLossyCounterShouldByLossy(t *testing.T) {\n\tassert := assert.New(t)\n\n\t//Create Lossy Counter that will drop items less than 10% frequent\n\tlc := NewLossyCounter(metrics, 0.10)\n\n\tfp1 := &fp{1}\n\tfp2 := &fp{2}\n\n\t// Count fp1 10 times: 10%\n\tfor i := 0; i < 10; i++ {\n\t\tassert.Equal(1.0, lc.Count(fp1))\n\t}\n\n\t// Count fp2 90 times: 90%\n\tfor i := 0; i < 90; i++ {\n\t\tassert.Equal(float64(i+1)/float64(11+i), lc.Count(fp2))\n\t}\n\n\t// Count fp1 once more, should have been dropped and\n\t// now is counted only once\n\tassert.Equal(1.0/101.0, lc.Count(fp1))\n}\n\n//Benchmark the worst case scenario for the lossy counter:\n// every item is errorTolerance frequent\nfunc BenchmarkCounting(b *testing.B) {\n\n\te := 0.01\n\tlc := NewLossyCounter(metrics, 0.01)\n\n\tunique := int(math.Ceil(1.0 / e))\n\n\tfps := make([]*fp, unique)\n\tfor i := 0; i < unique; i++ {\n\t\tfps[i] = &fp{i}\n\t}\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tid := i % unique\n\t\tlc.Count(fps[id])\n\t}\n\n}\n"
  },
  {
    "path": "detector.go",
    "content": "package morgoth\n\nimport (\n\t\"log\"\n\t\"sync\"\n\n\t\"github.com/nathanielc/morgoth/counter\"\n\t\"github.com/pkg/errors\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\ntype Detector struct {\n\tmu             sync.RWMutex\n\tconsensus      float64\n\tminSupport     float64\n\terrorTolerance float64\n\tcounters       []fingerprinterCounter\n\n\tmetrics *DetectorMetrics\n}\n\ntype DetectorMetrics struct {\n\tWindowCount          prometheus.Counter\n\tPointCount           prometheus.Counter\n\tAnomalousCount       prometheus.Counter\n\tFingerprinterMetrics []*counter.Metrics\n}\n\nfunc (m *DetectorMetrics) Register() error {\n\tif err := prometheus.Register(m.WindowCount); err != nil {\n\t\treturn errors.Wrap(err, \"window count metric\")\n\t}\n\tif err := prometheus.Register(m.PointCount); err != nil {\n\t\treturn errors.Wrap(err, \"point count metric\")\n\t}\n\tif err := prometheus.Register(m.AnomalousCount); err != nil {\n\t\treturn errors.Wrap(err, \"anomalous count metric\")\n\t}\n\tfor i, f := range m.FingerprinterMetrics {\n\t\tif err := f.Register(); err != nil {\n\t\t\treturn errors.Wrapf(err, \"fingerprinter %d\", i)\n\t\t}\n\t}\n\treturn nil\n}\nfunc (m *DetectorMetrics) Unregister() {\n\tprometheus.Unregister(m.WindowCount)\n\tprometheus.Unregister(m.PointCount)\n\tprometheus.Unregister(m.AnomalousCount)\n\tfor _, f := range m.FingerprinterMetrics {\n\t\tf.Unregister()\n\t}\n}\n\n// Pair of fingerprinter and counter\ntype fingerprinterCounter struct {\n\tFingerprinter\n\tcounter.Counter\n}\n\n// Create a new Lossy couting based detector\n// The consensus is a percentage of the fingerprinters that must agree in order to flag a window as anomalous.\n// If the consensus is -1 then the average support from each fingerprinter is compared to minSupport instead of using a consensus.\n// The minSupport defines a minimum frequency as a percentage for a window to be considered normal.\n// The errorTolerance defines a frequency as a precentage for the smallest frequency that will be retained in memory.\n// The errorTolerance must be less than the minSupport.\nfunc NewDetector(metrics *DetectorMetrics, consensus, minSupport, errorTolerance float64, fingerprinters []Fingerprinter) (*Detector, error) {\n\tif (consensus != -1 && consensus < 0) || consensus > 1 {\n\t\treturn nil, errors.New(\"consensus must be in the range [0,1) or equal to -1\")\n\t}\n\tif minSupport <= errorTolerance {\n\t\treturn nil, errors.New(\"minSupport must be greater than errorTolerance\")\n\t}\n\tif len(metrics.FingerprinterMetrics) != len(fingerprinters) {\n\t\treturn nil, errors.New(\"must provide the same number of fingerprinter metrics as fingerprinters\")\n\t}\n\tcounters := make([]fingerprinterCounter, len(fingerprinters))\n\tfor i, fingerprinter := range fingerprinters {\n\t\tcounters[i] = fingerprinterCounter{\n\t\t\tFingerprinter: fingerprinter,\n\t\t\tCounter:       counter.NewLossyCounter(metrics.FingerprinterMetrics[i], errorTolerance),\n\t\t}\n\t}\n\treturn &Detector{\n\t\tmetrics:        metrics,\n\t\tconsensus:      consensus,\n\t\tminSupport:     minSupport,\n\t\terrorTolerance: errorTolerance,\n\t\tcounters:       counters,\n\t}, nil\n}\n\n// Determine if the window is anomalous\nfunc (self *Detector) IsAnomalous(window *Window) (bool, float64) {\n\tself.mu.Lock()\n\tdefer self.mu.Unlock()\n\tself.metrics.WindowCount.Inc()\n\tself.metrics.PointCount.Add(float64(len(window.Data)))\n\n\tvote := 0.0\n\tavgSupport := 0.0\n\tn := 0.0\n\tfor _, fc := range self.counters 
{\n\t\tfingerprint := fc.Fingerprint(window.Copy())\n\t\tsupport := fc.Count(fingerprint)\n\t\tanomalous := support <= self.minSupport\n\t\tif anomalous {\n\t\t\tvote++\n\t\t}\n\t\tlog.Printf(\"D! %T anomalous? %v support: %f\", fc.Fingerprinter, anomalous, support)\n\n\t\tavgSupport = ((avgSupport * n) + support) / (n + 1)\n\t\tn++\n\t}\n\n\tanomalous := false\n\tif self.consensus != -1 {\n\t\t// Use voting consensus\n\t\tvote /= float64(len(self.counters))\n\t\tanomalous = vote >= self.consensus\n\t} else {\n\t\t// Use average suppport\n\t\tanomalous = avgSupport <= self.minSupport\n\t}\n\n\tif anomalous {\n\t\tself.metrics.AnomalousCount.Inc()\n\t}\n\n\treturn anomalous, avgSupport\n}\n\nfunc (self *Detector) Close() {\n\tself.metrics.Unregister()\n}\n"
  },
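  {
    "path": "examples/detector/main.go",
    "content": "// Package main is a minimal, illustrative sketch and is not part of the\n// original Morgoth distribution: it shows how morgoth.NewDetector and\n// Detector.IsAnomalous fit together outside of Kapacitor.\n// The metric names below are arbitrary; Handler.createDetectorMetrics in\n// cmd/morgoth shows the real naming scheme.\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/nathanielc/morgoth\"\n\t\"github.com/nathanielc/morgoth/counter\"\n\t\"github.com/nathanielc/morgoth/fingerprinters/sigma\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nfunc main() {\n\t// One counter.Metrics entry is required per fingerprinter.\n\tmetrics := &morgoth.DetectorMetrics{\n\t\tWindowCount:    prometheus.NewCounter(prometheus.CounterOpts{Name: \"windows_total\", Help: \"windows\"}),\n\t\tPointCount:     prometheus.NewCounter(prometheus.CounterOpts{Name: \"points_total\", Help: \"points\"}),\n\t\tAnomalousCount: prometheus.NewCounter(prometheus.CounterOpts{Name: \"anomalies_total\", Help: \"anomalies\"}),\n\t\tFingerprinterMetrics: []*counter.Metrics{{\n\t\t\tUniqueFingerprints: prometheus.NewGauge(prometheus.GaugeOpts{Name: \"unique_fingerprints\", Help: \"unique\"}),\n\t\t\tDistribution:       prometheus.NewGaugeVec(prometheus.GaugeOpts{Name: \"fingerprints_distribution\", Help: \"distribution\"}, []string{\"fp\"}),\n\t\t}},\n\t}\n\n\t// consensus 0.5: at least half of the fingerprinters must vote anomalous.\n\t// minSupport 0.05 and errorTolerance 0.01 mirror the defaults in cmd/morgoth.\n\tdetector, err := morgoth.NewDetector(metrics, 0.5, 0.05, 0.01,\n\t\t[]morgoth.Fingerprinter{sigma.New(3.0)},\n\t)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer detector.Close()\n\n\tanomalous, avgSupport := detector.IsAnomalous(&morgoth.Window{Data: []float64{1, 2, 3}})\n\tfmt.Println(\"anomalous:\", anomalous, \"avgSupport:\", avgSupport)\n}\n"
  },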
  {
    "path": "fingerprint.go",
    "content": "package morgoth\n\nimport \"github.com/nathanielc/morgoth/counter\"\n\ntype Fingerprint interface {\n\tIsMatch(other counter.Countable) bool\n}\n\ntype Fingerprinter interface {\n\tFingerprint(window *Window) Fingerprint\n}\n"
  },
  {
    "path": "fingerprinters/jsdiv/jsdiv.go",
    "content": "package jsdiv\n\nimport (\n\t\"math\"\n\n\t\"github.com/nathanielc/morgoth\"\n\t\"github.com/nathanielc/morgoth/counter\"\n)\n\nconst iterations = 20\n\ntype histogram map[int]float64\n\nvar ln2 = math.Log(2)\n\n// Jensen-Shannon Divergence\n//\n// Fingerprints store the histogram of the window.\n// Fingerprints are compared to see their JS divergence distance is less than a critical threshold.\n//\n// Configuration:\n//  min: Excpected minimum value of the window data.\n//  max: Excpected maximum value of the window data.\n//  binwidth: Size of a bin for the histogram\n//  pValue: Standard p-value statistical threshold. Typical value is 0.05\ntype JSDiv struct {\n\tminIndex int\n\tmaxIndex int\n\tbinWidth float64\n\tpValue   float64\n}\n\nfunc New(min, max, binWidth, pValue float64) *JSDiv {\n\treturn &JSDiv{\n\t\tminIndex: int(math.Floor(min / binWidth)),\n\t\tmaxIndex: int(math.Floor(max / binWidth)),\n\t\tbinWidth: binWidth,\n\t\tpValue:   pValue,\n\t}\n}\n\nfunc (self *JSDiv) Fingerprint(window *morgoth.Window) morgoth.Fingerprint {\n\n\thist, count := calcHistogram(window.Data, self.binWidth)\n\treturn &JSDivFingerprint{\n\t\thist,\n\t\tcount,\n\t\tself.pValue,\n\t\tself.minIndex,\n\t\tself.maxIndex,\n\t}\n}\n\nfunc calcHistogram(xs []float64, binWidth float64) (hist histogram, count int) {\n\tcount = len(xs)\n\tc := float64(count)\n\thist = make(histogram)\n\tfor _, x := range xs {\n\t\ti := int(math.Floor(x / binWidth))\n\t\thist[i] += 1.0 / c\n\t}\n\treturn\n}\n\ntype JSDivFingerprint struct {\n\thistogram histogram\n\tcount     int\n\tpValue    float64\n\n\tminIndex int\n\tmaxIndex int\n}\n\nfunc (self *JSDivFingerprint) IsMatch(other counter.Countable) bool {\n\tothr, ok := other.(*JSDivFingerprint)\n\tif !ok {\n\t\treturn false\n\t}\n\n\ts := self.calcSignificance(othr)\n\n\treturn s < self.pValue\n}\n\nfunc (self *JSDivFingerprint) calcSignificance(other *JSDivFingerprint) float64 {\n\tp := self.histogram\n\tq := other.histogram\n\tm := make(histogram, len(p)+len(q))\n\tmin := self.minIndex\n\tmax := self.maxIndex\n\tfor i := range p {\n\t\tif i < min {\n\t\t\tmin = i\n\t\t}\n\t\tif i > max {\n\t\t\tmax = i\n\t\t}\n\t\tm[i] = 0.5 * p[i]\n\t}\n\tfor i := range q {\n\t\tif i < min {\n\t\t\tmin = i\n\t\t}\n\t\tif i > max {\n\t\t\tmax = i\n\t\t}\n\t\tm[i] += 0.5 * q[i]\n\t}\n\n\tk := max - min\n\n\tv := 0.5 * float64(k-1)\n\n\tD := calcS(m) - (0.5*calcS(p) + 0.5*calcS(q))\n\n\tinc := apporxIncompleteGamma(v, float64(self.count+other.count)*ln2*D)\n\tgamma := math.Gamma(v)\n\n\treturn inc / gamma\n}\n\n// Calculate the Shannon measure for a histogram\nfunc calcS(hist histogram) float64 {\n\ts := 0.0\n\tfor _, v := range hist {\n\t\tif v != 0 {\n\t\t\ts += v * math.Log2(v)\n\t\t}\n\t}\n\n\treturn -s\n}\n\n// This is a work in progress. Need to update.\nfunc apporxIncompleteGamma(s, x float64) float64 {\n\tg := 0.0\n\txs := math.Pow(x, s)\n\tex := math.Exp(-x)\n\n\tfor k := 0; k < iterations; k++ {\n\t\tdenominator := s\n\t\tfor i := 1; i <= k; i++ {\n\t\t\tdenominator *= s + float64(i)\n\t\t}\n\t\tg += (xs * ex * math.Pow(x, float64(k))) / denominator\n\t}\n\treturn g\n}\n"
  },
  {
    "path": "fingerprinters/jsdiv/jsdiv_test.go",
    "content": "package jsdiv\n"
  },
  {
    "path": "fingerprinters/kstest/kstest.go",
    "content": "package kstest\n\nimport (\n\t\"math\"\n\t\"sort\"\n\n\t\"github.com/nathanielc/morgoth\"\n\t\"github.com/nathanielc/morgoth/counter\"\n)\n\nvar confidenceMappings = []float64{\n\t1.22,\n\t1.36,\n\t1.48,\n\t1.63,\n\t1.73,\n\t1.95,\n}\n\n// Kolmogorov–Smirnov test.\n// https://en.wikipedia.org/wiki/Kolmogorov%E2%80%93Smirnov_test\n//\n// The fingerprint is the cummulative distribution of the window.\n// The fingerprints are compared by computing the largest distance between the cummulative distribution functions and comparing to a critical value.\n//\n// Configuration:\n//  The only parameter is a confidence level.\n//  Valid values are from 0-5.\n//  The level maps to a list of predefined critical values for the KS test.\n//  Increasing 'confidence' decreases the number of anomalies detected.\n//\ntype KSTest struct {\n\tconfidence uint\n}\n\nfunc New(confidence uint) *KSTest {\n\treturn &KSTest{\n\t\tconfidence: confidence,\n\t}\n}\n\nfunc (self *KSTest) Fingerprint(window *morgoth.Window) morgoth.Fingerprint {\n\n\tsort.Float64s(window.Data)\n\n\treturn &KSTestFingerprint{self.confidence, window.Data}\n}\n\ntype KSTestFingerprint struct {\n\tconfidence uint\n\tedf        []float64\n}\n\nfunc (self *KSTestFingerprint) IsMatch(other counter.Countable) bool {\n\tothr, ok := other.(*KSTestFingerprint)\n\tif !ok {\n\t\treturn false\n\t}\n\tif self.confidence != othr.confidence {\n\t\treturn false\n\t}\n\n\tthreshold := self.calcThreshold(othr)\n\n\tD := calcD(self.edf, othr.edf)\n\n\treturn D < threshold\n}\n\n// Calculate the critical threshold for this comparision\nfunc (self *KSTestFingerprint) calcThreshold(othr *KSTestFingerprint) float64 {\n\tc := confidenceMappings[self.confidence]\n\tn := float64(len(self.edf))\n\tm := float64(len(othr.edf))\n\treturn c * math.Sqrt((n+m)/(n*m))\n\n}\n\n// Calculate maximum distance between cummulative distributions\nfunc calcD(f1, f2 []float64) float64 {\n\tD := 0.0\n\tn := len(f1)\n\tm := len(f2)\n\ti := 0\n\tj := 0\n\tfor i < n && j < m {\n\t\tfor i < n && j < m && f1[i] < f2[j] {\n\t\t\ti++\n\t\t}\n\t\tfor i < n && j < m && f1[i] > f2[j] {\n\t\t\tj++\n\t\t}\n\t\tfor i < n && j < m && f1[i] == f2[j] {\n\t\t\ti++\n\t\t\tj++\n\t\t}\n\t\tcdf1 := float64(i) / float64(n)\n\t\tcdf2 := float64(j) / float64(m)\n\t\tif d := math.Abs(cdf1 - cdf2); d > D {\n\t\t\tD = d\n\t\t}\n\t}\n\treturn D\n}\n"
  },
  {
    "path": "fingerprinters/kstest/kstest_test.go",
    "content": "package kstest\n\nimport (\n\t\"testing\"\n\n\t\"github.com/nathanielc/morgoth\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestCalcDShouldBe0(t *testing.T) {\n\tassert := assert.New(t)\n\n\tdata := make([]float64, 10)\n\tfor i := range data {\n\t\tdata[i] = float64(i+1) / float64(len(data))\n\t}\n\n\texpectedD := 0.0\n\n\td := calcD(data, data)\n\tassert.InDelta(expectedD, d, 1e-5)\n}\n\nfunc TestCalcDShouldBeSmall(t *testing.T) {\n\tassert := assert.New(t)\n\n\tdata1 := make([]float64, 10)\n\tfor i := range data1 {\n\t\tdata1[i] = float64(i+1) / float64(len(data1))\n\t}\n\n\tdata2 := make([]float64, 10)\n\tfor i := range data2 {\n\t\tdata2[i] = float64(i) / float64(len(data2))\n\t}\n\n\texpectedD := 0.1\n\n\td := calcD(data1, data2)\n\tassert.InDelta(expectedD, d, 1e-5)\n\n\td = calcD(data2, data1)\n\tassert.InDelta(expectedD, d, 1e-5)\n\n}\n\nfunc TestCalcDShouldBe1(t *testing.T) {\n\tassert := assert.New(t)\n\n\tdata1 := make([]float64, 10)\n\tfor i := range data1 {\n\t\tdata1[i] = 0.0\n\t}\n\n\tdata2 := make([]float64, 10)\n\tfor i := range data2 {\n\t\tdata2[i] = float64(i+1) / float64(len(data2))\n\t}\n\n\texpectedD := 1.0\n\n\td := calcD(data1, data2)\n\tassert.InDelta(expectedD, d, 1e-5)\n\n\td = calcD(data2, data1)\n\tassert.InDelta(expectedD, d, 1e-5)\n\n}\n\nfunc BenchmarkCalcD(b *testing.B) {\n\n\tdata := make([]float64, 100)\n\tfor i := range data {\n\t\tdata[i] = float64(i+1) / float64(len(data))\n\t}\n\n\tfor i := 0; i < b.N; i++ {\n\t\tcalcD(data, data)\n\t}\n}\n\nfunc BenchmarkIsMatch(b *testing.B) {\n\n\tdata1 := make([]float64, 100)\n\tfor i := range data1 {\n\t\tdata1[i] = float64(-i) / float64(len(data1))\n\t}\n\n\tdata2 := make([]float64, 100)\n\tfor i := range data2 {\n\t\tdata2[i] = float64(i+1) / float64(len(data2))\n\t}\n\n\tks := KSTest{\n\t\tconfidence: 4,\n\t}\n\n\tf1 := ks.Fingerprint(&morgoth.Window{\n\t\tData: data1,\n\t})\n\n\tf2 := ks.Fingerprint(&morgoth.Window{\n\t\tData: data2,\n\t})\n\n\tfor i := 0; i < b.N; i++ {\n\t\tf1.IsMatch(f2)\n\t}\n}\n\nfunc BenchmarkFingerprint(b *testing.B) {\n\n\tdata := make([]float64, 100)\n\tfor i := range data {\n\t\tdata[i] = float64(i) / float64(len(data))\n\t}\n\n\tks := KSTest{\n\t\tconfidence: 4,\n\t}\n\n\tw := &morgoth.Window{\n\t\tData: data,\n\t}\n\n\tfor i := 0; i < b.N; i++ {\n\t\tks.Fingerprint(w)\n\t}\n}\n"
  },
  {
    "path": "fingerprinters/sigma/sigma.go",
    "content": "package sigma\n\nimport (\n\t\"math\"\n\n\t\"github.com/nathanielc/morgoth\"\n\t\"github.com/nathanielc/morgoth/counter\"\n)\n\n// Simple fingerprinter that computes both mean and standard deviation of a window.\n// Fingerprints are compared to see if the means are more than n deviations apart.\ntype Sigma struct {\n\tdeviations float64\n}\n\nfunc New(deviations float64) *Sigma {\n\treturn &Sigma{\n\t\tdeviations: deviations,\n\t}\n}\n\nfunc (self *Sigma) Fingerprint(window *morgoth.Window) morgoth.Fingerprint {\n\tmean, std := calcStats(window.Data)\n\treturn SigmaFingerprint{\n\t\tmean:      mean,\n\t\tthreshold: self.deviations * std,\n\t}\n}\n\nfunc calcStats(xs []float64) (mean, std float64) {\n\tn := 0.0\n\tM2 := 0.0\n\n\tfor _, x := range xs {\n\t\tn++\n\t\tdelta := x - mean\n\t\tmean = mean + delta/n\n\t\tM2 += delta * (x - mean)\n\t}\n\n\tstd = math.Sqrt(M2 / n)\n\treturn\n}\n\ntype SigmaFingerprint struct {\n\tmean      float64\n\tthreshold float64\n}\n\nfunc (self SigmaFingerprint) IsMatch(other counter.Countable) bool {\n\to, ok := other.(SigmaFingerprint)\n\tif !ok {\n\t\treturn false\n\t}\n\treturn math.Abs(self.mean-o.mean) <= self.threshold\n}\n"
  },
  {
    "path": "vendor/github.com/beorn7/perks/.gitignore",
    "content": "*.test\n*.prof\n"
  },
  {
    "path": "vendor/github.com/beorn7/perks/LICENSE",
    "content": "Copyright (C) 2013 Blake Mizerany\n\nPermission is hereby granted, free of charge, to any person obtaining\na copy of this software and associated documentation files (the\n\"Software\"), to deal in the Software without restriction, including\nwithout limitation the rights to use, copy, modify, merge, publish,\ndistribute, sublicense, and/or sell copies of the Software, and to\npermit persons to whom the Software is furnished to do so, subject to\nthe following conditions:\n\nThe above copyright notice and this permission notice shall be\nincluded in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\nNONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE\nLIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\nOF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\nWITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"
  },
  {
    "path": "vendor/github.com/beorn7/perks/README.md",
    "content": "# Perks for Go (golang.org)\n\nPerks contains the Go package quantile that computes approximate quantiles over\nan unbounded data stream within low memory and CPU bounds.\n\nFor more information and examples, see:\nhttp://godoc.org/github.com/bmizerany/perks\n\nA very special thank you and shout out to Graham Cormode (Rutgers University),\nFlip Korn (AT&T Labs–Research), S. Muthukrishnan (Rutgers University), and\nDivesh Srivastava (AT&T Labs–Research) for their research and publication of\n[Effective Computation of Biased Quantiles over Data Streams](http://www.cs.rutgers.edu/~muthu/bquant.pdf)\n\nThank you, also:\n* Armon Dadgar (@armon)\n* Andrew Gerrand (@nf)\n* Brad Fitzpatrick (@bradfitz)\n* Keith Rarick (@kr)\n\nFAQ:\n\nQ: Why not move the quantile package into the project root?\nA: I want to add more packages to perks later.\n\nCopyright (C) 2013 Blake Mizerany\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"
  },
  {
    "path": "vendor/github.com/beorn7/perks/quantile/bench_test.go",
    "content": "package quantile\n\nimport (\n\t\"testing\"\n)\n\nfunc BenchmarkInsertTargeted(b *testing.B) {\n\tb.ReportAllocs()\n\n\ts := NewTargeted(Targets)\n\tb.ResetTimer()\n\tfor i := float64(0); i < float64(b.N); i++ {\n\t\ts.Insert(i)\n\t}\n}\n\nfunc BenchmarkInsertTargetedSmallEpsilon(b *testing.B) {\n\ts := NewTargeted(TargetsSmallEpsilon)\n\tb.ResetTimer()\n\tfor i := float64(0); i < float64(b.N); i++ {\n\t\ts.Insert(i)\n\t}\n}\n\nfunc BenchmarkInsertBiased(b *testing.B) {\n\ts := NewLowBiased(0.01)\n\tb.ResetTimer()\n\tfor i := float64(0); i < float64(b.N); i++ {\n\t\ts.Insert(i)\n\t}\n}\n\nfunc BenchmarkInsertBiasedSmallEpsilon(b *testing.B) {\n\ts := NewLowBiased(0.0001)\n\tb.ResetTimer()\n\tfor i := float64(0); i < float64(b.N); i++ {\n\t\ts.Insert(i)\n\t}\n}\n\nfunc BenchmarkQuery(b *testing.B) {\n\ts := NewTargeted(Targets)\n\tfor i := float64(0); i < 1e6; i++ {\n\t\ts.Insert(i)\n\t}\n\tb.ResetTimer()\n\tn := float64(b.N)\n\tfor i := float64(0); i < n; i++ {\n\t\ts.Query(i / n)\n\t}\n}\n\nfunc BenchmarkQuerySmallEpsilon(b *testing.B) {\n\ts := NewTargeted(TargetsSmallEpsilon)\n\tfor i := float64(0); i < 1e6; i++ {\n\t\ts.Insert(i)\n\t}\n\tb.ResetTimer()\n\tn := float64(b.N)\n\tfor i := float64(0); i < n; i++ {\n\t\ts.Query(i / n)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/beorn7/perks/quantile/example_test.go",
    "content": "// +build go1.1\n\npackage quantile_test\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com/beorn7/perks/quantile\"\n)\n\nfunc Example_simple() {\n\tch := make(chan float64)\n\tgo sendFloats(ch)\n\n\t// Compute the 50th, 90th, and 99th percentile.\n\tq := quantile.NewTargeted(map[float64]float64{\n\t\t0.50: 0.005,\n\t\t0.90: 0.001,\n\t\t0.99: 0.0001,\n\t})\n\tfor v := range ch {\n\t\tq.Insert(v)\n\t}\n\n\tfmt.Println(\"perc50:\", q.Query(0.50))\n\tfmt.Println(\"perc90:\", q.Query(0.90))\n\tfmt.Println(\"perc99:\", q.Query(0.99))\n\tfmt.Println(\"count:\", q.Count())\n\t// Output:\n\t// perc50: 5\n\t// perc90: 16\n\t// perc99: 223\n\t// count: 2388\n}\n\nfunc Example_mergeMultipleStreams() {\n\t// Scenario:\n\t// We have multiple database shards. On each shard, there is a process\n\t// collecting query response times from the database logs and inserting\n\t// them into a Stream (created via NewTargeted(0.90)), much like the\n\t// Simple example. These processes expose a network interface for us to\n\t// ask them to serialize and send us the results of their\n\t// Stream.Samples so we may Merge and Query them.\n\t//\n\t// NOTES:\n\t// * These sample sets are small, allowing us to get them\n\t// across the network much faster than sending the entire list of data\n\t// points.\n\t//\n\t// * For this to work correctly, we must supply the same quantiles\n\t// a priori the process collecting the samples supplied to NewTargeted,\n\t// even if we do not plan to query them all here.\n\tch := make(chan quantile.Samples)\n\tgetDBQuerySamples(ch)\n\tq := quantile.NewTargeted(map[float64]float64{0.90: 0.001})\n\tfor samples := range ch {\n\t\tq.Merge(samples)\n\t}\n\tfmt.Println(\"perc90:\", q.Query(0.90))\n}\n\nfunc Example_window() {\n\t// Scenario: We want the 90th, 95th, and 99th percentiles for each\n\t// minute.\n\n\tch := make(chan float64)\n\tgo sendStreamValues(ch)\n\n\ttick := time.NewTicker(1 * time.Minute)\n\tq := quantile.NewTargeted(map[float64]float64{\n\t\t0.90: 0.001,\n\t\t0.95: 0.0005,\n\t\t0.99: 0.0001,\n\t})\n\tfor {\n\t\tselect {\n\t\tcase t := <-tick.C:\n\t\t\tflushToDB(t, q.Samples())\n\t\t\tq.Reset()\n\t\tcase v := <-ch:\n\t\t\tq.Insert(v)\n\t\t}\n\t}\n}\n\nfunc sendStreamValues(ch chan float64) {\n\t// Use your imagination\n}\n\nfunc flushToDB(t time.Time, samples quantile.Samples) {\n\t// Use your imagination\n}\n\n// This is a stub for the above example. In reality this would hit the remote\n// servers via http or something like it.\nfunc getDBQuerySamples(ch chan quantile.Samples) {}\n\nfunc sendFloats(ch chan<- float64) {\n\tf, err := os.Open(\"exampledata.txt\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tsc := bufio.NewScanner(f)\n\tfor sc.Scan() {\n\t\tb := sc.Bytes()\n\t\tv, err := strconv.ParseFloat(string(b), 64)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tch <- v\n\t}\n\tif sc.Err() != nil {\n\t\tlog.Fatal(sc.Err())\n\t}\n\tclose(ch)\n}\n"
  },
  {
    "path": "vendor/github.com/beorn7/perks/quantile/exampledata.txt",
    "content": "8\n5\n26\n12\n5\n235\n13\n6\n28\n30\n3\n3\n3\n3\n5\n2\n33\n7\n2\n4\n7\n12\n14\n5\n8\n3\n10\n4\n5\n3\n6\n6\n209\n20\n3\n10\n14\n3\n4\n6\n8\n5\n11\n7\n3\n2\n3\n3\n212\n5\n222\n4\n10\n10\n5\n6\n3\n8\n3\n10\n254\n220\n2\n3\n5\n24\n5\n4\n222\n7\n3\n3\n223\n8\n15\n12\n14\n14\n3\n2\n2\n3\n13\n3\n11\n4\n4\n6\n5\n7\n13\n5\n3\n5\n2\n5\n3\n5\n2\n7\n15\n17\n14\n3\n6\n6\n3\n17\n5\n4\n7\n6\n4\n4\n8\n6\n8\n3\n9\n3\n6\n3\n4\n5\n3\n3\n660\n4\n6\n10\n3\n6\n3\n2\n5\n13\n2\n4\n4\n10\n4\n8\n4\n3\n7\n9\n9\n3\n10\n37\n3\n13\n4\n12\n3\n6\n10\n8\n5\n21\n2\n3\n8\n3\n2\n3\n3\n4\n12\n2\n4\n8\n8\n4\n3\n2\n20\n1\n6\n32\n2\n11\n6\n18\n3\n8\n11\n3\n212\n3\n4\n2\n6\n7\n12\n11\n3\n2\n16\n10\n6\n4\n6\n3\n2\n7\n3\n2\n2\n2\n2\n5\n6\n4\n3\n10\n3\n4\n6\n5\n3\n4\n4\n5\n6\n4\n3\n4\n4\n5\n7\n5\n5\n3\n2\n7\n2\n4\n12\n4\n5\n6\n2\n4\n4\n8\n4\n15\n13\n7\n16\n5\n3\n23\n5\n5\n7\n3\n2\n9\n8\n7\n5\n8\n11\n4\n10\n76\n4\n47\n4\n3\n2\n7\n4\n2\n3\n37\n10\n4\n2\n20\n5\n4\n4\n10\n10\n4\n3\n7\n23\n240\n7\n13\n5\n5\n3\n3\n2\n5\n4\n2\n8\n7\n19\n2\n23\n8\n7\n2\n5\n3\n8\n3\n8\n13\n5\n5\n5\n2\n3\n23\n4\n9\n8\n4\n3\n3\n5\n220\n2\n3\n4\n6\n14\n3\n53\n6\n2\n5\n18\n6\n3\n219\n6\n5\n2\n5\n3\n6\n5\n15\n4\n3\n17\n3\n2\n4\n7\n2\n3\n3\n4\n4\n3\n2\n664\n6\n3\n23\n5\n5\n16\n5\n8\n2\n4\n2\n24\n12\n3\n2\n3\n5\n8\n3\n5\n4\n3\n14\n3\n5\n8\n2\n3\n7\n9\n4\n2\n3\n6\n8\n4\n3\n4\n6\n5\n3\n3\n6\n3\n19\n4\n4\n6\n3\n6\n3\n5\n22\n5\n4\n4\n3\n8\n11\n4\n9\n7\n6\n13\n4\n4\n4\n6\n17\n9\n3\n3\n3\n4\n3\n221\n5\n11\n3\n4\n2\n12\n6\n3\n5\n7\n5\n7\n4\n9\n7\n14\n37\n19\n217\n16\n3\n5\n2\n2\n7\n19\n7\n6\n7\n4\n24\n5\n11\n4\n7\n7\n9\n13\n3\n4\n3\n6\n28\n4\n4\n5\n5\n2\n5\n6\n4\n4\n6\n10\n5\n4\n3\n2\n3\n3\n6\n5\n5\n4\n3\n2\n3\n7\n4\n6\n18\n16\n8\n16\n4\n5\n8\n6\n9\n13\n1545\n6\n215\n6\n5\n6\n3\n45\n31\n5\n2\n2\n4\n3\n3\n2\n5\n4\n3\n5\n7\n7\n4\n5\n8\n5\n4\n749\n2\n31\n9\n11\n2\n11\n5\n4\n4\n7\n9\n11\n4\n5\n4\n7\n3\n4\n6\n2\n15\n3\n4\n3\n4\n3\n5\n2\n13\n5\n5\n3\n3\n23\n4\n4\n5\n7\n4\n13\n2\n4\n3\n4\n2\n6\n2\n7\n3\n5\n5\n3\n29\n5\n4\n4\n3\n10\n2\n3\n79\n16\n6\n6\n7\n7\n3\n5\n5\n7\n4\n3\n7\n9\n5\n6\n5\n9\n6\n3\n6\n4\n17\n2\n10\n9\n3\n6\n2\n3\n21\n22\n5\n11\n4\n2\n17\n2\n224\n2\n14\n3\n4\n4\n2\n4\n4\n4\n4\n5\n3\n4\n4\n10\n2\n6\n3\n3\n5\n7\n2\n7\n5\n6\n3\n218\n2\n2\n5\n2\n6\n3\n5\n222\n14\n6\n33\n3\n2\n5\n3\n3\n3\n9\n5\n3\n3\n2\n7\n4\n3\n4\n3\n5\n6\n5\n26\n4\n13\n9\n7\n3\n221\n3\n3\n4\n4\n4\n4\n2\n18\n5\n3\n7\n9\n6\n8\n3\n10\n3\n11\n9\n5\n4\n17\n5\n5\n6\n6\n3\n2\n4\n12\n17\n6\n7\n218\n4\n2\n4\n10\n3\n5\n15\n3\n9\n4\n3\n3\n6\n29\n3\n3\n4\n5\n5\n3\n8\n5\n6\n6\n7\n5\n3\n5\n3\n29\n2\n31\n5\n15\n24\n16\n5\n207\n4\n3\n3\n2\n15\n4\n4\n13\n5\n5\n4\n6\n10\n2\n7\n8\n4\n6\n20\n5\n3\n4\n3\n12\n12\n5\n17\n7\n3\n3\n3\n6\n10\n3\n5\n25\n80\n4\n9\n3\n2\n11\n3\n3\n2\n3\n8\n7\n5\n5\n19\n5\n3\n3\n12\n11\n2\n6\n5\n5\n5\n3\n3\n3\n4\n209\n14\n3\n2\n5\n19\n4\n4\n3\n4\n14\n5\n6\n4\n13\n9\n7\n4\n7\n10\n2\n9\n5\n7\n2\n8\n4\n6\n5\n5\n222\n8\n7\n12\n5\n216\n3\n4\n4\n6\n3\n14\n8\n7\n13\n4\n3\n3\n3\n3\n17\n5\n4\n3\n33\n6\n6\n33\n7\n5\n3\n8\n7\n5\n2\n9\n4\n2\n233\n24\n7\n4\n8\n10\n3\n4\n15\n2\n16\n3\n3\n13\n12\n7\n5\n4\n207\n4\n2\n4\n27\n15\n2\n5\n2\n25\n6\n5\n5\n6\n13\n6\n18\n6\n4\n12\n225\n10\n7\n5\n2\n2\n11\n4\n14\n21\n8\n10\n3\n5\n4\n232\n2\n5\n5\n3\n7\n17\n11\n6\n6\n23\n4\n6\n3\n5\n4\n2\n17\n3\n6\n5\n8\n3\n2\n2\n14\n9\n4\n4\n2\n5\n5\n3\n7\n6\n12\n6\n10\n3\n6\n2\n2\n19\n5\n4\n4\n9\n2\n4\n13\n3\n5\n6\n3\n6\n5\n4\n9\n6\n3\n5\n7\n3\n6\n6\n4\n3\n10\n6\n3\n221\n3\n5\n3\n6\n4\n8\n5\n3\n6\n4\n4\n2\n54\n5\n6\n11\n3\n3\n4\n4\n4\n3\n7\n3\n11\n11\n7\n10\n6\n13\n223\n213\n15\n231\n7\n3\n7\n228\n2\n3\n4\n4\n5\n6\n7\n4\
n13\n3\n4\n5\n3\n6\n4\n6\n7\n2\n4\n3\n4\n3\n3\n6\n3\n7\n3\n5\n18\n5\n6\n8\n10\n3\n3\n3\n2\n4\n2\n4\n4\n5\n6\n6\n4\n10\n13\n3\n12\n5\n12\n16\n8\n4\n19\n11\n2\n4\n5\n6\n8\n5\n6\n4\n18\n10\n4\n2\n216\n6\n6\n6\n2\n4\n12\n8\n3\n11\n5\n6\n14\n5\n3\n13\n4\n5\n4\n5\n3\n28\n6\n3\n7\n219\n3\n9\n7\n3\n10\n6\n3\n4\n19\n5\n7\n11\n6\n15\n19\n4\n13\n11\n3\n7\n5\n10\n2\n8\n11\n2\n6\n4\n6\n24\n6\n3\n3\n3\n3\n6\n18\n4\n11\n4\n2\n5\n10\n8\n3\n9\n5\n3\n4\n5\n6\n2\n5\n7\n4\n4\n14\n6\n4\n4\n5\n5\n7\n2\n4\n3\n7\n3\n3\n6\n4\n5\n4\n4\n4\n3\n3\n3\n3\n8\n14\n2\n3\n5\n3\n2\n4\n5\n3\n7\n3\n3\n18\n3\n4\n4\n5\n7\n3\n3\n3\n13\n5\n4\n8\n211\n5\n5\n3\n5\n2\n5\n4\n2\n655\n6\n3\n5\n11\n2\n5\n3\n12\n9\n15\n11\n5\n12\n217\n2\n6\n17\n3\n3\n207\n5\n5\n4\n5\n9\n3\n2\n8\n5\n4\n3\n2\n5\n12\n4\n14\n5\n4\n2\n13\n5\n8\n4\n225\n4\n3\n4\n5\n4\n3\n3\n6\n23\n9\n2\n6\n7\n233\n4\n4\n6\n18\n3\n4\n6\n3\n4\n4\n2\n3\n7\n4\n13\n227\n4\n3\n5\n4\n2\n12\n9\n17\n3\n7\n14\n6\n4\n5\n21\n4\n8\n9\n2\n9\n25\n16\n3\n6\n4\n7\n8\n5\n2\n3\n5\n4\n3\n3\n5\n3\n3\n3\n2\n3\n19\n2\n4\n3\n4\n2\n3\n4\n4\n2\n4\n3\n3\n3\n2\n6\n3\n17\n5\n6\n4\n3\n13\n5\n3\n3\n3\n4\n9\n4\n2\n14\n12\n4\n5\n24\n4\n3\n37\n12\n11\n21\n3\n4\n3\n13\n4\n2\n3\n15\n4\n11\n4\n4\n3\n8\n3\n4\n4\n12\n8\n5\n3\n3\n4\n2\n220\n3\n5\n223\n3\n3\n3\n10\n3\n15\n4\n241\n9\n7\n3\n6\n6\n23\n4\n13\n7\n3\n4\n7\n4\n9\n3\n3\n4\n10\n5\n5\n1\n5\n24\n2\n4\n5\n5\n6\n14\n3\n8\n2\n3\n5\n13\n13\n3\n5\n2\n3\n15\n3\n4\n2\n10\n4\n4\n4\n5\n5\n3\n5\n3\n4\n7\n4\n27\n3\n6\n4\n15\n3\n5\n6\n6\n5\n4\n8\n3\n9\n2\n6\n3\n4\n3\n7\n4\n18\n3\n11\n3\n3\n8\n9\n7\n24\n3\n219\n7\n10\n4\n5\n9\n12\n2\n5\n4\n4\n4\n3\n3\n19\n5\n8\n16\n8\n6\n22\n3\n23\n3\n242\n9\n4\n3\n3\n5\n7\n3\n3\n5\n8\n3\n7\n5\n14\n8\n10\n3\n4\n3\n7\n4\n6\n7\n4\n10\n4\n3\n11\n3\n7\n10\n3\n13\n6\n8\n12\n10\n5\n7\n9\n3\n4\n7\n7\n10\n8\n30\n9\n19\n4\n3\n19\n15\n4\n13\n3\n215\n223\n4\n7\n4\n8\n17\n16\n3\n7\n6\n5\n5\n4\n12\n3\n7\n4\n4\n13\n4\n5\n2\n5\n6\n5\n6\n6\n7\n10\n18\n23\n9\n3\n3\n6\n5\n2\n4\n2\n7\n3\n3\n2\n5\n5\n14\n10\n224\n6\n3\n4\n3\n7\n5\n9\n3\n6\n4\n2\n5\n11\n4\n3\n3\n2\n8\n4\n7\n4\n10\n7\n3\n3\n18\n18\n17\n3\n3\n3\n4\n5\n3\n3\n4\n12\n7\n3\n11\n13\n5\n4\n7\n13\n5\n4\n11\n3\n12\n3\n6\n4\n4\n21\n4\n6\n9\n5\n3\n10\n8\n4\n6\n4\n4\n6\n5\n4\n8\n6\n4\n6\n4\n4\n5\n9\n6\n3\n4\n2\n9\n3\n18\n2\n4\n3\n13\n3\n6\n6\n8\n7\n9\n3\n2\n16\n3\n4\n6\n3\n2\n33\n22\n14\n4\n9\n12\n4\n5\n6\n3\n23\n9\n4\n3\n5\n5\n3\n4\n5\n3\n5\n3\n10\n4\n5\n5\n8\n4\n4\n6\n8\n5\n4\n3\n4\n6\n3\n3\n3\n5\n9\n12\n6\n5\n9\n3\n5\n3\n2\n2\n2\n18\n3\n2\n21\n2\n5\n4\n6\n4\n5\n10\n3\n9\n3\n2\n10\n7\n3\n6\n6\n4\n4\n8\n12\n7\n3\n7\n3\n3\n9\n3\n4\n5\n4\n4\n5\n5\n10\n15\n4\n4\n14\n6\n227\n3\n14\n5\n216\n22\n5\n4\n2\n2\n6\n3\n4\n2\n9\n9\n4\n3\n28\n13\n11\n4\n5\n3\n3\n2\n3\n3\n5\n3\n4\n3\n5\n23\n26\n3\n4\n5\n6\n4\n6\n3\n5\n5\n3\n4\n3\n2\n2\n2\n7\n14\n3\n6\n7\n17\n2\n2\n15\n14\n16\n4\n6\n7\n13\n6\n4\n5\n6\n16\n3\n3\n28\n3\n6\n15\n3\n9\n2\n4\n6\n3\n3\n22\n4\n12\n6\n7\n2\n5\n4\n10\n3\n16\n6\n9\n2\n5\n12\n7\n5\n5\n5\n5\n2\n11\n9\n17\n4\n3\n11\n7\n3\n5\n15\n4\n3\n4\n211\n8\n7\n5\n4\n7\n6\n7\n6\n3\n6\n5\n6\n5\n3\n4\n4\n26\n4\n6\n10\n4\n4\n3\n2\n3\n3\n4\n5\n9\n3\n9\n4\n4\n5\n5\n8\n2\n4\n2\n3\n8\n4\n11\n19\n5\n8\n6\n3\n5\n6\n12\n3\n2\n4\n16\n12\n3\n4\n4\n8\n6\n5\n6\n6\n219\n8\n222\n6\n16\n3\n13\n19\n5\n4\n3\n11\n6\n10\n4\n7\n7\n12\n5\n3\n3\n5\n6\n10\n3\n8\n2\n5\n4\n7\n2\n4\n4\n2\n12\n9\n6\n4\n2\n40\n2\n4\n10\n4\n223\n4\n2\n20\n6\n7\n24\n5\n4\n5\n2\n20\n16\n6\n5\n13\n2\n3\n3\n19\n3\n2\n4\n5\n6\n7\n11\n12\n5\n6\n7\n7\n3\n5\n3\n5\n3\n14\n3\n4\n4\n2\n11\n1\n7\n3\n9\n6\n11\n12\n5\n8\n6\n221\n4\n2\n12\n4\n3\n15\n4\n5\n226\n7\n218\n7\n5\n4\n5
\n18\n4\n5\n9\n4\n4\n2\n9\n18\n18\n9\n5\n6\n6\n3\n3\n7\n3\n5\n4\n4\n4\n12\n3\n6\n31\n5\n4\n7\n3\n6\n5\n6\n5\n11\n2\n2\n11\n11\n6\n7\n5\n8\n7\n10\n5\n23\n7\n4\n3\n5\n34\n2\n5\n23\n7\n3\n6\n8\n4\n4\n4\n2\n5\n3\n8\n5\n4\n8\n25\n2\n3\n17\n8\n3\n4\n8\n7\n3\n15\n6\n5\n7\n21\n9\n5\n6\n6\n5\n3\n2\n3\n10\n3\n6\n3\n14\n7\n4\n4\n8\n7\n8\n2\n6\n12\n4\n213\n6\n5\n21\n8\n2\n5\n23\n3\n11\n2\n3\n6\n25\n2\n3\n6\n7\n6\n6\n4\n4\n6\n3\n17\n9\n7\n6\n4\n3\n10\n7\n2\n3\n3\n3\n11\n8\n3\n7\n6\n4\n14\n36\n3\n4\n3\n3\n22\n13\n21\n4\n2\n7\n4\n4\n17\n15\n3\n7\n11\n2\n4\n7\n6\n209\n6\n3\n2\n2\n24\n4\n9\n4\n3\n3\n3\n29\n2\n2\n4\n3\n3\n5\n4\n6\n3\n3\n2\n4\n"
  },
  {
    "path": "vendor/github.com/beorn7/perks/quantile/stream.go",
    "content": "// Package quantile computes approximate quantiles over an unbounded data\n// stream within low memory and CPU bounds.\n//\n// A small amount of accuracy is traded to achieve the above properties.\n//\n// Multiple streams can be merged before calling Query to generate a single set\n// of results. This is meaningful when the streams represent the same type of\n// data. See Merge and Samples.\n//\n// For more detailed information about the algorithm used, see:\n//\n// Effective Computation of Biased Quantiles over Data Streams\n//\n// http://www.cs.rutgers.edu/~muthu/bquant.pdf\npackage quantile\n\nimport (\n\t\"math\"\n\t\"sort\"\n)\n\n// Sample holds an observed value and meta information for compression. JSON\n// tags have been added for convenience.\ntype Sample struct {\n\tValue float64 `json:\",string\"`\n\tWidth float64 `json:\",string\"`\n\tDelta float64 `json:\",string\"`\n}\n\n// Samples represents a slice of samples. It implements sort.Interface.\ntype Samples []Sample\n\nfunc (a Samples) Len() int           { return len(a) }\nfunc (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }\nfunc (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\n\ntype invariant func(s *stream, r float64) float64\n\n// NewLowBiased returns an initialized Stream for low-biased quantiles\n// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but\n// error guarantees can still be given even for the lower ranks of the data\n// distribution.\n//\n// The provided epsilon is a relative error, i.e. the true quantile of a value\n// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.\n//\n// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error\n// properties.\nfunc NewLowBiased(epsilon float64) *Stream {\n\tƒ := func(s *stream, r float64) float64 {\n\t\treturn 2 * epsilon * r\n\t}\n\treturn newStream(ƒ)\n}\n\n// NewHighBiased returns an initialized Stream for high-biased quantiles\n// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but\n// error guarantees can still be given even for the higher ranks of the data\n// distribution.\n//\n// The provided epsilon is a relative error, i.e. the true quantile of a value\n// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).\n//\n// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error\n// properties.\nfunc NewHighBiased(epsilon float64) *Stream {\n\tƒ := func(s *stream, r float64) float64 {\n\t\treturn 2 * epsilon * (s.n - r)\n\t}\n\treturn newStream(ƒ)\n}\n\n// NewTargeted returns an initialized Stream concerned with a particular set of\n// quantile values that are supplied a priori. Knowing these a priori reduces\n// space and computation time. The targets map maps the desired quantiles to\n// their absolute errors, i.e. 
the true quantile of a value returned by a query\n// is guaranteed to be within (Quantile±Epsilon).\n//\n// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.\nfunc NewTargeted(targets map[float64]float64) *Stream {\n\tƒ := func(s *stream, r float64) float64 {\n\t\tvar m = math.MaxFloat64\n\t\tvar f float64\n\t\tfor quantile, epsilon := range targets {\n\t\t\tif quantile*s.n <= r {\n\t\t\t\tf = (2 * epsilon * r) / quantile\n\t\t\t} else {\n\t\t\t\tf = (2 * epsilon * (s.n - r)) / (1 - quantile)\n\t\t\t}\n\t\t\tif f < m {\n\t\t\t\tm = f\n\t\t\t}\n\t\t}\n\t\treturn m\n\t}\n\treturn newStream(ƒ)\n}\n\n// Stream computes quantiles for a stream of float64s. It is not thread-safe by\n// design. Take care when using across multiple goroutines.\ntype Stream struct {\n\t*stream\n\tb      Samples\n\tsorted bool\n}\n\nfunc newStream(ƒ invariant) *Stream {\n\tx := &stream{ƒ: ƒ}\n\treturn &Stream{x, make(Samples, 0, 500), true}\n}\n\n// Insert inserts v into the stream.\nfunc (s *Stream) Insert(v float64) {\n\ts.insert(Sample{Value: v, Width: 1})\n}\n\nfunc (s *Stream) insert(sample Sample) {\n\ts.b = append(s.b, sample)\n\ts.sorted = false\n\tif len(s.b) == cap(s.b) {\n\t\ts.flush()\n\t}\n}\n\n// Query returns the computed qth percentiles value. If s was created with\n// NewTargeted, and q is not in the set of quantiles provided a priori, Query\n// will return an unspecified result.\nfunc (s *Stream) Query(q float64) float64 {\n\tif !s.flushed() {\n\t\t// Fast path when there hasn't been enough data for a flush;\n\t\t// this also yields better accuracy for small sets of data.\n\t\tl := len(s.b)\n\t\tif l == 0 {\n\t\t\treturn 0\n\t\t}\n\t\ti := int(math.Ceil(float64(l) * q))\n\t\tif i > 0 {\n\t\t\ti -= 1\n\t\t}\n\t\ts.maybeSort()\n\t\treturn s.b[i].Value\n\t}\n\ts.flush()\n\treturn s.stream.query(q)\n}\n\n// Merge merges samples into the underlying streams samples. This is handy when\n// merging multiple streams from separate threads, database shards, etc.\n//\n// ATTENTION: This method is broken and does not yield correct results. The\n// underlying algorithm is not capable of merging streams correctly.\nfunc (s *Stream) Merge(samples Samples) {\n\tsort.Sort(samples)\n\ts.stream.merge(samples)\n}\n\n// Reset reinitializes and clears the list reusing the samples buffer memory.\nfunc (s *Stream) Reset() {\n\ts.stream.reset()\n\ts.b = s.b[:0]\n}\n\n// Samples returns stream samples held by s.\nfunc (s *Stream) Samples() Samples {\n\tif !s.flushed() {\n\t\treturn s.b\n\t}\n\ts.flush()\n\treturn s.stream.samples()\n}\n\n// Count returns the total number of samples observed in the stream\n// since initialization.\nfunc (s *Stream) Count() int {\n\treturn len(s.b) + s.stream.count()\n}\n\nfunc (s *Stream) flush() {\n\ts.maybeSort()\n\ts.stream.merge(s.b)\n\ts.b = s.b[:0]\n}\n\nfunc (s *Stream) maybeSort() {\n\tif !s.sorted {\n\t\ts.sorted = true\n\t\tsort.Sort(s.b)\n\t}\n}\n\nfunc (s *Stream) flushed() bool {\n\treturn len(s.stream.l) > 0\n}\n\ntype stream struct {\n\tn float64\n\tl []Sample\n\tƒ invariant\n}\n\nfunc (s *stream) reset() {\n\ts.l = s.l[:0]\n\ts.n = 0\n}\n\nfunc (s *stream) insert(v float64) {\n\ts.merge(Samples{{v, 1, 0}})\n}\n\nfunc (s *stream) merge(samples Samples) {\n\t// TODO(beorn7): This tries to merge not only individual samples, but\n\t// whole summaries. The paper doesn't mention merging summaries at\n\t// all. Unittests show that the merging is inaccurate. 
Find out how to\n\t// do merges properly.\n\tvar r float64\n\ti := 0\n\tfor _, sample := range samples {\n\t\tfor ; i < len(s.l); i++ {\n\t\t\tc := s.l[i]\n\t\t\tif c.Value > sample.Value {\n\t\t\t\t// Insert at position i.\n\t\t\t\ts.l = append(s.l, Sample{})\n\t\t\t\tcopy(s.l[i+1:], s.l[i:])\n\t\t\t\ts.l[i] = Sample{\n\t\t\t\t\tsample.Value,\n\t\t\t\t\tsample.Width,\n\t\t\t\t\tmath.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),\n\t\t\t\t\t// TODO(beorn7): How to calculate delta correctly?\n\t\t\t\t}\n\t\t\t\ti++\n\t\t\t\tgoto inserted\n\t\t\t}\n\t\t\tr += c.Width\n\t\t}\n\t\ts.l = append(s.l, Sample{sample.Value, sample.Width, 0})\n\t\ti++\n\tinserted:\n\t\ts.n += sample.Width\n\t\tr += sample.Width\n\t}\n\ts.compress()\n}\n\nfunc (s *stream) count() int {\n\treturn int(s.n)\n}\n\nfunc (s *stream) query(q float64) float64 {\n\tt := math.Ceil(q * s.n)\n\tt += math.Ceil(s.ƒ(s, t) / 2)\n\tp := s.l[0]\n\tvar r float64\n\tfor _, c := range s.l[1:] {\n\t\tr += p.Width\n\t\tif r+c.Width+c.Delta > t {\n\t\t\treturn p.Value\n\t\t}\n\t\tp = c\n\t}\n\treturn p.Value\n}\n\nfunc (s *stream) compress() {\n\tif len(s.l) < 2 {\n\t\treturn\n\t}\n\tx := s.l[len(s.l)-1]\n\txi := len(s.l) - 1\n\tr := s.n - 1 - x.Width\n\n\tfor i := len(s.l) - 2; i >= 0; i-- {\n\t\tc := s.l[i]\n\t\tif c.Width+x.Width+x.Delta <= s.ƒ(s, r) {\n\t\t\tx.Width += c.Width\n\t\t\ts.l[xi] = x\n\t\t\t// Remove element at i.\n\t\t\tcopy(s.l[i:], s.l[i+1:])\n\t\t\ts.l = s.l[:len(s.l)-1]\n\t\t\txi -= 1\n\t\t} else {\n\t\t\tx = c\n\t\t\txi = i\n\t\t}\n\t\tr -= c.Width\n\t}\n}\n\nfunc (s *stream) samples() Samples {\n\tsamples := make(Samples, len(s.l))\n\tcopy(samples, s.l)\n\treturn samples\n}\n"
  },
  {
    "path": "vendor/github.com/beorn7/perks/quantile/stream_test.go",
    "content": "package quantile\n\nimport (\n\t\"math\"\n\t\"math/rand\"\n\t\"sort\"\n\t\"testing\"\n)\n\nvar (\n\tTargets = map[float64]float64{\n\t\t0.01: 0.001,\n\t\t0.10: 0.01,\n\t\t0.50: 0.05,\n\t\t0.90: 0.01,\n\t\t0.99: 0.001,\n\t}\n\tTargetsSmallEpsilon = map[float64]float64{\n\t\t0.01: 0.0001,\n\t\t0.10: 0.001,\n\t\t0.50: 0.005,\n\t\t0.90: 0.001,\n\t\t0.99: 0.0001,\n\t}\n\tLowQuantiles  = []float64{0.01, 0.1, 0.5}\n\tHighQuantiles = []float64{0.99, 0.9, 0.5}\n)\n\nconst RelativeEpsilon = 0.01\n\nfunc verifyPercsWithAbsoluteEpsilon(t *testing.T, a []float64, s *Stream) {\n\tsort.Float64s(a)\n\tfor quantile, epsilon := range Targets {\n\t\tn := float64(len(a))\n\t\tk := int(quantile * n)\n\t\tif k < 1 {\n\t\t\tk = 1\n\t\t}\n\t\tlower := int((quantile - epsilon) * n)\n\t\tif lower < 1 {\n\t\t\tlower = 1\n\t\t}\n\t\tupper := int(math.Ceil((quantile + epsilon) * n))\n\t\tif upper > len(a) {\n\t\t\tupper = len(a)\n\t\t}\n\t\tw, min, max := a[k-1], a[lower-1], a[upper-1]\n\t\tif g := s.Query(quantile); g < min || g > max {\n\t\t\tt.Errorf(\"q=%f: want %v [%f,%f], got %v\", quantile, w, min, max, g)\n\t\t}\n\t}\n}\n\nfunc verifyLowPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) {\n\tsort.Float64s(a)\n\tfor _, qu := range LowQuantiles {\n\t\tn := float64(len(a))\n\t\tk := int(qu * n)\n\n\t\tlowerRank := int((1 - RelativeEpsilon) * qu * n)\n\t\tupperRank := int(math.Ceil((1 + RelativeEpsilon) * qu * n))\n\t\tw, min, max := a[k-1], a[lowerRank-1], a[upperRank-1]\n\t\tif g := s.Query(qu); g < min || g > max {\n\t\t\tt.Errorf(\"q=%f: want %v [%f,%f], got %v\", qu, w, min, max, g)\n\t\t}\n\t}\n}\n\nfunc verifyHighPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) {\n\tsort.Float64s(a)\n\tfor _, qu := range HighQuantiles {\n\t\tn := float64(len(a))\n\t\tk := int(qu * n)\n\n\t\tlowerRank := int((1 - (1+RelativeEpsilon)*(1-qu)) * n)\n\t\tupperRank := int(math.Ceil((1 - (1-RelativeEpsilon)*(1-qu)) * n))\n\t\tw, min, max := a[k-1], a[lowerRank-1], a[upperRank-1]\n\t\tif g := s.Query(qu); g < min || g > max {\n\t\t\tt.Errorf(\"q=%f: want %v [%f,%f], got %v\", qu, w, min, max, g)\n\t\t}\n\t}\n}\n\nfunc populateStream(s *Stream) []float64 {\n\ta := make([]float64, 0, 1e5+100)\n\tfor i := 0; i < cap(a); i++ {\n\t\tv := rand.NormFloat64()\n\t\t// Add 5% asymmetric outliers.\n\t\tif i%20 == 0 {\n\t\t\tv = v*v + 1\n\t\t}\n\t\ts.Insert(v)\n\t\ta = append(a, v)\n\t}\n\treturn a\n}\n\nfunc TestTargetedQuery(t *testing.T) {\n\trand.Seed(42)\n\ts := NewTargeted(Targets)\n\ta := populateStream(s)\n\tverifyPercsWithAbsoluteEpsilon(t, a, s)\n}\n\nfunc TestTargetedQuerySmallSampleSize(t *testing.T) {\n\trand.Seed(42)\n\ts := NewTargeted(TargetsSmallEpsilon)\n\ta := []float64{1, 2, 3, 4, 5}\n\tfor _, v := range a {\n\t\ts.Insert(v)\n\t}\n\tverifyPercsWithAbsoluteEpsilon(t, a, s)\n\t// If not yet flushed, results should be precise:\n\tif !s.flushed() {\n\t\tfor φ, want := range map[float64]float64{\n\t\t\t0.01: 1,\n\t\t\t0.10: 1,\n\t\t\t0.50: 3,\n\t\t\t0.90: 5,\n\t\t\t0.99: 5,\n\t\t} {\n\t\t\tif got := s.Query(φ); got != want {\n\t\t\t\tt.Errorf(\"want %f for φ=%f, got %f\", want, φ, got)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestLowBiasedQuery(t *testing.T) {\n\trand.Seed(42)\n\ts := NewLowBiased(RelativeEpsilon)\n\ta := populateStream(s)\n\tverifyLowPercsWithRelativeEpsilon(t, a, s)\n}\n\nfunc TestHighBiasedQuery(t *testing.T) {\n\trand.Seed(42)\n\ts := NewHighBiased(RelativeEpsilon)\n\ta := populateStream(s)\n\tverifyHighPercsWithRelativeEpsilon(t, a, s)\n}\n\n// BrokenTestTargetedMerge is 
broken, see Merge doc comment.\nfunc BrokenTestTargetedMerge(t *testing.T) {\n\trand.Seed(42)\n\ts1 := NewTargeted(Targets)\n\ts2 := NewTargeted(Targets)\n\ta := populateStream(s1)\n\ta = append(a, populateStream(s2)...)\n\ts1.Merge(s2.Samples())\n\tverifyPercsWithAbsoluteEpsilon(t, a, s1)\n}\n\n// BrokenTestLowBiasedMerge is broken, see Merge doc comment.\nfunc BrokenTestLowBiasedMerge(t *testing.T) {\n\trand.Seed(42)\n\ts1 := NewLowBiased(RelativeEpsilon)\n\ts2 := NewLowBiased(RelativeEpsilon)\n\ta := populateStream(s1)\n\ta = append(a, populateStream(s2)...)\n\ts1.Merge(s2.Samples())\n\tverifyLowPercsWithRelativeEpsilon(t, a, s2)\n}\n\n// BrokenTestHighBiasedMerge is broken, see Merge doc comment.\nfunc BrokenTestHighBiasedMerge(t *testing.T) {\n\trand.Seed(42)\n\ts1 := NewHighBiased(RelativeEpsilon)\n\ts2 := NewHighBiased(RelativeEpsilon)\n\ta := populateStream(s1)\n\ta = append(a, populateStream(s2)...)\n\ts1.Merge(s2.Samples())\n\tverifyHighPercsWithRelativeEpsilon(t, a, s2)\n}\n\nfunc TestUncompressed(t *testing.T) {\n\tq := NewTargeted(Targets)\n\tfor i := 100; i > 0; i-- {\n\t\tq.Insert(float64(i))\n\t}\n\tif g := q.Count(); g != 100 {\n\t\tt.Errorf(\"want count 100, got %d\", g)\n\t}\n\t// Before compression, Query should have 100% accuracy.\n\tfor quantile := range Targets {\n\t\tw := quantile * 100\n\t\tif g := q.Query(quantile); g != w {\n\t\t\tt.Errorf(\"want %f, got %f\", w, g)\n\t\t}\n\t}\n}\n\nfunc TestUncompressedSamples(t *testing.T) {\n\tq := NewTargeted(map[float64]float64{0.99: 0.001})\n\tfor i := 1; i <= 100; i++ {\n\t\tq.Insert(float64(i))\n\t}\n\tif g := q.Samples().Len(); g != 100 {\n\t\tt.Errorf(\"want count 100, got %d\", g)\n\t}\n}\n\nfunc TestUncompressedOne(t *testing.T) {\n\tq := NewTargeted(map[float64]float64{0.99: 0.01})\n\tq.Insert(3.14)\n\tif g := q.Query(0.90); g != 3.14 {\n\t\tt.Error(\"want PI, got\", g)\n\t}\n}\n\nfunc TestDefaults(t *testing.T) {\n\tif g := NewTargeted(map[float64]float64{0.99: 0.001}).Query(0.99); g != 0 {\n\t\tt.Errorf(\"want 0, got %f\", g)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/.gitignore",
    "content": "# Compiled Object files, Static and Dynamic libs (Shared Objects)\n*.o\n*.a\n*.so\n\n# Folders\n_obj\n_test\n\n# Architecture specific extensions/prefixes\n*.[568vq]\n[568vq].out\n\n*.cgo1.go\n*.cgo2.c\n_cgo_defun.c\n_cgo_gotypes.go\n_cgo_export.*\n\n_testmain.go\n\n*.exe\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/.travis.yml",
    "content": "language: go\ngo:\n    - 1.5.4\n    - 1.6.3\n    - 1.7\ninstall:\n    - go get -v golang.org/x/tools/cmd/cover\nscript:\n    - go test -v -tags=safe ./spew\n    - go test -v -tags=testcgo ./spew -covermode=count -coverprofile=profile.cov\nafter_success:\n    - go get -v github.com/mattn/goveralls\n    - export PATH=$PATH:$HOME/gopath/bin\n    - goveralls -coverprofile=profile.cov -service=travis-ci\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/LICENSE",
    "content": "ISC License\n\nCopyright (c) 2012-2016 Dave Collins <dave@davec.name>\n\nPermission to use, copy, modify, and distribute this software for any\npurpose with or without fee is hereby granted, provided that the above\ncopyright notice and this permission notice appear in all copies.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\nWITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\nMERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\nANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\nWHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\nACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\nOR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/README.md",
    "content": "go-spew\n=======\n\n[![Build Status](https://img.shields.io/travis/davecgh/go-spew.svg)]\n(https://travis-ci.org/davecgh/go-spew) [![ISC License]\n(http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) [![Coverage Status]\n(https://img.shields.io/coveralls/davecgh/go-spew.svg)]\n(https://coveralls.io/r/davecgh/go-spew?branch=master)\n\n\nGo-spew implements a deep pretty printer for Go data structures to aid in\ndebugging.  A comprehensive suite of tests with 100% test coverage is provided\nto ensure proper functionality.  See `test_coverage.txt` for the gocov coverage\nreport.  Go-spew is licensed under the liberal ISC license, so it may be used in\nopen source or commercial projects.\n\nIf you're interested in reading about how this package came to life and some\nof the challenges involved in providing a deep pretty printer, there is a blog\npost about it\n[here](https://web.archive.org/web/20160304013555/https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/).\n\n## Documentation\n\n[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)]\n(http://godoc.org/github.com/davecgh/go-spew/spew)\n\nFull `go doc` style documentation for the project can be viewed online without\ninstalling this package by using the excellent GoDoc site here:\nhttp://godoc.org/github.com/davecgh/go-spew/spew\n\nYou can also view the documentation locally once the package is installed with\nthe `godoc` tool by running `godoc -http=\":6060\"` and pointing your browser to\nhttp://localhost:6060/pkg/github.com/davecgh/go-spew/spew\n\n## Installation\n\n```bash\n$ go get -u github.com/davecgh/go-spew/spew\n```\n\n## Quick Start\n\nAdd this import line to the file you're working in:\n\n```Go\nimport \"github.com/davecgh/go-spew/spew\"\n```\n\nTo dump a variable with full newlines, indentation, type, and pointer\ninformation use Dump, Fdump, or Sdump:\n\n```Go\nspew.Dump(myVar1, myVar2, ...)\nspew.Fdump(someWriter, myVar1, myVar2, ...)\nstr := spew.Sdump(myVar1, myVar2, ...)\n```\n\nAlternatively, if you would prefer to use format strings with a compacted inline\nprinting style, use the convenience wrappers Printf, Fprintf, etc with %v (most\ncompact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types\nand pointer addresses): \n\n```Go\nspew.Printf(\"myVar1: %v -- myVar2: %+v\", myVar1, myVar2)\nspew.Printf(\"myVar3: %#v -- myVar4: %#+v\", myVar3, myVar4)\nspew.Fprintf(someWriter, \"myVar1: %v -- myVar2: %+v\", myVar1, myVar2)\nspew.Fprintf(someWriter, \"myVar3: %#v -- myVar4: %#+v\", myVar3, myVar4)\n```\n\n## Debugging a Web Application Example\n\nHere is an example of how you can use `spew.Sdump()` to help debug a web application. Please be sure to wrap your output using the `html.EscapeString()` function for safety reasons. 
You should also only use this debugging technique in a development environment, never in production.\n\n```Go\npackage main\n\nimport (\n    \"fmt\"\n    \"html\"\n    \"net/http\"\n\n    \"github.com/davecgh/go-spew/spew\"\n)\n\nfunc handler(w http.ResponseWriter, r *http.Request) {\n    w.Header().Set(\"Content-Type\", \"text/html\")\n    fmt.Fprintf(w, \"Hi there, %s!\", r.URL.Path[1:])\n    fmt.Fprintf(w, \"<!--\\n\" + html.EscapeString(spew.Sdump(w)) + \"\\n-->\")\n}\n\nfunc main() {\n    http.HandleFunc(\"/\", handler)\n    http.ListenAndServe(\":8080\", nil)\n}\n```\n\n## Sample Dump Output\n\n```\n(main.Foo) {\n unexportedField: (*main.Bar)(0xf84002e210)({\n  flag: (main.Flag) flagTwo,\n  data: (uintptr) <nil>\n }),\n ExportedField: (map[interface {}]interface {}) {\n  (string) \"one\": (bool) true\n }\n}\n([]uint8) {\n 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |\n 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!\"#$%&'()*+,-./0|\n 00000020  31 32                                             |12|\n}\n```\n\n## Sample Formatter Output\n\nDouble pointer to a uint8:\n```\n\t  %v: <**>5\n\t %+v: <**>(0xf8400420d0->0xf8400420c8)5\n\t %#v: (**uint8)5\n\t%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5\n```\n\nPointer to circular struct with a uint8 field and a pointer to itself:\n```\n\t  %v: <*>{1 <*><shown>}\n\t %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}\n\t %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}\n\t%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}\n```\n\n## Configuration Options\n\nConfiguration of spew is handled by fields in the ConfigState type. For\nconvenience, all of the top-level functions use a global state available via the\nspew.Config global.\n\nIt is also possible to create a ConfigState instance that provides methods\nequivalent to the top-level functions. This allows concurrent configuration\noptions. See the ConfigState documentation for more details.\n\n```\n* Indent\n\tString to use for each indentation level for Dump functions.\n\tIt is a single space by default.  A popular alternative is \"\\t\".\n\n* MaxDepth\n\tMaximum number of levels to descend into nested data structures.\n\tThere is no limit by default.\n\n* DisableMethods\n\tDisables invocation of error and Stringer interface methods.\n\tMethod invocation is enabled by default.\n\n* DisablePointerMethods\n\tDisables invocation of error and Stringer interface methods on types\n\twhich only accept pointer receivers from non-pointer variables.  This option\n\trelies on access to the unsafe package, so it will not have any effect when\n\trunning in environments without access to the unsafe package such as Google\n\tApp Engine or with the \"safe\" build tag specified.\n\tPointer method invocation is enabled by default.\n\n* DisablePointerAddresses\n\tDisablePointerAddresses specifies whether to disable the printing of\n\tpointer addresses. This is useful when diffing data structures in tests.\n\n* DisableCapacities\n\tDisableCapacities specifies whether to disable the printing of capacities\n\tfor arrays, slices, maps and channels. This is useful when diffing data\n\tstructures in tests.\n\n* ContinueOnMethod\n\tEnables recursion into types after invoking error and Stringer interface\n\tmethods. Recursion after method invocation is disabled by default.\n\n* SortKeys\n\tSpecifies map keys should be sorted before being printed. Use\n\tthis to have a more deterministic, diffable output.  
Note that\n\tonly native types (bool, int, uint, floats, uintptr and string)\n\tand types which implement error or Stringer interfaces are supported,\n\twith other types sorted according to the reflect.Value.String() output\n\twhich guarantees display stability.  Natural map order is used by\n\tdefault.\n\n* SpewKeys\n\tSpewKeys specifies that, as a last resort attempt, map keys should be\n\tspewed to strings and sorted by those strings.  This is only considered\n\tif SortKeys is true.\n\n```\n\n## Unsafe Package Dependency\n\nThis package relies on the unsafe package to perform some of the more advanced\nfeatures, however it also supports a \"limited\" mode which allows it to work in\nenvironments where the unsafe package is not available.  By default, it will\noperate in this mode on Google App Engine and when compiled with GopherJS.  The\n\"safe\" build tag may also be specified to force the package to build without\nusing the unsafe package.\n\n## License\n\nGo-spew is licensed under the [copyfree](http://copyfree.org) ISC License.\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/cov_report.sh",
    "content": "#!/bin/sh\n\n# This script uses gocov to generate a test coverage report.\n# The gocov tool my be obtained with the following command:\n#   go get github.com/axw/gocov/gocov\n#\n# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH.\n\n# Check for gocov.\nif ! type gocov >/dev/null 2>&1; then\n\techo >&2 \"This script requires the gocov tool.\"\n\techo >&2 \"You may obtain it with the following command:\"\n\techo >&2 \"go get github.com/axw/gocov/gocov\"\n\texit 1\nfi\n\n# Only run the cgo tests if gcc is installed.\nif type gcc >/dev/null 2>&1; then\n\t(cd spew && gocov test -tags testcgo | gocov report)\nelse\n\t(cd spew && gocov test | gocov report)\nfi\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/bypass.go",
    "content": "// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>\n//\n// Permission to use, copy, modify, and distribute this software for any\n// purpose with or without fee is hereby granted, provided that the above\n// copyright notice and this permission notice appear in all copies.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n// NOTE: Due to the following build constraints, this file will only be compiled\n// when the code is not running on Google App Engine, compiled by GopherJS, and\n// \"-tags safe\" is not added to the go build command line.  The \"disableunsafe\"\n// tag is deprecated and thus should not be used.\n// +build !js,!appengine,!safe,!disableunsafe\n\npackage spew\n\nimport (\n\t\"reflect\"\n\t\"unsafe\"\n)\n\nconst (\n\t// UnsafeDisabled is a build-time constant which specifies whether or\n\t// not access to the unsafe package is available.\n\tUnsafeDisabled = false\n\n\t// ptrSize is the size of a pointer on the current arch.\n\tptrSize = unsafe.Sizeof((*byte)(nil))\n)\n\nvar (\n\t// offsetPtr, offsetScalar, and offsetFlag are the offsets for the\n\t// internal reflect.Value fields.  These values are valid before golang\n\t// commit ecccf07e7f9d which changed the format.  The are also valid\n\t// after commit 82f48826c6c7 which changed the format again to mirror\n\t// the original format.  Code in the init function updates these offsets\n\t// as necessary.\n\toffsetPtr    = uintptr(ptrSize)\n\toffsetScalar = uintptr(0)\n\toffsetFlag   = uintptr(ptrSize * 2)\n\n\t// flagKindWidth and flagKindShift indicate various bits that the\n\t// reflect package uses internally to track kind information.\n\t//\n\t// flagRO indicates whether or not the value field of a reflect.Value is\n\t// read-only.\n\t//\n\t// flagIndir indicates whether the value field of a reflect.Value is\n\t// the actual data or a pointer to the data.\n\t//\n\t// These values are valid before golang commit 90a7c3c86944 which\n\t// changed their positions.  Code in the init function updates these\n\t// flags as necessary.\n\tflagKindWidth = uintptr(5)\n\tflagKindShift = uintptr(flagKindWidth - 1)\n\tflagRO        = uintptr(1 << 0)\n\tflagIndir     = uintptr(1 << 1)\n)\n\nfunc init() {\n\t// Older versions of reflect.Value stored small integers directly in the\n\t// ptr field (which is named val in the older versions).  Versions\n\t// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named\n\t// scalar for this purpose which unfortunately came before the flag\n\t// field, so the offset of the flag field is different for those\n\t// versions.\n\t//\n\t// This code constructs a new reflect.Value from a known small integer\n\t// and checks if the size of the reflect.Value struct indicates it has\n\t// the scalar field. When it does, the offsets are updated accordingly.\n\tvv := reflect.ValueOf(0xf00)\n\tif unsafe.Sizeof(vv) == (ptrSize * 4) {\n\t\toffsetScalar = ptrSize * 2\n\t\toffsetFlag = ptrSize * 3\n\t}\n\n\t// Commit 90a7c3c86944 changed the flag positions such that the low\n\t// order bits are the kind.  
This code extracts the kind from the flags\n\t// field and ensures it's the correct type.  When it's not, the flag\n\t// order has been changed to the newer format, so the flags are updated\n\t// accordingly.\n\tupf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)\n\tupfv := *(*uintptr)(upf)\n\tflagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)\n\tif (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {\n\t\tflagKindShift = 0\n\t\tflagRO = 1 << 5\n\t\tflagIndir = 1 << 6\n\n\t\t// Commit adf9b30e5594 modified the flags to separate the\n\t\t// flagRO flag into two bits which specifies whether or not the\n\t\t// field is embedded.  This causes flagIndir to move over a bit\n\t\t// and means that flagRO is the combination of either of the\n\t\t// original flagRO bit and the new bit.\n\t\t//\n\t\t// This code detects the change by extracting what used to be\n\t\t// the indirect bit to ensure it's set.  When it's not, the flag\n\t\t// order has been changed to the newer format, so the flags are\n\t\t// updated accordingly.\n\t\tif upfv&flagIndir == 0 {\n\t\t\tflagRO = 3 << 5\n\t\t\tflagIndir = 1 << 7\n\t\t}\n\t}\n}\n\n// unsafeReflectValue converts the passed reflect.Value into a one that bypasses\n// the typical safety restrictions preventing access to unaddressable and\n// unexported data.  It works by digging the raw pointer to the underlying\n// value out of the protected value and generating a new unprotected (unsafe)\n// reflect.Value to it.\n//\n// This allows us to check for implementations of the Stringer and error\n// interfaces to be used for pretty printing ordinarily unaddressable and\n// inaccessible values such as unexported struct fields.\nfunc unsafeReflectValue(v reflect.Value) (rv reflect.Value) {\n\tindirects := 1\n\tvt := v.Type()\n\tupv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)\n\trvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))\n\tif rvf&flagIndir != 0 {\n\t\tvt = reflect.PtrTo(v.Type())\n\t\tindirects++\n\t} else if offsetScalar != 0 {\n\t\t// The value is in the scalar field when it's not one of the\n\t\t// reference types.\n\t\tswitch vt.Kind() {\n\t\tcase reflect.Uintptr:\n\t\tcase reflect.Chan:\n\t\tcase reflect.Func:\n\t\tcase reflect.Map:\n\t\tcase reflect.Ptr:\n\t\tcase reflect.UnsafePointer:\n\t\tdefault:\n\t\t\tupv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +\n\t\t\t\toffsetScalar)\n\t\t}\n\t}\n\n\tpv := reflect.NewAt(vt, upv)\n\trv = pv\n\tfor i := 0; i < indirects; i++ {\n\t\trv = rv.Elem()\n\t}\n\treturn rv\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/bypasssafe.go",
    "content": "// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>\n//\n// Permission to use, copy, modify, and distribute this software for any\n// purpose with or without fee is hereby granted, provided that the above\n// copyright notice and this permission notice appear in all copies.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n// NOTE: Due to the following build constraints, this file will only be compiled\n// when the code is running on Google App Engine, compiled by GopherJS, or\n// \"-tags safe\" is added to the go build command line.  The \"disableunsafe\"\n// tag is deprecated and thus should not be used.\n// +build js appengine safe disableunsafe\n\npackage spew\n\nimport \"reflect\"\n\nconst (\n\t// UnsafeDisabled is a build-time constant which specifies whether or\n\t// not access to the unsafe package is available.\n\tUnsafeDisabled = true\n)\n\n// unsafeReflectValue typically converts the passed reflect.Value into a one\n// that bypasses the typical safety restrictions preventing access to\n// unaddressable and unexported data.  However, doing this relies on access to\n// the unsafe package.  This is a stub version which simply returns the passed\n// reflect.Value when the unsafe package is not available.\nfunc unsafeReflectValue(v reflect.Value) reflect.Value {\n\treturn v\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/common.go",
    "content": "/*\n * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n */\n\npackage spew\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\t\"sort\"\n\t\"strconv\"\n)\n\n// Some constants in the form of bytes to avoid string overhead.  This mirrors\n// the technique used in the fmt package.\nvar (\n\tpanicBytes            = []byte(\"(PANIC=\")\n\tplusBytes             = []byte(\"+\")\n\tiBytes                = []byte(\"i\")\n\ttrueBytes             = []byte(\"true\")\n\tfalseBytes            = []byte(\"false\")\n\tinterfaceBytes        = []byte(\"(interface {})\")\n\tcommaNewlineBytes     = []byte(\",\\n\")\n\tnewlineBytes          = []byte(\"\\n\")\n\topenBraceBytes        = []byte(\"{\")\n\topenBraceNewlineBytes = []byte(\"{\\n\")\n\tcloseBraceBytes       = []byte(\"}\")\n\tasteriskBytes         = []byte(\"*\")\n\tcolonBytes            = []byte(\":\")\n\tcolonSpaceBytes       = []byte(\": \")\n\topenParenBytes        = []byte(\"(\")\n\tcloseParenBytes       = []byte(\")\")\n\tspaceBytes            = []byte(\" \")\n\tpointerChainBytes     = []byte(\"->\")\n\tnilAngleBytes         = []byte(\"<nil>\")\n\tmaxNewlineBytes       = []byte(\"<max depth reached>\\n\")\n\tmaxShortBytes         = []byte(\"<max>\")\n\tcircularBytes         = []byte(\"<already shown>\")\n\tcircularShortBytes    = []byte(\"<shown>\")\n\tinvalidAngleBytes     = []byte(\"<invalid>\")\n\topenBracketBytes      = []byte(\"[\")\n\tcloseBracketBytes     = []byte(\"]\")\n\tpercentBytes          = []byte(\"%\")\n\tprecisionBytes        = []byte(\".\")\n\topenAngleBytes        = []byte(\"<\")\n\tcloseAngleBytes       = []byte(\">\")\n\topenMapBytes          = []byte(\"map[\")\n\tcloseMapBytes         = []byte(\"]\")\n\tlenEqualsBytes        = []byte(\"len=\")\n\tcapEqualsBytes        = []byte(\"cap=\")\n)\n\n// hexDigits is used to map a decimal value to a hex digit.\nvar hexDigits = \"0123456789abcdef\"\n\n// catchPanic handles any panics that might occur during the handleMethods\n// calls.\nfunc catchPanic(w io.Writer, v reflect.Value) {\n\tif err := recover(); err != nil {\n\t\tw.Write(panicBytes)\n\t\tfmt.Fprintf(w, \"%v\", err)\n\t\tw.Write(closeParenBytes)\n\t}\n}\n\n// handleMethods attempts to call the Error and String methods on the underlying\n// type the passed reflect.Value represents and outputes the result to Writer w.\n//\n// It handles panics in any called methods by catching and displaying the error\n// as the formatted value.\nfunc handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {\n\t// We need an interface to check if the type implements the error or\n\t// Stringer interface.  However, the reflect package won't give us an\n\t// interface on certain things like unexported struct fields in order\n\t// to enforce visibility rules.  
We use unsafe, when it's available,\n\t// to bypass these restrictions since this package does not mutate the\n\t// values.\n\tif !v.CanInterface() {\n\t\tif UnsafeDisabled {\n\t\t\treturn false\n\t\t}\n\n\t\tv = unsafeReflectValue(v)\n\t}\n\n\t// Choose whether or not to do error and Stringer interface lookups against\n\t// the base type or a pointer to the base type depending on settings.\n\t// Technically calling one of these methods with a pointer receiver can\n\t// mutate the value, however, types which choose to satisify an error or\n\t// Stringer interface with a pointer receiver should not be mutating their\n\t// state inside these interface methods.\n\tif !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {\n\t\tv = unsafeReflectValue(v)\n\t}\n\tif v.CanAddr() {\n\t\tv = v.Addr()\n\t}\n\n\t// Is it an error or Stringer?\n\tswitch iface := v.Interface().(type) {\n\tcase error:\n\t\tdefer catchPanic(w, v)\n\t\tif cs.ContinueOnMethod {\n\t\t\tw.Write(openParenBytes)\n\t\t\tw.Write([]byte(iface.Error()))\n\t\t\tw.Write(closeParenBytes)\n\t\t\tw.Write(spaceBytes)\n\t\t\treturn false\n\t\t}\n\n\t\tw.Write([]byte(iface.Error()))\n\t\treturn true\n\n\tcase fmt.Stringer:\n\t\tdefer catchPanic(w, v)\n\t\tif cs.ContinueOnMethod {\n\t\t\tw.Write(openParenBytes)\n\t\t\tw.Write([]byte(iface.String()))\n\t\t\tw.Write(closeParenBytes)\n\t\t\tw.Write(spaceBytes)\n\t\t\treturn false\n\t\t}\n\t\tw.Write([]byte(iface.String()))\n\t\treturn true\n\t}\n\treturn false\n}\n\n// printBool outputs a boolean value as true or false to Writer w.\nfunc printBool(w io.Writer, val bool) {\n\tif val {\n\t\tw.Write(trueBytes)\n\t} else {\n\t\tw.Write(falseBytes)\n\t}\n}\n\n// printInt outputs a signed integer value to Writer w.\nfunc printInt(w io.Writer, val int64, base int) {\n\tw.Write([]byte(strconv.FormatInt(val, base)))\n}\n\n// printUint outputs an unsigned integer value to Writer w.\nfunc printUint(w io.Writer, val uint64, base int) {\n\tw.Write([]byte(strconv.FormatUint(val, base)))\n}\n\n// printFloat outputs a floating point value using the specified precision,\n// which is expected to be 32 or 64bit, to Writer w.\nfunc printFloat(w io.Writer, val float64, precision int) {\n\tw.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))\n}\n\n// printComplex outputs a complex value using the specified float precision\n// for the real and imaginary parts to Writer w.\nfunc printComplex(w io.Writer, c complex128, floatPrecision int) {\n\tr := real(c)\n\tw.Write(openParenBytes)\n\tw.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))\n\ti := imag(c)\n\tif i >= 0 {\n\t\tw.Write(plusBytes)\n\t}\n\tw.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))\n\tw.Write(iBytes)\n\tw.Write(closeParenBytes)\n}\n\n// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x'\n// prefix to Writer w.\nfunc printHexPtr(w io.Writer, p uintptr) {\n\t// Null pointer.\n\tnum := uint64(p)\n\tif num == 0 {\n\t\tw.Write(nilAngleBytes)\n\t\treturn\n\t}\n\n\t// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix\n\tbuf := make([]byte, 18)\n\n\t// It's simpler to construct the hex string right to left.\n\tbase := uint64(16)\n\ti := len(buf) - 1\n\tfor num >= base {\n\t\tbuf[i] = hexDigits[num%base]\n\t\tnum /= base\n\t\ti--\n\t}\n\tbuf[i] = hexDigits[num]\n\n\t// Add '0x' prefix.\n\ti--\n\tbuf[i] = 'x'\n\ti--\n\tbuf[i] = '0'\n\n\t// Strip unused leading bytes.\n\tbuf = buf[i:]\n\tw.Write(buf)\n}\n\n// valuesSorter implements sort.Interface to allow a slice of reflect.Value\n// 
elements to be sorted.\ntype valuesSorter struct {\n\tvalues  []reflect.Value\n\tstrings []string // either nil or same len and values\n\tcs      *ConfigState\n}\n\n// newValuesSorter initializes a valuesSorter instance, which holds a set of\n// surrogate keys on which the data should be sorted.  It uses flags in\n// ConfigState to decide if and how to populate those surrogate keys.\nfunc newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {\n\tvs := &valuesSorter{values: values, cs: cs}\n\tif canSortSimply(vs.values[0].Kind()) {\n\t\treturn vs\n\t}\n\tif !cs.DisableMethods {\n\t\tvs.strings = make([]string, len(values))\n\t\tfor i := range vs.values {\n\t\t\tb := bytes.Buffer{}\n\t\t\tif !handleMethods(cs, &b, vs.values[i]) {\n\t\t\t\tvs.strings = nil\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tvs.strings[i] = b.String()\n\t\t}\n\t}\n\tif vs.strings == nil && cs.SpewKeys {\n\t\tvs.strings = make([]string, len(values))\n\t\tfor i := range vs.values {\n\t\t\tvs.strings[i] = Sprintf(\"%#v\", vs.values[i].Interface())\n\t\t}\n\t}\n\treturn vs\n}\n\n// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted\n// directly, or whether it should be considered for sorting by surrogate keys\n// (if the ConfigState allows it).\nfunc canSortSimply(kind reflect.Kind) bool {\n\t// This switch parallels valueSortLess, except for the default case.\n\tswitch kind {\n\tcase reflect.Bool:\n\t\treturn true\n\tcase reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:\n\t\treturn true\n\tcase reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:\n\t\treturn true\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn true\n\tcase reflect.String:\n\t\treturn true\n\tcase reflect.Uintptr:\n\t\treturn true\n\tcase reflect.Array:\n\t\treturn true\n\t}\n\treturn false\n}\n\n// Len returns the number of values in the slice.  It is part of the\n// sort.Interface implementation.\nfunc (s *valuesSorter) Len() int {\n\treturn len(s.values)\n}\n\n// Swap swaps the values at the passed indices.  It is part of the\n// sort.Interface implementation.\nfunc (s *valuesSorter) Swap(i, j int) {\n\ts.values[i], s.values[j] = s.values[j], s.values[i]\n\tif s.strings != nil {\n\t\ts.strings[i], s.strings[j] = s.strings[j], s.strings[i]\n\t}\n}\n\n// valueSortLess returns whether the first value should sort before the second\n// value.  It is used by valueSorter.Less as part of the sort.Interface\n// implementation.\nfunc valueSortLess(a, b reflect.Value) bool {\n\tswitch a.Kind() {\n\tcase reflect.Bool:\n\t\treturn !a.Bool() && b.Bool()\n\tcase reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:\n\t\treturn a.Int() < b.Int()\n\tcase reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:\n\t\treturn a.Uint() < b.Uint()\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn a.Float() < b.Float()\n\tcase reflect.String:\n\t\treturn a.String() < b.String()\n\tcase reflect.Uintptr:\n\t\treturn a.Uint() < b.Uint()\n\tcase reflect.Array:\n\t\t// Compare the contents of both arrays.\n\t\tl := a.Len()\n\t\tfor i := 0; i < l; i++ {\n\t\t\tav := a.Index(i)\n\t\t\tbv := b.Index(i)\n\t\t\tif av.Interface() == bv.Interface() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn valueSortLess(av, bv)\n\t\t}\n\t}\n\treturn a.String() < b.String()\n}\n\n// Less returns whether the value at index i should sort before the\n// value at index j.  
It is part of the sort.Interface implementation.\nfunc (s *valuesSorter) Less(i, j int) bool {\n\tif s.strings == nil {\n\t\treturn valueSortLess(s.values[i], s.values[j])\n\t}\n\treturn s.strings[i] < s.strings[j]\n}\n\n// sortValues is a sort function that handles both native types and any type that\n// can be converted to error or Stringer.  Other inputs are sorted according to\n// their Value.String() value to ensure display stability.\nfunc sortValues(values []reflect.Value, cs *ConfigState) {\n\tif len(values) == 0 {\n\t\treturn\n\t}\n\tsort.Sort(newValuesSorter(values, cs))\n}\n"
  },
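  {
    "path": "vendor/github.com/davecgh/go-spew/spew/editorial_sort_example_test.go",
    "content": "// Editorial note: this file is an illustrative sketch added by the editor;\n// it is not part of the upstream go-spew sources vendored here, and the file,\n// type, and function names are hypothetical.  sort.go sorts map keys in three\n// tiers: simple kinds directly, then error/Stringer output as a surrogate\n// key, then (with SpewKeys) the spewed representation as a last resort.  This\n// sketch drives all three tiers through the public ConfigState options.\n\npackage spew_test\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n)\n\n// keyed is a struct key type whose String method supplies the surrogate sort\n// key for the second tier.\ntype keyed struct {\n\tx int\n}\n\nfunc (k keyed) String() string {\n\treturn fmt.Sprintf(\"key-%d\", k.x)\n}\n\nfunc surrogateKeySketch() {\n\tcs := spew.ConfigState{Indent: \" \", SortKeys: true}\n\n\t// Tier 1: int keys are simple kinds and sort directly.\n\tfmt.Print(cs.Sdump(map[int]string{2: \"b\", 1: \"a\"}))\n\n\t// Tier 2: struct keys sort by their Stringer output (\"key-1\" < \"key-2\").\n\tfmt.Print(cs.Sdump(map[keyed]string{{2}: \"b\", {1}: \"a\"}))\n\n\t// Tier 3: with methods disabled, SpewKeys falls back to spewed strings.\n\tcs.DisableMethods, cs.SpewKeys = true, true\n\tfmt.Print(cs.Sdump(map[keyed]string{{2}: \"b\", {1}: \"a\"}))\n}\n"
  },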
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/common_test.go",
    "content": "/*\n * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n */\n\npackage spew_test\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n)\n\n// custom type to test Stinger interface on non-pointer receiver.\ntype stringer string\n\n// String implements the Stringer interface for testing invocation of custom\n// stringers on types with non-pointer receivers.\nfunc (s stringer) String() string {\n\treturn \"stringer \" + string(s)\n}\n\n// custom type to test Stinger interface on pointer receiver.\ntype pstringer string\n\n// String implements the Stringer interface for testing invocation of custom\n// stringers on types with only pointer receivers.\nfunc (s *pstringer) String() string {\n\treturn \"stringer \" + string(*s)\n}\n\n// xref1 and xref2 are cross referencing structs for testing circular reference\n// detection.\ntype xref1 struct {\n\tps2 *xref2\n}\ntype xref2 struct {\n\tps1 *xref1\n}\n\n// indirCir1, indirCir2, and indirCir3 are used to generate an indirect circular\n// reference for testing detection.\ntype indirCir1 struct {\n\tps2 *indirCir2\n}\ntype indirCir2 struct {\n\tps3 *indirCir3\n}\ntype indirCir3 struct {\n\tps1 *indirCir1\n}\n\n// embed is used to test embedded structures.\ntype embed struct {\n\ta string\n}\n\n// embedwrap is used to test embedded structures.\ntype embedwrap struct {\n\t*embed\n\te *embed\n}\n\n// panicer is used to intentionally cause a panic for testing spew properly\n// handles them\ntype panicer int\n\nfunc (p panicer) String() string {\n\tpanic(\"test panic\")\n}\n\n// customError is used to test custom error interface invocation.\ntype customError int\n\nfunc (e customError) Error() string {\n\treturn fmt.Sprintf(\"error: %d\", int(e))\n}\n\n// stringizeWants converts a slice of wanted test output into a format suitable\n// for a test error message.\nfunc stringizeWants(wants []string) string {\n\ts := \"\"\n\tfor i, want := range wants {\n\t\tif i > 0 {\n\t\t\ts += fmt.Sprintf(\"want%d: %s\", i+1, want)\n\t\t} else {\n\t\t\ts += \"want: \" + want\n\t\t}\n\t}\n\treturn s\n}\n\n// testFailed returns whether or not a test failed by checking if the result\n// of the test is in the slice of wanted strings.\nfunc testFailed(result string, wants []string) bool {\n\tfor _, want := range wants {\n\t\tif result == want {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\ntype sortableStruct struct {\n\tx int\n}\n\nfunc (ss sortableStruct) String() string {\n\treturn fmt.Sprintf(\"ss.%d\", ss.x)\n}\n\ntype unsortableStruct struct {\n\tx int\n}\n\ntype sortTestCase struct {\n\tinput    []reflect.Value\n\texpected []reflect.Value\n}\n\nfunc helpTestSortValues(tests []sortTestCase, cs *spew.ConfigState, t *testing.T) {\n\tgetInterfaces := func(values 
[]reflect.Value) []interface{} {\n\t\tinterfaces := []interface{}{}\n\t\tfor _, v := range values {\n\t\t\tinterfaces = append(interfaces, v.Interface())\n\t\t}\n\t\treturn interfaces\n\t}\n\n\tfor _, test := range tests {\n\t\tspew.SortValues(test.input, cs)\n\t\t// reflect.DeepEqual cannot really make sense of reflect.Value,\n\t\t// probably because of all the pointer tricks. For instance,\n\t\t// v(2.0) != v(2.0) on a 32-bit system. Turn them into interface{}\n\t\t// instead.\n\t\tinput := getInterfaces(test.input)\n\t\texpected := getInterfaces(test.expected)\n\t\tif !reflect.DeepEqual(input, expected) {\n\t\t\tt.Errorf(\"Sort mismatch:\\n %v != %v\", input, expected)\n\t\t}\n\t}\n}\n\n// TestSortValues ensures the sort functionality for reflect.Value based sorting\n// works as intended.\nfunc TestSortValues(t *testing.T) {\n\tv := reflect.ValueOf\n\n\ta := v(\"a\")\n\tb := v(\"b\")\n\tc := v(\"c\")\n\tembedA := v(embed{\"a\"})\n\tembedB := v(embed{\"b\"})\n\tembedC := v(embed{\"c\"})\n\ttests := []sortTestCase{\n\t\t// No values.\n\t\t{\n\t\t\t[]reflect.Value{},\n\t\t\t[]reflect.Value{},\n\t\t},\n\t\t// Bools.\n\t\t{\n\t\t\t[]reflect.Value{v(false), v(true), v(false)},\n\t\t\t[]reflect.Value{v(false), v(false), v(true)},\n\t\t},\n\t\t// Ints.\n\t\t{\n\t\t\t[]reflect.Value{v(2), v(1), v(3)},\n\t\t\t[]reflect.Value{v(1), v(2), v(3)},\n\t\t},\n\t\t// Uints.\n\t\t{\n\t\t\t[]reflect.Value{v(uint8(2)), v(uint8(1)), v(uint8(3))},\n\t\t\t[]reflect.Value{v(uint8(1)), v(uint8(2)), v(uint8(3))},\n\t\t},\n\t\t// Floats.\n\t\t{\n\t\t\t[]reflect.Value{v(2.0), v(1.0), v(3.0)},\n\t\t\t[]reflect.Value{v(1.0), v(2.0), v(3.0)},\n\t\t},\n\t\t// Strings.\n\t\t{\n\t\t\t[]reflect.Value{b, a, c},\n\t\t\t[]reflect.Value{a, b, c},\n\t\t},\n\t\t// Arrays.\n\t\t{\n\t\t\t[]reflect.Value{v([3]int{3, 2, 1}), v([3]int{1, 3, 2}), v([3]int{1, 2, 3})},\n\t\t\t[]reflect.Value{v([3]int{1, 2, 3}), v([3]int{1, 3, 2}), v([3]int{3, 2, 1})},\n\t\t},\n\t\t// Uintptrs.\n\t\t{\n\t\t\t[]reflect.Value{v(uintptr(2)), v(uintptr(1)), v(uintptr(3))},\n\t\t\t[]reflect.Value{v(uintptr(1)), v(uintptr(2)), v(uintptr(3))},\n\t\t},\n\t\t// SortableStructs.\n\t\t{\n\t\t\t// Note: not sorted - DisableMethods is set.\n\t\t\t[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},\n\t\t\t[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},\n\t\t},\n\t\t// UnsortableStructs.\n\t\t{\n\t\t\t// Note: not sorted - SpewKeys is false.\n\t\t\t[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},\n\t\t\t[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},\n\t\t},\n\t\t// Invalid.\n\t\t{\n\t\t\t[]reflect.Value{embedB, embedA, embedC},\n\t\t\t[]reflect.Value{embedB, embedA, embedC},\n\t\t},\n\t}\n\tcs := spew.ConfigState{DisableMethods: true, SpewKeys: false}\n\thelpTestSortValues(tests, &cs, t)\n}\n\n// TestSortValuesWithMethods ensures the sort functionality for reflect.Value\n// based sorting works as intended when using string methods.\nfunc TestSortValuesWithMethods(t *testing.T) {\n\tv := reflect.ValueOf\n\n\ta := v(\"a\")\n\tb := v(\"b\")\n\tc := v(\"c\")\n\ttests := []sortTestCase{\n\t\t// Ints.\n\t\t{\n\t\t\t[]reflect.Value{v(2), v(1), v(3)},\n\t\t\t[]reflect.Value{v(1), v(2), v(3)},\n\t\t},\n\t\t// Strings.\n\t\t{\n\t\t\t[]reflect.Value{b, a, c},\n\t\t\t[]reflect.Value{a, b, c},\n\t\t},\n\t\t// SortableStructs.\n\t\t{\n\t\t\t[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), 
v(sortableStruct{3})},\n\t\t\t[]reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},\n\t\t},\n\t\t// UnsortableStructs.\n\t\t{\n\t\t\t// Note: not sorted - SpewKeys is false.\n\t\t\t[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},\n\t\t\t[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},\n\t\t},\n\t}\n\tcs := spew.ConfigState{DisableMethods: false, SpewKeys: false}\n\thelpTestSortValues(tests, &cs, t)\n}\n\n// TestSortValuesWithSpew ensures the sort functionality for reflect.Value\n// based sorting works as intended when using spew to stringify keys.\nfunc TestSortValuesWithSpew(t *testing.T) {\n\tv := reflect.ValueOf\n\n\ta := v(\"a\")\n\tb := v(\"b\")\n\tc := v(\"c\")\n\ttests := []sortTestCase{\n\t\t// Ints.\n\t\t{\n\t\t\t[]reflect.Value{v(2), v(1), v(3)},\n\t\t\t[]reflect.Value{v(1), v(2), v(3)},\n\t\t},\n\t\t// Strings.\n\t\t{\n\t\t\t[]reflect.Value{b, a, c},\n\t\t\t[]reflect.Value{a, b, c},\n\t\t},\n\t\t// SortableStructs.\n\t\t{\n\t\t\t[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},\n\t\t\t[]reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},\n\t\t},\n\t\t// UnsortableStructs.\n\t\t{\n\t\t\t[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},\n\t\t\t[]reflect.Value{v(unsortableStruct{1}), v(unsortableStruct{2}), v(unsortableStruct{3})},\n\t\t},\n\t}\n\tcs := spew.ConfigState{DisableMethods: true, SpewKeys: true}\n\thelpTestSortValues(tests, &cs, t)\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/config.go",
    "content": "/*\n * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n */\n\npackage spew\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n)\n\n// ConfigState houses the configuration options used by spew to format and\n// display values.  There is a global instance, Config, that is used to control\n// all top-level Formatter and Dump functionality.  Each ConfigState instance\n// provides methods equivalent to the top-level functions.\n//\n// The zero value for ConfigState provides no indentation.  You would typically\n// want to set it to a space or a tab.\n//\n// Alternatively, you can use NewDefaultConfig to get a ConfigState instance\n// with default settings.  See the documentation of NewDefaultConfig for default\n// values.\ntype ConfigState struct {\n\t// Indent specifies the string to use for each indentation level.  The\n\t// global config instance that all top-level functions use set this to a\n\t// single space by default.  If you would like more indentation, you might\n\t// set this to a tab with \"\\t\" or perhaps two spaces with \"  \".\n\tIndent string\n\n\t// MaxDepth controls the maximum number of levels to descend into nested\n\t// data structures.  The default, 0, means there is no limit.\n\t//\n\t// NOTE: Circular data structures are properly detected, so it is not\n\t// necessary to set this value unless you specifically want to limit deeply\n\t// nested data structures.\n\tMaxDepth int\n\n\t// DisableMethods specifies whether or not error and Stringer interfaces are\n\t// invoked for types that implement them.\n\tDisableMethods bool\n\n\t// DisablePointerMethods specifies whether or not to check for and invoke\n\t// error and Stringer interfaces on types which only accept a pointer\n\t// receiver when the current type is not a pointer.\n\t//\n\t// NOTE: This might be an unsafe action since calling one of these methods\n\t// with a pointer receiver could technically mutate the value, however,\n\t// in practice, types which choose to satisify an error or Stringer\n\t// interface with a pointer receiver should not be mutating their state\n\t// inside these interface methods.  As a result, this option relies on\n\t// access to the unsafe package, so it will not have any effect when\n\t// running in environments without access to the unsafe package such as\n\t// Google App Engine or with the \"safe\" build tag specified.\n\tDisablePointerMethods bool\n\n\t// DisablePointerAddresses specifies whether to disable the printing of\n\t// pointer addresses. This is useful when diffing data structures in tests.\n\tDisablePointerAddresses bool\n\n\t// DisableCapacities specifies whether to disable the printing of capacities\n\t// for arrays, slices, maps and channels. 
This is useful when diffing\n\t// data structures in tests.\n\tDisableCapacities bool\n\n\t// ContinueOnMethod specifies whether or not recursion should continue once\n\t// a custom error or Stringer interface is invoked.  The default, false,\n\t// means it will print the results of invoking the custom error or Stringer\n\t// interface and return immediately instead of continuing to recurse into\n\t// the internals of the data type.\n\t//\n\t// NOTE: This flag does not have any effect if method invocation is disabled\n\t// via the DisableMethods or DisablePointerMethods options.\n\tContinueOnMethod bool\n\n\t// SortKeys specifies map keys should be sorted before being printed. Use\n\t// this to have a more deterministic, diffable output.  Note that only\n\t// native types (bool, int, uint, floats, uintptr and string) and types\n\t// that support the error or Stringer interfaces (if methods are\n\t// enabled) are supported, with other types sorted according to the\n\t// reflect.Value.String() output which guarantees display stability.\n\tSortKeys bool\n\n\t// SpewKeys specifies that, as a last resort attempt, map keys should\n\t// be spewed to strings and sorted by those strings.  This is only\n\t// considered if SortKeys is true.\n\tSpewKeys bool\n}\n\n// Config is the active configuration of the top-level functions.\n// The configuration can be changed by modifying the contents of spew.Config.\nvar Config = ConfigState{Indent: \" \"}\n\n// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were\n// passed with a Formatter interface returned by c.NewFormatter.  It returns\n// the formatted string as a value that satisfies error.  See NewFormatter\n// for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))\nfunc (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {\n\treturn fmt.Errorf(format, c.convertArgs(a)...)\n}\n\n// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were\n// passed with a Formatter interface returned by c.NewFormatter.  It returns\n// the number of bytes written and any write error encountered.  See\n// NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))\nfunc (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {\n\treturn fmt.Fprint(w, c.convertArgs(a)...)\n}\n\n// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were\n// passed with a Formatter interface returned by c.NewFormatter.  It returns\n// the number of bytes written and any write error encountered.  See\n// NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))\nfunc (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {\n\treturn fmt.Fprintf(w, format, c.convertArgs(a)...)\n}\n\n// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it\n// were passed with a Formatter interface returned by c.NewFormatter.  
See\n// NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))\nfunc (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {\n\treturn fmt.Fprintln(w, c.convertArgs(a)...)\n}\n\n// Print is a wrapper for fmt.Print that treats each argument as if it were\n// passed with a Formatter interface returned by c.NewFormatter.  It returns\n// the number of bytes written and any write error encountered.  See\n// NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Print(c.NewFormatter(a), c.NewFormatter(b))\nfunc (c *ConfigState) Print(a ...interface{}) (n int, err error) {\n\treturn fmt.Print(c.convertArgs(a)...)\n}\n\n// Printf is a wrapper for fmt.Printf that treats each argument as if it were\n// passed with a Formatter interface returned by c.NewFormatter.  It returns\n// the number of bytes written and any write error encountered.  See\n// NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))\nfunc (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {\n\treturn fmt.Printf(format, c.convertArgs(a)...)\n}\n\n// Println is a wrapper for fmt.Println that treats each argument as if it were\n// passed with a Formatter interface returned by c.NewFormatter.  It returns\n// the number of bytes written and any write error encountered.  See\n// NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Println(c.NewFormatter(a), c.NewFormatter(b))\nfunc (c *ConfigState) Println(a ...interface{}) (n int, err error) {\n\treturn fmt.Println(c.convertArgs(a)...)\n}\n\n// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were\n// passed with a Formatter interface returned by c.NewFormatter.  It returns\n// the resulting string.  See NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))\nfunc (c *ConfigState) Sprint(a ...interface{}) string {\n\treturn fmt.Sprint(c.convertArgs(a)...)\n}\n\n// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were\n// passed with a Formatter interface returned by c.NewFormatter.  It returns\n// the resulting string.  See NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))\nfunc (c *ConfigState) Sprintf(format string, a ...interface{}) string {\n\treturn fmt.Sprintf(format, c.convertArgs(a)...)\n}\n\n// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it\n// were passed with a Formatter interface returned by c.NewFormatter.  It\n// returns the resulting string.  See NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))\nfunc (c *ConfigState) Sprintln(a ...interface{}) string {\n\treturn fmt.Sprintln(c.convertArgs(a)...)\n}\n\n/*\nNewFormatter returns a custom formatter that satisfies the fmt.Formatter\ninterface.  As a result, it integrates cleanly with standard fmt package\nprinting functions.  
The formatter is useful for inline printing of smaller data\ntypes similar to the standard %v format specifier.\n\nThe custom formatter only responds to the %v (most compact), %+v (adds pointer\naddresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb\ncombinations.  Any other verbs such as %x and %q will be sent to the\nstandard fmt package for formatting.  In addition, the custom formatter ignores\nthe width and precision arguments (however they will still work on the format\nspecifiers not handled by the custom formatter).\n\nTypically this function shouldn't be called directly.  It is much easier to make\nuse of the custom formatter by calling one of the convenience functions such as\nc.Printf, c.Println, or c.Fprintf.\n*/\nfunc (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {\n\treturn newFormatter(c, v)\n}\n\n// Fdump formats and displays the passed arguments to io.Writer w.  It formats\n// exactly the same as Dump.\nfunc (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {\n\tfdump(c, w, a...)\n}\n\n/*\nDump displays the passed parameters to standard out with newlines, customizable\nindentation, and additional debug information such as complete types and all\npointer addresses used to indirect to the final value.  It provides the\nfollowing features over the built-in printing facilities provided by the fmt\npackage:\n\n\t* Pointers are dereferenced and followed\n\t* Circular data structures are detected and handled properly\n\t* Custom Stringer/error interfaces are optionally invoked, including\n\t  on unexported types\n\t* Custom types which only implement the Stringer/error interfaces via\n\t  a pointer receiver are optionally invoked when passing non-pointer\n\t  variables\n\t* Byte arrays and slices are dumped like the hexdump -C command which\n\t  includes offsets, byte values in hex, and ASCII output\n\nThe configuration options are controlled by modifying the public members\nof c.  See ConfigState for options documentation.\n\nSee Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to\nget the formatted result as a string.\n*/\nfunc (c *ConfigState) Dump(a ...interface{}) {\n\tfdump(c, os.Stdout, a...)\n}\n\n// Sdump returns a string with the passed arguments formatted exactly the same\n// as Dump.\nfunc (c *ConfigState) Sdump(a ...interface{}) string {\n\tvar buf bytes.Buffer\n\tfdump(c, &buf, a...)\n\treturn buf.String()\n}\n\n// convertArgs accepts a slice of arguments and returns a slice of the same\n// length with each argument converted to a spew Formatter interface using\n// the ConfigState associated with c.\nfunc (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {\n\tformatters = make([]interface{}, len(args))\n\tfor index, arg := range args {\n\t\tformatters[index] = newFormatter(c, arg)\n\t}\n\treturn formatters\n}\n\n// NewDefaultConfig returns a ConfigState with the following default settings.\n//\n// \tIndent: \" \"\n// \tMaxDepth: 0\n// \tDisableMethods: false\n// \tDisablePointerMethods: false\n// \tContinueOnMethod: false\n// \tSortKeys: false\nfunc NewDefaultConfig() *ConfigState {\n\treturn &ConfigState{Indent: \" \"}\n}\n"
  },
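  {
    "path": "vendor/github.com/davecgh/go-spew/spew/editorial_config_example_test.go",
    "content": "// Editorial note: this file is an illustrative sketch added by the editor;\n// it is not part of the upstream go-spew sources vendored here, and the file\n// and function names are hypothetical.  It shows how the ConfigState methods\n// documented in config.go mirror the top-level functions while leaving the\n// global spew.Config untouched.\n\npackage spew_test\n\nimport (\n\t\"os\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n)\n\n// configStateSketch builds an isolated ConfigState and exercises the\n// equivalent method for each dumping style.\nfunc configStateSketch() {\n\t// The zero value provides no indentation, so set Indent explicitly as\n\t// the ConfigState documentation recommends.\n\tcs := spew.ConfigState{Indent: \"\\t\", MaxDepth: 2}\n\n\ttype inner struct{ N int }\n\ttype outer struct{ I *inner }\n\tv := outer{I: &inner{N: 7}}\n\n\tcs.Dump(v)             // dump to standard out\n\tcs.Fdump(os.Stderr, v) // dump to an arbitrary io.Writer\n\t_ = cs.Sdump(v)        // dump to a string\n\n\t// The inline formatter verbs from doc.go work through the wrappers too.\n\tcs.Fprintf(os.Stderr, \"%#+v\\n\", v)\n}\n"
  },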
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/doc.go",
    "content": "/*\n * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n */\n\n/*\nPackage spew implements a deep pretty printer for Go data structures to aid in\ndebugging.\n\nA quick overview of the additional features spew provides over the built-in\nprinting facilities for Go data types are as follows:\n\n\t* Pointers are dereferenced and followed\n\t* Circular data structures are detected and handled properly\n\t* Custom Stringer/error interfaces are optionally invoked, including\n\t  on unexported types\n\t* Custom types which only implement the Stringer/error interfaces via\n\t  a pointer receiver are optionally invoked when passing non-pointer\n\t  variables\n\t* Byte arrays and slices are dumped like the hexdump -C command which\n\t  includes offsets, byte values in hex, and ASCII output (only when using\n\t  Dump style)\n\nThere are two different approaches spew allows for dumping Go data structures:\n\n\t* Dump style which prints with newlines, customizable indentation,\n\t  and additional debug information such as types and all pointer addresses\n\t  used to indirect to the final value\n\t* A custom Formatter interface that integrates cleanly with the standard fmt\n\t  package and replaces %v, %+v, %#v, and %#+v to provide inline printing\n\t  similar to the default %v while providing the additional functionality\n\t  outlined above and passing unsupported format verbs such as %x and %q\n\t  along to fmt\n\nQuick Start\n\nThis section demonstrates how to quickly get started with spew.  See the\nsections below for further details on formatting and configuration options.\n\nTo dump a variable with full newlines, indentation, type, and pointer\ninformation use Dump, Fdump, or Sdump:\n\tspew.Dump(myVar1, myVar2, ...)\n\tspew.Fdump(someWriter, myVar1, myVar2, ...)\n\tstr := spew.Sdump(myVar1, myVar2, ...)\n\nAlternatively, if you would prefer to use format strings with a compacted inline\nprinting style, use the convenience wrappers Printf, Fprintf, etc with\n%v (most compact), %+v (adds pointer addresses), %#v (adds types), or\n%#+v (adds types and pointer addresses):\n\tspew.Printf(\"myVar1: %v -- myVar2: %+v\", myVar1, myVar2)\n\tspew.Printf(\"myVar3: %#v -- myVar4: %#+v\", myVar3, myVar4)\n\tspew.Fprintf(someWriter, \"myVar1: %v -- myVar2: %+v\", myVar1, myVar2)\n\tspew.Fprintf(someWriter, \"myVar3: %#v -- myVar4: %#+v\", myVar3, myVar4)\n\nConfiguration Options\n\nConfiguration of spew is handled by fields in the ConfigState type.  For\nconvenience, all of the top-level functions use a global state available\nvia the spew.Config global.\n\nIt is also possible to create a ConfigState instance that provides methods\nequivalent to the top-level functions.  This allows concurrent configuration\noptions.  
See the ConfigState documentation for more details.\n\nThe following configuration options are available:\n\t* Indent\n\t\tString to use for each indentation level for Dump functions.\n\t\tIt is a single space by default.  A popular alternative is \"\\t\".\n\n\t* MaxDepth\n\t\tMaximum number of levels to descend into nested data structures.\n\t\tThere is no limit by default.\n\n\t* DisableMethods\n\t\tDisables invocation of error and Stringer interface methods.\n\t\tMethod invocation is enabled by default.\n\n\t* DisablePointerMethods\n\t\tDisables invocation of error and Stringer interface methods on types\n\t\twhich only accept pointer receivers from non-pointer variables.\n\t\tPointer method invocation is enabled by default.\n\n\t* DisablePointerAddresses\n\t\tDisablePointerAddresses specifies whether to disable the printing of\n\t\tpointer addresses. This is useful when diffing data structures in tests.\n\n\t* DisableCapacities\n\t\tDisableCapacities specifies whether to disable the printing of\n\t\tcapacities for arrays, slices, maps and channels. This is useful when\n\t\tdiffing data structures in tests.\n\n\t* ContinueOnMethod\n\t\tEnables recursion into types after invoking error and Stringer interface\n\t\tmethods. Recursion after method invocation is disabled by default.\n\n\t* SortKeys\n\t\tSpecifies map keys should be sorted before being printed. Use\n\t\tthis to have a more deterministic, diffable output.  Note that\n\t\tonly native types (bool, int, uint, floats, uintptr and string)\n\t\tand types which implement error or Stringer interfaces are\n\t\tsupported with other types sorted according to the\n\t\treflect.Value.String() output which guarantees display\n\t\tstability.  Natural map order is used by default.\n\n\t* SpewKeys\n\t\tSpecifies that, as a last resort attempt, map keys should be\n\t\tspewed to strings and sorted by those strings.  This is only\n\t\tconsidered if SortKeys is true.\n\nDump Usage\n\nSimply call spew.Dump with a list of variables you want to dump:\n\n\tspew.Dump(myVar1, myVar2, ...)\n\nYou may also call spew.Fdump if you would prefer to output to an arbitrary\nio.Writer.  For example, to dump to standard error:\n\n\tspew.Fdump(os.Stderr, myVar1, myVar2, ...)\n\nA third option is to call spew.Sdump to get the formatted output as a string:\n\n\tstr := spew.Sdump(myVar1, myVar2, ...)\n\nSample Dump Output\n\nSee the Dump example for details on the setup of the types and variables being\nshown here.\n\n\t(main.Foo) {\n\t unexportedField: (*main.Bar)(0xf84002e210)({\n\t  flag: (main.Flag) flagTwo,\n\t  data: (uintptr) <nil>\n\t }),\n\t ExportedField: (map[interface {}]interface {}) (len=1) {\n\t  (string) (len=3) \"one\": (bool) true\n\t }\n\t}\n\nByte (and uint8) arrays and slices are displayed uniquely like the hexdump -C\ncommand as shown.\n\t([]uint8) (len=32 cap=32) {\n\t 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |\n\t 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!\"#$%&'()*+,-./0|\n\t 00000020  31 32                                             |12|\n\t}\n\nCustom Formatter\n\nSpew provides a custom formatter that implements the fmt.Formatter interface\nso that it integrates cleanly with standard fmt package printing functions. 
The\nformatter is useful for inline printing of smaller data types similar to the\nstandard %v format specifier.\n\nThe custom formatter only responds to the %v (most compact), %+v (adds pointer\naddresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb\ncombinations.  Any other verbs such as %x and %q will be sent to the\nstandard fmt package for formatting.  In addition, the custom formatter ignores\nthe width and precision arguments (however they will still work on the format\nspecifiers not handled by the custom formatter).\n\nCustom Formatter Usage\n\nThe simplest way to make use of the spew custom formatter is to call one of the\nconvenience functions such as spew.Printf, spew.Println, or spew.Fprintf.  The\nfunctions have syntax you are most likely already familiar with:\n\n\tspew.Printf(\"myVar1: %v -- myVar2: %+v\", myVar1, myVar2)\n\tspew.Printf(\"myVar3: %#v -- myVar4: %#+v\", myVar3, myVar4)\n\tspew.Println(myVar, myVar2)\n\tspew.Fprintf(os.Stderr, \"myVar1: %v -- myVar2: %+v\", myVar1, myVar2)\n\tspew.Fprintf(os.Stderr, \"myVar3: %#v -- myVar4: %#+v\", myVar3, myVar4)\n\nSee the Index for the full list of convenience functions.\n\nSample Formatter Output\n\nDouble pointer to a uint8:\n\t  %v: <**>5\n\t %+v: <**>(0xf8400420d0->0xf8400420c8)5\n\t %#v: (**uint8)5\n\t%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5\n\nPointer to circular struct with a uint8 field and a pointer to itself:\n\t  %v: <*>{1 <*><shown>}\n\t %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}\n\t %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}\n\t%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}\n\nSee the Printf example for details on the setup of variables being shown\nhere.\n\nErrors\n\nSince it is possible for custom Stringer/error interfaces to panic, spew\ndetects them and handles them internally by printing the panic information\ninline with the output.  Since spew is intended to provide deep pretty printing\ncapabilities on structures, it intentionally does not return any errors.\n*/\npackage spew\n"
  },
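  {
    "path": "vendor/github.com/davecgh/go-spew/spew/editorial_styles_example_test.go",
    "content": "// Editorial note: this file is an illustrative sketch added by the editor;\n// it is not part of the upstream go-spew sources vendored here, and the file,\n// type, and function names are hypothetical.  It pairs the two printing\n// approaches that doc.go describes: Dump style and the inline fmt.Formatter\n// verbs.\n\npackage spew_test\n\nimport (\n\t\"os\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n)\n\n// chainNode is a small linked structure used to show pointer following.\ntype chainNode struct {\n\tName string\n\tNext *chainNode\n}\n\n// printingStylesSketch prints the same value both ways so the two output\n// styles can be compared side by side.\nfunc printingStylesSketch() {\n\tv := chainNode{Name: \"head\", Next: &chainNode{Name: \"tail\"}}\n\n\t// Dump style: newlines, indentation, full types, and pointer addresses.\n\tspew.Fdump(os.Stderr, v)\n\n\t// Formatter style: %v is most compact; %#+v adds types and addresses.\n\tspew.Fprintf(os.Stderr, \"compact: %v -- full: %#+v\\n\", v, v)\n}\n"
  },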
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/dump.go",
    "content": "/*\n * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n */\n\npackage spew\n\nimport (\n\t\"bytes\"\n\t\"encoding/hex\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar (\n\t// uint8Type is a reflect.Type representing a uint8.  It is used to\n\t// convert cgo types to uint8 slices for hexdumping.\n\tuint8Type = reflect.TypeOf(uint8(0))\n\n\t// cCharRE is a regular expression that matches a cgo char.\n\t// It is used to detect character arrays to hexdump them.\n\tcCharRE = regexp.MustCompile(\"^.*\\\\._Ctype_char$\")\n\n\t// cUnsignedCharRE is a regular expression that matches a cgo unsigned\n\t// char.  It is used to detect unsigned character arrays to hexdump\n\t// them.\n\tcUnsignedCharRE = regexp.MustCompile(\"^.*\\\\._Ctype_unsignedchar$\")\n\n\t// cUint8tCharRE is a regular expression that matches a cgo uint8_t.\n\t// It is used to detect uint8_t arrays to hexdump them.\n\tcUint8tCharRE = regexp.MustCompile(\"^.*\\\\._Ctype_uint8_t$\")\n)\n\n// dumpState contains information about the state of a dump operation.\ntype dumpState struct {\n\tw                io.Writer\n\tdepth            int\n\tpointers         map[uintptr]int\n\tignoreNextType   bool\n\tignoreNextIndent bool\n\tcs               *ConfigState\n}\n\n// indent performs indentation according to the depth level and cs.Indent\n// option.\nfunc (d *dumpState) indent() {\n\tif d.ignoreNextIndent {\n\t\td.ignoreNextIndent = false\n\t\treturn\n\t}\n\td.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))\n}\n\n// unpackValue returns values inside of non-nil interfaces when possible.\n// This is useful for data types like structs, arrays, slices, and maps which\n// can contain varying types packed inside an interface.\nfunc (d *dumpState) unpackValue(v reflect.Value) reflect.Value {\n\tif v.Kind() == reflect.Interface && !v.IsNil() {\n\t\tv = v.Elem()\n\t}\n\treturn v\n}\n\n// dumpPtr handles formatting of pointers by indirecting them as necessary.\nfunc (d *dumpState) dumpPtr(v reflect.Value) {\n\t// Remove pointers at or below the current depth from map used to detect\n\t// circular refs.\n\tfor k, depth := range d.pointers {\n\t\tif depth >= d.depth {\n\t\t\tdelete(d.pointers, k)\n\t\t}\n\t}\n\n\t// Keep list of all dereferenced pointers to show later.\n\tpointerChain := make([]uintptr, 0)\n\n\t// Figure out how many levels of indirection there are by dereferencing\n\t// pointers and unpacking interfaces down the chain while detecting circular\n\t// references.\n\tnilFound := false\n\tcycleFound := false\n\tindirects := 0\n\tve := v\n\tfor ve.Kind() == reflect.Ptr {\n\t\tif ve.IsNil() {\n\t\t\tnilFound = true\n\t\t\tbreak\n\t\t}\n\t\tindirects++\n\t\taddr := ve.Pointer()\n\t\tpointerChain = append(pointerChain, addr)\n\t\tif 
pd, ok := d.pointers[addr]; ok && pd < d.depth {\n\t\t\tcycleFound = true\n\t\t\tindirects--\n\t\t\tbreak\n\t\t}\n\t\td.pointers[addr] = d.depth\n\n\t\tve = ve.Elem()\n\t\tif ve.Kind() == reflect.Interface {\n\t\t\tif ve.IsNil() {\n\t\t\t\tnilFound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tve = ve.Elem()\n\t\t}\n\t}\n\n\t// Display type information.\n\td.w.Write(openParenBytes)\n\td.w.Write(bytes.Repeat(asteriskBytes, indirects))\n\td.w.Write([]byte(ve.Type().String()))\n\td.w.Write(closeParenBytes)\n\n\t// Display pointer information.\n\tif !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {\n\t\td.w.Write(openParenBytes)\n\t\tfor i, addr := range pointerChain {\n\t\t\tif i > 0 {\n\t\t\t\td.w.Write(pointerChainBytes)\n\t\t\t}\n\t\t\tprintHexPtr(d.w, addr)\n\t\t}\n\t\td.w.Write(closeParenBytes)\n\t}\n\n\t// Display dereferenced value.\n\td.w.Write(openParenBytes)\n\tswitch {\n\tcase nilFound == true:\n\t\td.w.Write(nilAngleBytes)\n\n\tcase cycleFound == true:\n\t\td.w.Write(circularBytes)\n\n\tdefault:\n\t\td.ignoreNextType = true\n\t\td.dump(ve)\n\t}\n\td.w.Write(closeParenBytes)\n}\n\n// dumpSlice handles formatting of arrays and slices.  Byte (uint8 under\n// reflection) arrays and slices are dumped in hexdump -C fashion.\nfunc (d *dumpState) dumpSlice(v reflect.Value) {\n\t// Determine whether this type should be hex dumped or not.  Also,\n\t// for types which should be hexdumped, try to use the underlying data\n\t// first, then fall back to trying to convert them to a uint8 slice.\n\tvar buf []uint8\n\tdoConvert := false\n\tdoHexDump := false\n\tnumEntries := v.Len()\n\tif numEntries > 0 {\n\t\tvt := v.Index(0).Type()\n\t\tvts := vt.String()\n\t\tswitch {\n\t\t// C types that need to be converted.\n\t\tcase cCharRE.MatchString(vts):\n\t\t\tfallthrough\n\t\tcase cUnsignedCharRE.MatchString(vts):\n\t\t\tfallthrough\n\t\tcase cUint8tCharRE.MatchString(vts):\n\t\t\tdoConvert = true\n\n\t\t// Try to use existing uint8 slices and fall back to converting\n\t\t// and copying if that fails.\n\t\tcase vt.Kind() == reflect.Uint8:\n\t\t\t// We need an addressable interface to convert the type\n\t\t\t// to a byte slice.  However, the reflect package won't\n\t\t\t// give us an interface on certain things like\n\t\t\t// unexported struct fields in order to enforce\n\t\t\t// visibility rules.  
We use unsafe, when available, to\n\t\t\t// bypass these restrictions since this package does not\n\t\t\t// mutate the values.\n\t\t\tvs := v\n\t\t\tif !vs.CanInterface() || !vs.CanAddr() {\n\t\t\t\tvs = unsafeReflectValue(vs)\n\t\t\t}\n\t\t\tif !UnsafeDisabled {\n\t\t\t\tvs = vs.Slice(0, numEntries)\n\n\t\t\t\t// Use the existing uint8 slice if it can be\n\t\t\t\t// type asserted.\n\t\t\t\tiface := vs.Interface()\n\t\t\t\tif slice, ok := iface.([]uint8); ok {\n\t\t\t\t\tbuf = slice\n\t\t\t\t\tdoHexDump = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// The underlying data needs to be converted if it can't\n\t\t\t// be type asserted to a uint8 slice.\n\t\t\tdoConvert = true\n\t\t}\n\n\t\t// Copy and convert the underlying type if needed.\n\t\tif doConvert && vt.ConvertibleTo(uint8Type) {\n\t\t\t// Convert and copy each element into a uint8 byte\n\t\t\t// slice.\n\t\t\tbuf = make([]uint8, numEntries)\n\t\t\tfor i := 0; i < numEntries; i++ {\n\t\t\t\tvv := v.Index(i)\n\t\t\t\tbuf[i] = uint8(vv.Convert(uint8Type).Uint())\n\t\t\t}\n\t\t\tdoHexDump = true\n\t\t}\n\t}\n\n\t// Hexdump the entire slice as needed.\n\tif doHexDump {\n\t\tindent := strings.Repeat(d.cs.Indent, d.depth)\n\t\tstr := indent + hex.Dump(buf)\n\t\tstr = strings.Replace(str, \"\\n\", \"\\n\"+indent, -1)\n\t\tstr = strings.TrimRight(str, d.cs.Indent)\n\t\td.w.Write([]byte(str))\n\t\treturn\n\t}\n\n\t// Recursively call dump for each item.\n\tfor i := 0; i < numEntries; i++ {\n\t\td.dump(d.unpackValue(v.Index(i)))\n\t\tif i < (numEntries - 1) {\n\t\t\td.w.Write(commaNewlineBytes)\n\t\t} else {\n\t\t\td.w.Write(newlineBytes)\n\t\t}\n\t}\n}\n\n// dump is the main workhorse for dumping a value.  It uses the passed reflect\n// value to figure out what kind of object we are dealing with and formats it\n// appropriately.  
It is a recursive function, however circular data structures\n// are detected and handled properly.\nfunc (d *dumpState) dump(v reflect.Value) {\n\t// Handle invalid reflect values immediately.\n\tkind := v.Kind()\n\tif kind == reflect.Invalid {\n\t\td.w.Write(invalidAngleBytes)\n\t\treturn\n\t}\n\n\t// Handle pointers specially.\n\tif kind == reflect.Ptr {\n\t\td.indent()\n\t\td.dumpPtr(v)\n\t\treturn\n\t}\n\n\t// Print type information unless already handled elsewhere.\n\tif !d.ignoreNextType {\n\t\td.indent()\n\t\td.w.Write(openParenBytes)\n\t\td.w.Write([]byte(v.Type().String()))\n\t\td.w.Write(closeParenBytes)\n\t\td.w.Write(spaceBytes)\n\t}\n\td.ignoreNextType = false\n\n\t// Display length and capacity if the built-in len and cap functions\n\t// work with the value's kind and the len/cap itself is non-zero.\n\tvalueLen, valueCap := 0, 0\n\tswitch v.Kind() {\n\tcase reflect.Array, reflect.Slice, reflect.Chan:\n\t\tvalueLen, valueCap = v.Len(), v.Cap()\n\tcase reflect.Map, reflect.String:\n\t\tvalueLen = v.Len()\n\t}\n\tif valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {\n\t\td.w.Write(openParenBytes)\n\t\tif valueLen != 0 {\n\t\t\td.w.Write(lenEqualsBytes)\n\t\t\tprintInt(d.w, int64(valueLen), 10)\n\t\t}\n\t\tif !d.cs.DisableCapacities && valueCap != 0 {\n\t\t\tif valueLen != 0 {\n\t\t\t\td.w.Write(spaceBytes)\n\t\t\t}\n\t\t\td.w.Write(capEqualsBytes)\n\t\t\tprintInt(d.w, int64(valueCap), 10)\n\t\t}\n\t\td.w.Write(closeParenBytes)\n\t\td.w.Write(spaceBytes)\n\t}\n\n\t// Call Stringer/error interfaces if they exist and the handle methods flag\n\t// is enabled\n\tif !d.cs.DisableMethods {\n\t\tif (kind != reflect.Invalid) && (kind != reflect.Interface) {\n\t\t\tif handled := handleMethods(d.cs, d.w, v); handled {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\tswitch kind {\n\tcase reflect.Invalid:\n\t\t// Do nothing.  We should never get here since invalid has already\n\t\t// been handled above.\n\n\tcase reflect.Bool:\n\t\tprintBool(d.w, v.Bool())\n\n\tcase reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:\n\t\tprintInt(d.w, v.Int(), 10)\n\n\tcase reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:\n\t\tprintUint(d.w, v.Uint(), 10)\n\n\tcase reflect.Float32:\n\t\tprintFloat(d.w, v.Float(), 32)\n\n\tcase reflect.Float64:\n\t\tprintFloat(d.w, v.Float(), 64)\n\n\tcase reflect.Complex64:\n\t\tprintComplex(d.w, v.Complex(), 32)\n\n\tcase reflect.Complex128:\n\t\tprintComplex(d.w, v.Complex(), 64)\n\n\tcase reflect.Slice:\n\t\tif v.IsNil() {\n\t\t\td.w.Write(nilAngleBytes)\n\t\t\tbreak\n\t\t}\n\t\tfallthrough\n\n\tcase reflect.Array:\n\t\td.w.Write(openBraceNewlineBytes)\n\t\td.depth++\n\t\tif (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {\n\t\t\td.indent()\n\t\t\td.w.Write(maxNewlineBytes)\n\t\t} else {\n\t\t\td.dumpSlice(v)\n\t\t}\n\t\td.depth--\n\t\td.indent()\n\t\td.w.Write(closeBraceBytes)\n\n\tcase reflect.String:\n\t\td.w.Write([]byte(strconv.Quote(v.String())))\n\n\tcase reflect.Interface:\n\t\t// The only time we should get here is for nil interfaces due to\n\t\t// unpackValue calls.\n\t\tif v.IsNil() {\n\t\t\td.w.Write(nilAngleBytes)\n\t\t}\n\n\tcase reflect.Ptr:\n\t\t// Do nothing.  
We should never get here since pointers have already\n\t\t// been handled above.\n\n\tcase reflect.Map:\n\t\t// nil maps should be indicated as different than empty maps\n\t\tif v.IsNil() {\n\t\t\td.w.Write(nilAngleBytes)\n\t\t\tbreak\n\t\t}\n\n\t\td.w.Write(openBraceNewlineBytes)\n\t\td.depth++\n\t\tif (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {\n\t\t\td.indent()\n\t\t\td.w.Write(maxNewlineBytes)\n\t\t} else {\n\t\t\tnumEntries := v.Len()\n\t\t\tkeys := v.MapKeys()\n\t\t\tif d.cs.SortKeys {\n\t\t\t\tsortValues(keys, d.cs)\n\t\t\t}\n\t\t\tfor i, key := range keys {\n\t\t\t\td.dump(d.unpackValue(key))\n\t\t\t\td.w.Write(colonSpaceBytes)\n\t\t\t\td.ignoreNextIndent = true\n\t\t\t\td.dump(d.unpackValue(v.MapIndex(key)))\n\t\t\t\tif i < (numEntries - 1) {\n\t\t\t\t\td.w.Write(commaNewlineBytes)\n\t\t\t\t} else {\n\t\t\t\t\td.w.Write(newlineBytes)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\td.depth--\n\t\td.indent()\n\t\td.w.Write(closeBraceBytes)\n\n\tcase reflect.Struct:\n\t\td.w.Write(openBraceNewlineBytes)\n\t\td.depth++\n\t\tif (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {\n\t\t\td.indent()\n\t\t\td.w.Write(maxNewlineBytes)\n\t\t} else {\n\t\t\tvt := v.Type()\n\t\t\tnumFields := v.NumField()\n\t\t\tfor i := 0; i < numFields; i++ {\n\t\t\t\td.indent()\n\t\t\t\tvtf := vt.Field(i)\n\t\t\t\td.w.Write([]byte(vtf.Name))\n\t\t\t\td.w.Write(colonSpaceBytes)\n\t\t\t\td.ignoreNextIndent = true\n\t\t\t\td.dump(d.unpackValue(v.Field(i)))\n\t\t\t\tif i < (numFields - 1) {\n\t\t\t\t\td.w.Write(commaNewlineBytes)\n\t\t\t\t} else {\n\t\t\t\t\td.w.Write(newlineBytes)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\td.depth--\n\t\td.indent()\n\t\td.w.Write(closeBraceBytes)\n\n\tcase reflect.Uintptr:\n\t\tprintHexPtr(d.w, uintptr(v.Uint()))\n\n\tcase reflect.UnsafePointer, reflect.Chan, reflect.Func:\n\t\tprintHexPtr(d.w, v.Pointer())\n\n\t// There were not any other types at the time this code was written, but\n\t// fall back to letting the default fmt package handle it in case any new\n\t// types are added.\n\tdefault:\n\t\tif v.CanInterface() {\n\t\t\tfmt.Fprintf(d.w, \"%v\", v.Interface())\n\t\t} else {\n\t\t\tfmt.Fprintf(d.w, \"%v\", v.String())\n\t\t}\n\t}\n}\n\n// fdump is a helper function to consolidate the logic from the various public\n// methods which take varying writers and config states.\nfunc fdump(cs *ConfigState, w io.Writer, a ...interface{}) {\n\tfor _, arg := range a {\n\t\tif arg == nil {\n\t\t\tw.Write(interfaceBytes)\n\t\t\tw.Write(spaceBytes)\n\t\t\tw.Write(nilAngleBytes)\n\t\t\tw.Write(newlineBytes)\n\t\t\tcontinue\n\t\t}\n\n\t\td := dumpState{w: w, cs: cs}\n\t\td.pointers = make(map[uintptr]int)\n\t\td.dump(reflect.ValueOf(arg))\n\t\td.w.Write(newlineBytes)\n\t}\n}\n\n// Fdump formats and displays the passed arguments to io.Writer w.  It formats\n// exactly the same as Dump.\nfunc Fdump(w io.Writer, a ...interface{}) {\n\tfdump(&Config, w, a...)\n}\n\n// Sdump returns a string with the passed arguments formatted exactly the same\n// as Dump.\nfunc Sdump(a ...interface{}) string {\n\tvar buf bytes.Buffer\n\tfdump(&Config, &buf, a...)\n\treturn buf.String()\n}\n\n/*\nDump displays the passed parameters to standard out with newlines, customizable\nindentation, and additional debug information such as complete types and all\npointer addresses used to indirect to the final value.  
It provides the\nfollowing features over the built-in printing facilities provided by the fmt\npackage:\n\n\t* Pointers are dereferenced and followed\n\t* Circular data structures are detected and handled properly\n\t* Custom Stringer/error interfaces are optionally invoked, including\n\t  on unexported types\n\t* Custom types which only implement the Stringer/error interfaces via\n\t  a pointer receiver are optionally invoked when passing non-pointer\n\t  variables\n\t* Byte arrays and slices are dumped like the hexdump -C command which\n\t  includes offsets, byte values in hex, and ASCII output\n\nThe configuration options are controlled by an exported package global,\nspew.Config.  See ConfigState for options documentation.\n\nSee Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to\nget the formatted result as a string.\n*/\nfunc Dump(a ...interface{}) {\n\tfdump(&Config, os.Stdout, a...)\n}\n"
  },
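  {
    "path": "vendor/github.com/davecgh/go-spew/spew/editorial_hexdump_example_test.go",
    "content": "// Editorial note: this file is an illustrative sketch added by the editor;\n// it is not part of the upstream go-spew sources vendored here, and the file\n// and function names are hypothetical.  dump.go renders byte arrays and\n// slices in hexdump -C style, with offsets, hex byte values, and an ASCII\n// column.\n\npackage spew_test\n\nimport \"github.com/davecgh/go-spew/spew\"\n\nfunc hexdumpSketch() {\n\tpayload := []byte(\"spew renders byte slices as hexdump -C style output\")\n\n\t// Each output line carries an offset, sixteen hex bytes, and ASCII.\n\tspew.Dump(payload)\n}\n"
  },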
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/dump_test.go",
    "content": "/*\n * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n */\n\n/*\nTest Summary:\nNOTE: For each test, a nil pointer, a single pointer and double pointer to the\nbase test element are also tested to ensure proper indirection across all types.\n\n- Max int8, int16, int32, int64, int\n- Max uint8, uint16, uint32, uint64, uint\n- Boolean true and false\n- Standard complex64 and complex128\n- Array containing standard ints\n- Array containing type with custom formatter on pointer receiver only\n- Array containing interfaces\n- Array containing bytes\n- Slice containing standard float32 values\n- Slice containing type with custom formatter on pointer receiver only\n- Slice containing interfaces\n- Slice containing bytes\n- Nil slice\n- Standard string\n- Nil interface\n- Sub-interface\n- Map with string keys and int vals\n- Map with custom formatter type on pointer receiver only keys and vals\n- Map with interface keys and values\n- Map with nil interface value\n- Struct with primitives\n- Struct that contains another struct\n- Struct that contains custom type with Stringer pointer interface via both\n  exported and unexported fields\n- Struct that contains embedded struct and field to same struct\n- Uintptr to 0 (null pointer)\n- Uintptr address of real variable\n- Unsafe.Pointer to 0 (null pointer)\n- Unsafe.Pointer to address of real variable\n- Nil channel\n- Standard int channel\n- Function with no params and no returns\n- Function with param and no returns\n- Function with multiple params and multiple returns\n- Struct that is circular through self referencing\n- Structs that are circular through cross referencing\n- Structs that are indirectly circular\n- Type that panics in its Stringer interface\n*/\n\npackage spew_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"testing\"\n\t\"unsafe\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n)\n\n// dumpTest is used to describe a test to be performed against the Dump method.\ntype dumpTest struct {\n\tin    interface{}\n\twants []string\n}\n\n// dumpTests houses all of the tests to be performed against the Dump method.\nvar dumpTests = make([]dumpTest, 0)\n\n// addDumpTest is a helper method to append the passed input and desired result\n// to dumpTests\nfunc addDumpTest(in interface{}, wants ...string) {\n\ttest := dumpTest{in, wants}\n\tdumpTests = append(dumpTests, test)\n}\n\nfunc addIntDumpTests() {\n\t// Max int8.\n\tv := int8(127)\n\tnv := (*int8)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"int8\"\n\tvs := \"127\"\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, 
\"(*\"+vt+\")(<nil>)\\n\")\n\n\t// Max int16.\n\tv2 := int16(32767)\n\tnv2 := (*int16)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"int16\"\n\tv2s := \"32767\"\n\taddDumpTest(v2, \"(\"+v2t+\") \"+v2s+\"\\n\")\n\taddDumpTest(pv2, \"(*\"+v2t+\")(\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(&pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(nv2, \"(*\"+v2t+\")(<nil>)\\n\")\n\n\t// Max int32.\n\tv3 := int32(2147483647)\n\tnv3 := (*int32)(nil)\n\tpv3 := &v3\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"int32\"\n\tv3s := \"2147483647\"\n\taddDumpTest(v3, \"(\"+v3t+\") \"+v3s+\"\\n\")\n\taddDumpTest(pv3, \"(*\"+v3t+\")(\"+v3Addr+\")(\"+v3s+\")\\n\")\n\taddDumpTest(&pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")(\"+v3s+\")\\n\")\n\taddDumpTest(nv3, \"(*\"+v3t+\")(<nil>)\\n\")\n\n\t// Max int64.\n\tv4 := int64(9223372036854775807)\n\tnv4 := (*int64)(nil)\n\tpv4 := &v4\n\tv4Addr := fmt.Sprintf(\"%p\", pv4)\n\tpv4Addr := fmt.Sprintf(\"%p\", &pv4)\n\tv4t := \"int64\"\n\tv4s := \"9223372036854775807\"\n\taddDumpTest(v4, \"(\"+v4t+\") \"+v4s+\"\\n\")\n\taddDumpTest(pv4, \"(*\"+v4t+\")(\"+v4Addr+\")(\"+v4s+\")\\n\")\n\taddDumpTest(&pv4, \"(**\"+v4t+\")(\"+pv4Addr+\"->\"+v4Addr+\")(\"+v4s+\")\\n\")\n\taddDumpTest(nv4, \"(*\"+v4t+\")(<nil>)\\n\")\n\n\t// Max int.\n\tv5 := int(2147483647)\n\tnv5 := (*int)(nil)\n\tpv5 := &v5\n\tv5Addr := fmt.Sprintf(\"%p\", pv5)\n\tpv5Addr := fmt.Sprintf(\"%p\", &pv5)\n\tv5t := \"int\"\n\tv5s := \"2147483647\"\n\taddDumpTest(v5, \"(\"+v5t+\") \"+v5s+\"\\n\")\n\taddDumpTest(pv5, \"(*\"+v5t+\")(\"+v5Addr+\")(\"+v5s+\")\\n\")\n\taddDumpTest(&pv5, \"(**\"+v5t+\")(\"+pv5Addr+\"->\"+v5Addr+\")(\"+v5s+\")\\n\")\n\taddDumpTest(nv5, \"(*\"+v5t+\")(<nil>)\\n\")\n}\n\nfunc addUintDumpTests() {\n\t// Max uint8.\n\tv := uint8(255)\n\tnv := (*uint8)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"uint8\"\n\tvs := \"255\"\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*\"+vt+\")(<nil>)\\n\")\n\n\t// Max uint16.\n\tv2 := uint16(65535)\n\tnv2 := (*uint16)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"uint16\"\n\tv2s := \"65535\"\n\taddDumpTest(v2, \"(\"+v2t+\") \"+v2s+\"\\n\")\n\taddDumpTest(pv2, \"(*\"+v2t+\")(\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(&pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(nv2, \"(*\"+v2t+\")(<nil>)\\n\")\n\n\t// Max uint32.\n\tv3 := uint32(4294967295)\n\tnv3 := (*uint32)(nil)\n\tpv3 := &v3\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"uint32\"\n\tv3s := \"4294967295\"\n\taddDumpTest(v3, \"(\"+v3t+\") \"+v3s+\"\\n\")\n\taddDumpTest(pv3, \"(*\"+v3t+\")(\"+v3Addr+\")(\"+v3s+\")\\n\")\n\taddDumpTest(&pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")(\"+v3s+\")\\n\")\n\taddDumpTest(nv3, \"(*\"+v3t+\")(<nil>)\\n\")\n\n\t// Max uint64.\n\tv4 := uint64(18446744073709551615)\n\tnv4 := (*uint64)(nil)\n\tpv4 := &v4\n\tv4Addr := fmt.Sprintf(\"%p\", pv4)\n\tpv4Addr := fmt.Sprintf(\"%p\", &pv4)\n\tv4t := \"uint64\"\n\tv4s := \"18446744073709551615\"\n\taddDumpTest(v4, \"(\"+v4t+\") \"+v4s+\"\\n\")\n\taddDumpTest(pv4, \"(*\"+v4t+\")(\"+v4Addr+\")(\"+v4s+\")\\n\")\n\taddDumpTest(&pv4, 
\"(**\"+v4t+\")(\"+pv4Addr+\"->\"+v4Addr+\")(\"+v4s+\")\\n\")\n\taddDumpTest(nv4, \"(*\"+v4t+\")(<nil>)\\n\")\n\n\t// Max uint.\n\tv5 := uint(4294967295)\n\tnv5 := (*uint)(nil)\n\tpv5 := &v5\n\tv5Addr := fmt.Sprintf(\"%p\", pv5)\n\tpv5Addr := fmt.Sprintf(\"%p\", &pv5)\n\tv5t := \"uint\"\n\tv5s := \"4294967295\"\n\taddDumpTest(v5, \"(\"+v5t+\") \"+v5s+\"\\n\")\n\taddDumpTest(pv5, \"(*\"+v5t+\")(\"+v5Addr+\")(\"+v5s+\")\\n\")\n\taddDumpTest(&pv5, \"(**\"+v5t+\")(\"+pv5Addr+\"->\"+v5Addr+\")(\"+v5s+\")\\n\")\n\taddDumpTest(nv5, \"(*\"+v5t+\")(<nil>)\\n\")\n}\n\nfunc addBoolDumpTests() {\n\t// Boolean true.\n\tv := bool(true)\n\tnv := (*bool)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"bool\"\n\tvs := \"true\"\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*\"+vt+\")(<nil>)\\n\")\n\n\t// Boolean false.\n\tv2 := bool(false)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"bool\"\n\tv2s := \"false\"\n\taddDumpTest(v2, \"(\"+v2t+\") \"+v2s+\"\\n\")\n\taddDumpTest(pv2, \"(*\"+v2t+\")(\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(&pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")(\"+v2s+\")\\n\")\n}\n\nfunc addFloatDumpTests() {\n\t// Standard float32.\n\tv := float32(3.1415)\n\tnv := (*float32)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"float32\"\n\tvs := \"3.1415\"\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*\"+vt+\")(<nil>)\\n\")\n\n\t// Standard float64.\n\tv2 := float64(3.1415926)\n\tnv2 := (*float64)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"float64\"\n\tv2s := \"3.1415926\"\n\taddDumpTest(v2, \"(\"+v2t+\") \"+v2s+\"\\n\")\n\taddDumpTest(pv2, \"(*\"+v2t+\")(\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(&pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(nv2, \"(*\"+v2t+\")(<nil>)\\n\")\n}\n\nfunc addComplexDumpTests() {\n\t// Standard complex64.\n\tv := complex(float32(6), -2)\n\tnv := (*complex64)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"complex64\"\n\tvs := \"(6-2i)\"\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*\"+vt+\")(<nil>)\\n\")\n\n\t// Standard complex128.\n\tv2 := complex(float64(-6), 2)\n\tnv2 := (*complex128)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"complex128\"\n\tv2s := \"(-6+2i)\"\n\taddDumpTest(v2, \"(\"+v2t+\") \"+v2s+\"\\n\")\n\taddDumpTest(pv2, \"(*\"+v2t+\")(\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(&pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(nv2, \"(*\"+v2t+\")(<nil>)\\n\")\n}\n\nfunc addArrayDumpTests() {\n\t// Array containing standard ints.\n\tv := [3]int{1, 2, 3}\n\tvLen := fmt.Sprintf(\"%d\", len(v))\n\tvCap := fmt.Sprintf(\"%d\", cap(v))\n\tnv := (*[3]int)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt 
:= \"int\"\n\tvs := \"(len=\" + vLen + \" cap=\" + vCap + \") {\\n (\" + vt + \") 1,\\n (\" +\n\t\tvt + \") 2,\\n (\" + vt + \") 3\\n}\"\n\taddDumpTest(v, \"([3]\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*[3]\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**[3]\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*[3]\"+vt+\")(<nil>)\\n\")\n\n\t// Array containing type with custom formatter on pointer receiver only.\n\tv2i0 := pstringer(\"1\")\n\tv2i1 := pstringer(\"2\")\n\tv2i2 := pstringer(\"3\")\n\tv2 := [3]pstringer{v2i0, v2i1, v2i2}\n\tv2i0Len := fmt.Sprintf(\"%d\", len(v2i0))\n\tv2i1Len := fmt.Sprintf(\"%d\", len(v2i1))\n\tv2i2Len := fmt.Sprintf(\"%d\", len(v2i2))\n\tv2Len := fmt.Sprintf(\"%d\", len(v2))\n\tv2Cap := fmt.Sprintf(\"%d\", cap(v2))\n\tnv2 := (*[3]pstringer)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"spew_test.pstringer\"\n\tv2sp := \"(len=\" + v2Len + \" cap=\" + v2Cap + \") {\\n (\" + v2t +\n\t\t\") (len=\" + v2i0Len + \") stringer 1,\\n (\" + v2t +\n\t\t\") (len=\" + v2i1Len + \") stringer 2,\\n (\" + v2t +\n\t\t\") (len=\" + v2i2Len + \") \" + \"stringer 3\\n}\"\n\tv2s := v2sp\n\tif spew.UnsafeDisabled {\n\t\tv2s = \"(len=\" + v2Len + \" cap=\" + v2Cap + \") {\\n (\" + v2t +\n\t\t\t\") (len=\" + v2i0Len + \") \\\"1\\\",\\n (\" + v2t + \") (len=\" +\n\t\t\tv2i1Len + \") \\\"2\\\",\\n (\" + v2t + \") (len=\" + v2i2Len +\n\t\t\t\") \" + \"\\\"3\\\"\\n}\"\n\t}\n\taddDumpTest(v2, \"([3]\"+v2t+\") \"+v2s+\"\\n\")\n\taddDumpTest(pv2, \"(*[3]\"+v2t+\")(\"+v2Addr+\")(\"+v2sp+\")\\n\")\n\taddDumpTest(&pv2, \"(**[3]\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")(\"+v2sp+\")\\n\")\n\taddDumpTest(nv2, \"(*[3]\"+v2t+\")(<nil>)\\n\")\n\n\t// Array containing interfaces.\n\tv3i0 := \"one\"\n\tv3 := [3]interface{}{v3i0, int(2), uint(3)}\n\tv3i0Len := fmt.Sprintf(\"%d\", len(v3i0))\n\tv3Len := fmt.Sprintf(\"%d\", len(v3))\n\tv3Cap := fmt.Sprintf(\"%d\", cap(v3))\n\tnv3 := (*[3]interface{})(nil)\n\tpv3 := &v3\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"[3]interface {}\"\n\tv3t2 := \"string\"\n\tv3t3 := \"int\"\n\tv3t4 := \"uint\"\n\tv3s := \"(len=\" + v3Len + \" cap=\" + v3Cap + \") {\\n (\" + v3t2 + \") \" +\n\t\t\"(len=\" + v3i0Len + \") \\\"one\\\",\\n (\" + v3t3 + \") 2,\\n (\" +\n\t\tv3t4 + \") 3\\n}\"\n\taddDumpTest(v3, \"(\"+v3t+\") \"+v3s+\"\\n\")\n\taddDumpTest(pv3, \"(*\"+v3t+\")(\"+v3Addr+\")(\"+v3s+\")\\n\")\n\taddDumpTest(&pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")(\"+v3s+\")\\n\")\n\taddDumpTest(nv3, \"(*\"+v3t+\")(<nil>)\\n\")\n\n\t// Array containing bytes.\n\tv4 := [34]byte{\n\t\t0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,\n\t\t0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,\n\t\t0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,\n\t\t0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,\n\t\t0x31, 0x32,\n\t}\n\tv4Len := fmt.Sprintf(\"%d\", len(v4))\n\tv4Cap := fmt.Sprintf(\"%d\", cap(v4))\n\tnv4 := (*[34]byte)(nil)\n\tpv4 := &v4\n\tv4Addr := fmt.Sprintf(\"%p\", pv4)\n\tpv4Addr := fmt.Sprintf(\"%p\", &pv4)\n\tv4t := \"[34]uint8\"\n\tv4s := \"(len=\" + v4Len + \" cap=\" + v4Cap + \") \" +\n\t\t\"{\\n 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20\" +\n\t\t\"  |............... 
|\\n\" +\n\t\t\" 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30\" +\n\t\t\"  |!\\\"#$%&'()*+,-./0|\\n\" +\n\t\t\" 00000020  31 32                                           \" +\n\t\t\"  |12|\\n}\"\n\taddDumpTest(v4, \"(\"+v4t+\") \"+v4s+\"\\n\")\n\taddDumpTest(pv4, \"(*\"+v4t+\")(\"+v4Addr+\")(\"+v4s+\")\\n\")\n\taddDumpTest(&pv4, \"(**\"+v4t+\")(\"+pv4Addr+\"->\"+v4Addr+\")(\"+v4s+\")\\n\")\n\taddDumpTest(nv4, \"(*\"+v4t+\")(<nil>)\\n\")\n}\n\nfunc addSliceDumpTests() {\n\t// Slice containing standard float32 values.\n\tv := []float32{3.14, 6.28, 12.56}\n\tvLen := fmt.Sprintf(\"%d\", len(v))\n\tvCap := fmt.Sprintf(\"%d\", cap(v))\n\tnv := (*[]float32)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"float32\"\n\tvs := \"(len=\" + vLen + \" cap=\" + vCap + \") {\\n (\" + vt + \") 3.14,\\n (\" +\n\t\tvt + \") 6.28,\\n (\" + vt + \") 12.56\\n}\"\n\taddDumpTest(v, \"([]\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*[]\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**[]\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*[]\"+vt+\")(<nil>)\\n\")\n\n\t// Slice containing type with custom formatter on pointer receiver only.\n\tv2i0 := pstringer(\"1\")\n\tv2i1 := pstringer(\"2\")\n\tv2i2 := pstringer(\"3\")\n\tv2 := []pstringer{v2i0, v2i1, v2i2}\n\tv2i0Len := fmt.Sprintf(\"%d\", len(v2i0))\n\tv2i1Len := fmt.Sprintf(\"%d\", len(v2i1))\n\tv2i2Len := fmt.Sprintf(\"%d\", len(v2i2))\n\tv2Len := fmt.Sprintf(\"%d\", len(v2))\n\tv2Cap := fmt.Sprintf(\"%d\", cap(v2))\n\tnv2 := (*[]pstringer)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"spew_test.pstringer\"\n\tv2s := \"(len=\" + v2Len + \" cap=\" + v2Cap + \") {\\n (\" + v2t + \") (len=\" +\n\t\tv2i0Len + \") stringer 1,\\n (\" + v2t + \") (len=\" + v2i1Len +\n\t\t\") stringer 2,\\n (\" + v2t + \") (len=\" + v2i2Len + \") \" +\n\t\t\"stringer 3\\n}\"\n\taddDumpTest(v2, \"([]\"+v2t+\") \"+v2s+\"\\n\")\n\taddDumpTest(pv2, \"(*[]\"+v2t+\")(\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(&pv2, \"(**[]\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(nv2, \"(*[]\"+v2t+\")(<nil>)\\n\")\n\n\t// Slice containing interfaces.\n\tv3i0 := \"one\"\n\tv3 := []interface{}{v3i0, int(2), uint(3), nil}\n\tv3i0Len := fmt.Sprintf(\"%d\", len(v3i0))\n\tv3Len := fmt.Sprintf(\"%d\", len(v3))\n\tv3Cap := fmt.Sprintf(\"%d\", cap(v3))\n\tnv3 := (*[]interface{})(nil)\n\tpv3 := &v3\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"[]interface {}\"\n\tv3t2 := \"string\"\n\tv3t3 := \"int\"\n\tv3t4 := \"uint\"\n\tv3t5 := \"interface {}\"\n\tv3s := \"(len=\" + v3Len + \" cap=\" + v3Cap + \") {\\n (\" + v3t2 + \") \" +\n\t\t\"(len=\" + v3i0Len + \") \\\"one\\\",\\n (\" + v3t3 + \") 2,\\n (\" +\n\t\tv3t4 + \") 3,\\n (\" + v3t5 + \") <nil>\\n}\"\n\taddDumpTest(v3, \"(\"+v3t+\") \"+v3s+\"\\n\")\n\taddDumpTest(pv3, \"(*\"+v3t+\")(\"+v3Addr+\")(\"+v3s+\")\\n\")\n\taddDumpTest(&pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")(\"+v3s+\")\\n\")\n\taddDumpTest(nv3, \"(*\"+v3t+\")(<nil>)\\n\")\n\n\t// Slice containing bytes.\n\tv4 := []byte{\n\t\t0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,\n\t\t0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,\n\t\t0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,\n\t\t0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,\n\t\t0x31, 0x32,\n\t}\n\tv4Len := fmt.Sprintf(\"%d\", len(v4))\n\tv4Cap := fmt.Sprintf(\"%d\", cap(v4))\n\tnv4 := 
(*[]byte)(nil)\n\tpv4 := &v4\n\tv4Addr := fmt.Sprintf(\"%p\", pv4)\n\tpv4Addr := fmt.Sprintf(\"%p\", &pv4)\n\tv4t := \"[]uint8\"\n\tv4s := \"(len=\" + v4Len + \" cap=\" + v4Cap + \") \" +\n\t\t\"{\\n 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20\" +\n\t\t\"  |............... |\\n\" +\n\t\t\" 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30\" +\n\t\t\"  |!\\\"#$%&'()*+,-./0|\\n\" +\n\t\t\" 00000020  31 32                                           \" +\n\t\t\"  |12|\\n}\"\n\taddDumpTest(v4, \"(\"+v4t+\") \"+v4s+\"\\n\")\n\taddDumpTest(pv4, \"(*\"+v4t+\")(\"+v4Addr+\")(\"+v4s+\")\\n\")\n\taddDumpTest(&pv4, \"(**\"+v4t+\")(\"+pv4Addr+\"->\"+v4Addr+\")(\"+v4s+\")\\n\")\n\taddDumpTest(nv4, \"(*\"+v4t+\")(<nil>)\\n\")\n\n\t// Nil slice.\n\tv5 := []int(nil)\n\tnv5 := (*[]int)(nil)\n\tpv5 := &v5\n\tv5Addr := fmt.Sprintf(\"%p\", pv5)\n\tpv5Addr := fmt.Sprintf(\"%p\", &pv5)\n\tv5t := \"[]int\"\n\tv5s := \"<nil>\"\n\taddDumpTest(v5, \"(\"+v5t+\") \"+v5s+\"\\n\")\n\taddDumpTest(pv5, \"(*\"+v5t+\")(\"+v5Addr+\")(\"+v5s+\")\\n\")\n\taddDumpTest(&pv5, \"(**\"+v5t+\")(\"+pv5Addr+\"->\"+v5Addr+\")(\"+v5s+\")\\n\")\n\taddDumpTest(nv5, \"(*\"+v5t+\")(<nil>)\\n\")\n}\n\nfunc addStringDumpTests() {\n\t// Standard string.\n\tv := \"test\"\n\tvLen := fmt.Sprintf(\"%d\", len(v))\n\tnv := (*string)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"string\"\n\tvs := \"(len=\" + vLen + \") \\\"test\\\"\"\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*\"+vt+\")(<nil>)\\n\")\n}\n\nfunc addInterfaceDumpTests() {\n\t// Nil interface.\n\tvar v interface{}\n\tnv := (*interface{})(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"interface {}\"\n\tvs := \"<nil>\"\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*\"+vt+\")(<nil>)\\n\")\n\n\t// Sub-interface.\n\tv2 := interface{}(uint16(65535))\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"uint16\"\n\tv2s := \"65535\"\n\taddDumpTest(v2, \"(\"+v2t+\") \"+v2s+\"\\n\")\n\taddDumpTest(pv2, \"(*\"+v2t+\")(\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(&pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")(\"+v2s+\")\\n\")\n}\n\nfunc addMapDumpTests() {\n\t// Map with string keys and int vals.\n\tk := \"one\"\n\tkk := \"two\"\n\tm := map[string]int{k: 1, kk: 2}\n\tklen := fmt.Sprintf(\"%d\", len(k)) // not kLen to shut golint up\n\tkkLen := fmt.Sprintf(\"%d\", len(kk))\n\tmLen := fmt.Sprintf(\"%d\", len(m))\n\tnilMap := map[string]int(nil)\n\tnm := (*map[string]int)(nil)\n\tpm := &m\n\tmAddr := fmt.Sprintf(\"%p\", pm)\n\tpmAddr := fmt.Sprintf(\"%p\", &pm)\n\tmt := \"map[string]int\"\n\tmt1 := \"string\"\n\tmt2 := \"int\"\n\tms := \"(len=\" + mLen + \") {\\n (\" + mt1 + \") (len=\" + klen + \") \" +\n\t\t\"\\\"one\\\": (\" + mt2 + \") 1,\\n (\" + mt1 + \") (len=\" + kkLen +\n\t\t\") \\\"two\\\": (\" + mt2 + \") 2\\n}\"\n\tms2 := \"(len=\" + mLen + \") {\\n (\" + mt1 + \") (len=\" + kkLen + \") \" +\n\t\t\"\\\"two\\\": (\" + mt2 + \") 2,\\n (\" + mt1 + \") (len=\" + klen +\n\t\t\") \\\"one\\\": (\" + mt2 + \") 1\\n}\"\n\taddDumpTest(m, \"(\"+mt+\") \"+ms+\"\\n\", \"(\"+mt+\") 
\"+ms2+\"\\n\")\n\taddDumpTest(pm, \"(*\"+mt+\")(\"+mAddr+\")(\"+ms+\")\\n\",\n\t\t\"(*\"+mt+\")(\"+mAddr+\")(\"+ms2+\")\\n\")\n\taddDumpTest(&pm, \"(**\"+mt+\")(\"+pmAddr+\"->\"+mAddr+\")(\"+ms+\")\\n\",\n\t\t\"(**\"+mt+\")(\"+pmAddr+\"->\"+mAddr+\")(\"+ms2+\")\\n\")\n\taddDumpTest(nm, \"(*\"+mt+\")(<nil>)\\n\")\n\taddDumpTest(nilMap, \"(\"+mt+\") <nil>\\n\")\n\n\t// Map with custom formatter type on pointer receiver only keys and vals.\n\tk2 := pstringer(\"one\")\n\tv2 := pstringer(\"1\")\n\tm2 := map[pstringer]pstringer{k2: v2}\n\tk2Len := fmt.Sprintf(\"%d\", len(k2))\n\tv2Len := fmt.Sprintf(\"%d\", len(v2))\n\tm2Len := fmt.Sprintf(\"%d\", len(m2))\n\tnilMap2 := map[pstringer]pstringer(nil)\n\tnm2 := (*map[pstringer]pstringer)(nil)\n\tpm2 := &m2\n\tm2Addr := fmt.Sprintf(\"%p\", pm2)\n\tpm2Addr := fmt.Sprintf(\"%p\", &pm2)\n\tm2t := \"map[spew_test.pstringer]spew_test.pstringer\"\n\tm2t1 := \"spew_test.pstringer\"\n\tm2t2 := \"spew_test.pstringer\"\n\tm2s := \"(len=\" + m2Len + \") {\\n (\" + m2t1 + \") (len=\" + k2Len + \") \" +\n\t\t\"stringer one: (\" + m2t2 + \") (len=\" + v2Len + \") stringer 1\\n}\"\n\tif spew.UnsafeDisabled {\n\t\tm2s = \"(len=\" + m2Len + \") {\\n (\" + m2t1 + \") (len=\" + k2Len +\n\t\t\t\") \" + \"\\\"one\\\": (\" + m2t2 + \") (len=\" + v2Len +\n\t\t\t\") \\\"1\\\"\\n}\"\n\t}\n\taddDumpTest(m2, \"(\"+m2t+\") \"+m2s+\"\\n\")\n\taddDumpTest(pm2, \"(*\"+m2t+\")(\"+m2Addr+\")(\"+m2s+\")\\n\")\n\taddDumpTest(&pm2, \"(**\"+m2t+\")(\"+pm2Addr+\"->\"+m2Addr+\")(\"+m2s+\")\\n\")\n\taddDumpTest(nm2, \"(*\"+m2t+\")(<nil>)\\n\")\n\taddDumpTest(nilMap2, \"(\"+m2t+\") <nil>\\n\")\n\n\t// Map with interface keys and values.\n\tk3 := \"one\"\n\tk3Len := fmt.Sprintf(\"%d\", len(k3))\n\tm3 := map[interface{}]interface{}{k3: 1}\n\tm3Len := fmt.Sprintf(\"%d\", len(m3))\n\tnilMap3 := map[interface{}]interface{}(nil)\n\tnm3 := (*map[interface{}]interface{})(nil)\n\tpm3 := &m3\n\tm3Addr := fmt.Sprintf(\"%p\", pm3)\n\tpm3Addr := fmt.Sprintf(\"%p\", &pm3)\n\tm3t := \"map[interface {}]interface {}\"\n\tm3t1 := \"string\"\n\tm3t2 := \"int\"\n\tm3s := \"(len=\" + m3Len + \") {\\n (\" + m3t1 + \") (len=\" + k3Len + \") \" +\n\t\t\"\\\"one\\\": (\" + m3t2 + \") 1\\n}\"\n\taddDumpTest(m3, \"(\"+m3t+\") \"+m3s+\"\\n\")\n\taddDumpTest(pm3, \"(*\"+m3t+\")(\"+m3Addr+\")(\"+m3s+\")\\n\")\n\taddDumpTest(&pm3, \"(**\"+m3t+\")(\"+pm3Addr+\"->\"+m3Addr+\")(\"+m3s+\")\\n\")\n\taddDumpTest(nm3, \"(*\"+m3t+\")(<nil>)\\n\")\n\taddDumpTest(nilMap3, \"(\"+m3t+\") <nil>\\n\")\n\n\t// Map with nil interface value.\n\tk4 := \"nil\"\n\tk4Len := fmt.Sprintf(\"%d\", len(k4))\n\tm4 := map[string]interface{}{k4: nil}\n\tm4Len := fmt.Sprintf(\"%d\", len(m4))\n\tnilMap4 := map[string]interface{}(nil)\n\tnm4 := (*map[string]interface{})(nil)\n\tpm4 := &m4\n\tm4Addr := fmt.Sprintf(\"%p\", pm4)\n\tpm4Addr := fmt.Sprintf(\"%p\", &pm4)\n\tm4t := \"map[string]interface {}\"\n\tm4t1 := \"string\"\n\tm4t2 := \"interface {}\"\n\tm4s := \"(len=\" + m4Len + \") {\\n (\" + m4t1 + \") (len=\" + k4Len + \")\" +\n\t\t\" \\\"nil\\\": (\" + m4t2 + \") <nil>\\n}\"\n\taddDumpTest(m4, \"(\"+m4t+\") \"+m4s+\"\\n\")\n\taddDumpTest(pm4, \"(*\"+m4t+\")(\"+m4Addr+\")(\"+m4s+\")\\n\")\n\taddDumpTest(&pm4, \"(**\"+m4t+\")(\"+pm4Addr+\"->\"+m4Addr+\")(\"+m4s+\")\\n\")\n\taddDumpTest(nm4, \"(*\"+m4t+\")(<nil>)\\n\")\n\taddDumpTest(nilMap4, \"(\"+m4t+\") <nil>\\n\")\n}\n\nfunc addStructDumpTests() {\n\t// Struct with primitives.\n\ttype s1 struct {\n\t\ta int8\n\t\tb uint8\n\t}\n\tv := s1{127, 255}\n\tnv := (*s1)(nil)\n\tpv := &v\n\tvAddr := 
fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"spew_test.s1\"\n\tvt2 := \"int8\"\n\tvt3 := \"uint8\"\n\tvs := \"{\\n a: (\" + vt2 + \") 127,\\n b: (\" + vt3 + \") 255\\n}\"\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*\"+vt+\")(<nil>)\\n\")\n\n\t// Struct that contains another struct.\n\ttype s2 struct {\n\t\ts1 s1\n\t\tb  bool\n\t}\n\tv2 := s2{s1{127, 255}, true}\n\tnv2 := (*s2)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"spew_test.s2\"\n\tv2t2 := \"spew_test.s1\"\n\tv2t3 := \"int8\"\n\tv2t4 := \"uint8\"\n\tv2t5 := \"bool\"\n\tv2s := \"{\\n s1: (\" + v2t2 + \") {\\n  a: (\" + v2t3 + \") 127,\\n  b: (\" +\n\t\tv2t4 + \") 255\\n },\\n b: (\" + v2t5 + \") true\\n}\"\n\taddDumpTest(v2, \"(\"+v2t+\") \"+v2s+\"\\n\")\n\taddDumpTest(pv2, \"(*\"+v2t+\")(\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(&pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(nv2, \"(*\"+v2t+\")(<nil>)\\n\")\n\n\t// Struct that contains custom type with Stringer pointer interface via both\n\t// exported and unexported fields.\n\ttype s3 struct {\n\t\ts pstringer\n\t\tS pstringer\n\t}\n\tv3 := s3{\"test\", \"test2\"}\n\tnv3 := (*s3)(nil)\n\tpv3 := &v3\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"spew_test.s3\"\n\tv3t2 := \"spew_test.pstringer\"\n\tv3s := \"{\\n s: (\" + v3t2 + \") (len=4) stringer test,\\n S: (\" + v3t2 +\n\t\t\") (len=5) stringer test2\\n}\"\n\tv3sp := v3s\n\tif spew.UnsafeDisabled {\n\t\tv3s = \"{\\n s: (\" + v3t2 + \") (len=4) \\\"test\\\",\\n S: (\" +\n\t\t\tv3t2 + \") (len=5) \\\"test2\\\"\\n}\"\n\t\tv3sp = \"{\\n s: (\" + v3t2 + \") (len=4) \\\"test\\\",\\n S: (\" +\n\t\t\tv3t2 + \") (len=5) stringer test2\\n}\"\n\t}\n\taddDumpTest(v3, \"(\"+v3t+\") \"+v3s+\"\\n\")\n\taddDumpTest(pv3, \"(*\"+v3t+\")(\"+v3Addr+\")(\"+v3sp+\")\\n\")\n\taddDumpTest(&pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")(\"+v3sp+\")\\n\")\n\taddDumpTest(nv3, \"(*\"+v3t+\")(<nil>)\\n\")\n\n\t// Struct that contains embedded struct and field to same struct.\n\te := embed{\"embedstr\"}\n\teLen := fmt.Sprintf(\"%d\", len(\"embedstr\"))\n\tv4 := embedwrap{embed: &e, e: &e}\n\tnv4 := (*embedwrap)(nil)\n\tpv4 := &v4\n\teAddr := fmt.Sprintf(\"%p\", &e)\n\tv4Addr := fmt.Sprintf(\"%p\", pv4)\n\tpv4Addr := fmt.Sprintf(\"%p\", &pv4)\n\tv4t := \"spew_test.embedwrap\"\n\tv4t2 := \"spew_test.embed\"\n\tv4t3 := \"string\"\n\tv4s := \"{\\n embed: (*\" + v4t2 + \")(\" + eAddr + \")({\\n  a: (\" + v4t3 +\n\t\t\") (len=\" + eLen + \") \\\"embedstr\\\"\\n }),\\n e: (*\" + v4t2 +\n\t\t\")(\" + eAddr + \")({\\n  a: (\" + v4t3 + \") (len=\" + eLen + \")\" +\n\t\t\" \\\"embedstr\\\"\\n })\\n}\"\n\taddDumpTest(v4, \"(\"+v4t+\") \"+v4s+\"\\n\")\n\taddDumpTest(pv4, \"(*\"+v4t+\")(\"+v4Addr+\")(\"+v4s+\")\\n\")\n\taddDumpTest(&pv4, \"(**\"+v4t+\")(\"+pv4Addr+\"->\"+v4Addr+\")(\"+v4s+\")\\n\")\n\taddDumpTest(nv4, \"(*\"+v4t+\")(<nil>)\\n\")\n}\n\nfunc addUintptrDumpTests() {\n\t// Null pointer.\n\tv := uintptr(0)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"uintptr\"\n\tvs := \"<nil>\"\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\n\t// 
Address of real variable.\n\ti := 1\n\tv2 := uintptr(unsafe.Pointer(&i))\n\tnv2 := (*uintptr)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"uintptr\"\n\tv2s := fmt.Sprintf(\"%p\", &i)\n\taddDumpTest(v2, \"(\"+v2t+\") \"+v2s+\"\\n\")\n\taddDumpTest(pv2, \"(*\"+v2t+\")(\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(&pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(nv2, \"(*\"+v2t+\")(<nil>)\\n\")\n}\n\nfunc addUnsafePointerDumpTests() {\n\t// Null pointer.\n\tv := unsafe.Pointer(uintptr(0))\n\tnv := (*unsafe.Pointer)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"unsafe.Pointer\"\n\tvs := \"<nil>\"\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*\"+vt+\")(<nil>)\\n\")\n\n\t// Address of real variable.\n\ti := 1\n\tv2 := unsafe.Pointer(&i)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"unsafe.Pointer\"\n\tv2s := fmt.Sprintf(\"%p\", &i)\n\taddDumpTest(v2, \"(\"+v2t+\") \"+v2s+\"\\n\")\n\taddDumpTest(pv2, \"(*\"+v2t+\")(\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(&pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")(\"+v2s+\")\\n\")\n}\n\nfunc addChanDumpTests() {\n\t// Nil channel.\n\tvar v chan int\n\tpv := &v\n\tnv := (*chan int)(nil)\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"chan int\"\n\tvs := \"<nil>\"\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*\"+vt+\")(<nil>)\\n\")\n\n\t// Real channel.\n\tv2 := make(chan int)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"chan int\"\n\tv2s := fmt.Sprintf(\"%p\", v2)\n\taddDumpTest(v2, \"(\"+v2t+\") \"+v2s+\"\\n\")\n\taddDumpTest(pv2, \"(*\"+v2t+\")(\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(&pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")(\"+v2s+\")\\n\")\n}\n\nfunc addFuncDumpTests() {\n\t// Function with no params and no returns.\n\tv := addIntDumpTests\n\tnv := (*func())(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"func()\"\n\tvs := fmt.Sprintf(\"%p\", v)\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*\"+vt+\")(<nil>)\\n\")\n\n\t// Function with param and no returns.\n\tv2 := TestDump\n\tnv2 := (*func(*testing.T))(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"func(*testing.T)\"\n\tv2s := fmt.Sprintf(\"%p\", v2)\n\taddDumpTest(v2, \"(\"+v2t+\") \"+v2s+\"\\n\")\n\taddDumpTest(pv2, \"(*\"+v2t+\")(\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(&pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")(\"+v2s+\")\\n\")\n\taddDumpTest(nv2, \"(*\"+v2t+\")(<nil>)\\n\")\n\n\t// Function with multiple params and multiple returns.\n\tvar v3 = func(i int, s string) (b bool, err error) {\n\t\treturn true, nil\n\t}\n\tnv3 := (*func(int, string) (bool, error))(nil)\n\tpv3 := &v3\n\tv3Addr := fmt.Sprintf(\"%p\", 
pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"func(int, string) (bool, error)\"\n\tv3s := fmt.Sprintf(\"%p\", v3)\n\taddDumpTest(v3, \"(\"+v3t+\") \"+v3s+\"\\n\")\n\taddDumpTest(pv3, \"(*\"+v3t+\")(\"+v3Addr+\")(\"+v3s+\")\\n\")\n\taddDumpTest(&pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")(\"+v3s+\")\\n\")\n\taddDumpTest(nv3, \"(*\"+v3t+\")(<nil>)\\n\")\n}\n\nfunc addCircularDumpTests() {\n\t// Struct that is circular through self referencing.\n\ttype circular struct {\n\t\tc *circular\n\t}\n\tv := circular{nil}\n\tv.c = &v\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"spew_test.circular\"\n\tvs := \"{\\n c: (*\" + vt + \")(\" + vAddr + \")({\\n  c: (*\" + vt + \")(\" +\n\t\tvAddr + \")(<already shown>)\\n })\\n}\"\n\tvs2 := \"{\\n c: (*\" + vt + \")(\" + vAddr + \")(<already shown>)\\n}\"\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\")(\"+vs2+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs2+\")\\n\")\n\n\t// Structs that are circular through cross referencing.\n\tv2 := xref1{nil}\n\tts2 := xref2{&v2}\n\tv2.ps2 = &ts2\n\tpv2 := &v2\n\tts2Addr := fmt.Sprintf(\"%p\", &ts2)\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"spew_test.xref1\"\n\tv2t2 := \"spew_test.xref2\"\n\tv2s := \"{\\n ps2: (*\" + v2t2 + \")(\" + ts2Addr + \")({\\n  ps1: (*\" + v2t +\n\t\t\")(\" + v2Addr + \")({\\n   ps2: (*\" + v2t2 + \")(\" + ts2Addr +\n\t\t\")(<already shown>)\\n  })\\n })\\n}\"\n\tv2s2 := \"{\\n ps2: (*\" + v2t2 + \")(\" + ts2Addr + \")({\\n  ps1: (*\" + v2t +\n\t\t\")(\" + v2Addr + \")(<already shown>)\\n })\\n}\"\n\taddDumpTest(v2, \"(\"+v2t+\") \"+v2s+\"\\n\")\n\taddDumpTest(pv2, \"(*\"+v2t+\")(\"+v2Addr+\")(\"+v2s2+\")\\n\")\n\taddDumpTest(&pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")(\"+v2s2+\")\\n\")\n\n\t// Structs that are indirectly circular.\n\tv3 := indirCir1{nil}\n\ttic2 := indirCir2{nil}\n\ttic3 := indirCir3{&v3}\n\ttic2.ps3 = &tic3\n\tv3.ps2 = &tic2\n\tpv3 := &v3\n\ttic2Addr := fmt.Sprintf(\"%p\", &tic2)\n\ttic3Addr := fmt.Sprintf(\"%p\", &tic3)\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"spew_test.indirCir1\"\n\tv3t2 := \"spew_test.indirCir2\"\n\tv3t3 := \"spew_test.indirCir3\"\n\tv3s := \"{\\n ps2: (*\" + v3t2 + \")(\" + tic2Addr + \")({\\n  ps3: (*\" + v3t3 +\n\t\t\")(\" + tic3Addr + \")({\\n   ps1: (*\" + v3t + \")(\" + v3Addr +\n\t\t\")({\\n    ps2: (*\" + v3t2 + \")(\" + tic2Addr +\n\t\t\")(<already shown>)\\n   })\\n  })\\n })\\n}\"\n\tv3s2 := \"{\\n ps2: (*\" + v3t2 + \")(\" + tic2Addr + \")({\\n  ps3: (*\" + v3t3 +\n\t\t\")(\" + tic3Addr + \")({\\n   ps1: (*\" + v3t + \")(\" + v3Addr +\n\t\t\")(<already shown>)\\n  })\\n })\\n}\"\n\taddDumpTest(v3, \"(\"+v3t+\") \"+v3s+\"\\n\")\n\taddDumpTest(pv3, \"(*\"+v3t+\")(\"+v3Addr+\")(\"+v3s2+\")\\n\")\n\taddDumpTest(&pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")(\"+v3s2+\")\\n\")\n}\n\nfunc addPanicDumpTests() {\n\t// Type that panics in its Stringer interface.\n\tv := panicer(127)\n\tnv := (*panicer)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"spew_test.panicer\"\n\tvs := \"(PANIC=test panic)127\"\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*\"+vt+\")(<nil>)\\n\")\n}\n\nfunc 
addErrorDumpTests() {\n\t// Type that has a custom Error interface.\n\tv := customError(127)\n\tnv := (*customError)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"spew_test.customError\"\n\tvs := \"error: 127\"\n\taddDumpTest(v, \"(\"+vt+\") \"+vs+\"\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(*\"+vt+\")(<nil>)\\n\")\n}\n\n// TestDump executes all of the tests described by dumpTests.\nfunc TestDump(t *testing.T) {\n\t// Setup tests.\n\taddIntDumpTests()\n\taddUintDumpTests()\n\taddBoolDumpTests()\n\taddFloatDumpTests()\n\taddComplexDumpTests()\n\taddArrayDumpTests()\n\taddSliceDumpTests()\n\taddStringDumpTests()\n\taddInterfaceDumpTests()\n\taddMapDumpTests()\n\taddStructDumpTests()\n\taddUintptrDumpTests()\n\taddUnsafePointerDumpTests()\n\taddChanDumpTests()\n\taddFuncDumpTests()\n\taddCircularDumpTests()\n\taddPanicDumpTests()\n\taddErrorDumpTests()\n\taddCgoDumpTests()\n\n\tt.Logf(\"Running %d tests\", len(dumpTests))\n\tfor i, test := range dumpTests {\n\t\tbuf := new(bytes.Buffer)\n\t\tspew.Fdump(buf, test.in)\n\t\ts := buf.String()\n\t\tif testFailed(s, test.wants) {\n\t\t\tt.Errorf(\"Dump #%d\\n got: %s %s\", i, s, stringizeWants(test.wants))\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\nfunc TestDumpSortedKeys(t *testing.T) {\n\tcfg := spew.ConfigState{SortKeys: true}\n\ts := cfg.Sdump(map[int]string{1: \"1\", 3: \"3\", 2: \"2\"})\n\texpected := \"(map[int]string) (len=3) {\\n(int) 1: (string) (len=1) \" +\n\t\t\"\\\"1\\\",\\n(int) 2: (string) (len=1) \\\"2\\\",\\n(int) 3: (string) \" +\n\t\t\"(len=1) \\\"3\\\"\\n\" +\n\t\t\"}\\n\"\n\tif s != expected {\n\t\tt.Errorf(\"Sorted keys mismatch:\\n  %v %v\", s, expected)\n\t}\n\n\ts = cfg.Sdump(map[stringer]int{\"1\": 1, \"3\": 3, \"2\": 2})\n\texpected = \"(map[spew_test.stringer]int) (len=3) {\\n\" +\n\t\t\"(spew_test.stringer) (len=1) stringer 1: (int) 1,\\n\" +\n\t\t\"(spew_test.stringer) (len=1) stringer 2: (int) 2,\\n\" +\n\t\t\"(spew_test.stringer) (len=1) stringer 3: (int) 3\\n\" +\n\t\t\"}\\n\"\n\tif s != expected {\n\t\tt.Errorf(\"Sorted keys mismatch:\\n  %v %v\", s, expected)\n\t}\n\n\ts = cfg.Sdump(map[pstringer]int{pstringer(\"1\"): 1, pstringer(\"3\"): 3, pstringer(\"2\"): 2})\n\texpected = \"(map[spew_test.pstringer]int) (len=3) {\\n\" +\n\t\t\"(spew_test.pstringer) (len=1) stringer 1: (int) 1,\\n\" +\n\t\t\"(spew_test.pstringer) (len=1) stringer 2: (int) 2,\\n\" +\n\t\t\"(spew_test.pstringer) (len=1) stringer 3: (int) 3\\n\" +\n\t\t\"}\\n\"\n\tif spew.UnsafeDisabled {\n\t\texpected = \"(map[spew_test.pstringer]int) (len=3) {\\n\" +\n\t\t\t\"(spew_test.pstringer) (len=1) \\\"1\\\": (int) 1,\\n\" +\n\t\t\t\"(spew_test.pstringer) (len=1) \\\"2\\\": (int) 2,\\n\" +\n\t\t\t\"(spew_test.pstringer) (len=1) \\\"3\\\": (int) 3\\n\" +\n\t\t\t\"}\\n\"\n\t}\n\tif s != expected {\n\t\tt.Errorf(\"Sorted keys mismatch:\\n  %v %v\", s, expected)\n\t}\n\n\ts = cfg.Sdump(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2})\n\texpected = \"(map[spew_test.customError]int) (len=3) {\\n\" +\n\t\t\"(spew_test.customError) error: 1: (int) 1,\\n\" +\n\t\t\"(spew_test.customError) error: 2: (int) 2,\\n\" +\n\t\t\"(spew_test.customError) error: 3: (int) 3\\n\" +\n\t\t\"}\\n\"\n\tif s != expected {\n\t\tt.Errorf(\"Sorted keys mismatch:\\n  %v %v\", s, expected)\n\t}\n\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go",
    "content": "// Copyright (c) 2013-2016 Dave Collins <dave@davec.name>\n//\n// Permission to use, copy, modify, and distribute this software for any\n// purpose with or without fee is hereby granted, provided that the above\n// copyright notice and this permission notice appear in all copies.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n// NOTE: Due to the following build constraints, this file will only be compiled\n// when both cgo is supported and \"-tags testcgo\" is added to the go test\n// command line.  This means the cgo tests are only added (and hence run) when\n// specifially requested.  This configuration is used because spew itself\n// does not require cgo to run even though it does handle certain cgo types\n// specially.  Rather than forcing all clients to require cgo and an external\n// C compiler just to run the tests, this scheme makes them optional.\n// +build cgo,testcgo\n\npackage spew_test\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/davecgh/go-spew/spew/testdata\"\n)\n\nfunc addCgoDumpTests() {\n\t// C char pointer.\n\tv := testdata.GetCgoCharPointer()\n\tnv := testdata.GetCgoNullCharPointer()\n\tpv := &v\n\tvcAddr := fmt.Sprintf(\"%p\", v)\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"*testdata._Ctype_char\"\n\tvs := \"116\"\n\taddDumpTest(v, \"(\"+vt+\")(\"+vcAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(pv, \"(*\"+vt+\")(\"+vAddr+\"->\"+vcAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(&pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\"->\"+vcAddr+\")(\"+vs+\")\\n\")\n\taddDumpTest(nv, \"(\"+vt+\")(<nil>)\\n\")\n\n\t// C char array.\n\tv2, v2l, v2c := testdata.GetCgoCharArray()\n\tv2Len := fmt.Sprintf(\"%d\", v2l)\n\tv2Cap := fmt.Sprintf(\"%d\", v2c)\n\tv2t := \"[6]testdata._Ctype_char\"\n\tv2s := \"(len=\" + v2Len + \" cap=\" + v2Cap + \") \" +\n\t\t\"{\\n 00000000  74 65 73 74 32 00                               \" +\n\t\t\"  |test2.|\\n}\"\n\taddDumpTest(v2, \"(\"+v2t+\") \"+v2s+\"\\n\")\n\n\t// C unsigned char array.\n\tv3, v3l, v3c := testdata.GetCgoUnsignedCharArray()\n\tv3Len := fmt.Sprintf(\"%d\", v3l)\n\tv3Cap := fmt.Sprintf(\"%d\", v3c)\n\tv3t := \"[6]testdata._Ctype_unsignedchar\"\n\tv3t2 := \"[6]testdata._Ctype_uchar\"\n\tv3s := \"(len=\" + v3Len + \" cap=\" + v3Cap + \") \" +\n\t\t\"{\\n 00000000  74 65 73 74 33 00                               \" +\n\t\t\"  |test3.|\\n}\"\n\taddDumpTest(v3, \"(\"+v3t+\") \"+v3s+\"\\n\", \"(\"+v3t2+\") \"+v3s+\"\\n\")\n\n\t// C signed char array.\n\tv4, v4l, v4c := testdata.GetCgoSignedCharArray()\n\tv4Len := fmt.Sprintf(\"%d\", v4l)\n\tv4Cap := fmt.Sprintf(\"%d\", v4c)\n\tv4t := \"[6]testdata._Ctype_schar\"\n\tv4t2 := \"testdata._Ctype_schar\"\n\tv4s := \"(len=\" + v4Len + \" cap=\" + v4Cap + \") \" +\n\t\t\"{\\n (\" + v4t2 + \") 116,\\n (\" + v4t2 + \") 101,\\n (\" + v4t2 +\n\t\t\") 115,\\n (\" + v4t2 + \") 116,\\n (\" + v4t2 + \") 52,\\n (\" + v4t2 +\n\t\t\") 0\\n}\"\n\taddDumpTest(v4, \"(\"+v4t+\") \"+v4s+\"\\n\")\n\n\t// C uint8_t array.\n\tv5, v5l, v5c := testdata.GetCgoUint8tArray()\n\tv5Len := fmt.Sprintf(\"%d\", 
v5l)\n\tv5Cap := fmt.Sprintf(\"%d\", v5c)\n\tv5t := \"[6]testdata._Ctype_uint8_t\"\n\tv5s := \"(len=\" + v5Len + \" cap=\" + v5Cap + \") \" +\n\t\t\"{\\n 00000000  74 65 73 74 35 00                               \" +\n\t\t\"  |test5.|\\n}\"\n\taddDumpTest(v5, \"(\"+v5t+\") \"+v5s+\"\\n\")\n\n\t// C typedefed unsigned char array.\n\tv6, v6l, v6c := testdata.GetCgoTypdefedUnsignedCharArray()\n\tv6Len := fmt.Sprintf(\"%d\", v6l)\n\tv6Cap := fmt.Sprintf(\"%d\", v6c)\n\tv6t := \"[6]testdata._Ctype_custom_uchar_t\"\n\tv6s := \"(len=\" + v6Len + \" cap=\" + v6Cap + \") \" +\n\t\t\"{\\n 00000000  74 65 73 74 36 00                               \" +\n\t\t\"  |test6.|\\n}\"\n\taddDumpTest(v6, \"(\"+v6t+\") \"+v6s+\"\\n\")\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go",
    "content": "// Copyright (c) 2013 Dave Collins <dave@davec.name>\n//\n// Permission to use, copy, modify, and distribute this software for any\n// purpose with or without fee is hereby granted, provided that the above\n// copyright notice and this permission notice appear in all copies.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n// NOTE: Due to the following build constraints, this file will only be compiled\n// when either cgo is not supported or \"-tags testcgo\" is not added to the go\n// test command line.  This file intentionally does not setup any cgo tests in\n// this scenario.\n// +build !cgo !testcgo\n\npackage spew_test\n\nfunc addCgoDumpTests() {\n\t// Don't add any tests for cgo since this file is only compiled when\n\t// there should not be any cgo tests.\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/example_test.go",
    "content": "/*\n * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n */\n\npackage spew_test\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n)\n\ntype Flag int\n\nconst (\n\tflagOne Flag = iota\n\tflagTwo\n)\n\nvar flagStrings = map[Flag]string{\n\tflagOne: \"flagOne\",\n\tflagTwo: \"flagTwo\",\n}\n\nfunc (f Flag) String() string {\n\tif s, ok := flagStrings[f]; ok {\n\t\treturn s\n\t}\n\treturn fmt.Sprintf(\"Unknown flag (%d)\", int(f))\n}\n\ntype Bar struct {\n\tdata uintptr\n}\n\ntype Foo struct {\n\tunexportedField Bar\n\tExportedField   map[interface{}]interface{}\n}\n\n// This example demonstrates how to use Dump to dump variables to stdout.\nfunc ExampleDump() {\n\t// The following package level declarations are assumed for this example:\n\t/*\n\t\ttype Flag int\n\n\t\tconst (\n\t\t\tflagOne Flag = iota\n\t\t\tflagTwo\n\t\t)\n\n\t\tvar flagStrings = map[Flag]string{\n\t\t\tflagOne: \"flagOne\",\n\t\t\tflagTwo: \"flagTwo\",\n\t\t}\n\n\t\tfunc (f Flag) String() string {\n\t\t\tif s, ok := flagStrings[f]; ok {\n\t\t\t\treturn s\n\t\t\t}\n\t\t\treturn fmt.Sprintf(\"Unknown flag (%d)\", int(f))\n\t\t}\n\n\t\ttype Bar struct {\n\t\t\tdata uintptr\n\t\t}\n\n\t\ttype Foo struct {\n\t\t\tunexportedField Bar\n\t\t\tExportedField   map[interface{}]interface{}\n\t\t}\n\t*/\n\n\t// Setup some sample data structures for the example.\n\tbar := Bar{uintptr(0)}\n\ts1 := Foo{bar, map[interface{}]interface{}{\"one\": true}}\n\tf := Flag(5)\n\tb := []byte{\n\t\t0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,\n\t\t0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,\n\t\t0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,\n\t\t0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,\n\t\t0x31, 0x32,\n\t}\n\n\t// Dump!\n\tspew.Dump(s1, f, b)\n\n\t// Output:\n\t// (spew_test.Foo) {\n\t//  unexportedField: (spew_test.Bar) {\n\t//   data: (uintptr) <nil>\n\t//  },\n\t//  ExportedField: (map[interface {}]interface {}) (len=1) {\n\t//   (string) (len=3) \"one\": (bool) true\n\t//  }\n\t// }\n\t// (spew_test.Flag) Unknown flag (5)\n\t// ([]uint8) (len=34 cap=34) {\n\t//  00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... 
|\n\t//  00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!\"#$%&'()*+,-./0|\n\t//  00000020  31 32                                             |12|\n\t// }\n\t//\n}\n\n// This example demonstrates how to use Printf to display a variable with a\n// format string and inline formatting.\nfunc ExamplePrintf() {\n\t// Create a double pointer to a uint8.\n\tui8 := uint8(5)\n\tpui8 := &ui8\n\tppui8 := &pui8\n\n\t// Create a circular data type.\n\ttype circular struct {\n\t\tui8 uint8\n\t\tc   *circular\n\t}\n\tc := circular{ui8: 1}\n\tc.c = &c\n\n\t// Print!\n\tspew.Printf(\"ppui8: %v\\n\", ppui8)\n\tspew.Printf(\"circular: %v\\n\", c)\n\n\t// Output:\n\t// ppui8: <**>5\n\t// circular: {1 <*>{1 <*><shown>}}\n}\n\n// This example demonstrates how to use a ConfigState.\nfunc ExampleConfigState() {\n\t// Modify the indent level of the ConfigState only.  The global\n\t// configuration is not modified.\n\tscs := spew.ConfigState{Indent: \"\\t\"}\n\n\t// Output using the ConfigState instance.\n\tv := map[string]int{\"one\": 1}\n\tscs.Printf(\"v: %v\\n\", v)\n\tscs.Dump(v)\n\n\t// Output:\n\t// v: map[one:1]\n\t// (map[string]int) (len=1) {\n\t// \t(string) (len=3) \"one\": (int) 1\n\t// }\n}\n\n// This example demonstrates how to use ConfigState.Dump to dump variables to\n// stdout.\nfunc ExampleConfigState_Dump() {\n\t// See the top-level Dump example for details on the types used in this\n\t// example.\n\n\t// Create two ConfigState instances with different indentation.\n\tscs := spew.ConfigState{Indent: \"\\t\"}\n\tscs2 := spew.ConfigState{Indent: \" \"}\n\n\t// Setup some sample data structures for the example.\n\tbar := Bar{uintptr(0)}\n\ts1 := Foo{bar, map[interface{}]interface{}{\"one\": true}}\n\n\t// Dump using the ConfigState instances.\n\tscs.Dump(s1)\n\tscs2.Dump(s1)\n\n\t// Output:\n\t// (spew_test.Foo) {\n\t// \tunexportedField: (spew_test.Bar) {\n\t// \t\tdata: (uintptr) <nil>\n\t// \t},\n\t// \tExportedField: (map[interface {}]interface {}) (len=1) {\n\t//\t\t(string) (len=3) \"one\": (bool) true\n\t// \t}\n\t// }\n\t// (spew_test.Foo) {\n\t//  unexportedField: (spew_test.Bar) {\n\t//   data: (uintptr) <nil>\n\t//  },\n\t//  ExportedField: (map[interface {}]interface {}) (len=1) {\n\t//   (string) (len=3) \"one\": (bool) true\n\t//  }\n\t// }\n\t//\n}\n\n// This example demonstrates how to use ConfigState.Printf to display a variable\n// with a format string and inline formatting.\nfunc ExampleConfigState_Printf() {\n\t// See the top-level Dump example for details on the types used in this\n\t// example.\n\n\t// Create two ConfigState instances and modify the method handling of the\n\t// first ConfigState only.\n\tscs := spew.NewDefaultConfig()\n\tscs2 := spew.NewDefaultConfig()\n\tscs.DisableMethods = true\n\n\t// Alternatively\n\t// scs := spew.ConfigState{Indent: \" \", DisableMethods: true}\n\t// scs2 := spew.ConfigState{Indent: \" \"}\n\n\t// This is of type Flag which implements a Stringer and has raw value 1.\n\tf := flagTwo\n\n\t// Dump using the ConfigState instances.\n\tscs.Printf(\"f: %v\\n\", f)\n\tscs2.Printf(\"f: %v\\n\", f)\n\n\t// Output:\n\t// f: 1\n\t// f: flagTwo\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/format.go",
    "content": "/*\n * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n */\n\npackage spew\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n// supportedFlags is a list of all the character flags supported by fmt package.\nconst supportedFlags = \"0-+# \"\n\n// formatState implements the fmt.Formatter interface and contains information\n// about the state of a formatting operation.  The NewFormatter function can\n// be used to get a new Formatter which can be used directly as arguments\n// in standard fmt package printing calls.\ntype formatState struct {\n\tvalue          interface{}\n\tfs             fmt.State\n\tdepth          int\n\tpointers       map[uintptr]int\n\tignoreNextType bool\n\tcs             *ConfigState\n}\n\n// buildDefaultFormat recreates the original format string without precision\n// and width information to pass in to fmt.Sprintf in the case of an\n// unrecognized type.  Unless new types are added to the language, this\n// function won't ever be called.\nfunc (f *formatState) buildDefaultFormat() (format string) {\n\tbuf := bytes.NewBuffer(percentBytes)\n\n\tfor _, flag := range supportedFlags {\n\t\tif f.fs.Flag(int(flag)) {\n\t\t\tbuf.WriteRune(flag)\n\t\t}\n\t}\n\n\tbuf.WriteRune('v')\n\n\tformat = buf.String()\n\treturn format\n}\n\n// constructOrigFormat recreates the original format string including precision\n// and width information to pass along to the standard fmt package.  
This allows\n// automatic deferral of all format strings this package doesn't support.\nfunc (f *formatState) constructOrigFormat(verb rune) (format string) {\n\tbuf := bytes.NewBuffer(percentBytes)\n\n\tfor _, flag := range supportedFlags {\n\t\tif f.fs.Flag(int(flag)) {\n\t\t\tbuf.WriteRune(flag)\n\t\t}\n\t}\n\n\tif width, ok := f.fs.Width(); ok {\n\t\tbuf.WriteString(strconv.Itoa(width))\n\t}\n\n\tif precision, ok := f.fs.Precision(); ok {\n\t\tbuf.Write(precisionBytes)\n\t\tbuf.WriteString(strconv.Itoa(precision))\n\t}\n\n\tbuf.WriteRune(verb)\n\n\tformat = buf.String()\n\treturn format\n}\n\n// unpackValue returns values inside of non-nil interfaces when possible and\n// ensures that types for values which have been unpacked from an interface\n// are displayed when the show types flag is also set.\n// This is useful for data types like structs, arrays, slices, and maps which\n// can contain varying types packed inside an interface.\nfunc (f *formatState) unpackValue(v reflect.Value) reflect.Value {\n\tif v.Kind() == reflect.Interface {\n\t\tf.ignoreNextType = false\n\t\tif !v.IsNil() {\n\t\t\tv = v.Elem()\n\t\t}\n\t}\n\treturn v\n}\n\n// formatPtr handles formatting of pointers by indirecting them as necessary.\nfunc (f *formatState) formatPtr(v reflect.Value) {\n\t// Display nil if top level pointer is nil.\n\tshowTypes := f.fs.Flag('#')\n\tif v.IsNil() && (!showTypes || f.ignoreNextType) {\n\t\tf.fs.Write(nilAngleBytes)\n\t\treturn\n\t}\n\n\t// Remove pointers at or below the current depth from map used to detect\n\t// circular refs.\n\tfor k, depth := range f.pointers {\n\t\tif depth >= f.depth {\n\t\t\tdelete(f.pointers, k)\n\t\t}\n\t}\n\n\t// Keep list of all dereferenced pointers to possibly show later.\n\tpointerChain := make([]uintptr, 0)\n\n\t// Figure out how many levels of indirection there are by dereferencing\n\t// pointers and unpacking interfaces down the chain while detecting circular\n\t// references.\n\tnilFound := false\n\tcycleFound := false\n\tindirects := 0\n\tve := v\n\tfor ve.Kind() == reflect.Ptr {\n\t\tif ve.IsNil() {\n\t\t\tnilFound = true\n\t\t\tbreak\n\t\t}\n\t\tindirects++\n\t\taddr := ve.Pointer()\n\t\tpointerChain = append(pointerChain, addr)\n\t\tif pd, ok := f.pointers[addr]; ok && pd < f.depth {\n\t\t\tcycleFound = true\n\t\t\tindirects--\n\t\t\tbreak\n\t\t}\n\t\tf.pointers[addr] = f.depth\n\n\t\tve = ve.Elem()\n\t\tif ve.Kind() == reflect.Interface {\n\t\t\tif ve.IsNil() {\n\t\t\t\tnilFound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tve = ve.Elem()\n\t\t}\n\t}\n\n\t// Display type or indirection level depending on flags.\n\tif showTypes && !f.ignoreNextType {\n\t\tf.fs.Write(openParenBytes)\n\t\tf.fs.Write(bytes.Repeat(asteriskBytes, indirects))\n\t\tf.fs.Write([]byte(ve.Type().String()))\n\t\tf.fs.Write(closeParenBytes)\n\t} else {\n\t\tif nilFound || cycleFound {\n\t\t\tindirects += strings.Count(ve.Type().String(), \"*\")\n\t\t}\n\t\tf.fs.Write(openAngleBytes)\n\t\tf.fs.Write([]byte(strings.Repeat(\"*\", indirects)))\n\t\tf.fs.Write(closeAngleBytes)\n\t}\n\n\t// Display pointer information depending on flags.\n\tif f.fs.Flag('+') && (len(pointerChain) > 0) {\n\t\tf.fs.Write(openParenBytes)\n\t\tfor i, addr := range pointerChain {\n\t\t\tif i > 0 {\n\t\t\t\tf.fs.Write(pointerChainBytes)\n\t\t\t}\n\t\t\tprintHexPtr(f.fs, addr)\n\t\t}\n\t\tf.fs.Write(closeParenBytes)\n\t}\n\n\t// Display dereferenced value.\n\tswitch {\n\tcase nilFound == true:\n\t\tf.fs.Write(nilAngleBytes)\n\n\tcase cycleFound == 
true:\n\t\tf.fs.Write(circularShortBytes)\n\n\tdefault:\n\t\tf.ignoreNextType = true\n\t\tf.format(ve)\n\t}\n}\n\n// format is the main workhorse for providing the Formatter interface.  It\n// uses the passed reflect value to figure out what kind of object we are\n// dealing with and formats it appropriately.  It is a recursive function,\n// however circular data structures are detected and handled properly.\nfunc (f *formatState) format(v reflect.Value) {\n\t// Handle invalid reflect values immediately.\n\tkind := v.Kind()\n\tif kind == reflect.Invalid {\n\t\tf.fs.Write(invalidAngleBytes)\n\t\treturn\n\t}\n\n\t// Handle pointers specially.\n\tif kind == reflect.Ptr {\n\t\tf.formatPtr(v)\n\t\treturn\n\t}\n\n\t// Print type information unless already handled elsewhere.\n\tif !f.ignoreNextType && f.fs.Flag('#') {\n\t\tf.fs.Write(openParenBytes)\n\t\tf.fs.Write([]byte(v.Type().String()))\n\t\tf.fs.Write(closeParenBytes)\n\t}\n\tf.ignoreNextType = false\n\n\t// Call Stringer/error interfaces if they exist and the handle methods\n\t// flag is enabled.\n\tif !f.cs.DisableMethods {\n\t\tif (kind != reflect.Invalid) && (kind != reflect.Interface) {\n\t\t\tif handled := handleMethods(f.cs, f.fs, v); handled {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\tswitch kind {\n\tcase reflect.Invalid:\n\t\t// Do nothing.  We should never get here since invalid has already\n\t\t// been handled above.\n\n\tcase reflect.Bool:\n\t\tprintBool(f.fs, v.Bool())\n\n\tcase reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:\n\t\tprintInt(f.fs, v.Int(), 10)\n\n\tcase reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:\n\t\tprintUint(f.fs, v.Uint(), 10)\n\n\tcase reflect.Float32:\n\t\tprintFloat(f.fs, v.Float(), 32)\n\n\tcase reflect.Float64:\n\t\tprintFloat(f.fs, v.Float(), 64)\n\n\tcase reflect.Complex64:\n\t\tprintComplex(f.fs, v.Complex(), 32)\n\n\tcase reflect.Complex128:\n\t\tprintComplex(f.fs, v.Complex(), 64)\n\n\tcase reflect.Slice:\n\t\tif v.IsNil() {\n\t\t\tf.fs.Write(nilAngleBytes)\n\t\t\tbreak\n\t\t}\n\t\tfallthrough\n\n\tcase reflect.Array:\n\t\tf.fs.Write(openBracketBytes)\n\t\tf.depth++\n\t\tif (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {\n\t\t\tf.fs.Write(maxShortBytes)\n\t\t} else {\n\t\t\tnumEntries := v.Len()\n\t\t\tfor i := 0; i < numEntries; i++ {\n\t\t\t\tif i > 0 {\n\t\t\t\t\tf.fs.Write(spaceBytes)\n\t\t\t\t}\n\t\t\t\tf.ignoreNextType = true\n\t\t\t\tf.format(f.unpackValue(v.Index(i)))\n\t\t\t}\n\t\t}\n\t\tf.depth--\n\t\tf.fs.Write(closeBracketBytes)\n\n\tcase reflect.String:\n\t\tf.fs.Write([]byte(v.String()))\n\n\tcase reflect.Interface:\n\t\t// The only time we should get here is for nil interfaces due to\n\t\t// unpackValue calls.\n\t\tif v.IsNil() {\n\t\t\tf.fs.Write(nilAngleBytes)\n\t\t}\n\n\tcase reflect.Ptr:\n\t\t// Do nothing.  
We should never get here since pointers have already\n\t\t// been handled above.\n\n\tcase reflect.Map:\n\t\t// nil maps should be indicated as different than empty maps\n\t\tif v.IsNil() {\n\t\t\tf.fs.Write(nilAngleBytes)\n\t\t\tbreak\n\t\t}\n\n\t\tf.fs.Write(openMapBytes)\n\t\tf.depth++\n\t\tif (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {\n\t\t\tf.fs.Write(maxShortBytes)\n\t\t} else {\n\t\t\tkeys := v.MapKeys()\n\t\t\tif f.cs.SortKeys {\n\t\t\t\tsortValues(keys, f.cs)\n\t\t\t}\n\t\t\tfor i, key := range keys {\n\t\t\t\tif i > 0 {\n\t\t\t\t\tf.fs.Write(spaceBytes)\n\t\t\t\t}\n\t\t\t\tf.ignoreNextType = true\n\t\t\t\tf.format(f.unpackValue(key))\n\t\t\t\tf.fs.Write(colonBytes)\n\t\t\t\tf.ignoreNextType = true\n\t\t\t\tf.format(f.unpackValue(v.MapIndex(key)))\n\t\t\t}\n\t\t}\n\t\tf.depth--\n\t\tf.fs.Write(closeMapBytes)\n\n\tcase reflect.Struct:\n\t\tnumFields := v.NumField()\n\t\tf.fs.Write(openBraceBytes)\n\t\tf.depth++\n\t\tif (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {\n\t\t\tf.fs.Write(maxShortBytes)\n\t\t} else {\n\t\t\tvt := v.Type()\n\t\t\tfor i := 0; i < numFields; i++ {\n\t\t\t\tif i > 0 {\n\t\t\t\t\tf.fs.Write(spaceBytes)\n\t\t\t\t}\n\t\t\t\tvtf := vt.Field(i)\n\t\t\t\tif f.fs.Flag('+') || f.fs.Flag('#') {\n\t\t\t\t\tf.fs.Write([]byte(vtf.Name))\n\t\t\t\t\tf.fs.Write(colonBytes)\n\t\t\t\t}\n\t\t\t\tf.format(f.unpackValue(v.Field(i)))\n\t\t\t}\n\t\t}\n\t\tf.depth--\n\t\tf.fs.Write(closeBraceBytes)\n\n\tcase reflect.Uintptr:\n\t\tprintHexPtr(f.fs, uintptr(v.Uint()))\n\n\tcase reflect.UnsafePointer, reflect.Chan, reflect.Func:\n\t\tprintHexPtr(f.fs, v.Pointer())\n\n\t// There were not any other types at the time this code was written, but\n\t// fall back to letting the default fmt package handle it if any get added.\n\tdefault:\n\t\tformat := f.buildDefaultFormat()\n\t\tif v.CanInterface() {\n\t\t\tfmt.Fprintf(f.fs, format, v.Interface())\n\t\t} else {\n\t\t\tfmt.Fprintf(f.fs, format, v.String())\n\t\t}\n\t}\n}\n\n// Format satisfies the fmt.Formatter interface. See NewFormatter for usage\n// details.\nfunc (f *formatState) Format(fs fmt.State, verb rune) {\n\tf.fs = fs\n\n\t// Use standard formatting for verbs that are not v.\n\tif verb != 'v' {\n\t\tformat := f.constructOrigFormat(verb)\n\t\tfmt.Fprintf(fs, format, f.value)\n\t\treturn\n\t}\n\n\tif f.value == nil {\n\t\tif fs.Flag('#') {\n\t\t\tfs.Write(interfaceBytes)\n\t\t}\n\t\tfs.Write(nilAngleBytes)\n\t\treturn\n\t}\n\n\tf.format(reflect.ValueOf(f.value))\n}\n\n// newFormatter is a helper function to consolidate the logic from the various\n// public methods which take varying config states.\nfunc newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {\n\tfs := &formatState{value: v, cs: cs}\n\tfs.pointers = make(map[uintptr]int)\n\treturn fs\n}\n\n/*\nNewFormatter returns a custom formatter that satisfies the fmt.Formatter\ninterface.  As a result, it integrates cleanly with standard fmt package\nprinting functions.  The formatter is useful for inline printing of smaller data\ntypes similar to the standard %v format specifier.\n\nThe custom formatter only responds to the %v (most compact), %+v (adds pointer\naddresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb\ncombinations.  Any other verbs such as %x and %q will be sent to the\nstandard fmt package for formatting.  
In addition, the custom formatter ignores\nthe width and precision arguments (however they will still work on the format\nspecifiers not handled by the custom formatter).\n\nTypically this function shouldn't be called directly.  It is much easier to make\nuse of the custom formatter by calling one of the convenience functions such as\nPrintf, Println, or Fprintf.\n*/\nfunc NewFormatter(v interface{}) fmt.Formatter {\n\treturn newFormatter(&Config, v)\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/format_test.go",
    "content": "/*\n * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n */\n\n/*\nTest Summary:\nNOTE: For each test, a nil pointer, a single pointer and double pointer to the\nbase test element are also tested to ensure proper indirection across all types.\n\n- Max int8, int16, int32, int64, int\n- Max uint8, uint16, uint32, uint64, uint\n- Boolean true and false\n- Standard complex64 and complex128\n- Array containing standard ints\n- Array containing type with custom formatter on pointer receiver only\n- Array containing interfaces\n- Slice containing standard float32 values\n- Slice containing type with custom formatter on pointer receiver only\n- Slice containing interfaces\n- Nil slice\n- Standard string\n- Nil interface\n- Sub-interface\n- Map with string keys and int vals\n- Map with custom formatter type on pointer receiver only keys and vals\n- Map with interface keys and values\n- Map with nil interface value\n- Struct with primitives\n- Struct that contains another struct\n- Struct that contains custom type with Stringer pointer interface via both\n  exported and unexported fields\n- Struct that contains embedded struct and field to same struct\n- Uintptr to 0 (null pointer)\n- Uintptr address of real variable\n- Unsafe.Pointer to 0 (null pointer)\n- Unsafe.Pointer to address of real variable\n- Nil channel\n- Standard int channel\n- Function with no params and no returns\n- Function with param and no returns\n- Function with multiple params and multiple returns\n- Struct that is circular through self referencing\n- Structs that are circular through cross referencing\n- Structs that are indirectly circular\n- Type that panics in its Stringer interface\n- Type that has a custom Error interface\n- %x passthrough with uint\n- %#x passthrough with uint\n- %f passthrough with precision\n- %f passthrough with width and precision\n- %d passthrough with width\n- %q passthrough with string\n*/\n\npackage spew_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"testing\"\n\t\"unsafe\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n)\n\n// formatterTest is used to describe a test to be performed against NewFormatter.\ntype formatterTest struct {\n\tformat string\n\tin     interface{}\n\twants  []string\n}\n\n// formatterTests houses all of the tests to be performed against NewFormatter.\nvar formatterTests = make([]formatterTest, 0)\n\n// addFormatterTest is a helper method to append the passed input and desired\n// result to formatterTests.\nfunc addFormatterTest(format string, in interface{}, wants ...string) {\n\ttest := formatterTest{format, in, wants}\n\tformatterTests = append(formatterTests, test)\n}\n\nfunc addIntFormatterTests() {\n\t// Max int8.\n\tv := int8(127)\n\tnv := (*int8)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := 
fmt.Sprintf(\"%p\", &pv)\n\tvt := \"int8\"\n\tvs := \"127\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\n\t// Max int16.\n\tv2 := int16(32767)\n\tnv2 := (*int16)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"int16\"\n\tv2s := \"32767\"\n\taddFormatterTest(\"%v\", v2, v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2s)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2s)\n\taddFormatterTest(\"%v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%+v\", v2, v2s)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\n\t// Max int32.\n\tv3 := int32(2147483647)\n\tnv3 := (*int32)(nil)\n\tpv3 := &v3\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"int32\"\n\tv3s := \"2147483647\"\n\taddFormatterTest(\"%v\", v3, v3s)\n\taddFormatterTest(\"%v\", pv3, \"<*>\"+v3s)\n\taddFormatterTest(\"%v\", &pv3, \"<**>\"+v3s)\n\taddFormatterTest(\"%v\", nv3, \"<nil>\")\n\taddFormatterTest(\"%+v\", v3, v3s)\n\taddFormatterTest(\"%+v\", pv3, \"<*>(\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%+v\", &pv3, \"<**>(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%+v\", nv3, \"<nil>\")\n\taddFormatterTest(\"%#v\", v3, \"(\"+v3t+\")\"+v3s)\n\taddFormatterTest(\"%#v\", pv3, \"(*\"+v3t+\")\"+v3s)\n\taddFormatterTest(\"%#v\", &pv3, \"(**\"+v3t+\")\"+v3s)\n\taddFormatterTest(\"%#v\", nv3, \"(*\"+v3t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v3, \"(\"+v3t+\")\"+v3s)\n\taddFormatterTest(\"%#+v\", pv3, \"(*\"+v3t+\")(\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%#+v\", &pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%#v\", nv3, \"(*\"+v3t+\")\"+\"<nil>\")\n\n\t// Max int64.\n\tv4 := int64(9223372036854775807)\n\tnv4 := (*int64)(nil)\n\tpv4 := &v4\n\tv4Addr := fmt.Sprintf(\"%p\", pv4)\n\tpv4Addr := fmt.Sprintf(\"%p\", &pv4)\n\tv4t := \"int64\"\n\tv4s := \"9223372036854775807\"\n\taddFormatterTest(\"%v\", v4, v4s)\n\taddFormatterTest(\"%v\", pv4, \"<*>\"+v4s)\n\taddFormatterTest(\"%v\", &pv4, \"<**>\"+v4s)\n\taddFormatterTest(\"%v\", nv4, 
\"<nil>\")\n\taddFormatterTest(\"%+v\", v4, v4s)\n\taddFormatterTest(\"%+v\", pv4, \"<*>(\"+v4Addr+\")\"+v4s)\n\taddFormatterTest(\"%+v\", &pv4, \"<**>(\"+pv4Addr+\"->\"+v4Addr+\")\"+v4s)\n\taddFormatterTest(\"%+v\", nv4, \"<nil>\")\n\taddFormatterTest(\"%#v\", v4, \"(\"+v4t+\")\"+v4s)\n\taddFormatterTest(\"%#v\", pv4, \"(*\"+v4t+\")\"+v4s)\n\taddFormatterTest(\"%#v\", &pv4, \"(**\"+v4t+\")\"+v4s)\n\taddFormatterTest(\"%#v\", nv4, \"(*\"+v4t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v4, \"(\"+v4t+\")\"+v4s)\n\taddFormatterTest(\"%#+v\", pv4, \"(*\"+v4t+\")(\"+v4Addr+\")\"+v4s)\n\taddFormatterTest(\"%#+v\", &pv4, \"(**\"+v4t+\")(\"+pv4Addr+\"->\"+v4Addr+\")\"+v4s)\n\taddFormatterTest(\"%#+v\", nv4, \"(*\"+v4t+\")\"+\"<nil>\")\n\n\t// Max int.\n\tv5 := int(2147483647)\n\tnv5 := (*int)(nil)\n\tpv5 := &v5\n\tv5Addr := fmt.Sprintf(\"%p\", pv5)\n\tpv5Addr := fmt.Sprintf(\"%p\", &pv5)\n\tv5t := \"int\"\n\tv5s := \"2147483647\"\n\taddFormatterTest(\"%v\", v5, v5s)\n\taddFormatterTest(\"%v\", pv5, \"<*>\"+v5s)\n\taddFormatterTest(\"%v\", &pv5, \"<**>\"+v5s)\n\taddFormatterTest(\"%v\", nv5, \"<nil>\")\n\taddFormatterTest(\"%+v\", v5, v5s)\n\taddFormatterTest(\"%+v\", pv5, \"<*>(\"+v5Addr+\")\"+v5s)\n\taddFormatterTest(\"%+v\", &pv5, \"<**>(\"+pv5Addr+\"->\"+v5Addr+\")\"+v5s)\n\taddFormatterTest(\"%+v\", nv5, \"<nil>\")\n\taddFormatterTest(\"%#v\", v5, \"(\"+v5t+\")\"+v5s)\n\taddFormatterTest(\"%#v\", pv5, \"(*\"+v5t+\")\"+v5s)\n\taddFormatterTest(\"%#v\", &pv5, \"(**\"+v5t+\")\"+v5s)\n\taddFormatterTest(\"%#v\", nv5, \"(*\"+v5t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v5, \"(\"+v5t+\")\"+v5s)\n\taddFormatterTest(\"%#+v\", pv5, \"(*\"+v5t+\")(\"+v5Addr+\")\"+v5s)\n\taddFormatterTest(\"%#+v\", &pv5, \"(**\"+v5t+\")(\"+pv5Addr+\"->\"+v5Addr+\")\"+v5s)\n\taddFormatterTest(\"%#+v\", nv5, \"(*\"+v5t+\")\"+\"<nil>\")\n}\n\nfunc addUintFormatterTests() {\n\t// Max uint8.\n\tv := uint8(255)\n\tnv := (*uint8)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"uint8\"\n\tvs := \"255\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\n\t// Max uint16.\n\tv2 := uint16(65535)\n\tnv2 := (*uint16)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"uint16\"\n\tv2s := \"65535\"\n\taddFormatterTest(\"%v\", v2, v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2s)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2s)\n\taddFormatterTest(\"%v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%+v\", v2, v2s)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%#v\", v2, 
\"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\n\t// Max uint32.\n\tv3 := uint32(4294967295)\n\tnv3 := (*uint32)(nil)\n\tpv3 := &v3\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"uint32\"\n\tv3s := \"4294967295\"\n\taddFormatterTest(\"%v\", v3, v3s)\n\taddFormatterTest(\"%v\", pv3, \"<*>\"+v3s)\n\taddFormatterTest(\"%v\", &pv3, \"<**>\"+v3s)\n\taddFormatterTest(\"%v\", nv3, \"<nil>\")\n\taddFormatterTest(\"%+v\", v3, v3s)\n\taddFormatterTest(\"%+v\", pv3, \"<*>(\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%+v\", &pv3, \"<**>(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%+v\", nv3, \"<nil>\")\n\taddFormatterTest(\"%#v\", v3, \"(\"+v3t+\")\"+v3s)\n\taddFormatterTest(\"%#v\", pv3, \"(*\"+v3t+\")\"+v3s)\n\taddFormatterTest(\"%#v\", &pv3, \"(**\"+v3t+\")\"+v3s)\n\taddFormatterTest(\"%#v\", nv3, \"(*\"+v3t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v3, \"(\"+v3t+\")\"+v3s)\n\taddFormatterTest(\"%#+v\", pv3, \"(*\"+v3t+\")(\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%#+v\", &pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%#v\", nv3, \"(*\"+v3t+\")\"+\"<nil>\")\n\n\t// Max uint64.\n\tv4 := uint64(18446744073709551615)\n\tnv4 := (*uint64)(nil)\n\tpv4 := &v4\n\tv4Addr := fmt.Sprintf(\"%p\", pv4)\n\tpv4Addr := fmt.Sprintf(\"%p\", &pv4)\n\tv4t := \"uint64\"\n\tv4s := \"18446744073709551615\"\n\taddFormatterTest(\"%v\", v4, v4s)\n\taddFormatterTest(\"%v\", pv4, \"<*>\"+v4s)\n\taddFormatterTest(\"%v\", &pv4, \"<**>\"+v4s)\n\taddFormatterTest(\"%v\", nv4, \"<nil>\")\n\taddFormatterTest(\"%+v\", v4, v4s)\n\taddFormatterTest(\"%+v\", pv4, \"<*>(\"+v4Addr+\")\"+v4s)\n\taddFormatterTest(\"%+v\", &pv4, \"<**>(\"+pv4Addr+\"->\"+v4Addr+\")\"+v4s)\n\taddFormatterTest(\"%+v\", nv4, \"<nil>\")\n\taddFormatterTest(\"%#v\", v4, \"(\"+v4t+\")\"+v4s)\n\taddFormatterTest(\"%#v\", pv4, \"(*\"+v4t+\")\"+v4s)\n\taddFormatterTest(\"%#v\", &pv4, \"(**\"+v4t+\")\"+v4s)\n\taddFormatterTest(\"%#v\", nv4, \"(*\"+v4t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v4, \"(\"+v4t+\")\"+v4s)\n\taddFormatterTest(\"%#+v\", pv4, \"(*\"+v4t+\")(\"+v4Addr+\")\"+v4s)\n\taddFormatterTest(\"%#+v\", &pv4, \"(**\"+v4t+\")(\"+pv4Addr+\"->\"+v4Addr+\")\"+v4s)\n\taddFormatterTest(\"%#+v\", nv4, \"(*\"+v4t+\")\"+\"<nil>\")\n\n\t// Max uint.\n\tv5 := uint(4294967295)\n\tnv5 := (*uint)(nil)\n\tpv5 := &v5\n\tv5Addr := fmt.Sprintf(\"%p\", pv5)\n\tpv5Addr := fmt.Sprintf(\"%p\", &pv5)\n\tv5t := \"uint\"\n\tv5s := \"4294967295\"\n\taddFormatterTest(\"%v\", v5, v5s)\n\taddFormatterTest(\"%v\", pv5, \"<*>\"+v5s)\n\taddFormatterTest(\"%v\", &pv5, \"<**>\"+v5s)\n\taddFormatterTest(\"%v\", nv5, \"<nil>\")\n\taddFormatterTest(\"%+v\", v5, v5s)\n\taddFormatterTest(\"%+v\", pv5, \"<*>(\"+v5Addr+\")\"+v5s)\n\taddFormatterTest(\"%+v\", &pv5, \"<**>(\"+pv5Addr+\"->\"+v5Addr+\")\"+v5s)\n\taddFormatterTest(\"%+v\", nv5, \"<nil>\")\n\taddFormatterTest(\"%#v\", v5, \"(\"+v5t+\")\"+v5s)\n\taddFormatterTest(\"%#v\", pv5, \"(*\"+v5t+\")\"+v5s)\n\taddFormatterTest(\"%#v\", &pv5, \"(**\"+v5t+\")\"+v5s)\n\taddFormatterTest(\"%#v\", nv5, 
\"(*\"+v5t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v5, \"(\"+v5t+\")\"+v5s)\n\taddFormatterTest(\"%#+v\", pv5, \"(*\"+v5t+\")(\"+v5Addr+\")\"+v5s)\n\taddFormatterTest(\"%#+v\", &pv5, \"(**\"+v5t+\")(\"+pv5Addr+\"->\"+v5Addr+\")\"+v5s)\n\taddFormatterTest(\"%#v\", nv5, \"(*\"+v5t+\")\"+\"<nil>\")\n}\n\nfunc addBoolFormatterTests() {\n\t// Boolean true.\n\tv := bool(true)\n\tnv := (*bool)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"bool\"\n\tvs := \"true\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\n\t// Boolean false.\n\tv2 := bool(false)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"bool\"\n\tv2s := \"false\"\n\taddFormatterTest(\"%v\", v2, v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2s)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2s)\n\taddFormatterTest(\"%+v\", v2, v2s)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n}\n\nfunc addFloatFormatterTests() {\n\t// Standard float32.\n\tv := float32(3.1415)\n\tnv := (*float32)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"float32\"\n\tvs := \"3.1415\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\n\t// Standard float64.\n\tv2 := float64(3.1415926)\n\tnv2 := (*float64)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"float64\"\n\tv2s := 
\"3.1415926\"\n\taddFormatterTest(\"%v\", v2, v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2s)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2s)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%+v\", v2, v2s)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n}\n\nfunc addComplexFormatterTests() {\n\t// Standard complex64.\n\tv := complex(float32(6), -2)\n\tnv := (*complex64)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"complex64\"\n\tvs := \"(6-2i)\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\n\t// Standard complex128.\n\tv2 := complex(float64(-6), 2)\n\tnv2 := (*complex128)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"complex128\"\n\tv2s := \"(-6+2i)\"\n\taddFormatterTest(\"%v\", v2, v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2s)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2s)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%+v\", v2, v2s)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n}\n\nfunc addArrayFormatterTests() {\n\t// Array containing standard ints.\n\tv := [3]int{1, 2, 3}\n\tnv := (*[3]int)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"[3]int\"\n\tvs := \"[1 2 3]\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, 
\"<**>\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\n\t// Array containing type with custom formatter on pointer receiver only.\n\tv2 := [3]pstringer{\"1\", \"2\", \"3\"}\n\tnv2 := (*[3]pstringer)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"[3]spew_test.pstringer\"\n\tv2sp := \"[stringer 1 stringer 2 stringer 3]\"\n\tv2s := v2sp\n\tif spew.UnsafeDisabled {\n\t\tv2s = \"[1 2 3]\"\n\t}\n\taddFormatterTest(\"%v\", v2, v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2sp)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2sp)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%+v\", v2, v2s)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2sp)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2sp)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2sp)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2sp)\n\taddFormatterTest(\"%#v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2sp)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2sp)\n\taddFormatterTest(\"%#+v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\n\t// Array containing interfaces.\n\tv3 := [3]interface{}{\"one\", int(2), uint(3)}\n\tnv3 := (*[3]interface{})(nil)\n\tpv3 := &v3\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"[3]interface {}\"\n\tv3t2 := \"string\"\n\tv3t3 := \"int\"\n\tv3t4 := \"uint\"\n\tv3s := \"[one 2 3]\"\n\tv3s2 := \"[(\" + v3t2 + \")one (\" + v3t3 + \")2 (\" + v3t4 + \")3]\"\n\taddFormatterTest(\"%v\", v3, v3s)\n\taddFormatterTest(\"%v\", pv3, \"<*>\"+v3s)\n\taddFormatterTest(\"%v\", &pv3, \"<**>\"+v3s)\n\taddFormatterTest(\"%+v\", nv3, \"<nil>\")\n\taddFormatterTest(\"%+v\", v3, v3s)\n\taddFormatterTest(\"%+v\", pv3, \"<*>(\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%+v\", &pv3, \"<**>(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%+v\", nv3, \"<nil>\")\n\taddFormatterTest(\"%#v\", v3, \"(\"+v3t+\")\"+v3s2)\n\taddFormatterTest(\"%#v\", pv3, \"(*\"+v3t+\")\"+v3s2)\n\taddFormatterTest(\"%#v\", &pv3, \"(**\"+v3t+\")\"+v3s2)\n\taddFormatterTest(\"%#v\", nv3, \"(*\"+v3t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v3, \"(\"+v3t+\")\"+v3s2)\n\taddFormatterTest(\"%#+v\", pv3, \"(*\"+v3t+\")(\"+v3Addr+\")\"+v3s2)\n\taddFormatterTest(\"%#+v\", &pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s2)\n\taddFormatterTest(\"%#+v\", nv3, \"(*\"+v3t+\")\"+\"<nil>\")\n}\n\nfunc addSliceFormatterTests() {\n\t// Slice containing standard float32 values.\n\tv := []float32{3.14, 6.28, 12.56}\n\tnv := (*[]float32)(nil)\n\tpv := &v\n\tvAddr := 
fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"[]float32\"\n\tvs := \"[3.14 6.28 12.56]\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\n\t// Slice containing type with custom formatter on pointer receiver only.\n\tv2 := []pstringer{\"1\", \"2\", \"3\"}\n\tnv2 := (*[]pstringer)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"[]spew_test.pstringer\"\n\tv2s := \"[stringer 1 stringer 2 stringer 3]\"\n\taddFormatterTest(\"%v\", v2, v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2s)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2s)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%+v\", v2, v2s)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\n\t// Slice containing interfaces.\n\tv3 := []interface{}{\"one\", int(2), uint(3), nil}\n\tnv3 := (*[]interface{})(nil)\n\tpv3 := &v3\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"[]interface {}\"\n\tv3t2 := \"string\"\n\tv3t3 := \"int\"\n\tv3t4 := \"uint\"\n\tv3t5 := \"interface {}\"\n\tv3s := \"[one 2 3 <nil>]\"\n\tv3s2 := \"[(\" + v3t2 + \")one (\" + v3t3 + \")2 (\" + v3t4 + \")3 (\" + v3t5 +\n\t\t\")<nil>]\"\n\taddFormatterTest(\"%v\", v3, v3s)\n\taddFormatterTest(\"%v\", pv3, \"<*>\"+v3s)\n\taddFormatterTest(\"%v\", &pv3, \"<**>\"+v3s)\n\taddFormatterTest(\"%+v\", nv3, \"<nil>\")\n\taddFormatterTest(\"%+v\", v3, v3s)\n\taddFormatterTest(\"%+v\", pv3, \"<*>(\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%+v\", &pv3, \"<**>(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%+v\", nv3, \"<nil>\")\n\taddFormatterTest(\"%#v\", v3, \"(\"+v3t+\")\"+v3s2)\n\taddFormatterTest(\"%#v\", pv3, \"(*\"+v3t+\")\"+v3s2)\n\taddFormatterTest(\"%#v\", &pv3, \"(**\"+v3t+\")\"+v3s2)\n\taddFormatterTest(\"%#v\", nv3, \"(*\"+v3t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v3, \"(\"+v3t+\")\"+v3s2)\n\taddFormatterTest(\"%#+v\", pv3, \"(*\"+v3t+\")(\"+v3Addr+\")\"+v3s2)\n\taddFormatterTest(\"%#+v\", &pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s2)\n\taddFormatterTest(\"%#+v\", 
nv3, \"(*\"+v3t+\")\"+\"<nil>\")\n\n\t// Nil slice.\n\tvar v4 []int\n\tnv4 := (*[]int)(nil)\n\tpv4 := &v4\n\tv4Addr := fmt.Sprintf(\"%p\", pv4)\n\tpv4Addr := fmt.Sprintf(\"%p\", &pv4)\n\tv4t := \"[]int\"\n\tv4s := \"<nil>\"\n\taddFormatterTest(\"%v\", v4, v4s)\n\taddFormatterTest(\"%v\", pv4, \"<*>\"+v4s)\n\taddFormatterTest(\"%v\", &pv4, \"<**>\"+v4s)\n\taddFormatterTest(\"%+v\", nv4, \"<nil>\")\n\taddFormatterTest(\"%+v\", v4, v4s)\n\taddFormatterTest(\"%+v\", pv4, \"<*>(\"+v4Addr+\")\"+v4s)\n\taddFormatterTest(\"%+v\", &pv4, \"<**>(\"+pv4Addr+\"->\"+v4Addr+\")\"+v4s)\n\taddFormatterTest(\"%+v\", nv4, \"<nil>\")\n\taddFormatterTest(\"%#v\", v4, \"(\"+v4t+\")\"+v4s)\n\taddFormatterTest(\"%#v\", pv4, \"(*\"+v4t+\")\"+v4s)\n\taddFormatterTest(\"%#v\", &pv4, \"(**\"+v4t+\")\"+v4s)\n\taddFormatterTest(\"%#v\", nv4, \"(*\"+v4t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v4, \"(\"+v4t+\")\"+v4s)\n\taddFormatterTest(\"%#+v\", pv4, \"(*\"+v4t+\")(\"+v4Addr+\")\"+v4s)\n\taddFormatterTest(\"%#+v\", &pv4, \"(**\"+v4t+\")(\"+pv4Addr+\"->\"+v4Addr+\")\"+v4s)\n\taddFormatterTest(\"%#+v\", nv4, \"(*\"+v4t+\")\"+\"<nil>\")\n}\n\nfunc addStringFormatterTests() {\n\t// Standard string.\n\tv := \"test\"\n\tnv := (*string)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"string\"\n\tvs := \"test\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n}\n\nfunc addInterfaceFormatterTests() {\n\t// Nil interface.\n\tvar v interface{}\n\tnv := (*interface{})(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"interface {}\"\n\tvs := \"<nil>\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\n\t// Sub-interface.\n\tv2 := interface{}(uint16(65535))\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"uint16\"\n\tv2s := \"65535\"\n\taddFormatterTest(\"%v\", v2, 
v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2s)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2s)\n\taddFormatterTest(\"%+v\", v2, v2s)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n}\n\nfunc addMapFormatterTests() {\n\t// Map with string keys and int vals.\n\tv := map[string]int{\"one\": 1, \"two\": 2}\n\tnilMap := map[string]int(nil)\n\tnv := (*map[string]int)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"map[string]int\"\n\tvs := \"map[one:1 two:2]\"\n\tvs2 := \"map[two:2 one:1]\"\n\taddFormatterTest(\"%v\", v, vs, vs2)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs, \"<*>\"+vs2)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs, \"<**>\"+vs2)\n\taddFormatterTest(\"%+v\", nilMap, \"<nil>\")\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs, vs2)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs, \"<*>(\"+vAddr+\")\"+vs2)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs,\n\t\t\"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs2)\n\taddFormatterTest(\"%+v\", nilMap, \"<nil>\")\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs, \"(\"+vt+\")\"+vs2)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs, \"(*\"+vt+\")\"+vs2)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs, \"(**\"+vt+\")\"+vs2)\n\taddFormatterTest(\"%#v\", nilMap, \"(\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs, \"(\"+vt+\")\"+vs2)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs,\n\t\t\"(*\"+vt+\")(\"+vAddr+\")\"+vs2)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs,\n\t\t\"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs2)\n\taddFormatterTest(\"%#+v\", nilMap, \"(\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\n\t// Map with custom formatter type on pointer receiver only keys and vals.\n\tv2 := map[pstringer]pstringer{\"one\": \"1\"}\n\tnv2 := (*map[pstringer]pstringer)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"map[spew_test.pstringer]spew_test.pstringer\"\n\tv2s := \"map[stringer one:stringer 1]\"\n\tif spew.UnsafeDisabled {\n\t\tv2s = \"map[one:1]\"\n\t}\n\taddFormatterTest(\"%v\", v2, v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2s)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2s)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%+v\", v2, v2s)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", pv2, 
\"(*\"+v2t+\")(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\n\t// Map with interface keys and values.\n\tv3 := map[interface{}]interface{}{\"one\": 1}\n\tnv3 := (*map[interface{}]interface{})(nil)\n\tpv3 := &v3\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"map[interface {}]interface {}\"\n\tv3t1 := \"string\"\n\tv3t2 := \"int\"\n\tv3s := \"map[one:1]\"\n\tv3s2 := \"map[(\" + v3t1 + \")one:(\" + v3t2 + \")1]\"\n\taddFormatterTest(\"%v\", v3, v3s)\n\taddFormatterTest(\"%v\", pv3, \"<*>\"+v3s)\n\taddFormatterTest(\"%v\", &pv3, \"<**>\"+v3s)\n\taddFormatterTest(\"%+v\", nv3, \"<nil>\")\n\taddFormatterTest(\"%+v\", v3, v3s)\n\taddFormatterTest(\"%+v\", pv3, \"<*>(\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%+v\", &pv3, \"<**>(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%+v\", nv3, \"<nil>\")\n\taddFormatterTest(\"%#v\", v3, \"(\"+v3t+\")\"+v3s2)\n\taddFormatterTest(\"%#v\", pv3, \"(*\"+v3t+\")\"+v3s2)\n\taddFormatterTest(\"%#v\", &pv3, \"(**\"+v3t+\")\"+v3s2)\n\taddFormatterTest(\"%#v\", nv3, \"(*\"+v3t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v3, \"(\"+v3t+\")\"+v3s2)\n\taddFormatterTest(\"%#+v\", pv3, \"(*\"+v3t+\")(\"+v3Addr+\")\"+v3s2)\n\taddFormatterTest(\"%#+v\", &pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s2)\n\taddFormatterTest(\"%#+v\", nv3, \"(*\"+v3t+\")\"+\"<nil>\")\n\n\t// Map with nil interface value\n\tv4 := map[string]interface{}{\"nil\": nil}\n\tnv4 := (*map[string]interface{})(nil)\n\tpv4 := &v4\n\tv4Addr := fmt.Sprintf(\"%p\", pv4)\n\tpv4Addr := fmt.Sprintf(\"%p\", &pv4)\n\tv4t := \"map[string]interface {}\"\n\tv4t1 := \"interface {}\"\n\tv4s := \"map[nil:<nil>]\"\n\tv4s2 := \"map[nil:(\" + v4t1 + \")<nil>]\"\n\taddFormatterTest(\"%v\", v4, v4s)\n\taddFormatterTest(\"%v\", pv4, \"<*>\"+v4s)\n\taddFormatterTest(\"%v\", &pv4, \"<**>\"+v4s)\n\taddFormatterTest(\"%+v\", nv4, \"<nil>\")\n\taddFormatterTest(\"%+v\", v4, v4s)\n\taddFormatterTest(\"%+v\", pv4, \"<*>(\"+v4Addr+\")\"+v4s)\n\taddFormatterTest(\"%+v\", &pv4, \"<**>(\"+pv4Addr+\"->\"+v4Addr+\")\"+v4s)\n\taddFormatterTest(\"%+v\", nv4, \"<nil>\")\n\taddFormatterTest(\"%#v\", v4, \"(\"+v4t+\")\"+v4s2)\n\taddFormatterTest(\"%#v\", pv4, \"(*\"+v4t+\")\"+v4s2)\n\taddFormatterTest(\"%#v\", &pv4, \"(**\"+v4t+\")\"+v4s2)\n\taddFormatterTest(\"%#v\", nv4, \"(*\"+v4t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v4, \"(\"+v4t+\")\"+v4s2)\n\taddFormatterTest(\"%#+v\", pv4, \"(*\"+v4t+\")(\"+v4Addr+\")\"+v4s2)\n\taddFormatterTest(\"%#+v\", &pv4, \"(**\"+v4t+\")(\"+pv4Addr+\"->\"+v4Addr+\")\"+v4s2)\n\taddFormatterTest(\"%#+v\", nv4, \"(*\"+v4t+\")\"+\"<nil>\")\n}\n\nfunc addStructFormatterTests() {\n\t// Struct with primitives.\n\ttype s1 struct {\n\t\ta int8\n\t\tb uint8\n\t}\n\tv := s1{127, 255}\n\tnv := (*s1)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"spew_test.s1\"\n\tvt2 := \"int8\"\n\tvt3 := \"uint8\"\n\tvs := \"{127 255}\"\n\tvs2 := \"{a:127 b:255}\"\n\tvs3 := \"{a:(\" + vt2 + \")127 b:(\" + vt3 + \")255}\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs2)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs2)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs2)\n\taddFormatterTest(\"%+v\", 
nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs3)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs3)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs3)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs3)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs3)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs3)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\n\t// Struct that contains another struct.\n\ttype s2 struct {\n\t\ts1 s1\n\t\tb  bool\n\t}\n\tv2 := s2{s1{127, 255}, true}\n\tnv2 := (*s2)(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"spew_test.s2\"\n\tv2t2 := \"spew_test.s1\"\n\tv2t3 := \"int8\"\n\tv2t4 := \"uint8\"\n\tv2t5 := \"bool\"\n\tv2s := \"{{127 255} true}\"\n\tv2s2 := \"{s1:{a:127 b:255} b:true}\"\n\tv2s3 := \"{s1:(\" + v2t2 + \"){a:(\" + v2t3 + \")127 b:(\" + v2t4 + \")255} b:(\" +\n\t\tv2t5 + \")true}\"\n\taddFormatterTest(\"%v\", v2, v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2s)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2s)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%+v\", v2, v2s2)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2s2)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s2)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s3)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2s3)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2s3)\n\taddFormatterTest(\"%#v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s3)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2s3)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s3)\n\taddFormatterTest(\"%#+v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\n\t// Struct that contains custom type with Stringer pointer interface via both\n\t// exported and unexported fields.\n\ttype s3 struct {\n\t\ts pstringer\n\t\tS pstringer\n\t}\n\tv3 := s3{\"test\", \"test2\"}\n\tnv3 := (*s3)(nil)\n\tpv3 := &v3\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"spew_test.s3\"\n\tv3t2 := \"spew_test.pstringer\"\n\tv3s := \"{stringer test stringer test2}\"\n\tv3sp := v3s\n\tv3s2 := \"{s:stringer test S:stringer test2}\"\n\tv3s2p := v3s2\n\tv3s3 := \"{s:(\" + v3t2 + \")stringer test S:(\" + v3t2 + \")stringer test2}\"\n\tv3s3p := v3s3\n\tif spew.UnsafeDisabled {\n\t\tv3s = \"{test test2}\"\n\t\tv3sp = \"{test stringer test2}\"\n\t\tv3s2 = \"{s:test S:test2}\"\n\t\tv3s2p = \"{s:test S:stringer test2}\"\n\t\tv3s3 = \"{s:(\" + v3t2 + \")test S:(\" + v3t2 + \")test2}\"\n\t\tv3s3p = \"{s:(\" + v3t2 + \")test S:(\" + v3t2 + \")stringer test2}\"\n\t}\n\taddFormatterTest(\"%v\", v3, v3s)\n\taddFormatterTest(\"%v\", pv3, \"<*>\"+v3sp)\n\taddFormatterTest(\"%v\", &pv3, \"<**>\"+v3sp)\n\taddFormatterTest(\"%+v\", nv3, \"<nil>\")\n\taddFormatterTest(\"%+v\", v3, v3s2)\n\taddFormatterTest(\"%+v\", pv3, \"<*>(\"+v3Addr+\")\"+v3s2p)\n\taddFormatterTest(\"%+v\", &pv3, \"<**>(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s2p)\n\taddFormatterTest(\"%+v\", nv3, \"<nil>\")\n\taddFormatterTest(\"%#v\", v3, \"(\"+v3t+\")\"+v3s3)\n\taddFormatterTest(\"%#v\", pv3, \"(*\"+v3t+\")\"+v3s3p)\n\taddFormatterTest(\"%#v\", &pv3, \"(**\"+v3t+\")\"+v3s3p)\n\taddFormatterTest(\"%#v\", nv3, \"(*\"+v3t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", 
v3, \"(\"+v3t+\")\"+v3s3)\n\taddFormatterTest(\"%#+v\", pv3, \"(*\"+v3t+\")(\"+v3Addr+\")\"+v3s3p)\n\taddFormatterTest(\"%#+v\", &pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s3p)\n\taddFormatterTest(\"%#+v\", nv3, \"(*\"+v3t+\")\"+\"<nil>\")\n\n\t// Struct that contains embedded struct and field to same struct.\n\te := embed{\"embedstr\"}\n\tv4 := embedwrap{embed: &e, e: &e}\n\tnv4 := (*embedwrap)(nil)\n\tpv4 := &v4\n\teAddr := fmt.Sprintf(\"%p\", &e)\n\tv4Addr := fmt.Sprintf(\"%p\", pv4)\n\tpv4Addr := fmt.Sprintf(\"%p\", &pv4)\n\tv4t := \"spew_test.embedwrap\"\n\tv4t2 := \"spew_test.embed\"\n\tv4t3 := \"string\"\n\tv4s := \"{<*>{embedstr} <*>{embedstr}}\"\n\tv4s2 := \"{embed:<*>(\" + eAddr + \"){a:embedstr} e:<*>(\" + eAddr +\n\t\t\"){a:embedstr}}\"\n\tv4s3 := \"{embed:(*\" + v4t2 + \"){a:(\" + v4t3 + \")embedstr} e:(*\" + v4t2 +\n\t\t\"){a:(\" + v4t3 + \")embedstr}}\"\n\tv4s4 := \"{embed:(*\" + v4t2 + \")(\" + eAddr + \"){a:(\" + v4t3 +\n\t\t\")embedstr} e:(*\" + v4t2 + \")(\" + eAddr + \"){a:(\" + v4t3 + \")embedstr}}\"\n\taddFormatterTest(\"%v\", v4, v4s)\n\taddFormatterTest(\"%v\", pv4, \"<*>\"+v4s)\n\taddFormatterTest(\"%v\", &pv4, \"<**>\"+v4s)\n\taddFormatterTest(\"%+v\", nv4, \"<nil>\")\n\taddFormatterTest(\"%+v\", v4, v4s2)\n\taddFormatterTest(\"%+v\", pv4, \"<*>(\"+v4Addr+\")\"+v4s2)\n\taddFormatterTest(\"%+v\", &pv4, \"<**>(\"+pv4Addr+\"->\"+v4Addr+\")\"+v4s2)\n\taddFormatterTest(\"%+v\", nv4, \"<nil>\")\n\taddFormatterTest(\"%#v\", v4, \"(\"+v4t+\")\"+v4s3)\n\taddFormatterTest(\"%#v\", pv4, \"(*\"+v4t+\")\"+v4s3)\n\taddFormatterTest(\"%#v\", &pv4, \"(**\"+v4t+\")\"+v4s3)\n\taddFormatterTest(\"%#v\", nv4, \"(*\"+v4t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v4, \"(\"+v4t+\")\"+v4s4)\n\taddFormatterTest(\"%#+v\", pv4, \"(*\"+v4t+\")(\"+v4Addr+\")\"+v4s4)\n\taddFormatterTest(\"%#+v\", &pv4, \"(**\"+v4t+\")(\"+pv4Addr+\"->\"+v4Addr+\")\"+v4s4)\n\taddFormatterTest(\"%#+v\", nv4, \"(*\"+v4t+\")\"+\"<nil>\")\n}\n\nfunc addUintptrFormatterTests() {\n\t// Null pointer.\n\tv := uintptr(0)\n\tnv := (*uintptr)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"uintptr\"\n\tvs := \"<nil>\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\n\t// Address of real variable.\n\ti := 1\n\tv2 := uintptr(unsafe.Pointer(&i))\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"uintptr\"\n\tv2s := fmt.Sprintf(\"%p\", &i)\n\taddFormatterTest(\"%v\", v2, v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2s)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2s)\n\taddFormatterTest(\"%+v\", v2, v2s)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", &pv2, 
\"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n}\n\nfunc addUnsafePointerFormatterTests() {\n\t// Null pointer.\n\tv := unsafe.Pointer(uintptr(0))\n\tnv := (*unsafe.Pointer)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"unsafe.Pointer\"\n\tvs := \"<nil>\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\n\t// Address of real variable.\n\ti := 1\n\tv2 := unsafe.Pointer(&i)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"unsafe.Pointer\"\n\tv2s := fmt.Sprintf(\"%p\", &i)\n\taddFormatterTest(\"%v\", v2, v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2s)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2s)\n\taddFormatterTest(\"%+v\", v2, v2s)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n}\n\nfunc addChanFormatterTests() {\n\t// Nil channel.\n\tvar v chan int\n\tpv := &v\n\tnv := (*chan int)(nil)\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"chan int\"\n\tvs := \"<nil>\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, 
\"(*\"+vt+\")\"+\"<nil>\")\n\n\t// Real channel.\n\tv2 := make(chan int)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"chan int\"\n\tv2s := fmt.Sprintf(\"%p\", v2)\n\taddFormatterTest(\"%v\", v2, v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2s)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2s)\n\taddFormatterTest(\"%+v\", v2, v2s)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n}\n\nfunc addFuncFormatterTests() {\n\t// Function with no params and no returns.\n\tv := addIntFormatterTests\n\tnv := (*func())(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"func()\"\n\tvs := fmt.Sprintf(\"%p\", v)\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\n\t// Function with param and no returns.\n\tv2 := TestFormatter\n\tnv2 := (*func(*testing.T))(nil)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"func(*testing.T)\"\n\tv2s := fmt.Sprintf(\"%p\", v2)\n\taddFormatterTest(\"%v\", v2, v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2s)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2s)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%+v\", v2, v2s)\n\taddFormatterTest(\"%+v\", pv2, \"<*>(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%+v\", nv2, \"<nil>\")\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s)\n\taddFormatterTest(\"%#+v\", nv2, \"(*\"+v2t+\")\"+\"<nil>\")\n\n\t// Function with multiple params and multiple returns.\n\tvar v3 = func(i int, s string) (b bool, err error) {\n\t\treturn true, nil\n\t}\n\tnv3 := (*func(int, string) (bool, error))(nil)\n\tpv3 := &v3\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"func(int, string) (bool, error)\"\n\tv3s := 
fmt.Sprintf(\"%p\", v3)\n\taddFormatterTest(\"%v\", v3, v3s)\n\taddFormatterTest(\"%v\", pv3, \"<*>\"+v3s)\n\taddFormatterTest(\"%v\", &pv3, \"<**>\"+v3s)\n\taddFormatterTest(\"%+v\", nv3, \"<nil>\")\n\taddFormatterTest(\"%+v\", v3, v3s)\n\taddFormatterTest(\"%+v\", pv3, \"<*>(\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%+v\", &pv3, \"<**>(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%+v\", nv3, \"<nil>\")\n\taddFormatterTest(\"%#v\", v3, \"(\"+v3t+\")\"+v3s)\n\taddFormatterTest(\"%#v\", pv3, \"(*\"+v3t+\")\"+v3s)\n\taddFormatterTest(\"%#v\", &pv3, \"(**\"+v3t+\")\"+v3s)\n\taddFormatterTest(\"%#v\", nv3, \"(*\"+v3t+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v3, \"(\"+v3t+\")\"+v3s)\n\taddFormatterTest(\"%#+v\", pv3, \"(*\"+v3t+\")(\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%#+v\", &pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s)\n\taddFormatterTest(\"%#+v\", nv3, \"(*\"+v3t+\")\"+\"<nil>\")\n}\n\nfunc addCircularFormatterTests() {\n\t// Struct that is circular through self referencing.\n\ttype circular struct {\n\t\tc *circular\n\t}\n\tv := circular{nil}\n\tv.c = &v\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"spew_test.circular\"\n\tvs := \"{<*>{<*><shown>}}\"\n\tvs2 := \"{<*><shown>}\"\n\tvs3 := \"{c:<*>(\" + vAddr + \"){c:<*>(\" + vAddr + \")<shown>}}\"\n\tvs4 := \"{c:<*>(\" + vAddr + \")<shown>}\"\n\tvs5 := \"{c:(*\" + vt + \"){c:(*\" + vt + \")<shown>}}\"\n\tvs6 := \"{c:(*\" + vt + \")<shown>}\"\n\tvs7 := \"{c:(*\" + vt + \")(\" + vAddr + \"){c:(*\" + vt + \")(\" + vAddr +\n\t\t\")<shown>}}\"\n\tvs8 := \"{c:(*\" + vt + \")(\" + vAddr + \")<shown>}\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs2)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs2)\n\taddFormatterTest(\"%+v\", v, vs3)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs4)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs4)\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs5)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs6)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs6)\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs7)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs8)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs8)\n\n\t// Structs that are circular through cross referencing.\n\tv2 := xref1{nil}\n\tts2 := xref2{&v2}\n\tv2.ps2 = &ts2\n\tpv2 := &v2\n\tts2Addr := fmt.Sprintf(\"%p\", &ts2)\n\tv2Addr := fmt.Sprintf(\"%p\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%p\", &pv2)\n\tv2t := \"spew_test.xref1\"\n\tv2t2 := \"spew_test.xref2\"\n\tv2s := \"{<*>{<*>{<*><shown>}}}\"\n\tv2s2 := \"{<*>{<*><shown>}}\"\n\tv2s3 := \"{ps2:<*>(\" + ts2Addr + \"){ps1:<*>(\" + v2Addr + \"){ps2:<*>(\" +\n\t\tts2Addr + \")<shown>}}}\"\n\tv2s4 := \"{ps2:<*>(\" + ts2Addr + \"){ps1:<*>(\" + v2Addr + \")<shown>}}\"\n\tv2s5 := \"{ps2:(*\" + v2t2 + \"){ps1:(*\" + v2t + \"){ps2:(*\" + v2t2 +\n\t\t\")<shown>}}}\"\n\tv2s6 := \"{ps2:(*\" + v2t2 + \"){ps1:(*\" + v2t + \")<shown>}}\"\n\tv2s7 := \"{ps2:(*\" + v2t2 + \")(\" + ts2Addr + \"){ps1:(*\" + v2t +\n\t\t\")(\" + v2Addr + \"){ps2:(*\" + v2t2 + \")(\" + ts2Addr +\n\t\t\")<shown>}}}\"\n\tv2s8 := \"{ps2:(*\" + v2t2 + \")(\" + ts2Addr + \"){ps1:(*\" + v2t +\n\t\t\")(\" + v2Addr + \")<shown>}}\"\n\taddFormatterTest(\"%v\", v2, v2s)\n\taddFormatterTest(\"%v\", pv2, \"<*>\"+v2s2)\n\taddFormatterTest(\"%v\", &pv2, \"<**>\"+v2s2)\n\taddFormatterTest(\"%+v\", v2, v2s3)\n\taddFormatterTest(\"%+v\", pv2, 
\"<*>(\"+v2Addr+\")\"+v2s4)\n\taddFormatterTest(\"%+v\", &pv2, \"<**>(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s4)\n\taddFormatterTest(\"%#v\", v2, \"(\"+v2t+\")\"+v2s5)\n\taddFormatterTest(\"%#v\", pv2, \"(*\"+v2t+\")\"+v2s6)\n\taddFormatterTest(\"%#v\", &pv2, \"(**\"+v2t+\")\"+v2s6)\n\taddFormatterTest(\"%#+v\", v2, \"(\"+v2t+\")\"+v2s7)\n\taddFormatterTest(\"%#+v\", pv2, \"(*\"+v2t+\")(\"+v2Addr+\")\"+v2s8)\n\taddFormatterTest(\"%#+v\", &pv2, \"(**\"+v2t+\")(\"+pv2Addr+\"->\"+v2Addr+\")\"+v2s8)\n\n\t// Structs that are indirectly circular.\n\tv3 := indirCir1{nil}\n\ttic2 := indirCir2{nil}\n\ttic3 := indirCir3{&v3}\n\ttic2.ps3 = &tic3\n\tv3.ps2 = &tic2\n\tpv3 := &v3\n\ttic2Addr := fmt.Sprintf(\"%p\", &tic2)\n\ttic3Addr := fmt.Sprintf(\"%p\", &tic3)\n\tv3Addr := fmt.Sprintf(\"%p\", pv3)\n\tpv3Addr := fmt.Sprintf(\"%p\", &pv3)\n\tv3t := \"spew_test.indirCir1\"\n\tv3t2 := \"spew_test.indirCir2\"\n\tv3t3 := \"spew_test.indirCir3\"\n\tv3s := \"{<*>{<*>{<*>{<*><shown>}}}}\"\n\tv3s2 := \"{<*>{<*>{<*><shown>}}}\"\n\tv3s3 := \"{ps2:<*>(\" + tic2Addr + \"){ps3:<*>(\" + tic3Addr + \"){ps1:<*>(\" +\n\t\tv3Addr + \"){ps2:<*>(\" + tic2Addr + \")<shown>}}}}\"\n\tv3s4 := \"{ps2:<*>(\" + tic2Addr + \"){ps3:<*>(\" + tic3Addr + \"){ps1:<*>(\" +\n\t\tv3Addr + \")<shown>}}}\"\n\tv3s5 := \"{ps2:(*\" + v3t2 + \"){ps3:(*\" + v3t3 + \"){ps1:(*\" + v3t +\n\t\t\"){ps2:(*\" + v3t2 + \")<shown>}}}}\"\n\tv3s6 := \"{ps2:(*\" + v3t2 + \"){ps3:(*\" + v3t3 + \"){ps1:(*\" + v3t +\n\t\t\")<shown>}}}\"\n\tv3s7 := \"{ps2:(*\" + v3t2 + \")(\" + tic2Addr + \"){ps3:(*\" + v3t3 + \")(\" +\n\t\ttic3Addr + \"){ps1:(*\" + v3t + \")(\" + v3Addr + \"){ps2:(*\" + v3t2 +\n\t\t\")(\" + tic2Addr + \")<shown>}}}}\"\n\tv3s8 := \"{ps2:(*\" + v3t2 + \")(\" + tic2Addr + \"){ps3:(*\" + v3t3 + \")(\" +\n\t\ttic3Addr + \"){ps1:(*\" + v3t + \")(\" + v3Addr + \")<shown>}}}\"\n\taddFormatterTest(\"%v\", v3, v3s)\n\taddFormatterTest(\"%v\", pv3, \"<*>\"+v3s2)\n\taddFormatterTest(\"%v\", &pv3, \"<**>\"+v3s2)\n\taddFormatterTest(\"%+v\", v3, v3s3)\n\taddFormatterTest(\"%+v\", pv3, \"<*>(\"+v3Addr+\")\"+v3s4)\n\taddFormatterTest(\"%+v\", &pv3, \"<**>(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s4)\n\taddFormatterTest(\"%#v\", v3, \"(\"+v3t+\")\"+v3s5)\n\taddFormatterTest(\"%#v\", pv3, \"(*\"+v3t+\")\"+v3s6)\n\taddFormatterTest(\"%#v\", &pv3, \"(**\"+v3t+\")\"+v3s6)\n\taddFormatterTest(\"%#+v\", v3, \"(\"+v3t+\")\"+v3s7)\n\taddFormatterTest(\"%#+v\", pv3, \"(*\"+v3t+\")(\"+v3Addr+\")\"+v3s8)\n\taddFormatterTest(\"%#+v\", &pv3, \"(**\"+v3t+\")(\"+pv3Addr+\"->\"+v3Addr+\")\"+v3s8)\n}\n\nfunc addPanicFormatterTests() {\n\t// Type that panics in its Stringer interface.\n\tv := panicer(127)\n\tnv := (*panicer)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"spew_test.panicer\"\n\tvs := \"(PANIC=test panic)127\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, 
\"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n}\n\nfunc addErrorFormatterTests() {\n\t// Type that has a custom Error interface.\n\tv := customError(127)\n\tnv := (*customError)(nil)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%p\", pv)\n\tpvAddr := fmt.Sprintf(\"%p\", &pv)\n\tvt := \"spew_test.customError\"\n\tvs := \"error: 127\"\n\taddFormatterTest(\"%v\", v, vs)\n\taddFormatterTest(\"%v\", pv, \"<*>\"+vs)\n\taddFormatterTest(\"%v\", &pv, \"<**>\"+vs)\n\taddFormatterTest(\"%v\", nv, \"<nil>\")\n\taddFormatterTest(\"%+v\", v, vs)\n\taddFormatterTest(\"%+v\", pv, \"<*>(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", &pv, \"<**>(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%+v\", nv, \"<nil>\")\n\taddFormatterTest(\"%#v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", pv, \"(*\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", &pv, \"(**\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n\taddFormatterTest(\"%#+v\", v, \"(\"+vt+\")\"+vs)\n\taddFormatterTest(\"%#+v\", pv, \"(*\"+vt+\")(\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", &pv, \"(**\"+vt+\")(\"+pvAddr+\"->\"+vAddr+\")\"+vs)\n\taddFormatterTest(\"%#+v\", nv, \"(*\"+vt+\")\"+\"<nil>\")\n}\n\nfunc addPassthroughFormatterTests() {\n\t// %x passthrough with uint.\n\tv := uint(4294967295)\n\tpv := &v\n\tvAddr := fmt.Sprintf(\"%x\", pv)\n\tpvAddr := fmt.Sprintf(\"%x\", &pv)\n\tvs := \"ffffffff\"\n\taddFormatterTest(\"%x\", v, vs)\n\taddFormatterTest(\"%x\", pv, vAddr)\n\taddFormatterTest(\"%x\", &pv, pvAddr)\n\n\t// %#x passthrough with uint.\n\tv2 := int(2147483647)\n\tpv2 := &v2\n\tv2Addr := fmt.Sprintf(\"%#x\", pv2)\n\tpv2Addr := fmt.Sprintf(\"%#x\", &pv2)\n\tv2s := \"0x7fffffff\"\n\taddFormatterTest(\"%#x\", v2, v2s)\n\taddFormatterTest(\"%#x\", pv2, v2Addr)\n\taddFormatterTest(\"%#x\", &pv2, pv2Addr)\n\n\t// %f passthrough with precision.\n\taddFormatterTest(\"%.2f\", 3.1415, \"3.14\")\n\taddFormatterTest(\"%.3f\", 3.1415, \"3.142\")\n\taddFormatterTest(\"%.4f\", 3.1415, \"3.1415\")\n\n\t// %f passthrough with width and precision.\n\taddFormatterTest(\"%5.2f\", 3.1415, \" 3.14\")\n\taddFormatterTest(\"%6.3f\", 3.1415, \" 3.142\")\n\taddFormatterTest(\"%7.4f\", 3.1415, \" 3.1415\")\n\n\t// %d passthrough with width.\n\taddFormatterTest(\"%3d\", 127, \"127\")\n\taddFormatterTest(\"%4d\", 127, \" 127\")\n\taddFormatterTest(\"%5d\", 127, \"  127\")\n\n\t// %q passthrough with string.\n\taddFormatterTest(\"%q\", \"test\", \"\\\"test\\\"\")\n}\n\n// TestFormatter executes all of the tests described by formatterTests.\nfunc TestFormatter(t *testing.T) {\n\t// Setup tests.\n\taddIntFormatterTests()\n\taddUintFormatterTests()\n\taddBoolFormatterTests()\n\taddFloatFormatterTests()\n\taddComplexFormatterTests()\n\taddArrayFormatterTests()\n\taddSliceFormatterTests()\n\taddStringFormatterTests()\n\taddInterfaceFormatterTests()\n\taddMapFormatterTests()\n\taddStructFormatterTests()\n\taddUintptrFormatterTests()\n\taddUnsafePointerFormatterTests()\n\taddChanFormatterTests()\n\taddFuncFormatterTests()\n\taddCircularFormatterTests()\n\taddPanicFormatterTests()\n\taddErrorFormatterTests()\n\taddPassthroughFormatterTests()\n\n\tt.Logf(\"Running %d tests\", len(formatterTests))\n\tfor i, test := range formatterTests {\n\t\tbuf := new(bytes.Buffer)\n\t\tspew.Fprintf(buf, test.format, test.in)\n\t\ts := buf.String()\n\t\tif testFailed(s, test.wants) {\n\t\t\tt.Errorf(\"Formatter #%d format: %s 
got: %s %s\", i, test.format, s,\n\t\t\t\tstringizeWants(test.wants))\n\t\t\tcontinue\n\t\t}\n\t}\n}\n\ntype testStruct struct {\n\tx int\n}\n\nfunc (ts testStruct) String() string {\n\treturn fmt.Sprintf(\"ts.%d\", ts.x)\n}\n\ntype testStructP struct {\n\tx int\n}\n\nfunc (ts *testStructP) String() string {\n\treturn fmt.Sprintf(\"ts.%d\", ts.x)\n}\n\nfunc TestPrintSortedKeys(t *testing.T) {\n\tcfg := spew.ConfigState{SortKeys: true}\n\ts := cfg.Sprint(map[int]string{1: \"1\", 3: \"3\", 2: \"2\"})\n\texpected := \"map[1:1 2:2 3:3]\"\n\tif s != expected {\n\t\tt.Errorf(\"Sorted keys mismatch 1:\\n  %v %v\", s, expected)\n\t}\n\n\ts = cfg.Sprint(map[stringer]int{\"1\": 1, \"3\": 3, \"2\": 2})\n\texpected = \"map[stringer 1:1 stringer 2:2 stringer 3:3]\"\n\tif s != expected {\n\t\tt.Errorf(\"Sorted keys mismatch 2:\\n  %v %v\", s, expected)\n\t}\n\n\ts = cfg.Sprint(map[pstringer]int{pstringer(\"1\"): 1, pstringer(\"3\"): 3, pstringer(\"2\"): 2})\n\texpected = \"map[stringer 1:1 stringer 2:2 stringer 3:3]\"\n\tif spew.UnsafeDisabled {\n\t\texpected = \"map[1:1 2:2 3:3]\"\n\t}\n\tif s != expected {\n\t\tt.Errorf(\"Sorted keys mismatch 3:\\n  %v %v\", s, expected)\n\t}\n\n\ts = cfg.Sprint(map[testStruct]int{testStruct{1}: 1, testStruct{3}: 3, testStruct{2}: 2})\n\texpected = \"map[ts.1:1 ts.2:2 ts.3:3]\"\n\tif s != expected {\n\t\tt.Errorf(\"Sorted keys mismatch 4:\\n  %v %v\", s, expected)\n\t}\n\n\tif !spew.UnsafeDisabled {\n\t\ts = cfg.Sprint(map[testStructP]int{testStructP{1}: 1, testStructP{3}: 3, testStructP{2}: 2})\n\t\texpected = \"map[ts.1:1 ts.2:2 ts.3:3]\"\n\t\tif s != expected {\n\t\t\tt.Errorf(\"Sorted keys mismatch 5:\\n  %v %v\", s, expected)\n\t\t}\n\t}\n\n\ts = cfg.Sprint(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2})\n\texpected = \"map[error: 1:1 error: 2:2 error: 3:3]\"\n\tif s != expected {\n\t\tt.Errorf(\"Sorted keys mismatch 6:\\n  %v %v\", s, expected)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/internal_test.go",
    "content": "/*\n * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n */\n\n/*\nThis test file is part of the spew package rather than than the spew_test\npackage because it needs access to internals to properly test certain cases\nwhich are not possible via the public interface since they should never happen.\n*/\n\npackage spew\n\nimport (\n\t\"bytes\"\n\t\"reflect\"\n\t\"testing\"\n)\n\n// dummyFmtState implements a fake fmt.State to use for testing invalid\n// reflect.Value handling.  This is necessary because the fmt package catches\n// invalid values before invoking the formatter on them.\ntype dummyFmtState struct {\n\tbytes.Buffer\n}\n\nfunc (dfs *dummyFmtState) Flag(f int) bool {\n\tif f == int('+') {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (dfs *dummyFmtState) Precision() (int, bool) {\n\treturn 0, false\n}\n\nfunc (dfs *dummyFmtState) Width() (int, bool) {\n\treturn 0, false\n}\n\n// TestInvalidReflectValue ensures the dump and formatter code handles an\n// invalid reflect value properly.  This needs access to internal state since it\n// should never happen in real code and therefore can't be tested via the public\n// API.\nfunc TestInvalidReflectValue(t *testing.T) {\n\ti := 1\n\n\t// Dump invalid reflect value.\n\tv := new(reflect.Value)\n\tbuf := new(bytes.Buffer)\n\td := dumpState{w: buf, cs: &Config}\n\td.dump(*v)\n\ts := buf.String()\n\twant := \"<invalid>\"\n\tif s != want {\n\t\tt.Errorf(\"InvalidReflectValue #%d\\n got: %s want: %s\", i, s, want)\n\t}\n\ti++\n\n\t// Formatter invalid reflect value.\n\tbuf2 := new(dummyFmtState)\n\tf := formatState{value: *v, cs: &Config, fs: buf2}\n\tf.format(*v)\n\ts = buf2.String()\n\twant = \"<invalid>\"\n\tif s != want {\n\t\tt.Errorf(\"InvalidReflectValue #%d got: %s want: %s\", i, s, want)\n\t}\n}\n\n// SortValues makes the internal sortValues function available to the test\n// package.\nfunc SortValues(values []reflect.Value, cs *ConfigState) {\n\tsortValues(values, cs)\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/internalunsafe_test.go",
    "content": "// Copyright (c) 2013-2016 Dave Collins <dave@davec.name>\n\n// Permission to use, copy, modify, and distribute this software for any\n// purpose with or without fee is hereby granted, provided that the above\n// copyright notice and this permission notice appear in all copies.\n\n// THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n\n// NOTE: Due to the following build constraints, this file will only be compiled\n// when the code is not running on Google App Engine, compiled by GopherJS, and\n// \"-tags safe\" is not added to the go build command line.  The \"disableunsafe\"\n// tag is deprecated and thus should not be used.\n// +build !js,!appengine,!safe,!disableunsafe\n\n/*\nThis test file is part of the spew package rather than than the spew_test\npackage because it needs access to internals to properly test certain cases\nwhich are not possible via the public interface since they should never happen.\n*/\n\npackage spew\n\nimport (\n\t\"bytes\"\n\t\"reflect\"\n\t\"testing\"\n\t\"unsafe\"\n)\n\n// changeKind uses unsafe to intentionally change the kind of a reflect.Value to\n// the maximum kind value which does not exist.  This is needed to test the\n// fallback code which punts to the standard fmt library for new types that\n// might get added to the language.\nfunc changeKind(v *reflect.Value, readOnly bool) {\n\trvf := (*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + offsetFlag))\n\t*rvf = *rvf | ((1<<flagKindWidth - 1) << flagKindShift)\n\tif readOnly {\n\t\t*rvf |= flagRO\n\t} else {\n\t\t*rvf &= ^uintptr(flagRO)\n\t}\n}\n\n// TestAddedReflectValue tests functionaly of the dump and formatter code which\n// falls back to the standard fmt library for new types that might get added to\n// the language.\nfunc TestAddedReflectValue(t *testing.T) {\n\ti := 1\n\n\t// Dump using a reflect.Value that is exported.\n\tv := reflect.ValueOf(int8(5))\n\tchangeKind(&v, false)\n\tbuf := new(bytes.Buffer)\n\td := dumpState{w: buf, cs: &Config}\n\td.dump(v)\n\ts := buf.String()\n\twant := \"(int8) 5\"\n\tif s != want {\n\t\tt.Errorf(\"TestAddedReflectValue #%d\\n got: %s want: %s\", i, s, want)\n\t}\n\ti++\n\n\t// Dump using a reflect.Value that is not exported.\n\tchangeKind(&v, true)\n\tbuf.Reset()\n\td.dump(v)\n\ts = buf.String()\n\twant = \"(int8) <int8 Value>\"\n\tif s != want {\n\t\tt.Errorf(\"TestAddedReflectValue #%d\\n got: %s want: %s\", i, s, want)\n\t}\n\ti++\n\n\t// Formatter using a reflect.Value that is exported.\n\tchangeKind(&v, false)\n\tbuf2 := new(dummyFmtState)\n\tf := formatState{value: v, cs: &Config, fs: buf2}\n\tf.format(v)\n\ts = buf2.String()\n\twant = \"5\"\n\tif s != want {\n\t\tt.Errorf(\"TestAddedReflectValue #%d got: %s want: %s\", i, s, want)\n\t}\n\ti++\n\n\t// Formatter using a reflect.Value that is not exported.\n\tchangeKind(&v, true)\n\tbuf2.Reset()\n\tf = formatState{value: v, cs: &Config, fs: buf2}\n\tf.format(v)\n\ts = buf2.String()\n\twant = \"<int8 Value>\"\n\tif s != want {\n\t\tt.Errorf(\"TestAddedReflectValue #%d got: %s want: %s\", i, s, want)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/spew.go",
    "content": "/*\n * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n */\n\npackage spew\n\nimport (\n\t\"fmt\"\n\t\"io\"\n)\n\n// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were\n// passed with a default Formatter interface returned by NewFormatter.  It\n// returns the formatted string as a value that satisfies error.  See\n// NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))\nfunc Errorf(format string, a ...interface{}) (err error) {\n\treturn fmt.Errorf(format, convertArgs(a)...)\n}\n\n// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were\n// passed with a default Formatter interface returned by NewFormatter.  It\n// returns the number of bytes written and any write error encountered.  See\n// NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))\nfunc Fprint(w io.Writer, a ...interface{}) (n int, err error) {\n\treturn fmt.Fprint(w, convertArgs(a)...)\n}\n\n// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were\n// passed with a default Formatter interface returned by NewFormatter.  It\n// returns the number of bytes written and any write error encountered.  See\n// NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))\nfunc Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {\n\treturn fmt.Fprintf(w, format, convertArgs(a)...)\n}\n\n// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it\n// passed with a default Formatter interface returned by NewFormatter.  See\n// NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))\nfunc Fprintln(w io.Writer, a ...interface{}) (n int, err error) {\n\treturn fmt.Fprintln(w, convertArgs(a)...)\n}\n\n// Print is a wrapper for fmt.Print that treats each argument as if it were\n// passed with a default Formatter interface returned by NewFormatter.  It\n// returns the number of bytes written and any write error encountered.  See\n// NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))\nfunc Print(a ...interface{}) (n int, err error) {\n\treturn fmt.Print(convertArgs(a)...)\n}\n\n// Printf is a wrapper for fmt.Printf that treats each argument as if it were\n// passed with a default Formatter interface returned by NewFormatter.  
It\n// returns the number of bytes written and any write error encountered.  See\n// NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))\nfunc Printf(format string, a ...interface{}) (n int, err error) {\n\treturn fmt.Printf(format, convertArgs(a)...)\n}\n\n// Println is a wrapper for fmt.Println that treats each argument as if it were\n// passed with a default Formatter interface returned by NewFormatter.  It\n// returns the number of bytes written and any write error encountered.  See\n// NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))\nfunc Println(a ...interface{}) (n int, err error) {\n\treturn fmt.Println(convertArgs(a)...)\n}\n\n// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were\n// passed with a default Formatter interface returned by NewFormatter.  It\n// returns the resulting string.  See NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))\nfunc Sprint(a ...interface{}) string {\n\treturn fmt.Sprint(convertArgs(a)...)\n}\n\n// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were\n// passed with a default Formatter interface returned by NewFormatter.  It\n// returns the resulting string.  See NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))\nfunc Sprintf(format string, a ...interface{}) string {\n\treturn fmt.Sprintf(format, convertArgs(a)...)\n}\n\n// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it\n// were passed with a default Formatter interface returned by NewFormatter.  It\n// returns the resulting string.  See NewFormatter for formatting details.\n//\n// This function is shorthand for the following syntax:\n//\n//\tfmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))\nfunc Sprintln(a ...interface{}) string {\n\treturn fmt.Sprintln(convertArgs(a)...)\n}\n\n// convertArgs accepts a slice of arguments and returns a slice of the same\n// length with each argument converted to a default spew Formatter interface.\nfunc convertArgs(args []interface{}) (formatters []interface{}) {\n\tformatters = make([]interface{}, len(args))\n\tfor index, arg := range args {\n\t\tformatters[index] = NewFormatter(arg)\n\t}\n\treturn formatters\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/spew/spew_test.go",
    "content": "/*\n * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>\n *\n * Permission to use, copy, modify, and distribute this software for any\n * purpose with or without fee is hereby granted, provided that the above\n * copyright notice and this permission notice appear in all copies.\n *\n * THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES\n * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF\n * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR\n * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES\n * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN\n * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF\n * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.\n */\n\npackage spew_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n)\n\n// spewFunc is used to identify which public function of the spew package or\n// ConfigState a test applies to.\ntype spewFunc int\n\nconst (\n\tfCSFdump spewFunc = iota\n\tfCSFprint\n\tfCSFprintf\n\tfCSFprintln\n\tfCSPrint\n\tfCSPrintln\n\tfCSSdump\n\tfCSSprint\n\tfCSSprintf\n\tfCSSprintln\n\tfCSErrorf\n\tfCSNewFormatter\n\tfErrorf\n\tfFprint\n\tfFprintln\n\tfPrint\n\tfPrintln\n\tfSdump\n\tfSprint\n\tfSprintf\n\tfSprintln\n)\n\n// Map of spewFunc values to names for pretty printing.\nvar spewFuncStrings = map[spewFunc]string{\n\tfCSFdump:        \"ConfigState.Fdump\",\n\tfCSFprint:       \"ConfigState.Fprint\",\n\tfCSFprintf:      \"ConfigState.Fprintf\",\n\tfCSFprintln:     \"ConfigState.Fprintln\",\n\tfCSSdump:        \"ConfigState.Sdump\",\n\tfCSPrint:        \"ConfigState.Print\",\n\tfCSPrintln:      \"ConfigState.Println\",\n\tfCSSprint:       \"ConfigState.Sprint\",\n\tfCSSprintf:      \"ConfigState.Sprintf\",\n\tfCSSprintln:     \"ConfigState.Sprintln\",\n\tfCSErrorf:       \"ConfigState.Errorf\",\n\tfCSNewFormatter: \"ConfigState.NewFormatter\",\n\tfErrorf:         \"spew.Errorf\",\n\tfFprint:         \"spew.Fprint\",\n\tfFprintln:       \"spew.Fprintln\",\n\tfPrint:          \"spew.Print\",\n\tfPrintln:        \"spew.Println\",\n\tfSdump:          \"spew.Sdump\",\n\tfSprint:         \"spew.Sprint\",\n\tfSprintf:        \"spew.Sprintf\",\n\tfSprintln:       \"spew.Sprintln\",\n}\n\nfunc (f spewFunc) String() string {\n\tif s, ok := spewFuncStrings[f]; ok {\n\t\treturn s\n\t}\n\treturn fmt.Sprintf(\"Unknown spewFunc (%d)\", int(f))\n}\n\n// spewTest is used to describe a test to be performed against the public\n// functions of the spew package or ConfigState.\ntype spewTest struct {\n\tcs     *spew.ConfigState\n\tf      spewFunc\n\tformat string\n\tin     interface{}\n\twant   string\n}\n\n// spewTests houses the tests to be performed against the public functions of\n// the spew package and ConfigState.\n//\n// These tests are only intended to ensure the public functions are exercised\n// and are intentionally not exhaustive of types.  
The exhaustive type\n// tests are handled in the dump and format tests.\nvar spewTests []spewTest\n\n// redirStdout is a helper function to return the standard output from f as a\n// byte slice.\nfunc redirStdout(f func()) ([]byte, error) {\n\ttempFile, err := ioutil.TempFile(\"\", \"ss-test\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfileName := tempFile.Name()\n\tdefer os.Remove(fileName) // Ignore error\n\n\torigStdout := os.Stdout\n\tos.Stdout = tempFile\n\tf()\n\tos.Stdout = origStdout\n\ttempFile.Close()\n\n\treturn ioutil.ReadFile(fileName)\n}\n\nfunc initSpewTests() {\n\t// Config states with various settings.\n\tscsDefault := spew.NewDefaultConfig()\n\tscsNoMethods := &spew.ConfigState{Indent: \" \", DisableMethods: true}\n\tscsNoPmethods := &spew.ConfigState{Indent: \" \", DisablePointerMethods: true}\n\tscsMaxDepth := &spew.ConfigState{Indent: \" \", MaxDepth: 1}\n\tscsContinue := &spew.ConfigState{Indent: \" \", ContinueOnMethod: true}\n\tscsNoPtrAddr := &spew.ConfigState{DisablePointerAddresses: true}\n\tscsNoCap := &spew.ConfigState{DisableCapacities: true}\n\n\t// Variables for tests on types which implement Stringer interface with and\n\t// without a pointer receiver.\n\tts := stringer(\"test\")\n\ttps := pstringer(\"test\")\n\n\ttype ptrTester struct {\n\t\ts *struct{}\n\t}\n\ttptr := &ptrTester{s: &struct{}{}}\n\n\t// depthTester is used to test max depth handling for structs, array, slices\n\t// and maps.\n\ttype depthTester struct {\n\t\tic    indirCir1\n\t\tarr   [1]string\n\t\tslice []string\n\t\tm     map[string]int\n\t}\n\tdt := depthTester{indirCir1{nil}, [1]string{\"arr\"}, []string{\"slice\"},\n\t\tmap[string]int{\"one\": 1}}\n\n\t// Variable for tests on types which implement error interface.\n\tte := customError(10)\n\n\tspewTests = []spewTest{\n\t\t{scsDefault, fCSFdump, \"\", int8(127), \"(int8) 127\\n\"},\n\t\t{scsDefault, fCSFprint, \"\", int16(32767), \"32767\"},\n\t\t{scsDefault, fCSFprintf, \"%v\", int32(2147483647), \"2147483647\"},\n\t\t{scsDefault, fCSFprintln, \"\", int(2147483647), \"2147483647\\n\"},\n\t\t{scsDefault, fCSPrint, \"\", int64(9223372036854775807), \"9223372036854775807\"},\n\t\t{scsDefault, fCSPrintln, \"\", uint8(255), \"255\\n\"},\n\t\t{scsDefault, fCSSdump, \"\", uint8(64), \"(uint8) 64\\n\"},\n\t\t{scsDefault, fCSSprint, \"\", complex(1, 2), \"(1+2i)\"},\n\t\t{scsDefault, fCSSprintf, \"%v\", complex(float32(3), 4), \"(3+4i)\"},\n\t\t{scsDefault, fCSSprintln, \"\", complex(float64(5), 6), \"(5+6i)\\n\"},\n\t\t{scsDefault, fCSErrorf, \"%#v\", uint16(65535), \"(uint16)65535\"},\n\t\t{scsDefault, fCSNewFormatter, \"%v\", uint32(4294967295), \"4294967295\"},\n\t\t{scsDefault, fErrorf, \"%v\", uint64(18446744073709551615), \"18446744073709551615\"},\n\t\t{scsDefault, fFprint, \"\", float32(3.14), \"3.14\"},\n\t\t{scsDefault, fFprintln, \"\", float64(6.28), \"6.28\\n\"},\n\t\t{scsDefault, fPrint, \"\", true, \"true\"},\n\t\t{scsDefault, fPrintln, \"\", false, \"false\\n\"},\n\t\t{scsDefault, fSdump, \"\", complex(-10, -20), \"(complex128) (-10-20i)\\n\"},\n\t\t{scsDefault, fSprint, \"\", complex(-1, -2), \"(-1-2i)\"},\n\t\t{scsDefault, fSprintf, \"%v\", complex(float32(-3), -4), \"(-3-4i)\"},\n\t\t{scsDefault, fSprintln, \"\", complex(float64(-5), -6), \"(-5-6i)\\n\"},\n\t\t{scsNoMethods, fCSFprint, \"\", ts, \"test\"},\n\t\t{scsNoMethods, fCSFprint, \"\", &ts, \"<*>test\"},\n\t\t{scsNoMethods, fCSFprint, \"\", tps, \"test\"},\n\t\t{scsNoMethods, fCSFprint, \"\", &tps, \"<*>test\"},\n\t\t{scsNoPmethods, fCSFprint, \"\", ts, 
\"stringer test\"},\n\t\t{scsNoPmethods, fCSFprint, \"\", &ts, \"<*>stringer test\"},\n\t\t{scsNoPmethods, fCSFprint, \"\", tps, \"test\"},\n\t\t{scsNoPmethods, fCSFprint, \"\", &tps, \"<*>stringer test\"},\n\t\t{scsMaxDepth, fCSFprint, \"\", dt, \"{{<max>} [<max>] [<max>] map[<max>]}\"},\n\t\t{scsMaxDepth, fCSFdump, \"\", dt, \"(spew_test.depthTester) {\\n\" +\n\t\t\t\" ic: (spew_test.indirCir1) {\\n  <max depth reached>\\n },\\n\" +\n\t\t\t\" arr: ([1]string) (len=1 cap=1) {\\n  <max depth reached>\\n },\\n\" +\n\t\t\t\" slice: ([]string) (len=1 cap=1) {\\n  <max depth reached>\\n },\\n\" +\n\t\t\t\" m: (map[string]int) (len=1) {\\n  <max depth reached>\\n }\\n}\\n\"},\n\t\t{scsContinue, fCSFprint, \"\", ts, \"(stringer test) test\"},\n\t\t{scsContinue, fCSFdump, \"\", ts, \"(spew_test.stringer) \" +\n\t\t\t\"(len=4) (stringer test) \\\"test\\\"\\n\"},\n\t\t{scsContinue, fCSFprint, \"\", te, \"(error: 10) 10\"},\n\t\t{scsContinue, fCSFdump, \"\", te, \"(spew_test.customError) \" +\n\t\t\t\"(error: 10) 10\\n\"},\n\t\t{scsNoPtrAddr, fCSFprint, \"\", tptr, \"<*>{<*>{}}\"},\n\t\t{scsNoPtrAddr, fCSSdump, \"\", tptr, \"(*spew_test.ptrTester)({\\ns: (*struct {})({\\n})\\n})\\n\"},\n\t\t{scsNoCap, fCSSdump, \"\", make([]string, 0, 10), \"([]string) {\\n}\\n\"},\n\t\t{scsNoCap, fCSSdump, \"\", make([]string, 1, 10), \"([]string) (len=1) {\\n(string) \\\"\\\"\\n}\\n\"},\n\t}\n}\n\n// TestSpew executes all of the tests described by spewTests.\nfunc TestSpew(t *testing.T) {\n\tinitSpewTests()\n\n\tt.Logf(\"Running %d tests\", len(spewTests))\n\tfor i, test := range spewTests {\n\t\tbuf := new(bytes.Buffer)\n\t\tswitch test.f {\n\t\tcase fCSFdump:\n\t\t\ttest.cs.Fdump(buf, test.in)\n\n\t\tcase fCSFprint:\n\t\t\ttest.cs.Fprint(buf, test.in)\n\n\t\tcase fCSFprintf:\n\t\t\ttest.cs.Fprintf(buf, test.format, test.in)\n\n\t\tcase fCSFprintln:\n\t\t\ttest.cs.Fprintln(buf, test.in)\n\n\t\tcase fCSPrint:\n\t\t\tb, err := redirStdout(func() { test.cs.Print(test.in) })\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"%v #%d %v\", test.f, i, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbuf.Write(b)\n\n\t\tcase fCSPrintln:\n\t\t\tb, err := redirStdout(func() { test.cs.Println(test.in) })\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"%v #%d %v\", test.f, i, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbuf.Write(b)\n\n\t\tcase fCSSdump:\n\t\t\tstr := test.cs.Sdump(test.in)\n\t\t\tbuf.WriteString(str)\n\n\t\tcase fCSSprint:\n\t\t\tstr := test.cs.Sprint(test.in)\n\t\t\tbuf.WriteString(str)\n\n\t\tcase fCSSprintf:\n\t\t\tstr := test.cs.Sprintf(test.format, test.in)\n\t\t\tbuf.WriteString(str)\n\n\t\tcase fCSSprintln:\n\t\t\tstr := test.cs.Sprintln(test.in)\n\t\t\tbuf.WriteString(str)\n\n\t\tcase fCSErrorf:\n\t\t\terr := test.cs.Errorf(test.format, test.in)\n\t\t\tbuf.WriteString(err.Error())\n\n\t\tcase fCSNewFormatter:\n\t\t\tfmt.Fprintf(buf, test.format, test.cs.NewFormatter(test.in))\n\n\t\tcase fErrorf:\n\t\t\terr := spew.Errorf(test.format, test.in)\n\t\t\tbuf.WriteString(err.Error())\n\n\t\tcase fFprint:\n\t\t\tspew.Fprint(buf, test.in)\n\n\t\tcase fFprintln:\n\t\t\tspew.Fprintln(buf, test.in)\n\n\t\tcase fPrint:\n\t\t\tb, err := redirStdout(func() { spew.Print(test.in) })\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"%v #%d %v\", test.f, i, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbuf.Write(b)\n\n\t\tcase fPrintln:\n\t\t\tb, err := redirStdout(func() { spew.Println(test.in) })\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"%v #%d %v\", test.f, i, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbuf.Write(b)\n\n\t\tcase fSdump:\n\t\t\tstr := 
spew.Sdump(test.in)\n\t\t\tbuf.WriteString(str)\n\n\t\tcase fSprint:\n\t\t\tstr := spew.Sprint(test.in)\n\t\t\tbuf.WriteString(str)\n\n\t\tcase fSprintf:\n\t\t\tstr := spew.Sprintf(test.format, test.in)\n\t\t\tbuf.WriteString(str)\n\n\t\tcase fSprintln:\n\t\t\tstr := spew.Sprintln(test.in)\n\t\t\tbuf.WriteString(str)\n\n\t\tdefault:\n\t\t\tt.Errorf(\"%v #%d unrecognized function\", test.f, i)\n\t\t\tcontinue\n\t\t}\n\t\ts := buf.String()\n\t\tif test.want != s {\n\t\t\tt.Errorf(\"ConfigState #%d\\n got: %s want: %s\", i, s, test.want)\n\t\t\tcontinue\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/davecgh/go-spew/test_coverage.txt",
    "content": "\ngithub.com/davecgh/go-spew/spew/dump.go\t\t dumpState.dump\t\t\t 100.00% (88/88)\ngithub.com/davecgh/go-spew/spew/format.go\t formatState.format\t\t 100.00% (82/82)\ngithub.com/davecgh/go-spew/spew/format.go\t formatState.formatPtr\t\t 100.00% (52/52)\ngithub.com/davecgh/go-spew/spew/dump.go\t\t dumpState.dumpPtr\t\t 100.00% (44/44)\ngithub.com/davecgh/go-spew/spew/dump.go\t\t dumpState.dumpSlice\t\t 100.00% (39/39)\ngithub.com/davecgh/go-spew/spew/common.go\t handleMethods\t\t\t 100.00% (30/30)\ngithub.com/davecgh/go-spew/spew/common.go\t printHexPtr\t\t\t 100.00% (18/18)\ngithub.com/davecgh/go-spew/spew/common.go\t unsafeReflectValue\t\t 100.00% (13/13)\ngithub.com/davecgh/go-spew/spew/format.go\t formatState.constructOrigFormat 100.00% (12/12)\ngithub.com/davecgh/go-spew/spew/dump.go\t\t fdump\t\t\t\t 100.00% (11/11)\ngithub.com/davecgh/go-spew/spew/format.go\t formatState.Format\t\t 100.00% (11/11)\ngithub.com/davecgh/go-spew/spew/common.go\t init\t\t\t\t 100.00% (10/10)\ngithub.com/davecgh/go-spew/spew/common.go\t printComplex\t\t\t 100.00% (9/9)\ngithub.com/davecgh/go-spew/spew/common.go\t valuesSorter.Less\t\t 100.00% (8/8)\ngithub.com/davecgh/go-spew/spew/format.go\t formatState.buildDefaultFormat\t 100.00% (7/7)\ngithub.com/davecgh/go-spew/spew/format.go\t formatState.unpackValue\t 100.00% (5/5)\ngithub.com/davecgh/go-spew/spew/dump.go\t\t dumpState.indent\t\t 100.00% (4/4)\ngithub.com/davecgh/go-spew/spew/common.go\t catchPanic\t\t\t 100.00% (4/4)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.convertArgs\t 100.00% (4/4)\ngithub.com/davecgh/go-spew/spew/spew.go\t\t convertArgs\t\t\t 100.00% (4/4)\ngithub.com/davecgh/go-spew/spew/format.go\t newFormatter\t\t\t 100.00% (3/3)\ngithub.com/davecgh/go-spew/spew/dump.go\t\t Sdump\t\t\t\t 100.00% (3/3)\ngithub.com/davecgh/go-spew/spew/common.go\t printBool\t\t\t 100.00% (3/3)\ngithub.com/davecgh/go-spew/spew/common.go\t sortValues\t\t\t 100.00% (3/3)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.Sdump\t\t 100.00% (3/3)\ngithub.com/davecgh/go-spew/spew/dump.go\t\t dumpState.unpackValue\t\t 100.00% (3/3)\ngithub.com/davecgh/go-spew/spew/spew.go\t\t Printf\t\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/spew.go\t\t Println\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/spew.go\t\t Sprint\t\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/spew.go\t\t Sprintf\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/spew.go\t\t Sprintln\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/common.go\t printFloat\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/config.go\t NewDefaultConfig\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/common.go\t printInt\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/common.go\t printUint\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/common.go\t valuesSorter.Len\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/common.go\t valuesSorter.Swap\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.Errorf\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.Fprint\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.Fprintf\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.Fprintln\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.Print\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.Printf\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.Println\t\t 100.00% 
(1/1)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.Sprint\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.Sprintf\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.Sprintln\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.NewFormatter\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.Fdump\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/config.go\t ConfigState.Dump\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/dump.go\t\t Fdump\t\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/dump.go\t\t Dump\t\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/spew.go\t\t Fprintln\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/format.go\t NewFormatter\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/spew.go\t\t Errorf\t\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/spew.go\t\t Fprint\t\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/spew.go\t\t Fprintf\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew/spew.go\t\t Print\t\t\t\t 100.00% (1/1)\ngithub.com/davecgh/go-spew/spew\t\t\t ------------------------------- 100.00% (505/505)\n\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/.gitignore",
    "content": ".DS_Store\n*.[568ao]\n*.ao\n*.so\n*.pyc\n._*\n.nfs.*\n[568a].out\n*~\n*.orig\ncore\n_obj\n_test\n_testmain.go\nprotoc-gen-go/testdata/multi/*.pb.go\n_conformance/_conformance\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/.travis.yml",
    "content": "sudo: false\nlanguage: go\ngo:\n- 1.6.x\n- 1.7.x\n- 1.8.x\n- 1.9.x\n\ninstall:\n  - go get -v -d -t github.com/golang/protobuf/...\n  - curl -L https://github.com/google/protobuf/releases/download/v3.3.0/protoc-3.3.0-linux-x86_64.zip -o /tmp/protoc.zip\n  - unzip /tmp/protoc.zip -d $HOME/protoc\n\nenv:\n  - PATH=$HOME/protoc/bin:$PATH\n\nscript:\n  - make all test\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/AUTHORS",
    "content": "# This source code refers to The Go Authors for copyright purposes.\n# The master list of authors is in the main Go distribution,\n# visible at http://tip.golang.org/AUTHORS.\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/CONTRIBUTORS",
    "content": "# This source code was written by the Go contributors.\n# The master list of contributors is in the main Go distribution,\n# visible at http://tip.golang.org/CONTRIBUTORS.\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/LICENSE",
    "content": "Go support for Protocol Buffers - Google's data interchange format\n\nCopyright 2010 The Go Authors.  All rights reserved.\nhttps://github.com/golang/protobuf\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n    * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n    * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n    * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/Make.protobuf",
    "content": "# Go support for Protocol Buffers - Google's data interchange format\n#\n# Copyright 2010 The Go Authors.  All rights reserved.\n# https://github.com/golang/protobuf\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n#     * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#     * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following disclaimer\n# in the documentation and/or other materials provided with the\n# distribution.\n#     * Neither the name of Google Inc. nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n# Includable Makefile to add a rule for generating .pb.go files from .proto files\n# (Google protocol buffer descriptions).\n# Typical use if myproto.proto is a file in package mypackage in this directory:\n#\n#\tinclude $(GOROOT)/src/pkg/github.com/golang/protobuf/Make.protobuf\n\n%.pb.go:\t%.proto\n\tprotoc --go_out=. $<\n\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/Makefile",
    "content": "# Go support for Protocol Buffers - Google's data interchange format\n#\n# Copyright 2010 The Go Authors.  All rights reserved.\n# https://github.com/golang/protobuf\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n#     * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#     * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following disclaimer\n# in the documentation and/or other materials provided with the\n# distribution.\n#     * Neither the name of Google Inc. nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nall:\tinstall\n\ninstall:\n\tgo install ./proto ./jsonpb ./ptypes\n\tgo install ./protoc-gen-go\n\ntest:\n\tgo test ./proto ./jsonpb ./ptypes\n\tmake -C protoc-gen-go/testdata test\n\nclean:\n\tgo clean ./...\n\nnuke:\n\tgo clean -i ./...\n\nregenerate:\n\tmake -C protoc-gen-go/descriptor regenerate\n\tmake -C protoc-gen-go/plugin regenerate\n\tmake -C protoc-gen-go/testdata regenerate\n\tmake -C proto/testdata regenerate\n\tmake -C jsonpb/jsonpb_test_proto regenerate\n\tmake -C _conformance regenerate\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/README.md",
    "content": "# Go support for Protocol Buffers\n\n[![Build Status](https://travis-ci.org/golang/protobuf.svg?branch=master)](https://travis-ci.org/golang/protobuf)\n\nGoogle's data interchange format.\nCopyright 2010 The Go Authors.\nhttps://github.com/golang/protobuf\n\nThis package and the code it generates requires at least Go 1.4.\n\nThis software implements Go bindings for protocol buffers.  For\ninformation about protocol buffers themselves, see\n\thttps://developers.google.com/protocol-buffers/\n\n## Installation ##\n\nTo use this software, you must:\n- Install the standard C++ implementation of protocol buffers from\n\thttps://developers.google.com/protocol-buffers/\n- Of course, install the Go compiler and tools from\n\thttps://golang.org/\n  See\n\thttps://golang.org/doc/install\n  for details or, if you are using gccgo, follow the instructions at\n\thttps://golang.org/doc/install/gccgo\n- Grab the code from the repository and install the proto package.\n  The simplest way is to run `go get -u github.com/golang/protobuf/protoc-gen-go`.\n  The compiler plugin, protoc-gen-go, will be installed in $GOBIN,\n  defaulting to $GOPATH/bin.  It must be in your $PATH for the protocol\n  compiler, protoc, to find it.\n\nThis software has two parts: a 'protocol compiler plugin' that\ngenerates Go source files that, once compiled, can access and manage\nprotocol buffers; and a library that implements run-time support for\nencoding (marshaling), decoding (unmarshaling), and accessing protocol\nbuffers.\n\nThere is support for gRPC in Go using protocol buffers.\nSee the note at the bottom of this file for details.\n\nThere are no insertion points in the plugin.\n\n\n## Using protocol buffers with Go ##\n\nOnce the software is installed, there are two steps to using it.\nFirst you must compile the protocol buffer definitions and then import\nthem, with the support library, into your program.\n\nTo compile the protocol buffer definition, run protoc with the --go_out\nparameter set to the directory you want to output the Go code to.\n\n\tprotoc --go_out=. *.proto\n\nThe generated files will be suffixed .pb.go.  See the Test code below\nfor an example using such a file.\n\n\nThe package comment for the proto library contains text describing\nthe interface provided in Go for protocol buffers. Here is an edited\nversion.\n\n==========\n\nThe proto package converts data structures to and from the\nwire format of protocol buffers.  
It works in concert with the\nGo source code generated for .proto files by the protocol compiler.\n\nA summary of the properties of the protocol buffer interface\nfor a protocol buffer variable v:\n\n  - Names are turned from camel_case to CamelCase for export.\n  - There are no methods on v to set fields; just treat\n  \tthem as structure fields.\n  - There are getters that return a field's value if set,\n\tand return the field's default value if unset.\n\tThe getters work even if the receiver is a nil message.\n  - The zero value for a struct is its correct initialization state.\n\tAll desired fields must be set before marshaling.\n  - A Reset() method will restore a protobuf struct to its zero state.\n  - Non-repeated fields are pointers to the values; nil means unset.\n\tThat is, optional or required field int32 f becomes F *int32.\n  - Repeated fields are slices.\n  - Helper functions are available to aid the setting of fields.\n\tHelpers for getting values are superseded by the\n\tGetFoo methods and their use is deprecated.\n\t\tmsg.Foo = proto.String(\"hello\") // set field\n  - Constants are defined to hold the default values of all fields that\n\thave them.  They have the form Default_StructName_FieldName.\n\tBecause the getter methods handle defaulted values,\n\tdirect use of these constants should be rare.\n  - Enums are given type names and maps from names to values.\n\tEnum values are prefixed with the enum's type name. Enum types have\n\ta String method, and an Enum method to assist in message construction.\n  - Nested groups and enums have type names prefixed with the name of\n  \tthe surrounding message type.\n  - Extensions are given descriptor names that start with E_,\n\tfollowed by an underscore-delimited list of the nested messages\n\tthat contain it (if any) followed by the CamelCased name of the\n\textension field itself.  
HasExtension, ClearExtension, GetExtension\n\tand SetExtension are functions for manipulating extensions.\n  - Oneof field sets are given a single field in their message,\n\twith distinguished wrapper types for each possible field value.\n  - Marshal and Unmarshal are functions to encode and decode the wire format.\n\nWhen the .proto file specifies `syntax=\"proto3\"`, there are some differences:\n\n  - Non-repeated fields of non-message type are values instead of pointers.\n  - Enum types do not get an Enum method.\n\nConsider file test.proto, containing\n\n```proto\n\tpackage example;\n\t\n\tenum FOO { X = 17; };\n\t\n\tmessage Test {\n\t  required string label = 1;\n\t  optional int32 type = 2 [default=77];\n\t  repeated int64 reps = 3;\n\t  optional group OptionalGroup = 4 {\n\t    required string RequiredField = 5;\n\t  }\n\t}\n```\n\nTo create and play with a Test object from the example package,\n\n```go\n\tpackage main\n\n\timport (\n\t\t\"log\"\n\n\t\t\"github.com/golang/protobuf/proto\"\n\t\t\"path/to/example\"\n\t)\n\n\tfunc main() {\n\t\ttest := &example.Test {\n\t\t\tLabel: proto.String(\"hello\"),\n\t\t\tType:  proto.Int32(17),\n\t\t\tReps:  []int64{1, 2, 3},\n\t\t\tOptionalgroup: &example.Test_OptionalGroup {\n\t\t\t\tRequiredField: proto.String(\"good bye\"),\n\t\t\t},\n\t\t}\n\t\tdata, err := proto.Marshal(test)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"marshaling error: \", err)\n\t\t}\n\t\tnewTest := &example.Test{}\n\t\terr = proto.Unmarshal(data, newTest)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"unmarshaling error: \", err)\n\t\t}\n\t\t// Now test and newTest contain the same data.\n\t\tif test.GetLabel() != newTest.GetLabel() {\n\t\t\tlog.Fatalf(\"data mismatch %q != %q\", test.GetLabel(), newTest.GetLabel())\n\t\t}\n\t\t// etc.\n\t}\n```\n\n## Parameters ##\n\nTo pass extra parameters to the plugin, use a comma-separated\nparameter list separated from the output directory by a colon:\n\n\n\tprotoc --go_out=plugins=grpc,import_path=mypackage:. *.proto\n\n\n- `import_prefix=xxx` - a prefix that is added onto the beginning of\n  all imports. Useful for things like generating protos in a\n  subdirectory, or regenerating vendored protobufs in-place.\n- `import_path=foo/bar` - used as the package if no input files\n  declare `go_package`. If it contains slashes, everything up to the\n  rightmost slash is ignored.\n- `plugins=plugin1+plugin2` - specifies the list of sub-plugins to\n  load. The only plugin in this repo is `grpc`.\n- `Mfoo/bar.proto=quux/shme` - declares that foo/bar.proto is\n  associated with Go package quux/shme.  This is subject to the\n  import_prefix parameter.\n\n## gRPC Support ##\n\nIf a proto file specifies RPC services, protoc-gen-go can be instructed to\ngenerate code compatible with gRPC (http://www.grpc.io/). To do this, pass\nthe `plugins` parameter to protoc-gen-go; the usual way is to insert it into\nthe --go_out argument to protoc:\n\n\tprotoc --go_out=plugins=grpc:. *.proto\n\n## Compatibility ##\n\nThe library and the generated code are expected to be stable over time.\nHowever, we reserve the right to make breaking changes without notice for the\nfollowing reasons:\n\n- Security. A security issue in the specification or implementation may come to\n  light whose resolution requires breaking compatibility. We reserve the right\n  to address such security issues.\n- Unspecified behavior.  There are some aspects of the Protocol Buffers\n  specification that are undefined.  
Programs that depend on such unspecified\n  behavior may break in future releases.\n- Specification errors or changes. If it becomes necessary to address an\n  inconsistency, incompleteness, or change in the Protocol Buffers\n  specification, resolving the issue could affect the meaning or legality of\n  existing programs.  We reserve the right to address such issues, including\n  updating the implementations.\n- Bugs.  If the library has a bug that violates the specification, a program\n  that depends on the buggy behavior may break if the bug is fixed.  We reserve\n  the right to fix such bugs.\n- Adding methods or fields to generated structs.  These may conflict with field\n  names that already exist in a schema, causing applications to break.  When the\n  code generator encounters a field in the schema that would collide with a\n  generated field or method name, the code generator will append an underscore\n  to the generated field or method name.\n- Adding, removing, or changing methods or fields in generated structs that\n  start with `XXX`.  These parts of the generated code are exported out of\n  necessity, but should not be considered part of the public API.\n- Adding, removing, or changing unexported symbols in generated code.\n\nAny breaking changes outside of these will be announced 6 months in advance to\nprotobuf@googlegroups.com.\n\nYou should, whenever possible, use generated code created by the `protoc-gen-go`\ntool built at the same commit as the `proto` package.  The `proto` package\ndeclares package-level constants in the form `ProtoPackageIsVersionX`.\nApplication code and generated code may depend on one of these constants to\nensure that compilation will fail if the available version of the proto library\nis too old.  Whenever we make a change to the generated code that requires newer\nlibrary support, in the same commit we will increment the version number of the\ngenerated code and declare a new package-level constant whose name incorporates\nthe latest version number.  Removing a compatibility constant is considered a\nbreaking change and would be subject to the announcement policy stated above.\n\nThe `protoc-gen-go/generator` package exposes a plugin interface,\nwhich is used by the gRPC code generation. This interface is not\nsupported and is subject to incompatible changes without notice.\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/Makefile",
    "content": "# Go support for Protocol Buffers - Google's data interchange format\n#\n# Copyright 2010 The Go Authors.  All rights reserved.\n# https://github.com/golang/protobuf\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n#     * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n#     * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following disclaimer\n# in the documentation and/or other materials provided with the\n# distribution.\n#     * Neither the name of Google Inc. nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\ninstall:\n\tgo install\n\ntest: install generate-test-pbs\n\tgo test\n\n\ngenerate-test-pbs:\n\tmake install\n\tmake -C testdata\n\tprotoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto\n\tmake\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/all_test.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto_test\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"math/rand\"\n\t\"reflect\"\n\t\"runtime/debug\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t. \"github.com/golang/protobuf/proto\"\n\t. 
\"github.com/golang/protobuf/proto/testdata\"\n)\n\nvar globalO *Buffer\n\nfunc old() *Buffer {\n\tif globalO == nil {\n\t\tglobalO = NewBuffer(nil)\n\t}\n\tglobalO.Reset()\n\treturn globalO\n}\n\nfunc equalbytes(b1, b2 []byte, t *testing.T) {\n\tif len(b1) != len(b2) {\n\t\tt.Errorf(\"wrong lengths: 2*%d != %d\", len(b1), len(b2))\n\t\treturn\n\t}\n\tfor i := 0; i < len(b1); i++ {\n\t\tif b1[i] != b2[i] {\n\t\t\tt.Errorf(\"bad byte[%d]:%x %x: %s %s\", i, b1[i], b2[i], b1, b2)\n\t\t}\n\t}\n}\n\nfunc initGoTestField() *GoTestField {\n\tf := new(GoTestField)\n\tf.Label = String(\"label\")\n\tf.Type = String(\"type\")\n\treturn f\n}\n\n// These are all structurally equivalent but the tag numbers differ.\n// (It's remarkable that required, optional, and repeated all have\n// 8 letters.)\nfunc initGoTest_RequiredGroup() *GoTest_RequiredGroup {\n\treturn &GoTest_RequiredGroup{\n\t\tRequiredField: String(\"required\"),\n\t}\n}\n\nfunc initGoTest_OptionalGroup() *GoTest_OptionalGroup {\n\treturn &GoTest_OptionalGroup{\n\t\tRequiredField: String(\"optional\"),\n\t}\n}\n\nfunc initGoTest_RepeatedGroup() *GoTest_RepeatedGroup {\n\treturn &GoTest_RepeatedGroup{\n\t\tRequiredField: String(\"repeated\"),\n\t}\n}\n\nfunc initGoTest(setdefaults bool) *GoTest {\n\tpb := new(GoTest)\n\tif setdefaults {\n\t\tpb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted)\n\t\tpb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted)\n\t\tpb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted)\n\t\tpb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted)\n\t\tpb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted)\n\t\tpb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted)\n\t\tpb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted)\n\t\tpb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted)\n\t\tpb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted)\n\t\tpb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted)\n\t\tpb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted\n\t\tpb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted)\n\t\tpb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted)\n\t}\n\n\tpb.Kind = GoTest_TIME.Enum()\n\tpb.RequiredField = initGoTestField()\n\tpb.F_BoolRequired = Bool(true)\n\tpb.F_Int32Required = Int32(3)\n\tpb.F_Int64Required = Int64(6)\n\tpb.F_Fixed32Required = Uint32(32)\n\tpb.F_Fixed64Required = Uint64(64)\n\tpb.F_Uint32Required = Uint32(3232)\n\tpb.F_Uint64Required = Uint64(6464)\n\tpb.F_FloatRequired = Float32(3232)\n\tpb.F_DoubleRequired = Float64(6464)\n\tpb.F_StringRequired = String(\"string\")\n\tpb.F_BytesRequired = []byte(\"bytes\")\n\tpb.F_Sint32Required = Int32(-32)\n\tpb.F_Sint64Required = Int64(-64)\n\tpb.Requiredgroup = initGoTest_RequiredGroup()\n\n\treturn pb\n}\n\nfunc fail(msg string, b *bytes.Buffer, s string, t *testing.T) {\n\tdata := b.Bytes()\n\tld := len(data)\n\tls := len(s) / 2\n\n\tfmt.Printf(\"fail %s ld=%d ls=%d\\n\", msg, ld, ls)\n\n\t// find the interesting spot - n\n\tn := ls\n\tif ld < ls {\n\t\tn = ld\n\t}\n\tj := 0\n\tfor i := 0; i < n; i++ {\n\t\tbs := hex(s[j])*16 + hex(s[j+1])\n\t\tj += 2\n\t\tif data[i] == bs {\n\t\t\tcontinue\n\t\t}\n\t\tn = i\n\t\tbreak\n\t}\n\tl := n - 10\n\tif l < 0 {\n\t\tl = 0\n\t}\n\th := n + 10\n\n\t// find the interesting spot - n\n\tfmt.Printf(\"is[%d]:\", l)\n\tfor i := l; i < h; i++ {\n\t\tif i >= ld {\n\t\t\tfmt.Printf(\" --\")\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Printf(\" %.2x\", 
data[i])\n\t}\n\tfmt.Printf(\"\\n\")\n\n\tfmt.Printf(\"sb[%d]:\", l)\n\tfor i := l; i < h; i++ {\n\t\tif i >= ls {\n\t\t\tfmt.Printf(\" --\")\n\t\t\tcontinue\n\t\t}\n\t\tbs := hex(s[j])*16 + hex(s[j+1])\n\t\tj += 2\n\t\tfmt.Printf(\" %.2x\", bs)\n\t}\n\tfmt.Printf(\"\\n\")\n\n\tt.Fail()\n\n\t//\tt.Errorf(\"%s: \\ngood: %s\\nbad: %x\", msg, s, b.Bytes())\n\t// Print the output in a partially-decoded format; can\n\t// be helpful when updating the test.  It produces the output\n\t// that is pasted, with minor edits, into the argument to verify().\n\t//\tdata := b.Bytes()\n\t//\tnesting := 0\n\t//\tfor b.Len() > 0 {\n\t//\t\tstart := len(data) - b.Len()\n\t//\t\tvar u uint64\n\t//\t\tu, err := DecodeVarint(b)\n\t//\t\tif err != nil {\n\t//\t\t\tfmt.Printf(\"decode error on varint:\", err)\n\t//\t\t\treturn\n\t//\t\t}\n\t//\t\twire := u & 0x7\n\t//\t\ttag := u >> 3\n\t//\t\tswitch wire {\n\t//\t\tcase WireVarint:\n\t//\t\t\tv, err := DecodeVarint(b)\n\t//\t\t\tif err != nil {\n\t//\t\t\t\tfmt.Printf(\"decode error on varint:\", err)\n\t//\t\t\t\treturn\n\t//\t\t\t}\n\t//\t\t\tfmt.Printf(\"\\t\\t\\\"%x\\\"  // field %d, encoding %d, value %d\\n\",\n\t//\t\t\t\tdata[start:len(data)-b.Len()], tag, wire, v)\n\t//\t\tcase WireFixed32:\n\t//\t\t\tv, err := DecodeFixed32(b)\n\t//\t\t\tif err != nil {\n\t//\t\t\t\tfmt.Printf(\"decode error on fixed32:\", err)\n\t//\t\t\t\treturn\n\t//\t\t\t}\n\t//\t\t\tfmt.Printf(\"\\t\\t\\\"%x\\\"  // field %d, encoding %d, value %d\\n\",\n\t//\t\t\t\tdata[start:len(data)-b.Len()], tag, wire, v)\n\t//\t\tcase WireFixed64:\n\t//\t\t\tv, err := DecodeFixed64(b)\n\t//\t\t\tif err != nil {\n\t//\t\t\t\tfmt.Printf(\"decode error on fixed64:\", err)\n\t//\t\t\t\treturn\n\t//\t\t\t}\n\t//\t\t\tfmt.Printf(\"\\t\\t\\\"%x\\\"  // field %d, encoding %d, value %d\\n\",\n\t//\t\t\t\tdata[start:len(data)-b.Len()], tag, wire, v)\n\t//\t\tcase WireBytes:\n\t//\t\t\tnb, err := DecodeVarint(b)\n\t//\t\t\tif err != nil {\n\t//\t\t\t\tfmt.Printf(\"decode error on bytes:\", err)\n\t//\t\t\t\treturn\n\t//\t\t\t}\n\t//\t\t\tafter_tag := len(data) - b.Len()\n\t//\t\t\tstr := make([]byte, nb)\n\t//\t\t\t_, err = b.Read(str)\n\t//\t\t\tif err != nil {\n\t//\t\t\t\tfmt.Printf(\"decode error on bytes:\", err)\n\t//\t\t\t\treturn\n\t//\t\t\t}\n\t//\t\t\tfmt.Printf(\"\\t\\t\\\"%x\\\" \\\"%x\\\"  // field %d, encoding %d (FIELD)\\n\",\n\t//\t\t\t\tdata[start:after_tag], str, tag, wire)\n\t//\t\tcase WireStartGroup:\n\t//\t\t\tnesting++\n\t//\t\t\tfmt.Printf(\"\\t\\t\\\"%x\\\"\\t\\t// start group field %d level %d\\n\",\n\t//\t\t\t\tdata[start:len(data)-b.Len()], tag, nesting)\n\t//\t\tcase WireEndGroup:\n\t//\t\t\tfmt.Printf(\"\\t\\t\\\"%x\\\"\\t\\t// end group field %d level %d\\n\",\n\t//\t\t\t\tdata[start:len(data)-b.Len()], tag, nesting)\n\t//\t\t\tnesting--\n\t//\t\tdefault:\n\t//\t\t\tfmt.Printf(\"unrecognized wire type %d\\n\", wire)\n\t//\t\t\treturn\n\t//\t\t}\n\t//\t}\n}\n\nfunc hex(c uint8) uint8 {\n\tif '0' <= c && c <= '9' {\n\t\treturn c - '0'\n\t}\n\tif 'a' <= c && c <= 'f' {\n\t\treturn 10 + c - 'a'\n\t}\n\tif 'A' <= c && c <= 'F' {\n\t\treturn 10 + c - 'A'\n\t}\n\treturn 0\n}\n\nfunc equal(b []byte, s string, t *testing.T) bool {\n\tif 2*len(b) != len(s) {\n\t\t//\t\tfail(fmt.Sprintf(\"wrong lengths: 2*%d != %d\", len(b), len(s)), b, s, t)\n\t\tfmt.Printf(\"wrong lengths: 2*%d != %d\\n\", len(b), len(s))\n\t\treturn false\n\t}\n\tfor i, j := 0, 0; i < len(b); i, j = i+1, j+2 {\n\t\tx := hex(s[j])*16 + hex(s[j+1])\n\t\tif b[i] != x {\n\t\t\t//\t\t\tfail(fmt.Sprintf(\"bad byte[%d]:%x 
%x\", i, b[i], x), b, s, t)\n\t\t\tfmt.Printf(\"bad byte[%d]:%x %x\", i, b[i], x)\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc overify(t *testing.T, pb *GoTest, expected string) {\n\to := old()\n\terr := o.Marshal(pb)\n\tif err != nil {\n\t\tfmt.Printf(\"overify marshal-1 err = %v\", err)\n\t\to.DebugPrint(\"\", o.Bytes())\n\t\tt.Fatalf(\"expected = %s\", expected)\n\t}\n\tif !equal(o.Bytes(), expected, t) {\n\t\to.DebugPrint(\"overify neq 1\", o.Bytes())\n\t\tt.Fatalf(\"expected = %s\", expected)\n\t}\n\n\t// Now test Unmarshal by recreating the original buffer.\n\tpbd := new(GoTest)\n\terr = o.Unmarshal(pbd)\n\tif err != nil {\n\t\tt.Fatalf(\"overify unmarshal err = %v\", err)\n\t\to.DebugPrint(\"\", o.Bytes())\n\t\tt.Fatalf(\"string = %s\", expected)\n\t}\n\to.Reset()\n\terr = o.Marshal(pbd)\n\tif err != nil {\n\t\tt.Errorf(\"overify marshal-2 err = %v\", err)\n\t\to.DebugPrint(\"\", o.Bytes())\n\t\tt.Fatalf(\"string = %s\", expected)\n\t}\n\tif !equal(o.Bytes(), expected, t) {\n\t\to.DebugPrint(\"overify neq 2\", o.Bytes())\n\t\tt.Fatalf(\"string = %s\", expected)\n\t}\n}\n\n// Simple tests for numeric encode/decode primitives (varint, etc.)\nfunc TestNumericPrimitives(t *testing.T) {\n\tfor i := uint64(0); i < 1e6; i += 111 {\n\t\to := old()\n\t\tif o.EncodeVarint(i) != nil {\n\t\t\tt.Error(\"EncodeVarint\")\n\t\t\tbreak\n\t\t}\n\t\tx, e := o.DecodeVarint()\n\t\tif e != nil {\n\t\t\tt.Fatal(\"DecodeVarint\")\n\t\t}\n\t\tif x != i {\n\t\t\tt.Fatal(\"varint decode fail:\", i, x)\n\t\t}\n\n\t\to = old()\n\t\tif o.EncodeFixed32(i) != nil {\n\t\t\tt.Fatal(\"encFixed32\")\n\t\t}\n\t\tx, e = o.DecodeFixed32()\n\t\tif e != nil {\n\t\t\tt.Fatal(\"decFixed32\")\n\t\t}\n\t\tif x != i {\n\t\t\tt.Fatal(\"fixed32 decode fail:\", i, x)\n\t\t}\n\n\t\to = old()\n\t\tif o.EncodeFixed64(i*1234567) != nil {\n\t\t\tt.Error(\"encFixed64\")\n\t\t\tbreak\n\t\t}\n\t\tx, e = o.DecodeFixed64()\n\t\tif e != nil {\n\t\t\tt.Error(\"decFixed64\")\n\t\t\tbreak\n\t\t}\n\t\tif x != i*1234567 {\n\t\t\tt.Error(\"fixed64 decode fail:\", i*1234567, x)\n\t\t\tbreak\n\t\t}\n\n\t\to = old()\n\t\ti32 := int32(i - 12345)\n\t\tif o.EncodeZigzag32(uint64(i32)) != nil {\n\t\t\tt.Fatal(\"EncodeZigzag32\")\n\t\t}\n\t\tx, e = o.DecodeZigzag32()\n\t\tif e != nil {\n\t\t\tt.Fatal(\"DecodeZigzag32\")\n\t\t}\n\t\tif x != uint64(uint32(i32)) {\n\t\t\tt.Fatal(\"zigzag32 decode fail:\", i32, x)\n\t\t}\n\n\t\to = old()\n\t\ti64 := int64(i - 12345)\n\t\tif o.EncodeZigzag64(uint64(i64)) != nil {\n\t\t\tt.Fatal(\"EncodeZigzag64\")\n\t\t}\n\t\tx, e = o.DecodeZigzag64()\n\t\tif e != nil {\n\t\t\tt.Fatal(\"DecodeZigzag64\")\n\t\t}\n\t\tif x != uint64(i64) {\n\t\t\tt.Fatal(\"zigzag64 decode fail:\", i64, x)\n\t\t}\n\t}\n}\n\n// fakeMarshaler is a simple struct implementing Marshaler and Message interfaces.\ntype fakeMarshaler struct {\n\tb   []byte\n\terr error\n}\n\nfunc (f *fakeMarshaler) Marshal() ([]byte, error) { return f.b, f.err }\nfunc (f *fakeMarshaler) String() string           { return fmt.Sprintf(\"Bytes: %v Error: %v\", f.b, f.err) }\nfunc (f *fakeMarshaler) ProtoMessage()            {}\nfunc (f *fakeMarshaler) Reset()                   {}\n\ntype msgWithFakeMarshaler struct {\n\tM *fakeMarshaler `protobuf:\"bytes,1,opt,name=fake\"`\n}\n\nfunc (m *msgWithFakeMarshaler) String() string { return CompactTextString(m) }\nfunc (m *msgWithFakeMarshaler) ProtoMessage()  {}\nfunc (m *msgWithFakeMarshaler) Reset()         {}\n\n// Simple tests for proto messages that implement the Marshaler interface.\nfunc TestMarshalerEncoding(t 
*testing.T) {\n\ttests := []struct {\n\t\tname    string\n\t\tm       Message\n\t\twant    []byte\n\t\terrType reflect.Type\n\t}{\n\t\t{\n\t\t\tname: \"Marshaler that fails\",\n\t\t\tm: &fakeMarshaler{\n\t\t\t\terr: errors.New(\"some marshal err\"),\n\t\t\t\tb:   []byte{5, 6, 7},\n\t\t\t},\n\t\t\t// Since the Marshal method returned bytes, they should be written to the\n\t\t\t// buffer.  (For efficiency, we assume that Marshal implementations are\n\t\t\t// always correct w.r.t. RequiredNotSetError and output.)\n\t\t\twant:    []byte{5, 6, 7},\n\t\t\terrType: reflect.TypeOf(errors.New(\"some marshal err\")),\n\t\t},\n\t\t{\n\t\t\tname: \"Marshaler that fails with RequiredNotSetError\",\n\t\t\tm: &msgWithFakeMarshaler{\n\t\t\t\tM: &fakeMarshaler{\n\t\t\t\t\terr: &RequiredNotSetError{},\n\t\t\t\t\tb:   []byte{5, 6, 7},\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Since there's an error that can be continued after,\n\t\t\t// the buffer should be written.\n\t\t\twant: []byte{\n\t\t\t\t10, 3, // for &msgWithFakeMarshaler\n\t\t\t\t5, 6, 7, // for &fakeMarshaler\n\t\t\t},\n\t\t\terrType: reflect.TypeOf(&RequiredNotSetError{}),\n\t\t},\n\t\t{\n\t\t\tname: \"Marshaler that succeeds\",\n\t\t\tm: &fakeMarshaler{\n\t\t\t\tb: []byte{0, 1, 2, 3, 4, 127, 255},\n\t\t\t},\n\t\t\twant: []byte{0, 1, 2, 3, 4, 127, 255},\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tb := NewBuffer(nil)\n\t\terr := b.Marshal(test.m)\n\t\tif reflect.TypeOf(err) != test.errType {\n\t\t\tt.Errorf(\"%s: got err %T(%v) wanted %T\", test.name, err, err, test.errType)\n\t\t}\n\t\tif !reflect.DeepEqual(test.want, b.Bytes()) {\n\t\t\tt.Errorf(\"%s: got bytes %v wanted %v\", test.name, b.Bytes(), test.want)\n\t\t}\n\t\tif size := Size(test.m); size != len(b.Bytes()) {\n\t\t\tt.Errorf(\"%s: Size(_) = %v, but marshaled to %v bytes\", test.name, size, len(b.Bytes()))\n\t\t}\n\n\t\tm, mErr := Marshal(test.m)\n\t\tif !bytes.Equal(b.Bytes(), m) {\n\t\t\tt.Errorf(\"%s: Marshal returned %v, but (*Buffer).Marshal wrote %v\", test.name, m, b.Bytes())\n\t\t}\n\t\tif !reflect.DeepEqual(err, mErr) {\n\t\t\tt.Errorf(\"%s: Marshal err = %q, but (*Buffer).Marshal returned %q\",\n\t\t\t\ttest.name, fmt.Sprint(mErr), fmt.Sprint(err))\n\t\t}\n\t}\n}\n\n// Simple tests for bytes\nfunc TestBytesPrimitives(t *testing.T) {\n\to := old()\n\tbytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'}\n\tif o.EncodeRawBytes(bytes) != nil {\n\t\tt.Error(\"EncodeRawBytes\")\n\t}\n\tdecb, e := o.DecodeRawBytes(false)\n\tif e != nil {\n\t\tt.Error(\"DecodeRawBytes\")\n\t}\n\tequalbytes(bytes, decb, t)\n}\n\n// Simple tests for strings\nfunc TestStringPrimitives(t *testing.T) {\n\to := old()\n\ts := \"now is the time\"\n\tif o.EncodeStringBytes(s) != nil {\n\t\tt.Error(\"enc_string\")\n\t}\n\tdecs, e := o.DecodeStringBytes()\n\tif e != nil {\n\t\tt.Error(\"dec_string\")\n\t}\n\tif s != decs {\n\t\tt.Error(\"string encode/decode fail:\", s, decs)\n\t}\n}\n\n// Do we catch the \"required bit not set\" case?\nfunc TestRequiredBit(t *testing.T) {\n\to := old()\n\tpb := new(GoTest)\n\terr := o.Marshal(pb)\n\tif err == nil {\n\t\tt.Error(\"did not catch missing required fields\")\n\t} else if strings.Index(err.Error(), \"Kind\") < 0 {\n\t\tt.Error(\"wrong error type:\", err)\n\t}\n}\n\n// Check that all fields are nil.\n// Clearly silly, and a residue from a more interesting test with an earlier,\n// different initialization property, but it once caught a compiler bug so\n// it lives.\nfunc checkInitialized(pb *GoTest, t *testing.T) {\n\tif 
pb.F_BoolDefaulted != nil {\n\t\tt.Error(\"New or Reset did not set boolean:\", *pb.F_BoolDefaulted)\n\t}\n\tif pb.F_Int32Defaulted != nil {\n\t\tt.Error(\"New or Reset did not set int32:\", *pb.F_Int32Defaulted)\n\t}\n\tif pb.F_Int64Defaulted != nil {\n\t\tt.Error(\"New or Reset did not set int64:\", *pb.F_Int64Defaulted)\n\t}\n\tif pb.F_Fixed32Defaulted != nil {\n\t\tt.Error(\"New or Reset did not set fixed32:\", *pb.F_Fixed32Defaulted)\n\t}\n\tif pb.F_Fixed64Defaulted != nil {\n\t\tt.Error(\"New or Reset did not set fixed64:\", *pb.F_Fixed64Defaulted)\n\t}\n\tif pb.F_Uint32Defaulted != nil {\n\t\tt.Error(\"New or Reset did not set uint32:\", *pb.F_Uint32Defaulted)\n\t}\n\tif pb.F_Uint64Defaulted != nil {\n\t\tt.Error(\"New or Reset did not set uint64:\", *pb.F_Uint64Defaulted)\n\t}\n\tif pb.F_FloatDefaulted != nil {\n\t\tt.Error(\"New or Reset did not set float:\", *pb.F_FloatDefaulted)\n\t}\n\tif pb.F_DoubleDefaulted != nil {\n\t\tt.Error(\"New or Reset did not set double:\", *pb.F_DoubleDefaulted)\n\t}\n\tif pb.F_StringDefaulted != nil {\n\t\tt.Error(\"New or Reset did not set string:\", *pb.F_StringDefaulted)\n\t}\n\tif pb.F_BytesDefaulted != nil {\n\t\tt.Error(\"New or Reset did not set bytes:\", string(pb.F_BytesDefaulted))\n\t}\n\tif pb.F_Sint32Defaulted != nil {\n\t\tt.Error(\"New or Reset did not set int32:\", *pb.F_Sint32Defaulted)\n\t}\n\tif pb.F_Sint64Defaulted != nil {\n\t\tt.Error(\"New or Reset did not set int64:\", *pb.F_Sint64Defaulted)\n\t}\n}\n\n// Does Reset() reset?\nfunc TestReset(t *testing.T) {\n\tpb := initGoTest(true)\n\t// muck with some values\n\tpb.F_BoolDefaulted = Bool(false)\n\tpb.F_Int32Defaulted = Int32(237)\n\tpb.F_Int64Defaulted = Int64(12346)\n\tpb.F_Fixed32Defaulted = Uint32(32000)\n\tpb.F_Fixed64Defaulted = Uint64(666)\n\tpb.F_Uint32Defaulted = Uint32(323232)\n\tpb.F_Uint64Defaulted = nil\n\tpb.F_FloatDefaulted = nil\n\tpb.F_DoubleDefaulted = Float64(0)\n\tpb.F_StringDefaulted = String(\"gotcha\")\n\tpb.F_BytesDefaulted = []byte(\"asdfasdf\")\n\tpb.F_Sint32Defaulted = Int32(123)\n\tpb.F_Sint64Defaulted = Int64(789)\n\tpb.Reset()\n\tcheckInitialized(pb, t)\n}\n\n// All required fields set, no defaults provided.\nfunc TestEncodeDecode1(t *testing.T) {\n\tpb := initGoTest(false)\n\toverify(t, pb,\n\t\t\"0807\"+ // field 1, encoding 0, value 7\n\t\t\t\"220d\"+\"0a056c6162656c120474797065\"+ // field 4, encoding 2 (GoTestField)\n\t\t\t\"5001\"+ // field 10, encoding 0, value 1\n\t\t\t\"5803\"+ // field 11, encoding 0, value 3\n\t\t\t\"6006\"+ // field 12, encoding 0, value 6\n\t\t\t\"6d20000000\"+ // field 13, encoding 5, value 0x20\n\t\t\t\"714000000000000000\"+ // field 14, encoding 1, value 0x40\n\t\t\t\"78a019\"+ // field 15, encoding 0, value 0xca0 = 3232\n\t\t\t\"8001c032\"+ // field 16, encoding 0, value 0x1940 = 6464\n\t\t\t\"8d0100004a45\"+ // field 17, encoding 5, value 3232.0\n\t\t\t\"9101000000000040b940\"+ // field 18, encoding 1, value 6464.0\n\t\t\t\"9a0106\"+\"737472696e67\"+ // field 19, encoding 2, string \"string\"\n\t\t\t\"b304\"+ // field 70, encoding 3, start group\n\t\t\t\"ba0408\"+\"7265717569726564\"+ // field 71, encoding 2, string \"required\"\n\t\t\t\"b404\"+ // field 70, encoding 4, end group\n\t\t\t\"aa0605\"+\"6279746573\"+ // field 101, encoding 2, string \"bytes\"\n\t\t\t\"b0063f\"+ // field 102, encoding 0, 0x3f zigzag32\n\t\t\t\"b8067f\") // field 103, encoding 0, 0x7f zigzag64\n}\n\n// All required fields set, defaults provided.\nfunc TestEncodeDecode2(t *testing.T) {\n\tpb := initGoTest(true)\n\toverify(t, 
pb,\n\t\t\"0807\"+ // field 1, encoding 0, value 7\n\t\t\t\"220d\"+\"0a056c6162656c120474797065\"+ // field 4, encoding 2 (GoTestField)\n\t\t\t\"5001\"+ // field 10, encoding 0, value 1\n\t\t\t\"5803\"+ // field 11, encoding 0, value 3\n\t\t\t\"6006\"+ // field 12, encoding 0, value 6\n\t\t\t\"6d20000000\"+ // field 13, encoding 5, value 32\n\t\t\t\"714000000000000000\"+ // field 14, encoding 1, value 64\n\t\t\t\"78a019\"+ // field 15, encoding 0, value 3232\n\t\t\t\"8001c032\"+ // field 16, encoding 0, value 6464\n\t\t\t\"8d0100004a45\"+ // field 17, encoding 5, value 3232.0\n\t\t\t\"9101000000000040b940\"+ // field 18, encoding 1, value 6464.0\n\t\t\t\"9a0106\"+\"737472696e67\"+ // field 19, encoding 2 string \"string\"\n\t\t\t\"c00201\"+ // field 40, encoding 0, value 1\n\t\t\t\"c80220\"+ // field 41, encoding 0, value 32\n\t\t\t\"d00240\"+ // field 42, encoding 0, value 64\n\t\t\t\"dd0240010000\"+ // field 43, encoding 5, value 320\n\t\t\t\"e1028002000000000000\"+ // field 44, encoding 1, value 640\n\t\t\t\"e8028019\"+ // field 45, encoding 0, value 3200\n\t\t\t\"f0028032\"+ // field 46, encoding 0, value 6400\n\t\t\t\"fd02e0659948\"+ // field 47, encoding 5, value 314159.0\n\t\t\t\"81030000000050971041\"+ // field 48, encoding 1, value 271828.0\n\t\t\t\"8a0310\"+\"68656c6c6f2c2022776f726c6421220a\"+ // field 49, encoding 2 string \"hello, \\\"world!\\\"\\n\"\n\t\t\t\"b304\"+ // start group field 70 level 1\n\t\t\t\"ba0408\"+\"7265717569726564\"+ // field 71, encoding 2, string \"required\"\n\t\t\t\"b404\"+ // end group field 70 level 1\n\t\t\t\"aa0605\"+\"6279746573\"+ // field 101, encoding 2 string \"bytes\"\n\t\t\t\"b0063f\"+ // field 102, encoding 0, 0x3f zigzag32\n\t\t\t\"b8067f\"+ // field 103, encoding 0, 0x7f zigzag64\n\t\t\t\"8a1907\"+\"4269676e6f7365\"+ // field 401, encoding 2, string \"Bignose\"\n\t\t\t\"90193f\"+ // field 402, encoding 0, value 63\n\t\t\t\"98197f\") // field 403, encoding 0, value 127\n\n}\n\n// All default fields set to their default value by hand\nfunc TestEncodeDecode3(t *testing.T) {\n\tpb := initGoTest(false)\n\tpb.F_BoolDefaulted = Bool(true)\n\tpb.F_Int32Defaulted = Int32(32)\n\tpb.F_Int64Defaulted = Int64(64)\n\tpb.F_Fixed32Defaulted = Uint32(320)\n\tpb.F_Fixed64Defaulted = Uint64(640)\n\tpb.F_Uint32Defaulted = Uint32(3200)\n\tpb.F_Uint64Defaulted = Uint64(6400)\n\tpb.F_FloatDefaulted = Float32(314159)\n\tpb.F_DoubleDefaulted = Float64(271828)\n\tpb.F_StringDefaulted = String(\"hello, \\\"world!\\\"\\n\")\n\tpb.F_BytesDefaulted = []byte(\"Bignose\")\n\tpb.F_Sint32Defaulted = Int32(-32)\n\tpb.F_Sint64Defaulted = Int64(-64)\n\n\toverify(t, pb,\n\t\t\"0807\"+ // field 1, encoding 0, value 7\n\t\t\t\"220d\"+\"0a056c6162656c120474797065\"+ // field 4, encoding 2 (GoTestField)\n\t\t\t\"5001\"+ // field 10, encoding 0, value 1\n\t\t\t\"5803\"+ // field 11, encoding 0, value 3\n\t\t\t\"6006\"+ // field 12, encoding 0, value 6\n\t\t\t\"6d20000000\"+ // field 13, encoding 5, value 32\n\t\t\t\"714000000000000000\"+ // field 14, encoding 1, value 64\n\t\t\t\"78a019\"+ // field 15, encoding 0, value 3232\n\t\t\t\"8001c032\"+ // field 16, encoding 0, value 6464\n\t\t\t\"8d0100004a45\"+ // field 17, encoding 5, value 3232.0\n\t\t\t\"9101000000000040b940\"+ // field 18, encoding 1, value 6464.0\n\t\t\t\"9a0106\"+\"737472696e67\"+ // field 19, encoding 2 string \"string\"\n\t\t\t\"c00201\"+ // field 40, encoding 0, value 1\n\t\t\t\"c80220\"+ // field 41, encoding 0, value 32\n\t\t\t\"d00240\"+ // field 42, encoding 0, value 64\n\t\t\t\"dd0240010000\"+ // field 43, 
encoding 5, value 320\n\t\t\t\"e1028002000000000000\"+ // field 44, encoding 1, value 640\n\t\t\t\"e8028019\"+ // field 45, encoding 0, value 3200\n\t\t\t\"f0028032\"+ // field 46, encoding 0, value 6400\n\t\t\t\"fd02e0659948\"+ // field 47, encoding 5, value 314159.0\n\t\t\t\"81030000000050971041\"+ // field 48, encoding 1, value 271828.0\n\t\t\t\"8a0310\"+\"68656c6c6f2c2022776f726c6421220a\"+ // field 49, encoding 2 string \"hello, \\\"world!\\\"\\n\"\n\t\t\t\"b304\"+ // start group field 70 level 1\n\t\t\t\"ba0408\"+\"7265717569726564\"+ // field 71, encoding 2, string \"required\"\n\t\t\t\"b404\"+ // end group field 70 level 1\n\t\t\t\"aa0605\"+\"6279746573\"+ // field 101, encoding 2 string \"bytes\"\n\t\t\t\"b0063f\"+ // field 102, encoding 0, 0x3f zigzag32\n\t\t\t\"b8067f\"+ // field 103, encoding 0, 0x7f zigzag64\n\t\t\t\"8a1907\"+\"4269676e6f7365\"+ // field 401, encoding 2, string \"Bignose\"\n\t\t\t\"90193f\"+ // field 402, encoding 0, value 63\n\t\t\t\"98197f\") // field 403, encoding 0, value 127\n\n}\n\n// All required fields set, defaults provided, all non-defaulted optional fields have values.\nfunc TestEncodeDecode4(t *testing.T) {\n\tpb := initGoTest(true)\n\tpb.Table = String(\"hello\")\n\tpb.Param = Int32(7)\n\tpb.OptionalField = initGoTestField()\n\tpb.F_BoolOptional = Bool(true)\n\tpb.F_Int32Optional = Int32(32)\n\tpb.F_Int64Optional = Int64(64)\n\tpb.F_Fixed32Optional = Uint32(3232)\n\tpb.F_Fixed64Optional = Uint64(6464)\n\tpb.F_Uint32Optional = Uint32(323232)\n\tpb.F_Uint64Optional = Uint64(646464)\n\tpb.F_FloatOptional = Float32(32.)\n\tpb.F_DoubleOptional = Float64(64.)\n\tpb.F_StringOptional = String(\"hello\")\n\tpb.F_BytesOptional = []byte(\"Bignose\")\n\tpb.F_Sint32Optional = Int32(-32)\n\tpb.F_Sint64Optional = Int64(-64)\n\tpb.Optionalgroup = initGoTest_OptionalGroup()\n\n\toverify(t, pb,\n\t\t\"0807\"+ // field 1, encoding 0, value 7\n\t\t\t\"1205\"+\"68656c6c6f\"+ // field 2, encoding 2, string \"hello\"\n\t\t\t\"1807\"+ // field 3, encoding 0, value 7\n\t\t\t\"220d\"+\"0a056c6162656c120474797065\"+ // field 4, encoding 2 (GoTestField)\n\t\t\t\"320d\"+\"0a056c6162656c120474797065\"+ // field 6, encoding 2 (GoTestField)\n\t\t\t\"5001\"+ // field 10, encoding 0, value 1\n\t\t\t\"5803\"+ // field 11, encoding 0, value 3\n\t\t\t\"6006\"+ // field 12, encoding 0, value 6\n\t\t\t\"6d20000000\"+ // field 13, encoding 5, value 32\n\t\t\t\"714000000000000000\"+ // field 14, encoding 1, value 64\n\t\t\t\"78a019\"+ // field 15, encoding 0, value 3232\n\t\t\t\"8001c032\"+ // field 16, encoding 0, value 6464\n\t\t\t\"8d0100004a45\"+ // field 17, encoding 5, value 3232.0\n\t\t\t\"9101000000000040b940\"+ // field 18, encoding 1, value 6464.0\n\t\t\t\"9a0106\"+\"737472696e67\"+ // field 19, encoding 2 string \"string\"\n\t\t\t\"f00101\"+ // field 30, encoding 0, value 1\n\t\t\t\"f80120\"+ // field 31, encoding 0, value 32\n\t\t\t\"800240\"+ // field 32, encoding 0, value 64\n\t\t\t\"8d02a00c0000\"+ // field 33, encoding 5, value 3232\n\t\t\t\"91024019000000000000\"+ // field 34, encoding 1, value 6464\n\t\t\t\"9802a0dd13\"+ // field 35, encoding 0, value 323232\n\t\t\t\"a002c0ba27\"+ // field 36, encoding 0, value 646464\n\t\t\t\"ad0200000042\"+ // field 37, encoding 5, value 32.0\n\t\t\t\"b1020000000000005040\"+ // field 38, encoding 1, value 64.0\n\t\t\t\"ba0205\"+\"68656c6c6f\"+ // field 39, encoding 2, string \"hello\"\n\t\t\t\"c00201\"+ // field 40, encoding 0, value 1\n\t\t\t\"c80220\"+ // field 41, encoding 0, value 32\n\t\t\t\"d00240\"+ // field 42, encoding 0, 
value 64\n\t\t\t\"dd0240010000\"+ // field 43, encoding 5, value 320\n\t\t\t\"e1028002000000000000\"+ // field 44, encoding 1, value 640\n\t\t\t\"e8028019\"+ // field 45, encoding 0, value 3200\n\t\t\t\"f0028032\"+ // field 46, encoding 0, value 6400\n\t\t\t\"fd02e0659948\"+ // field 47, encoding 5, value 314159.0\n\t\t\t\"81030000000050971041\"+ // field 48, encoding 1, value 271828.0\n\t\t\t\"8a0310\"+\"68656c6c6f2c2022776f726c6421220a\"+ // field 49, encoding 2 string \"hello, \\\"world!\\\"\\n\"\n\t\t\t\"b304\"+ // start group field 70 level 1\n\t\t\t\"ba0408\"+\"7265717569726564\"+ // field 71, encoding 2, string \"required\"\n\t\t\t\"b404\"+ // end group field 70 level 1\n\t\t\t\"d305\"+ // start group field 90 level 1\n\t\t\t\"da0508\"+\"6f7074696f6e616c\"+ // field 91, encoding 2, string \"optional\"\n\t\t\t\"d405\"+ // end group field 90 level 1\n\t\t\t\"aa0605\"+\"6279746573\"+ // field 101, encoding 2 string \"bytes\"\n\t\t\t\"b0063f\"+ // field 102, encoding 0, 0x3f zigzag32\n\t\t\t\"b8067f\"+ // field 103, encoding 0, 0x7f zigzag64\n\t\t\t\"ea1207\"+\"4269676e6f7365\"+ // field 301, encoding 2, string \"Bignose\"\n\t\t\t\"f0123f\"+ // field 302, encoding 0, value 63\n\t\t\t\"f8127f\"+ // field 303, encoding 0, value 127\n\t\t\t\"8a1907\"+\"4269676e6f7365\"+ // field 401, encoding 2, string \"Bignose\"\n\t\t\t\"90193f\"+ // field 402, encoding 0, value 63\n\t\t\t\"98197f\") // field 403, encoding 0, value 127\n\n}\n\n// All required fields set, defaults provided, all repeated fields given two values.\nfunc TestEncodeDecode5(t *testing.T) {\n\tpb := initGoTest(true)\n\tpb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()}\n\tpb.F_BoolRepeated = []bool{false, true}\n\tpb.F_Int32Repeated = []int32{32, 33}\n\tpb.F_Int64Repeated = []int64{64, 65}\n\tpb.F_Fixed32Repeated = []uint32{3232, 3333}\n\tpb.F_Fixed64Repeated = []uint64{6464, 6565}\n\tpb.F_Uint32Repeated = []uint32{323232, 333333}\n\tpb.F_Uint64Repeated = []uint64{646464, 656565}\n\tpb.F_FloatRepeated = []float32{32., 33.}\n\tpb.F_DoubleRepeated = []float64{64., 65.}\n\tpb.F_StringRepeated = []string{\"hello\", \"sailor\"}\n\tpb.F_BytesRepeated = [][]byte{[]byte(\"big\"), []byte(\"nose\")}\n\tpb.F_Sint32Repeated = []int32{32, -32}\n\tpb.F_Sint64Repeated = []int64{64, -64}\n\tpb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()}\n\n\toverify(t, pb,\n\t\t\"0807\"+ // field 1, encoding 0, value 7\n\t\t\t\"220d\"+\"0a056c6162656c120474797065\"+ // field 4, encoding 2 (GoTestField)\n\t\t\t\"2a0d\"+\"0a056c6162656c120474797065\"+ // field 5, encoding 2 (GoTestField)\n\t\t\t\"2a0d\"+\"0a056c6162656c120474797065\"+ // field 5, encoding 2 (GoTestField)\n\t\t\t\"5001\"+ // field 10, encoding 0, value 1\n\t\t\t\"5803\"+ // field 11, encoding 0, value 3\n\t\t\t\"6006\"+ // field 12, encoding 0, value 6\n\t\t\t\"6d20000000\"+ // field 13, encoding 5, value 32\n\t\t\t\"714000000000000000\"+ // field 14, encoding 1, value 64\n\t\t\t\"78a019\"+ // field 15, encoding 0, value 3232\n\t\t\t\"8001c032\"+ // field 16, encoding 0, value 6464\n\t\t\t\"8d0100004a45\"+ // field 17, encoding 5, value 3232.0\n\t\t\t\"9101000000000040b940\"+ // field 18, encoding 1, value 6464.0\n\t\t\t\"9a0106\"+\"737472696e67\"+ // field 19, encoding 2 string \"string\"\n\t\t\t\"a00100\"+ // field 20, encoding 0, value 0\n\t\t\t\"a00101\"+ // field 20, encoding 0, value 1\n\t\t\t\"a80120\"+ // field 21, encoding 0, value 32\n\t\t\t\"a80121\"+ // field 21, encoding 0, value 33\n\t\t\t\"b00140\"+ // field 
22, encoding 0, value 64\n\t\t\t\"b00141\"+ // field 22, encoding 0, value 65\n\t\t\t\"bd01a00c0000\"+ // field 23, encoding 5, value 3232\n\t\t\t\"bd01050d0000\"+ // field 23, encoding 5, value 3333\n\t\t\t\"c1014019000000000000\"+ // field 24, encoding 1, value 6464\n\t\t\t\"c101a519000000000000\"+ // field 24, encoding 1, value 6565\n\t\t\t\"c801a0dd13\"+ // field 25, encoding 0, value 323232\n\t\t\t\"c80195ac14\"+ // field 25, encoding 0, value 333333\n\t\t\t\"d001c0ba27\"+ // field 26, encoding 0, value 646464\n\t\t\t\"d001b58928\"+ // field 26, encoding 0, value 656565\n\t\t\t\"dd0100000042\"+ // field 27, encoding 5, value 32.0\n\t\t\t\"dd0100000442\"+ // field 27, encoding 5, value 33.0\n\t\t\t\"e1010000000000005040\"+ // field 28, encoding 1, value 64.0\n\t\t\t\"e1010000000000405040\"+ // field 28, encoding 1, value 65.0\n\t\t\t\"ea0105\"+\"68656c6c6f\"+ // field 29, encoding 2, string \"hello\"\n\t\t\t\"ea0106\"+\"7361696c6f72\"+ // field 29, encoding 2, string \"sailor\"\n\t\t\t\"c00201\"+ // field 40, encoding 0, value 1\n\t\t\t\"c80220\"+ // field 41, encoding 0, value 32\n\t\t\t\"d00240\"+ // field 42, encoding 0, value 64\n\t\t\t\"dd0240010000\"+ // field 43, encoding 5, value 320\n\t\t\t\"e1028002000000000000\"+ // field 44, encoding 1, value 640\n\t\t\t\"e8028019\"+ // field 45, encoding 0, value 3200\n\t\t\t\"f0028032\"+ // field 46, encoding 0, value 6400\n\t\t\t\"fd02e0659948\"+ // field 47, encoding 5, value 314159.0\n\t\t\t\"81030000000050971041\"+ // field 48, encoding 1, value 271828.0\n\t\t\t\"8a0310\"+\"68656c6c6f2c2022776f726c6421220a\"+ // field 49, encoding 2 string \"hello, \\\"world!\\\"\\n\"\n\t\t\t\"b304\"+ // start group field 70 level 1\n\t\t\t\"ba0408\"+\"7265717569726564\"+ // field 71, encoding 2, string \"required\"\n\t\t\t\"b404\"+ // end group field 70 level 1\n\t\t\t\"8305\"+ // start group field 80 level 1\n\t\t\t\"8a0508\"+\"7265706561746564\"+ // field 81, encoding 2, string \"repeated\"\n\t\t\t\"8405\"+ // end group field 80 level 1\n\t\t\t\"8305\"+ // start group field 80 level 1\n\t\t\t\"8a0508\"+\"7265706561746564\"+ // field 81, encoding 2, string \"repeated\"\n\t\t\t\"8405\"+ // end group field 80 level 1\n\t\t\t\"aa0605\"+\"6279746573\"+ // field 101, encoding 2 string \"bytes\"\n\t\t\t\"b0063f\"+ // field 102, encoding 0, 0x3f zigzag32\n\t\t\t\"b8067f\"+ // field 103, encoding 0, 0x7f zigzag64\n\t\t\t\"ca0c03\"+\"626967\"+ // field 201, encoding 2, string \"big\"\n\t\t\t\"ca0c04\"+\"6e6f7365\"+ // field 201, encoding 2, string \"nose\"\n\t\t\t\"d00c40\"+ // field 202, encoding 0, value 32\n\t\t\t\"d00c3f\"+ // field 202, encoding 0, value -32\n\t\t\t\"d80c8001\"+ // field 203, encoding 0, value 64\n\t\t\t\"d80c7f\"+ // field 203, encoding 0, value -64\n\t\t\t\"8a1907\"+\"4269676e6f7365\"+ // field 401, encoding 2, string \"Bignose\"\n\t\t\t\"90193f\"+ // field 402, encoding 0, value 63\n\t\t\t\"98197f\") // field 403, encoding 0, value 127\n\n}\n\n// All required fields set, all packed repeated fields given two values.\nfunc TestEncodeDecode6(t *testing.T) {\n\tpb := initGoTest(false)\n\tpb.F_BoolRepeatedPacked = []bool{false, true}\n\tpb.F_Int32RepeatedPacked = []int32{32, 33}\n\tpb.F_Int64RepeatedPacked = []int64{64, 65}\n\tpb.F_Fixed32RepeatedPacked = []uint32{3232, 3333}\n\tpb.F_Fixed64RepeatedPacked = []uint64{6464, 6565}\n\tpb.F_Uint32RepeatedPacked = []uint32{323232, 333333}\n\tpb.F_Uint64RepeatedPacked = []uint64{646464, 656565}\n\tpb.F_FloatRepeatedPacked = []float32{32., 33.}\n\tpb.F_DoubleRepeatedPacked = []float64{64., 
65.}\n\tpb.F_Sint32RepeatedPacked = []int32{32, -32}\n\tpb.F_Sint64RepeatedPacked = []int64{64, -64}\n\n\toverify(t, pb,\n\t\t\"0807\"+ // field 1, encoding 0, value 7\n\t\t\t\"220d\"+\"0a056c6162656c120474797065\"+ // field 4, encoding 2 (GoTestField)\n\t\t\t\"5001\"+ // field 10, encoding 0, value 1\n\t\t\t\"5803\"+ // field 11, encoding 0, value 3\n\t\t\t\"6006\"+ // field 12, encoding 0, value 6\n\t\t\t\"6d20000000\"+ // field 13, encoding 5, value 32\n\t\t\t\"714000000000000000\"+ // field 14, encoding 1, value 64\n\t\t\t\"78a019\"+ // field 15, encoding 0, value 3232\n\t\t\t\"8001c032\"+ // field 16, encoding 0, value 6464\n\t\t\t\"8d0100004a45\"+ // field 17, encoding 5, value 3232.0\n\t\t\t\"9101000000000040b940\"+ // field 18, encoding 1, value 6464.0\n\t\t\t\"9a0106\"+\"737472696e67\"+ // field 19, encoding 2 string \"string\"\n\t\t\t\"9203020001\"+ // field 50, encoding 2, 2 bytes, value 0, value 1\n\t\t\t\"9a03022021\"+ // field 51, encoding 2, 2 bytes, value 32, value 33\n\t\t\t\"a203024041\"+ // field 52, encoding 2, 2 bytes, value 64, value 65\n\t\t\t\"aa0308\"+ // field 53, encoding 2, 8 bytes\n\t\t\t\"a00c0000050d0000\"+ // value 3232, value 3333\n\t\t\t\"b20310\"+ // field 54, encoding 2, 16 bytes\n\t\t\t\"4019000000000000a519000000000000\"+ // value 6464, value 6565\n\t\t\t\"ba0306\"+ // field 55, encoding 2, 6 bytes\n\t\t\t\"a0dd1395ac14\"+ // value 323232, value 333333\n\t\t\t\"c20306\"+ // field 56, encoding 2, 6 bytes\n\t\t\t\"c0ba27b58928\"+ // value 646464, value 656565\n\t\t\t\"ca0308\"+ // field 57, encoding 2, 8 bytes\n\t\t\t\"0000004200000442\"+ // value 32.0, value 33.0\n\t\t\t\"d20310\"+ // field 58, encoding 2, 16 bytes\n\t\t\t\"00000000000050400000000000405040\"+ // value 64.0, value 65.0\n\t\t\t\"b304\"+ // start group field 70 level 1\n\t\t\t\"ba0408\"+\"7265717569726564\"+ // field 71, encoding 2, string \"required\"\n\t\t\t\"b404\"+ // end group field 70 level 1\n\t\t\t\"aa0605\"+\"6279746573\"+ // field 101, encoding 2 string \"bytes\"\n\t\t\t\"b0063f\"+ // field 102, encoding 0, 0x3f zigzag32\n\t\t\t\"b8067f\"+ // field 103, encoding 0, 0x7f zigzag64\n\t\t\t\"b21f02\"+ // field 502, encoding 2, 2 bytes\n\t\t\t\"403f\"+ // value 32, value -32\n\t\t\t\"ba1f03\"+ // field 503, encoding 2, 3 bytes\n\t\t\t\"80017f\") // value 64, value -64\n}\n\n// Test that we can encode empty bytes fields.\nfunc TestEncodeDecodeBytes1(t *testing.T) {\n\tpb := initGoTest(false)\n\n\t// Create our bytes\n\tpb.F_BytesRequired = []byte{}\n\tpb.F_BytesRepeated = [][]byte{{}}\n\tpb.F_BytesOptional = []byte{}\n\n\td, err := Marshal(pb)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tpbd := new(GoTest)\n\tif err := Unmarshal(d, pbd); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 {\n\t\tt.Error(\"required empty bytes field is incorrect\")\n\t}\n\tif pbd.F_BytesRepeated == nil || len(pbd.F_BytesRepeated) == 1 && pbd.F_BytesRepeated[0] == nil {\n\t\tt.Error(\"repeated empty bytes field is incorrect\")\n\t}\n\tif pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 {\n\t\tt.Error(\"optional empty bytes field is incorrect\")\n\t}\n}\n\n// Test that we encode nil-valued fields of a repeated bytes field correctly.\n// Since entries in a repeated field cannot be nil, nil must mean empty value.\nfunc TestEncodeDecodeBytes2(t *testing.T) {\n\tpb := initGoTest(false)\n\n\t// Create our bytes\n\tpb.F_BytesRepeated = [][]byte{nil}\n\n\td, err := Marshal(pb)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\n\tpbd := new(GoTest)\n\tif 
err := Unmarshal(d, pbd); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tif len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil {\n\t\tt.Error(\"Unexpected value for repeated bytes field\")\n\t}\n}\n\n// Check that unrecognized fields are skipped during decoding and preserved in XXX_unrecognized.\nfunc TestSkippingUnrecognizedFields(t *testing.T) {\n\to := old()\n\tpb := initGoTestField()\n\n\t// Marshal it normally.\n\to.Marshal(pb)\n\n\t// Now create a GoSkipTest record.\n\tskip := &GoSkipTest{\n\t\tSkipInt32:   Int32(32),\n\t\tSkipFixed32: Uint32(3232),\n\t\tSkipFixed64: Uint64(6464),\n\t\tSkipString:  String(\"skipper\"),\n\t\tSkipgroup: &GoSkipTest_SkipGroup{\n\t\t\tGroupInt32:  Int32(75),\n\t\t\tGroupString: String(\"wxyz\"),\n\t\t},\n\t}\n\n\t// Marshal it into the same buffer.\n\to.Marshal(skip)\n\n\tpbd := new(GoTestField)\n\to.Unmarshal(pbd)\n\n\t// The XXX_unrecognized field should be a marshaling of GoSkipTest.\n\tskipd := new(GoSkipTest)\n\n\to.SetBuf(pbd.XXX_unrecognized)\n\to.Unmarshal(skipd)\n\n\tif *skipd.SkipInt32 != *skip.SkipInt32 {\n\t\tt.Error(\"skip int32\", skipd.SkipInt32)\n\t}\n\tif *skipd.SkipFixed32 != *skip.SkipFixed32 {\n\t\tt.Error(\"skip fixed32\", skipd.SkipFixed32)\n\t}\n\tif *skipd.SkipFixed64 != *skip.SkipFixed64 {\n\t\tt.Error(\"skip fixed64\", skipd.SkipFixed64)\n\t}\n\tif *skipd.SkipString != *skip.SkipString {\n\t\tt.Error(\"skip string\", *skipd.SkipString)\n\t}\n\tif *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 {\n\t\tt.Error(\"skip group int32\", skipd.Skipgroup.GroupInt32)\n\t}\n\tif *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString {\n\t\tt.Error(\"skip group string\", *skipd.Skipgroup.GroupString)\n\t}\n}\n\n// Check that unrecognized fields of a submessage are preserved.\nfunc TestSubmessageUnrecognizedFields(t *testing.T) {\n\tnm := &NewMessage{\n\t\tNested: &NewMessage_Nested{\n\t\t\tName:      String(\"Nigel\"),\n\t\t\tFoodGroup: String(\"carbs\"),\n\t\t},\n\t}\n\tb, err := Marshal(nm)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal of NewMessage: %v\", err)\n\t}\n\n\t// Unmarshal into an OldMessage.\n\tom := new(OldMessage)\n\tif err := Unmarshal(b, om); err != nil {\n\t\tt.Fatalf(\"Unmarshal to OldMessage: %v\", err)\n\t}\n\texp := &OldMessage{\n\t\tNested: &OldMessage_Nested{\n\t\t\tName: String(\"Nigel\"),\n\t\t\t// normal protocol buffer users should not do this\n\t\t\tXXX_unrecognized: []byte(\"\\x12\\x05carbs\"),\n\t\t},\n\t}\n\tif !Equal(om, exp) {\n\t\tt.Errorf(\"om = %v, want %v\", om, exp)\n\t}\n\n\t// Clone the OldMessage.\n\tom = Clone(om).(*OldMessage)\n\tif !Equal(om, exp) {\n\t\tt.Errorf(\"Clone(om) = %v, want %v\", om, exp)\n\t}\n\n\t// Marshal the OldMessage, then unmarshal it into an empty NewMessage.\n\tif b, err = Marshal(om); err != nil {\n\t\tt.Fatalf(\"Marshal of OldMessage: %v\", err)\n\t}\n\tt.Logf(\"Marshal(%v) -> %q\", om, b)\n\tnm2 := new(NewMessage)\n\tif err := Unmarshal(b, nm2); err != nil {\n\t\tt.Fatalf(\"Unmarshal to NewMessage: %v\", err)\n\t}\n\tif !Equal(nm, nm2) {\n\t\tt.Errorf(\"NewMessage round-trip: %v => %v\", nm, nm2)\n\t}\n}\n\n// Check that an int32 field can be upgraded to an int64 field.\nfunc TestNegativeInt32(t *testing.T) {\n\tom := &OldMessage{\n\t\tNum: Int32(-1),\n\t}\n\tb, err := Marshal(om)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal of OldMessage: %v\", err)\n\t}\n\n\t// Check the size. 
It should be 11 bytes;\n\t// 1 for the field/wire type, and 10 for the negative number.\n\tif len(b) != 11 {\n\t\tt.Errorf(\"%v marshaled as %q, wanted 11 bytes\", om, b)\n\t}\n\n\t// Unmarshal into a NewMessage.\n\tnm := new(NewMessage)\n\tif err := Unmarshal(b, nm); err != nil {\n\t\tt.Fatalf(\"Unmarshal to NewMessage: %v\", err)\n\t}\n\twant := &NewMessage{\n\t\tNum: Int64(-1),\n\t}\n\tif !Equal(nm, want) {\n\t\tt.Errorf(\"nm = %v, want %v\", nm, want)\n\t}\n}\n\n// Check that we can grow an array (repeated field) to have many elements.\n// This test doesn't depend only on our encoding; for variety, it makes sure\n// we create, encode, and decode the correct contents explicitly.  It's therefore\n// a bit messier.\n// This test also uses (and hence tests) the Marshal/Unmarshal functions\n// instead of the methods.\nfunc TestBigRepeated(t *testing.T) {\n\tpb := initGoTest(true)\n\n\t// Create the arrays\n\tconst N = 50 // Internally the library starts much smaller.\n\tpb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N)\n\tpb.F_Sint64Repeated = make([]int64, N)\n\tpb.F_Sint32Repeated = make([]int32, N)\n\tpb.F_BytesRepeated = make([][]byte, N)\n\tpb.F_StringRepeated = make([]string, N)\n\tpb.F_DoubleRepeated = make([]float64, N)\n\tpb.F_FloatRepeated = make([]float32, N)\n\tpb.F_Uint64Repeated = make([]uint64, N)\n\tpb.F_Uint32Repeated = make([]uint32, N)\n\tpb.F_Fixed64Repeated = make([]uint64, N)\n\tpb.F_Fixed32Repeated = make([]uint32, N)\n\tpb.F_Int64Repeated = make([]int64, N)\n\tpb.F_Int32Repeated = make([]int32, N)\n\tpb.F_BoolRepeated = make([]bool, N)\n\tpb.RepeatedField = make([]*GoTestField, N)\n\n\t// Fill in the arrays with checkable values.\n\tigtf := initGoTestField()\n\tigtrg := initGoTest_RepeatedGroup()\n\tfor i := 0; i < N; i++ {\n\t\tpb.Repeatedgroup[i] = igtrg\n\t\tpb.F_Sint64Repeated[i] = int64(i)\n\t\tpb.F_Sint32Repeated[i] = int32(i)\n\t\ts := fmt.Sprint(i)\n\t\tpb.F_BytesRepeated[i] = []byte(s)\n\t\tpb.F_StringRepeated[i] = s\n\t\tpb.F_DoubleRepeated[i] = float64(i)\n\t\tpb.F_FloatRepeated[i] = float32(i)\n\t\tpb.F_Uint64Repeated[i] = uint64(i)\n\t\tpb.F_Uint32Repeated[i] = uint32(i)\n\t\tpb.F_Fixed64Repeated[i] = uint64(i)\n\t\tpb.F_Fixed32Repeated[i] = uint32(i)\n\t\tpb.F_Int64Repeated[i] = int64(i)\n\t\tpb.F_Int32Repeated[i] = int32(i)\n\t\tpb.F_BoolRepeated[i] = i%2 == 0\n\t\tpb.RepeatedField[i] = igtf\n\t}\n\n\t// Marshal.\n\tbuf, _ := Marshal(pb)\n\n\t// Now test Unmarshal by recreating the original buffer.\n\tpbd := new(GoTest)\n\tUnmarshal(buf, pbd)\n\n\t// Check the checkable values\n\tfor i := uint64(0); i < N; i++ {\n\t\tif pbd.Repeatedgroup[i] == nil { // TODO: more checking?\n\t\t\tt.Error(\"pbd.Repeatedgroup bad\")\n\t\t}\n\t\tvar x uint64\n\t\tx = uint64(pbd.F_Sint64Repeated[i])\n\t\tif x != i {\n\t\t\tt.Error(\"pbd.F_Sint64Repeated bad\", x, i)\n\t\t}\n\t\tx = uint64(pbd.F_Sint32Repeated[i])\n\t\tif x != i {\n\t\t\tt.Error(\"pbd.F_Sint32Repeated bad\", x, i)\n\t\t}\n\t\ts := fmt.Sprint(i)\n\t\tequalbytes(pbd.F_BytesRepeated[i], []byte(s), t)\n\t\tif pbd.F_StringRepeated[i] != s {\n\t\t\tt.Error(\"pbd.F_StringRepeated bad\", pbd.F_StringRepeated[i], i)\n\t\t}\n\t\tx = uint64(pbd.F_DoubleRepeated[i])\n\t\tif x != i {\n\t\t\tt.Error(\"pbd.F_DoubleRepeated bad\", x, i)\n\t\t}\n\t\tx = uint64(pbd.F_FloatRepeated[i])\n\t\tif x != i {\n\t\t\tt.Error(\"pbd.F_FloatRepeated bad\", x, i)\n\t\t}\n\t\tx = pbd.F_Uint64Repeated[i]\n\t\tif x != i {\n\t\t\tt.Error(\"pbd.F_Uint64Repeated bad\", x, i)\n\t\t}\n\t\tx = uint64(pbd.F_Uint32Repeated[i])\n\t\tif x != i 
{\n\t\t\tt.Error(\"pbd.F_Uint32Repeated bad\", x, i)\n\t\t}\n\t\tx = pbd.F_Fixed64Repeated[i]\n\t\tif x != i {\n\t\t\tt.Error(\"pbd.F_Fixed64Repeated bad\", x, i)\n\t\t}\n\t\tx = uint64(pbd.F_Fixed32Repeated[i])\n\t\tif x != i {\n\t\t\tt.Error(\"pbd.F_Fixed32Repeated bad\", x, i)\n\t\t}\n\t\tx = uint64(pbd.F_Int64Repeated[i])\n\t\tif x != i {\n\t\t\tt.Error(\"pbd.F_Int64Repeated bad\", x, i)\n\t\t}\n\t\tx = uint64(pbd.F_Int32Repeated[i])\n\t\tif x != i {\n\t\t\tt.Error(\"pbd.F_Int32Repeated bad\", x, i)\n\t\t}\n\t\tif pbd.F_BoolRepeated[i] != (i%2 == 0) {\n\t\t\tt.Error(\"pbd.F_BoolRepeated bad\", x, i)\n\t\t}\n\t\tif pbd.RepeatedField[i] == nil { // TODO: more checking?\n\t\t\tt.Error(\"pbd.RepeatedField bad\")\n\t\t}\n\t}\n}\n\n// Verify we give a useful message when decoding to the wrong structure type.\nfunc TestTypeMismatch(t *testing.T) {\n\tpb1 := initGoTest(true)\n\n\t// Marshal\n\to := old()\n\to.Marshal(pb1)\n\n\t// Now Unmarshal it to the wrong type.\n\tpb2 := initGoTestField()\n\terr := o.Unmarshal(pb2)\n\tif err == nil {\n\t\tt.Error(\"expected error, got no error\")\n\t} else if !strings.Contains(err.Error(), \"bad wiretype\") {\n\t\tt.Error(\"expected bad wiretype error, got\", err)\n\t}\n}\n\nfunc encodeDecode(t *testing.T, in, out Message, msg string) {\n\tbuf, err := Marshal(in)\n\tif err != nil {\n\t\tt.Fatalf(\"failed marshaling %v: %v\", msg, err)\n\t}\n\tif err := Unmarshal(buf, out); err != nil {\n\t\tt.Fatalf(\"failed unmarshaling %v: %v\", msg, err)\n\t}\n}\n\nfunc TestPackedNonPackedDecoderSwitching(t *testing.T) {\n\tnp, p := new(NonPackedTest), new(PackedTest)\n\n\t// non-packed -> packed\n\tnp.A = []int32{0, 1, 1, 2, 3, 5}\n\tencodeDecode(t, np, p, \"non-packed -> packed\")\n\tif !reflect.DeepEqual(np.A, p.B) {\n\t\tt.Errorf(\"failed non-packed -> packed; np.A=%+v, p.B=%+v\", np.A, p.B)\n\t}\n\n\t// packed -> non-packed\n\tnp.Reset()\n\tp.B = []int32{3, 1, 4, 1, 5, 9}\n\tencodeDecode(t, p, np, \"packed -> non-packed\")\n\tif !reflect.DeepEqual(p.B, np.A) {\n\t\tt.Errorf(\"failed packed -> non-packed; p.B=%+v, np.A=%+v\", p.B, np.A)\n\t}\n}\n\nfunc TestProto1RepeatedGroup(t *testing.T) {\n\tpb := &MessageList{\n\t\tMessage: []*MessageList_Message{\n\t\t\t{\n\t\t\t\tName:  String(\"blah\"),\n\t\t\t\tCount: Int32(7),\n\t\t\t},\n\t\t\t// NOTE: pb.Message[1] is a nil\n\t\t\tnil,\n\t\t},\n\t}\n\n\to := old()\n\terr := o.Marshal(pb)\n\tif err == nil || !strings.Contains(err.Error(), \"repeated field Message has nil\") {\n\t\tt.Fatalf(\"unexpected or no error when marshaling: %v\", err)\n\t}\n}\n\n// Test that enums work.  Checks for a bug introduced by making enums\n// named types instead of int32: newInt32FromUint64 would crash with\n// a type mismatch in reflect.PointTo.\nfunc TestEnum(t *testing.T) {\n\tpb := new(GoEnum)\n\tpb.Foo = FOO_FOO1.Enum()\n\to := old()\n\tif err := o.Marshal(pb); err != nil {\n\t\tt.Fatal(\"error encoding enum:\", err)\n\t}\n\tpb1 := new(GoEnum)\n\tif err := o.Unmarshal(pb1); err != nil {\n\t\tt.Fatal(\"error decoding enum:\", err)\n\t}\n\tif *pb1.Foo != FOO_FOO1 {\n\t\tt.Error(\"expected 7 but got \", *pb1.Foo)\n\t}\n}\n\n// Enum types have String methods. 
Check that enum fields can be printed.\n// We don't care what the value actually is, just as long as it doesn't crash.\nfunc TestPrintingNilEnumFields(t *testing.T) {\n\tpb := new(GoEnum)\n\t_ = fmt.Sprintf(\"%+v\", pb)\n}\n\n// Verify that absent required fields cause Marshal/Unmarshal to return errors.\nfunc TestRequiredFieldEnforcement(t *testing.T) {\n\tpb := new(GoTestField)\n\t_, err := Marshal(pb)\n\tif err == nil {\n\t\tt.Error(\"marshal: expected error, got nil\")\n\t} else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), \"Label\") {\n\t\tt.Errorf(\"marshal: bad error type: %v\", err)\n\t}\n\n\t// A slightly sneaky, yet valid, proto. It encodes the same required field twice,\n\t// so simply counting the required fields is insufficient.\n\t// field 1, encoding 2, value \"hi\"\n\tbuf := []byte(\"\\x0A\\x02hi\\x0A\\x02hi\")\n\terr = Unmarshal(buf, pb)\n\tif err == nil {\n\t\tt.Error(\"unmarshal: expected error, got nil\")\n\t} else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), \"{Unknown}\") {\n\t\tt.Errorf(\"unmarshal: bad error type: %v\", err)\n\t}\n}\n\n// Verify that absent required fields in groups cause Marshal/Unmarshal to return errors.\nfunc TestRequiredFieldEnforcementGroups(t *testing.T) {\n\tpb := &GoTestRequiredGroupField{Group: &GoTestRequiredGroupField_Group{}}\n\tif _, err := Marshal(pb); err == nil {\n\t\tt.Error(\"marshal: expected error, got nil\")\n\t} else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), \"Group.Field\") {\n\t\tt.Errorf(\"marshal: bad error type: %v\", err)\n\t}\n\n\tbuf := []byte{11, 12}\n\tif err := Unmarshal(buf, pb); err == nil {\n\t\tt.Error(\"unmarshal: expected error, got nil\")\n\t} else if _, ok := err.(*RequiredNotSetError); !ok || !strings.Contains(err.Error(), \"Group.{Unknown}\") {\n\t\tt.Errorf(\"unmarshal: bad error type: %v\", err)\n\t}\n}\n\nfunc TestTypedNilMarshal(t *testing.T) {\n\t// A typed nil should return ErrNil and not crash.\n\t{\n\t\tvar m *GoEnum\n\t\tif _, err := Marshal(m); err != ErrNil {\n\t\t\tt.Errorf(\"Marshal(%#v): got %v, want ErrNil\", m, err)\n\t\t}\n\t}\n\n\t{\n\t\tm := &Communique{Union: &Communique_Msg{nil}}\n\t\tif _, err := Marshal(m); err == nil || err == ErrNil {\n\t\t\tt.Errorf(\"Marshal(%#v): got %v, want errOneofHasNil\", m, err)\n\t\t}\n\t}\n}\n\n// A type that implements the Marshaler interface, but is not nillable.\ntype nonNillableInt uint64\n\nfunc (nni nonNillableInt) Marshal() ([]byte, error) {\n\treturn EncodeVarint(uint64(nni)), nil\n}\n\ntype NNIMessage struct {\n\tnni nonNillableInt\n}\n\nfunc (*NNIMessage) Reset()         {}\nfunc (*NNIMessage) String() string { return \"\" }\nfunc (*NNIMessage) ProtoMessage()  {}\n\n// A type that implements the Marshaler interface and is nillable.\ntype nillableMessage struct {\n\tx uint64\n}\n\nfunc (nm *nillableMessage) Marshal() ([]byte, error) {\n\treturn EncodeVarint(nm.x), nil\n}\n\ntype NMMessage struct {\n\tnm *nillableMessage\n}\n\nfunc (*NMMessage) Reset()         {}\nfunc (*NMMessage) String() string { return \"\" }\nfunc (*NMMessage) ProtoMessage()  {}\n\n// Verify a type that uses the Marshaler interface, but has a nil pointer.\nfunc TestNilMarshaler(t *testing.T) {\n\t// Try a struct with a Marshaler field that is nil.\n\t// It should be directly marshable.\n\tnmm := new(NMMessage)\n\tif _, err := Marshal(nmm); err != nil {\n\t\tt.Error(\"unexpected error marshaling nmm: \", err)\n\t}\n\n\t// Try a struct with a Marshaler field that is not 
nillable.\n\tnnim := new(NNIMessage)\n\tnnim.nni = 7\n\tvar _ Marshaler = nnim.nni // verify it is truly a Marshaler\n\tif _, err := Marshal(nnim); err != nil {\n\t\tt.Error(\"unexpected error marshaling nnim: \", err)\n\t}\n}\n\nfunc TestAllSetDefaults(t *testing.T) {\n\t// Exercise SetDefaults with all scalar field types.\n\tm := &Defaults{\n\t\t// NaN != NaN, so override that here.\n\t\tF_Nan: Float32(1.7),\n\t}\n\texpected := &Defaults{\n\t\tF_Bool:    Bool(true),\n\t\tF_Int32:   Int32(32),\n\t\tF_Int64:   Int64(64),\n\t\tF_Fixed32: Uint32(320),\n\t\tF_Fixed64: Uint64(640),\n\t\tF_Uint32:  Uint32(3200),\n\t\tF_Uint64:  Uint64(6400),\n\t\tF_Float:   Float32(314159),\n\t\tF_Double:  Float64(271828),\n\t\tF_String:  String(`hello, \"world!\"` + \"\\n\"),\n\t\tF_Bytes:   []byte(\"Bignose\"),\n\t\tF_Sint32:  Int32(-32),\n\t\tF_Sint64:  Int64(-64),\n\t\tF_Enum:    Defaults_GREEN.Enum(),\n\t\tF_Pinf:    Float32(float32(math.Inf(1))),\n\t\tF_Ninf:    Float32(float32(math.Inf(-1))),\n\t\tF_Nan:     Float32(1.7),\n\t\tStrZero:   String(\"\"),\n\t}\n\tSetDefaults(m)\n\tif !Equal(m, expected) {\n\t\tt.Errorf(\"SetDefaults failed\\n got %v\\nwant %v\", m, expected)\n\t}\n}\n\nfunc TestSetDefaultsWithSetField(t *testing.T) {\n\t// Check that a set value is not overridden.\n\tm := &Defaults{\n\t\tF_Int32: Int32(12),\n\t}\n\tSetDefaults(m)\n\tif v := m.GetF_Int32(); v != 12 {\n\t\tt.Errorf(\"m.FInt32 = %v, want 12\", v)\n\t}\n}\n\nfunc TestSetDefaultsWithSubMessage(t *testing.T) {\n\tm := &OtherMessage{\n\t\tKey: Int64(123),\n\t\tInner: &InnerMessage{\n\t\t\tHost: String(\"gopher\"),\n\t\t},\n\t}\n\texpected := &OtherMessage{\n\t\tKey: Int64(123),\n\t\tInner: &InnerMessage{\n\t\t\tHost: String(\"gopher\"),\n\t\t\tPort: Int32(4000),\n\t\t},\n\t}\n\tSetDefaults(m)\n\tif !Equal(m, expected) {\n\t\tt.Errorf(\"\\n got %v\\nwant %v\", m, expected)\n\t}\n}\n\nfunc TestSetDefaultsWithRepeatedSubMessage(t *testing.T) {\n\tm := &MyMessage{\n\t\tRepInner: []*InnerMessage{{}},\n\t}\n\texpected := &MyMessage{\n\t\tRepInner: []*InnerMessage{{\n\t\t\tPort: Int32(4000),\n\t\t}},\n\t}\n\tSetDefaults(m)\n\tif !Equal(m, expected) {\n\t\tt.Errorf(\"\\n got %v\\nwant %v\", m, expected)\n\t}\n}\n\nfunc TestSetDefaultWithRepeatedNonMessage(t *testing.T) {\n\tm := &MyMessage{\n\t\tPet: []string{\"turtle\", \"wombat\"},\n\t}\n\texpected := Clone(m)\n\tSetDefaults(m)\n\tif !Equal(m, expected) {\n\t\tt.Errorf(\"\\n got %v\\nwant %v\", m, expected)\n\t}\n}\n\nfunc TestMaximumTagNumber(t *testing.T) {\n\tm := &MaxTag{\n\t\tLastField: String(\"natural goat essence\"),\n\t}\n\tbuf, err := Marshal(m)\n\tif err != nil {\n\t\tt.Fatalf(\"proto.Marshal failed: %v\", err)\n\t}\n\tm2 := new(MaxTag)\n\tif err := Unmarshal(buf, m2); err != nil {\n\t\tt.Fatalf(\"proto.Unmarshal failed: %v\", err)\n\t}\n\tif got, want := m2.GetLastField(), *m.LastField; got != want {\n\t\tt.Errorf(\"got %q, want %q\", got, want)\n\t}\n}\n\nfunc TestJSON(t *testing.T) {\n\tm := &MyMessage{\n\t\tCount: Int32(4),\n\t\tPet:   []string{\"bunny\", \"kitty\"},\n\t\tInner: &InnerMessage{\n\t\t\tHost: String(\"cauchy\"),\n\t\t},\n\t\tBikeshed: MyMessage_GREEN.Enum(),\n\t}\n\tconst expected = `{\"count\":4,\"pet\":[\"bunny\",\"kitty\"],\"inner\":{\"host\":\"cauchy\"},\"bikeshed\":1}`\n\n\tb, err := json.Marshal(m)\n\tif err != nil {\n\t\tt.Fatalf(\"json.Marshal failed: %v\", err)\n\t}\n\ts := string(b)\n\tif s != expected {\n\t\tt.Errorf(\"got  %s\\nwant %s\", s, expected)\n\t}\n\n\treceived := new(MyMessage)\n\tif err := json.Unmarshal(b, received); err != nil 
{\n\t\tt.Fatalf(\"json.Unmarshal failed: %v\", err)\n\t}\n\tif !Equal(received, m) {\n\t\tt.Fatalf(\"got %s, want %s\", received, m)\n\t}\n\n\t// Test unmarshalling of JSON with symbolic enum name.\n\tconst old = `{\"count\":4,\"pet\":[\"bunny\",\"kitty\"],\"inner\":{\"host\":\"cauchy\"},\"bikeshed\":\"GREEN\"}`\n\treceived.Reset()\n\tif err := json.Unmarshal([]byte(old), received); err != nil {\n\t\tt.Fatalf(\"json.Unmarshal failed: %v\", err)\n\t}\n\tif !Equal(received, m) {\n\t\tt.Fatalf(\"got %s, want %s\", received, m)\n\t}\n}\n\nfunc TestBadWireType(t *testing.T) {\n\tb := []byte{7<<3 | 6} // field 7, wire type 6\n\tpb := new(OtherMessage)\n\tif err := Unmarshal(b, pb); err == nil {\n\t\tt.Errorf(\"Unmarshal did not fail\")\n\t} else if !strings.Contains(err.Error(), \"unknown wire type\") {\n\t\tt.Errorf(\"wrong error: %v\", err)\n\t}\n}\n\nfunc TestBytesWithInvalidLength(t *testing.T) {\n\t// If a byte sequence has an invalid (negative) length, Unmarshal should not panic.\n\tb := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0}\n\tUnmarshal(b, new(MyMessage))\n}\n\nfunc TestLengthOverflow(t *testing.T) {\n\t// Overflowing a length should not panic.\n\tb := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01}\n\tUnmarshal(b, new(MyMessage))\n}\n\nfunc TestVarintOverflow(t *testing.T) {\n\t// Overflowing a 64-bit length should not be allowed.\n\tb := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01}\n\tif err := Unmarshal(b, new(MyMessage)); err == nil {\n\t\tt.Fatalf(\"Overflowed uint64 length without error\")\n\t}\n}\n\nfunc TestUnmarshalFuzz(t *testing.T) {\n\tconst N = 1000\n\tseed := time.Now().UnixNano()\n\tt.Logf(\"RNG seed is %d\", seed)\n\trng := rand.New(rand.NewSource(seed))\n\tbuf := make([]byte, 20)\n\tfor i := 0; i < N; i++ {\n\t\tfor j := range buf {\n\t\t\tbuf[j] = byte(rng.Intn(256))\n\t\t}\n\t\tfuzzUnmarshal(t, buf)\n\t}\n}\n\nfunc TestMergeMessages(t *testing.T) {\n\tpb := &MessageList{Message: []*MessageList_Message{{Name: String(\"x\"), Count: Int32(1)}}}\n\tdata, err := Marshal(pb)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal: %v\", err)\n\t}\n\n\tpb1 := new(MessageList)\n\tif err := Unmarshal(data, pb1); err != nil {\n\t\tt.Fatalf(\"first Unmarshal: %v\", err)\n\t}\n\tif err := Unmarshal(data, pb1); err != nil {\n\t\tt.Fatalf(\"second Unmarshal: %v\", err)\n\t}\n\tif len(pb1.Message) != 1 {\n\t\tt.Errorf(\"two Unmarshals produced %d Messages, want 1\", len(pb1.Message))\n\t}\n\n\tpb2 := new(MessageList)\n\tif err := UnmarshalMerge(data, pb2); err != nil {\n\t\tt.Fatalf(\"first UnmarshalMerge: %v\", err)\n\t}\n\tif err := UnmarshalMerge(data, pb2); err != nil {\n\t\tt.Fatalf(\"second UnmarshalMerge: %v\", err)\n\t}\n\tif len(pb2.Message) != 2 {\n\t\tt.Errorf(\"two UnmarshalMerges produced %d Messages, want 2\", len(pb2.Message))\n\t}\n}\n\nfunc TestExtensionMarshalOrder(t *testing.T) {\n\tm := &MyMessage{Count: Int(123)}\n\tif err := SetExtension(m, E_Ext_More, &Ext{Data: String(\"alpha\")}); err != nil {\n\t\tt.Fatalf(\"SetExtension: %v\", err)\n\t}\n\tif err := SetExtension(m, E_Ext_Text, String(\"aleph\")); err != nil {\n\t\tt.Fatalf(\"SetExtension: %v\", err)\n\t}\n\tif err := SetExtension(m, E_Ext_Number, Int32(1)); err != nil {\n\t\tt.Fatalf(\"SetExtension: %v\", err)\n\t}\n\n\t// Serialize m several times, and check we get the same bytes each time.\n\tvar orig []byte\n\tfor i := 0; i < 100; i++ {\n\t\tb, err := 
Marshal(m)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Marshal: %v\", err)\n\t\t}\n\t\tif i == 0 {\n\t\t\torig = b\n\t\t\tcontinue\n\t\t}\n\t\tif !bytes.Equal(b, orig) {\n\t\t\tt.Errorf(\"Bytes differ on attempt #%d\", i)\n\t\t}\n\t}\n}\n\n// Many extensions, because small maps might not iterate differently on each iteration.\nvar exts = []*ExtensionDesc{\n\tE_X201,\n\tE_X202,\n\tE_X203,\n\tE_X204,\n\tE_X205,\n\tE_X206,\n\tE_X207,\n\tE_X208,\n\tE_X209,\n\tE_X210,\n\tE_X211,\n\tE_X212,\n\tE_X213,\n\tE_X214,\n\tE_X215,\n\tE_X216,\n\tE_X217,\n\tE_X218,\n\tE_X219,\n\tE_X220,\n\tE_X221,\n\tE_X222,\n\tE_X223,\n\tE_X224,\n\tE_X225,\n\tE_X226,\n\tE_X227,\n\tE_X228,\n\tE_X229,\n\tE_X230,\n\tE_X231,\n\tE_X232,\n\tE_X233,\n\tE_X234,\n\tE_X235,\n\tE_X236,\n\tE_X237,\n\tE_X238,\n\tE_X239,\n\tE_X240,\n\tE_X241,\n\tE_X242,\n\tE_X243,\n\tE_X244,\n\tE_X245,\n\tE_X246,\n\tE_X247,\n\tE_X248,\n\tE_X249,\n\tE_X250,\n}\n\nfunc TestMessageSetMarshalOrder(t *testing.T) {\n\tm := &MyMessageSet{}\n\tfor _, x := range exts {\n\t\tif err := SetExtension(m, x, &Empty{}); err != nil {\n\t\t\tt.Fatalf(\"SetExtension: %v\", err)\n\t\t}\n\t}\n\n\tbuf, err := Marshal(m)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal: %v\", err)\n\t}\n\n\t// Serialize m several times, and check we get the same bytes each time.\n\tfor i := 0; i < 10; i++ {\n\t\tb1, err := Marshal(m)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Marshal: %v\", err)\n\t\t}\n\t\tif !bytes.Equal(b1, buf) {\n\t\t\tt.Errorf(\"Bytes differ on re-Marshal #%d\", i)\n\t\t}\n\n\t\tm2 := &MyMessageSet{}\n\t\tif err := Unmarshal(buf, m2); err != nil {\n\t\t\tt.Errorf(\"Unmarshal: %v\", err)\n\t\t}\n\t\tb2, err := Marshal(m2)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"re-Marshal: %v\", err)\n\t\t}\n\t\tif !bytes.Equal(b2, buf) {\n\t\t\tt.Errorf(\"Bytes differ on round-trip #%d\", i)\n\t\t}\n\t}\n}\n\nfunc TestUnmarshalMergesMessages(t *testing.T) {\n\t// If a nested message occurs twice in the input,\n\t// the fields should be merged when decoding.\n\ta := &OtherMessage{\n\t\tKey: Int64(123),\n\t\tInner: &InnerMessage{\n\t\t\tHost: String(\"polhode\"),\n\t\t\tPort: Int32(1234),\n\t\t},\n\t}\n\taData, err := Marshal(a)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal(a): %v\", err)\n\t}\n\tb := &OtherMessage{\n\t\tWeight: Float32(1.2),\n\t\tInner: &InnerMessage{\n\t\t\tHost:      String(\"herpolhode\"),\n\t\t\tConnected: Bool(true),\n\t\t},\n\t}\n\tbData, err := Marshal(b)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal(b): %v\", err)\n\t}\n\twant := &OtherMessage{\n\t\tKey:    Int64(123),\n\t\tWeight: Float32(1.2),\n\t\tInner: &InnerMessage{\n\t\t\tHost:      String(\"herpolhode\"),\n\t\t\tPort:      Int32(1234),\n\t\t\tConnected: Bool(true),\n\t\t},\n\t}\n\tgot := new(OtherMessage)\n\tif err := Unmarshal(append(aData, bData...), got); err != nil {\n\t\tt.Fatalf(\"Unmarshal: %v\", err)\n\t}\n\tif !Equal(got, want) {\n\t\tt.Errorf(\"\\n got %v\\nwant %v\", got, want)\n\t}\n}\n\nfunc TestEncodingSizes(t *testing.T) {\n\ttests := []struct {\n\t\tm Message\n\t\tn int\n\t}{\n\t\t{&Defaults{F_Int32: Int32(math.MaxInt32)}, 6},\n\t\t{&Defaults{F_Int32: Int32(math.MinInt32)}, 11},\n\t\t{&Defaults{F_Uint32: Uint32(uint32(math.MaxInt32) + 1)}, 6},\n\t\t{&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6},\n\t}\n\tfor _, test := range tests {\n\t\tb, err := Marshal(test.m)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Marshal(%v): %v\", test.m, err)\n\t\t\tcontinue\n\t\t}\n\t\tif len(b) != test.n {\n\t\t\tt.Errorf(\"Marshal(%v) yielded %d bytes, want %d bytes\", test.m, len(b), test.n)\n\t\t}\n\t}\n}\n\nfunc 
TestRequiredNotSetError(t *testing.T) {\n\tpb := initGoTest(false)\n\tpb.RequiredField.Label = nil\n\tpb.F_Int32Required = nil\n\tpb.F_Int64Required = nil\n\n\texpected := \"0807\" + // field 1, encoding 0, value 7\n\t\t\"2206\" + \"120474797065\" + // field 4, encoding 2 (GoTestField)\n\t\t\"5001\" + // field 10, encoding 0, value 1\n\t\t\"6d20000000\" + // field 13, encoding 5, value 0x20\n\t\t\"714000000000000000\" + // field 14, encoding 1, value 0x40\n\t\t\"78a019\" + // field 15, encoding 0, value 0xca0 = 3232\n\t\t\"8001c032\" + // field 16, encoding 0, value 0x1940 = 6464\n\t\t\"8d0100004a45\" + // field 17, encoding 5, value 3232.0\n\t\t\"9101000000000040b940\" + // field 18, encoding 1, value 6464.0\n\t\t\"9a0106\" + \"737472696e67\" + // field 19, encoding 2, string \"string\"\n\t\t\"b304\" + // field 70, encoding 3, start group\n\t\t\"ba0408\" + \"7265717569726564\" + // field 71, encoding 2, string \"required\"\n\t\t\"b404\" + // field 70, encoding 4, end group\n\t\t\"aa0605\" + \"6279746573\" + // field 101, encoding 2, string \"bytes\"\n\t\t\"b0063f\" + // field 102, encoding 0, 0x3f zigzag32\n\t\t\"b8067f\" // field 103, encoding 0, 0x7f zigzag64\n\n\to := old()\n\tbytes, err := Marshal(pb)\n\tif _, ok := err.(*RequiredNotSetError); !ok {\n\t\tt.Errorf(\"marshal-1 err = %v, want *RequiredNotSetError\", err)\n\t\to.DebugPrint(\"\", bytes)\n\t\tt.Fatalf(\"expected = %s\", expected)\n\t}\n\tif strings.Index(err.Error(), \"RequiredField.Label\") < 0 {\n\t\tt.Errorf(\"marshal-1 wrong err msg: %v\", err)\n\t}\n\tif !equal(bytes, expected, t) {\n\t\to.DebugPrint(\"neq 1\", bytes)\n\t\tt.Fatalf(\"expected = %s\", expected)\n\t}\n\n\t// Now test Unmarshal by recreating the original buffer.\n\tpbd := new(GoTest)\n\terr = Unmarshal(bytes, pbd)\n\tif _, ok := err.(*RequiredNotSetError); !ok {\n\t\to.DebugPrint(\"\", bytes)\n\t\tt.Fatalf(\"unmarshal err = %v, want *RequiredNotSetError\", err)\n\t}\n\tif strings.Index(err.Error(), \"RequiredField.{Unknown}\") < 0 {\n\t\tt.Errorf(\"unmarshal wrong err msg: %v\", err)\n\t}\n\tbytes, err = Marshal(pbd)\n\tif _, ok := err.(*RequiredNotSetError); !ok {\n\t\tt.Errorf(\"marshal-2 err = %v, want *RequiredNotSetError\", err)\n\t\to.DebugPrint(\"\", bytes)\n\t\tt.Fatalf(\"string = %s\", expected)\n\t}\n\tif strings.Index(err.Error(), \"RequiredField.Label\") < 0 {\n\t\tt.Errorf(\"marshal-2 wrong err msg: %v\", err)\n\t}\n\tif !equal(bytes, expected, t) {\n\t\to.DebugPrint(\"neq 2\", bytes)\n\t\tt.Fatalf(\"string = %s\", expected)\n\t}\n}\n\nfunc fuzzUnmarshal(t *testing.T, data []byte) {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\tt.Errorf(\"These bytes caused a panic: %+v\", data)\n\t\t\tt.Logf(\"Stack:\\n%s\", debug.Stack())\n\t\t\tt.FailNow()\n\t\t}\n\t}()\n\n\tpb := new(MyMessage)\n\tUnmarshal(data, pb)\n}\n\nfunc TestMapFieldMarshal(t *testing.T) {\n\tm := &MessageWithMap{\n\t\tNameMapping: map[int32]string{\n\t\t\t1: \"Rob\",\n\t\t\t4: \"Ian\",\n\t\t\t8: \"Dave\",\n\t\t},\n\t}\n\tb, err := Marshal(m)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal: %v\", err)\n\t}\n\n\t// b should be the concatenation of these three byte sequences in some order.\n\tparts := []string{\n\t\t\"\\n\\a\\b\\x01\\x12\\x03Rob\",\n\t\t\"\\n\\a\\b\\x04\\x12\\x03Ian\",\n\t\t\"\\n\\b\\b\\x08\\x12\\x04Dave\",\n\t}\n\tok := false\n\tfor i := range parts {\n\t\tfor j := range parts {\n\t\t\tif j == i {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor k := range parts {\n\t\t\t\tif k == i || k == j 
{\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\ttry := parts[i] + parts[j] + parts[k]\n\t\t\t\tif bytes.Equal(b, []byte(try)) {\n\t\t\t\t\tok = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tif !ok {\n\t\tt.Fatalf(\"Incorrect Marshal output.\\n got %q\\nwant %q (or a permutation of that)\", b, parts[0]+parts[1]+parts[2])\n\t}\n\tt.Logf(\"FYI b: %q\", b)\n\n\t(new(Buffer)).DebugPrint(\"Dump of b\", b)\n}\n\nfunc TestMapFieldRoundTrips(t *testing.T) {\n\tm := &MessageWithMap{\n\t\tNameMapping: map[int32]string{\n\t\t\t1: \"Rob\",\n\t\t\t4: \"Ian\",\n\t\t\t8: \"Dave\",\n\t\t},\n\t\tMsgMapping: map[int64]*FloatingPoint{\n\t\t\t0x7001: &FloatingPoint{F: Float64(2.0)},\n\t\t},\n\t\tByteMapping: map[bool][]byte{\n\t\t\tfalse: []byte(\"that's not right!\"),\n\t\t\ttrue:  []byte(\"aye, 'tis true!\"),\n\t\t},\n\t}\n\tb, err := Marshal(m)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal: %v\", err)\n\t}\n\tt.Logf(\"FYI b: %q\", b)\n\tm2 := new(MessageWithMap)\n\tif err := Unmarshal(b, m2); err != nil {\n\t\tt.Fatalf(\"Unmarshal: %v\", err)\n\t}\n\tfor _, pair := range [][2]interface{}{\n\t\t{m.NameMapping, m2.NameMapping},\n\t\t{m.MsgMapping, m2.MsgMapping},\n\t\t{m.ByteMapping, m2.ByteMapping},\n\t} {\n\t\tif !reflect.DeepEqual(pair[0], pair[1]) {\n\t\t\tt.Errorf(\"Map did not survive a round trip.\\ninitial: %v\\n  final: %v\", pair[0], pair[1])\n\t\t}\n\t}\n}\n\nfunc TestMapFieldWithNil(t *testing.T) {\n\tm1 := &MessageWithMap{\n\t\tMsgMapping: map[int64]*FloatingPoint{\n\t\t\t1: nil,\n\t\t},\n\t}\n\tb, err := Marshal(m1)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal: %v\", err)\n\t}\n\tm2 := new(MessageWithMap)\n\tif err := Unmarshal(b, m2); err != nil {\n\t\tt.Fatalf(\"Unmarshal: %v, got these bytes: %v\", err, b)\n\t}\n\tif v, ok := m2.MsgMapping[1]; !ok {\n\t\tt.Error(\"msg_mapping[1] not present\")\n\t} else if v != nil {\n\t\tt.Errorf(\"msg_mapping[1] not nil: %v\", v)\n\t}\n}\n\nfunc TestMapFieldWithNilBytes(t *testing.T) {\n\tm1 := &MessageWithMap{\n\t\tByteMapping: map[bool][]byte{\n\t\t\tfalse: []byte{},\n\t\t\ttrue:  nil,\n\t\t},\n\t}\n\tn := Size(m1)\n\tb, err := Marshal(m1)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal: %v\", err)\n\t}\n\tif n != len(b) {\n\t\tt.Errorf(\"Size(m1) = %d; want len(Marshal(m1)) = %d\", n, len(b))\n\t}\n\tm2 := new(MessageWithMap)\n\tif err := Unmarshal(b, m2); err != nil {\n\t\tt.Fatalf(\"Unmarshal: %v, got these bytes: %v\", err, b)\n\t}\n\tif v, ok := m2.ByteMapping[false]; !ok {\n\t\tt.Error(\"byte_mapping[false] not present\")\n\t} else if len(v) != 0 {\n\t\tt.Errorf(\"byte_mapping[false] not empty: %#v\", v)\n\t}\n\tif v, ok := m2.ByteMapping[true]; !ok {\n\t\tt.Error(\"byte_mapping[true] not present\")\n\t} else if len(v) != 0 {\n\t\tt.Errorf(\"byte_mapping[true] not empty: %#v\", v)\n\t}\n}\n\nfunc TestDecodeMapFieldMissingKey(t *testing.T) {\n\tb := []byte{\n\t\t0x0A, 0x03, // message, tag 1 (name_mapping), of length 3 bytes\n\t\t// no key\n\t\t0x12, 0x01, 0x6D, // string value of length 1 byte, value \"m\"\n\t}\n\tgot := &MessageWithMap{}\n\terr := Unmarshal(b, got)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to unmarshal map with missing key: %v\", err)\n\t}\n\twant := &MessageWithMap{NameMapping: map[int32]string{0: \"m\"}}\n\tif !Equal(got, want) {\n\t\tt.Errorf(\"Unmarshaled map with no key was not as expected. 
got: %v, want %v\", got, want)\n\t}\n}\n\nfunc TestDecodeMapFieldMissingValue(t *testing.T) {\n\tb := []byte{\n\t\t0x0A, 0x02, // message, tag 1 (name_mapping), of length 2 bytes\n\t\t0x08, 0x01, // varint key, value 1\n\t\t// no value\n\t}\n\tgot := &MessageWithMap{}\n\terr := Unmarshal(b, got)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to marshal map with missing value: %v\", err)\n\t}\n\twant := &MessageWithMap{NameMapping: map[int32]string{1: \"\"}}\n\tif !Equal(got, want) {\n\t\tt.Errorf(\"Unmarshaled map with no value was not as expected. got: %v, want %v\", got, want)\n\t}\n}\n\nfunc TestOneof(t *testing.T) {\n\tm := &Communique{}\n\tb, err := Marshal(m)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal of empty message with oneof: %v\", err)\n\t}\n\tif len(b) != 0 {\n\t\tt.Errorf(\"Marshal of empty message yielded too many bytes: %v\", b)\n\t}\n\n\tm = &Communique{\n\t\tUnion: &Communique_Name{\"Barry\"},\n\t}\n\n\t// Round-trip.\n\tb, err = Marshal(m)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal of message with oneof: %v\", err)\n\t}\n\tif len(b) != 7 { // name tag/wire (1) + name len (1) + name (5)\n\t\tt.Errorf(\"Incorrect marshal of message with oneof: %v\", b)\n\t}\n\tm.Reset()\n\tif err := Unmarshal(b, m); err != nil {\n\t\tt.Fatalf(\"Unmarshal of message with oneof: %v\", err)\n\t}\n\tif x, ok := m.Union.(*Communique_Name); !ok || x.Name != \"Barry\" {\n\t\tt.Errorf(\"After round trip, Union = %+v\", m.Union)\n\t}\n\tif name := m.GetName(); name != \"Barry\" {\n\t\tt.Errorf(\"After round trip, GetName = %q, want %q\", name, \"Barry\")\n\t}\n\n\t// Let's try with a message in the oneof.\n\tm.Union = &Communique_Msg{&Strings{StringField: String(\"deep deep string\")}}\n\tb, err = Marshal(m)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal of message with oneof set to message: %v\", err)\n\t}\n\tif len(b) != 20 { // msg tag/wire (1) + msg len (1) + msg (1 + 1 + 16)\n\t\tt.Errorf(\"Incorrect marshal of message with oneof set to message: %v\", b)\n\t}\n\tm.Reset()\n\tif err := Unmarshal(b, m); err != nil {\n\t\tt.Fatalf(\"Unmarshal of message with oneof set to message: %v\", err)\n\t}\n\tss, ok := m.Union.(*Communique_Msg)\n\tif !ok || ss.Msg.GetStringField() != \"deep deep string\" {\n\t\tt.Errorf(\"After round trip with oneof set to message, Union = %+v\", m.Union)\n\t}\n}\n\nfunc TestInefficientPackedBool(t *testing.T) {\n\t// https://github.com/golang/protobuf/issues/76\n\tinp := []byte{\n\t\t0x12, 0x02, // 0x12 = 2<<3|2; 2 bytes\n\t\t// Usually a bool should take a single byte,\n\t\t// but it is permitted to be any varint.\n\t\t0xb9, 0x30,\n\t}\n\tif err := Unmarshal(inp, new(MoreRepeated)); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\n// Benchmarks\n\nfunc testMsg() *GoTest {\n\tpb := initGoTest(true)\n\tconst N = 1000 // Internally the library starts much smaller.\n\tpb.F_Int32Repeated = make([]int32, N)\n\tpb.F_DoubleRepeated = make([]float64, N)\n\tfor i := 0; i < N; i++ {\n\t\tpb.F_Int32Repeated[i] = int32(i)\n\t\tpb.F_DoubleRepeated[i] = float64(i)\n\t}\n\treturn pb\n}\n\nfunc bytesMsg() *GoTest {\n\tpb := initGoTest(true)\n\tbuf := make([]byte, 4000)\n\tfor i := range buf {\n\t\tbuf[i] = byte(i)\n\t}\n\tpb.F_BytesDefaulted = buf\n\treturn pb\n}\n\nfunc benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) {\n\td, _ := marshal(pb)\n\tb.SetBytes(int64(len(d)))\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tmarshal(pb)\n\t}\n}\n\nfunc benchmarkBufferMarshal(b *testing.B, pb Message) {\n\tp := NewBuffer(nil)\n\tbenchmarkMarshal(b, pb, func(pb0 Message) 
([]byte, error) {\n\t\tp.Reset()\n\t\terr := p.Marshal(pb0)\n\t\treturn p.Bytes(), err\n\t})\n}\n\nfunc benchmarkSize(b *testing.B, pb Message) {\n\tbenchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) {\n\t\tSize(pb0)\n\t\treturn nil, nil\n\t})\n}\n\nfunc newOf(pb Message) Message {\n\tin := reflect.ValueOf(pb)\n\tif in.IsNil() {\n\t\treturn pb\n\t}\n\treturn reflect.New(in.Type().Elem()).Interface().(Message)\n}\n\nfunc benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) {\n\td, _ := Marshal(pb)\n\tb.SetBytes(int64(len(d)))\n\tpbd := newOf(pb)\n\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tunmarshal(d, pbd)\n\t}\n}\n\nfunc benchmarkBufferUnmarshal(b *testing.B, pb Message) {\n\tp := NewBuffer(nil)\n\tbenchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error {\n\t\tp.SetBuf(d)\n\t\treturn p.Unmarshal(pb0)\n\t})\n}\n\n// Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes}\n\nfunc BenchmarkMarshal(b *testing.B) {\n\tbenchmarkMarshal(b, testMsg(), Marshal)\n}\n\nfunc BenchmarkBufferMarshal(b *testing.B) {\n\tbenchmarkBufferMarshal(b, testMsg())\n}\n\nfunc BenchmarkSize(b *testing.B) {\n\tbenchmarkSize(b, testMsg())\n}\n\nfunc BenchmarkUnmarshal(b *testing.B) {\n\tbenchmarkUnmarshal(b, testMsg(), Unmarshal)\n}\n\nfunc BenchmarkBufferUnmarshal(b *testing.B) {\n\tbenchmarkBufferUnmarshal(b, testMsg())\n}\n\nfunc BenchmarkMarshalBytes(b *testing.B) {\n\tbenchmarkMarshal(b, bytesMsg(), Marshal)\n}\n\nfunc BenchmarkBufferMarshalBytes(b *testing.B) {\n\tbenchmarkBufferMarshal(b, bytesMsg())\n}\n\nfunc BenchmarkSizeBytes(b *testing.B) {\n\tbenchmarkSize(b, bytesMsg())\n}\n\nfunc BenchmarkUnmarshalBytes(b *testing.B) {\n\tbenchmarkUnmarshal(b, bytesMsg(), Unmarshal)\n}\n\nfunc BenchmarkBufferUnmarshalBytes(b *testing.B) {\n\tbenchmarkBufferUnmarshal(b, bytesMsg())\n}\n\nfunc BenchmarkUnmarshalUnrecognizedFields(b *testing.B) {\n\tb.StopTimer()\n\tpb := initGoTestField()\n\tskip := &GoSkipTest{\n\t\tSkipInt32:   Int32(32),\n\t\tSkipFixed32: Uint32(3232),\n\t\tSkipFixed64: Uint64(6464),\n\t\tSkipString:  String(\"skipper\"),\n\t\tSkipgroup: &GoSkipTest_SkipGroup{\n\t\t\tGroupInt32:  Int32(75),\n\t\t\tGroupString: String(\"wxyz\"),\n\t\t},\n\t}\n\n\tpbd := new(GoTestField)\n\tp := NewBuffer(nil)\n\tp.Marshal(pb)\n\tp.Marshal(skip)\n\tp2 := NewBuffer(nil)\n\n\tb.StartTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tp2.SetBuf(p.Bytes())\n\t\tp2.Unmarshal(pbd)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/any_test.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2016 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto_test\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/golang/protobuf/proto\"\n\n\tpb \"github.com/golang/protobuf/proto/proto3_proto\"\n\ttestpb \"github.com/golang/protobuf/proto/testdata\"\n\tanypb \"github.com/golang/protobuf/ptypes/any\"\n)\n\nvar (\n\texpandedMarshaler        = proto.TextMarshaler{ExpandAny: true}\n\texpandedCompactMarshaler = proto.TextMarshaler{Compact: true, ExpandAny: true}\n)\n\n// anyEqual reports whether two messages which may be google.protobuf.Any or may\n// contain google.protobuf.Any fields are equal. We can't use proto.Equal for\n// comparison, because semantically equivalent messages may be marshaled to\n// binary in different tag order. 
Instead, trust that TextMarshaler with\n// ExpandAny option works and compare the text marshaling results.\nfunc anyEqual(got, want proto.Message) bool {\n\t// if messages are proto.Equal, no need to marshal.\n\tif proto.Equal(got, want) {\n\t\treturn true\n\t}\n\tg := expandedMarshaler.Text(got)\n\tw := expandedMarshaler.Text(want)\n\treturn g == w\n}\n\ntype golden struct {\n\tm    proto.Message\n\tt, c string\n}\n\nvar goldenMessages = makeGolden()\n\nfunc makeGolden() []golden {\n\tnested := &pb.Nested{Bunny: \"Monty\"}\n\tnb, err := proto.Marshal(nested)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tm1 := &pb.Message{\n\t\tName:        \"David\",\n\t\tResultCount: 47,\n\t\tAnything:    &anypb.Any{TypeUrl: \"type.googleapis.com/\" + proto.MessageName(nested), Value: nb},\n\t}\n\tm2 := &pb.Message{\n\t\tName:        \"David\",\n\t\tResultCount: 47,\n\t\tAnything:    &anypb.Any{TypeUrl: \"http://[::1]/type.googleapis.com/\" + proto.MessageName(nested), Value: nb},\n\t}\n\tm3 := &pb.Message{\n\t\tName:        \"David\",\n\t\tResultCount: 47,\n\t\tAnything:    &anypb.Any{TypeUrl: `type.googleapis.com/\"/` + proto.MessageName(nested), Value: nb},\n\t}\n\tm4 := &pb.Message{\n\t\tName:        \"David\",\n\t\tResultCount: 47,\n\t\tAnything:    &anypb.Any{TypeUrl: \"type.googleapis.com/a/path/\" + proto.MessageName(nested), Value: nb},\n\t}\n\tm5 := &anypb.Any{TypeUrl: \"type.googleapis.com/\" + proto.MessageName(nested), Value: nb}\n\n\tany1 := &testpb.MyMessage{Count: proto.Int32(47), Name: proto.String(\"David\")}\n\tproto.SetExtension(any1, testpb.E_Ext_More, &testpb.Ext{Data: proto.String(\"foo\")})\n\tproto.SetExtension(any1, testpb.E_Ext_Text, proto.String(\"bar\"))\n\tany1b, err := proto.Marshal(any1)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tany2 := &testpb.MyMessage{Count: proto.Int32(42), Bikeshed: testpb.MyMessage_GREEN.Enum(), RepBytes: [][]byte{[]byte(\"roboto\")}}\n\tproto.SetExtension(any2, testpb.E_Ext_More, &testpb.Ext{Data: proto.String(\"baz\")})\n\tany2b, err := proto.Marshal(any2)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tm6 := &pb.Message{\n\t\tName:        \"David\",\n\t\tResultCount: 47,\n\t\tAnything:    &anypb.Any{TypeUrl: \"type.googleapis.com/\" + proto.MessageName(any1), Value: any1b},\n\t\tManyThings: []*anypb.Any{\n\t\t\t&anypb.Any{TypeUrl: \"type.googleapis.com/\" + proto.MessageName(any2), Value: any2b},\n\t\t\t&anypb.Any{TypeUrl: \"type.googleapis.com/\" + proto.MessageName(any1), Value: any1b},\n\t\t},\n\t}\n\n\tconst (\n\t\tm1Golden = `\nname: \"David\"\nresult_count: 47\nanything: <\n  [type.googleapis.com/proto3_proto.Nested]: <\n    bunny: \"Monty\"\n  >\n>\n`\n\t\tm2Golden = `\nname: \"David\"\nresult_count: 47\nanything: <\n  [\"http://[::1]/type.googleapis.com/proto3_proto.Nested\"]: <\n    bunny: \"Monty\"\n  >\n>\n`\n\t\tm3Golden = `\nname: \"David\"\nresult_count: 47\nanything: <\n  [\"type.googleapis.com/\\\"/proto3_proto.Nested\"]: <\n    bunny: \"Monty\"\n  >\n>\n`\n\t\tm4Golden = `\nname: \"David\"\nresult_count: 47\nanything: <\n  [type.googleapis.com/a/path/proto3_proto.Nested]: <\n    bunny: \"Monty\"\n  >\n>\n`\n\t\tm5Golden = `\n[type.googleapis.com/proto3_proto.Nested]: <\n  bunny: \"Monty\"\n>\n`\n\t\tm6Golden = `\nname: \"David\"\nresult_count: 47\nanything: <\n  [type.googleapis.com/testdata.MyMessage]: <\n    count: 47\n    name: \"David\"\n    [testdata.Ext.more]: <\n      data: \"foo\"\n    >\n    [testdata.Ext.text]: \"bar\"\n  >\n>\nmany_things: <\n  [type.googleapis.com/testdata.MyMessage]: <\n    count: 42\n    bikeshed: GREEN\n   
 rep_bytes: \"roboto\"\n    [testdata.Ext.more]: <\n      data: \"baz\"\n    >\n  >\n>\nmany_things: <\n  [type.googleapis.com/testdata.MyMessage]: <\n    count: 47\n    name: \"David\"\n    [testdata.Ext.more]: <\n      data: \"foo\"\n    >\n    [testdata.Ext.text]: \"bar\"\n  >\n>\n`\n\t)\n\treturn []golden{\n\t\t{m1, strings.TrimSpace(m1Golden) + \"\\n\", strings.TrimSpace(compact(m1Golden)) + \" \"},\n\t\t{m2, strings.TrimSpace(m2Golden) + \"\\n\", strings.TrimSpace(compact(m2Golden)) + \" \"},\n\t\t{m3, strings.TrimSpace(m3Golden) + \"\\n\", strings.TrimSpace(compact(m3Golden)) + \" \"},\n\t\t{m4, strings.TrimSpace(m4Golden) + \"\\n\", strings.TrimSpace(compact(m4Golden)) + \" \"},\n\t\t{m5, strings.TrimSpace(m5Golden) + \"\\n\", strings.TrimSpace(compact(m5Golden)) + \" \"},\n\t\t{m6, strings.TrimSpace(m6Golden) + \"\\n\", strings.TrimSpace(compact(m6Golden)) + \" \"},\n\t}\n}\n\nfunc TestMarshalGolden(t *testing.T) {\n\tfor _, tt := range goldenMessages {\n\t\tif got, want := expandedMarshaler.Text(tt.m), tt.t; got != want {\n\t\t\tt.Errorf(\"message %v: got:\\n%s\\nwant:\\n%s\", tt.m, got, want)\n\t\t}\n\t\tif got, want := expandedCompactMarshaler.Text(tt.m), tt.c; got != want {\n\t\t\tt.Errorf(\"message %v: got:\\n`%s`\\nwant:\\n`%s`\", tt.m, got, want)\n\t\t}\n\t}\n}\n\nfunc TestUnmarshalGolden(t *testing.T) {\n\tfor _, tt := range goldenMessages {\n\t\twant := tt.m\n\t\tgot := proto.Clone(tt.m)\n\t\tgot.Reset()\n\t\tif err := proto.UnmarshalText(tt.t, got); err != nil {\n\t\t\tt.Errorf(\"failed to unmarshal\\n%s\\nerror: %v\", tt.t, err)\n\t\t}\n\t\tif !anyEqual(got, want) {\n\t\t\tt.Errorf(\"message:\\n%s\\ngot:\\n%s\\nwant:\\n%s\", tt.t, got, want)\n\t\t}\n\t\tgot.Reset()\n\t\tif err := proto.UnmarshalText(tt.c, got); err != nil {\n\t\t\tt.Errorf(\"failed to unmarshal\\n%s\\nerror: %v\", tt.c, err)\n\t\t}\n\t\tif !anyEqual(got, want) {\n\t\t\tt.Errorf(\"message:\\n%s\\ngot:\\n%s\\nwant:\\n%s\", tt.c, got, want)\n\t\t}\n\t}\n}\n\nfunc TestMarshalUnknownAny(t *testing.T) {\n\tm := &pb.Message{\n\t\tAnything: &anypb.Any{\n\t\t\tTypeUrl: \"foo\",\n\t\t\tValue:   []byte(\"bar\"),\n\t\t},\n\t}\n\twant := `anything: <\n  type_url: \"foo\"\n  value: \"bar\"\n>\n`\n\tgot := expandedMarshaler.Text(m)\n\tif got != want {\n\t\tt.Errorf(\"got\\n`%s`\\nwant\\n`%s`\", got, want)\n\t}\n}\n\nfunc TestAmbiguousAny(t *testing.T) {\n\tpb := &anypb.Any{}\n\terr := proto.UnmarshalText(`\n\ttype_url: \"ttt/proto3_proto.Nested\"\n\tvalue: \"\\n\\x05Monty\"\n\t`, pb)\n\tt.Logf(\"result: %v (error: %v)\", expandedMarshaler.Text(pb), err)\n\tif err != nil {\n\t\tt.Errorf(\"failed to parse ambiguous Any message: %v\", err)\n\t}\n}\n\nfunc TestUnmarshalOverwriteAny(t *testing.T) {\n\tpb := &anypb.Any{}\n\terr := proto.UnmarshalText(`\n  [type.googleapis.com/a/path/proto3_proto.Nested]: <\n    bunny: \"Monty\"\n  >\n  [type.googleapis.com/a/path/proto3_proto.Nested]: <\n    bunny: \"Rabbit of Caerbannog\"\n  >\n\t`, pb)\n\twant := `line 7: Any message unpacked multiple times, or \"type_url\" already set`\n\tif err.Error() != want {\n\t\tt.Errorf(\"incorrect error.\\nHave: %v\\nWant: %v\", err.Error(), want)\n\t}\n}\n\nfunc TestUnmarshalAnyMixAndMatch(t *testing.T) {\n\tpb := &anypb.Any{}\n\terr := proto.UnmarshalText(`\n\tvalue: \"\\n\\x05Monty\"\n  [type.googleapis.com/a/path/proto3_proto.Nested]: <\n    bunny: \"Rabbit of Caerbannog\"\n  >\n\t`, pb)\n\twant := `line 5: Any message unpacked multiple times, or \"value\" already set`\n\tif err.Error() != want {\n\t\tt.Errorf(\"incorrect error.\\nHave: 
%v\\nWant: %v\", err.Error(), want)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/clone.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2011 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Protocol buffer deep copy and merge.\n// TODO: RawMessage.\n\npackage proto\n\nimport (\n\t\"log\"\n\t\"reflect\"\n\t\"strings\"\n)\n\n// Clone returns a deep copy of a protocol buffer.\nfunc Clone(pb Message) Message {\n\tin := reflect.ValueOf(pb)\n\tif in.IsNil() {\n\t\treturn pb\n\t}\n\n\tout := reflect.New(in.Type().Elem())\n\t// out is empty so a merge is a deep copy.\n\tmergeStruct(out.Elem(), in.Elem())\n\treturn out.Interface().(Message)\n}\n\n// Merge merges src into dst.\n// Required and optional fields that are set in src will be set to that value in dst.\n// Elements of repeated fields will be appended.\n// Merge panics if src and dst are not the same type, or if dst is nil.\nfunc Merge(dst, src Message) {\n\tin := reflect.ValueOf(src)\n\tout := reflect.ValueOf(dst)\n\tif out.IsNil() {\n\t\tpanic(\"proto: nil destination\")\n\t}\n\tif in.Type() != out.Type() {\n\t\t// Explicit test prior to mergeStruct so that mistyped nils will fail\n\t\tpanic(\"proto: type mismatch\")\n\t}\n\tif in.IsNil() {\n\t\t// Merging nil into non-nil is a quiet no-op\n\t\treturn\n\t}\n\tmergeStruct(out.Elem(), in.Elem())\n}\n\nfunc mergeStruct(out, in reflect.Value) {\n\tsprop := GetProperties(in.Type())\n\tfor i := 0; i < in.NumField(); i++ {\n\t\tf := in.Type().Field(i)\n\t\tif strings.HasPrefix(f.Name, \"XXX_\") {\n\t\t\tcontinue\n\t\t}\n\t\tmergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])\n\t}\n\n\tif emIn, ok := extendable(in.Addr().Interface()); ok {\n\t\temOut, _ := extendable(out.Addr().Interface())\n\t\tmIn, muIn := emIn.extensionsRead()\n\t\tif mIn != nil {\n\t\t\tmOut := emOut.extensionsWrite()\n\t\t\tmuIn.Lock()\n\t\t\tmergeExtension(mOut, mIn)\n\t\t\tmuIn.Unlock()\n\t\t}\n\t}\n\n\tuf := in.FieldByName(\"XXX_unrecognized\")\n\tif !uf.IsValid() {\n\t\treturn\n\t}\n\tuin := uf.Bytes()\n\tif 
len(uin) > 0 {\n\t\tout.FieldByName(\"XXX_unrecognized\").SetBytes(append([]byte(nil), uin...))\n\t}\n}\n\n// mergeAny performs a merge between two values of the same type.\n// viaPtr indicates whether the values were indirected through a pointer (implying proto2).\n// prop is set if this is a struct field (it may be nil).\nfunc mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {\n\tif in.Type() == protoMessageType {\n\t\tif !in.IsNil() {\n\t\t\tif out.IsNil() {\n\t\t\t\tout.Set(reflect.ValueOf(Clone(in.Interface().(Message))))\n\t\t\t} else {\n\t\t\t\tMerge(out.Interface().(Message), in.Interface().(Message))\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tswitch in.Kind() {\n\tcase reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,\n\t\treflect.String, reflect.Uint32, reflect.Uint64:\n\t\tif !viaPtr && isProto3Zero(in) {\n\t\t\treturn\n\t\t}\n\t\tout.Set(in)\n\tcase reflect.Interface:\n\t\t// Probably a oneof field; copy non-nil values.\n\t\tif in.IsNil() {\n\t\t\treturn\n\t\t}\n\t\t// Allocate destination if it is not set, or set to a different type.\n\t\t// Otherwise we will merge as normal.\n\t\tif out.IsNil() || out.Elem().Type() != in.Elem().Type() {\n\t\t\tout.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)\n\t\t}\n\t\tmergeAny(out.Elem(), in.Elem(), false, nil)\n\tcase reflect.Map:\n\t\tif in.Len() == 0 {\n\t\t\treturn\n\t\t}\n\t\tif out.IsNil() {\n\t\t\tout.Set(reflect.MakeMap(in.Type()))\n\t\t}\n\t\t// For maps with value types of *T or []byte we need to deep copy each value.\n\t\telemKind := in.Type().Elem().Kind()\n\t\tfor _, key := range in.MapKeys() {\n\t\t\tvar val reflect.Value\n\t\t\tswitch elemKind {\n\t\t\tcase reflect.Ptr:\n\t\t\t\tval = reflect.New(in.Type().Elem().Elem())\n\t\t\t\tmergeAny(val, in.MapIndex(key), false, nil)\n\t\t\tcase reflect.Slice:\n\t\t\t\tval = in.MapIndex(key)\n\t\t\t\tval = reflect.ValueOf(append([]byte{}, val.Bytes()...))\n\t\t\tdefault:\n\t\t\t\tval = in.MapIndex(key)\n\t\t\t}\n\t\t\tout.SetMapIndex(key, val)\n\t\t}\n\tcase reflect.Ptr:\n\t\tif in.IsNil() {\n\t\t\treturn\n\t\t}\n\t\tif out.IsNil() {\n\t\t\tout.Set(reflect.New(in.Elem().Type()))\n\t\t}\n\t\tmergeAny(out.Elem(), in.Elem(), true, nil)\n\tcase reflect.Slice:\n\t\tif in.IsNil() {\n\t\t\treturn\n\t\t}\n\t\tif in.Type().Elem().Kind() == reflect.Uint8 {\n\t\t\t// []byte is a scalar bytes field, not a repeated field.\n\n\t\t\t// Edge case: if this is in a proto3 message, a zero length\n\t\t\t// bytes field is considered the zero value, and should not\n\t\t\t// be merged.\n\t\t\tif prop != nil && prop.proto3 && in.Len() == 0 {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Make a deep copy.\n\t\t\t// Append to []byte{} instead of []byte(nil) so that we never end up\n\t\t\t// with a nil result.\n\t\t\tout.SetBytes(append([]byte{}, in.Bytes()...))\n\t\t\treturn\n\t\t}\n\t\tn := in.Len()\n\t\tif out.IsNil() {\n\t\t\tout.Set(reflect.MakeSlice(in.Type(), 0, n))\n\t\t}\n\t\tswitch in.Type().Elem().Kind() {\n\t\tcase reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,\n\t\t\treflect.String, reflect.Uint32, reflect.Uint64:\n\t\t\tout.Set(reflect.AppendSlice(out, in))\n\t\tdefault:\n\t\t\tfor i := 0; i < n; i++ {\n\t\t\t\tx := reflect.Indirect(reflect.New(in.Type().Elem()))\n\t\t\t\tmergeAny(x, in.Index(i), false, nil)\n\t\t\t\tout.Set(reflect.Append(out, x))\n\t\t\t}\n\t\t}\n\tcase reflect.Struct:\n\t\tmergeStruct(out, in)\n\tdefault:\n\t\t// unknown type, so not a protocol buffer\n\t\tlog.Printf(\"proto: don't know how to 
copy %v\", in)\n\t}\n}\n\nfunc mergeExtension(out, in map[int32]Extension) {\n\tfor extNum, eIn := range in {\n\t\teOut := Extension{desc: eIn.desc}\n\t\tif eIn.value != nil {\n\t\t\tv := reflect.New(reflect.TypeOf(eIn.value)).Elem()\n\t\t\tmergeAny(v, reflect.ValueOf(eIn.value), false, nil)\n\t\t\teOut.value = v.Interface()\n\t\t}\n\t\tif eIn.enc != nil {\n\t\t\teOut.enc = make([]byte, len(eIn.enc))\n\t\t\tcopy(eOut.enc, eIn.enc)\n\t\t}\n\n\t\tout[extNum] = eOut\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/clone_test.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2011 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/golang/protobuf/proto\"\n\n\tproto3pb \"github.com/golang/protobuf/proto/proto3_proto\"\n\tpb \"github.com/golang/protobuf/proto/testdata\"\n)\n\nvar cloneTestMessage = &pb.MyMessage{\n\tCount: proto.Int32(42),\n\tName:  proto.String(\"Dave\"),\n\tPet:   []string{\"bunny\", \"kitty\", \"horsey\"},\n\tInner: &pb.InnerMessage{\n\t\tHost:      proto.String(\"niles\"),\n\t\tPort:      proto.Int32(9099),\n\t\tConnected: proto.Bool(true),\n\t},\n\tOthers: []*pb.OtherMessage{\n\t\t{\n\t\t\tValue: []byte(\"some bytes\"),\n\t\t},\n\t},\n\tSomegroup: &pb.MyMessage_SomeGroup{\n\t\tGroupField: proto.Int32(6),\n\t},\n\tRepBytes: [][]byte{[]byte(\"sham\"), []byte(\"wow\")},\n}\n\nfunc init() {\n\text := &pb.Ext{\n\t\tData: proto.String(\"extension\"),\n\t}\n\tif err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil {\n\t\tpanic(\"SetExtension: \" + err.Error())\n\t}\n}\n\nfunc TestClone(t *testing.T) {\n\tm := proto.Clone(cloneTestMessage).(*pb.MyMessage)\n\tif !proto.Equal(m, cloneTestMessage) {\n\t\tt.Errorf(\"Clone(%v) = %v\", cloneTestMessage, m)\n\t}\n\n\t// Verify it was a deep copy.\n\t*m.Inner.Port++\n\tif proto.Equal(m, cloneTestMessage) {\n\t\tt.Error(\"Mutating clone changed the original\")\n\t}\n\t// Byte fields and repeated fields should be copied.\n\tif &m.Pet[0] == &cloneTestMessage.Pet[0] {\n\t\tt.Error(\"Pet: repeated field not copied\")\n\t}\n\tif &m.Others[0] == &cloneTestMessage.Others[0] {\n\t\tt.Error(\"Others: repeated field not copied\")\n\t}\n\tif &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] {\n\t\tt.Error(\"Others[0].Value: bytes field not copied\")\n\t}\n\tif &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] {\n\t\tt.Error(\"RepBytes: repeated field not copied\")\n\t}\n\tif &m.RepBytes[0][0] 
== &cloneTestMessage.RepBytes[0][0] {\n\t\tt.Error(\"RepBytes[0]: bytes field not copied\")\n\t}\n}\n\nfunc TestCloneNil(t *testing.T) {\n\tvar m *pb.MyMessage\n\tif c := proto.Clone(m); !proto.Equal(m, c) {\n\t\tt.Errorf(\"Clone(%v) = %v\", m, c)\n\t}\n}\n\nvar mergeTests = []struct {\n\tsrc, dst, want proto.Message\n}{\n\t{\n\t\tsrc: &pb.MyMessage{\n\t\t\tCount: proto.Int32(42),\n\t\t},\n\t\tdst: &pb.MyMessage{\n\t\t\tName: proto.String(\"Dave\"),\n\t\t},\n\t\twant: &pb.MyMessage{\n\t\t\tCount: proto.Int32(42),\n\t\t\tName:  proto.String(\"Dave\"),\n\t\t},\n\t},\n\t{\n\t\tsrc: &pb.MyMessage{\n\t\t\tInner: &pb.InnerMessage{\n\t\t\t\tHost:      proto.String(\"hey\"),\n\t\t\t\tConnected: proto.Bool(true),\n\t\t\t},\n\t\t\tPet: []string{\"horsey\"},\n\t\t\tOthers: []*pb.OtherMessage{\n\t\t\t\t{\n\t\t\t\t\tValue: []byte(\"some bytes\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tdst: &pb.MyMessage{\n\t\t\tInner: &pb.InnerMessage{\n\t\t\t\tHost: proto.String(\"niles\"),\n\t\t\t\tPort: proto.Int32(9099),\n\t\t\t},\n\t\t\tPet: []string{\"bunny\", \"kitty\"},\n\t\t\tOthers: []*pb.OtherMessage{\n\t\t\t\t{\n\t\t\t\t\tKey: proto.Int64(31415926535),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\t// Explicitly test a src=nil field\n\t\t\t\t\tInner: nil,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\twant: &pb.MyMessage{\n\t\t\tInner: &pb.InnerMessage{\n\t\t\t\tHost:      proto.String(\"hey\"),\n\t\t\t\tConnected: proto.Bool(true),\n\t\t\t\tPort:      proto.Int32(9099),\n\t\t\t},\n\t\t\tPet: []string{\"bunny\", \"kitty\", \"horsey\"},\n\t\t\tOthers: []*pb.OtherMessage{\n\t\t\t\t{\n\t\t\t\t\tKey: proto.Int64(31415926535),\n\t\t\t\t},\n\t\t\t\t{},\n\t\t\t\t{\n\t\t\t\t\tValue: []byte(\"some bytes\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\t{\n\t\tsrc: &pb.MyMessage{\n\t\t\tRepBytes: [][]byte{[]byte(\"wow\")},\n\t\t},\n\t\tdst: &pb.MyMessage{\n\t\t\tSomegroup: &pb.MyMessage_SomeGroup{\n\t\t\t\tGroupField: proto.Int32(6),\n\t\t\t},\n\t\t\tRepBytes: [][]byte{[]byte(\"sham\")},\n\t\t},\n\t\twant: &pb.MyMessage{\n\t\t\tSomegroup: &pb.MyMessage_SomeGroup{\n\t\t\t\tGroupField: proto.Int32(6),\n\t\t\t},\n\t\t\tRepBytes: [][]byte{[]byte(\"sham\"), []byte(\"wow\")},\n\t\t},\n\t},\n\t// Check that a scalar bytes field replaces rather than appends.\n\t{\n\t\tsrc:  &pb.OtherMessage{Value: []byte(\"foo\")},\n\t\tdst:  &pb.OtherMessage{Value: []byte(\"bar\")},\n\t\twant: &pb.OtherMessage{Value: []byte(\"foo\")},\n\t},\n\t{\n\t\tsrc: &pb.MessageWithMap{\n\t\t\tNameMapping: map[int32]string{6: \"Nigel\"},\n\t\t\tMsgMapping: map[int64]*pb.FloatingPoint{\n\t\t\t\t0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)},\n\t\t\t\t0x4002: &pb.FloatingPoint{\n\t\t\t\t\tF: proto.Float64(2.0),\n\t\t\t\t},\n\t\t\t},\n\t\t\tByteMapping: map[bool][]byte{true: []byte(\"wowsa\")},\n\t\t},\n\t\tdst: &pb.MessageWithMap{\n\t\t\tNameMapping: map[int32]string{\n\t\t\t\t6: \"Bruce\", // should be overwritten\n\t\t\t\t7: \"Andrew\",\n\t\t\t},\n\t\t\tMsgMapping: map[int64]*pb.FloatingPoint{\n\t\t\t\t0x4002: &pb.FloatingPoint{\n\t\t\t\t\tF:     proto.Float64(3.0),\n\t\t\t\t\tExact: proto.Bool(true),\n\t\t\t\t}, // the entire message should be overwritten\n\t\t\t},\n\t\t},\n\t\twant: &pb.MessageWithMap{\n\t\t\tNameMapping: map[int32]string{\n\t\t\t\t6: \"Nigel\",\n\t\t\t\t7: \"Andrew\",\n\t\t\t},\n\t\t\tMsgMapping: map[int64]*pb.FloatingPoint{\n\t\t\t\t0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)},\n\t\t\t\t0x4002: &pb.FloatingPoint{\n\t\t\t\t\tF: proto.Float64(2.0),\n\t\t\t\t},\n\t\t\t},\n\t\t\tByteMapping: map[bool][]byte{true: []byte(\"wowsa\")},\n\t\t},\n\t},\n\t// proto3 
shouldn't merge zero values,\n\t// in the same way that proto2 shouldn't merge nils.\n\t{\n\t\tsrc: &proto3pb.Message{\n\t\t\tName: \"Aaron\",\n\t\t\tData: []byte(\"\"), // zero value, but not nil\n\t\t},\n\t\tdst: &proto3pb.Message{\n\t\t\tHeightInCm: 176,\n\t\t\tData:       []byte(\"texas!\"),\n\t\t},\n\t\twant: &proto3pb.Message{\n\t\t\tName:       \"Aaron\",\n\t\t\tHeightInCm: 176,\n\t\t\tData:       []byte(\"texas!\"),\n\t\t},\n\t},\n\t// Oneof fields should merge by assignment.\n\t{\n\t\tsrc: &pb.Communique{\n\t\t\tUnion: &pb.Communique_Number{41},\n\t\t},\n\t\tdst: &pb.Communique{\n\t\t\tUnion: &pb.Communique_Name{\"Bobby Tables\"},\n\t\t},\n\t\twant: &pb.Communique{\n\t\t\tUnion: &pb.Communique_Number{41},\n\t\t},\n\t},\n\t// Oneof nil is the same as not set.\n\t{\n\t\tsrc: &pb.Communique{},\n\t\tdst: &pb.Communique{\n\t\t\tUnion: &pb.Communique_Name{\"Bobby Tables\"},\n\t\t},\n\t\twant: &pb.Communique{\n\t\t\tUnion: &pb.Communique_Name{\"Bobby Tables\"},\n\t\t},\n\t},\n\t{\n\t\tsrc: &proto3pb.Message{\n\t\t\tTerrain: map[string]*proto3pb.Nested{\n\t\t\t\t\"kay_a\": &proto3pb.Nested{Cute: true},      // replace\n\t\t\t\t\"kay_b\": &proto3pb.Nested{Bunny: \"rabbit\"}, // insert\n\t\t\t},\n\t\t},\n\t\tdst: &proto3pb.Message{\n\t\t\tTerrain: map[string]*proto3pb.Nested{\n\t\t\t\t\"kay_a\": &proto3pb.Nested{Bunny: \"lost\"},  // replaced\n\t\t\t\t\"kay_c\": &proto3pb.Nested{Bunny: \"bunny\"}, // keep\n\t\t\t},\n\t\t},\n\t\twant: &proto3pb.Message{\n\t\t\tTerrain: map[string]*proto3pb.Nested{\n\t\t\t\t\"kay_a\": &proto3pb.Nested{Cute: true},\n\t\t\t\t\"kay_b\": &proto3pb.Nested{Bunny: \"rabbit\"},\n\t\t\t\t\"kay_c\": &proto3pb.Nested{Bunny: \"bunny\"},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestMerge(t *testing.T) {\n\tfor _, m := range mergeTests {\n\t\tgot := proto.Clone(m.dst)\n\t\tproto.Merge(got, m.src)\n\t\tif !proto.Equal(got, m.want) {\n\t\t\tt.Errorf(\"Merge(%v, %v)\\n got %v\\nwant %v\\n\", m.dst, m.src, got, m.want)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/decode.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto\n\n/*\n * Routines for decoding protocol buffer data to construct in-memory representations.\n */\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"reflect\"\n)\n\n// errOverflow is returned when an integer is too large to be represented.\nvar errOverflow = errors.New(\"proto: integer overflow\")\n\n// ErrInternalBadWireType is returned by generated code when an incorrect\n// wire type is encountered. 
It does not get returned to user code.\nvar ErrInternalBadWireType = errors.New(\"proto: internal error: bad wiretype for oneof\")\n\n// The fundamental decoders that interpret bytes on the wire.\n// Those that take integer types all return uint64 and are\n// therefore of type valueDecoder.\n\n// DecodeVarint reads a varint-encoded integer from the slice.\n// It returns the integer and the number of bytes consumed, or\n// zero if there is not enough.\n// This is the format for the\n// int32, int64, uint32, uint64, bool, and enum\n// protocol buffer types.\nfunc DecodeVarint(buf []byte) (x uint64, n int) {\n\tfor shift := uint(0); shift < 64; shift += 7 {\n\t\tif n >= len(buf) {\n\t\t\treturn 0, 0\n\t\t}\n\t\tb := uint64(buf[n])\n\t\tn++\n\t\tx |= (b & 0x7F) << shift\n\t\tif (b & 0x80) == 0 {\n\t\t\treturn x, n\n\t\t}\n\t}\n\n\t// The number is too large to represent in a 64-bit value.\n\treturn 0, 0\n}\n\nfunc (p *Buffer) decodeVarintSlow() (x uint64, err error) {\n\ti := p.index\n\tl := len(p.buf)\n\n\tfor shift := uint(0); shift < 64; shift += 7 {\n\t\tif i >= l {\n\t\t\terr = io.ErrUnexpectedEOF\n\t\t\treturn\n\t\t}\n\t\tb := p.buf[i]\n\t\ti++\n\t\tx |= (uint64(b) & 0x7F) << shift\n\t\tif b < 0x80 {\n\t\t\tp.index = i\n\t\t\treturn\n\t\t}\n\t}\n\n\t// The number is too large to represent in a 64-bit value.\n\terr = errOverflow\n\treturn\n}\n\n// DecodeVarint reads a varint-encoded integer from the Buffer.\n// This is the format for the\n// int32, int64, uint32, uint64, bool, and enum\n// protocol buffer types.\nfunc (p *Buffer) DecodeVarint() (x uint64, err error) {\n\ti := p.index\n\tbuf := p.buf\n\n\tif i >= len(buf) {\n\t\treturn 0, io.ErrUnexpectedEOF\n\t} else if buf[i] < 0x80 {\n\t\tp.index++\n\t\treturn uint64(buf[i]), nil\n\t} else if len(buf)-i < 10 {\n\t\treturn p.decodeVarintSlow()\n\t}\n\n\tvar b uint64\n\t// we already checked the first byte\n\tx = uint64(buf[i]) - 0x80\n\ti++\n\n\tb = uint64(buf[i])\n\ti++\n\tx += b << 7\n\tif b&0x80 == 0 {\n\t\tgoto done\n\t}\n\tx -= 0x80 << 7\n\n\tb = uint64(buf[i])\n\ti++\n\tx += b << 14\n\tif b&0x80 == 0 {\n\t\tgoto done\n\t}\n\tx -= 0x80 << 14\n\n\tb = uint64(buf[i])\n\ti++\n\tx += b << 21\n\tif b&0x80 == 0 {\n\t\tgoto done\n\t}\n\tx -= 0x80 << 21\n\n\tb = uint64(buf[i])\n\ti++\n\tx += b << 28\n\tif b&0x80 == 0 {\n\t\tgoto done\n\t}\n\tx -= 0x80 << 28\n\n\tb = uint64(buf[i])\n\ti++\n\tx += b << 35\n\tif b&0x80 == 0 {\n\t\tgoto done\n\t}\n\tx -= 0x80 << 35\n\n\tb = uint64(buf[i])\n\ti++\n\tx += b << 42\n\tif b&0x80 == 0 {\n\t\tgoto done\n\t}\n\tx -= 0x80 << 42\n\n\tb = uint64(buf[i])\n\ti++\n\tx += b << 49\n\tif b&0x80 == 0 {\n\t\tgoto done\n\t}\n\tx -= 0x80 << 49\n\n\tb = uint64(buf[i])\n\ti++\n\tx += b << 56\n\tif b&0x80 == 0 {\n\t\tgoto done\n\t}\n\tx -= 0x80 << 56\n\n\tb = uint64(buf[i])\n\ti++\n\tx += b << 63\n\tif b&0x80 == 0 {\n\t\tgoto done\n\t}\n\t// x -= 0x80 << 63 // Always zero.\n\n\treturn 0, errOverflow\n\ndone:\n\tp.index = i\n\treturn x, nil\n}\n\n// DecodeFixed64 reads a 64-bit integer from the Buffer.\n// This is the format for the\n// fixed64, sfixed64, and double protocol buffer types.\nfunc (p *Buffer) DecodeFixed64() (x uint64, err error) {\n\t// x, err already 0\n\ti := p.index + 8\n\tif i < 0 || i > len(p.buf) {\n\t\terr = io.ErrUnexpectedEOF\n\t\treturn\n\t}\n\tp.index = i\n\n\tx = uint64(p.buf[i-8])\n\tx |= uint64(p.buf[i-7]) << 8\n\tx |= uint64(p.buf[i-6]) << 16\n\tx |= uint64(p.buf[i-5]) << 24\n\tx |= uint64(p.buf[i-4]) << 32\n\tx |= uint64(p.buf[i-3]) << 40\n\tx |= uint64(p.buf[i-2]) << 48\n\tx |= 
uint64(p.buf[i-1]) << 56\n\treturn\n}\n\n// DecodeFixed32 reads a 32-bit integer from the Buffer.\n// This is the format for the\n// fixed32, sfixed32, and float protocol buffer types.\nfunc (p *Buffer) DecodeFixed32() (x uint64, err error) {\n\t// x, err already 0\n\ti := p.index + 4\n\tif i < 0 || i > len(p.buf) {\n\t\terr = io.ErrUnexpectedEOF\n\t\treturn\n\t}\n\tp.index = i\n\n\tx = uint64(p.buf[i-4])\n\tx |= uint64(p.buf[i-3]) << 8\n\tx |= uint64(p.buf[i-2]) << 16\n\tx |= uint64(p.buf[i-1]) << 24\n\treturn\n}\n\n// DecodeZigzag64 reads a zigzag-encoded 64-bit integer\n// from the Buffer.\n// This is the format used for the sint64 protocol buffer type.\nfunc (p *Buffer) DecodeZigzag64() (x uint64, err error) {\n\tx, err = p.DecodeVarint()\n\tif err != nil {\n\t\treturn\n\t}\n\tx = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)\n\treturn\n}\n\n// DecodeZigzag32 reads a zigzag-encoded 32-bit integer\n// from  the Buffer.\n// This is the format used for the sint32 protocol buffer type.\nfunc (p *Buffer) DecodeZigzag32() (x uint64, err error) {\n\tx, err = p.DecodeVarint()\n\tif err != nil {\n\t\treturn\n\t}\n\tx = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))\n\treturn\n}\n\n// These are not ValueDecoders: they produce an array of bytes or a string.\n// bytes, embedded messages\n\n// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.\n// This is the format used for the bytes protocol buffer\n// type and for embedded messages.\nfunc (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {\n\tn, err := p.DecodeVarint()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnb := int(n)\n\tif nb < 0 {\n\t\treturn nil, fmt.Errorf(\"proto: bad byte length %d\", nb)\n\t}\n\tend := p.index + nb\n\tif end < p.index || end > len(p.buf) {\n\t\treturn nil, io.ErrUnexpectedEOF\n\t}\n\n\tif !alloc {\n\t\t// todo: check if can get more uses of alloc=false\n\t\tbuf = p.buf[p.index:end]\n\t\tp.index += nb\n\t\treturn\n\t}\n\n\tbuf = make([]byte, nb)\n\tcopy(buf, p.buf[p.index:])\n\tp.index += nb\n\treturn\n}\n\n// DecodeStringBytes reads an encoded string from the Buffer.\n// This is the format used for the proto2 string type.\nfunc (p *Buffer) DecodeStringBytes() (s string, err error) {\n\tbuf, err := p.DecodeRawBytes(false)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn string(buf), nil\n}\n\n// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.\n// If the protocol buffer has extensions, and the field matches, add it as an extension.\n// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.\nfunc (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {\n\toi := o.index\n\n\terr := o.skip(t, tag, wire)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !unrecField.IsValid() {\n\t\treturn nil\n\t}\n\n\tptr := structPointer_Bytes(base, unrecField)\n\n\t// Add the skipped field to struct field\n\tobuf := o.buf\n\n\to.buf = *ptr\n\to.EncodeVarint(uint64(tag<<3 | wire))\n\t*ptr = append(o.buf, obuf[oi:o.index]...)\n\n\to.buf = obuf\n\n\treturn nil\n}\n\n// Skip the next item in the buffer. 
Its wire type is decoded and presented as an argument.\nfunc (o *Buffer) skip(t reflect.Type, tag, wire int) error {\n\n\tvar u uint64\n\tvar err error\n\n\tswitch wire {\n\tcase WireVarint:\n\t\t_, err = o.DecodeVarint()\n\tcase WireFixed64:\n\t\t_, err = o.DecodeFixed64()\n\tcase WireBytes:\n\t\t_, err = o.DecodeRawBytes(false)\n\tcase WireFixed32:\n\t\t_, err = o.DecodeFixed32()\n\tcase WireStartGroup:\n\t\tfor {\n\t\t\tu, err = o.DecodeVarint()\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfwire := int(u & 0x7)\n\t\t\tif fwire == WireEndGroup {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tftag := int(u >> 3)\n\t\t\terr = o.skip(t, ftag, fwire)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\tdefault:\n\t\terr = fmt.Errorf(\"proto: can't skip unknown wire type %d for %s\", wire, t)\n\t}\n\treturn err\n}\n\n// Unmarshaler is the interface representing objects that can\n// unmarshal themselves.  The method should reset the receiver before\n// decoding starts.  The argument points to data that may be\n// overwritten, so implementations should not keep references to the\n// buffer.\ntype Unmarshaler interface {\n\tUnmarshal([]byte) error\n}\n\n// Unmarshal parses the protocol buffer representation in buf and places the\n// decoded result in pb.  If the struct underlying pb does not match\n// the data in buf, the results can be unpredictable.\n//\n// Unmarshal resets pb before starting to unmarshal, so any\n// existing data in pb is always removed. Use UnmarshalMerge\n// to preserve and append to existing data.\nfunc Unmarshal(buf []byte, pb Message) error {\n\tpb.Reset()\n\treturn UnmarshalMerge(buf, pb)\n}\n\n// UnmarshalMerge parses the protocol buffer representation in buf and\n// writes the decoded result to pb.  If the struct underlying pb does not match\n// the data in buf, the results can be unpredictable.\n//\n// UnmarshalMerge merges into existing data in pb.\n// Most code should use Unmarshal instead.\nfunc UnmarshalMerge(buf []byte, pb Message) error {\n\t// If the object can unmarshal itself, let it.\n\tif u, ok := pb.(Unmarshaler); ok {\n\t\treturn u.Unmarshal(buf)\n\t}\n\treturn NewBuffer(buf).Unmarshal(pb)\n}\n\n// DecodeMessage reads a count-delimited message from the Buffer.\nfunc (p *Buffer) DecodeMessage(pb Message) error {\n\tenc, err := p.DecodeRawBytes(false)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn NewBuffer(enc).Unmarshal(pb)\n}\n\n// DecodeGroup reads a tag-delimited group from the Buffer.\nfunc (p *Buffer) DecodeGroup(pb Message) error {\n\ttyp, base, err := getbase(pb)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)\n}\n\n// Unmarshal parses the protocol buffer representation in the\n// Buffer and places the decoded result in pb.  
If the struct\n// underlying pb does not match the data in the buffer, the results can be\n// unpredictable.\n//\n// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.\nfunc (p *Buffer) Unmarshal(pb Message) error {\n\t// If the object can unmarshal itself, let it.\n\tif u, ok := pb.(Unmarshaler); ok {\n\t\terr := u.Unmarshal(p.buf[p.index:])\n\t\tp.index = len(p.buf)\n\t\treturn err\n\t}\n\n\ttyp, base, err := getbase(pb)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)\n\n\tif collectStats {\n\t\tstats.Decode++\n\t}\n\n\treturn err\n}\n\n// unmarshalType does the work of unmarshaling a structure.\nfunc (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {\n\tvar state errorState\n\trequired, reqFields := prop.reqCount, uint64(0)\n\n\tvar err error\n\tfor err == nil && o.index < len(o.buf) {\n\t\toi := o.index\n\t\tvar u uint64\n\t\tu, err = o.DecodeVarint()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\twire := int(u & 0x7)\n\t\tif wire == WireEndGroup {\n\t\t\tif is_group {\n\t\t\t\tif required > 0 {\n\t\t\t\t\t// Not enough information to determine the exact field.\n\t\t\t\t\t// (See below.)\n\t\t\t\t\treturn &RequiredNotSetError{\"{Unknown}\"}\n\t\t\t\t}\n\t\t\t\treturn nil // input is satisfied\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"proto: %s: wiretype end group for non-group\", st)\n\t\t}\n\t\ttag := int(u >> 3)\n\t\tif tag <= 0 {\n\t\t\treturn fmt.Errorf(\"proto: %s: illegal tag %d (wire type %d)\", st, tag, wire)\n\t\t}\n\t\tfieldnum, ok := prop.decoderTags.get(tag)\n\t\tif !ok {\n\t\t\t// Maybe it's an extension?\n\t\t\tif prop.extendable {\n\t\t\t\tif e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) {\n\t\t\t\t\tif err = o.skip(st, tag, wire); err == nil {\n\t\t\t\t\t\textmap := e.extensionsWrite()\n\t\t\t\t\t\text := extmap[int32(tag)] // may be missing\n\t\t\t\t\t\text.enc = append(ext.enc, o.buf[oi:o.index]...)\n\t\t\t\t\t\textmap[int32(tag)] = ext\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Maybe it's a oneof?\n\t\t\tif prop.oneofUnmarshaler != nil {\n\t\t\t\tm := structPointer_Interface(base, st).(Message)\n\t\t\t\t// First return value indicates whether tag is a oneof field.\n\t\t\t\tok, err = prop.oneofUnmarshaler(m, tag, wire, o)\n\t\t\t\tif err == ErrInternalBadWireType {\n\t\t\t\t\t// Map the error to something more descriptive.\n\t\t\t\t\t// Do the formatting here to save generated code space.\n\t\t\t\t\terr = fmt.Errorf(\"bad wiretype for oneof field in %T\", m)\n\t\t\t\t}\n\t\t\t\tif ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t\terr = o.skipAndSave(st, tag, wire, base, prop.unrecField)\n\t\t\tcontinue\n\t\t}\n\t\tp := prop.Prop[fieldnum]\n\n\t\tif p.dec == nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"proto: no protobuf decoder for %s.%s\\n\", st, st.Field(fieldnum).Name)\n\t\t\tcontinue\n\t\t}\n\t\tdec := p.dec\n\t\tif wire != WireStartGroup && wire != p.WireType {\n\t\t\tif wire == WireBytes && p.packedDec != nil {\n\t\t\t\t// a packable field\n\t\t\t\tdec = p.packedDec\n\t\t\t} else {\n\t\t\t\terr = fmt.Errorf(\"proto: bad wiretype for field %s.%s: got wiretype %d, want %d\", st, st.Field(fieldnum).Name, wire, p.WireType)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tdecErr := dec(o, p, base)\n\t\tif decErr != nil && !state.shouldContinue(decErr, p) {\n\t\t\terr = decErr\n\t\t}\n\t\tif err == nil && p.Required {\n\t\t\t// Successfully decoded a required field.\n\t\t\tif 
tag <= 64 {\n\t\t\t\t// use bitmap for fields 1-64 to catch field reuse.\n\t\t\t\tvar mask uint64 = 1 << uint64(tag-1)\n\t\t\t\tif reqFields&mask == 0 {\n\t\t\t\t\t// new required field\n\t\t\t\t\treqFields |= mask\n\t\t\t\t\trequired--\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// This is imprecise. It can be fooled by a required field\n\t\t\t\t// with a tag > 64 that is encoded twice; that's very rare.\n\t\t\t\t// A fully correct implementation would require allocating\n\t\t\t\t// a data structure, which we would like to avoid.\n\t\t\t\trequired--\n\t\t\t}\n\t\t}\n\t}\n\tif err == nil {\n\t\tif is_group {\n\t\t\treturn io.ErrUnexpectedEOF\n\t\t}\n\t\tif state.err != nil {\n\t\t\treturn state.err\n\t\t}\n\t\tif required > 0 {\n\t\t\t// Not enough information to determine the exact field. If we use extra\n\t\t\t// CPU, we could determine the field only if the missing required field\n\t\t\t// has a tag <= 64 and we check reqFields.\n\t\t\treturn &RequiredNotSetError{\"{Unknown}\"}\n\t\t}\n\t}\n\treturn err\n}\n\n// Individual type decoders\n// For each,\n//\tu is the decoded value,\n//\tv is a pointer to the field (pointer) in the struct\n\n// Sizes of the pools to allocate inside the Buffer.\n// The goal is modest amortization and allocation\n// on at least 16-byte boundaries.\nconst (\n\tboolPoolSize   = 16\n\tuint32PoolSize = 8\n\tuint64PoolSize = 4\n)\n\n// Decode a bool.\nfunc (o *Buffer) dec_bool(p *Properties, base structPointer) error {\n\tu, err := p.valDec(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(o.bools) == 0 {\n\t\to.bools = make([]bool, boolPoolSize)\n\t}\n\to.bools[0] = u != 0\n\t*structPointer_Bool(base, p.field) = &o.bools[0]\n\to.bools = o.bools[1:]\n\treturn nil\n}\n\nfunc (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {\n\tu, err := p.valDec(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*structPointer_BoolVal(base, p.field) = u != 0\n\treturn nil\n}\n\n// Decode an int32.\nfunc (o *Buffer) dec_int32(p *Properties, base structPointer) error {\n\tu, err := p.valDec(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\tword32_Set(structPointer_Word32(base, p.field), o, uint32(u))\n\treturn nil\n}\n\nfunc (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {\n\tu, err := p.valDec(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\tword32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))\n\treturn nil\n}\n\n// Decode an int64.\nfunc (o *Buffer) dec_int64(p *Properties, base structPointer) error {\n\tu, err := p.valDec(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\tword64_Set(structPointer_Word64(base, p.field), o, u)\n\treturn nil\n}\n\nfunc (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {\n\tu, err := p.valDec(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\tword64Val_Set(structPointer_Word64Val(base, p.field), o, u)\n\treturn nil\n}\n\n// Decode a string.\nfunc (o *Buffer) dec_string(p *Properties, base structPointer) error {\n\ts, err := o.DecodeStringBytes()\n\tif err != nil {\n\t\treturn err\n\t}\n\t*structPointer_String(base, p.field) = &s\n\treturn nil\n}\n\nfunc (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {\n\ts, err := o.DecodeStringBytes()\n\tif err != nil {\n\t\treturn err\n\t}\n\t*structPointer_StringVal(base, p.field) = s\n\treturn nil\n}\n\n// Decode a slice of bytes ([]byte).\nfunc (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {\n\tb, err := o.DecodeRawBytes(true)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*structPointer_Bytes(base, p.field) = b\n\treturn 
nil\n}\n\n// Decode a slice of bools ([]bool).\nfunc (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {\n\tu, err := p.valDec(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\tv := structPointer_BoolSlice(base, p.field)\n\t*v = append(*v, u != 0)\n\treturn nil\n}\n\n// Decode a slice of bools ([]bool) in packed format.\nfunc (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {\n\tv := structPointer_BoolSlice(base, p.field)\n\n\tnn, err := o.DecodeVarint()\n\tif err != nil {\n\t\treturn err\n\t}\n\tnb := int(nn) // number of bytes of encoded bools\n\tfin := o.index + nb\n\tif fin < o.index {\n\t\treturn errOverflow\n\t}\n\n\ty := *v\n\tfor o.index < fin {\n\t\tu, err := p.valDec(o)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ty = append(y, u != 0)\n\t}\n\n\t*v = y\n\treturn nil\n}\n\n// Decode a slice of int32s ([]int32).\nfunc (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {\n\tu, err := p.valDec(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\tstructPointer_Word32Slice(base, p.field).Append(uint32(u))\n\treturn nil\n}\n\n// Decode a slice of int32s ([]int32) in packed format.\nfunc (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {\n\tv := structPointer_Word32Slice(base, p.field)\n\n\tnn, err := o.DecodeVarint()\n\tif err != nil {\n\t\treturn err\n\t}\n\tnb := int(nn) // number of bytes of encoded int32s\n\n\tfin := o.index + nb\n\tif fin < o.index {\n\t\treturn errOverflow\n\t}\n\tfor o.index < fin {\n\t\tu, err := p.valDec(o)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tv.Append(uint32(u))\n\t}\n\treturn nil\n}\n\n// Decode a slice of int64s ([]int64).\nfunc (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {\n\tu, err := p.valDec(o)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstructPointer_Word64Slice(base, p.field).Append(u)\n\treturn nil\n}\n\n// Decode a slice of int64s ([]int64) in packed format.\nfunc (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {\n\tv := structPointer_Word64Slice(base, p.field)\n\n\tnn, err := o.DecodeVarint()\n\tif err != nil {\n\t\treturn err\n\t}\n\tnb := int(nn) // number of bytes of encoded int64s\n\n\tfin := o.index + nb\n\tif fin < o.index {\n\t\treturn errOverflow\n\t}\n\tfor o.index < fin {\n\t\tu, err := p.valDec(o)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tv.Append(u)\n\t}\n\treturn nil\n}\n\n// Decode a slice of strings ([]string).\nfunc (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {\n\ts, err := o.DecodeStringBytes()\n\tif err != nil {\n\t\treturn err\n\t}\n\tv := structPointer_StringSlice(base, p.field)\n\t*v = append(*v, s)\n\treturn nil\n}\n\n// Decode a slice of slice of bytes ([][]byte).\nfunc (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {\n\tb, err := o.DecodeRawBytes(true)\n\tif err != nil {\n\t\treturn err\n\t}\n\tv := structPointer_BytesSlice(base, p.field)\n\t*v = append(*v, b)\n\treturn nil\n}\n\n// Decode a map field.\nfunc (o *Buffer) dec_new_map(p *Properties, base structPointer) error {\n\traw, err := o.DecodeRawBytes(false)\n\tif err != nil {\n\t\treturn err\n\t}\n\toi := o.index       // index at the end of this map entry\n\to.index -= len(raw) // move buffer back to start of map entry\n\n\tmptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V\n\tif mptr.Elem().IsNil() {\n\t\tmptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))\n\t}\n\tv := mptr.Elem() // map[K]V\n\n\t// Prepare addressable doubly-indirect 
placeholders for the key and value types.\n\t// See enc_new_map for why.\n\tkeyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K\n\tkeybase := toStructPointer(keyptr.Addr())                  // **K\n\n\tvar valbase structPointer\n\tvar valptr reflect.Value\n\tswitch p.mtype.Elem().Kind() {\n\tcase reflect.Slice:\n\t\t// []byte\n\t\tvar dummy []byte\n\t\tvalptr = reflect.ValueOf(&dummy)  // *[]byte\n\t\tvalbase = toStructPointer(valptr) // *[]byte\n\tcase reflect.Ptr:\n\t\t// message; valptr is **Msg; need to allocate the intermediate pointer\n\t\tvalptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V\n\t\tvalptr.Set(reflect.New(valptr.Type().Elem()))\n\t\tvalbase = toStructPointer(valptr)\n\tdefault:\n\t\t// everything else\n\t\tvalptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V\n\t\tvalbase = toStructPointer(valptr.Addr())                   // **V\n\t}\n\n\t// Decode.\n\t// This parses a restricted wire format, namely the encoding of a message\n\t// with two fields. See enc_new_map for the format.\n\tfor o.index < oi {\n\t\t// tagcode for key and value properties are always a single byte\n\t\t// because they have tags 1 and 2.\n\t\ttagcode := o.buf[o.index]\n\t\to.index++\n\t\tswitch tagcode {\n\t\tcase p.mkeyprop.tagcode[0]:\n\t\t\tif err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase p.mvalprop.tagcode[0]:\n\t\t\tif err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t\t// TODO: Should we silently skip this instead?\n\t\t\treturn fmt.Errorf(\"proto: bad map data tag %d\", tagcode)\n\t\t}\n\t}\n\tkeyelem, valelem := keyptr.Elem(), valptr.Elem()\n\tif !keyelem.IsValid() {\n\t\tkeyelem = reflect.Zero(p.mtype.Key())\n\t}\n\tif !valelem.IsValid() {\n\t\tvalelem = reflect.Zero(p.mtype.Elem())\n\t}\n\n\tv.SetMapIndex(keyelem, valelem)\n\treturn nil\n}\n\n// Decode a group.\nfunc (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {\n\tbas := structPointer_GetStructPointer(base, p.field)\n\tif structPointer_IsNil(bas) {\n\t\t// allocate new nested message\n\t\tbas = toStructPointer(reflect.New(p.stype))\n\t\tstructPointer_SetStructPointer(base, p.field, bas)\n\t}\n\treturn o.unmarshalType(p.stype, p.sprop, true, bas)\n}\n\n// Decode an embedded message.\nfunc (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {\n\traw, e := o.DecodeRawBytes(false)\n\tif e != nil {\n\t\treturn e\n\t}\n\n\tbas := structPointer_GetStructPointer(base, p.field)\n\tif structPointer_IsNil(bas) {\n\t\t// allocate new nested message\n\t\tbas = toStructPointer(reflect.New(p.stype))\n\t\tstructPointer_SetStructPointer(base, p.field, bas)\n\t}\n\n\t// If the object can unmarshal itself, let it.\n\tif p.isUnmarshaler {\n\t\tiv := structPointer_Interface(bas, p.stype)\n\t\treturn iv.(Unmarshaler).Unmarshal(raw)\n\t}\n\n\tobuf := o.buf\n\toi := o.index\n\to.buf = raw\n\to.index = 0\n\n\terr = o.unmarshalType(p.stype, p.sprop, false, bas)\n\to.buf = obuf\n\to.index = oi\n\n\treturn err\n}\n\n// Decode a slice of embedded messages.\nfunc (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {\n\treturn o.dec_slice_struct(p, false, base)\n}\n\n// Decode a slice of embedded groups.\nfunc (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {\n\treturn o.dec_slice_struct(p, true, base)\n}\n\n// Decode a slice of structs ([]*struct).\nfunc (o *Buffer) dec_slice_struct(p 
*Properties, is_group bool, base structPointer) error {\n\tv := reflect.New(p.stype)\n\tbas := toStructPointer(v)\n\tstructPointer_StructPointerSlice(base, p.field).Append(bas)\n\n\tif is_group {\n\t\terr := o.unmarshalType(p.stype, p.sprop, is_group, bas)\n\t\treturn err\n\t}\n\n\traw, err := o.DecodeRawBytes(false)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// If the object can unmarshal itself, let it.\n\tif p.isUnmarshaler {\n\t\tiv := v.Interface()\n\t\treturn iv.(Unmarshaler).Unmarshal(raw)\n\t}\n\n\tobuf := o.buf\n\toi := o.index\n\to.buf = raw\n\to.index = 0\n\n\terr = o.unmarshalType(p.stype, p.sprop, is_group, bas)\n\n\to.buf = obuf\n\to.index = oi\n\n\treturn err\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/decode_test.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// +build go1.7\n\npackage proto_test\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/golang/protobuf/proto\"\n\ttpb \"github.com/golang/protobuf/proto/proto3_proto\"\n)\n\nvar (\n\tbytesBlackhole []byte\n\tmsgBlackhole   = new(tpb.Message)\n)\n\n// BenchmarkVarint32ArraySmall shows the performance on an array of small int32 fields (1 and\n// 2 bytes long).\nfunc BenchmarkVarint32ArraySmall(b *testing.B) {\n\tfor i := uint(1); i <= 10; i++ {\n\t\tdist := genInt32Dist([7]int{0, 3, 1}, 1<<i)\n\t\traw, err := proto.Marshal(&tpb.Message{\n\t\t\tShortKey: dist,\n\t\t})\n\t\tif err != nil {\n\t\t\tb.Error(\"wrong encode\", err)\n\t\t}\n\t\tb.Run(fmt.Sprintf(\"Len%v\", len(dist)), func(b *testing.B) {\n\t\t\tscratchBuf := proto.NewBuffer(nil)\n\t\t\tb.ResetTimer()\n\t\t\tfor k := 0; k < b.N; k++ {\n\t\t\t\tscratchBuf.SetBuf(raw)\n\t\t\t\tmsgBlackhole.Reset()\n\t\t\t\tif err := scratchBuf.Unmarshal(msgBlackhole); err != nil {\n\t\t\t\t\tb.Error(\"wrong decode\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkVarint32ArrayLarge shows the performance on an array of large int32 fields (3 and\n// 4 bytes long, with a small number of 1, 2, 5 and 10 byte long versions).\nfunc BenchmarkVarint32ArrayLarge(b *testing.B) {\n\tfor i := uint(1); i <= 10; i++ {\n\t\tdist := genInt32Dist([7]int{0, 1, 2, 4, 8, 1, 1}, 1<<i)\n\t\traw, err := proto.Marshal(&tpb.Message{\n\t\t\tShortKey: dist,\n\t\t})\n\t\tif err != nil {\n\t\t\tb.Error(\"wrong encode\", err)\n\t\t}\n\t\tb.Run(fmt.Sprintf(\"Len%v\", len(dist)), func(b *testing.B) {\n\t\t\tscratchBuf := proto.NewBuffer(nil)\n\t\t\tb.ResetTimer()\n\t\t\tfor k := 0; k < b.N; k++ {\n\t\t\t\tscratchBuf.SetBuf(raw)\n\t\t\t\tmsgBlackhole.Reset()\n\t\t\t\tif err := scratchBuf.Unmarshal(msgBlackhole); err != nil {\n\t\t\t\t\tb.Error(\"wrong decode\", 
err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkVarint64ArraySmall shows the performance on an array of small int64 fields (1 and\n// 2 bytes long).\nfunc BenchmarkVarint64ArraySmall(b *testing.B) {\n\tfor i := uint(1); i <= 10; i++ {\n\t\tdist := genUint64Dist([11]int{0, 3, 1}, 1<<i)\n\t\traw, err := proto.Marshal(&tpb.Message{\n\t\t\tKey: dist,\n\t\t})\n\t\tif err != nil {\n\t\t\tb.Error(\"wrong encode\", err)\n\t\t}\n\t\tb.Run(fmt.Sprintf(\"Len%v\", len(dist)), func(b *testing.B) {\n\t\t\tscratchBuf := proto.NewBuffer(nil)\n\t\t\tb.ResetTimer()\n\t\t\tfor k := 0; k < b.N; k++ {\n\t\t\t\tscratchBuf.SetBuf(raw)\n\t\t\t\tmsgBlackhole.Reset()\n\t\t\t\tif err := scratchBuf.Unmarshal(msgBlackhole); err != nil {\n\t\t\t\t\tb.Error(\"wrong decode\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkVarint64ArrayLarge shows the performance on an array of large int64 fields (6, 7,\n// and 8 bytes long with a small number of the other sizes).\nfunc BenchmarkVarint64ArrayLarge(b *testing.B) {\n\tfor i := uint(1); i <= 10; i++ {\n\t\tdist := genUint64Dist([11]int{0, 1, 1, 2, 4, 8, 16, 32, 16, 1, 1}, 1<<i)\n\t\traw, err := proto.Marshal(&tpb.Message{\n\t\t\tKey: dist,\n\t\t})\n\t\tif err != nil {\n\t\t\tb.Error(\"wrong encode\", err)\n\t\t}\n\t\tb.Run(fmt.Sprintf(\"Len%v\", len(dist)), func(b *testing.B) {\n\t\t\tscratchBuf := proto.NewBuffer(nil)\n\t\t\tb.ResetTimer()\n\t\t\tfor k := 0; k < b.N; k++ {\n\t\t\t\tscratchBuf.SetBuf(raw)\n\t\t\t\tmsgBlackhole.Reset()\n\t\t\t\tif err := scratchBuf.Unmarshal(msgBlackhole); err != nil {\n\t\t\t\t\tb.Error(\"wrong decode\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkVarint64ArrayMixed shows the performance of lots of small messages, each\n// containing a small number of large (3, 4, and 5 byte) repeated int64s.\nfunc BenchmarkVarint64ArrayMixed(b *testing.B) {\n\tfor i := uint(1); i <= 1<<5; i <<= 1 {\n\t\tdist := genUint64Dist([11]int{0, 0, 0, 4, 6, 4, 0, 0, 0, 0, 0}, int(i))\n\t\t// number of sub fields\n\t\tfor k := uint(1); k <= 1<<10; k <<= 2 {\n\t\t\tmsg := &tpb.Message{}\n\t\t\tfor m := uint(0); m < k; m++ {\n\t\t\t\tmsg.Children = append(msg.Children, &tpb.Message{\n\t\t\t\t\tKey: dist,\n\t\t\t\t})\n\t\t\t}\n\t\t\traw, err := proto.Marshal(msg)\n\t\t\tif err != nil {\n\t\t\t\tb.Error(\"wrong encode\", err)\n\t\t\t}\n\t\t\tb.Run(fmt.Sprintf(\"Fields%vLen%v\", k, i), func(b *testing.B) {\n\t\t\t\tscratchBuf := proto.NewBuffer(nil)\n\t\t\t\tb.ResetTimer()\n\t\t\t\tfor k := 0; k < b.N; k++ {\n\t\t\t\t\tscratchBuf.SetBuf(raw)\n\t\t\t\t\tmsgBlackhole.Reset()\n\t\t\t\t\tif err := scratchBuf.Unmarshal(msgBlackhole); err != nil {\n\t\t\t\t\t\tb.Error(\"wrong decode\", err)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n}\n\n// genInt32Dist generates a slice of ints that will match the size distribution of dist.\n// A size of 6 corresponds to a max length varint32, which is 10 bytes.  The distribution\n// is 1-indexed. (i.e. 
the value at index 1 is how many 1 byte ints to create).\nfunc genInt32Dist(dist [7]int, count int) (dest []int32) {\n\tfor i := 0; i < count; i++ {\n\t\tfor k := 0; k < len(dist); k++ {\n\t\t\tvar num int32\n\t\t\tswitch k {\n\t\t\tcase 1:\n\t\t\t\tnum = 1<<7 - 1\n\t\t\tcase 2:\n\t\t\t\tnum = 1<<14 - 1\n\t\t\tcase 3:\n\t\t\t\tnum = 1<<21 - 1\n\t\t\tcase 4:\n\t\t\t\tnum = 1<<28 - 1\n\t\t\tcase 5:\n\t\t\t\tnum = 1<<29 - 1\n\t\t\tcase 6:\n\t\t\t\tnum = -1\n\t\t\t}\n\t\t\tfor m := 0; m < dist[k]; m++ {\n\t\t\t\tdest = append(dest, num)\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\n// genUint64Dist generates a slice of ints that will match the size distribution of dist.\n// The distribution is 1-indexed. (i.e. the value at index 1 is how many 1 byte ints to create).\nfunc genUint64Dist(dist [11]int, count int) (dest []uint64) {\n\tfor i := 0; i < count; i++ {\n\t\tfor k := 0; k < len(dist); k++ {\n\t\t\tvar num uint64\n\t\t\tswitch k {\n\t\t\tcase 1:\n\t\t\t\tnum = 1<<7 - 1\n\t\t\tcase 2:\n\t\t\t\tnum = 1<<14 - 1\n\t\t\tcase 3:\n\t\t\t\tnum = 1<<21 - 1\n\t\t\tcase 4:\n\t\t\t\tnum = 1<<28 - 1\n\t\t\tcase 5:\n\t\t\t\tnum = 1<<35 - 1\n\t\t\tcase 6:\n\t\t\t\tnum = 1<<42 - 1\n\t\t\tcase 7:\n\t\t\t\tnum = 1<<49 - 1\n\t\t\tcase 8:\n\t\t\t\tnum = 1<<56 - 1\n\t\t\tcase 9:\n\t\t\t\tnum = 1<<63 - 1\n\t\t\tcase 10:\n\t\t\t\tnum = 1<<64 - 1\n\t\t\t}\n\t\t\tfor m := 0; m < dist[k]; m++ {\n\t\t\t\tdest = append(dest, num)\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n\n// BenchmarkDecodeEmpty measures the overhead of doing the minimal possible decode.\nfunc BenchmarkDecodeEmpty(b *testing.B) {\n\traw, err := proto.Marshal(&tpb.Message{})\n\tif err != nil {\n\t\tb.Error(\"wrong encode\", err)\n\t}\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tif err := proto.Unmarshal(raw, msgBlackhole); err != nil {\n\t\t\tb.Error(\"wrong decode\", err)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/encode.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto\n\n/*\n * Routines for encoding data into the wire format for protocol buffers.\n */\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"sort\"\n)\n\n// RequiredNotSetError is the error returned if Marshal is called with\n// a protocol buffer struct whose required fields have not\n// all been initialized. It is also the error returned if Unmarshal is\n// called with an encoded protocol buffer that does not include all the\n// required fields.\n//\n// When printed, RequiredNotSetError reports the first unset required field in a\n// message. 
If the field cannot be precisely determined, it is reported as\n// \"{Unknown}\".\ntype RequiredNotSetError struct {\n\tfield string\n}\n\nfunc (e *RequiredNotSetError) Error() string {\n\treturn fmt.Sprintf(\"proto: required field %q not set\", e.field)\n}\n\nvar (\n\t// errRepeatedHasNil is the error returned if Marshal is called with\n\t// a struct with a repeated field containing a nil element.\n\terrRepeatedHasNil = errors.New(\"proto: repeated field has nil element\")\n\n\t// errOneofHasNil is the error returned if Marshal is called with\n\t// a struct with a oneof field containing a nil element.\n\terrOneofHasNil = errors.New(\"proto: oneof field has nil value\")\n\n\t// ErrNil is the error returned if Marshal is called with nil.\n\tErrNil = errors.New(\"proto: Marshal called with nil\")\n\n\t// ErrTooLarge is the error returned if Marshal is called with a\n\t// message that encodes to >2GB.\n\tErrTooLarge = errors.New(\"proto: message encodes to over 2 GB\")\n)\n\n// The fundamental encoders that put bytes on the wire.\n// Those that take integer types all accept uint64 and are\n// therefore of type valueEncoder.\n\nconst maxVarintBytes = 10 // maximum length of a varint\n\n// maxMarshalSize is the largest allowed size of an encoded protobuf,\n// since C++ and Java use signed int32s for the size.\nconst maxMarshalSize = 1<<31 - 1\n\n// EncodeVarint returns the varint encoding of x.\n// This is the format for the\n// int32, int64, uint32, uint64, bool, and enum\n// protocol buffer types.\n// Not used by the package itself, but helpful to clients\n// wishing to use the same encoding.\nfunc EncodeVarint(x uint64) []byte {\n\tvar buf [maxVarintBytes]byte\n\tvar n int\n\tfor n = 0; x > 127; n++ {\n\t\tbuf[n] = 0x80 | uint8(x&0x7F)\n\t\tx >>= 7\n\t}\n\tbuf[n] = uint8(x)\n\tn++\n\treturn buf[0:n]\n}\n\n// EncodeVarint writes a varint-encoded integer to the Buffer.\n// This is the format for the\n// int32, int64, uint32, uint64, bool, and enum\n// protocol buffer types.\nfunc (p *Buffer) EncodeVarint(x uint64) error {\n\tfor x >= 1<<7 {\n\t\tp.buf = append(p.buf, uint8(x&0x7f|0x80))\n\t\tx >>= 7\n\t}\n\tp.buf = append(p.buf, uint8(x))\n\treturn nil\n}\n\n// SizeVarint returns the varint encoding size of an integer.\nfunc SizeVarint(x uint64) int {\n\treturn sizeVarint(x)\n}\n\nfunc sizeVarint(x uint64) (n int) {\n\tfor {\n\t\tn++\n\t\tx >>= 7\n\t\tif x == 0 {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn n\n}\n\n// EncodeFixed64 writes a 64-bit integer to the Buffer.\n// This is the format for the\n// fixed64, sfixed64, and double protocol buffer types.\nfunc (p *Buffer) EncodeFixed64(x uint64) error {\n\tp.buf = append(p.buf,\n\t\tuint8(x),\n\t\tuint8(x>>8),\n\t\tuint8(x>>16),\n\t\tuint8(x>>24),\n\t\tuint8(x>>32),\n\t\tuint8(x>>40),\n\t\tuint8(x>>48),\n\t\tuint8(x>>56))\n\treturn nil\n}\n\nfunc sizeFixed64(x uint64) int {\n\treturn 8\n}\n\n// EncodeFixed32 writes a 32-bit integer to the Buffer.\n// This is the format for the\n// fixed32, sfixed32, and float protocol buffer types.\nfunc (p *Buffer) EncodeFixed32(x uint64) error {\n\tp.buf = append(p.buf,\n\t\tuint8(x),\n\t\tuint8(x>>8),\n\t\tuint8(x>>16),\n\t\tuint8(x>>24))\n\treturn nil\n}\n\nfunc sizeFixed32(x uint64) int {\n\treturn 4\n}\n\n// EncodeZigzag64 writes a zigzag-encoded 64-bit integer\n// to the Buffer.\n// This is the format used for the sint64 protocol buffer type.\nfunc (p *Buffer) EncodeZigzag64(x uint64) error {\n\t// use signed number to get arithmetic right shift.\n\treturn p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 
63)))\n}\n\nfunc sizeZigzag64(x uint64) int {\n\treturn sizeVarint((x << 1) ^ uint64((int64(x) >> 63)))\n}\n\n// EncodeZigzag32 writes a zigzag-encoded 32-bit integer\n// to the Buffer.\n// This is the format used for the sint32 protocol buffer type.\nfunc (p *Buffer) EncodeZigzag32(x uint64) error {\n\t// use signed number to get arithmetic right shift.\n\treturn p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))\n}\n\nfunc sizeZigzag32(x uint64) int {\n\treturn sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))\n}\n\n// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.\n// This is the format used for the bytes protocol buffer\n// type and for embedded messages.\nfunc (p *Buffer) EncodeRawBytes(b []byte) error {\n\tp.EncodeVarint(uint64(len(b)))\n\tp.buf = append(p.buf, b...)\n\treturn nil\n}\n\nfunc sizeRawBytes(b []byte) int {\n\treturn sizeVarint(uint64(len(b))) +\n\t\tlen(b)\n}\n\n// EncodeStringBytes writes an encoded string to the Buffer.\n// This is the format used for the proto2 string type.\nfunc (p *Buffer) EncodeStringBytes(s string) error {\n\tp.EncodeVarint(uint64(len(s)))\n\tp.buf = append(p.buf, s...)\n\treturn nil\n}\n\nfunc sizeStringBytes(s string) int {\n\treturn sizeVarint(uint64(len(s))) +\n\t\tlen(s)\n}\n\n// Marshaler is the interface representing objects that can marshal themselves.\ntype Marshaler interface {\n\tMarshal() ([]byte, error)\n}\n\n// Marshal takes the protocol buffer\n// and encodes it into the wire format, returning the data.\nfunc Marshal(pb Message) ([]byte, error) {\n\t// Can the object marshal itself?\n\tif m, ok := pb.(Marshaler); ok {\n\t\treturn m.Marshal()\n\t}\n\tp := NewBuffer(nil)\n\terr := p.Marshal(pb)\n\tif p.buf == nil && err == nil {\n\t\t// Return a non-nil slice on success.\n\t\treturn []byte{}, nil\n\t}\n\treturn p.buf, err\n}\n\n// EncodeMessage writes the protocol buffer to the Buffer,\n// prefixed by a varint-encoded length.\nfunc (p *Buffer) EncodeMessage(pb Message) error {\n\tt, base, err := getbase(pb)\n\tif structPointer_IsNil(base) {\n\t\treturn ErrNil\n\t}\n\tif err == nil {\n\t\tvar state errorState\n\t\terr = p.enc_len_struct(GetProperties(t.Elem()), base, &state)\n\t}\n\treturn err\n}\n\n// Marshal takes the protocol buffer\n// and encodes it into the wire format, writing the result to the\n// Buffer.\nfunc (p *Buffer) Marshal(pb Message) error {\n\t// Can the object marshal itself?\n\tif m, ok := pb.(Marshaler); ok {\n\t\tdata, err := m.Marshal()\n\t\tp.buf = append(p.buf, data...)\n\t\treturn err\n\t}\n\n\tt, base, err := getbase(pb)\n\tif structPointer_IsNil(base) {\n\t\treturn ErrNil\n\t}\n\tif err == nil {\n\t\terr = p.enc_struct(GetProperties(t.Elem()), base)\n\t}\n\n\tif collectStats {\n\t\t(stats).Encode++ // Parens are to work around a goimports bug.\n\t}\n\n\tif len(p.buf) > maxMarshalSize {\n\t\treturn ErrTooLarge\n\t}\n\treturn err\n}\n\n// Size returns the encoded size of a protocol buffer.\nfunc Size(pb Message) (n int) {\n\t// Can the object marshal itself?  
If so, Size is slow.\n\t// TODO: add Size to Marshaler, or add a Sizer interface.\n\tif m, ok := pb.(Marshaler); ok {\n\t\tb, _ := m.Marshal()\n\t\treturn len(b)\n\t}\n\n\tt, base, err := getbase(pb)\n\tif structPointer_IsNil(base) {\n\t\treturn 0\n\t}\n\tif err == nil {\n\t\tn = size_struct(GetProperties(t.Elem()), base)\n\t}\n\n\tif collectStats {\n\t\t(stats).Size++ // Parens are to work around a goimports bug.\n\t}\n\n\treturn\n}\n\n// Individual type encoders.\n\n// Encode a bool.\nfunc (o *Buffer) enc_bool(p *Properties, base structPointer) error {\n\tv := *structPointer_Bool(base, p.field)\n\tif v == nil {\n\t\treturn ErrNil\n\t}\n\tx := 0\n\tif *v {\n\t\tx = 1\n\t}\n\to.buf = append(o.buf, p.tagcode...)\n\tp.valEnc(o, uint64(x))\n\treturn nil\n}\n\nfunc (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {\n\tv := *structPointer_BoolVal(base, p.field)\n\tif !v {\n\t\treturn ErrNil\n\t}\n\to.buf = append(o.buf, p.tagcode...)\n\tp.valEnc(o, 1)\n\treturn nil\n}\n\nfunc size_bool(p *Properties, base structPointer) int {\n\tv := *structPointer_Bool(base, p.field)\n\tif v == nil {\n\t\treturn 0\n\t}\n\treturn len(p.tagcode) + 1 // each bool takes exactly one byte\n}\n\nfunc size_proto3_bool(p *Properties, base structPointer) int {\n\tv := *structPointer_BoolVal(base, p.field)\n\tif !v && !p.oneof {\n\t\treturn 0\n\t}\n\treturn len(p.tagcode) + 1 // each bool takes exactly one byte\n}\n\n// Encode an int32.\nfunc (o *Buffer) enc_int32(p *Properties, base structPointer) error {\n\tv := structPointer_Word32(base, p.field)\n\tif word32_IsNil(v) {\n\t\treturn ErrNil\n\t}\n\tx := int32(word32_Get(v)) // permit sign extension to use full 64-bit range\n\to.buf = append(o.buf, p.tagcode...)\n\tp.valEnc(o, uint64(x))\n\treturn nil\n}\n\nfunc (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {\n\tv := structPointer_Word32Val(base, p.field)\n\tx := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range\n\tif x == 0 {\n\t\treturn ErrNil\n\t}\n\to.buf = append(o.buf, p.tagcode...)\n\tp.valEnc(o, uint64(x))\n\treturn nil\n}\n\nfunc size_int32(p *Properties, base structPointer) (n int) {\n\tv := structPointer_Word32(base, p.field)\n\tif word32_IsNil(v) {\n\t\treturn 0\n\t}\n\tx := int32(word32_Get(v)) // permit sign extension to use full 64-bit range\n\tn += len(p.tagcode)\n\tn += p.valSize(uint64(x))\n\treturn\n}\n\nfunc size_proto3_int32(p *Properties, base structPointer) (n int) {\n\tv := structPointer_Word32Val(base, p.field)\n\tx := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range\n\tif x == 0 && !p.oneof {\n\t\treturn 0\n\t}\n\tn += len(p.tagcode)\n\tn += p.valSize(uint64(x))\n\treturn\n}\n\n// Encode a uint32.\n// Exactly the same as int32, except for no sign extension.\nfunc (o *Buffer) enc_uint32(p *Properties, base structPointer) error {\n\tv := structPointer_Word32(base, p.field)\n\tif word32_IsNil(v) {\n\t\treturn ErrNil\n\t}\n\tx := word32_Get(v)\n\to.buf = append(o.buf, p.tagcode...)\n\tp.valEnc(o, uint64(x))\n\treturn nil\n}\n\nfunc (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {\n\tv := structPointer_Word32Val(base, p.field)\n\tx := word32Val_Get(v)\n\tif x == 0 {\n\t\treturn ErrNil\n\t}\n\to.buf = append(o.buf, p.tagcode...)\n\tp.valEnc(o, uint64(x))\n\treturn nil\n}\n\nfunc size_uint32(p *Properties, base structPointer) (n int) {\n\tv := structPointer_Word32(base, p.field)\n\tif word32_IsNil(v) {\n\t\treturn 0\n\t}\n\tx := word32_Get(v)\n\tn += len(p.tagcode)\n\tn += 
p.valSize(uint64(x))\n\treturn\n}\n\nfunc size_proto3_uint32(p *Properties, base structPointer) (n int) {\n\tv := structPointer_Word32Val(base, p.field)\n\tx := word32Val_Get(v)\n\tif x == 0 && !p.oneof {\n\t\treturn 0\n\t}\n\tn += len(p.tagcode)\n\tn += p.valSize(uint64(x))\n\treturn\n}\n\n// Encode an int64.\nfunc (o *Buffer) enc_int64(p *Properties, base structPointer) error {\n\tv := structPointer_Word64(base, p.field)\n\tif word64_IsNil(v) {\n\t\treturn ErrNil\n\t}\n\tx := word64_Get(v)\n\to.buf = append(o.buf, p.tagcode...)\n\tp.valEnc(o, x)\n\treturn nil\n}\n\nfunc (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {\n\tv := structPointer_Word64Val(base, p.field)\n\tx := word64Val_Get(v)\n\tif x == 0 {\n\t\treturn ErrNil\n\t}\n\to.buf = append(o.buf, p.tagcode...)\n\tp.valEnc(o, x)\n\treturn nil\n}\n\nfunc size_int64(p *Properties, base structPointer) (n int) {\n\tv := structPointer_Word64(base, p.field)\n\tif word64_IsNil(v) {\n\t\treturn 0\n\t}\n\tx := word64_Get(v)\n\tn += len(p.tagcode)\n\tn += p.valSize(x)\n\treturn\n}\n\nfunc size_proto3_int64(p *Properties, base structPointer) (n int) {\n\tv := structPointer_Word64Val(base, p.field)\n\tx := word64Val_Get(v)\n\tif x == 0 && !p.oneof {\n\t\treturn 0\n\t}\n\tn += len(p.tagcode)\n\tn += p.valSize(x)\n\treturn\n}\n\n// Encode a string.\nfunc (o *Buffer) enc_string(p *Properties, base structPointer) error {\n\tv := *structPointer_String(base, p.field)\n\tif v == nil {\n\t\treturn ErrNil\n\t}\n\tx := *v\n\to.buf = append(o.buf, p.tagcode...)\n\to.EncodeStringBytes(x)\n\treturn nil\n}\n\nfunc (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {\n\tv := *structPointer_StringVal(base, p.field)\n\tif v == \"\" {\n\t\treturn ErrNil\n\t}\n\to.buf = append(o.buf, p.tagcode...)\n\to.EncodeStringBytes(v)\n\treturn nil\n}\n\nfunc size_string(p *Properties, base structPointer) (n int) {\n\tv := *structPointer_String(base, p.field)\n\tif v == nil {\n\t\treturn 0\n\t}\n\tx := *v\n\tn += len(p.tagcode)\n\tn += sizeStringBytes(x)\n\treturn\n}\n\nfunc size_proto3_string(p *Properties, base structPointer) (n int) {\n\tv := *structPointer_StringVal(base, p.field)\n\tif v == \"\" && !p.oneof {\n\t\treturn 0\n\t}\n\tn += len(p.tagcode)\n\tn += sizeStringBytes(v)\n\treturn\n}\n\n// All protocol buffer fields are nillable, but be careful.\nfunc isNil(v reflect.Value) bool {\n\tswitch v.Kind() {\n\tcase reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:\n\t\treturn v.IsNil()\n\t}\n\treturn false\n}\n\n// Encode a message struct.\nfunc (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {\n\tvar state errorState\n\tstructp := structPointer_GetStructPointer(base, p.field)\n\tif structPointer_IsNil(structp) {\n\t\treturn ErrNil\n\t}\n\n\t// Can the object marshal itself?\n\tif p.isMarshaler {\n\t\tm := structPointer_Interface(structp, p.stype).(Marshaler)\n\t\tdata, err := m.Marshal()\n\t\tif err != nil && !state.shouldContinue(err, nil) {\n\t\t\treturn err\n\t\t}\n\t\to.buf = append(o.buf, p.tagcode...)\n\t\to.EncodeRawBytes(data)\n\t\treturn state.err\n\t}\n\n\to.buf = append(o.buf, p.tagcode...)\n\treturn o.enc_len_struct(p.sprop, structp, &state)\n}\n\nfunc size_struct_message(p *Properties, base structPointer) int {\n\tstructp := structPointer_GetStructPointer(base, p.field)\n\tif structPointer_IsNil(structp) {\n\t\treturn 0\n\t}\n\n\t// Can the object marshal itself?\n\tif p.isMarshaler {\n\t\tm := structPointer_Interface(structp, p.stype).(Marshaler)\n\t\tdata, _ := 
m.Marshal()\n\t\tn0 := len(p.tagcode)\n\t\tn1 := sizeRawBytes(data)\n\t\treturn n0 + n1\n\t}\n\n\tn0 := len(p.tagcode)\n\tn1 := size_struct(p.sprop, structp)\n\tn2 := sizeVarint(uint64(n1)) // size of encoded length\n\treturn n0 + n1 + n2\n}\n\n// Encode a group struct.\nfunc (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {\n\tvar state errorState\n\tb := structPointer_GetStructPointer(base, p.field)\n\tif structPointer_IsNil(b) {\n\t\treturn ErrNil\n\t}\n\n\to.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))\n\terr := o.enc_struct(p.sprop, b)\n\tif err != nil && !state.shouldContinue(err, nil) {\n\t\treturn err\n\t}\n\to.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))\n\treturn state.err\n}\n\nfunc size_struct_group(p *Properties, base structPointer) (n int) {\n\tb := structPointer_GetStructPointer(base, p.field)\n\tif structPointer_IsNil(b) {\n\t\treturn 0\n\t}\n\n\tn += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))\n\tn += size_struct(p.sprop, b)\n\tn += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))\n\treturn\n}\n\n// Encode a slice of bools ([]bool).\nfunc (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {\n\ts := *structPointer_BoolSlice(base, p.field)\n\tl := len(s)\n\tif l == 0 {\n\t\treturn ErrNil\n\t}\n\tfor _, x := range s {\n\t\to.buf = append(o.buf, p.tagcode...)\n\t\tv := uint64(0)\n\t\tif x {\n\t\t\tv = 1\n\t\t}\n\t\tp.valEnc(o, v)\n\t}\n\treturn nil\n}\n\nfunc size_slice_bool(p *Properties, base structPointer) int {\n\ts := *structPointer_BoolSlice(base, p.field)\n\tl := len(s)\n\tif l == 0 {\n\t\treturn 0\n\t}\n\treturn l * (len(p.tagcode) + 1) // each bool takes exactly one byte\n}\n\n// Encode a slice of bools ([]bool) in packed format.\nfunc (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {\n\ts := *structPointer_BoolSlice(base, p.field)\n\tl := len(s)\n\tif l == 0 {\n\t\treturn ErrNil\n\t}\n\to.buf = append(o.buf, p.tagcode...)\n\to.EncodeVarint(uint64(l)) // each bool takes exactly one byte\n\tfor _, x := range s {\n\t\tv := uint64(0)\n\t\tif x {\n\t\t\tv = 1\n\t\t}\n\t\tp.valEnc(o, v)\n\t}\n\treturn nil\n}\n\nfunc size_slice_packed_bool(p *Properties, base structPointer) (n int) {\n\ts := *structPointer_BoolSlice(base, p.field)\n\tl := len(s)\n\tif l == 0 {\n\t\treturn 0\n\t}\n\tn += len(p.tagcode)\n\tn += sizeVarint(uint64(l))\n\tn += l // each bool takes exactly one byte\n\treturn\n}\n\n// Encode a slice of bytes ([]byte).\nfunc (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {\n\ts := *structPointer_Bytes(base, p.field)\n\tif s == nil {\n\t\treturn ErrNil\n\t}\n\to.buf = append(o.buf, p.tagcode...)\n\to.EncodeRawBytes(s)\n\treturn nil\n}\n\nfunc (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {\n\ts := *structPointer_Bytes(base, p.field)\n\tif len(s) == 0 {\n\t\treturn ErrNil\n\t}\n\to.buf = append(o.buf, p.tagcode...)\n\to.EncodeRawBytes(s)\n\treturn nil\n}\n\nfunc size_slice_byte(p *Properties, base structPointer) (n int) {\n\ts := *structPointer_Bytes(base, p.field)\n\tif s == nil && !p.oneof {\n\t\treturn 0\n\t}\n\tn += len(p.tagcode)\n\tn += sizeRawBytes(s)\n\treturn\n}\n\nfunc size_proto3_slice_byte(p *Properties, base structPointer) (n int) {\n\ts := *structPointer_Bytes(base, p.field)\n\tif len(s) == 0 && !p.oneof {\n\t\treturn 0\n\t}\n\tn += len(p.tagcode)\n\tn += sizeRawBytes(s)\n\treturn\n}\n\n// Encode a slice of int32s ([]int32).\nfunc (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {\n\ts := 
structPointer_Word32Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn ErrNil\n\t}\n\tfor i := 0; i < l; i++ {\n\t\to.buf = append(o.buf, p.tagcode...)\n\t\tx := int32(s.Index(i)) // permit sign extension to use full 64-bit range\n\t\tp.valEnc(o, uint64(x))\n\t}\n\treturn nil\n}\n\nfunc size_slice_int32(p *Properties, base structPointer) (n int) {\n\ts := structPointer_Word32Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn 0\n\t}\n\tfor i := 0; i < l; i++ {\n\t\tn += len(p.tagcode)\n\t\tx := int32(s.Index(i)) // permit sign extension to use full 64-bit range\n\t\tn += p.valSize(uint64(x))\n\t}\n\treturn\n}\n\n// Encode a slice of int32s ([]int32) in packed format.\nfunc (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {\n\ts := structPointer_Word32Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn ErrNil\n\t}\n\t// TODO: Reuse a Buffer.\n\tbuf := NewBuffer(nil)\n\tfor i := 0; i < l; i++ {\n\t\tx := int32(s.Index(i)) // permit sign extension to use full 64-bit range\n\t\tp.valEnc(buf, uint64(x))\n\t}\n\n\to.buf = append(o.buf, p.tagcode...)\n\to.EncodeVarint(uint64(len(buf.buf)))\n\to.buf = append(o.buf, buf.buf...)\n\treturn nil\n}\n\nfunc size_slice_packed_int32(p *Properties, base structPointer) (n int) {\n\ts := structPointer_Word32Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn 0\n\t}\n\tvar bufSize int\n\tfor i := 0; i < l; i++ {\n\t\tx := int32(s.Index(i)) // permit sign extension to use full 64-bit range\n\t\tbufSize += p.valSize(uint64(x))\n\t}\n\n\tn += len(p.tagcode)\n\tn += sizeVarint(uint64(bufSize))\n\tn += bufSize\n\treturn\n}\n\n// Encode a slice of uint32s ([]uint32).\n// Exactly the same as int32, except for no sign extension.\nfunc (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {\n\ts := structPointer_Word32Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn ErrNil\n\t}\n\tfor i := 0; i < l; i++ {\n\t\to.buf = append(o.buf, p.tagcode...)\n\t\tx := s.Index(i)\n\t\tp.valEnc(o, uint64(x))\n\t}\n\treturn nil\n}\n\nfunc size_slice_uint32(p *Properties, base structPointer) (n int) {\n\ts := structPointer_Word32Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn 0\n\t}\n\tfor i := 0; i < l; i++ {\n\t\tn += len(p.tagcode)\n\t\tx := s.Index(i)\n\t\tn += p.valSize(uint64(x))\n\t}\n\treturn\n}\n\n// Encode a slice of uint32s ([]uint32) in packed format.\n// Exactly the same as int32, except for no sign extension.\nfunc (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {\n\ts := structPointer_Word32Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn ErrNil\n\t}\n\t// TODO: Reuse a Buffer.\n\tbuf := NewBuffer(nil)\n\tfor i := 0; i < l; i++ {\n\t\tp.valEnc(buf, uint64(s.Index(i)))\n\t}\n\n\to.buf = append(o.buf, p.tagcode...)\n\to.EncodeVarint(uint64(len(buf.buf)))\n\to.buf = append(o.buf, buf.buf...)\n\treturn nil\n}\n\nfunc size_slice_packed_uint32(p *Properties, base structPointer) (n int) {\n\ts := structPointer_Word32Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn 0\n\t}\n\tvar bufSize int\n\tfor i := 0; i < l; i++ {\n\t\tbufSize += p.valSize(uint64(s.Index(i)))\n\t}\n\n\tn += len(p.tagcode)\n\tn += sizeVarint(uint64(bufSize))\n\tn += bufSize\n\treturn\n}\n\n// Encode a slice of int64s ([]int64).\nfunc (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {\n\ts := structPointer_Word64Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn ErrNil\n\t}\n\tfor i := 0; i < l; 
i++ {\n\t\to.buf = append(o.buf, p.tagcode...)\n\t\tp.valEnc(o, s.Index(i))\n\t}\n\treturn nil\n}\n\nfunc size_slice_int64(p *Properties, base structPointer) (n int) {\n\ts := structPointer_Word64Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn 0\n\t}\n\tfor i := 0; i < l; i++ {\n\t\tn += len(p.tagcode)\n\t\tn += p.valSize(s.Index(i))\n\t}\n\treturn\n}\n\n// Encode a slice of int64s ([]int64) in packed format.\nfunc (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {\n\ts := structPointer_Word64Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn ErrNil\n\t}\n\t// TODO: Reuse a Buffer.\n\tbuf := NewBuffer(nil)\n\tfor i := 0; i < l; i++ {\n\t\tp.valEnc(buf, s.Index(i))\n\t}\n\n\to.buf = append(o.buf, p.tagcode...)\n\to.EncodeVarint(uint64(len(buf.buf)))\n\to.buf = append(o.buf, buf.buf...)\n\treturn nil\n}\n\nfunc size_slice_packed_int64(p *Properties, base structPointer) (n int) {\n\ts := structPointer_Word64Slice(base, p.field)\n\tl := s.Len()\n\tif l == 0 {\n\t\treturn 0\n\t}\n\tvar bufSize int\n\tfor i := 0; i < l; i++ {\n\t\tbufSize += p.valSize(s.Index(i))\n\t}\n\n\tn += len(p.tagcode)\n\tn += sizeVarint(uint64(bufSize))\n\tn += bufSize\n\treturn\n}\n\n// Encode a slice of slice of bytes ([][]byte).\nfunc (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {\n\tss := *structPointer_BytesSlice(base, p.field)\n\tl := len(ss)\n\tif l == 0 {\n\t\treturn ErrNil\n\t}\n\tfor i := 0; i < l; i++ {\n\t\to.buf = append(o.buf, p.tagcode...)\n\t\to.EncodeRawBytes(ss[i])\n\t}\n\treturn nil\n}\n\nfunc size_slice_slice_byte(p *Properties, base structPointer) (n int) {\n\tss := *structPointer_BytesSlice(base, p.field)\n\tl := len(ss)\n\tif l == 0 {\n\t\treturn 0\n\t}\n\tn += l * len(p.tagcode)\n\tfor i := 0; i < l; i++ {\n\t\tn += sizeRawBytes(ss[i])\n\t}\n\treturn\n}\n\n// Encode a slice of strings ([]string).\nfunc (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {\n\tss := *structPointer_StringSlice(base, p.field)\n\tl := len(ss)\n\tfor i := 0; i < l; i++ {\n\t\to.buf = append(o.buf, p.tagcode...)\n\t\to.EncodeStringBytes(ss[i])\n\t}\n\treturn nil\n}\n\nfunc size_slice_string(p *Properties, base structPointer) (n int) {\n\tss := *structPointer_StringSlice(base, p.field)\n\tl := len(ss)\n\tn += l * len(p.tagcode)\n\tfor i := 0; i < l; i++ {\n\t\tn += sizeStringBytes(ss[i])\n\t}\n\treturn\n}\n\n// Encode a slice of message structs ([]*struct).\nfunc (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {\n\tvar state errorState\n\ts := structPointer_StructPointerSlice(base, p.field)\n\tl := s.Len()\n\n\tfor i := 0; i < l; i++ {\n\t\tstructp := s.Index(i)\n\t\tif structPointer_IsNil(structp) {\n\t\t\treturn errRepeatedHasNil\n\t\t}\n\n\t\t// Can the object marshal itself?\n\t\tif p.isMarshaler {\n\t\t\tm := structPointer_Interface(structp, p.stype).(Marshaler)\n\t\t\tdata, err := m.Marshal()\n\t\t\tif err != nil && !state.shouldContinue(err, nil) {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\to.buf = append(o.buf, p.tagcode...)\n\t\t\to.EncodeRawBytes(data)\n\t\t\tcontinue\n\t\t}\n\n\t\to.buf = append(o.buf, p.tagcode...)\n\t\terr := o.enc_len_struct(p.sprop, structp, &state)\n\t\tif err != nil && !state.shouldContinue(err, nil) {\n\t\t\tif err == ErrNil {\n\t\t\t\treturn errRepeatedHasNil\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t}\n\treturn state.err\n}\n\nfunc size_slice_struct_message(p *Properties, base structPointer) (n int) {\n\ts := structPointer_StructPointerSlice(base, p.field)\n\tl := 
s.Len()\n\tn += l * len(p.tagcode)\n\tfor i := 0; i < l; i++ {\n\t\tstructp := s.Index(i)\n\t\tif structPointer_IsNil(structp) {\n\t\t\treturn // return the size up to this point\n\t\t}\n\n\t\t// Can the object marshal itself?\n\t\tif p.isMarshaler {\n\t\t\tm := structPointer_Interface(structp, p.stype).(Marshaler)\n\t\t\tdata, _ := m.Marshal()\n\t\t\tn += sizeRawBytes(data)\n\t\t\tcontinue\n\t\t}\n\n\t\tn0 := size_struct(p.sprop, structp)\n\t\tn1 := sizeVarint(uint64(n0)) // size of encoded length\n\t\tn += n0 + n1\n\t}\n\treturn\n}\n\n// Encode a slice of group structs ([]*struct).\nfunc (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {\n\tvar state errorState\n\ts := structPointer_StructPointerSlice(base, p.field)\n\tl := s.Len()\n\n\tfor i := 0; i < l; i++ {\n\t\tb := s.Index(i)\n\t\tif structPointer_IsNil(b) {\n\t\t\treturn errRepeatedHasNil\n\t\t}\n\n\t\to.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))\n\n\t\terr := o.enc_struct(p.sprop, b)\n\n\t\tif err != nil && !state.shouldContinue(err, nil) {\n\t\t\tif err == ErrNil {\n\t\t\t\treturn errRepeatedHasNil\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\n\t\to.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))\n\t}\n\treturn state.err\n}\n\nfunc size_slice_struct_group(p *Properties, base structPointer) (n int) {\n\ts := structPointer_StructPointerSlice(base, p.field)\n\tl := s.Len()\n\n\tn += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))\n\tn += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))\n\tfor i := 0; i < l; i++ {\n\t\tb := s.Index(i)\n\t\tif structPointer_IsNil(b) {\n\t\t\treturn // return size up to this point\n\t\t}\n\n\t\tn += size_struct(p.sprop, b)\n\t}\n\treturn\n}\n\n// Encode an extension map.\nfunc (o *Buffer) enc_map(p *Properties, base structPointer) error {\n\texts := structPointer_ExtMap(base, p.field)\n\tif err := encodeExtensionsMap(*exts); err != nil {\n\t\treturn err\n\t}\n\n\treturn o.enc_map_body(*exts)\n}\n\nfunc (o *Buffer) enc_exts(p *Properties, base structPointer) error {\n\texts := structPointer_Extensions(base, p.field)\n\n\tv, mu := exts.extensionsRead()\n\tif v == nil {\n\t\treturn nil\n\t}\n\n\tmu.Lock()\n\tdefer mu.Unlock()\n\tif err := encodeExtensionsMap(v); err != nil {\n\t\treturn err\n\t}\n\n\treturn o.enc_map_body(v)\n}\n\nfunc (o *Buffer) enc_map_body(v map[int32]Extension) error {\n\t// Fast-path for common cases: zero or one extensions.\n\tif len(v) <= 1 {\n\t\tfor _, e := range v {\n\t\t\to.buf = append(o.buf, e.enc...)\n\t\t}\n\t\treturn nil\n\t}\n\n\t// Sort keys to provide a deterministic encoding.\n\tkeys := make([]int, 0, len(v))\n\tfor k := range v {\n\t\tkeys = append(keys, int(k))\n\t}\n\tsort.Ints(keys)\n\n\tfor _, k := range keys {\n\t\to.buf = append(o.buf, v[int32(k)].enc...)\n\t}\n\treturn nil\n}\n\nfunc size_map(p *Properties, base structPointer) int {\n\tv := structPointer_ExtMap(base, p.field)\n\treturn extensionsMapSize(*v)\n}\n\nfunc size_exts(p *Properties, base structPointer) int {\n\tv := structPointer_Extensions(base, p.field)\n\treturn extensionsSize(v)\n}\n\n// Encode a map field.\nfunc (o *Buffer) enc_new_map(p *Properties, base structPointer) error {\n\tvar state errorState // XXX: or do we need to plumb this through?\n\n\t/*\n\t\tA map defined as\n\t\t\tmap<key_type, value_type> map_field = N;\n\t\tis encoded in the same way as\n\t\t\tmessage MapFieldEntry {\n\t\t\t\tkey_type key = 1;\n\t\t\t\tvalue_type value = 2;\n\t\t\t}\n\t\t\trepeated MapFieldEntry map_field = N;\n\t*/\n\n\tv := structPointer_NewAt(base, p.field, p.mtype).Elem() // 
map[K]V\n\tif v.Len() == 0 {\n\t\treturn nil\n\t}\n\n\tkeycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)\n\n\tenc := func() error {\n\t\tif err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil {\n\t\t\treturn err\n\t\t}\n\t\treturn nil\n\t}\n\n\t// Don't sort map keys. It is not required by the spec, and C++ doesn't do it.\n\tfor _, key := range v.MapKeys() {\n\t\tval := v.MapIndex(key)\n\n\t\tkeycopy.Set(key)\n\t\tvalcopy.Set(val)\n\n\t\to.buf = append(o.buf, p.tagcode...)\n\t\tif err := o.enc_len_thing(enc, &state); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc size_new_map(p *Properties, base structPointer) int {\n\tv := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V\n\n\tkeycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)\n\n\tn := 0\n\tfor _, key := range v.MapKeys() {\n\t\tval := v.MapIndex(key)\n\t\tkeycopy.Set(key)\n\t\tvalcopy.Set(val)\n\n\t\t// Tag codes for key and val are the responsibility of the sub-sizer.\n\t\tkeysize := p.mkeyprop.size(p.mkeyprop, keybase)\n\t\tvalsize := p.mvalprop.size(p.mvalprop, valbase)\n\t\tentry := keysize + valsize\n\t\t// Add on tag code and length of map entry itself.\n\t\tn += len(p.tagcode) + sizeVarint(uint64(entry)) + entry\n\t}\n\treturn n\n}\n\n// mapEncodeScratch returns a new reflect.Value matching the map's value type,\n// and a structPointer suitable for passing to an encoder or sizer.\nfunc mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {\n\t// Prepare addressable doubly-indirect placeholders for the key and value types.\n\t// This is needed because the element-type encoders expect **T, but the map iteration produces T.\n\n\tkeycopy = reflect.New(mapType.Key()).Elem()                 // addressable K\n\tkeyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K\n\tkeyptr.Set(keycopy.Addr())                                  //\n\tkeybase = toStructPointer(keyptr.Addr())                    // **K\n\n\t// Value types are more varied and require special handling.\n\tswitch mapType.Elem().Kind() {\n\tcase reflect.Slice:\n\t\t// []byte\n\t\tvar dummy []byte\n\t\tvalcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte\n\t\tvalbase = toStructPointer(valcopy.Addr())\n\tcase reflect.Ptr:\n\t\t// message; the generated field type is map[K]*Msg (so V is *Msg),\n\t\t// so we only need one level of indirection.\n\t\tvalcopy = reflect.New(mapType.Elem()).Elem() // addressable V\n\t\tvalbase = toStructPointer(valcopy.Addr())\n\tdefault:\n\t\t// everything else\n\t\tvalcopy = reflect.New(mapType.Elem()).Elem()                // addressable V\n\t\tvalptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V\n\t\tvalptr.Set(valcopy.Addr())                                  //\n\t\tvalbase = toStructPointer(valptr.Addr())                    // **V\n\t}\n\treturn\n}\n\n// Encode a struct.\nfunc (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {\n\tvar state errorState\n\t// Encode fields in tag order so that decoders may use optimizations\n\t// that depend on the ordering.\n\t// https://developers.google.com/protocol-buffers/docs/encoding#order\n\tfor _, i := range prop.order {\n\t\tp := prop.Prop[i]\n\t\tif p.enc != nil {\n\t\t\terr := p.enc(o, p, base)\n\t\t\tif err != nil {\n\t\t\t\tif err == ErrNil {\n\t\t\t\t\tif p.Required && state.err == nil 
{\n\t\t\t\t\t\tstate.err = &RequiredNotSetError{p.Name}\n\t\t\t\t\t}\n\t\t\t\t} else if err == errRepeatedHasNil {\n\t\t\t\t\t// Give more context to nil values in repeated fields.\n\t\t\t\t\treturn errors.New(\"repeated field \" + p.OrigName + \" has nil element\")\n\t\t\t\t} else if !state.shouldContinue(err, p) {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(o.buf) > maxMarshalSize {\n\t\t\t\treturn ErrTooLarge\n\t\t\t}\n\t\t}\n\t}\n\n\t// Do oneof fields.\n\tif prop.oneofMarshaler != nil {\n\t\tm := structPointer_Interface(base, prop.stype).(Message)\n\t\tif err := prop.oneofMarshaler(m, o); err == ErrNil {\n\t\t\treturn errOneofHasNil\n\t\t} else if err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Add unrecognized fields at the end.\n\tif prop.unrecField.IsValid() {\n\t\tv := *structPointer_Bytes(base, prop.unrecField)\n\t\tif len(o.buf)+len(v) > maxMarshalSize {\n\t\t\treturn ErrTooLarge\n\t\t}\n\t\tif len(v) > 0 {\n\t\t\to.buf = append(o.buf, v...)\n\t\t}\n\t}\n\n\treturn state.err\n}\n\nfunc size_struct(prop *StructProperties, base structPointer) (n int) {\n\tfor _, i := range prop.order {\n\t\tp := prop.Prop[i]\n\t\tif p.size != nil {\n\t\t\tn += p.size(p, base)\n\t\t}\n\t}\n\n\t// Add unrecognized fields at the end.\n\tif prop.unrecField.IsValid() {\n\t\tv := *structPointer_Bytes(base, prop.unrecField)\n\t\tn += len(v)\n\t}\n\n\t// Factor in any oneof fields.\n\tif prop.oneofSizer != nil {\n\t\tm := structPointer_Interface(base, prop.stype).(Message)\n\t\tn += prop.oneofSizer(m)\n\t}\n\n\treturn\n}\n\nvar zeroes [20]byte // longer than any conceivable sizeVarint\n\n// Encode a struct, preceded by its encoded length (as a varint).\nfunc (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {\n\treturn o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)\n}\n\n// Encode something, preceded by its encoded length (as a varint).\nfunc (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {\n\tiLen := len(o.buf)\n\to.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length\n\tiMsg := len(o.buf)\n\terr := enc()\n\tif err != nil && !state.shouldContinue(err, nil) {\n\t\treturn err\n\t}\n\tlMsg := len(o.buf) - iMsg\n\tlLen := sizeVarint(uint64(lMsg))\n\tswitch x := lLen - (iMsg - iLen); {\n\tcase x > 0: // actual length is x bytes larger than the space we reserved\n\t\t// Move msg x bytes right.\n\t\to.buf = append(o.buf, zeroes[:x]...)\n\t\tcopy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])\n\tcase x < 0: // actual length is x bytes smaller than the space we reserved\n\t\t// Move msg x bytes left.\n\t\tcopy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])\n\t\to.buf = o.buf[:len(o.buf)+x] // x is negative\n\t}\n\t// Encode the length in the reserved space.\n\to.buf = o.buf[:iLen]\n\to.EncodeVarint(uint64(lMsg))\n\to.buf = o.buf[:len(o.buf)+lMsg]\n\treturn state.err\n}\n\n// errorState maintains the first error that occurs and updates that error\n// with additional context.\ntype errorState struct {\n\terr error\n}\n\n// shouldContinue reports whether encoding should continue upon encountering the\n// given error. 
If the error is RequiredNotSetError, shouldContinue returns true\n// and, if this is the first appearance of that error, remembers it for future\n// reporting.\n//\n// If prop is not nil, it may update any error with additional context about the\n// field with the error.\nfunc (s *errorState) shouldContinue(err error, prop *Properties) bool {\n\t// Ignore unset required fields.\n\treqNotSet, ok := err.(*RequiredNotSetError)\n\tif !ok {\n\t\treturn false\n\t}\n\tif s.err == nil {\n\t\tif prop != nil {\n\t\t\terr = &RequiredNotSetError{prop.Name + \".\" + reqNotSet.field}\n\t\t}\n\t\ts.err = err\n\t}\n\treturn true\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/encode_test.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// +build go1.7\n\npackage proto_test\n\nimport (\n\t\"strconv\"\n\t\"testing\"\n\n\t\"github.com/golang/protobuf/proto\"\n\ttpb \"github.com/golang/protobuf/proto/proto3_proto\"\n\t\"github.com/golang/protobuf/ptypes\"\n)\n\nvar (\n\tblackhole []byte\n)\n\n// BenchmarkAny creates increasingly large arbitrary Any messages.  The type is always the\n// same.\nfunc BenchmarkAny(b *testing.B) {\n\tdata := make([]byte, 1<<20)\n\tquantum := 1 << 10\n\tfor i := uint(0); i <= 10; i++ {\n\t\tb.Run(strconv.Itoa(quantum<<i), func(b *testing.B) {\n\t\t\tfor k := 0; k < b.N; k++ {\n\t\t\t\tinner := &tpb.Message{\n\t\t\t\t\tData: data[:quantum<<i],\n\t\t\t\t}\n\t\t\t\touter, err := ptypes.MarshalAny(inner)\n\t\t\t\tif err != nil {\n\t\t\t\t\tb.Error(\"wrong encode\", err)\n\t\t\t\t}\n\t\t\t\traw, err := proto.Marshal(&tpb.Message{\n\t\t\t\t\tAnything: outer,\n\t\t\t\t})\n\t\t\t\tif err != nil {\n\t\t\t\t\tb.Error(\"wrong encode\", err)\n\t\t\t\t}\n\t\t\t\tblackhole = raw\n\t\t\t}\n\t\t})\n\t}\n}\n\n// BenchmarkEmpy measures the overhead of doing the minimal possible encode.\nfunc BenchmarkEmpy(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\traw, err := proto.Marshal(&tpb.Message{})\n\t\tif err != nil {\n\t\t\tb.Error(\"wrong encode\", err)\n\t\t}\n\t\tblackhole = raw\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/equal.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2011 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// Protocol buffer comparison.\n\npackage proto\n\nimport (\n\t\"bytes\"\n\t\"log\"\n\t\"reflect\"\n\t\"strings\"\n)\n\n/*\nEqual returns true iff protocol buffers a and b are equal.\nThe arguments must both be pointers to protocol buffer structs.\n\nEquality is defined in this way:\n  - Two messages are equal iff they are the same type,\n    corresponding fields are equal, unknown field sets\n    are equal, and extensions sets are equal.\n  - Two set scalar fields are equal iff their values are equal.\n    If the fields are of a floating-point type, remember that\n    NaN != x for all x, including NaN. If the message is defined\n    in a proto3 .proto file, fields are not \"set\"; specifically,\n    zero length proto3 \"bytes\" fields are equal (nil == {}).\n  - Two repeated fields are equal iff their lengths are the same,\n    and their corresponding elements are equal. Note a \"bytes\" field,\n    although represented by []byte, is not a repeated field and the\n    rule for the scalar fields described above applies.\n  - Two unset fields are equal.\n  - Two unknown field sets are equal if their current\n    encoded state is equal.\n  - Two extension sets are equal iff they have corresponding\n    elements that are pairwise equal.\n  - Two map fields are equal iff their lengths are the same,\n    and they contain the same set of elements. 
Zero-length map\n    fields are equal.\n  - Every other combination of things are not equal.\n\nThe return value is undefined if a and b are not protocol buffers.\n*/\nfunc Equal(a, b Message) bool {\n\tif a == nil || b == nil {\n\t\treturn a == b\n\t}\n\tv1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)\n\tif v1.Type() != v2.Type() {\n\t\treturn false\n\t}\n\tif v1.Kind() == reflect.Ptr {\n\t\tif v1.IsNil() {\n\t\t\treturn v2.IsNil()\n\t\t}\n\t\tif v2.IsNil() {\n\t\t\treturn false\n\t\t}\n\t\tv1, v2 = v1.Elem(), v2.Elem()\n\t}\n\tif v1.Kind() != reflect.Struct {\n\t\treturn false\n\t}\n\treturn equalStruct(v1, v2)\n}\n\n// v1 and v2 are known to have the same type.\nfunc equalStruct(v1, v2 reflect.Value) bool {\n\tsprop := GetProperties(v1.Type())\n\tfor i := 0; i < v1.NumField(); i++ {\n\t\tf := v1.Type().Field(i)\n\t\tif strings.HasPrefix(f.Name, \"XXX_\") {\n\t\t\tcontinue\n\t\t}\n\t\tf1, f2 := v1.Field(i), v2.Field(i)\n\t\tif f.Type.Kind() == reflect.Ptr {\n\t\t\tif n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {\n\t\t\t\t// both unset\n\t\t\t\tcontinue\n\t\t\t} else if n1 != n2 {\n\t\t\t\t// set/unset mismatch\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tb1, ok := f1.Interface().(raw)\n\t\t\tif ok {\n\t\t\t\tb2 := f2.Interface().(raw)\n\t\t\t\t// RawMessage\n\t\t\t\tif !bytes.Equal(b1.Bytes(), b2.Bytes()) {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tf1, f2 = f1.Elem(), f2.Elem()\n\t\t}\n\t\tif !equalAny(f1, f2, sprop.Prop[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif em1 := v1.FieldByName(\"XXX_InternalExtensions\"); em1.IsValid() {\n\t\tem2 := v2.FieldByName(\"XXX_InternalExtensions\")\n\t\tif !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif em1 := v1.FieldByName(\"XXX_extensions\"); em1.IsValid() {\n\t\tem2 := v2.FieldByName(\"XXX_extensions\")\n\t\tif !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tuf := v1.FieldByName(\"XXX_unrecognized\")\n\tif !uf.IsValid() {\n\t\treturn true\n\t}\n\n\tu1 := uf.Bytes()\n\tu2 := v2.FieldByName(\"XXX_unrecognized\").Bytes()\n\tif !bytes.Equal(u1, u2) {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\n// v1 and v2 are known to have the same type.\n// prop may be nil.\nfunc equalAny(v1, v2 reflect.Value, prop *Properties) bool {\n\tif v1.Type() == protoMessageType {\n\t\tm1, _ := v1.Interface().(Message)\n\t\tm2, _ := v2.Interface().(Message)\n\t\treturn Equal(m1, m2)\n\t}\n\tswitch v1.Kind() {\n\tcase reflect.Bool:\n\t\treturn v1.Bool() == v2.Bool()\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn v1.Float() == v2.Float()\n\tcase reflect.Int32, reflect.Int64:\n\t\treturn v1.Int() == v2.Int()\n\tcase reflect.Interface:\n\t\t// Probably a oneof field; compare the inner values.\n\t\tn1, n2 := v1.IsNil(), v2.IsNil()\n\t\tif n1 || n2 {\n\t\t\treturn n1 == n2\n\t\t}\n\t\te1, e2 := v1.Elem(), v2.Elem()\n\t\tif e1.Type() != e2.Type() {\n\t\t\treturn false\n\t\t}\n\t\treturn equalAny(e1, e2, nil)\n\tcase reflect.Map:\n\t\tif v1.Len() != v2.Len() {\n\t\t\treturn false\n\t\t}\n\t\tfor _, key := range v1.MapKeys() {\n\t\t\tval2 := v2.MapIndex(key)\n\t\t\tif !val2.IsValid() {\n\t\t\t\t// This key was not found in the second map.\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif !equalAny(v1.MapIndex(key), val2, nil) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\tcase reflect.Ptr:\n\t\t// Maps may have nil values in them, so check for 
nil.\n\t\tif v1.IsNil() && v2.IsNil() {\n\t\t\treturn true\n\t\t}\n\t\tif v1.IsNil() != v2.IsNil() {\n\t\t\treturn false\n\t\t}\n\t\treturn equalAny(v1.Elem(), v2.Elem(), prop)\n\tcase reflect.Slice:\n\t\tif v1.Type().Elem().Kind() == reflect.Uint8 {\n\t\t\t// short circuit: []byte\n\n\t\t\t// Edge case: if this is in a proto3 message, a zero length\n\t\t\t// bytes field is considered the zero value.\n\t\t\tif prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tif v1.IsNil() != v2.IsNil() {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))\n\t\t}\n\n\t\tif v1.Len() != v2.Len() {\n\t\t\treturn false\n\t\t}\n\t\tfor i := 0; i < v1.Len(); i++ {\n\t\t\tif !equalAny(v1.Index(i), v2.Index(i), prop) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\tcase reflect.String:\n\t\treturn v1.Interface().(string) == v2.Interface().(string)\n\tcase reflect.Struct:\n\t\treturn equalStruct(v1, v2)\n\tcase reflect.Uint32, reflect.Uint64:\n\t\treturn v1.Uint() == v2.Uint()\n\t}\n\n\t// unknown type, so not a protocol buffer\n\tlog.Printf(\"proto: don't know how to compare %v\", v1)\n\treturn false\n}\n\n// base is the struct type that the extensions are based on.\n// x1 and x2 are InternalExtensions.\nfunc equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {\n\tem1, _ := x1.extensionsRead()\n\tem2, _ := x2.extensionsRead()\n\treturn equalExtMap(base, em1, em2)\n}\n\nfunc equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {\n\tif len(em1) != len(em2) {\n\t\treturn false\n\t}\n\n\tfor extNum, e1 := range em1 {\n\t\te2, ok := em2[extNum]\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\n\t\tm1, m2 := e1.value, e2.value\n\n\t\tif m1 != nil && m2 != nil {\n\t\t\t// Both are unencoded.\n\t\t\tif !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\t// At least one is encoded. To do a semantically correct comparison\n\t\t// we need to unmarshal them first.\n\t\tvar desc *ExtensionDesc\n\t\tif m := extensionMaps[base]; m != nil {\n\t\t\tdesc = m[extNum]\n\t\t}\n\t\tif desc == nil {\n\t\t\tlog.Printf(\"proto: don't know how to compare extension %d of %v\", extNum, base)\n\t\t\tcontinue\n\t\t}\n\t\tvar err error\n\t\tif m1 == nil {\n\t\t\tm1, err = decodeExtension(e1.enc, desc)\n\t\t}\n\t\tif m2 == nil && err == nil {\n\t\t\tm2, err = decodeExtension(e2.enc, desc)\n\t\t}\n\t\tif err != nil {\n\t\t\t// The encoded form is invalid.\n\t\t\tlog.Printf(\"proto: badly encoded extension %d of %v: %v\", extNum, base, err)\n\t\t\treturn false\n\t\t}\n\t\tif !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/equal_test.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2011 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto_test\n\nimport (\n\t\"testing\"\n\n\t. \"github.com/golang/protobuf/proto\"\n\tproto3pb \"github.com/golang/protobuf/proto/proto3_proto\"\n\tpb \"github.com/golang/protobuf/proto/testdata\"\n)\n\n// Four identical base messages.\n// The init function adds extensions to some of them.\nvar messageWithoutExtension = &pb.MyMessage{Count: Int32(7)}\nvar messageWithExtension1a = &pb.MyMessage{Count: Int32(7)}\nvar messageWithExtension1b = &pb.MyMessage{Count: Int32(7)}\nvar messageWithExtension2 = &pb.MyMessage{Count: Int32(7)}\n\n// Two messages with non-message extensions.\nvar messageWithInt32Extension1 = &pb.MyMessage{Count: Int32(8)}\nvar messageWithInt32Extension2 = &pb.MyMessage{Count: Int32(8)}\n\nfunc init() {\n\text1 := &pb.Ext{Data: String(\"Kirk\")}\n\text2 := &pb.Ext{Data: String(\"Picard\")}\n\n\t// messageWithExtension1a has ext1, but never marshals it.\n\tif err := SetExtension(messageWithExtension1a, pb.E_Ext_More, ext1); err != nil {\n\t\tpanic(\"SetExtension on 1a failed: \" + err.Error())\n\t}\n\n\t// messageWithExtension1b is the unmarshaled form of messageWithExtension1a.\n\tif err := SetExtension(messageWithExtension1b, pb.E_Ext_More, ext1); err != nil {\n\t\tpanic(\"SetExtension on 1b failed: \" + err.Error())\n\t}\n\tbuf, err := Marshal(messageWithExtension1b)\n\tif err != nil {\n\t\tpanic(\"Marshal of 1b failed: \" + err.Error())\n\t}\n\tmessageWithExtension1b.Reset()\n\tif err := Unmarshal(buf, messageWithExtension1b); err != nil {\n\t\tpanic(\"Unmarshal of 1b failed: \" + err.Error())\n\t}\n\n\t// messageWithExtension2 has ext2.\n\tif err := SetExtension(messageWithExtension2, pb.E_Ext_More, ext2); err != nil {\n\t\tpanic(\"SetExtension on 2 failed: \" + err.Error())\n\t}\n\n\tif err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(23)); err != nil 
{\n\t\tpanic(\"SetExtension on Int32-1 failed: \" + err.Error())\n\t}\n\tif err := SetExtension(messageWithInt32Extension2, pb.E_Ext_Number, Int32(24)); err != nil {\n\t\tpanic(\"SetExtension on Int32-2 failed: \" + err.Error())\n\t}\n}\n\nvar EqualTests = []struct {\n\tdesc string\n\ta, b Message\n\texp  bool\n}{\n\t{\"different types\", &pb.GoEnum{}, &pb.GoTestField{}, false},\n\t{\"equal empty\", &pb.GoEnum{}, &pb.GoEnum{}, true},\n\t{\"nil vs nil\", nil, nil, true},\n\t{\"typed nil vs typed nil\", (*pb.GoEnum)(nil), (*pb.GoEnum)(nil), true},\n\t{\"typed nil vs empty\", (*pb.GoEnum)(nil), &pb.GoEnum{}, false},\n\t{\"different typed nil\", (*pb.GoEnum)(nil), (*pb.GoTestField)(nil), false},\n\n\t{\"one set field, one unset field\", &pb.GoTestField{Label: String(\"foo\")}, &pb.GoTestField{}, false},\n\t{\"one set field zero, one unset field\", &pb.GoTest{Param: Int32(0)}, &pb.GoTest{}, false},\n\t{\"different set fields\", &pb.GoTestField{Label: String(\"foo\")}, &pb.GoTestField{Label: String(\"bar\")}, false},\n\t{\"equal set\", &pb.GoTestField{Label: String(\"foo\")}, &pb.GoTestField{Label: String(\"foo\")}, true},\n\n\t{\"repeated, one set\", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{}, false},\n\t{\"repeated, different length\", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{F_Int32Repeated: []int32{2}}, false},\n\t{\"repeated, different value\", &pb.GoTest{F_Int32Repeated: []int32{2}}, &pb.GoTest{F_Int32Repeated: []int32{3}}, false},\n\t{\"repeated, equal\", &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, true},\n\t{\"repeated, nil equal nil\", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: nil}, true},\n\t{\"repeated, nil equal empty\", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: []int32{}}, true},\n\t{\"repeated, empty equal nil\", &pb.GoTest{F_Int32Repeated: []int32{}}, &pb.GoTest{F_Int32Repeated: nil}, true},\n\n\t{\n\t\t\"nested, different\",\n\t\t&pb.GoTest{RequiredField: &pb.GoTestField{Label: String(\"foo\")}},\n\t\t&pb.GoTest{RequiredField: &pb.GoTestField{Label: String(\"bar\")}},\n\t\tfalse,\n\t},\n\t{\n\t\t\"nested, equal\",\n\t\t&pb.GoTest{RequiredField: &pb.GoTestField{Label: String(\"wow\")}},\n\t\t&pb.GoTest{RequiredField: &pb.GoTestField{Label: String(\"wow\")}},\n\t\ttrue,\n\t},\n\n\t{\"bytes\", &pb.OtherMessage{Value: []byte(\"foo\")}, &pb.OtherMessage{Value: []byte(\"foo\")}, true},\n\t{\"bytes, empty\", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: []byte{}}, true},\n\t{\"bytes, empty vs nil\", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: nil}, false},\n\t{\n\t\t\"repeated bytes\",\n\t\t&pb.MyMessage{RepBytes: [][]byte{[]byte(\"sham\"), []byte(\"wow\")}},\n\t\t&pb.MyMessage{RepBytes: [][]byte{[]byte(\"sham\"), []byte(\"wow\")}},\n\t\ttrue,\n\t},\n\t// In proto3, []byte{} and []byte(nil) are equal.\n\t{\"proto3 bytes, empty vs nil\", &proto3pb.Message{Data: []byte{}}, &proto3pb.Message{Data: nil}, true},\n\n\t{\"extension vs. no extension\", messageWithoutExtension, messageWithExtension1a, false},\n\t{\"extension vs. same extension\", messageWithExtension1a, messageWithExtension1b, true},\n\t{\"extension vs. different extension\", messageWithExtension1a, messageWithExtension2, false},\n\n\t{\"int32 extension vs. itself\", messageWithInt32Extension1, messageWithInt32Extension1, true},\n\t{\"int32 extension vs. 
a different int32\", messageWithInt32Extension1, messageWithInt32Extension2, false},\n\n\t{\n\t\t\"message with group\",\n\t\t&pb.MyMessage{\n\t\t\tCount: Int32(1),\n\t\t\tSomegroup: &pb.MyMessage_SomeGroup{\n\t\t\t\tGroupField: Int32(5),\n\t\t\t},\n\t\t},\n\t\t&pb.MyMessage{\n\t\t\tCount: Int32(1),\n\t\t\tSomegroup: &pb.MyMessage_SomeGroup{\n\t\t\t\tGroupField: Int32(5),\n\t\t\t},\n\t\t},\n\t\ttrue,\n\t},\n\n\t{\n\t\t\"map same\",\n\t\t&pb.MessageWithMap{NameMapping: map[int32]string{1: \"Ken\"}},\n\t\t&pb.MessageWithMap{NameMapping: map[int32]string{1: \"Ken\"}},\n\t\ttrue,\n\t},\n\t{\n\t\t\"map different entry\",\n\t\t&pb.MessageWithMap{NameMapping: map[int32]string{1: \"Ken\"}},\n\t\t&pb.MessageWithMap{NameMapping: map[int32]string{2: \"Rob\"}},\n\t\tfalse,\n\t},\n\t{\n\t\t\"map different key only\",\n\t\t&pb.MessageWithMap{NameMapping: map[int32]string{1: \"Ken\"}},\n\t\t&pb.MessageWithMap{NameMapping: map[int32]string{2: \"Ken\"}},\n\t\tfalse,\n\t},\n\t{\n\t\t\"map different value only\",\n\t\t&pb.MessageWithMap{NameMapping: map[int32]string{1: \"Ken\"}},\n\t\t&pb.MessageWithMap{NameMapping: map[int32]string{1: \"Rob\"}},\n\t\tfalse,\n\t},\n\t{\n\t\t\"zero-length maps same\",\n\t\t&pb.MessageWithMap{NameMapping: map[int32]string{}},\n\t\t&pb.MessageWithMap{NameMapping: nil},\n\t\ttrue,\n\t},\n\t{\n\t\t\"orders in map don't matter\",\n\t\t&pb.MessageWithMap{NameMapping: map[int32]string{1: \"Ken\", 2: \"Rob\"}},\n\t\t&pb.MessageWithMap{NameMapping: map[int32]string{2: \"Rob\", 1: \"Ken\"}},\n\t\ttrue,\n\t},\n\t{\n\t\t\"oneof same\",\n\t\t&pb.Communique{Union: &pb.Communique_Number{41}},\n\t\t&pb.Communique{Union: &pb.Communique_Number{41}},\n\t\ttrue,\n\t},\n\t{\n\t\t\"oneof one nil\",\n\t\t&pb.Communique{Union: &pb.Communique_Number{41}},\n\t\t&pb.Communique{},\n\t\tfalse,\n\t},\n\t{\n\t\t\"oneof different\",\n\t\t&pb.Communique{Union: &pb.Communique_Number{41}},\n\t\t&pb.Communique{Union: &pb.Communique_Name{\"Bobby Tables\"}},\n\t\tfalse,\n\t},\n}\n\nfunc TestEqual(t *testing.T) {\n\tfor _, tc := range EqualTests {\n\t\tif res := Equal(tc.a, tc.b); res != tc.exp {\n\t\t\tt.Errorf(\"%v: Equal(%v, %v) = %v, want %v\", tc.desc, tc.a, tc.b, res, tc.exp)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/extensions.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto\n\n/*\n * Types and routines for supporting protocol buffer extensions.\n */\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"sync\"\n)\n\n// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.\nvar ErrMissingExtension = errors.New(\"proto: missing extension\")\n\n// ExtensionRange represents a range of message extensions for a protocol buffer.\n// Used in code generated by the protocol compiler.\ntype ExtensionRange struct {\n\tStart, End int32 // both inclusive\n}\n\n// extendableProto is an interface implemented by any protocol buffer generated by the current\n// proto compiler that may be extended.\ntype extendableProto interface {\n\tMessage\n\tExtensionRangeArray() []ExtensionRange\n\textensionsWrite() map[int32]Extension\n\textensionsRead() (map[int32]Extension, sync.Locker)\n}\n\n// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous\n// version of the proto compiler that may be extended.\ntype extendableProtoV1 interface {\n\tMessage\n\tExtensionRangeArray() []ExtensionRange\n\tExtensionMap() map[int32]Extension\n}\n\n// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.\ntype extensionAdapter struct {\n\textendableProtoV1\n}\n\nfunc (e extensionAdapter) extensionsWrite() map[int32]Extension {\n\treturn e.ExtensionMap()\n}\n\nfunc (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {\n\treturn e.ExtensionMap(), notLocker{}\n}\n\n// notLocker is a sync.Locker whose Lock and Unlock methods are nops.\ntype notLocker struct{}\n\nfunc (n notLocker) Lock()   {}\nfunc (n notLocker) Unlock() {}\n\n// extendable returns the extendableProto interface for the given generated proto message.\n// If the proto message has the 
old extension format, it returns a wrapper that implements\n// the extendableProto interface.\nfunc extendable(p interface{}) (extendableProto, bool) {\n\tif ep, ok := p.(extendableProto); ok {\n\t\treturn ep, ok\n\t}\n\tif ep, ok := p.(extendableProtoV1); ok {\n\t\treturn extensionAdapter{ep}, ok\n\t}\n\treturn nil, false\n}\n\n// XXX_InternalExtensions is an internal representation of proto extensions.\n//\n// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,\n// thus gaining the unexported 'extensions' method, which can be called only from the proto package.\n//\n// The methods of XXX_InternalExtensions are not concurrency safe in general,\n// but calls to logically read-only methods such as has and get may be executed concurrently.\ntype XXX_InternalExtensions struct {\n\t// The struct must be indirect so that if a user inadvertently copies a\n\t// generated message and its embedded XXX_InternalExtensions, they\n\t// avoid the mayhem of a copied mutex.\n\t//\n\t// The mutex serializes all logically read-only operations to p.extensionMap.\n\t// It is up to the client to ensure that write operations to p.extensionMap are\n\t// mutually exclusive with other accesses.\n\tp *struct {\n\t\tmu           sync.Mutex\n\t\textensionMap map[int32]Extension\n\t}\n}\n\n// extensionsWrite returns the extension map, creating it on first use.\nfunc (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {\n\tif e.p == nil {\n\t\te.p = new(struct {\n\t\t\tmu           sync.Mutex\n\t\t\textensionMap map[int32]Extension\n\t\t})\n\t\te.p.extensionMap = make(map[int32]Extension)\n\t}\n\treturn e.p.extensionMap\n}\n\n// extensionsRead returns the extensions map for read-only use.  It may be nil.\n// The caller must hold the returned mutex's lock when accessing Elements within the map.\nfunc (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {\n\tif e.p == nil {\n\t\treturn nil, nil\n\t}\n\treturn e.p.extensionMap, &e.p.mu\n}\n\nvar extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()\nvar extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem()\n\n// ExtensionDesc represents an extension specification.\n// Used in generated code from the protocol compiler.\ntype ExtensionDesc struct {\n\tExtendedType  Message     // nil pointer to the type that is being extended\n\tExtensionType interface{} // nil pointer to the extension type\n\tField         int32       // field number\n\tName          string      // fully-qualified name of extension, for text formatting\n\tTag           string      // protobuf tag style\n\tFilename      string      // name of the file in which the extension is defined\n}\n\nfunc (ed *ExtensionDesc) repeated() bool {\n\tt := reflect.TypeOf(ed.ExtensionType)\n\treturn t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8\n}\n\n// Extension represents an extension in a message.\ntype Extension struct {\n\t// When an extension is stored in a message using SetExtension\n\t// only desc and value are set. When the message is marshaled\n\t// enc will be set to the encoded form of the message.\n\t//\n\t// When a message is unmarshaled and contains extensions, each\n\t// extension will have only enc set. 
When such an extension is\n\t// accessed using GetExtension (or GetExtensions) desc and value\n\t// will be set.\n\tdesc  *ExtensionDesc\n\tvalue interface{}\n\tenc   []byte\n}\n\n// SetRawExtension is for testing only.\nfunc SetRawExtension(base Message, id int32, b []byte) {\n\tepb, ok := extendable(base)\n\tif !ok {\n\t\treturn\n\t}\n\textmap := epb.extensionsWrite()\n\textmap[id] = Extension{enc: b}\n}\n\n// isExtensionField returns true iff the given field number is in an extension range.\nfunc isExtensionField(pb extendableProto, field int32) bool {\n\tfor _, er := range pb.ExtensionRangeArray() {\n\t\tif er.Start <= field && field <= er.End {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// checkExtensionTypes checks that the given extension is valid for pb.\nfunc checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {\n\tvar pbi interface{} = pb\n\t// Check the extended type.\n\tif ea, ok := pbi.(extensionAdapter); ok {\n\t\tpbi = ea.extendableProtoV1\n\t}\n\tif a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {\n\t\treturn errors.New(\"proto: bad extended type; \" + b.String() + \" does not extend \" + a.String())\n\t}\n\t// Check the range.\n\tif !isExtensionField(pb, extension.Field) {\n\t\treturn errors.New(\"proto: bad extension number; not in declared ranges\")\n\t}\n\treturn nil\n}\n\n// extPropKey is sufficient to uniquely identify an extension.\ntype extPropKey struct {\n\tbase  reflect.Type\n\tfield int32\n}\n\nvar extProp = struct {\n\tsync.RWMutex\n\tm map[extPropKey]*Properties\n}{\n\tm: make(map[extPropKey]*Properties),\n}\n\nfunc extensionProperties(ed *ExtensionDesc) *Properties {\n\tkey := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}\n\n\textProp.RLock()\n\tif prop, ok := extProp.m[key]; ok {\n\t\textProp.RUnlock()\n\t\treturn prop\n\t}\n\textProp.RUnlock()\n\n\textProp.Lock()\n\tdefer extProp.Unlock()\n\t// Check again.\n\tif prop, ok := extProp.m[key]; ok {\n\t\treturn prop\n\t}\n\n\tprop := new(Properties)\n\tprop.Init(reflect.TypeOf(ed.ExtensionType), \"unknown_name\", ed.Tag, nil)\n\textProp.m[key] = prop\n\treturn prop\n}\n\n// encode encodes any unmarshaled (unencoded) extensions in e.\nfunc encodeExtensions(e *XXX_InternalExtensions) error {\n\tm, mu := e.extensionsRead()\n\tif m == nil {\n\t\treturn nil // fast path\n\t}\n\tmu.Lock()\n\tdefer mu.Unlock()\n\treturn encodeExtensionsMap(m)\n}\n\n// encode encodes any unmarshaled (unencoded) extensions in e.\nfunc encodeExtensionsMap(m map[int32]Extension) error {\n\tfor k, e := range m {\n\t\tif e.value == nil || e.desc == nil {\n\t\t\t// Extension is only in its encoded form.\n\t\t\tcontinue\n\t\t}\n\n\t\t// We don't skip extensions that have an encoded form set,\n\t\t// because the extension value may have been mutated after\n\t\t// the last time this function was called.\n\n\t\tet := reflect.TypeOf(e.desc.ExtensionType)\n\t\tprops := extensionProperties(e.desc)\n\n\t\tp := NewBuffer(nil)\n\t\t// If e.value has type T, the encoder expects a *struct{ X T }.\n\t\t// Pass a *T with a zero field and hope it all works out.\n\t\tx := reflect.New(et)\n\t\tx.Elem().Set(reflect.ValueOf(e.value))\n\t\tif err := props.enc(p, props, toStructPointer(x)); err != nil {\n\t\t\treturn err\n\t\t}\n\t\te.enc = p.buf\n\t\tm[k] = e\n\t}\n\treturn nil\n}\n\nfunc extensionsSize(e *XXX_InternalExtensions) (n int) {\n\tm, mu := e.extensionsRead()\n\tif m == nil {\n\t\treturn 0\n\t}\n\tmu.Lock()\n\tdefer mu.Unlock()\n\treturn extensionsMapSize(m)\n}\n\nfunc 
extensionsMapSize(m map[int32]Extension) (n int) {\n\tfor _, e := range m {\n\t\tif e.value == nil || e.desc == nil {\n\t\t\t// Extension is only in its encoded form.\n\t\t\tn += len(e.enc)\n\t\t\tcontinue\n\t\t}\n\n\t\t// We don't skip extensions that have an encoded form set,\n\t\t// because the extension value may have been mutated after\n\t\t// the last time this function was called.\n\n\t\tet := reflect.TypeOf(e.desc.ExtensionType)\n\t\tprops := extensionProperties(e.desc)\n\n\t\t// If e.value has type T, the encoder expects a *struct{ X T }.\n\t\t// Pass a *T with a zero field and hope it all works out.\n\t\tx := reflect.New(et)\n\t\tx.Elem().Set(reflect.ValueOf(e.value))\n\t\tn += props.size(props, toStructPointer(x))\n\t}\n\treturn\n}\n\n// HasExtension returns whether the given extension is present in pb.\nfunc HasExtension(pb Message, extension *ExtensionDesc) bool {\n\t// TODO: Check types, field numbers, etc.?\n\tepb, ok := extendable(pb)\n\tif !ok {\n\t\treturn false\n\t}\n\textmap, mu := epb.extensionsRead()\n\tif extmap == nil {\n\t\treturn false\n\t}\n\tmu.Lock()\n\t_, ok = extmap[extension.Field]\n\tmu.Unlock()\n\treturn ok\n}\n\n// ClearExtension removes the given extension from pb.\nfunc ClearExtension(pb Message, extension *ExtensionDesc) {\n\tepb, ok := extendable(pb)\n\tif !ok {\n\t\treturn\n\t}\n\t// TODO: Check types, field numbers, etc.?\n\textmap := epb.extensionsWrite()\n\tdelete(extmap, extension.Field)\n}\n\n// GetExtension parses and returns the given extension of pb.\n// If the extension is not present and has no default value it returns ErrMissingExtension.\nfunc GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {\n\tepb, ok := extendable(pb)\n\tif !ok {\n\t\treturn nil, errors.New(\"proto: not an extendable proto\")\n\t}\n\n\tif err := checkExtensionTypes(epb, extension); err != nil {\n\t\treturn nil, err\n\t}\n\n\temap, mu := epb.extensionsRead()\n\tif emap == nil {\n\t\treturn defaultExtensionValue(extension)\n\t}\n\tmu.Lock()\n\tdefer mu.Unlock()\n\te, ok := emap[extension.Field]\n\tif !ok {\n\t\t// defaultExtensionValue returns the default value or\n\t\t// ErrMissingExtension if there is no default.\n\t\treturn defaultExtensionValue(extension)\n\t}\n\n\tif e.value != nil {\n\t\t// Already decoded. Check the descriptor, though.\n\t\tif e.desc != extension {\n\t\t\t// This shouldn't happen. 
If it does, it means that\n\t\t\t// GetExtension was called twice with two different\n\t\t\t// descriptors with the same field number.\n\t\t\treturn nil, errors.New(\"proto: descriptor conflict\")\n\t\t}\n\t\treturn e.value, nil\n\t}\n\n\tv, err := decodeExtension(e.enc, extension)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Remember the decoded version and drop the encoded version.\n\t// That way it is safe to mutate what we return.\n\te.value = v\n\te.desc = extension\n\te.enc = nil\n\temap[extension.Field] = e\n\treturn e.value, nil\n}\n\n// defaultExtensionValue returns the default value for extension.\n// If no default for an extension is defined ErrMissingExtension is returned.\nfunc defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {\n\tt := reflect.TypeOf(extension.ExtensionType)\n\tprops := extensionProperties(extension)\n\n\tsf, _, err := fieldDefault(t, props)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif sf == nil || sf.value == nil {\n\t\t// There is no default value.\n\t\treturn nil, ErrMissingExtension\n\t}\n\n\tif t.Kind() != reflect.Ptr {\n\t\t// We do not need to return a Ptr, we can directly return sf.value.\n\t\treturn sf.value, nil\n\t}\n\n\t// We need to return an interface{} that is a pointer to sf.value.\n\tvalue := reflect.New(t).Elem()\n\tvalue.Set(reflect.New(value.Type().Elem()))\n\tif sf.kind == reflect.Int32 {\n\t\t// We may have an int32 or an enum, but the underlying data is int32.\n\t\t// Since we can't set an int32 into a non int32 reflect.value directly\n\t\t// set it as a int32.\n\t\tvalue.Elem().SetInt(int64(sf.value.(int32)))\n\t} else {\n\t\tvalue.Elem().Set(reflect.ValueOf(sf.value))\n\t}\n\treturn value.Interface(), nil\n}\n\n// decodeExtension decodes an extension encoded in b.\nfunc decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {\n\to := NewBuffer(b)\n\n\tt := reflect.TypeOf(extension.ExtensionType)\n\n\tprops := extensionProperties(extension)\n\n\t// t is a pointer to a struct, pointer to basic type or a slice.\n\t// Allocate a \"field\" to store the pointer/slice itself; the\n\t// pointer/slice will be stored here. We pass\n\t// the address of this field to props.dec.\n\t// This passes a zero field and a *t and lets props.dec\n\t// interpret it as a *struct{ x t }.\n\tvalue := reflect.New(t).Elem()\n\n\tfor {\n\t\t// Discard wire type and field number varint. 
It isn't needed.\n\t\tif _, err := o.DecodeVarint(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif o.index >= len(o.buf) {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn value.Interface(), nil\n}\n\n// GetExtensions returns a slice of the extensions present in pb that are also listed in es.\n// The returned slice has the same length as es; missing extensions will appear as nil elements.\nfunc GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {\n\tepb, ok := extendable(pb)\n\tif !ok {\n\t\treturn nil, errors.New(\"proto: not an extendable proto\")\n\t}\n\textensions = make([]interface{}, len(es))\n\tfor i, e := range es {\n\t\textensions[i], err = GetExtension(epb, e)\n\t\tif err == ErrMissingExtension {\n\t\t\terr = nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn\n}\n\n// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.\n// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing\n// just the Field field, which defines the extension's field number.\nfunc ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {\n\tepb, ok := extendable(pb)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"proto: %T is not an extendable proto.Message\", pb)\n\t}\n\tregisteredExtensions := RegisteredExtensions(pb)\n\n\temap, mu := epb.extensionsRead()\n\tif emap == nil {\n\t\treturn nil, nil\n\t}\n\tmu.Lock()\n\tdefer mu.Unlock()\n\textensions := make([]*ExtensionDesc, 0, len(emap))\n\tfor extid, e := range emap {\n\t\tdesc := e.desc\n\t\tif desc == nil {\n\t\t\tdesc = registeredExtensions[extid]\n\t\t\tif desc == nil {\n\t\t\t\tdesc = &ExtensionDesc{Field: extid}\n\t\t\t}\n\t\t}\n\n\t\textensions = append(extensions, desc)\n\t}\n\treturn extensions, nil\n}\n\n// SetExtension sets the specified extension of pb to the specified value.\nfunc SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {\n\tepb, ok := extendable(pb)\n\tif !ok {\n\t\treturn errors.New(\"proto: not an extendable proto\")\n\t}\n\tif err := checkExtensionTypes(epb, extension); err != nil {\n\t\treturn err\n\t}\n\ttyp := reflect.TypeOf(extension.ExtensionType)\n\tif typ != reflect.TypeOf(value) {\n\t\treturn errors.New(\"proto: bad extension value type\")\n\t}\n\t// nil extension values need to be caught early, because the\n\t// encoder can't distinguish an ErrNil due to a nil extension\n\t// from an ErrNil due to a missing field. 
Extensions are\n\t// always optional, so the encoder would just swallow the error\n\t// and drop all the extensions from the encoded message.\n\tif reflect.ValueOf(value).IsNil() {\n\t\treturn fmt.Errorf(\"proto: SetExtension called with nil value of type %T\", value)\n\t}\n\n\textmap := epb.extensionsWrite()\n\textmap[extension.Field] = Extension{desc: extension, value: value}\n\treturn nil\n}\n\n// ClearAllExtensions clears all extensions from pb.\nfunc ClearAllExtensions(pb Message) {\n\tepb, ok := extendable(pb)\n\tif !ok {\n\t\treturn\n\t}\n\tm := epb.extensionsWrite()\n\tfor k := range m {\n\t\tdelete(m, k)\n\t}\n}\n\n// A global registry of extensions.\n// The generated code will register the generated descriptors by calling RegisterExtension.\n\nvar extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)\n\n// RegisterExtension is called from the generated code.\nfunc RegisterExtension(desc *ExtensionDesc) {\n\tst := reflect.TypeOf(desc.ExtendedType).Elem()\n\tm := extensionMaps[st]\n\tif m == nil {\n\t\tm = make(map[int32]*ExtensionDesc)\n\t\textensionMaps[st] = m\n\t}\n\tif _, ok := m[desc.Field]; ok {\n\t\tpanic(\"proto: duplicate extension registered: \" + st.String() + \" \" + strconv.Itoa(int(desc.Field)))\n\t}\n\tm[desc.Field] = desc\n}\n\n// RegisteredExtensions returns a map of the registered extensions of a\n// protocol buffer struct, indexed by the extension number.\n// The argument pb should be a nil pointer to the struct type.\nfunc RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {\n\treturn extensionMaps[reflect.TypeOf(pb).Elem()]\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/extensions_test.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2014 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"sort\"\n\t\"testing\"\n\n\t\"github.com/golang/protobuf/proto\"\n\tpb \"github.com/golang/protobuf/proto/testdata\"\n\t\"golang.org/x/sync/errgroup\"\n)\n\nfunc TestGetExtensionsWithMissingExtensions(t *testing.T) {\n\tmsg := &pb.MyMessage{}\n\text1 := &pb.Ext{}\n\tif err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {\n\t\tt.Fatalf(\"Could not set ext1: %s\", err)\n\t}\n\texts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{\n\t\tpb.E_Ext_More,\n\t\tpb.E_Ext_Text,\n\t})\n\tif err != nil {\n\t\tt.Fatalf(\"GetExtensions() failed: %s\", err)\n\t}\n\tif exts[0] != ext1 {\n\t\tt.Errorf(\"ext1 not in returned extensions: %T %v\", exts[0], exts[0])\n\t}\n\tif exts[1] != nil {\n\t\tt.Errorf(\"ext2 in returned extensions: %T %v\", exts[1], exts[1])\n\t}\n}\n\nfunc TestExtensionDescsWithMissingExtensions(t *testing.T) {\n\tmsg := &pb.MyMessage{Count: proto.Int32(0)}\n\textdesc1 := pb.E_Ext_More\n\tif descs, err := proto.ExtensionDescs(msg); len(descs) != 0 || err != nil {\n\t\tt.Errorf(\"proto.ExtensionDescs: got %d descs, error %v; want 0, nil\", len(descs), err)\n\t}\n\n\text1 := &pb.Ext{}\n\tif err := proto.SetExtension(msg, extdesc1, ext1); err != nil {\n\t\tt.Fatalf(\"Could not set ext1: %s\", err)\n\t}\n\textdesc2 := &proto.ExtensionDesc{\n\t\tExtendedType:  (*pb.MyMessage)(nil),\n\t\tExtensionType: (*bool)(nil),\n\t\tField:         123456789,\n\t\tName:          \"a.b\",\n\t\tTag:           \"varint,123456789,opt\",\n\t}\n\text2 := proto.Bool(false)\n\tif err := proto.SetExtension(msg, extdesc2, ext2); err != nil {\n\t\tt.Fatalf(\"Could not set ext2: %s\", err)\n\t}\n\n\tb, err := proto.Marshal(msg)\n\tif err != nil {\n\t\tt.Fatalf(\"Could not marshal msg: %v\", err)\n\t}\n\tif err := proto.Unmarshal(b, msg); 
err != nil {\n\t\tt.Fatalf(\"Could not unmarshal into msg: %v\", err)\n\t}\n\n\tdescs, err := proto.ExtensionDescs(msg)\n\tif err != nil {\n\t\tt.Fatalf(\"proto.ExtensionDescs: got error %v\", err)\n\t}\n\tsortExtDescs(descs)\n\twantDescs := []*proto.ExtensionDesc{extdesc1, &proto.ExtensionDesc{Field: extdesc2.Field}}\n\tif !reflect.DeepEqual(descs, wantDescs) {\n\t\tt.Errorf(\"proto.ExtensionDescs(msg) sorted extension ids: got %+v, want %+v\", descs, wantDescs)\n\t}\n}\n\ntype ExtensionDescSlice []*proto.ExtensionDesc\n\nfunc (s ExtensionDescSlice) Len() int           { return len(s) }\nfunc (s ExtensionDescSlice) Less(i, j int) bool { return s[i].Field < s[j].Field }\nfunc (s ExtensionDescSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }\n\nfunc sortExtDescs(s []*proto.ExtensionDesc) {\n\tsort.Sort(ExtensionDescSlice(s))\n}\n\nfunc TestGetExtensionStability(t *testing.T) {\n\tcheck := func(m *pb.MyMessage) bool {\n\t\text1, err := proto.GetExtension(m, pb.E_Ext_More)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"GetExtension() failed: %s\", err)\n\t\t}\n\t\text2, err := proto.GetExtension(m, pb.E_Ext_More)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"GetExtension() failed: %s\", err)\n\t\t}\n\t\treturn ext1 == ext2\n\t}\n\tmsg := &pb.MyMessage{Count: proto.Int32(4)}\n\text0 := &pb.Ext{}\n\tif err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil {\n\t\tt.Fatalf(\"Could not set ext0: %s\", err)\n\t}\n\tif !check(msg) {\n\t\tt.Errorf(\"GetExtension() not stable before marshaling\")\n\t}\n\tbb, err := proto.Marshal(msg)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal() failed: %s\", err)\n\t}\n\tmsg1 := &pb.MyMessage{}\n\terr = proto.Unmarshal(bb, msg1)\n\tif err != nil {\n\t\tt.Fatalf(\"Unmarshal() failed: %s\", err)\n\t}\n\tif !check(msg1) {\n\t\tt.Errorf(\"GetExtension() not stable after unmarshaling\")\n\t}\n}\n\nfunc TestGetExtensionDefaults(t *testing.T) {\n\tvar setFloat64 float64 = 1\n\tvar setFloat32 float32 = 2\n\tvar setInt32 int32 = 3\n\tvar setInt64 int64 = 4\n\tvar setUint32 uint32 = 5\n\tvar setUint64 uint64 = 6\n\tvar setBool = true\n\tvar setBool2 = false\n\tvar setString = \"Goodnight string\"\n\tvar setBytes = []byte(\"Goodnight bytes\")\n\tvar setEnum = pb.DefaultsMessage_TWO\n\n\ttype testcase struct {\n\t\text  *proto.ExtensionDesc // Extension we are testing.\n\t\twant interface{}          // Expected value of extension, or nil (meaning that GetExtension will fail).\n\t\tdef  interface{}          // Expected value of extension after ClearExtension().\n\t}\n\ttests := []testcase{\n\t\t{pb.E_NoDefaultDouble, setFloat64, nil},\n\t\t{pb.E_NoDefaultFloat, setFloat32, nil},\n\t\t{pb.E_NoDefaultInt32, setInt32, nil},\n\t\t{pb.E_NoDefaultInt64, setInt64, nil},\n\t\t{pb.E_NoDefaultUint32, setUint32, nil},\n\t\t{pb.E_NoDefaultUint64, setUint64, nil},\n\t\t{pb.E_NoDefaultSint32, setInt32, nil},\n\t\t{pb.E_NoDefaultSint64, setInt64, nil},\n\t\t{pb.E_NoDefaultFixed32, setUint32, nil},\n\t\t{pb.E_NoDefaultFixed64, setUint64, nil},\n\t\t{pb.E_NoDefaultSfixed32, setInt32, nil},\n\t\t{pb.E_NoDefaultSfixed64, setInt64, nil},\n\t\t{pb.E_NoDefaultBool, setBool, nil},\n\t\t{pb.E_NoDefaultBool, setBool2, nil},\n\t\t{pb.E_NoDefaultString, setString, nil},\n\t\t{pb.E_NoDefaultBytes, setBytes, nil},\n\t\t{pb.E_NoDefaultEnum, setEnum, nil},\n\t\t{pb.E_DefaultDouble, setFloat64, float64(3.1415)},\n\t\t{pb.E_DefaultFloat, setFloat32, float32(3.14)},\n\t\t{pb.E_DefaultInt32, setInt32, int32(42)},\n\t\t{pb.E_DefaultInt64, setInt64, int64(43)},\n\t\t{pb.E_DefaultUint32, setUint32, 
uint32(44)},\n\t\t{pb.E_DefaultUint64, setUint64, uint64(45)},\n\t\t{pb.E_DefaultSint32, setInt32, int32(46)},\n\t\t{pb.E_DefaultSint64, setInt64, int64(47)},\n\t\t{pb.E_DefaultFixed32, setUint32, uint32(48)},\n\t\t{pb.E_DefaultFixed64, setUint64, uint64(49)},\n\t\t{pb.E_DefaultSfixed32, setInt32, int32(50)},\n\t\t{pb.E_DefaultSfixed64, setInt64, int64(51)},\n\t\t{pb.E_DefaultBool, setBool, true},\n\t\t{pb.E_DefaultBool, setBool2, true},\n\t\t{pb.E_DefaultString, setString, \"Hello, string\"},\n\t\t{pb.E_DefaultBytes, setBytes, []byte(\"Hello, bytes\")},\n\t\t{pb.E_DefaultEnum, setEnum, pb.DefaultsMessage_ONE},\n\t}\n\n\tcheckVal := func(test testcase, msg *pb.DefaultsMessage, valWant interface{}) error {\n\t\tval, err := proto.GetExtension(msg, test.ext)\n\t\tif err != nil {\n\t\t\tif valWant != nil {\n\t\t\t\treturn fmt.Errorf(\"GetExtension(): %s\", err)\n\t\t\t}\n\t\t\tif want := proto.ErrMissingExtension; err != want {\n\t\t\t\treturn fmt.Errorf(\"Unexpected error: got %v, want %v\", err, want)\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\n\t\t// All proto2 extension values are either a pointer to a value or a slice of values.\n\t\tty := reflect.TypeOf(val)\n\t\ttyWant := reflect.TypeOf(test.ext.ExtensionType)\n\t\tif got, want := ty, tyWant; got != want {\n\t\t\treturn fmt.Errorf(\"unexpected reflect.TypeOf(): got %v want %v\", got, want)\n\t\t}\n\t\ttye := ty.Elem()\n\t\ttyeWant := tyWant.Elem()\n\t\tif got, want := tye, tyeWant; got != want {\n\t\t\treturn fmt.Errorf(\"unexpected reflect.TypeOf().Elem(): got %v want %v\", got, want)\n\t\t}\n\n\t\t// Check the name of the type of the value.\n\t\t// If it is an enum it will be type int32 with the name of the enum.\n\t\tif got, want := tye.Name(), tyeWant.Name(); got != want {\n\t\t\treturn fmt.Errorf(\"unexpected reflect.TypeOf().Elem().Name(): got %v want %v\", got, want)\n\t\t}\n\n\t\t// Check that value is what we expect.\n\t\t// If we have a pointer in val, get the value it points to.\n\t\tvalExp := val\n\t\tif ty.Kind() == reflect.Ptr {\n\t\t\tvalExp = reflect.ValueOf(val).Elem().Interface()\n\t\t}\n\t\tif got, want := valExp, valWant; !reflect.DeepEqual(got, want) {\n\t\t\treturn fmt.Errorf(\"unexpected reflect.DeepEqual(): got %v want %v\", got, want)\n\t\t}\n\n\t\treturn nil\n\t}\n\n\tsetTo := func(test testcase) interface{} {\n\t\tsetTo := reflect.ValueOf(test.want)\n\t\tif typ := reflect.TypeOf(test.ext.ExtensionType); typ.Kind() == reflect.Ptr {\n\t\t\tsetTo = reflect.New(typ).Elem()\n\t\t\tsetTo.Set(reflect.New(setTo.Type().Elem()))\n\t\t\tsetTo.Elem().Set(reflect.ValueOf(test.want))\n\t\t}\n\t\treturn setTo.Interface()\n\t}\n\n\tfor _, test := range tests {\n\t\tmsg := &pb.DefaultsMessage{}\n\t\tname := test.ext.Name\n\n\t\t// Check the initial value.\n\t\tif err := checkVal(test, msg, test.def); err != nil {\n\t\t\tt.Errorf(\"%s: %v\", name, err)\n\t\t}\n\n\t\t// Set the per-type value and check value.\n\t\tname = fmt.Sprintf(\"%s (set to %T %v)\", name, test.want, test.want)\n\t\tif err := proto.SetExtension(msg, test.ext, setTo(test)); err != nil {\n\t\t\tt.Errorf(\"%s: SetExtension(): %v\", name, err)\n\t\t\tcontinue\n\t\t}\n\t\tif err := checkVal(test, msg, test.want); err != nil {\n\t\t\tt.Errorf(\"%s: %v\", name, err)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Clear the extension and check the value.\n\t\tname += \" (cleared)\"\n\t\tproto.ClearExtension(msg, test.ext)\n\t\tif err := checkVal(test, msg, test.def); err != nil {\n\t\t\tt.Errorf(\"%s: %v\", name, err)\n\t\t}\n\t}\n}\n\nfunc TestExtensionsRoundTrip(t *testing.T) {\n\tmsg := 
&pb.MyMessage{}\n\text1 := &pb.Ext{\n\t\tData: proto.String(\"hi\"),\n\t}\n\text2 := &pb.Ext{\n\t\tData: proto.String(\"there\"),\n\t}\n\texists := proto.HasExtension(msg, pb.E_Ext_More)\n\tif exists {\n\t\tt.Error(\"Extension More present unexpectedly\")\n\t}\n\tif err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {\n\t\tt.Error(err)\n\t}\n\tif err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil {\n\t\tt.Error(err)\n\t}\n\te, err := proto.GetExtension(msg, pb.E_Ext_More)\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tx, ok := e.(*pb.Ext)\n\tif !ok {\n\t\tt.Errorf(\"e has type %T, expected testdata.Ext\", e)\n\t} else if *x.Data != \"there\" {\n\t\tt.Errorf(\"SetExtension failed to overwrite, got %+v, not 'there'\", x)\n\t}\n\tproto.ClearExtension(msg, pb.E_Ext_More)\n\tif _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension {\n\t\tt.Errorf(\"got %v, expected ErrMissingExtension\", e)\n\t}\n\tif _, err := proto.GetExtension(msg, pb.E_X215); err == nil {\n\t\tt.Error(\"expected bad extension error, got nil\")\n\t}\n\tif err := proto.SetExtension(msg, pb.E_X215, 12); err == nil {\n\t\tt.Error(\"expected extension err\")\n\t}\n\tif err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil {\n\t\tt.Error(\"expected some sort of type mismatch error, got nil\")\n\t}\n}\n\nfunc TestNilExtension(t *testing.T) {\n\tmsg := &pb.MyMessage{\n\t\tCount: proto.Int32(1),\n\t}\n\tif err := proto.SetExtension(msg, pb.E_Ext_Text, proto.String(\"hello\")); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := proto.SetExtension(msg, pb.E_Ext_More, (*pb.Ext)(nil)); err == nil {\n\t\tt.Error(\"expected SetExtension to fail due to a nil extension\")\n\t} else if want := \"proto: SetExtension called with nil value of type *testdata.Ext\"; err.Error() != want {\n\t\tt.Errorf(\"expected error %v, got %v\", want, err)\n\t}\n\t// Note: if the behavior of Marshal is ever changed to ignore nil extensions, update\n\t// this test to verify that E_Ext_Text is properly propagated through marshal->unmarshal.\n}\n\nfunc TestMarshalUnmarshalRepeatedExtension(t *testing.T) {\n\t// Add a repeated extension to the result.\n\ttests := []struct {\n\t\tname string\n\t\text  []*pb.ComplexExtension\n\t}{\n\t\t{\n\t\t\t\"two fields\",\n\t\t\t[]*pb.ComplexExtension{\n\t\t\t\t{First: proto.Int32(7)},\n\t\t\t\t{Second: proto.Int32(11)},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"repeated field\",\n\t\t\t[]*pb.ComplexExtension{\n\t\t\t\t{Third: []int32{1000}},\n\t\t\t\t{Third: []int32{2000}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"two fields and repeated field\",\n\t\t\t[]*pb.ComplexExtension{\n\t\t\t\t{Third: []int32{1000}},\n\t\t\t\t{First: proto.Int32(9)},\n\t\t\t\t{Second: proto.Int32(21)},\n\t\t\t\t{Third: []int32{2000}},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\t// Marshal message with a repeated extension.\n\t\tmsg1 := new(pb.OtherMessage)\n\t\terr := proto.SetExtension(msg1, pb.E_RComplex, test.ext)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"[%s] Error setting extension: %v\", test.name, err)\n\t\t}\n\t\tb, err := proto.Marshal(msg1)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"[%s] Error marshaling message: %v\", test.name, err)\n\t\t}\n\n\t\t// Unmarshal and read the merged proto.\n\t\tmsg2 := new(pb.OtherMessage)\n\t\terr = proto.Unmarshal(b, msg2)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"[%s] Error unmarshaling message: %v\", test.name, err)\n\t\t}\n\t\te, err := proto.GetExtension(msg2, pb.E_RComplex)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"[%s] Error getting extension: %v\", 
test.name, err)\n\t\t}\n\t\text := e.([]*pb.ComplexExtension)\n\t\tif ext == nil {\n\t\t\tt.Fatalf(\"[%s] Invalid extension\", test.name)\n\t\t}\n\t\tif !reflect.DeepEqual(ext, test.ext) {\n\t\t\tt.Errorf(\"[%s] Wrong value for ComplexExtension: got: %v want: %v\\n\", test.name, ext, test.ext)\n\t\t}\n\t}\n}\n\nfunc TestUnmarshalRepeatingNonRepeatedExtension(t *testing.T) {\n\t// We may see multiple instances of the same extension in the wire\n\t// format. For example, the proto compiler may encode custom options in\n\t// this way. Here, we verify that we merge the extensions together.\n\ttests := []struct {\n\t\tname string\n\t\text  []*pb.ComplexExtension\n\t}{\n\t\t{\n\t\t\t\"two fields\",\n\t\t\t[]*pb.ComplexExtension{\n\t\t\t\t{First: proto.Int32(7)},\n\t\t\t\t{Second: proto.Int32(11)},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"repeated field\",\n\t\t\t[]*pb.ComplexExtension{\n\t\t\t\t{Third: []int32{1000}},\n\t\t\t\t{Third: []int32{2000}},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t\"two fields and repeated field\",\n\t\t\t[]*pb.ComplexExtension{\n\t\t\t\t{Third: []int32{1000}},\n\t\t\t\t{First: proto.Int32(9)},\n\t\t\t\t{Second: proto.Int32(21)},\n\t\t\t\t{Third: []int32{2000}},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tvar buf bytes.Buffer\n\t\tvar want pb.ComplexExtension\n\n\t\t// Generate a serialized representation of a repeated extension\n\t\t// by catenating bytes together.\n\t\tfor i, e := range test.ext {\n\t\t\t// Merge to create the wanted proto.\n\t\t\tproto.Merge(&want, e)\n\n\t\t\t// serialize the message\n\t\t\tmsg := new(pb.OtherMessage)\n\t\t\terr := proto.SetExtension(msg, pb.E_Complex, e)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"[%s] Error setting extension %d: %v\", test.name, i, err)\n\t\t\t}\n\t\t\tb, err := proto.Marshal(msg)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"[%s] Error marshaling message %d: %v\", test.name, i, err)\n\t\t\t}\n\t\t\tbuf.Write(b)\n\t\t}\n\n\t\t// Unmarshal and read the merged proto.\n\t\tmsg2 := new(pb.OtherMessage)\n\t\terr := proto.Unmarshal(buf.Bytes(), msg2)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"[%s] Error unmarshaling message: %v\", test.name, err)\n\t\t}\n\t\te, err := proto.GetExtension(msg2, pb.E_Complex)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"[%s] Error getting extension: %v\", test.name, err)\n\t\t}\n\t\text := e.(*pb.ComplexExtension)\n\t\tif ext == nil {\n\t\t\tt.Fatalf(\"[%s] Invalid extension\", test.name)\n\t\t}\n\t\tif !reflect.DeepEqual(*ext, want) {\n\t\t\tt.Errorf(\"[%s] Wrong value for ComplexExtension: got: %s want: %s\\n\", test.name, ext, want)\n\t\t}\n\t}\n}\n\nfunc TestClearAllExtensions(t *testing.T) {\n\t// unregistered extension\n\tdesc := &proto.ExtensionDesc{\n\t\tExtendedType:  (*pb.MyMessage)(nil),\n\t\tExtensionType: (*bool)(nil),\n\t\tField:         101010100,\n\t\tName:          \"emptyextension\",\n\t\tTag:           \"varint,0,opt\",\n\t}\n\tm := &pb.MyMessage{}\n\tif proto.HasExtension(m, desc) {\n\t\tt.Errorf(\"proto.HasExtension(%s): got true, want false\", proto.MarshalTextString(m))\n\t}\n\tif err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil {\n\t\tt.Errorf(\"proto.SetExtension(m, desc, true): got error %q, want nil\", err)\n\t}\n\tif !proto.HasExtension(m, desc) {\n\t\tt.Errorf(\"proto.HasExtension(%s): got false, want true\", proto.MarshalTextString(m))\n\t}\n\tproto.ClearAllExtensions(m)\n\tif proto.HasExtension(m, desc) {\n\t\tt.Errorf(\"proto.HasExtension(%s): got true, want false\", proto.MarshalTextString(m))\n\t}\n}\n\nfunc TestMarshalRace(t *testing.T) {\n\t// 
unregistered extension\n\tdesc := &proto.ExtensionDesc{\n\t\tExtendedType:  (*pb.MyMessage)(nil),\n\t\tExtensionType: (*bool)(nil),\n\t\tField:         101010100,\n\t\tName:          \"emptyextension\",\n\t\tTag:           \"varint,0,opt\",\n\t}\n\n\tm := &pb.MyMessage{Count: proto.Int32(4)}\n\tif err := proto.SetExtension(m, desc, proto.Bool(true)); err != nil {\n\t\tt.Errorf(\"proto.SetExtension(m, desc, true): got error %q, want nil\", err)\n\t}\n\n\tvar g errgroup.Group\n\tfor n := 3; n > 0; n-- {\n\t\tg.Go(func() error {\n\t\t\t_, err := proto.Marshal(m)\n\t\t\treturn err\n\t\t})\n\t}\n\tif err := g.Wait(); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/lib.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n/*\nPackage proto converts data structures to and from the wire format of\nprotocol buffers.  It works in concert with the Go source code generated\nfor .proto files by the protocol compiler.\n\nA summary of the properties of the protocol buffer interface\nfor a protocol buffer variable v:\n\n  - Names are turned from camel_case to CamelCase for export.\n  - There are no methods on v to set fields; just treat\n\tthem as structure fields.\n  - There are getters that return a field's value if set,\n\tand return the field's default value if unset.\n\tThe getters work even if the receiver is a nil message.\n  - The zero value for a struct is its correct initialization state.\n\tAll desired fields must be set before marshaling.\n  - A Reset() method will restore a protobuf struct to its zero state.\n  - Non-repeated fields are pointers to the values; nil means unset.\n\tThat is, optional or required field int32 f becomes F *int32.\n  - Repeated fields are slices.\n  - Helper functions are available to aid the setting of fields.\n\tmsg.Foo = proto.String(\"hello\") // set field\n  - Constants are defined to hold the default values of all fields that\n\thave them.  They have the form Default_StructName_FieldName.\n\tBecause the getter methods handle defaulted values,\n\tdirect use of these constants should be rare.\n  - Enums are given type names and maps from names to values.\n\tEnum values are prefixed by the enclosing message's name, or by the\n\tenum's type name if it is a top-level enum. 
Enum types have a String\n\tmethod, and a Enum method to assist in message construction.\n  - Nested messages, groups and enums have type names prefixed with the name of\n\tthe surrounding message type.\n  - Extensions are given descriptor names that start with E_,\n\tfollowed by an underscore-delimited list of the nested messages\n\tthat contain it (if any) followed by the CamelCased name of the\n\textension field itself.  HasExtension, ClearExtension, GetExtension\n\tand SetExtension are functions for manipulating extensions.\n  - Oneof field sets are given a single field in their message,\n\twith distinguished wrapper types for each possible field value.\n  - Marshal and Unmarshal are functions to encode and decode the wire format.\n\nWhen the .proto file specifies `syntax=\"proto3\"`, there are some differences:\n\n  - Non-repeated fields of non-message type are values instead of pointers.\n  - Enum types do not get an Enum method.\n\nThe simplest way to describe this is to see an example.\nGiven file test.proto, containing\n\n\tpackage example;\n\n\tenum FOO { X = 17; }\n\n\tmessage Test {\n\t  required string label = 1;\n\t  optional int32 type = 2 [default=77];\n\t  repeated int64 reps = 3;\n\t  optional group OptionalGroup = 4 {\n\t    required string RequiredField = 5;\n\t  }\n\t  oneof union {\n\t    int32 number = 6;\n\t    string name = 7;\n\t  }\n\t}\n\nThe resulting file, test.pb.go, is:\n\n\tpackage example\n\n\timport proto \"github.com/golang/protobuf/proto\"\n\timport math \"math\"\n\n\ttype FOO int32\n\tconst (\n\t\tFOO_X FOO = 17\n\t)\n\tvar FOO_name = map[int32]string{\n\t\t17: \"X\",\n\t}\n\tvar FOO_value = map[string]int32{\n\t\t\"X\": 17,\n\t}\n\n\tfunc (x FOO) Enum() *FOO {\n\t\tp := new(FOO)\n\t\t*p = x\n\t\treturn p\n\t}\n\tfunc (x FOO) String() string {\n\t\treturn proto.EnumName(FOO_name, int32(x))\n\t}\n\tfunc (x *FOO) UnmarshalJSON(data []byte) error {\n\t\tvalue, err := proto.UnmarshalJSONEnum(FOO_value, data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*x = FOO(value)\n\t\treturn nil\n\t}\n\n\ttype Test struct {\n\t\tLabel         *string             `protobuf:\"bytes,1,req,name=label\" json:\"label,omitempty\"`\n\t\tType          *int32              `protobuf:\"varint,2,opt,name=type,def=77\" json:\"type,omitempty\"`\n\t\tReps          []int64             `protobuf:\"varint,3,rep,name=reps\" json:\"reps,omitempty\"`\n\t\tOptionalgroup *Test_OptionalGroup `protobuf:\"group,4,opt,name=OptionalGroup\" json:\"optionalgroup,omitempty\"`\n\t\t// Types that are valid to be assigned to Union:\n\t\t//\t*Test_Number\n\t\t//\t*Test_Name\n\t\tUnion            isTest_Union `protobuf_oneof:\"union\"`\n\t\tXXX_unrecognized []byte       `json:\"-\"`\n\t}\n\tfunc (m *Test) Reset()         { *m = Test{} }\n\tfunc (m *Test) String() string { return proto.CompactTextString(m) }\n\tfunc (*Test) ProtoMessage() {}\n\n\ttype isTest_Union interface {\n\t\tisTest_Union()\n\t}\n\n\ttype Test_Number struct {\n\t\tNumber int32 `protobuf:\"varint,6,opt,name=number\"`\n\t}\n\ttype Test_Name struct {\n\t\tName string `protobuf:\"bytes,7,opt,name=name\"`\n\t}\n\n\tfunc (*Test_Number) isTest_Union() {}\n\tfunc (*Test_Name) isTest_Union()   {}\n\n\tfunc (m *Test) GetUnion() isTest_Union {\n\t\tif m != nil {\n\t\t\treturn m.Union\n\t\t}\n\t\treturn nil\n\t}\n\tconst Default_Test_Type int32 = 77\n\n\tfunc (m *Test) GetLabel() string {\n\t\tif m != nil && m.Label != nil {\n\t\t\treturn *m.Label\n\t\t}\n\t\treturn \"\"\n\t}\n\n\tfunc (m *Test) GetType() int32 {\n\t\tif m != nil && m.Type 
!= nil {\n\t\t\treturn *m.Type\n\t\t}\n\t\treturn Default_Test_Type\n\t}\n\n\tfunc (m *Test) GetOptionalgroup() *Test_OptionalGroup {\n\t\tif m != nil {\n\t\t\treturn m.Optionalgroup\n\t\t}\n\t\treturn nil\n\t}\n\n\ttype Test_OptionalGroup struct {\n\t\tRequiredField *string `protobuf:\"bytes,5,req\" json:\"RequiredField,omitempty\"`\n\t}\n\tfunc (m *Test_OptionalGroup) Reset()         { *m = Test_OptionalGroup{} }\n\tfunc (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }\n\n\tfunc (m *Test_OptionalGroup) GetRequiredField() string {\n\t\tif m != nil && m.RequiredField != nil {\n\t\t\treturn *m.RequiredField\n\t\t}\n\t\treturn \"\"\n\t}\n\n\tfunc (m *Test) GetNumber() int32 {\n\t\tif x, ok := m.GetUnion().(*Test_Number); ok {\n\t\t\treturn x.Number\n\t\t}\n\t\treturn 0\n\t}\n\n\tfunc (m *Test) GetName() string {\n\t\tif x, ok := m.GetUnion().(*Test_Name); ok {\n\t\t\treturn x.Name\n\t\t}\n\t\treturn \"\"\n\t}\n\n\tfunc init() {\n\t\tproto.RegisterEnum(\"example.FOO\", FOO_name, FOO_value)\n\t}\n\nTo create and play with a Test object:\n\n\tpackage main\n\n\timport (\n\t\t\"log\"\n\n\t\t\"github.com/golang/protobuf/proto\"\n\t\tpb \"./example.pb\"\n\t)\n\n\tfunc main() {\n\t\ttest := &pb.Test{\n\t\t\tLabel: proto.String(\"hello\"),\n\t\t\tType:  proto.Int32(17),\n\t\t\tReps:  []int64{1, 2, 3},\n\t\t\tOptionalgroup: &pb.Test_OptionalGroup{\n\t\t\t\tRequiredField: proto.String(\"good bye\"),\n\t\t\t},\n\t\t\tUnion: &pb.Test_Name{\"fred\"},\n\t\t}\n\t\tdata, err := proto.Marshal(test)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"marshaling error: \", err)\n\t\t}\n\t\tnewTest := &pb.Test{}\n\t\terr = proto.Unmarshal(data, newTest)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"unmarshaling error: \", err)\n\t\t}\n\t\t// Now test and newTest contain the same data.\n\t\tif test.GetLabel() != newTest.GetLabel() {\n\t\t\tlog.Fatalf(\"data mismatch %q != %q\", test.GetLabel(), newTest.GetLabel())\n\t\t}\n\t\t// Use a type switch to determine which oneof was set.\n\t\tswitch u := test.Union.(type) {\n\t\tcase *pb.Test_Number: // u.Number contains the number.\n\t\tcase *pb.Test_Name: // u.Name contains the string.\n\t\t}\n\t\t// etc.\n\t}\n*/\npackage proto\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"reflect\"\n\t\"sort\"\n\t\"strconv\"\n\t\"sync\"\n)\n\n// Message is implemented by generated protocol buffer messages.\ntype Message interface {\n\tReset()\n\tString() string\n\tProtoMessage()\n}\n\n// Stats records allocation details about the protocol buffer encoders\n// and decoders.  Useful for tuning the library itself.\ntype Stats struct {\n\tEmalloc uint64 // mallocs in encode\n\tDmalloc uint64 // mallocs in decode\n\tEncode  uint64 // number of encodes\n\tDecode  uint64 // number of decodes\n\tChit    uint64 // number of cache hits\n\tCmiss   uint64 // number of cache misses\n\tSize    uint64 // number of sizes\n}\n\n// Set to true to enable stats collection.\nconst collectStats = false\n\nvar stats Stats\n\n// GetStats returns a copy of the global Stats structure.\nfunc GetStats() Stats { return stats }\n\n// A Buffer is a buffer manager for marshaling and unmarshaling\n// protocol buffers.  It may be reused between invocations to\n// reduce memory usage.  
It is not necessary to use a Buffer;\n// the global functions Marshal and Unmarshal create a\n// temporary Buffer and are fine for most applications.\ntype Buffer struct {\n\tbuf   []byte // encode/decode byte stream\n\tindex int    // read point\n\n\t// pools of basic types to amortize allocation.\n\tbools   []bool\n\tuint32s []uint32\n\tuint64s []uint64\n\n\t// extra pools, only used with pointer_reflect.go\n\tint32s   []int32\n\tint64s   []int64\n\tfloat32s []float32\n\tfloat64s []float64\n}\n\n// NewBuffer allocates a new Buffer and initializes its internal data to\n// the contents of the argument slice.\nfunc NewBuffer(e []byte) *Buffer {\n\treturn &Buffer{buf: e}\n}\n\n// Reset resets the Buffer, ready for marshaling a new protocol buffer.\nfunc (p *Buffer) Reset() {\n\tp.buf = p.buf[0:0] // for reading/writing\n\tp.index = 0        // for reading\n}\n\n// SetBuf replaces the internal buffer with the slice,\n// ready for unmarshaling the contents of the slice.\nfunc (p *Buffer) SetBuf(s []byte) {\n\tp.buf = s\n\tp.index = 0\n}\n\n// Bytes returns the contents of the Buffer.\nfunc (p *Buffer) Bytes() []byte { return p.buf }\n\n/*\n * Helper routines for simplifying the creation of optional fields of basic type.\n */\n\n// Bool is a helper routine that allocates a new bool value\n// to store v and returns a pointer to it.\nfunc Bool(v bool) *bool {\n\treturn &v\n}\n\n// Int32 is a helper routine that allocates a new int32 value\n// to store v and returns a pointer to it.\nfunc Int32(v int32) *int32 {\n\treturn &v\n}\n\n// Int is a helper routine that allocates a new int32 value\n// to store v and returns a pointer to it, but unlike Int32\n// its argument value is an int.\nfunc Int(v int) *int32 {\n\tp := new(int32)\n\t*p = int32(v)\n\treturn p\n}\n\n// Int64 is a helper routine that allocates a new int64 value\n// to store v and returns a pointer to it.\nfunc Int64(v int64) *int64 {\n\treturn &v\n}\n\n// Float32 is a helper routine that allocates a new float32 value\n// to store v and returns a pointer to it.\nfunc Float32(v float32) *float32 {\n\treturn &v\n}\n\n// Float64 is a helper routine that allocates a new float64 value\n// to store v and returns a pointer to it.\nfunc Float64(v float64) *float64 {\n\treturn &v\n}\n\n// Uint32 is a helper routine that allocates a new uint32 value\n// to store v and returns a pointer to it.\nfunc Uint32(v uint32) *uint32 {\n\treturn &v\n}\n\n// Uint64 is a helper routine that allocates a new uint64 value\n// to store v and returns a pointer to it.\nfunc Uint64(v uint64) *uint64 {\n\treturn &v\n}\n\n// String is a helper routine that allocates a new string value\n// to store v and returns a pointer to it.\nfunc String(v string) *string {\n\treturn &v\n}\n\n// EnumName is a helper function to simplify printing protocol buffer enums\n// by name.  Given an enum map and a value, it returns a useful string.\nfunc EnumName(m map[int32]string, v int32) string {\n\ts, ok := m[v]\n\tif ok {\n\t\treturn s\n\t}\n\treturn strconv.Itoa(int(v))\n}\n\n// UnmarshalJSONEnum is a helper function to simplify recovering enum int values\n// from their JSON-encoded representation. 
Given a map from the enum's symbolic\n// names to its int values, and a byte buffer containing the JSON-encoded\n// value, it returns an int32 that can be cast to the enum type by the caller.\n//\n// The function can deal with both JSON representations, numeric and symbolic.\nfunc UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {\n\tif data[0] == '\"' {\n\t\t// New style: enums are strings.\n\t\tvar repr string\n\t\tif err := json.Unmarshal(data, &repr); err != nil {\n\t\t\treturn -1, err\n\t\t}\n\t\tval, ok := m[repr]\n\t\tif !ok {\n\t\t\treturn 0, fmt.Errorf(\"unrecognized enum %s value %q\", enumName, repr)\n\t\t}\n\t\treturn val, nil\n\t}\n\t// Old style: enums are ints.\n\tvar val int32\n\tif err := json.Unmarshal(data, &val); err != nil {\n\t\treturn 0, fmt.Errorf(\"cannot unmarshal %#q into enum %s\", data, enumName)\n\t}\n\treturn val, nil\n}\n\n// DebugPrint dumps the encoded data in b in a debugging format with a header\n// including the string s. Used in testing but made available for general debugging.\nfunc (p *Buffer) DebugPrint(s string, b []byte) {\n\tvar u uint64\n\n\tobuf := p.buf\n\tindex := p.index\n\tp.buf = b\n\tp.index = 0\n\tdepth := 0\n\n\tfmt.Printf(\"\\n--- %s ---\\n\", s)\n\nout:\n\tfor {\n\t\tfor i := 0; i < depth; i++ {\n\t\t\tfmt.Print(\"  \")\n\t\t}\n\n\t\tindex := p.index\n\t\tif index == len(p.buf) {\n\t\t\tbreak\n\t\t}\n\n\t\top, err := p.DecodeVarint()\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"%3d: fetching op err %v\\n\", index, err)\n\t\t\tbreak out\n\t\t}\n\t\ttag := op >> 3\n\t\twire := op & 7\n\n\t\tswitch wire {\n\t\tdefault:\n\t\t\tfmt.Printf(\"%3d: t=%3d unknown wire=%d\\n\",\n\t\t\t\tindex, tag, wire)\n\t\t\tbreak out\n\n\t\tcase WireBytes:\n\t\t\tvar r []byte\n\n\t\t\tr, err = p.DecodeRawBytes(false)\n\t\t\tif err != nil {\n\t\t\t\tbreak out\n\t\t\t}\n\t\t\tfmt.Printf(\"%3d: t=%3d bytes [%d]\", index, tag, len(r))\n\t\t\tif len(r) <= 6 {\n\t\t\t\tfor i := 0; i < len(r); i++ {\n\t\t\t\t\tfmt.Printf(\" %.2x\", r[i])\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfor i := 0; i < 3; i++ {\n\t\t\t\t\tfmt.Printf(\" %.2x\", r[i])\n\t\t\t\t}\n\t\t\t\tfmt.Printf(\" ..\")\n\t\t\t\tfor i := len(r) - 3; i < len(r); i++ {\n\t\t\t\t\tfmt.Printf(\" %.2x\", r[i])\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Printf(\"\\n\")\n\n\t\tcase WireFixed32:\n\t\t\tu, err = p.DecodeFixed32()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"%3d: t=%3d fix32 err %v\\n\", index, tag, err)\n\t\t\t\tbreak out\n\t\t\t}\n\t\t\tfmt.Printf(\"%3d: t=%3d fix32 %d\\n\", index, tag, u)\n\n\t\tcase WireFixed64:\n\t\t\tu, err = p.DecodeFixed64()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"%3d: t=%3d fix64 err %v\\n\", index, tag, err)\n\t\t\t\tbreak out\n\t\t\t}\n\t\t\tfmt.Printf(\"%3d: t=%3d fix64 %d\\n\", index, tag, u)\n\n\t\tcase WireVarint:\n\t\t\tu, err = p.DecodeVarint()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Printf(\"%3d: t=%3d varint err %v\\n\", index, tag, err)\n\t\t\t\tbreak out\n\t\t\t}\n\t\t\tfmt.Printf(\"%3d: t=%3d varint %d\\n\", index, tag, u)\n\n\t\tcase WireStartGroup:\n\t\t\tfmt.Printf(\"%3d: t=%3d start\\n\", index, tag)\n\t\t\tdepth++\n\n\t\tcase WireEndGroup:\n\t\t\tdepth--\n\t\t\tfmt.Printf(\"%3d: t=%3d end\\n\", index, tag)\n\t\t}\n\t}\n\n\tif depth != 0 {\n\t\tfmt.Printf(\"%3d: start-end not balanced %d\\n\", p.index, depth)\n\t}\n\tfmt.Printf(\"\\n\")\n\n\tp.buf = obuf\n\tp.index = index\n}\n\n// SetDefaults sets unset protocol buffer fields to their default values.\n// It only modifies fields that are both unset and have defined defaults.\n// It 
recursively sets default values in any non-nil sub-messages.\nfunc SetDefaults(pb Message) {\n\tsetDefaults(reflect.ValueOf(pb), true, false)\n}\n\n// v is a pointer to a struct.\nfunc setDefaults(v reflect.Value, recur, zeros bool) {\n\tv = v.Elem()\n\n\tdefaultMu.RLock()\n\tdm, ok := defaults[v.Type()]\n\tdefaultMu.RUnlock()\n\tif !ok {\n\t\tdm = buildDefaultMessage(v.Type())\n\t\tdefaultMu.Lock()\n\t\tdefaults[v.Type()] = dm\n\t\tdefaultMu.Unlock()\n\t}\n\n\tfor _, sf := range dm.scalars {\n\t\tf := v.Field(sf.index)\n\t\tif !f.IsNil() {\n\t\t\t// field already set\n\t\t\tcontinue\n\t\t}\n\t\tdv := sf.value\n\t\tif dv == nil && !zeros {\n\t\t\t// no explicit default, and don't want to set zeros\n\t\t\tcontinue\n\t\t}\n\t\tfptr := f.Addr().Interface() // **T\n\t\t// TODO: Consider batching the allocations we do here.\n\t\tswitch sf.kind {\n\t\tcase reflect.Bool:\n\t\t\tb := new(bool)\n\t\t\tif dv != nil {\n\t\t\t\t*b = dv.(bool)\n\t\t\t}\n\t\t\t*(fptr.(**bool)) = b\n\t\tcase reflect.Float32:\n\t\t\tf := new(float32)\n\t\t\tif dv != nil {\n\t\t\t\t*f = dv.(float32)\n\t\t\t}\n\t\t\t*(fptr.(**float32)) = f\n\t\tcase reflect.Float64:\n\t\t\tf := new(float64)\n\t\t\tif dv != nil {\n\t\t\t\t*f = dv.(float64)\n\t\t\t}\n\t\t\t*(fptr.(**float64)) = f\n\t\tcase reflect.Int32:\n\t\t\t// might be an enum\n\t\t\tif ft := f.Type(); ft != int32PtrType {\n\t\t\t\t// enum\n\t\t\t\tf.Set(reflect.New(ft.Elem()))\n\t\t\t\tif dv != nil {\n\t\t\t\t\tf.Elem().SetInt(int64(dv.(int32)))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// int32 field\n\t\t\t\ti := new(int32)\n\t\t\t\tif dv != nil {\n\t\t\t\t\t*i = dv.(int32)\n\t\t\t\t}\n\t\t\t\t*(fptr.(**int32)) = i\n\t\t\t}\n\t\tcase reflect.Int64:\n\t\t\ti := new(int64)\n\t\t\tif dv != nil {\n\t\t\t\t*i = dv.(int64)\n\t\t\t}\n\t\t\t*(fptr.(**int64)) = i\n\t\tcase reflect.String:\n\t\t\ts := new(string)\n\t\t\tif dv != nil {\n\t\t\t\t*s = dv.(string)\n\t\t\t}\n\t\t\t*(fptr.(**string)) = s\n\t\tcase reflect.Uint8:\n\t\t\t// exceptional case: []byte\n\t\t\tvar b []byte\n\t\t\tif dv != nil {\n\t\t\t\tdb := dv.([]byte)\n\t\t\t\tb = make([]byte, len(db))\n\t\t\t\tcopy(b, db)\n\t\t\t} else {\n\t\t\t\tb = []byte{}\n\t\t\t}\n\t\t\t*(fptr.(*[]byte)) = b\n\t\tcase reflect.Uint32:\n\t\t\tu := new(uint32)\n\t\t\tif dv != nil {\n\t\t\t\t*u = dv.(uint32)\n\t\t\t}\n\t\t\t*(fptr.(**uint32)) = u\n\t\tcase reflect.Uint64:\n\t\t\tu := new(uint64)\n\t\t\tif dv != nil {\n\t\t\t\t*u = dv.(uint64)\n\t\t\t}\n\t\t\t*(fptr.(**uint64)) = u\n\t\tdefault:\n\t\t\tlog.Printf(\"proto: can't set default for field %v (sf.kind=%v)\", f, sf.kind)\n\t\t}\n\t}\n\n\tfor _, ni := range dm.nested {\n\t\tf := v.Field(ni)\n\t\t// f is *T or []*T or map[T]*T\n\t\tswitch f.Kind() {\n\t\tcase reflect.Ptr:\n\t\t\tif f.IsNil() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsetDefaults(f, recur, zeros)\n\n\t\tcase reflect.Slice:\n\t\t\tfor i := 0; i < f.Len(); i++ {\n\t\t\t\te := f.Index(i)\n\t\t\t\tif e.IsNil() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tsetDefaults(e, recur, zeros)\n\t\t\t}\n\n\t\tcase reflect.Map:\n\t\t\tfor _, k := range f.MapKeys() {\n\t\t\t\te := f.MapIndex(k)\n\t\t\t\tif e.IsNil() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tsetDefaults(e, recur, zeros)\n\t\t\t}\n\t\t}\n\t}\n}\n\nvar (\n\t// defaults maps a protocol buffer struct type to a slice of the fields,\n\t// with its scalar fields set to their proto-declared non-zero default values.\n\tdefaultMu sync.RWMutex\n\tdefaults  = make(map[reflect.Type]defaultMessage)\n\n\tint32PtrType = reflect.TypeOf((*int32)(nil))\n)\n\n// defaultMessage represents information 
about the default values of a message.\ntype defaultMessage struct {\n\tscalars []scalarField\n\tnested  []int // struct field index of nested messages\n}\n\ntype scalarField struct {\n\tindex int          // struct field index\n\tkind  reflect.Kind // element type (the T in *T or []T)\n\tvalue interface{}  // the proto-declared default value, or nil\n}\n\n// t is a struct type.\nfunc buildDefaultMessage(t reflect.Type) (dm defaultMessage) {\n\tsprop := GetProperties(t)\n\tfor _, prop := range sprop.Prop {\n\t\tfi, ok := sprop.decoderTags.get(prop.Tag)\n\t\tif !ok {\n\t\t\t// XXX_unrecognized\n\t\t\tcontinue\n\t\t}\n\t\tft := t.Field(fi).Type\n\n\t\tsf, nested, err := fieldDefault(ft, prop)\n\t\tswitch {\n\t\tcase err != nil:\n\t\t\tlog.Print(err)\n\t\tcase nested:\n\t\t\tdm.nested = append(dm.nested, fi)\n\t\tcase sf != nil:\n\t\t\tsf.index = fi\n\t\t\tdm.scalars = append(dm.scalars, *sf)\n\t\t}\n\t}\n\n\treturn dm\n}\n\n// fieldDefault returns the scalarField for field type ft.\n// sf will be nil if the field can not have a default.\n// nestedMessage will be true if this is a nested message.\n// Note that sf.index is not set on return.\nfunc fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {\n\tvar canHaveDefault bool\n\tswitch ft.Kind() {\n\tcase reflect.Ptr:\n\t\tif ft.Elem().Kind() == reflect.Struct {\n\t\t\tnestedMessage = true\n\t\t} else {\n\t\t\tcanHaveDefault = true // proto2 scalar field\n\t\t}\n\n\tcase reflect.Slice:\n\t\tswitch ft.Elem().Kind() {\n\t\tcase reflect.Ptr:\n\t\t\tnestedMessage = true // repeated message\n\t\tcase reflect.Uint8:\n\t\t\tcanHaveDefault = true // bytes field\n\t\t}\n\n\tcase reflect.Map:\n\t\tif ft.Elem().Kind() == reflect.Ptr {\n\t\t\tnestedMessage = true // map with message values\n\t\t}\n\t}\n\n\tif !canHaveDefault {\n\t\tif nestedMessage {\n\t\t\treturn nil, true, nil\n\t\t}\n\t\treturn nil, false, nil\n\t}\n\n\t// We now know that ft is a pointer or slice.\n\tsf = &scalarField{kind: ft.Elem().Kind()}\n\n\t// scalar fields without defaults\n\tif !prop.HasDefault {\n\t\treturn sf, false, nil\n\t}\n\n\t// a scalar field: either *T or []byte\n\tswitch ft.Elem().Kind() {\n\tcase reflect.Bool:\n\t\tx, err := strconv.ParseBool(prop.Default)\n\t\tif err != nil {\n\t\t\treturn nil, false, fmt.Errorf(\"proto: bad default bool %q: %v\", prop.Default, err)\n\t\t}\n\t\tsf.value = x\n\tcase reflect.Float32:\n\t\tx, err := strconv.ParseFloat(prop.Default, 32)\n\t\tif err != nil {\n\t\t\treturn nil, false, fmt.Errorf(\"proto: bad default float32 %q: %v\", prop.Default, err)\n\t\t}\n\t\tsf.value = float32(x)\n\tcase reflect.Float64:\n\t\tx, err := strconv.ParseFloat(prop.Default, 64)\n\t\tif err != nil {\n\t\t\treturn nil, false, fmt.Errorf(\"proto: bad default float64 %q: %v\", prop.Default, err)\n\t\t}\n\t\tsf.value = x\n\tcase reflect.Int32:\n\t\tx, err := strconv.ParseInt(prop.Default, 10, 32)\n\t\tif err != nil {\n\t\t\treturn nil, false, fmt.Errorf(\"proto: bad default int32 %q: %v\", prop.Default, err)\n\t\t}\n\t\tsf.value = int32(x)\n\tcase reflect.Int64:\n\t\tx, err := strconv.ParseInt(prop.Default, 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, false, fmt.Errorf(\"proto: bad default int64 %q: %v\", prop.Default, err)\n\t\t}\n\t\tsf.value = x\n\tcase reflect.String:\n\t\tsf.value = prop.Default\n\tcase reflect.Uint8:\n\t\t// []byte (not *uint8)\n\t\tsf.value = []byte(prop.Default)\n\tcase reflect.Uint32:\n\t\tx, err := strconv.ParseUint(prop.Default, 10, 32)\n\t\tif err != nil {\n\t\t\treturn nil, false, 
fmt.Errorf(\"proto: bad default uint32 %q: %v\", prop.Default, err)\n\t\t}\n\t\tsf.value = uint32(x)\n\tcase reflect.Uint64:\n\t\tx, err := strconv.ParseUint(prop.Default, 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, false, fmt.Errorf(\"proto: bad default uint64 %q: %v\", prop.Default, err)\n\t\t}\n\t\tsf.value = x\n\tdefault:\n\t\treturn nil, false, fmt.Errorf(\"proto: unhandled def kind %v\", ft.Elem().Kind())\n\t}\n\n\treturn sf, false, nil\n}\n\n// Map fields may have key types of non-float scalars, strings and enums.\n// The easiest way to sort them in some deterministic order is to use fmt.\n// If this turns out to be inefficient we can always consider other options,\n// such as doing a Schwartzian transform.\n\nfunc mapKeys(vs []reflect.Value) sort.Interface {\n\ts := mapKeySorter{\n\t\tvs: vs,\n\t\t// default Less function: textual comparison\n\t\tless: func(a, b reflect.Value) bool {\n\t\t\treturn fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())\n\t\t},\n\t}\n\n\t// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;\n\t// numeric keys are sorted numerically.\n\tif len(vs) == 0 {\n\t\treturn s\n\t}\n\tswitch vs[0].Kind() {\n\tcase reflect.Int32, reflect.Int64:\n\t\ts.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }\n\tcase reflect.Uint32, reflect.Uint64:\n\t\ts.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }\n\t}\n\n\treturn s\n}\n\ntype mapKeySorter struct {\n\tvs   []reflect.Value\n\tless func(a, b reflect.Value) bool\n}\n\nfunc (s mapKeySorter) Len() int      { return len(s.vs) }\nfunc (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }\nfunc (s mapKeySorter) Less(i, j int) bool {\n\treturn s.less(s.vs[i], s.vs[j])\n}\n\n// isProto3Zero reports whether v is a zero proto3 value.\nfunc isProto3Zero(v reflect.Value) bool {\n\tswitch v.Kind() {\n\tcase reflect.Bool:\n\t\treturn !v.Bool()\n\tcase reflect.Int32, reflect.Int64:\n\t\treturn v.Int() == 0\n\tcase reflect.Uint32, reflect.Uint64:\n\t\treturn v.Uint() == 0\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn v.Float() == 0\n\tcase reflect.String:\n\t\treturn v.String() == \"\"\n\t}\n\treturn false\n}\n\n// ProtoPackageIsVersion2 is referenced from generated protocol buffer files\n// to assert that that code is compatible with this version of the proto package.\nconst ProtoPackageIsVersion2 = true\n\n// ProtoPackageIsVersion1 is referenced from generated protocol buffer files\n// to assert that that code is compatible with this version of the proto package.\nconst ProtoPackageIsVersion1 = true\n"
  },
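  {
    "path": "vendor/github.com/golang/protobuf/proto/lib_sketch_test.go",
    "content": "package proto\n\n// Illustrative sketch only, not part of the upstream golang/protobuf\n// sources: exercises the lib.go helper routines (Bool, Int32, String,\n// EnumName) and the deterministic map-key ordering of mapKeys. The\n// values below are hypothetical.\n\nimport (\n\t\"reflect\"\n\t\"sort\"\n\t\"testing\"\n)\n\nfunc TestLibHelpersSketch(t *testing.T) {\n\t// The helper routines allocate a value and return a pointer to it,\n\t// which is how optional proto2 scalar fields are populated.\n\tif b, n, s := Bool(true), Int32(42), String(\"hi\"); !*b || *n != 42 || *s != \"hi\" {\n\t\tt.Fatalf(\"helper routines returned %v %v %v\", *b, *n, *s)\n\t}\n\n\t// EnumName falls back to the decimal value for unknown enum numbers.\n\tif got, want := EnumName(map[int32]string{17: \"X\"}, 3), \"3\"; got != want {\n\t\tt.Fatalf(\"EnumName fallback = %q, want %q\", got, want)\n\t}\n\n\t// mapKeys sorts integer keys numerically rather than textually, which\n\t// keeps map encodings deterministic (see mapKeySorter in lib.go).\n\tkeys := []reflect.Value{reflect.ValueOf(int32(10)), reflect.ValueOf(int32(2))}\n\tsort.Sort(mapKeys(keys))\n\tif keys[0].Int() != 2 || keys[1].Int() != 10 {\n\t\tt.Fatalf(\"mapKeys order = [%d %d], want [2 10]\", keys[0].Int(), keys[1].Int())\n\t}\n}\n"
  },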
  {
    "path": "vendor/github.com/golang/protobuf/proto/map_test.go",
    "content": "package proto_test\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/golang/protobuf/proto\"\n\tppb \"github.com/golang/protobuf/proto/proto3_proto\"\n)\n\nfunc marshalled() []byte {\n\tm := &ppb.IntMaps{}\n\tfor i := 0; i < 1000; i++ {\n\t\tm.Maps = append(m.Maps, &ppb.IntMap{\n\t\t\tRtt: map[int32]int32{1: 2},\n\t\t})\n\t}\n\tb, err := proto.Marshal(m)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Can't marshal %+v: %v\", m, err))\n\t}\n\treturn b\n}\n\nfunc BenchmarkConcurrentMapUnmarshal(b *testing.B) {\n\tin := marshalled()\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\tvar out ppb.IntMaps\n\t\t\tif err := proto.Unmarshal(in, &out); err != nil {\n\t\t\t\tb.Errorf(\"Can't unmarshal ppb.IntMaps: %v\", err)\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc BenchmarkSequentialMapUnmarshal(b *testing.B) {\n\tin := marshalled()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tvar out ppb.IntMaps\n\t\tif err := proto.Unmarshal(in, &out); err != nil {\n\t\t\tb.Errorf(\"Can't unmarshal ppb.IntMaps: %v\", err)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/message_set.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto\n\n/*\n * Support for message sets.\n */\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"sort\"\n)\n\n// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.\n// A message type ID is required for storing a protocol buffer in a message set.\nvar errNoMessageTypeID = errors.New(\"proto does not have a message type ID\")\n\n// The first two types (_MessageSet_Item and messageSet)\n// model what the protocol compiler produces for the following protocol message:\n//   message MessageSet {\n//     repeated group Item = 1 {\n//       required int32 type_id = 2;\n//       required string message = 3;\n//     };\n//   }\n// That is the MessageSet wire format. 
We can't use a proto to generate these\n// because that would introduce a circular dependency between it and this package.\n\ntype _MessageSet_Item struct {\n\tTypeId  *int32 `protobuf:\"varint,2,req,name=type_id\"`\n\tMessage []byte `protobuf:\"bytes,3,req,name=message\"`\n}\n\ntype messageSet struct {\n\tItem             []*_MessageSet_Item `protobuf:\"group,1,rep\"`\n\tXXX_unrecognized []byte\n\t// TODO: caching?\n}\n\n// Make sure messageSet is a Message.\nvar _ Message = (*messageSet)(nil)\n\n// messageTypeIder is an interface satisfied by a protocol buffer type\n// that may be stored in a MessageSet.\ntype messageTypeIder interface {\n\tMessageTypeId() int32\n}\n\nfunc (ms *messageSet) find(pb Message) *_MessageSet_Item {\n\tmti, ok := pb.(messageTypeIder)\n\tif !ok {\n\t\treturn nil\n\t}\n\tid := mti.MessageTypeId()\n\tfor _, item := range ms.Item {\n\t\tif *item.TypeId == id {\n\t\t\treturn item\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (ms *messageSet) Has(pb Message) bool {\n\tif ms.find(pb) != nil {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (ms *messageSet) Unmarshal(pb Message) error {\n\tif item := ms.find(pb); item != nil {\n\t\treturn Unmarshal(item.Message, pb)\n\t}\n\tif _, ok := pb.(messageTypeIder); !ok {\n\t\treturn errNoMessageTypeID\n\t}\n\treturn nil // TODO: return error instead?\n}\n\nfunc (ms *messageSet) Marshal(pb Message) error {\n\tmsg, err := Marshal(pb)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif item := ms.find(pb); item != nil {\n\t\t// reuse existing item\n\t\titem.Message = msg\n\t\treturn nil\n\t}\n\n\tmti, ok := pb.(messageTypeIder)\n\tif !ok {\n\t\treturn errNoMessageTypeID\n\t}\n\n\tmtid := mti.MessageTypeId()\n\tms.Item = append(ms.Item, &_MessageSet_Item{\n\t\tTypeId:  &mtid,\n\t\tMessage: msg,\n\t})\n\treturn nil\n}\n\nfunc (ms *messageSet) Reset()         { *ms = messageSet{} }\nfunc (ms *messageSet) String() string { return CompactTextString(ms) }\nfunc (*messageSet) ProtoMessage()     {}\n\n// Support for the message_set_wire_format message option.\n\nfunc skipVarint(buf []byte) []byte {\n\ti := 0\n\tfor ; buf[i]&0x80 != 0; i++ {\n\t}\n\treturn buf[i+1:]\n}\n\n// MarshalMessageSet encodes the extension map represented by m in the message set wire format.\n// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.\nfunc MarshalMessageSet(exts interface{}) ([]byte, error) {\n\tvar m map[int32]Extension\n\tswitch exts := exts.(type) {\n\tcase *XXX_InternalExtensions:\n\t\tif err := encodeExtensions(exts); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tm, _ = exts.extensionsRead()\n\tcase map[int32]Extension:\n\t\tif err := encodeExtensionsMap(exts); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tm = exts\n\tdefault:\n\t\treturn nil, errors.New(\"proto: not an extension map\")\n\t}\n\n\t// Sort extension IDs to provide a deterministic encoding.\n\t// See also enc_map in encode.go.\n\tids := make([]int, 0, len(m))\n\tfor id := range m {\n\t\tids = append(ids, int(id))\n\t}\n\tsort.Ints(ids)\n\n\tms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}\n\tfor _, id := range ids {\n\t\te := m[int32(id)]\n\t\t// Remove the wire type and field number varint, as well as the length varint.\n\t\tmsg := skipVarint(skipVarint(e.enc))\n\n\t\tms.Item = append(ms.Item, &_MessageSet_Item{\n\t\t\tTypeId:  Int32(int32(id)),\n\t\t\tMessage: msg,\n\t\t})\n\t}\n\treturn Marshal(ms)\n}\n\n// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.\n// It is called by 
generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.\nfunc UnmarshalMessageSet(buf []byte, exts interface{}) error {\n\tvar m map[int32]Extension\n\tswitch exts := exts.(type) {\n\tcase *XXX_InternalExtensions:\n\t\tm = exts.extensionsWrite()\n\tcase map[int32]Extension:\n\t\tm = exts\n\tdefault:\n\t\treturn errors.New(\"proto: not an extension map\")\n\t}\n\n\tms := new(messageSet)\n\tif err := Unmarshal(buf, ms); err != nil {\n\t\treturn err\n\t}\n\tfor _, item := range ms.Item {\n\t\tid := *item.TypeId\n\t\tmsg := item.Message\n\n\t\t// Restore wire type and field number varint, plus length varint.\n\t\t// Be careful to preserve duplicate items.\n\t\tb := EncodeVarint(uint64(id)<<3 | WireBytes)\n\t\tif ext, ok := m[id]; ok {\n\t\t\t// Existing data; rip off the tag and length varint\n\t\t\t// so we join the new data correctly.\n\t\t\t// We can assume that ext.enc is set because we are unmarshaling.\n\t\t\to := ext.enc[len(b):]   // skip wire type and field number\n\t\t\t_, n := DecodeVarint(o) // calculate length of length varint\n\t\t\to = o[n:]               // skip length varint\n\t\t\tmsg = append(o, msg...) // join old data and new data\n\t\t}\n\t\tb = append(b, EncodeVarint(uint64(len(msg)))...)\n\t\tb = append(b, msg...)\n\n\t\tm[id] = Extension{enc: b}\n\t}\n\treturn nil\n}\n\n// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.\n// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.\nfunc MarshalMessageSetJSON(exts interface{}) ([]byte, error) {\n\tvar m map[int32]Extension\n\tswitch exts := exts.(type) {\n\tcase *XXX_InternalExtensions:\n\t\tm, _ = exts.extensionsRead()\n\tcase map[int32]Extension:\n\t\tm = exts\n\tdefault:\n\t\treturn nil, errors.New(\"proto: not an extension map\")\n\t}\n\tvar b bytes.Buffer\n\tb.WriteByte('{')\n\n\t// Process the map in key order for deterministic output.\n\tids := make([]int32, 0, len(m))\n\tfor id := range m {\n\t\tids = append(ids, id)\n\t}\n\tsort.Sort(int32Slice(ids)) // int32Slice defined in text.go\n\n\tfor i, id := range ids {\n\t\text := m[id]\n\t\tif i > 0 {\n\t\t\tb.WriteByte(',')\n\t\t}\n\n\t\tmsd, ok := messageSetMap[id]\n\t\tif !ok {\n\t\t\t// Unknown type; we can't render it, so skip it.\n\t\t\tcontinue\n\t\t}\n\t\tfmt.Fprintf(&b, `\"[%s]\":`, msd.name)\n\n\t\tx := ext.value\n\t\tif x == nil {\n\t\t\tx = reflect.New(msd.t.Elem()).Interface()\n\t\t\tif err := Unmarshal(ext.enc, x.(Message)); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\td, err := json.Marshal(x)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tb.Write(d)\n\t}\n\tb.WriteByte('}')\n\treturn b.Bytes(), nil\n}\n\n// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.\n// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.\nfunc UnmarshalMessageSetJSON(buf []byte, exts interface{}) error {\n\t// Common-case fast path.\n\tif len(buf) == 0 || bytes.Equal(buf, []byte(\"{}\")) {\n\t\treturn nil\n\t}\n\n\t// This is fairly tricky, and it's not clear that it is needed.\n\treturn errors.New(\"TODO: UnmarshalMessageSetJSON not yet implemented\")\n}\n\n// A global registry of types that can be used in a MessageSet.\n\nvar messageSetMap = make(map[int32]messageSetDesc)\n\ntype messageSetDesc struct {\n\tt    reflect.Type // pointer to struct\n\tname string\n}\n\n// RegisterMessageSetType is called from the generated code.\nfunc 
RegisterMessageSetType(m Message, fieldNum int32, name string) {\n\tmessageSetMap[fieldNum] = messageSetDesc{\n\t\tt:    reflect.TypeOf(m),\n\t\tname: name,\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/message_set_test.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2014 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n)\n\nfunc TestUnmarshalMessageSetWithDuplicate(t *testing.T) {\n\t// Check that a repeated message set entry will be concatenated.\n\tin := &messageSet{\n\t\tItem: []*_MessageSet_Item{\n\t\t\t{TypeId: Int32(12345), Message: []byte(\"hoo\")},\n\t\t\t{TypeId: Int32(12345), Message: []byte(\"hah\")},\n\t\t},\n\t}\n\tb, err := Marshal(in)\n\tif err != nil {\n\t\tt.Fatalf(\"Marshal: %v\", err)\n\t}\n\tt.Logf(\"Marshaled bytes: %q\", b)\n\n\tvar extensions XXX_InternalExtensions\n\tif err := UnmarshalMessageSet(b, &extensions); err != nil {\n\t\tt.Fatalf(\"UnmarshalMessageSet: %v\", err)\n\t}\n\text, ok := extensions.p.extensionMap[12345]\n\tif !ok {\n\t\tt.Fatalf(\"Didn't retrieve extension 12345; map is %v\", extensions.p.extensionMap)\n\t}\n\t// Skip wire type/field number and length varints.\n\tgot := skipVarint(skipVarint(ext.enc))\n\tif want := []byte(\"hoohah\"); !bytes.Equal(got, want) {\n\t\tt.Errorf(\"Combined extension is %q, want %q\", got, want)\n\t}\n}\n"
  },
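  {
    "path": "vendor/github.com/golang/protobuf/proto/message_set_sketch_test.go",
    "content": "package proto\n\n// Illustrative sketch only, not part of the upstream golang/protobuf\n// sources: shows how skipVarint, used by the message-set code in\n// message_set.go, drops exactly one varint from the front of a buffer.\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n)\n\nfunc TestSkipVarintSketch(t *testing.T) {\n\t// 300 encodes as the two-byte varint 0xac 0x02; skipVarint must\n\t// consume both bytes and leave the payload untouched.\n\tb := EncodeVarint(300)\n\tb = append(b, []byte(\"payload\")...)\n\tif got := skipVarint(b); !bytes.Equal(got, []byte(\"payload\")) {\n\t\tt.Fatalf(\"skipVarint = %q, want %q\", got, \"payload\")\n\t}\n}\n"
  },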
  {
    "path": "vendor/github.com/golang/protobuf/proto/pointer_reflect.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2012 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// +build appengine js\n\n// This file contains an implementation of proto field accesses using package reflect.\n// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can\n// be used on App Engine.\n\npackage proto\n\nimport (\n\t\"math\"\n\t\"reflect\"\n)\n\n// A structPointer is a pointer to a struct.\ntype structPointer struct {\n\tv reflect.Value\n}\n\n// toStructPointer returns a structPointer equivalent to the given reflect value.\n// The reflect value must itself be a pointer to a struct.\nfunc toStructPointer(v reflect.Value) structPointer {\n\treturn structPointer{v}\n}\n\n// IsNil reports whether p is nil.\nfunc structPointer_IsNil(p structPointer) bool {\n\treturn p.v.IsNil()\n}\n\n// Interface returns the struct pointer as an interface value.\nfunc structPointer_Interface(p structPointer, _ reflect.Type) interface{} {\n\treturn p.v.Interface()\n}\n\n// A field identifies a field in a struct, accessible from a structPointer.\n// In this implementation, a field is identified by the sequence of field indices\n// passed to reflect's FieldByIndex.\ntype field []int\n\n// toField returns a field equivalent to the given reflect field.\nfunc toField(f *reflect.StructField) field {\n\treturn f.Index\n}\n\n// invalidField is an invalid field identifier.\nvar invalidField = field(nil)\n\n// IsValid reports whether the field identifier is valid.\nfunc (f field) IsValid() bool { return f != nil }\n\n// field returns the given field in the struct as a reflect value.\nfunc structPointer_field(p structPointer, f field) reflect.Value {\n\t// Special case: an extension map entry with a value of type T\n\t// passes a *T to the struct-handling code with a zero field,\n\t// expecting that it will be treated as equivalent to *struct{ X T },\n\t// which has the same memory layout. 
We have to handle that case\n\t// specially, because reflect will panic if we call FieldByIndex on a\n\t// non-struct.\n\tif f == nil {\n\t\treturn p.v.Elem()\n\t}\n\n\treturn p.v.Elem().FieldByIndex(f)\n}\n\n// ifield returns the given field in the struct as an interface value.\nfunc structPointer_ifield(p structPointer, f field) interface{} {\n\treturn structPointer_field(p, f).Addr().Interface()\n}\n\n// Bytes returns the address of a []byte field in the struct.\nfunc structPointer_Bytes(p structPointer, f field) *[]byte {\n\treturn structPointer_ifield(p, f).(*[]byte)\n}\n\n// BytesSlice returns the address of a [][]byte field in the struct.\nfunc structPointer_BytesSlice(p structPointer, f field) *[][]byte {\n\treturn structPointer_ifield(p, f).(*[][]byte)\n}\n\n// Bool returns the address of a *bool field in the struct.\nfunc structPointer_Bool(p structPointer, f field) **bool {\n\treturn structPointer_ifield(p, f).(**bool)\n}\n\n// BoolVal returns the address of a bool field in the struct.\nfunc structPointer_BoolVal(p structPointer, f field) *bool {\n\treturn structPointer_ifield(p, f).(*bool)\n}\n\n// BoolSlice returns the address of a []bool field in the struct.\nfunc structPointer_BoolSlice(p structPointer, f field) *[]bool {\n\treturn structPointer_ifield(p, f).(*[]bool)\n}\n\n// String returns the address of a *string field in the struct.\nfunc structPointer_String(p structPointer, f field) **string {\n\treturn structPointer_ifield(p, f).(**string)\n}\n\n// StringVal returns the address of a string field in the struct.\nfunc structPointer_StringVal(p structPointer, f field) *string {\n\treturn structPointer_ifield(p, f).(*string)\n}\n\n// StringSlice returns the address of a []string field in the struct.\nfunc structPointer_StringSlice(p structPointer, f field) *[]string {\n\treturn structPointer_ifield(p, f).(*[]string)\n}\n\n// Extensions returns the address of an extension map field in the struct.\nfunc structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {\n\treturn structPointer_ifield(p, f).(*XXX_InternalExtensions)\n}\n\n// ExtMap returns the address of an extension map field in the struct.\nfunc structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {\n\treturn structPointer_ifield(p, f).(*map[int32]Extension)\n}\n\n// NewAt returns the reflect.Value for a pointer to a field in the struct.\nfunc structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {\n\treturn structPointer_field(p, f).Addr()\n}\n\n// SetStructPointer writes a *struct field in the struct.\nfunc structPointer_SetStructPointer(p structPointer, f field, q structPointer) {\n\tstructPointer_field(p, f).Set(q.v)\n}\n\n// GetStructPointer reads a *struct field in the struct.\nfunc structPointer_GetStructPointer(p structPointer, f field) structPointer {\n\treturn structPointer{structPointer_field(p, f)}\n}\n\n// StructPointerSlice the address of a []*struct field in the struct.\nfunc structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {\n\treturn structPointerSlice{structPointer_field(p, f)}\n}\n\n// A structPointerSlice represents the address of a slice of pointers to structs\n// (themselves messages or groups). 
That is, v.Type() is *[]*struct{...}.\ntype structPointerSlice struct {\n\tv reflect.Value\n}\n\nfunc (p structPointerSlice) Len() int                  { return p.v.Len() }\nfunc (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }\nfunc (p structPointerSlice) Append(q structPointer) {\n\tp.v.Set(reflect.Append(p.v, q.v))\n}\n\nvar (\n\tint32Type   = reflect.TypeOf(int32(0))\n\tuint32Type  = reflect.TypeOf(uint32(0))\n\tfloat32Type = reflect.TypeOf(float32(0))\n\tint64Type   = reflect.TypeOf(int64(0))\n\tuint64Type  = reflect.TypeOf(uint64(0))\n\tfloat64Type = reflect.TypeOf(float64(0))\n)\n\n// A word32 represents a field of type *int32, *uint32, *float32, or *enum.\n// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.\ntype word32 struct {\n\tv reflect.Value\n}\n\n// IsNil reports whether p is nil.\nfunc word32_IsNil(p word32) bool {\n\treturn p.v.IsNil()\n}\n\n// Set sets p to point at a newly allocated word with bits set to x.\nfunc word32_Set(p word32, o *Buffer, x uint32) {\n\tt := p.v.Type().Elem()\n\tswitch t {\n\tcase int32Type:\n\t\tif len(o.int32s) == 0 {\n\t\t\to.int32s = make([]int32, uint32PoolSize)\n\t\t}\n\t\to.int32s[0] = int32(x)\n\t\tp.v.Set(reflect.ValueOf(&o.int32s[0]))\n\t\to.int32s = o.int32s[1:]\n\t\treturn\n\tcase uint32Type:\n\t\tif len(o.uint32s) == 0 {\n\t\t\to.uint32s = make([]uint32, uint32PoolSize)\n\t\t}\n\t\to.uint32s[0] = x\n\t\tp.v.Set(reflect.ValueOf(&o.uint32s[0]))\n\t\to.uint32s = o.uint32s[1:]\n\t\treturn\n\tcase float32Type:\n\t\tif len(o.float32s) == 0 {\n\t\t\to.float32s = make([]float32, uint32PoolSize)\n\t\t}\n\t\to.float32s[0] = math.Float32frombits(x)\n\t\tp.v.Set(reflect.ValueOf(&o.float32s[0]))\n\t\to.float32s = o.float32s[1:]\n\t\treturn\n\t}\n\n\t// must be enum\n\tp.v.Set(reflect.New(t))\n\tp.v.Elem().SetInt(int64(int32(x)))\n}\n\n// Get gets the bits pointed at by p, as a uint32.\nfunc word32_Get(p word32) uint32 {\n\telem := p.v.Elem()\n\tswitch elem.Kind() {\n\tcase reflect.Int32:\n\t\treturn uint32(elem.Int())\n\tcase reflect.Uint32:\n\t\treturn uint32(elem.Uint())\n\tcase reflect.Float32:\n\t\treturn math.Float32bits(float32(elem.Float()))\n\t}\n\tpanic(\"unreachable\")\n}\n\n// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.\nfunc structPointer_Word32(p structPointer, f field) word32 {\n\treturn word32{structPointer_field(p, f)}\n}\n\n// A word32Val represents a field of type int32, uint32, float32, or enum.\n// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.\ntype word32Val struct {\n\tv reflect.Value\n}\n\n// Set sets *p to x.\nfunc word32Val_Set(p word32Val, x uint32) {\n\tswitch p.v.Type() {\n\tcase int32Type:\n\t\tp.v.SetInt(int64(x))\n\t\treturn\n\tcase uint32Type:\n\t\tp.v.SetUint(uint64(x))\n\t\treturn\n\tcase float32Type:\n\t\tp.v.SetFloat(float64(math.Float32frombits(x)))\n\t\treturn\n\t}\n\n\t// must be enum\n\tp.v.SetInt(int64(int32(x)))\n}\n\n// Get gets the bits pointed at by p, as a uint32.\nfunc word32Val_Get(p word32Val) uint32 {\n\telem := p.v\n\tswitch elem.Kind() {\n\tcase reflect.Int32:\n\t\treturn uint32(elem.Int())\n\tcase reflect.Uint32:\n\t\treturn uint32(elem.Uint())\n\tcase reflect.Float32:\n\t\treturn math.Float32bits(float32(elem.Float()))\n\t}\n\tpanic(\"unreachable\")\n}\n\n// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.\nfunc structPointer_Word32Val(p structPointer, f field) word32Val {\n\treturn word32Val{structPointer_field(p, 
f)}\n}\n\n// A word32Slice is a slice of 32-bit values.\n// That is, v.Type() is []int32, []uint32, []float32, or []enum.\ntype word32Slice struct {\n\tv reflect.Value\n}\n\nfunc (p word32Slice) Append(x uint32) {\n\tn, m := p.v.Len(), p.v.Cap()\n\tif n < m {\n\t\tp.v.SetLen(n + 1)\n\t} else {\n\t\tt := p.v.Type().Elem()\n\t\tp.v.Set(reflect.Append(p.v, reflect.Zero(t)))\n\t}\n\telem := p.v.Index(n)\n\tswitch elem.Kind() {\n\tcase reflect.Int32:\n\t\telem.SetInt(int64(int32(x)))\n\tcase reflect.Uint32:\n\t\telem.SetUint(uint64(x))\n\tcase reflect.Float32:\n\t\telem.SetFloat(float64(math.Float32frombits(x)))\n\t}\n}\n\nfunc (p word32Slice) Len() int {\n\treturn p.v.Len()\n}\n\nfunc (p word32Slice) Index(i int) uint32 {\n\telem := p.v.Index(i)\n\tswitch elem.Kind() {\n\tcase reflect.Int32:\n\t\treturn uint32(elem.Int())\n\tcase reflect.Uint32:\n\t\treturn uint32(elem.Uint())\n\tcase reflect.Float32:\n\t\treturn math.Float32bits(float32(elem.Float()))\n\t}\n\tpanic(\"unreachable\")\n}\n\n// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.\nfunc structPointer_Word32Slice(p structPointer, f field) word32Slice {\n\treturn word32Slice{structPointer_field(p, f)}\n}\n\n// word64 is like word32 but for 64-bit values.\ntype word64 struct {\n\tv reflect.Value\n}\n\nfunc word64_Set(p word64, o *Buffer, x uint64) {\n\tt := p.v.Type().Elem()\n\tswitch t {\n\tcase int64Type:\n\t\tif len(o.int64s) == 0 {\n\t\t\to.int64s = make([]int64, uint64PoolSize)\n\t\t}\n\t\to.int64s[0] = int64(x)\n\t\tp.v.Set(reflect.ValueOf(&o.int64s[0]))\n\t\to.int64s = o.int64s[1:]\n\t\treturn\n\tcase uint64Type:\n\t\tif len(o.uint64s) == 0 {\n\t\t\to.uint64s = make([]uint64, uint64PoolSize)\n\t\t}\n\t\to.uint64s[0] = x\n\t\tp.v.Set(reflect.ValueOf(&o.uint64s[0]))\n\t\to.uint64s = o.uint64s[1:]\n\t\treturn\n\tcase float64Type:\n\t\tif len(o.float64s) == 0 {\n\t\t\to.float64s = make([]float64, uint64PoolSize)\n\t\t}\n\t\to.float64s[0] = math.Float64frombits(x)\n\t\tp.v.Set(reflect.ValueOf(&o.float64s[0]))\n\t\to.float64s = o.float64s[1:]\n\t\treturn\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc word64_IsNil(p word64) bool {\n\treturn p.v.IsNil()\n}\n\nfunc word64_Get(p word64) uint64 {\n\telem := p.v.Elem()\n\tswitch elem.Kind() {\n\tcase reflect.Int64:\n\t\treturn uint64(elem.Int())\n\tcase reflect.Uint64:\n\t\treturn elem.Uint()\n\tcase reflect.Float64:\n\t\treturn math.Float64bits(elem.Float())\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc structPointer_Word64(p structPointer, f field) word64 {\n\treturn word64{structPointer_field(p, f)}\n}\n\n// word64Val is like word32Val but for 64-bit values.\ntype word64Val struct {\n\tv reflect.Value\n}\n\nfunc word64Val_Set(p word64Val, o *Buffer, x uint64) {\n\tswitch p.v.Type() {\n\tcase int64Type:\n\t\tp.v.SetInt(int64(x))\n\t\treturn\n\tcase uint64Type:\n\t\tp.v.SetUint(x)\n\t\treturn\n\tcase float64Type:\n\t\tp.v.SetFloat(math.Float64frombits(x))\n\t\treturn\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc word64Val_Get(p word64Val) uint64 {\n\telem := p.v\n\tswitch elem.Kind() {\n\tcase reflect.Int64:\n\t\treturn uint64(elem.Int())\n\tcase reflect.Uint64:\n\t\treturn elem.Uint()\n\tcase reflect.Float64:\n\t\treturn math.Float64bits(elem.Float())\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc structPointer_Word64Val(p structPointer, f field) word64Val {\n\treturn word64Val{structPointer_field(p, f)}\n}\n\ntype word64Slice struct {\n\tv reflect.Value\n}\n\nfunc (p word64Slice) Append(x uint64) {\n\tn, m := p.v.Len(), p.v.Cap()\n\tif n < m 
{\n\t\tp.v.SetLen(n + 1)\n\t} else {\n\t\tt := p.v.Type().Elem()\n\t\tp.v.Set(reflect.Append(p.v, reflect.Zero(t)))\n\t}\n\telem := p.v.Index(n)\n\tswitch elem.Kind() {\n\tcase reflect.Int64:\n\t\telem.SetInt(int64(int64(x)))\n\tcase reflect.Uint64:\n\t\telem.SetUint(uint64(x))\n\tcase reflect.Float64:\n\t\telem.SetFloat(float64(math.Float64frombits(x)))\n\t}\n}\n\nfunc (p word64Slice) Len() int {\n\treturn p.v.Len()\n}\n\nfunc (p word64Slice) Index(i int) uint64 {\n\telem := p.v.Index(i)\n\tswitch elem.Kind() {\n\tcase reflect.Int64:\n\t\treturn uint64(elem.Int())\n\tcase reflect.Uint64:\n\t\treturn uint64(elem.Uint())\n\tcase reflect.Float64:\n\t\treturn math.Float64bits(float64(elem.Float()))\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc structPointer_Word64Slice(p structPointer, f field) word64Slice {\n\treturn word64Slice{structPointer_field(p, f)}\n}\n"
  },
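  {
    "path": "vendor/github.com/golang/protobuf/proto/pointer_sketch_test.go",
    "content": "package proto\n\n// Illustrative sketch only, not part of the upstream golang/protobuf\n// sources: pokes a struct field through the structPointer/word32Val\n// accessors that pointer_reflect.go and pointer_unsafe.go both\n// implement. The tiny struct here is hypothetical.\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestStructPointerSketch(t *testing.T) {\n\ttype tiny struct {\n\t\tA int32\n\t}\n\tm := &tiny{}\n\n\t// toStructPointer and toField mirror what the encode/decode paths do:\n\t// the message becomes an opaque struct pointer plus field identifiers.\n\tsp := toStructPointer(reflect.ValueOf(m))\n\tsf := reflect.TypeOf(*m).Field(0)\n\tf := toField(&sf)\n\n\t// Writing through the accessor must be visible on the Go struct,\n\t// whichever implementation (reflect or unsafe) was compiled in.\n\tword32Val_Set(structPointer_Word32Val(sp, f), 7)\n\tif m.A != 7 {\n\t\tt.Fatalf(\"m.A = %d, want 7\", m.A)\n\t}\n}\n"
  },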
  {
    "path": "vendor/github.com/golang/protobuf/proto/pointer_unsafe.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2012 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n// +build !appengine,!js\n\n// This file contains the implementation of the proto field accesses using package unsafe.\n\npackage proto\n\nimport (\n\t\"reflect\"\n\t\"unsafe\"\n)\n\n// NOTE: These type_Foo functions would more idiomatically be methods,\n// but Go does not allow methods on pointer types, and we must preserve\n// some pointer type for the garbage collector. 
We use these\n// funcs with clunky names as our poor approximation to methods.\n//\n// An alternative would be\n//\ttype structPointer struct { p unsafe.Pointer }\n// but that does not registerize as well.\n\n// A structPointer is a pointer to a struct.\ntype structPointer unsafe.Pointer\n\n// toStructPointer returns a structPointer equivalent to the given reflect value.\nfunc toStructPointer(v reflect.Value) structPointer {\n\treturn structPointer(unsafe.Pointer(v.Pointer()))\n}\n\n// IsNil reports whether p is nil.\nfunc structPointer_IsNil(p structPointer) bool {\n\treturn p == nil\n}\n\n// Interface returns the struct pointer, assumed to have element type t,\n// as an interface value.\nfunc structPointer_Interface(p structPointer, t reflect.Type) interface{} {\n\treturn reflect.NewAt(t, unsafe.Pointer(p)).Interface()\n}\n\n// A field identifies a field in a struct, accessible from a structPointer.\n// In this implementation, a field is identified by its byte offset from the start of the struct.\ntype field uintptr\n\n// toField returns a field equivalent to the given reflect field.\nfunc toField(f *reflect.StructField) field {\n\treturn field(f.Offset)\n}\n\n// invalidField is an invalid field identifier.\nconst invalidField = ^field(0)\n\n// IsValid reports whether the field identifier is valid.\nfunc (f field) IsValid() bool {\n\treturn f != ^field(0)\n}\n\n// Bytes returns the address of a []byte field in the struct.\nfunc structPointer_Bytes(p structPointer, f field) *[]byte {\n\treturn (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// BytesSlice returns the address of a [][]byte field in the struct.\nfunc structPointer_BytesSlice(p structPointer, f field) *[][]byte {\n\treturn (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// Bool returns the address of a *bool field in the struct.\nfunc structPointer_Bool(p structPointer, f field) **bool {\n\treturn (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// BoolVal returns the address of a bool field in the struct.\nfunc structPointer_BoolVal(p structPointer, f field) *bool {\n\treturn (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// BoolSlice returns the address of a []bool field in the struct.\nfunc structPointer_BoolSlice(p structPointer, f field) *[]bool {\n\treturn (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// String returns the address of a *string field in the struct.\nfunc structPointer_String(p structPointer, f field) **string {\n\treturn (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// StringVal returns the address of a string field in the struct.\nfunc structPointer_StringVal(p structPointer, f field) *string {\n\treturn (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// StringSlice returns the address of a []string field in the struct.\nfunc structPointer_StringSlice(p structPointer, f field) *[]string {\n\treturn (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// ExtMap returns the address of an extension map field in the struct.\nfunc structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {\n\treturn (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\nfunc structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {\n\treturn (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// NewAt returns the reflect.Value for a pointer to a field in the struct.\nfunc structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {\n\treturn reflect.NewAt(typ, 
unsafe.Pointer(uintptr(p)+uintptr(f)))\n}\n\n// SetStructPointer writes a *struct field in the struct.\nfunc structPointer_SetStructPointer(p structPointer, f field, q structPointer) {\n\t*(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q\n}\n\n// GetStructPointer reads a *struct field in the struct.\nfunc structPointer_GetStructPointer(p structPointer, f field) structPointer {\n\treturn *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// StructPointerSlice the address of a []*struct field in the struct.\nfunc structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {\n\treturn (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).\ntype structPointerSlice []structPointer\n\nfunc (v *structPointerSlice) Len() int                  { return len(*v) }\nfunc (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }\nfunc (v *structPointerSlice) Append(p structPointer)    { *v = append(*v, p) }\n\n// A word32 is the address of a \"pointer to 32-bit value\" field.\ntype word32 **uint32\n\n// IsNil reports whether *v is nil.\nfunc word32_IsNil(p word32) bool {\n\treturn *p == nil\n}\n\n// Set sets *v to point at a newly allocated word set to x.\nfunc word32_Set(p word32, o *Buffer, x uint32) {\n\tif len(o.uint32s) == 0 {\n\t\to.uint32s = make([]uint32, uint32PoolSize)\n\t}\n\to.uint32s[0] = x\n\t*p = &o.uint32s[0]\n\to.uint32s = o.uint32s[1:]\n}\n\n// Get gets the value pointed at by *v.\nfunc word32_Get(p word32) uint32 {\n\treturn **p\n}\n\n// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.\nfunc structPointer_Word32(p structPointer, f field) word32 {\n\treturn word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))\n}\n\n// A word32Val is the address of a 32-bit value field.\ntype word32Val *uint32\n\n// Set sets *p to x.\nfunc word32Val_Set(p word32Val, x uint32) {\n\t*p = x\n}\n\n// Get gets the value pointed at by p.\nfunc word32Val_Get(p word32Val) uint32 {\n\treturn *p\n}\n\n// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.\nfunc structPointer_Word32Val(p structPointer, f field) word32Val {\n\treturn word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))\n}\n\n// A word32Slice is a slice of 32-bit values.\ntype word32Slice []uint32\n\nfunc (v *word32Slice) Append(x uint32)    { *v = append(*v, x) }\nfunc (v *word32Slice) Len() int           { return len(*v) }\nfunc (v *word32Slice) Index(i int) uint32 { return (*v)[i] }\n\n// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.\nfunc structPointer_Word32Slice(p structPointer, f field) *word32Slice {\n\treturn (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n\n// word64 is like word32 but for 64-bit values.\ntype word64 **uint64\n\nfunc word64_Set(p word64, o *Buffer, x uint64) {\n\tif len(o.uint64s) == 0 {\n\t\to.uint64s = make([]uint64, uint64PoolSize)\n\t}\n\to.uint64s[0] = x\n\t*p = &o.uint64s[0]\n\to.uint64s = o.uint64s[1:]\n}\n\nfunc word64_IsNil(p word64) bool {\n\treturn *p == nil\n}\n\nfunc word64_Get(p word64) uint64 {\n\treturn **p\n}\n\nfunc structPointer_Word64(p structPointer, f field) word64 {\n\treturn word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))\n}\n\n// word64Val is like word32Val but for 64-bit values.\ntype word64Val *uint64\n\nfunc word64Val_Set(p word64Val, o *Buffer, x uint64) 
{\n\t*p = x\n}\n\nfunc word64Val_Get(p word64Val) uint64 {\n\treturn *p\n}\n\nfunc structPointer_Word64Val(p structPointer, f field) word64Val {\n\treturn word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))\n}\n\n// word64Slice is like word32Slice but for 64-bit values.\ntype word64Slice []uint64\n\nfunc (v *word64Slice) Append(x uint64)    { *v = append(*v, x) }\nfunc (v *word64Slice) Len() int           { return len(*v) }\nfunc (v *word64Slice) Index(i int) uint64 { return (*v)[i] }\n\nfunc structPointer_Word64Slice(p structPointer, f field) *word64Slice {\n\treturn (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))\n}\n"
  },
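  {
    "path": "vendor/github.com/golang/protobuf/proto/properties_sketch_test.go",
    "content": "package proto\n\n// Illustrative sketch only, not part of the upstream golang/protobuf\n// sources: round-trips a struct-tag string through Properties.Parse and\n// Properties.String from properties.go. The tag below is hypothetical.\n\nimport \"testing\"\n\nfunc TestPropertiesRoundTripSketch(t *testing.T) {\n\t// A tag in the same style the generated code embeds in struct fields.\n\tvar p Properties\n\tp.Parse(\"varint,3,rep,name=reps\")\n\tif p.WireType != WireVarint || p.Tag != 3 || !p.Repeated || p.OrigName != \"reps\" {\n\t\tt.Fatalf(\"Parse gave %+v\", p)\n\t}\n\n\t// String re-serializes the properties, so parsing its output must\n\t// preserve the wire encoding and the tag number.\n\tvar q Properties\n\tq.Parse(p.String())\n\tif q.Wire != \"varint\" || q.Tag != 3 {\n\t\tt.Fatalf(\"round trip lost wire/tag: %q\", p.String())\n\t}\n}\n"
  },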
  {
    "path": "vendor/github.com/golang/protobuf/proto/properties.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto\n\n/*\n * Routines for encoding data into the wire format for protocol buffers.\n */\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n)\n\nconst debug bool = false\n\n// Constants that identify the encoding of a value on the wire.\nconst (\n\tWireVarint     = 0\n\tWireFixed64    = 1\n\tWireBytes      = 2\n\tWireStartGroup = 3\n\tWireEndGroup   = 4\n\tWireFixed32    = 5\n)\n\nconst startSize = 10 // initial slice/string sizes\n\n// Encoders are defined in encode.go\n// An encoder outputs the full representation of a field, including its\n// tag and encoder type.\ntype encoder func(p *Buffer, prop *Properties, base structPointer) error\n\n// A valueEncoder encodes a single integer in a particular encoding.\ntype valueEncoder func(o *Buffer, x uint64) error\n\n// Sizers are defined in encode.go\n// A sizer returns the encoded size of a field, including its tag and encoder\n// type.\ntype sizer func(prop *Properties, base structPointer) int\n\n// A valueSizer returns the encoded size of a single integer in a particular\n// encoding.\ntype valueSizer func(x uint64) int\n\n// Decoders are defined in decode.go\n// A decoder creates a value from its wire representation.\n// Unrecognized subelements are saved in unrec.\ntype decoder func(p *Buffer, prop *Properties, base structPointer) error\n\n// A valueDecoder decodes a single integer in a particular encoding.\ntype valueDecoder func(o *Buffer) (x uint64, err error)\n\n// A oneofMarshaler does the marshaling for all oneof fields in a message.\ntype oneofMarshaler func(Message, *Buffer) error\n\n// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.\ntype oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)\n\n// A oneofSizer does the sizing for all 
oneof fields in a message.\ntype oneofSizer func(Message) int\n\n// tagMap is an optimization over map[int]int for typical protocol buffer\n// use-cases. Encoded protocol buffers are often in tag order with small tag\n// numbers.\ntype tagMap struct {\n\tfastTags []int\n\tslowTags map[int]int\n}\n\n// tagMapFastLimit is the upper bound on the tag number that will be stored in\n// the tagMap slice rather than its map.\nconst tagMapFastLimit = 1024\n\nfunc (p *tagMap) get(t int) (int, bool) {\n\tif t > 0 && t < tagMapFastLimit {\n\t\tif t >= len(p.fastTags) {\n\t\t\treturn 0, false\n\t\t}\n\t\tfi := p.fastTags[t]\n\t\treturn fi, fi >= 0\n\t}\n\tfi, ok := p.slowTags[t]\n\treturn fi, ok\n}\n\nfunc (p *tagMap) put(t int, fi int) {\n\tif t > 0 && t < tagMapFastLimit {\n\t\tfor len(p.fastTags) < t+1 {\n\t\t\tp.fastTags = append(p.fastTags, -1)\n\t\t}\n\t\tp.fastTags[t] = fi\n\t\treturn\n\t}\n\tif p.slowTags == nil {\n\t\tp.slowTags = make(map[int]int)\n\t}\n\tp.slowTags[t] = fi\n}\n\n// StructProperties represents properties for all the fields of a struct.\n// decoderTags and decoderOrigNames should only be used by the decoder.\ntype StructProperties struct {\n\tProp             []*Properties  // properties for each field\n\treqCount         int            // required count\n\tdecoderTags      tagMap         // map from proto tag to struct field number\n\tdecoderOrigNames map[string]int // map from original name to struct field number\n\torder            []int          // list of struct field numbers in tag order\n\tunrecField       field          // field id of the XXX_unrecognized []byte field\n\textendable       bool           // is this an extendable proto\n\n\toneofMarshaler   oneofMarshaler\n\toneofUnmarshaler oneofUnmarshaler\n\toneofSizer       oneofSizer\n\tstype            reflect.Type\n\n\t// OneofTypes contains information about the oneof fields in this message.\n\t// It is keyed by the original name of a field.\n\tOneofTypes map[string]*OneofProperties\n}\n\n// OneofProperties represents information about a specific field in a oneof.\ntype OneofProperties struct {\n\tType  reflect.Type // pointer to generated struct type for this oneof field\n\tField int          // struct field number of the containing oneof in the message\n\tProp  *Properties\n}\n\n// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.\n// See encode.go, (*Buffer).enc_struct.\n\nfunc (sp *StructProperties) Len() int { return len(sp.order) }\nfunc (sp *StructProperties) Less(i, j int) bool {\n\treturn sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag\n}\nfunc (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }\n\n// Properties represents the protocol-specific behavior of a single struct field.\ntype Properties struct {\n\tName     string // name of the field, for error messages\n\tOrigName string // original name before protocol compiler (always set)\n\tJSONName string // name to use for JSON; determined by protoc\n\tWire     string\n\tWireType int\n\tTag      int\n\tRequired bool\n\tOptional bool\n\tRepeated bool\n\tPacked   bool   // relevant for repeated primitives only\n\tEnum     string // set for enum types only\n\tproto3   bool   // whether this is known to be a proto3 field; set for []byte only\n\toneof    bool   // whether this is a oneof field\n\n\tDefault    string // default value\n\tHasDefault bool   // whether an explicit default was provided\n\tdef_uint64 uint64\n\n\tenc           encoder\n\tvalEnc        
valueEncoder // set for bool and numeric types only\n\tfield         field\n\ttagcode       []byte // encoding of EncodeVarint((Tag<<3)|WireType)\n\ttagbuf        [8]byte\n\tstype         reflect.Type      // set for struct types only\n\tsprop         *StructProperties // set for struct types only\n\tisMarshaler   bool\n\tisUnmarshaler bool\n\n\tmtype    reflect.Type // set for map types only\n\tmkeyprop *Properties  // set for map types only\n\tmvalprop *Properties  // set for map types only\n\n\tsize    sizer\n\tvalSize valueSizer // set for bool and numeric types only\n\n\tdec    decoder\n\tvalDec valueDecoder // set for bool and numeric types only\n\n\t// If this is a packable field, this will be the decoder for the packed version of the field.\n\tpackedDec decoder\n}\n\n// String formats the properties in the protobuf struct field tag style.\nfunc (p *Properties) String() string {\n\ts := p.Wire\n\ts += \",\"\n\ts += strconv.Itoa(p.Tag)\n\tif p.Required {\n\t\ts += \",req\"\n\t}\n\tif p.Optional {\n\t\ts += \",opt\"\n\t}\n\tif p.Repeated {\n\t\ts += \",rep\"\n\t}\n\tif p.Packed {\n\t\ts += \",packed\"\n\t}\n\ts += \",name=\" + p.OrigName\n\tif p.JSONName != p.OrigName {\n\t\ts += \",json=\" + p.JSONName\n\t}\n\tif p.proto3 {\n\t\ts += \",proto3\"\n\t}\n\tif p.oneof {\n\t\ts += \",oneof\"\n\t}\n\tif len(p.Enum) > 0 {\n\t\ts += \",enum=\" + p.Enum\n\t}\n\tif p.HasDefault {\n\t\ts += \",def=\" + p.Default\n\t}\n\treturn s\n}\n\n// Parse populates p by parsing a string in the protobuf struct field tag style.\nfunc (p *Properties) Parse(s string) {\n\t// \"bytes,49,opt,name=foo,def=hello!\"\n\tfields := strings.Split(s, \",\") // breaks def=, but handled below.\n\tif len(fields) < 2 {\n\t\tfmt.Fprintf(os.Stderr, \"proto: tag has too few fields: %q\\n\", s)\n\t\treturn\n\t}\n\n\tp.Wire = fields[0]\n\tswitch p.Wire {\n\tcase \"varint\":\n\t\tp.WireType = WireVarint\n\t\tp.valEnc = (*Buffer).EncodeVarint\n\t\tp.valDec = (*Buffer).DecodeVarint\n\t\tp.valSize = sizeVarint\n\tcase \"fixed32\":\n\t\tp.WireType = WireFixed32\n\t\tp.valEnc = (*Buffer).EncodeFixed32\n\t\tp.valDec = (*Buffer).DecodeFixed32\n\t\tp.valSize = sizeFixed32\n\tcase \"fixed64\":\n\t\tp.WireType = WireFixed64\n\t\tp.valEnc = (*Buffer).EncodeFixed64\n\t\tp.valDec = (*Buffer).DecodeFixed64\n\t\tp.valSize = sizeFixed64\n\tcase \"zigzag32\":\n\t\tp.WireType = WireVarint\n\t\tp.valEnc = (*Buffer).EncodeZigzag32\n\t\tp.valDec = (*Buffer).DecodeZigzag32\n\t\tp.valSize = sizeZigzag32\n\tcase \"zigzag64\":\n\t\tp.WireType = WireVarint\n\t\tp.valEnc = (*Buffer).EncodeZigzag64\n\t\tp.valDec = (*Buffer).DecodeZigzag64\n\t\tp.valSize = sizeZigzag64\n\tcase \"bytes\", \"group\":\n\t\tp.WireType = WireBytes\n\t\t// no numeric converter for non-numeric types\n\tdefault:\n\t\tfmt.Fprintf(os.Stderr, \"proto: tag has unknown wire type: %q\\n\", s)\n\t\treturn\n\t}\n\n\tvar err error\n\tp.Tag, err = strconv.Atoi(fields[1])\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor i := 2; i < len(fields); i++ {\n\t\tf := fields[i]\n\t\tswitch {\n\t\tcase f == \"req\":\n\t\t\tp.Required = true\n\t\tcase f == \"opt\":\n\t\t\tp.Optional = true\n\t\tcase f == \"rep\":\n\t\t\tp.Repeated = true\n\t\tcase f == \"packed\":\n\t\t\tp.Packed = true\n\t\tcase strings.HasPrefix(f, \"name=\"):\n\t\t\tp.OrigName = f[5:]\n\t\tcase strings.HasPrefix(f, \"json=\"):\n\t\t\tp.JSONName = f[5:]\n\t\tcase strings.HasPrefix(f, \"enum=\"):\n\t\t\tp.Enum = f[5:]\n\t\tcase f == \"proto3\":\n\t\t\tp.proto3 = true\n\t\tcase f == \"oneof\":\n\t\t\tp.oneof = true\n\t\tcase 
strings.HasPrefix(f, \"def=\"):\n\t\t\tp.HasDefault = true\n\t\t\tp.Default = f[4:] // rest of string\n\t\t\tif i+1 < len(fields) {\n\t\t\t\t// Commas aren't escaped, and def is always last.\n\t\t\t\tp.Default += \",\" + strings.Join(fields[i+1:], \",\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc logNoSliceEnc(t1, t2 reflect.Type) {\n\tfmt.Fprintf(os.Stderr, \"proto: no slice oenc for %T = []%T\\n\", t1, t2)\n}\n\nvar protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()\n\n// Initialize the fields for encoding and decoding.\nfunc (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {\n\tp.enc = nil\n\tp.dec = nil\n\tp.size = nil\n\n\tswitch t1 := typ; t1.Kind() {\n\tdefault:\n\t\tfmt.Fprintf(os.Stderr, \"proto: no coders for %v\\n\", t1)\n\n\t// proto3 scalar types\n\n\tcase reflect.Bool:\n\t\tp.enc = (*Buffer).enc_proto3_bool\n\t\tp.dec = (*Buffer).dec_proto3_bool\n\t\tp.size = size_proto3_bool\n\tcase reflect.Int32:\n\t\tp.enc = (*Buffer).enc_proto3_int32\n\t\tp.dec = (*Buffer).dec_proto3_int32\n\t\tp.size = size_proto3_int32\n\tcase reflect.Uint32:\n\t\tp.enc = (*Buffer).enc_proto3_uint32\n\t\tp.dec = (*Buffer).dec_proto3_int32 // can reuse\n\t\tp.size = size_proto3_uint32\n\tcase reflect.Int64, reflect.Uint64:\n\t\tp.enc = (*Buffer).enc_proto3_int64\n\t\tp.dec = (*Buffer).dec_proto3_int64\n\t\tp.size = size_proto3_int64\n\tcase reflect.Float32:\n\t\tp.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits\n\t\tp.dec = (*Buffer).dec_proto3_int32\n\t\tp.size = size_proto3_uint32\n\tcase reflect.Float64:\n\t\tp.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits\n\t\tp.dec = (*Buffer).dec_proto3_int64\n\t\tp.size = size_proto3_int64\n\tcase reflect.String:\n\t\tp.enc = (*Buffer).enc_proto3_string\n\t\tp.dec = (*Buffer).dec_proto3_string\n\t\tp.size = size_proto3_string\n\n\tcase reflect.Ptr:\n\t\tswitch t2 := t1.Elem(); t2.Kind() {\n\t\tdefault:\n\t\t\tfmt.Fprintf(os.Stderr, \"proto: no encoder function for %v -> %v\\n\", t1, t2)\n\t\t\tbreak\n\t\tcase reflect.Bool:\n\t\t\tp.enc = (*Buffer).enc_bool\n\t\t\tp.dec = (*Buffer).dec_bool\n\t\t\tp.size = size_bool\n\t\tcase reflect.Int32:\n\t\t\tp.enc = (*Buffer).enc_int32\n\t\t\tp.dec = (*Buffer).dec_int32\n\t\t\tp.size = size_int32\n\t\tcase reflect.Uint32:\n\t\t\tp.enc = (*Buffer).enc_uint32\n\t\t\tp.dec = (*Buffer).dec_int32 // can reuse\n\t\t\tp.size = size_uint32\n\t\tcase reflect.Int64, reflect.Uint64:\n\t\t\tp.enc = (*Buffer).enc_int64\n\t\t\tp.dec = (*Buffer).dec_int64\n\t\t\tp.size = size_int64\n\t\tcase reflect.Float32:\n\t\t\tp.enc = (*Buffer).enc_uint32 // can just treat them as bits\n\t\t\tp.dec = (*Buffer).dec_int32\n\t\t\tp.size = size_uint32\n\t\tcase reflect.Float64:\n\t\t\tp.enc = (*Buffer).enc_int64 // can just treat them as bits\n\t\t\tp.dec = (*Buffer).dec_int64\n\t\t\tp.size = size_int64\n\t\tcase reflect.String:\n\t\t\tp.enc = (*Buffer).enc_string\n\t\t\tp.dec = (*Buffer).dec_string\n\t\t\tp.size = size_string\n\t\tcase reflect.Struct:\n\t\t\tp.stype = t1.Elem()\n\t\t\tp.isMarshaler = isMarshaler(t1)\n\t\t\tp.isUnmarshaler = isUnmarshaler(t1)\n\t\t\tif p.Wire == \"bytes\" {\n\t\t\t\tp.enc = (*Buffer).enc_struct_message\n\t\t\t\tp.dec = (*Buffer).dec_struct_message\n\t\t\t\tp.size = size_struct_message\n\t\t\t} else {\n\t\t\t\tp.enc = (*Buffer).enc_struct_group\n\t\t\t\tp.dec = (*Buffer).dec_struct_group\n\t\t\t\tp.size = size_struct_group\n\t\t\t}\n\t\t}\n\n\tcase reflect.Slice:\n\t\tswitch t2 := t1.Elem(); t2.Kind() 
{\n\t\tdefault:\n\t\t\tlogNoSliceEnc(t1, t2)\n\t\t\tbreak\n\t\tcase reflect.Bool:\n\t\t\tif p.Packed {\n\t\t\t\tp.enc = (*Buffer).enc_slice_packed_bool\n\t\t\t\tp.size = size_slice_packed_bool\n\t\t\t} else {\n\t\t\t\tp.enc = (*Buffer).enc_slice_bool\n\t\t\t\tp.size = size_slice_bool\n\t\t\t}\n\t\t\tp.dec = (*Buffer).dec_slice_bool\n\t\t\tp.packedDec = (*Buffer).dec_slice_packed_bool\n\t\tcase reflect.Int32:\n\t\t\tif p.Packed {\n\t\t\t\tp.enc = (*Buffer).enc_slice_packed_int32\n\t\t\t\tp.size = size_slice_packed_int32\n\t\t\t} else {\n\t\t\t\tp.enc = (*Buffer).enc_slice_int32\n\t\t\t\tp.size = size_slice_int32\n\t\t\t}\n\t\t\tp.dec = (*Buffer).dec_slice_int32\n\t\t\tp.packedDec = (*Buffer).dec_slice_packed_int32\n\t\tcase reflect.Uint32:\n\t\t\tif p.Packed {\n\t\t\t\tp.enc = (*Buffer).enc_slice_packed_uint32\n\t\t\t\tp.size = size_slice_packed_uint32\n\t\t\t} else {\n\t\t\t\tp.enc = (*Buffer).enc_slice_uint32\n\t\t\t\tp.size = size_slice_uint32\n\t\t\t}\n\t\t\tp.dec = (*Buffer).dec_slice_int32\n\t\t\tp.packedDec = (*Buffer).dec_slice_packed_int32\n\t\tcase reflect.Int64, reflect.Uint64:\n\t\t\tif p.Packed {\n\t\t\t\tp.enc = (*Buffer).enc_slice_packed_int64\n\t\t\t\tp.size = size_slice_packed_int64\n\t\t\t} else {\n\t\t\t\tp.enc = (*Buffer).enc_slice_int64\n\t\t\t\tp.size = size_slice_int64\n\t\t\t}\n\t\t\tp.dec = (*Buffer).dec_slice_int64\n\t\t\tp.packedDec = (*Buffer).dec_slice_packed_int64\n\t\tcase reflect.Uint8:\n\t\t\tp.dec = (*Buffer).dec_slice_byte\n\t\t\tif p.proto3 {\n\t\t\t\tp.enc = (*Buffer).enc_proto3_slice_byte\n\t\t\t\tp.size = size_proto3_slice_byte\n\t\t\t} else {\n\t\t\t\tp.enc = (*Buffer).enc_slice_byte\n\t\t\t\tp.size = size_slice_byte\n\t\t\t}\n\t\tcase reflect.Float32, reflect.Float64:\n\t\t\tswitch t2.Bits() {\n\t\t\tcase 32:\n\t\t\t\t// can just treat them as bits\n\t\t\t\tif p.Packed {\n\t\t\t\t\tp.enc = (*Buffer).enc_slice_packed_uint32\n\t\t\t\t\tp.size = size_slice_packed_uint32\n\t\t\t\t} else {\n\t\t\t\t\tp.enc = (*Buffer).enc_slice_uint32\n\t\t\t\t\tp.size = size_slice_uint32\n\t\t\t\t}\n\t\t\t\tp.dec = (*Buffer).dec_slice_int32\n\t\t\t\tp.packedDec = (*Buffer).dec_slice_packed_int32\n\t\t\tcase 64:\n\t\t\t\t// can just treat them as bits\n\t\t\t\tif p.Packed {\n\t\t\t\t\tp.enc = (*Buffer).enc_slice_packed_int64\n\t\t\t\t\tp.size = size_slice_packed_int64\n\t\t\t\t} else {\n\t\t\t\t\tp.enc = (*Buffer).enc_slice_int64\n\t\t\t\t\tp.size = size_slice_int64\n\t\t\t\t}\n\t\t\t\tp.dec = (*Buffer).dec_slice_int64\n\t\t\t\tp.packedDec = (*Buffer).dec_slice_packed_int64\n\t\t\tdefault:\n\t\t\t\tlogNoSliceEnc(t1, t2)\n\t\t\t\tbreak\n\t\t\t}\n\t\tcase reflect.String:\n\t\t\tp.enc = (*Buffer).enc_slice_string\n\t\t\tp.dec = (*Buffer).dec_slice_string\n\t\t\tp.size = size_slice_string\n\t\tcase reflect.Ptr:\n\t\t\tswitch t3 := t2.Elem(); t3.Kind() {\n\t\t\tdefault:\n\t\t\t\tfmt.Fprintf(os.Stderr, \"proto: no ptr oenc for %T -> %T -> %T\\n\", t1, t2, t3)\n\t\t\t\tbreak\n\t\t\tcase reflect.Struct:\n\t\t\t\tp.stype = t2.Elem()\n\t\t\t\tp.isMarshaler = isMarshaler(t2)\n\t\t\t\tp.isUnmarshaler = isUnmarshaler(t2)\n\t\t\t\tif p.Wire == \"bytes\" {\n\t\t\t\t\tp.enc = (*Buffer).enc_slice_struct_message\n\t\t\t\t\tp.dec = (*Buffer).dec_slice_struct_message\n\t\t\t\t\tp.size = size_slice_struct_message\n\t\t\t\t} else {\n\t\t\t\t\tp.enc = (*Buffer).enc_slice_struct_group\n\t\t\t\t\tp.dec = (*Buffer).dec_slice_struct_group\n\t\t\t\t\tp.size = size_slice_struct_group\n\t\t\t\t}\n\t\t\t}\n\t\tcase reflect.Slice:\n\t\t\tswitch t2.Elem().Kind() 
{\n\t\t\tdefault:\n\t\t\t\tfmt.Fprintf(os.Stderr, \"proto: no slice elem oenc for %T -> %T -> %T\\n\", t1, t2, t2.Elem())\n\t\t\t\tbreak\n\t\t\tcase reflect.Uint8:\n\t\t\t\tp.enc = (*Buffer).enc_slice_slice_byte\n\t\t\t\tp.dec = (*Buffer).dec_slice_slice_byte\n\t\t\t\tp.size = size_slice_slice_byte\n\t\t\t}\n\t\t}\n\n\tcase reflect.Map:\n\t\tp.enc = (*Buffer).enc_new_map\n\t\tp.dec = (*Buffer).dec_new_map\n\t\tp.size = size_new_map\n\n\t\tp.mtype = t1\n\t\tp.mkeyprop = &Properties{}\n\t\tp.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), \"Key\", f.Tag.Get(\"protobuf_key\"), nil, lockGetProp)\n\t\tp.mvalprop = &Properties{}\n\t\tvtype := p.mtype.Elem()\n\t\tif vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {\n\t\t\t// The value type is not a message (*T) or bytes ([]byte),\n\t\t\t// so we need encoders for the pointer to this type.\n\t\t\tvtype = reflect.PtrTo(vtype)\n\t\t}\n\t\tp.mvalprop.init(vtype, \"Value\", f.Tag.Get(\"protobuf_val\"), nil, lockGetProp)\n\t}\n\n\t// precalculate tag code\n\twire := p.WireType\n\tif p.Packed {\n\t\twire = WireBytes\n\t}\n\tx := uint32(p.Tag)<<3 | uint32(wire)\n\ti := 0\n\tfor i = 0; x > 127; i++ {\n\t\tp.tagbuf[i] = 0x80 | uint8(x&0x7F)\n\t\tx >>= 7\n\t}\n\tp.tagbuf[i] = uint8(x)\n\tp.tagcode = p.tagbuf[0 : i+1]\n\n\tif p.stype != nil {\n\t\tif lockGetProp {\n\t\t\tp.sprop = GetProperties(p.stype)\n\t\t} else {\n\t\t\tp.sprop = getPropertiesLocked(p.stype)\n\t\t}\n\t}\n}\n\nvar (\n\tmarshalerType   = reflect.TypeOf((*Marshaler)(nil)).Elem()\n\tunmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()\n)\n\n// isMarshaler reports whether type t implements Marshaler.\nfunc isMarshaler(t reflect.Type) bool {\n\t// We're checking for (likely) pointer-receiver methods\n\t// so if t is not a pointer, something is very wrong.\n\t// The calls above only invoke isMarshaler on pointer types.\n\tif t.Kind() != reflect.Ptr {\n\t\tpanic(\"proto: misuse of isMarshaler\")\n\t}\n\treturn t.Implements(marshalerType)\n}\n\n// isUnmarshaler reports whether type t implements Unmarshaler.\nfunc isUnmarshaler(t reflect.Type) bool {\n\t// We're checking for (likely) pointer-receiver methods\n\t// so if t is not a pointer, something is very wrong.\n\t// The calls above only invoke isUnmarshaler on pointer types.\n\tif t.Kind() != reflect.Ptr {\n\t\tpanic(\"proto: misuse of isUnmarshaler\")\n\t}\n\treturn t.Implements(unmarshalerType)\n}\n\n// Init populates the properties from a protocol buffer struct tag.\nfunc (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {\n\tp.init(typ, name, tag, f, true)\n}\n\nfunc (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {\n\t// \"bytes,49,opt,def=hello!\"\n\tp.Name = name\n\tp.OrigName = name\n\tif f != nil {\n\t\tp.field = toField(f)\n\t}\n\tif tag == \"\" {\n\t\treturn\n\t}\n\tp.Parse(tag)\n\tp.setEncAndDec(typ, f, lockGetProp)\n}\n\nvar (\n\tpropertiesMu  sync.RWMutex\n\tpropertiesMap = make(map[reflect.Type]*StructProperties)\n)\n\n// GetProperties returns the list of properties for the type represented by t.\n// t must represent a generated struct type of a protocol message.\nfunc GetProperties(t reflect.Type) *StructProperties {\n\tif t.Kind() != reflect.Struct {\n\t\tpanic(\"proto: type must have kind struct\")\n\t}\n\n\t// Most calls to GetProperties in a long-running program will be\n\t// retrieving details for types we have seen before.\n\tpropertiesMu.RLock()\n\tsprop, ok := propertiesMap[t]\n\tpropertiesMu.RUnlock()\n\tif ok {\n\t\tif 
collectStats {\n\t\t\tstats.Chit++\n\t\t}\n\t\treturn sprop\n\t}\n\n\tpropertiesMu.Lock()\n\tsprop = getPropertiesLocked(t)\n\tpropertiesMu.Unlock()\n\treturn sprop\n}\n\n// getPropertiesLocked requires that propertiesMu is held.\nfunc getPropertiesLocked(t reflect.Type) *StructProperties {\n\tif prop, ok := propertiesMap[t]; ok {\n\t\tif collectStats {\n\t\t\tstats.Chit++\n\t\t}\n\t\treturn prop\n\t}\n\tif collectStats {\n\t\tstats.Cmiss++\n\t}\n\n\tprop := new(StructProperties)\n\t// in case of recursive protos, fill this in now.\n\tpropertiesMap[t] = prop\n\n\t// build properties\n\tprop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) ||\n\t\treflect.PtrTo(t).Implements(extendableProtoV1Type)\n\tprop.unrecField = invalidField\n\tprop.Prop = make([]*Properties, t.NumField())\n\tprop.order = make([]int, t.NumField())\n\n\tfor i := 0; i < t.NumField(); i++ {\n\t\tf := t.Field(i)\n\t\tp := new(Properties)\n\t\tname := f.Name\n\t\tp.init(f.Type, name, f.Tag.Get(\"protobuf\"), &f, false)\n\n\t\tif f.Name == \"XXX_InternalExtensions\" { // special case\n\t\t\tp.enc = (*Buffer).enc_exts\n\t\t\tp.dec = nil // not needed\n\t\t\tp.size = size_exts\n\t\t} else if f.Name == \"XXX_extensions\" { // special case\n\t\t\tp.enc = (*Buffer).enc_map\n\t\t\tp.dec = nil // not needed\n\t\t\tp.size = size_map\n\t\t} else if f.Name == \"XXX_unrecognized\" { // special case\n\t\t\tprop.unrecField = toField(&f)\n\t\t}\n\t\toneof := f.Tag.Get(\"protobuf_oneof\") // special case\n\t\tif oneof != \"\" {\n\t\t\t// Oneof fields don't use the traditional protobuf tag.\n\t\t\tp.OrigName = oneof\n\t\t}\n\t\tprop.Prop[i] = p\n\t\tprop.order[i] = i\n\t\tif debug {\n\t\t\tprint(i, \" \", f.Name, \" \", t.String(), \" \")\n\t\t\tif p.Tag > 0 {\n\t\t\t\tprint(p.String())\n\t\t\t}\n\t\t\tprint(\"\\n\")\n\t\t}\n\t\tif p.enc == nil && !strings.HasPrefix(f.Name, \"XXX_\") && oneof == \"\" {\n\t\t\tfmt.Fprintln(os.Stderr, \"proto: no encoder for\", f.Name, f.Type.String(), \"[GetProperties]\")\n\t\t}\n\t}\n\n\t// Re-order prop.order.\n\tsort.Sort(prop)\n\n\ttype oneofMessage interface {\n\t\tXXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})\n\t}\n\tif om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {\n\t\tvar oots []interface{}\n\t\tprop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()\n\t\tprop.stype = t\n\n\t\t// Interpret oneof metadata.\n\t\tprop.OneofTypes = make(map[string]*OneofProperties)\n\t\tfor _, oot := range oots {\n\t\t\toop := &OneofProperties{\n\t\t\t\tType: reflect.ValueOf(oot).Type(), // *T\n\t\t\t\tProp: new(Properties),\n\t\t\t}\n\t\t\tsft := oop.Type.Elem().Field(0)\n\t\t\toop.Prop.Name = sft.Name\n\t\t\toop.Prop.Parse(sft.Tag.Get(\"protobuf\"))\n\t\t\t// There will be exactly one interface field that\n\t\t\t// this new value is assignable to.\n\t\t\tfor i := 0; i < t.NumField(); i++ {\n\t\t\t\tf := t.Field(i)\n\t\t\t\tif f.Type.Kind() != reflect.Interface {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif !oop.Type.AssignableTo(f.Type) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\toop.Field = i\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tprop.OneofTypes[oop.Prop.OrigName] = oop\n\t\t}\n\t}\n\n\t// build required counts\n\t// build tags\n\treqCount := 0\n\tprop.decoderOrigNames = make(map[string]int)\n\tfor i, p := range prop.Prop {\n\t\tif strings.HasPrefix(p.Name, \"XXX_\") {\n\t\t\t// Internal fields should not appear in tags/origNames maps.\n\t\t\t// They are handled specially 
when encoding and decoding.\n\t\t\tcontinue\n\t\t}\n\t\tif p.Required {\n\t\t\treqCount++\n\t\t}\n\t\tprop.decoderTags.put(p.Tag, i)\n\t\tprop.decoderOrigNames[p.OrigName] = i\n\t}\n\tprop.reqCount = reqCount\n\n\treturn prop\n}\n\n// Return the Properties object for the x[0]'th field of the structure.\nfunc propByIndex(t reflect.Type, x []int) *Properties {\n\tif len(x) != 1 {\n\t\tfmt.Fprintf(os.Stderr, \"proto: field index dimension %d (not 1) for type %s\\n\", len(x), t)\n\t\treturn nil\n\t}\n\tprop := GetProperties(t)\n\treturn prop.Prop[x[0]]\n}\n\n// Get the address and type of a pointer to a struct from an interface.\nfunc getbase(pb Message) (t reflect.Type, b structPointer, err error) {\n\tif pb == nil {\n\t\terr = ErrNil\n\t\treturn\n\t}\n\t// get the reflect type of the pointer to the struct.\n\tt = reflect.TypeOf(pb)\n\t// get the address of the struct.\n\tvalue := reflect.ValueOf(pb)\n\tb = toStructPointer(value)\n\treturn\n}\n\n// A global registry of enum types.\n// The generated code will register the generated maps by calling RegisterEnum.\n\nvar enumValueMaps = make(map[string]map[string]int32)\n\n// RegisterEnum is called from the generated code to install the enum descriptor\n// maps into the global table to aid parsing text format protocol buffers.\nfunc RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {\n\tif _, ok := enumValueMaps[typeName]; ok {\n\t\tpanic(\"proto: duplicate enum registered: \" + typeName)\n\t}\n\tenumValueMaps[typeName] = valueMap\n}\n\n// EnumValueMap returns the mapping from names to integers of the\n// enum type enumType, or a nil if not found.\nfunc EnumValueMap(enumType string) map[string]int32 {\n\treturn enumValueMaps[enumType]\n}\n\n// A registry of all linked message types.\n// The string is a fully-qualified proto name (\"pkg.Message\").\nvar (\n\tprotoTypes    = make(map[string]reflect.Type)\n\trevProtoTypes = make(map[reflect.Type]string)\n)\n\n// RegisterType is called from generated code and maps from the fully qualified\n// proto name to the type (pointer to struct) of the protocol buffer.\nfunc RegisterType(x Message, name string) {\n\tif _, ok := protoTypes[name]; ok {\n\t\t// TODO: Some day, make this a panic.\n\t\tlog.Printf(\"proto: duplicate proto type registered: %s\", name)\n\t\treturn\n\t}\n\tt := reflect.TypeOf(x)\n\tprotoTypes[name] = t\n\trevProtoTypes[t] = name\n}\n\n// MessageName returns the fully-qualified proto name for the given message type.\nfunc MessageName(x Message) string {\n\ttype xname interface {\n\t\tXXX_MessageName() string\n\t}\n\tif m, ok := x.(xname); ok {\n\t\treturn m.XXX_MessageName()\n\t}\n\treturn revProtoTypes[reflect.TypeOf(x)]\n}\n\n// MessageType returns the message type (pointer to struct) for a named message.\nfunc MessageType(name string) reflect.Type { return protoTypes[name] }\n\n// A registry of all linked proto files.\nvar (\n\tprotoFiles = make(map[string][]byte) // file name => fileDescriptor\n)\n\n// RegisterFile is called from generated code and maps from the\n// full file name of a .proto file to its compressed FileDescriptorProto.\nfunc RegisterFile(filename string, fileDescriptor []byte) {\n\tprotoFiles[filename] = fileDescriptor\n}\n\n// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.\nfunc FileDescriptor(filename string) []byte { return protoFiles[filename] }\n"
  },
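  {
    "path": "vendor/github.com/golang/protobuf/proto/properties_example_test.go",
    "content": "// This file is a hypothetical, editor-added sketch and is not part of\n// upstream github.com/golang/protobuf. It illustrates the struct tag\n// grammar that Properties.Parse in properties.go accepts, using only the\n// exported Properties type and its exported fields; the file name and\n// test name are illustrative only.\n\npackage proto_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/golang/protobuf/proto\"\n)\n\nfunc TestParsePropertiesSketch(t *testing.T) {\n\t// The example tag from the Parse doc comment: wire type, tag number,\n\t// the opt flag, the original field name, and a default value.\n\tp := new(proto.Properties)\n\tp.Parse(\"bytes,49,opt,name=foo,def=hello!\")\n\n\tif p.Wire != \"bytes\" {\n\t\tt.Errorf(\"Wire = %q, want %q\", p.Wire, \"bytes\")\n\t}\n\tif p.Tag != 49 {\n\t\tt.Errorf(\"Tag = %d, want 49\", p.Tag)\n\t}\n\tif !p.Optional {\n\t\tt.Error(\"Optional = false, want true\")\n\t}\n\tif p.OrigName != \"foo\" {\n\t\tt.Errorf(\"OrigName = %q, want %q\", p.OrigName, \"foo\")\n\t}\n\tif !p.HasDefault || p.Default != \"hello!\" {\n\t\tt.Errorf(\"Default = (%t, %q), want (true, %q)\", p.HasDefault, p.Default, \"hello!\")\n\t}\n}\n"
  },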
  {
    "path": "vendor/github.com/golang/protobuf/proto/proto3_test.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2014 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/golang/protobuf/proto\"\n\tpb \"github.com/golang/protobuf/proto/proto3_proto\"\n\ttpb \"github.com/golang/protobuf/proto/testdata\"\n)\n\nfunc TestProto3ZeroValues(t *testing.T) {\n\ttests := []struct {\n\t\tdesc string\n\t\tm    proto.Message\n\t}{\n\t\t{\"zero message\", &pb.Message{}},\n\t\t{\"empty bytes field\", &pb.Message{Data: []byte{}}},\n\t}\n\tfor _, test := range tests {\n\t\tb, err := proto.Marshal(test.m)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: proto.Marshal: %v\", test.desc, err)\n\t\t\tcontinue\n\t\t}\n\t\tif len(b) > 0 {\n\t\t\tt.Errorf(\"%s: Encoding is non-empty: %q\", test.desc, b)\n\t\t}\n\t}\n}\n\nfunc TestRoundTripProto3(t *testing.T) {\n\tm := &pb.Message{\n\t\tName:         \"David\",          // (2 | 1<<3): 0x0a 0x05 \"David\"\n\t\tHilarity:     pb.Message_PUNS,  // (0 | 2<<3): 0x10 0x01\n\t\tHeightInCm:   178,              // (0 | 3<<3): 0x18 0xb2 0x01\n\t\tData:         []byte(\"roboto\"), // (2 | 4<<3): 0x20 0x06 \"roboto\"\n\t\tResultCount:  47,               // (0 | 7<<3): 0x38 0x2f\n\t\tTrueScotsman: true,             // (0 | 8<<3): 0x40 0x01\n\t\tScore:        8.1,              // (5 | 9<<3): 0x4d <8.1>\n\n\t\tKey: []uint64{1, 0xdeadbeef},\n\t\tNested: &pb.Nested{\n\t\t\tBunny: \"Monty\",\n\t\t},\n\t}\n\tt.Logf(\" m: %v\", m)\n\n\tb, err := proto.Marshal(m)\n\tif err != nil {\n\t\tt.Fatalf(\"proto.Marshal: %v\", err)\n\t}\n\tt.Logf(\" b: %q\", b)\n\n\tm2 := new(pb.Message)\n\tif err := proto.Unmarshal(b, m2); err != nil {\n\t\tt.Fatalf(\"proto.Unmarshal: %v\", err)\n\t}\n\tt.Logf(\"m2: %v\", m2)\n\n\tif !proto.Equal(m, m2) {\n\t\tt.Errorf(\"proto.Equal returned false:\\n m: %v\\nm2: %v\", m, m2)\n\t}\n}\n\nfunc TestGettersForBasicTypesExist(t *testing.T) {\n\tvar m pb.Message\n\tif got := 
m.GetNested().GetBunny(); got != \"\" {\n\t\tt.Errorf(\"m.GetNested().GetBunny() = %q, want empty string\", got)\n\t}\n\tif got := m.GetNested().GetCute(); got {\n\t\tt.Errorf(\"m.GetNested().GetCute() = %t, want false\", got)\n\t}\n}\n\nfunc TestProto3SetDefaults(t *testing.T) {\n\tin := &pb.Message{\n\t\tTerrain: map[string]*pb.Nested{\n\t\t\t\"meadow\": new(pb.Nested),\n\t\t},\n\t\tProto2Field: new(tpb.SubDefaults),\n\t\tProto2Value: map[string]*tpb.SubDefaults{\n\t\t\t\"badlands\": new(tpb.SubDefaults),\n\t\t},\n\t}\n\n\tgot := proto.Clone(in).(*pb.Message)\n\tproto.SetDefaults(got)\n\n\t// There are no defaults in proto3.  Everything should be the zero value, but\n\t// we need to remember to set defaults for nested proto2 messages.\n\twant := &pb.Message{\n\t\tTerrain: map[string]*pb.Nested{\n\t\t\t\"meadow\": new(pb.Nested),\n\t\t},\n\t\tProto2Field: &tpb.SubDefaults{N: proto.Int64(7)},\n\t\tProto2Value: map[string]*tpb.SubDefaults{\n\t\t\t\"badlands\": &tpb.SubDefaults{N: proto.Int64(7)},\n\t\t},\n\t}\n\n\tif !proto.Equal(got, want) {\n\t\tt.Errorf(\"with in = %v\\nproto.SetDefaults(in) =>\\ngot %v\\nwant %v\", in, got, want)\n\t}\n}\n"
  },
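  {
    "path": "vendor/github.com/golang/protobuf/proto/proto3_wire_example_test.go",
    "content": "// This file is a hypothetical, editor-added sketch and is not part of\n// upstream github.com/golang/protobuf. It spells out the wire-format\n// arithmetic from the comments in TestRoundTripProto3: each field key is\n// the varint (tag<<3 | wiretype), so HeightInCm (tag 3, wire type 0 for\n// varint) becomes 0x18, followed by 178 encoded as the varint 0xb2 0x01.\n\npackage proto_test\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\n\t\"github.com/golang/protobuf/proto\"\n\tpb \"github.com/golang/protobuf/proto/proto3_proto\"\n)\n\nfunc TestProto3WireBytesSketch(t *testing.T) {\n\tm := &pb.Message{HeightInCm: 178}\n\tb, err := proto.Marshal(m)\n\tif err != nil {\n\t\tt.Fatalf(\"proto.Marshal: %v\", err)\n\t}\n\t// key = 3<<3 | 0 = 0x18; 178 is its low seven bits with the\n\t// continuation bit set (0xb2) followed by the remaining high bit (0x01).\n\twant := []byte{0x18, 0xb2, 0x01}\n\tif !bytes.Equal(b, want) {\n\t\tt.Errorf(\"Marshal(%v) = % x, want % x\", m, b, want)\n\t}\n}\n"
  },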
  {
    "path": "vendor/github.com/golang/protobuf/proto/size2_test.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2012 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto\n\nimport (\n\t\"testing\"\n)\n\n// This is a separate file and package from size_test.go because that one uses\n// generated messages and thus may not be in package proto without having a circular\n// dependency, whereas this file tests unexported details of size.go.\n\nfunc TestVarintSize(t *testing.T) {\n\t// Check the edge cases carefully.\n\ttestCases := []struct {\n\t\tn    uint64\n\t\tsize int\n\t}{\n\t\t{0, 1},\n\t\t{1, 1},\n\t\t{127, 1},\n\t\t{128, 2},\n\t\t{16383, 2},\n\t\t{16384, 3},\n\t\t{1<<63 - 1, 9},\n\t\t{1 << 63, 10},\n\t}\n\tfor _, tc := range testCases {\n\t\tsize := sizeVarint(tc.n)\n\t\tif size != tc.size {\n\t\t\tt.Errorf(\"sizeVarint(%d) = %d, want %d\", tc.n, size, tc.size)\n\t\t}\n\t}\n}\n"
  },
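  {
    "path": "vendor/github.com/golang/protobuf/proto/size2_varint_example_test.go",
    "content": "// This file is a hypothetical, editor-added sketch and is not part of\n// upstream github.com/golang/protobuf. Like size2_test.go it lives in\n// package proto so it can reach unexported details: it cross-checks\n// sizeVarint against the number of bytes (*Buffer).EncodeVarint actually\n// writes, at the same 7-bit boundaries that TestVarintSize exercises.\n\npackage proto\n\nimport \"testing\"\n\nfunc TestVarintSizeMatchesEncodingSketch(t *testing.T) {\n\tfor _, x := range []uint64{0, 1, 127, 128, 16383, 16384, 1<<63 - 1, 1 << 63} {\n\t\tb := NewBuffer(nil)\n\t\tif err := b.EncodeVarint(x); err != nil {\n\t\t\tt.Fatalf(\"EncodeVarint(%d): %v\", x, err)\n\t\t}\n\t\t// Every 7 bits of the value costs one byte on the wire, so the\n\t\t// predicted size and the encoded length must agree.\n\t\tif got, want := len(b.buf), sizeVarint(x); got != want {\n\t\t\tt.Errorf(\"EncodeVarint(%d) wrote %d bytes, sizeVarint = %d\", x, got, want)\n\t\t}\n\t}\n}\n"
  },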
  {
    "path": "vendor/github.com/golang/protobuf/proto/size_test.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2012 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto_test\n\nimport (\n\t\"log\"\n\t\"strings\"\n\t\"testing\"\n\n\t. 
\"github.com/golang/protobuf/proto\"\n\tproto3pb \"github.com/golang/protobuf/proto/proto3_proto\"\n\tpb \"github.com/golang/protobuf/proto/testdata\"\n)\n\nvar messageWithExtension1 = &pb.MyMessage{Count: Int32(7)}\n\n// messageWithExtension2 is in equal_test.go.\nvar messageWithExtension3 = &pb.MyMessage{Count: Int32(8)}\n\nfunc init() {\n\tif err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String(\"Abbott\")}); err != nil {\n\t\tlog.Panicf(\"SetExtension: %v\", err)\n\t}\n\tif err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String(\"Costello\")}); err != nil {\n\t\tlog.Panicf(\"SetExtension: %v\", err)\n\t}\n\n\t// Force messageWithExtension3 to have the extension encoded.\n\tMarshal(messageWithExtension3)\n\n}\n\nvar SizeTests = []struct {\n\tdesc string\n\tpb   Message\n}{\n\t{\"empty\", &pb.OtherMessage{}},\n\t// Basic types.\n\t{\"bool\", &pb.Defaults{F_Bool: Bool(true)}},\n\t{\"int32\", &pb.Defaults{F_Int32: Int32(12)}},\n\t{\"negative int32\", &pb.Defaults{F_Int32: Int32(-1)}},\n\t{\"small int64\", &pb.Defaults{F_Int64: Int64(1)}},\n\t{\"big int64\", &pb.Defaults{F_Int64: Int64(1 << 20)}},\n\t{\"negative int64\", &pb.Defaults{F_Int64: Int64(-1)}},\n\t{\"fixed32\", &pb.Defaults{F_Fixed32: Uint32(71)}},\n\t{\"fixed64\", &pb.Defaults{F_Fixed64: Uint64(72)}},\n\t{\"uint32\", &pb.Defaults{F_Uint32: Uint32(123)}},\n\t{\"uint64\", &pb.Defaults{F_Uint64: Uint64(124)}},\n\t{\"float\", &pb.Defaults{F_Float: Float32(12.6)}},\n\t{\"double\", &pb.Defaults{F_Double: Float64(13.9)}},\n\t{\"string\", &pb.Defaults{F_String: String(\"niles\")}},\n\t{\"bytes\", &pb.Defaults{F_Bytes: []byte(\"wowsa\")}},\n\t{\"bytes, empty\", &pb.Defaults{F_Bytes: []byte{}}},\n\t{\"sint32\", &pb.Defaults{F_Sint32: Int32(65)}},\n\t{\"sint64\", &pb.Defaults{F_Sint64: Int64(67)}},\n\t{\"enum\", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}},\n\t// Repeated.\n\t{\"empty repeated bool\", &pb.MoreRepeated{Bools: []bool{}}},\n\t{\"repeated bool\", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}},\n\t{\"packed repeated bool\", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}},\n\t{\"repeated int32\", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}},\n\t{\"repeated int32 packed\", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}},\n\t{\"repeated int64 packed\", &pb.MoreRepeated{Int64SPacked: []int64{\n\t\t// Need enough large numbers to verify that the header is counting the number of bytes\n\t\t// for the field, not the number of elements.\n\t\t1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62,\n\t\t1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62,\n\t}}},\n\t{\"repeated string\", &pb.MoreRepeated{Strings: []string{\"r\", \"ken\", \"gri\"}}},\n\t{\"repeated fixed\", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}},\n\t// Nested.\n\t{\"nested\", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String(\"whatever\")}}},\n\t{\"group\", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}},\n\t// Other things.\n\t{\"unrecognized\", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}},\n\t{\"extension (unencoded)\", messageWithExtension1},\n\t{\"extension (encoded)\", messageWithExtension3},\n\t// proto3 message\n\t{\"proto3 empty\", &proto3pb.Message{}},\n\t{\"proto3 bool\", &proto3pb.Message{TrueScotsman: true}},\n\t{\"proto3 int64\", &proto3pb.Message{ResultCount: 1}},\n\t{\"proto3 uint32\", &proto3pb.Message{HeightInCm: 123}},\n\t{\"proto3 float\", 
&proto3pb.Message{Score: 12.6}},\n\t{\"proto3 string\", &proto3pb.Message{Name: \"Snezana\"}},\n\t{\"proto3 bytes\", &proto3pb.Message{Data: []byte(\"wowsa\")}},\n\t{\"proto3 bytes, empty\", &proto3pb.Message{Data: []byte{}}},\n\t{\"proto3 enum\", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}},\n\t{\"proto3 map field with empty bytes\", &proto3pb.MessageWithMap{ByteMapping: map[bool][]byte{false: []byte{}}}},\n\n\t{\"map field\", &pb.MessageWithMap{NameMapping: map[int32]string{1: \"Rob\", 7: \"Andrew\"}}},\n\t{\"map field with message\", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}},\n\t{\"map field with bytes\", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte(\"this time for sure\")}}},\n\t{\"map field with empty bytes\", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte{}}}},\n\n\t{\"map field with big entry\", &pb.MessageWithMap{NameMapping: map[int32]string{8: strings.Repeat(\"x\", 125)}}},\n\t{\"map field with big key and val\", &pb.MessageWithMap{StrToStr: map[string]string{strings.Repeat(\"x\", 70): strings.Repeat(\"y\", 70)}}},\n\t{\"map field with big numeric key\", &pb.MessageWithMap{NameMapping: map[int32]string{0xf00d: \"om nom nom\"}}},\n\n\t{\"oneof not set\", &pb.Oneof{}},\n\t{\"oneof bool\", &pb.Oneof{Union: &pb.Oneof_F_Bool{true}}},\n\t{\"oneof zero int32\", &pb.Oneof{Union: &pb.Oneof_F_Int32{0}}},\n\t{\"oneof big int32\", &pb.Oneof{Union: &pb.Oneof_F_Int32{1 << 20}}},\n\t{\"oneof int64\", &pb.Oneof{Union: &pb.Oneof_F_Int64{42}}},\n\t{\"oneof fixed32\", &pb.Oneof{Union: &pb.Oneof_F_Fixed32{43}}},\n\t{\"oneof fixed64\", &pb.Oneof{Union: &pb.Oneof_F_Fixed64{44}}},\n\t{\"oneof uint32\", &pb.Oneof{Union: &pb.Oneof_F_Uint32{45}}},\n\t{\"oneof uint64\", &pb.Oneof{Union: &pb.Oneof_F_Uint64{46}}},\n\t{\"oneof float\", &pb.Oneof{Union: &pb.Oneof_F_Float{47.1}}},\n\t{\"oneof double\", &pb.Oneof{Union: &pb.Oneof_F_Double{48.9}}},\n\t{\"oneof string\", &pb.Oneof{Union: &pb.Oneof_F_String{\"Rhythmic Fman\"}}},\n\t{\"oneof bytes\", &pb.Oneof{Union: &pb.Oneof_F_Bytes{[]byte(\"let go\")}}},\n\t{\"oneof sint32\", &pb.Oneof{Union: &pb.Oneof_F_Sint32{50}}},\n\t{\"oneof sint64\", &pb.Oneof{Union: &pb.Oneof_F_Sint64{51}}},\n\t{\"oneof enum\", &pb.Oneof{Union: &pb.Oneof_F_Enum{pb.MyMessage_BLUE}}},\n\t{\"message for oneof\", &pb.GoTestField{Label: String(\"k\"), Type: String(\"v\")}},\n\t{\"oneof message\", &pb.Oneof{Union: &pb.Oneof_F_Message{&pb.GoTestField{Label: String(\"k\"), Type: String(\"v\")}}}},\n\t{\"oneof group\", &pb.Oneof{Union: &pb.Oneof_FGroup{&pb.Oneof_F_Group{X: Int32(52)}}}},\n\t{\"oneof largest tag\", &pb.Oneof{Union: &pb.Oneof_F_Largest_Tag{1}}},\n\t{\"multiple oneofs\", &pb.Oneof{Union: &pb.Oneof_F_Int32{1}, Tormato: &pb.Oneof_Value{2}}},\n}\n\nfunc TestSize(t *testing.T) {\n\tfor _, tc := range SizeTests {\n\t\tsize := Size(tc.pb)\n\t\tb, err := Marshal(tc.pb)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%v: Marshal failed: %v\", tc.desc, err)\n\t\t\tcontinue\n\t\t}\n\t\tif size != len(b) {\n\t\t\tt.Errorf(\"%v: Size(%v) = %d, want %d\", tc.desc, tc.pb, size, len(b))\n\t\t\tt.Logf(\"%v: bytes: %#v\", tc.desc, b)\n\t\t}\n\t}\n}\n"
  },
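  {
    "path": "vendor/github.com/golang/protobuf/proto/size_test_packed_example_test.go",
    "content": "// This file is a hypothetical, editor-added sketch and is not part of\n// upstream github.com/golang/protobuf. It works through the claim in the\n// size_test.go test data that a packed field's length header counts bytes,\n// not elements: each 1<<62 element is a 9-byte varint, so 10 elements give\n// a 90-byte payload behind a 1-byte length header, while 20 elements give\n// a 180-byte payload behind a 2-byte header. The two sizes should\n// therefore differ by 90 + 1 = 91 bytes, not by exactly 90.\n\npackage proto_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/golang/protobuf/proto\"\n\tpb \"github.com/golang/protobuf/proto/testdata\"\n)\n\nfunc TestPackedHeaderCountsBytesSketch(t *testing.T) {\n\telems := make([]int64, 20)\n\tfor i := range elems {\n\t\telems[i] = 1 << 62\n\t}\n\tsmall := &pb.MoreRepeated{Int64SPacked: elems[:10]}\n\tbig := &pb.MoreRepeated{Int64SPacked: elems}\n\n\t// The field key bytes are identical for both messages, so the size\n\t// difference is the extra payload plus the extra length-header byte.\n\tdiff := proto.Size(big) - proto.Size(small)\n\tif diff != 91 {\n\t\tt.Errorf(\"Size difference = %d, want 91 (90 payload bytes + 1 length-header byte)\", diff)\n\t}\n}\n"
  },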
  {
    "path": "vendor/github.com/golang/protobuf/proto/text.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto\n\n// Functions for writing the text protocol buffer format.\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"math\"\n\t\"reflect\"\n\t\"sort\"\n\t\"strings\"\n)\n\nvar (\n\tnewline         = []byte(\"\\n\")\n\tspaces          = []byte(\"                                        \")\n\tgtNewline       = []byte(\">\\n\")\n\tendBraceNewline = []byte(\"}\\n\")\n\tbackslashN      = []byte{'\\\\', 'n'}\n\tbackslashR      = []byte{'\\\\', 'r'}\n\tbackslashT      = []byte{'\\\\', 't'}\n\tbackslashDQ     = []byte{'\\\\', '\"'}\n\tbackslashBS     = []byte{'\\\\', '\\\\'}\n\tposInf          = []byte(\"inf\")\n\tnegInf          = []byte(\"-inf\")\n\tnan             = []byte(\"nan\")\n)\n\ntype writer interface {\n\tio.Writer\n\tWriteByte(byte) error\n}\n\n// textWriter is an io.Writer that tracks its indentation level.\ntype textWriter struct {\n\tind      int\n\tcomplete bool // if the current position is a complete line\n\tcompact  bool // whether to write out as a one-liner\n\tw        writer\n}\n\nfunc (w *textWriter) WriteString(s string) (n int, err error) {\n\tif !strings.Contains(s, \"\\n\") {\n\t\tif !w.compact && w.complete {\n\t\t\tw.writeIndent()\n\t\t}\n\t\tw.complete = false\n\t\treturn io.WriteString(w.w, s)\n\t}\n\t// WriteString is typically called without newlines, so this\n\t// codepath and its copy are rare.  
We copy to avoid\n\t// duplicating all of Write's logic here.\n\treturn w.Write([]byte(s))\n}\n\nfunc (w *textWriter) Write(p []byte) (n int, err error) {\n\tnewlines := bytes.Count(p, newline)\n\tif newlines == 0 {\n\t\tif !w.compact && w.complete {\n\t\t\tw.writeIndent()\n\t\t}\n\t\tn, err = w.w.Write(p)\n\t\tw.complete = false\n\t\treturn n, err\n\t}\n\n\tfrags := bytes.SplitN(p, newline, newlines+1)\n\tif w.compact {\n\t\tfor i, frag := range frags {\n\t\t\tif i > 0 {\n\t\t\t\tif err := w.w.WriteByte(' '); err != nil {\n\t\t\t\t\treturn n, err\n\t\t\t\t}\n\t\t\t\tn++\n\t\t\t}\n\t\t\tnn, err := w.w.Write(frag)\n\t\t\tn += nn\n\t\t\tif err != nil {\n\t\t\t\treturn n, err\n\t\t\t}\n\t\t}\n\t\treturn n, nil\n\t}\n\n\tfor i, frag := range frags {\n\t\tif w.complete {\n\t\t\tw.writeIndent()\n\t\t}\n\t\tnn, err := w.w.Write(frag)\n\t\tn += nn\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\t\tif i+1 < len(frags) {\n\t\t\tif err := w.w.WriteByte('\\n'); err != nil {\n\t\t\t\treturn n, err\n\t\t\t}\n\t\t\tn++\n\t\t}\n\t}\n\tw.complete = len(frags[len(frags)-1]) == 0\n\treturn n, nil\n}\n\nfunc (w *textWriter) WriteByte(c byte) error {\n\tif w.compact && c == '\\n' {\n\t\tc = ' '\n\t}\n\tif !w.compact && w.complete {\n\t\tw.writeIndent()\n\t}\n\terr := w.w.WriteByte(c)\n\tw.complete = c == '\\n'\n\treturn err\n}\n\nfunc (w *textWriter) indent() { w.ind++ }\n\nfunc (w *textWriter) unindent() {\n\tif w.ind == 0 {\n\t\tlog.Print(\"proto: textWriter unindented too far\")\n\t\treturn\n\t}\n\tw.ind--\n}\n\nfunc writeName(w *textWriter, props *Properties) error {\n\tif _, err := w.WriteString(props.OrigName); err != nil {\n\t\treturn err\n\t}\n\tif props.Wire != \"group\" {\n\t\treturn w.WriteByte(':')\n\t}\n\treturn nil\n}\n\n// raw is the interface satisfied by RawMessage.\ntype raw interface {\n\tBytes() []byte\n}\n\nfunc requiresQuotes(u string) bool {\n\t// When type URL contains any characters except [0-9A-Za-z./\\-]*, it must be quoted.\n\tfor _, ch := range u {\n\t\tswitch {\n\t\tcase ch == '.' || ch == '/' || ch == '_':\n\t\t\tcontinue\n\t\tcase '0' <= ch && ch <= '9':\n\t\t\tcontinue\n\t\tcase 'A' <= ch && ch <= 'Z':\n\t\t\tcontinue\n\t\tcase 'a' <= ch && ch <= 'z':\n\t\t\tcontinue\n\t\tdefault:\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// isAny reports whether sv is a google.protobuf.Any message\nfunc isAny(sv reflect.Value) bool {\n\ttype wkt interface {\n\t\tXXX_WellKnownType() string\n\t}\n\tt, ok := sv.Addr().Interface().(wkt)\n\treturn ok && t.XXX_WellKnownType() == \"Any\"\n}\n\n// writeProto3Any writes an expanded google.protobuf.Any message.\n//\n// It returns (false, nil) if sv value can't be unmarshaled (e.g. 
because\n// required messages are not linked in).\n//\n// It returns (true, error) when sv was written in expanded format or an error\n// was encountered.\nfunc (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {\n\tturl := sv.FieldByName(\"TypeUrl\")\n\tval := sv.FieldByName(\"Value\")\n\tif !turl.IsValid() || !val.IsValid() {\n\t\treturn true, errors.New(\"proto: invalid google.protobuf.Any message\")\n\t}\n\n\tb, ok := val.Interface().([]byte)\n\tif !ok {\n\t\treturn true, errors.New(\"proto: invalid google.protobuf.Any message\")\n\t}\n\n\tparts := strings.Split(turl.String(), \"/\")\n\tmt := MessageType(parts[len(parts)-1])\n\tif mt == nil {\n\t\treturn false, nil\n\t}\n\tm := reflect.New(mt.Elem())\n\tif err := Unmarshal(b, m.Interface().(Message)); err != nil {\n\t\treturn false, nil\n\t}\n\tw.Write([]byte(\"[\"))\n\tu := turl.String()\n\tif requiresQuotes(u) {\n\t\twriteString(w, u)\n\t} else {\n\t\tw.Write([]byte(u))\n\t}\n\tif w.compact {\n\t\tw.Write([]byte(\"]:<\"))\n\t} else {\n\t\tw.Write([]byte(\"]: <\\n\"))\n\t\tw.ind++\n\t}\n\tif err := tm.writeStruct(w, m.Elem()); err != nil {\n\t\treturn true, err\n\t}\n\tif w.compact {\n\t\tw.Write([]byte(\"> \"))\n\t} else {\n\t\tw.ind--\n\t\tw.Write([]byte(\">\\n\"))\n\t}\n\treturn true, nil\n}\n\nfunc (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {\n\tif tm.ExpandAny && isAny(sv) {\n\t\tif canExpand, err := tm.writeProto3Any(w, sv); canExpand {\n\t\t\treturn err\n\t\t}\n\t}\n\tst := sv.Type()\n\tsprops := GetProperties(st)\n\tfor i := 0; i < sv.NumField(); i++ {\n\t\tfv := sv.Field(i)\n\t\tprops := sprops.Prop[i]\n\t\tname := st.Field(i).Name\n\n\t\tif strings.HasPrefix(name, \"XXX_\") {\n\t\t\t// There are two XXX_ fields:\n\t\t\t//   XXX_unrecognized []byte\n\t\t\t//   XXX_extensions   map[int32]proto.Extension\n\t\t\t// The first is handled here;\n\t\t\t// the second is handled at the bottom of this function.\n\t\t\tif name == \"XXX_unrecognized\" && !fv.IsNil() {\n\t\t\t\tif err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif fv.Kind() == reflect.Ptr && fv.IsNil() {\n\t\t\t// Field not filled in. This could be an optional field or\n\t\t\t// a required field that wasn't filled in. 
Either way, there\n\t\t\t// isn't anything we can show for it.\n\t\t\tcontinue\n\t\t}\n\t\tif fv.Kind() == reflect.Slice && fv.IsNil() {\n\t\t\t// Repeated field that is empty, or a bytes field that is unused.\n\t\t\tcontinue\n\t\t}\n\n\t\tif props.Repeated && fv.Kind() == reflect.Slice {\n\t\t\t// Repeated field.\n\t\t\tfor j := 0; j < fv.Len(); j++ {\n\t\t\t\tif err := writeName(w, props); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif !w.compact {\n\t\t\t\t\tif err := w.WriteByte(' '); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tv := fv.Index(j)\n\t\t\t\tif v.Kind() == reflect.Ptr && v.IsNil() {\n\t\t\t\t\t// A nil message in a repeated field is not valid,\n\t\t\t\t\t// but we can handle that more gracefully than panicking.\n\t\t\t\t\tif _, err := w.Write([]byte(\"<nil>\\n\")); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif err := tm.writeAny(w, v, props); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := w.WriteByte('\\n'); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif fv.Kind() == reflect.Map {\n\t\t\t// Map fields are rendered as a repeated struct with key/value fields.\n\t\t\tkeys := fv.MapKeys()\n\t\t\tsort.Sort(mapKeys(keys))\n\t\t\tfor _, key := range keys {\n\t\t\t\tval := fv.MapIndex(key)\n\t\t\t\tif err := writeName(w, props); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif !w.compact {\n\t\t\t\t\tif err := w.WriteByte(' '); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// open struct\n\t\t\t\tif err := w.WriteByte('<'); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif !w.compact {\n\t\t\t\t\tif err := w.WriteByte('\\n'); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tw.indent()\n\t\t\t\t// key\n\t\t\t\tif _, err := w.WriteString(\"key:\"); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif !w.compact {\n\t\t\t\t\tif err := w.WriteByte(' '); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif err := tm.writeAny(w, key, props.mkeyprop); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := w.WriteByte('\\n'); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\t// nil values aren't legal, but we can avoid panicking because of them.\n\t\t\t\tif val.Kind() != reflect.Ptr || !val.IsNil() {\n\t\t\t\t\t// value\n\t\t\t\t\tif _, err := w.WriteString(\"value:\"); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tif !w.compact {\n\t\t\t\t\t\tif err := w.WriteByte(' '); err != nil {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif err := tm.writeAny(w, val, props.mvalprop); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tif err := w.WriteByte('\\n'); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\t// close struct\n\t\t\t\tw.unindent()\n\t\t\t\tif err := w.WriteByte('>'); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := w.WriteByte('\\n'); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {\n\t\t\t// empty bytes field\n\t\t\tcontinue\n\t\t}\n\t\tif fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {\n\t\t\t// proto3 non-repeated scalar field; skip if zero value\n\t\t\tif isProto3Zero(fv) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tif fv.Kind() == reflect.Interface {\n\t\t\t// Check if it is a oneof.\n\t\t\tif st.Field(i).Tag.Get(\"protobuf_oneof\") != \"\" {\n\t\t\t\t// 
fv is nil, or holds a pointer to generated struct.\n\t\t\t\t// That generated struct has exactly one field,\n\t\t\t\t// which has a protobuf struct tag.\n\t\t\t\tif fv.IsNil() {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tinner := fv.Elem().Elem() // interface -> *T -> T\n\t\t\t\ttag := inner.Type().Field(0).Tag.Get(\"protobuf\")\n\t\t\t\tprops = new(Properties) // Overwrite the outer props var, but not its pointee.\n\t\t\t\tprops.Parse(tag)\n\t\t\t\t// Write the value in the oneof, not the oneof itself.\n\t\t\t\tfv = inner.Field(0)\n\n\t\t\t\t// Special case to cope with malformed messages gracefully:\n\t\t\t\t// If the value in the oneof is a nil pointer, don't panic\n\t\t\t\t// in writeAny.\n\t\t\t\tif fv.Kind() == reflect.Ptr && fv.IsNil() {\n\t\t\t\t\t// Use errors.New so writeAny won't render quotes.\n\t\t\t\t\tmsg := errors.New(\"/* nil */\")\n\t\t\t\t\tfv = reflect.ValueOf(&msg).Elem()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif err := writeName(w, props); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !w.compact {\n\t\t\tif err := w.WriteByte(' '); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif b, ok := fv.Interface().(raw); ok {\n\t\t\tif err := writeRaw(w, b.Bytes()); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\t// Enums have a String method, so writeAny will work fine.\n\t\tif err := tm.writeAny(w, fv, props); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := w.WriteByte('\\n'); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Extensions (the XXX_extensions field).\n\tpv := sv.Addr()\n\tif _, ok := extendable(pv.Interface()); ok {\n\t\tif err := tm.writeExtensions(w, pv); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// writeRaw writes an uninterpreted raw message.\nfunc writeRaw(w *textWriter, b []byte) error {\n\tif err := w.WriteByte('<'); err != nil {\n\t\treturn err\n\t}\n\tif !w.compact {\n\t\tif err := w.WriteByte('\\n'); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tw.indent()\n\tif err := writeUnknownStruct(w, b); err != nil {\n\t\treturn err\n\t}\n\tw.unindent()\n\tif err := w.WriteByte('>'); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// writeAny writes an arbitrary field.\nfunc (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {\n\tv = reflect.Indirect(v)\n\n\t// Floats have special cases.\n\tif v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {\n\t\tx := v.Float()\n\t\tvar b []byte\n\t\tswitch {\n\t\tcase math.IsInf(x, 1):\n\t\t\tb = posInf\n\t\tcase math.IsInf(x, -1):\n\t\t\tb = negInf\n\t\tcase math.IsNaN(x):\n\t\t\tb = nan\n\t\t}\n\t\tif b != nil {\n\t\t\t_, err := w.Write(b)\n\t\t\treturn err\n\t\t}\n\t\t// Other values are handled below.\n\t}\n\n\t// We don't attempt to serialise every possible value type; only those\n\t// that can occur in protocol buffers.\n\tswitch v.Kind() {\n\tcase reflect.Slice:\n\t\t// Should only be a []byte; repeated fields are handled in writeStruct.\n\t\tif err := writeString(w, string(v.Bytes())); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase reflect.String:\n\t\tif err := writeString(w, v.String()); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase reflect.Struct:\n\t\t// Required/optional group/message.\n\t\tvar bra, ket byte = '<', '>'\n\t\tif props != nil && props.Wire == \"group\" {\n\t\t\tbra, ket = '{', '}'\n\t\t}\n\t\tif err := w.WriteByte(bra); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !w.compact {\n\t\t\tif err := w.WriteByte('\\n'); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tw.indent()\n\t\tif etm, ok := 
v.Interface().(encoding.TextMarshaler); ok {\n\t\t\ttext, err := etm.MarshalText()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, err = w.Write(text); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else if err := tm.writeStruct(w, v); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tw.unindent()\n\t\tif err := w.WriteByte(ket); err != nil {\n\t\t\treturn err\n\t\t}\n\tdefault:\n\t\t_, err := fmt.Fprint(w, v.Interface())\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// equivalent to C's isprint.\nfunc isprint(c byte) bool {\n\treturn c >= 0x20 && c < 0x7f\n}\n\n// writeString writes a string in the protocol buffer text format.\n// It is similar to strconv.Quote except we don't use Go escape sequences,\n// we treat the string as a byte sequence, and we use octal escapes.\n// These differences are to maintain interoperability with the other\n// languages' implementations of the text format.\nfunc writeString(w *textWriter, s string) error {\n\t// use WriteByte here to get any needed indent\n\tif err := w.WriteByte('\"'); err != nil {\n\t\treturn err\n\t}\n\t// Loop over the bytes, not the runes.\n\tfor i := 0; i < len(s); i++ {\n\t\tvar err error\n\t\t// Divergence from C++: we don't escape apostrophes.\n\t\t// There's no need to escape them, and the C++ parser\n\t\t// copes with a naked apostrophe.\n\t\tswitch c := s[i]; c {\n\t\tcase '\\n':\n\t\t\t_, err = w.w.Write(backslashN)\n\t\tcase '\\r':\n\t\t\t_, err = w.w.Write(backslashR)\n\t\tcase '\\t':\n\t\t\t_, err = w.w.Write(backslashT)\n\t\tcase '\"':\n\t\t\t_, err = w.w.Write(backslashDQ)\n\t\tcase '\\\\':\n\t\t\t_, err = w.w.Write(backslashBS)\n\t\tdefault:\n\t\t\tif isprint(c) {\n\t\t\t\terr = w.w.WriteByte(c)\n\t\t\t} else {\n\t\t\t\t_, err = fmt.Fprintf(w.w, \"\\\\%03o\", c)\n\t\t\t}\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn w.WriteByte('\"')\n}\n\nfunc writeUnknownStruct(w *textWriter, data []byte) (err error) {\n\tif !w.compact {\n\t\tif _, err := fmt.Fprintf(w, \"/* %d unknown bytes */\\n\", len(data)); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tb := NewBuffer(data)\n\tfor b.index < len(b.buf) {\n\t\tx, err := b.DecodeVarint()\n\t\tif err != nil {\n\t\t\t_, err := fmt.Fprintf(w, \"/* %v */\\n\", err)\n\t\t\treturn err\n\t\t}\n\t\twire, tag := x&7, x>>3\n\t\tif wire == WireEndGroup {\n\t\t\tw.unindent()\n\t\t\tif _, err := w.Write(endBraceNewline); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif _, err := fmt.Fprint(w, tag); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif wire != WireStartGroup {\n\t\t\tif err := w.WriteByte(':'); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif !w.compact || wire == WireStartGroup {\n\t\t\tif err := w.WriteByte(' '); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tswitch wire {\n\t\tcase WireBytes:\n\t\t\tbuf, e := b.DecodeRawBytes(false)\n\t\t\tif e == nil {\n\t\t\t\t_, err = fmt.Fprintf(w, \"%q\", buf)\n\t\t\t} else {\n\t\t\t\t_, err = fmt.Fprintf(w, \"/* %v */\", e)\n\t\t\t}\n\t\tcase WireFixed32:\n\t\t\tx, err = b.DecodeFixed32()\n\t\t\terr = writeUnknownInt(w, x, err)\n\t\tcase WireFixed64:\n\t\t\tx, err = b.DecodeFixed64()\n\t\t\terr = writeUnknownInt(w, x, err)\n\t\tcase WireStartGroup:\n\t\t\terr = w.WriteByte('{')\n\t\t\tw.indent()\n\t\tcase WireVarint:\n\t\t\tx, err = b.DecodeVarint()\n\t\t\terr = writeUnknownInt(w, x, err)\n\t\tdefault:\n\t\t\t_, err = fmt.Fprintf(w, \"/* unknown wire type %d */\", wire)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = w.WriteByte('\\n'); err != nil 
{\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc writeUnknownInt(w *textWriter, x uint64, err error) error {\n\tif err == nil {\n\t\t_, err = fmt.Fprint(w, x)\n\t} else {\n\t\t_, err = fmt.Fprintf(w, \"/* %v */\", err)\n\t}\n\treturn err\n}\n\ntype int32Slice []int32\n\nfunc (s int32Slice) Len() int           { return len(s) }\nfunc (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }\nfunc (s int32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }\n\n// writeExtensions writes all the extensions in pv.\n// pv is assumed to be a pointer to a protocol message struct that is extendable.\nfunc (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {\n\temap := extensionMaps[pv.Type().Elem()]\n\tep, _ := extendable(pv.Interface())\n\n\t// Order the extensions by ID.\n\t// This isn't strictly necessary, but it will give us\n\t// canonical output, which will also make testing easier.\n\tm, mu := ep.extensionsRead()\n\tif m == nil {\n\t\treturn nil\n\t}\n\tmu.Lock()\n\tids := make([]int32, 0, len(m))\n\tfor id := range m {\n\t\tids = append(ids, id)\n\t}\n\tsort.Sort(int32Slice(ids))\n\tmu.Unlock()\n\n\tfor _, extNum := range ids {\n\t\text := m[extNum]\n\t\tvar desc *ExtensionDesc\n\t\tif emap != nil {\n\t\t\tdesc = emap[extNum]\n\t\t}\n\t\tif desc == nil {\n\t\t\t// Unknown extension.\n\t\t\tif err := writeUnknownStruct(w, ext.enc); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tpb, err := GetExtension(ep, desc)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed getting extension: %v\", err)\n\t\t}\n\n\t\t// Repeated extensions will appear as a slice.\n\t\tif !desc.repeated() {\n\t\t\tif err := tm.writeExtension(w, desc.Name, pb); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tv := reflect.ValueOf(pb)\n\t\t\tfor i := 0; i < v.Len(); i++ {\n\t\t\t\tif err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {\n\tif _, err := fmt.Fprintf(w, \"[%s]:\", name); err != nil {\n\t\treturn err\n\t}\n\tif !w.compact {\n\t\tif err := w.WriteByte(' '); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {\n\t\treturn err\n\t}\n\tif err := w.WriteByte('\\n'); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (w *textWriter) writeIndent() {\n\tif !w.complete {\n\t\treturn\n\t}\n\tremain := w.ind * 2\n\tfor remain > 0 {\n\t\tn := remain\n\t\tif n > len(spaces) {\n\t\t\tn = len(spaces)\n\t\t}\n\t\tw.w.Write(spaces[:n])\n\t\tremain -= n\n\t}\n\tw.complete = false\n}\n\n// TextMarshaler is a configurable text format marshaler.\ntype TextMarshaler struct {\n\tCompact   bool // use compact text format (one line).\n\tExpandAny bool // expand google.protobuf.Any messages of known types\n}\n\n// Marshal writes a given protocol buffer in text format.\n// The only errors returned are from w.\nfunc (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {\n\tval := reflect.ValueOf(pb)\n\tif pb == nil || val.IsNil() {\n\t\tw.Write([]byte(\"<nil>\"))\n\t\treturn nil\n\t}\n\tvar bw *bufio.Writer\n\tww, ok := w.(writer)\n\tif !ok {\n\t\tbw = bufio.NewWriter(w)\n\t\tww = bw\n\t}\n\taw := &textWriter{\n\t\tw:        ww,\n\t\tcomplete: true,\n\t\tcompact:  tm.Compact,\n\t}\n\n\tif etm, ok := pb.(encoding.TextMarshaler); ok {\n\t\ttext, err := etm.MarshalText()\n\t\tif err != nil {\n\t\t\treturn 
err\n\t\t}\n\t\tif _, err = aw.Write(text); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif bw != nil {\n\t\t\treturn bw.Flush()\n\t\t}\n\t\treturn nil\n\t}\n\t// Dereference the received pointer so we don't have outer < and >.\n\tv := reflect.Indirect(val)\n\tif err := tm.writeStruct(aw, v); err != nil {\n\t\treturn err\n\t}\n\tif bw != nil {\n\t\treturn bw.Flush()\n\t}\n\treturn nil\n}\n\n// Text is the same as Marshal, but returns the string directly.\nfunc (tm *TextMarshaler) Text(pb Message) string {\n\tvar buf bytes.Buffer\n\ttm.Marshal(&buf, pb)\n\treturn buf.String()\n}\n\nvar (\n\tdefaultTextMarshaler = TextMarshaler{}\n\tcompactTextMarshaler = TextMarshaler{Compact: true}\n)\n\n// TODO: consider removing some of the Marshal functions below.\n\n// MarshalText writes a given protocol buffer in text format.\n// The only errors returned are from w.\nfunc MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }\n\n// MarshalTextString is the same as MarshalText, but returns the string directly.\nfunc MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }\n\n// CompactText writes a given protocol buffer in compact text format (one line).\nfunc CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }\n\n// CompactTextString is the same as CompactText, but returns the string directly.\nfunc CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/text_parser.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto\n\n// Functions for parsing the Text protocol buffer format.\n// TODO: message sets.\n\nimport (\n\t\"encoding\"\n\t\"errors\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"strings\"\n\t\"unicode/utf8\"\n)\n\n// Error string emitted when deserializing Any and fields are already set\nconst anyRepeatedlyUnpacked = \"Any message unpacked multiple times, or %q already set\"\n\ntype ParseError struct {\n\tMessage string\n\tLine    int // 1-based line number\n\tOffset  int // 0-based byte offset from start of input\n}\n\nfunc (p *ParseError) Error() string {\n\tif p.Line == 1 {\n\t\t// show offset only for first line\n\t\treturn fmt.Sprintf(\"line 1.%d: %v\", p.Offset, p.Message)\n\t}\n\treturn fmt.Sprintf(\"line %d: %v\", p.Line, p.Message)\n}\n\ntype token struct {\n\tvalue    string\n\terr      *ParseError\n\tline     int    // line number\n\toffset   int    // byte number from start of input, not start of line\n\tunquoted string // the unquoted version of value, if it was a quoted string\n}\n\nfunc (t *token) String() string {\n\tif t.err == nil {\n\t\treturn fmt.Sprintf(\"%q (line=%d, offset=%d)\", t.value, t.line, t.offset)\n\t}\n\treturn fmt.Sprintf(\"parse error: %v\", t.err)\n}\n\ntype textParser struct {\n\ts            string // remaining input\n\tdone         bool   // whether the parsing is finished (success or error)\n\tbacked       bool   // whether back() was called\n\toffset, line int\n\tcur          token\n}\n\nfunc newTextParser(s string) *textParser {\n\tp := new(textParser)\n\tp.s = s\n\tp.line = 1\n\tp.cur.line = 1\n\treturn p\n}\n\nfunc (p *textParser) errorf(format string, a ...interface{}) *ParseError {\n\tpe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}\n\tp.cur.err = pe\n\tp.done = true\n\treturn pe\n}\n\n// Numbers and identifiers are matched by 
[-+._A-Za-z0-9]\nfunc isIdentOrNumberChar(c byte) bool {\n\tswitch {\n\tcase 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':\n\t\treturn true\n\tcase '0' <= c && c <= '9':\n\t\treturn true\n\t}\n\tswitch c {\n\tcase '-', '+', '.', '_':\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc isWhitespace(c byte) bool {\n\tswitch c {\n\tcase ' ', '\\t', '\\n', '\\r':\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc isQuote(c byte) bool {\n\tswitch c {\n\tcase '\"', '\\'':\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (p *textParser) skipWhitespace() {\n\ti := 0\n\tfor i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {\n\t\tif p.s[i] == '#' {\n\t\t\t// comment; skip to end of line or input\n\t\t\tfor i < len(p.s) && p.s[i] != '\\n' {\n\t\t\t\ti++\n\t\t\t}\n\t\t\tif i == len(p.s) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif p.s[i] == '\\n' {\n\t\t\tp.line++\n\t\t}\n\t\ti++\n\t}\n\tp.offset += i\n\tp.s = p.s[i:len(p.s)]\n\tif len(p.s) == 0 {\n\t\tp.done = true\n\t}\n}\n\nfunc (p *textParser) advance() {\n\t// Skip whitespace\n\tp.skipWhitespace()\n\tif p.done {\n\t\treturn\n\t}\n\n\t// Start of non-whitespace\n\tp.cur.err = nil\n\tp.cur.offset, p.cur.line = p.offset, p.line\n\tp.cur.unquoted = \"\"\n\tswitch p.s[0] {\n\tcase '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':\n\t\t// Single symbol\n\t\tp.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]\n\tcase '\"', '\\'':\n\t\t// Quoted string\n\t\ti := 1\n\t\tfor i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\\n' {\n\t\t\tif p.s[i] == '\\\\' && i+1 < len(p.s) {\n\t\t\t\t// skip escaped char\n\t\t\t\ti++\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t\tif i >= len(p.s) || p.s[i] != p.s[0] {\n\t\t\tp.errorf(\"unmatched quote\")\n\t\t\treturn\n\t\t}\n\t\tunq, err := unquoteC(p.s[1:i], rune(p.s[0]))\n\t\tif err != nil {\n\t\t\tp.errorf(\"invalid quoted string %s: %v\", p.s[0:i+1], err)\n\t\t\treturn\n\t\t}\n\t\tp.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]\n\t\tp.cur.unquoted = unq\n\tdefault:\n\t\ti := 0\n\t\tfor i < len(p.s) && isIdentOrNumberChar(p.s[i]) {\n\t\t\ti++\n\t\t}\n\t\tif i == 0 {\n\t\t\tp.errorf(\"unexpected byte %#x\", p.s[0])\n\t\t\treturn\n\t\t}\n\t\tp.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]\n\t}\n\tp.offset += len(p.cur.value)\n}\n\nvar (\n\terrBadUTF8 = errors.New(\"proto: bad UTF-8\")\n\terrBadHex  = errors.New(\"proto: bad hexadecimal\")\n)\n\nfunc unquoteC(s string, quote rune) (string, error) {\n\t// This is based on C++'s tokenizer.cc.\n\t// Despite its name, this is *not* parsing C syntax.\n\t// For instance, \"\\0\" is an invalid quoted string.\n\n\t// Avoid allocation in trivial cases.\n\tsimple := true\n\tfor _, r := range s {\n\t\tif r == '\\\\' || r == quote {\n\t\t\tsimple = false\n\t\t\tbreak\n\t\t}\n\t}\n\tif simple {\n\t\treturn s, nil\n\t}\n\n\tbuf := make([]byte, 0, 3*len(s)/2)\n\tfor len(s) > 0 {\n\t\tr, n := utf8.DecodeRuneInString(s)\n\t\tif r == utf8.RuneError && n == 1 {\n\t\t\treturn \"\", errBadUTF8\n\t\t}\n\t\ts = s[n:]\n\t\tif r != '\\\\' {\n\t\t\tif r < utf8.RuneSelf {\n\t\t\t\tbuf = append(buf, byte(r))\n\t\t\t} else {\n\t\t\t\tbuf = append(buf, string(r)...)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tch, tail, err := unescape(s)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tbuf = append(buf, ch...)\n\t\ts = tail\n\t}\n\treturn string(buf), nil\n}\n\nfunc unescape(s string) (ch string, tail string, err error) {\n\tr, n := utf8.DecodeRuneInString(s)\n\tif r == utf8.RuneError && n == 1 {\n\t\treturn \"\", \"\", errBadUTF8\n\t}\n\ts = s[n:]\n\tswitch r {\n\tcase 'a':\n\t\treturn \"\\a\", s, nil\n\tcase 'b':\n\t\treturn 
\"\\b\", s, nil\n\tcase 'f':\n\t\treturn \"\\f\", s, nil\n\tcase 'n':\n\t\treturn \"\\n\", s, nil\n\tcase 'r':\n\t\treturn \"\\r\", s, nil\n\tcase 't':\n\t\treturn \"\\t\", s, nil\n\tcase 'v':\n\t\treturn \"\\v\", s, nil\n\tcase '?':\n\t\treturn \"?\", s, nil // trigraph workaround\n\tcase '\\'', '\"', '\\\\':\n\t\treturn string(r), s, nil\n\tcase '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':\n\t\tif len(s) < 2 {\n\t\t\treturn \"\", \"\", fmt.Errorf(`\\%c requires 2 following digits`, r)\n\t\t}\n\t\tbase := 8\n\t\tss := s[:2]\n\t\ts = s[2:]\n\t\tif r == 'x' || r == 'X' {\n\t\t\tbase = 16\n\t\t} else {\n\t\t\tss = string(r) + ss\n\t\t}\n\t\ti, err := strconv.ParseUint(ss, base, 8)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t\treturn string([]byte{byte(i)}), s, nil\n\tcase 'u', 'U':\n\t\tn := 4\n\t\tif r == 'U' {\n\t\t\tn = 8\n\t\t}\n\t\tif len(s) < n {\n\t\t\treturn \"\", \"\", fmt.Errorf(`\\%c requires %d digits`, r, n)\n\t\t}\n\n\t\tbs := make([]byte, n/2)\n\t\tfor i := 0; i < n; i += 2 {\n\t\t\ta, ok1 := unhex(s[i])\n\t\t\tb, ok2 := unhex(s[i+1])\n\t\t\tif !ok1 || !ok2 {\n\t\t\t\treturn \"\", \"\", errBadHex\n\t\t\t}\n\t\t\tbs[i/2] = a<<4 | b\n\t\t}\n\t\ts = s[n:]\n\t\treturn string(bs), s, nil\n\t}\n\treturn \"\", \"\", fmt.Errorf(`unknown escape \\%c`, r)\n}\n\n// Adapted from src/pkg/strconv/quote.go.\nfunc unhex(b byte) (v byte, ok bool) {\n\tswitch {\n\tcase '0' <= b && b <= '9':\n\t\treturn b - '0', true\n\tcase 'a' <= b && b <= 'f':\n\t\treturn b - 'a' + 10, true\n\tcase 'A' <= b && b <= 'F':\n\t\treturn b - 'A' + 10, true\n\t}\n\treturn 0, false\n}\n\n// Back off the parser by one token. Can only be done between calls to next().\n// It makes the next advance() a no-op.\nfunc (p *textParser) back() { p.backed = true }\n\n// Advances the parser and returns the new current token.\nfunc (p *textParser) next() *token {\n\tif p.backed || p.done {\n\t\tp.backed = false\n\t\treturn &p.cur\n\t}\n\tp.advance()\n\tif p.done {\n\t\tp.cur.value = \"\"\n\t} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {\n\t\t// Look for multiple quoted strings separated by whitespace,\n\t\t// and concatenate them.\n\t\tcat := p.cur\n\t\tfor {\n\t\t\tp.skipWhitespace()\n\t\t\tif p.done || !isQuote(p.s[0]) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tp.advance()\n\t\t\tif p.cur.err != nil {\n\t\t\t\treturn &p.cur\n\t\t\t}\n\t\t\tcat.value += \" \" + p.cur.value\n\t\t\tcat.unquoted += p.cur.unquoted\n\t\t}\n\t\tp.done = false // parser may have seen EOF, but we want to return cat\n\t\tp.cur = cat\n\t}\n\treturn &p.cur\n}\n\nfunc (p *textParser) consumeToken(s string) error {\n\ttok := p.next()\n\tif tok.err != nil {\n\t\treturn tok.err\n\t}\n\tif tok.value != s {\n\t\tp.back()\n\t\treturn p.errorf(\"expected %q, found %q\", s, tok.value)\n\t}\n\treturn nil\n}\n\n// Return a RequiredNotSetError indicating which required field was not set.\nfunc (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {\n\tst := sv.Type()\n\tsprops := GetProperties(st)\n\tfor i := 0; i < st.NumField(); i++ {\n\t\tif !isNil(sv.Field(i)) {\n\t\t\tcontinue\n\t\t}\n\n\t\tprops := sprops.Prop[i]\n\t\tif props.Required {\n\t\t\treturn &RequiredNotSetError{fmt.Sprintf(\"%v.%v\", st, props.OrigName)}\n\t\t}\n\t}\n\treturn &RequiredNotSetError{fmt.Sprintf(\"%v.<unknown field name>\", st)} // should not happen\n}\n\n// Returns the index in the struct for the named field, as well as the parsed tag properties.\nfunc structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) 
{\n\ti, ok := sprops.decoderOrigNames[name]\n\tif ok {\n\t\treturn i, sprops.Prop[i], true\n\t}\n\treturn -1, nil, false\n}\n\n// Consume a ':' from the input stream (if the next token is a colon),\n// returning an error if a colon is needed but not present.\nfunc (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {\n\ttok := p.next()\n\tif tok.err != nil {\n\t\treturn tok.err\n\t}\n\tif tok.value != \":\" {\n\t\t// Colon is optional when the field is a group or message.\n\t\tneedColon := true\n\t\tswitch props.Wire {\n\t\tcase \"group\":\n\t\t\tneedColon = false\n\t\tcase \"bytes\":\n\t\t\t// A \"bytes\" field is either a message, a string, or a repeated field;\n\t\t\t// those three become *T, *string and []T respectively, so we can check for\n\t\t\t// this field being a pointer to a non-string.\n\t\t\tif typ.Kind() == reflect.Ptr {\n\t\t\t\t// *T or *string\n\t\t\t\tif typ.Elem().Kind() == reflect.String {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t} else if typ.Kind() == reflect.Slice {\n\t\t\t\t// []T or []*T\n\t\t\t\tif typ.Elem().Kind() != reflect.Ptr {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t} else if typ.Kind() == reflect.String {\n\t\t\t\t// The proto3 exception is for a string field,\n\t\t\t\t// which requires a colon.\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tneedColon = false\n\t\t}\n\t\tif needColon {\n\t\t\treturn p.errorf(\"expected ':', found %q\", tok.value)\n\t\t}\n\t\tp.back()\n\t}\n\treturn nil\n}\n\nfunc (p *textParser) readStruct(sv reflect.Value, terminator string) error {\n\tst := sv.Type()\n\tsprops := GetProperties(st)\n\treqCount := sprops.reqCount\n\tvar reqFieldErr error\n\tfieldSet := make(map[string]bool)\n\t// A struct is a sequence of \"name: value\", terminated by one of\n\t// '>' or '}', or the end of the input.  A name may also be\n\t// \"[extension]\" or \"[type/url]\".\n\t//\n\t// The whole struct can also be an expanded Any message, like:\n\t// [type/url] < ... struct contents ... >\n\tfor {\n\t\ttok := p.next()\n\t\tif tok.err != nil {\n\t\t\treturn tok.err\n\t\t}\n\t\tif tok.value == terminator {\n\t\t\tbreak\n\t\t}\n\t\tif tok.value == \"[\" {\n\t\t\t// Looks like an extension or an Any.\n\t\t\t//\n\t\t\t// TODO: Check whether we need to handle\n\t\t\t// namespace rooted names (e.g. 
\".something.Foo\").\n\t\t\textName, err := p.consumeExtName()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif s := strings.LastIndex(extName, \"/\"); s >= 0 {\n\t\t\t\t// If it contains a slash, it's an Any type URL.\n\t\t\t\tmessageName := extName[s+1:]\n\t\t\t\tmt := MessageType(messageName)\n\t\t\t\tif mt == nil {\n\t\t\t\t\treturn p.errorf(\"unrecognized message %q in google.protobuf.Any\", messageName)\n\t\t\t\t}\n\t\t\t\ttok = p.next()\n\t\t\t\tif tok.err != nil {\n\t\t\t\t\treturn tok.err\n\t\t\t\t}\n\t\t\t\t// consume an optional colon\n\t\t\t\tif tok.value == \":\" {\n\t\t\t\t\ttok = p.next()\n\t\t\t\t\tif tok.err != nil {\n\t\t\t\t\t\treturn tok.err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tvar terminator string\n\t\t\t\tswitch tok.value {\n\t\t\t\tcase \"<\":\n\t\t\t\t\tterminator = \">\"\n\t\t\t\tcase \"{\":\n\t\t\t\t\tterminator = \"}\"\n\t\t\t\tdefault:\n\t\t\t\t\treturn p.errorf(\"expected '{' or '<', found %q\", tok.value)\n\t\t\t\t}\n\t\t\t\tv := reflect.New(mt.Elem())\n\t\t\t\tif pe := p.readStruct(v.Elem(), terminator); pe != nil {\n\t\t\t\t\treturn pe\n\t\t\t\t}\n\t\t\t\tb, err := Marshal(v.Interface().(Message))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn p.errorf(\"failed to marshal message of type %q: %v\", messageName, err)\n\t\t\t\t}\n\t\t\t\tif fieldSet[\"type_url\"] {\n\t\t\t\t\treturn p.errorf(anyRepeatedlyUnpacked, \"type_url\")\n\t\t\t\t}\n\t\t\t\tif fieldSet[\"value\"] {\n\t\t\t\t\treturn p.errorf(anyRepeatedlyUnpacked, \"value\")\n\t\t\t\t}\n\t\t\t\tsv.FieldByName(\"TypeUrl\").SetString(extName)\n\t\t\t\tsv.FieldByName(\"Value\").SetBytes(b)\n\t\t\t\tfieldSet[\"type_url\"] = true\n\t\t\t\tfieldSet[\"value\"] = true\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar desc *ExtensionDesc\n\t\t\t// This could be faster, but it's functional.\n\t\t\t// TODO: Do something smarter than a linear scan.\n\t\t\tfor _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {\n\t\t\t\tif d.Name == extName {\n\t\t\t\t\tdesc = d\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif desc == nil {\n\t\t\t\treturn p.errorf(\"unrecognized extension %q\", extName)\n\t\t\t}\n\n\t\t\tprops := &Properties{}\n\t\t\tprops.Parse(desc.Tag)\n\n\t\t\ttyp := reflect.TypeOf(desc.ExtensionType)\n\t\t\tif err := p.checkForColon(props, typ); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\trep := desc.repeated()\n\n\t\t\t// Read the extension structure, and set it in\n\t\t\t// the value we're constructing.\n\t\t\tvar ext reflect.Value\n\t\t\tif !rep {\n\t\t\t\text = reflect.New(typ).Elem()\n\t\t\t} else {\n\t\t\t\text = reflect.New(typ.Elem()).Elem()\n\t\t\t}\n\t\t\tif err := p.readAny(ext, props); err != nil {\n\t\t\t\tif _, ok := err.(*RequiredNotSetError); !ok {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\treqFieldErr = err\n\t\t\t}\n\t\t\tep := sv.Addr().Interface().(Message)\n\t\t\tif !rep {\n\t\t\t\tSetExtension(ep, desc, ext.Interface())\n\t\t\t} else {\n\t\t\t\told, err := GetExtension(ep, desc)\n\t\t\t\tvar sl reflect.Value\n\t\t\t\tif err == nil {\n\t\t\t\t\tsl = reflect.ValueOf(old) // existing slice\n\t\t\t\t} else {\n\t\t\t\t\tsl = reflect.MakeSlice(typ, 0, 1)\n\t\t\t\t}\n\t\t\t\tsl = reflect.Append(sl, ext)\n\t\t\t\tSetExtension(ep, desc, sl.Interface())\n\t\t\t}\n\t\t\tif err := p.consumeOptionalSeparator(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\t// This is a normal, non-extension field.\n\t\tname := tok.value\n\t\tvar dst reflect.Value\n\t\tfi, props, ok := structFieldByName(sprops, name)\n\t\tif ok {\n\t\t\tdst = sv.Field(fi)\n\t\t} else 
if oop, ok := sprops.OneofTypes[name]; ok {\n\t\t\t// It is a oneof.\n\t\t\tprops = oop.Prop\n\t\t\tnv := reflect.New(oop.Type.Elem())\n\t\t\tdst = nv.Elem().Field(0)\n\t\t\tfield := sv.Field(oop.Field)\n\t\t\tif !field.IsNil() {\n\t\t\t\treturn p.errorf(\"field '%s' would overwrite already parsed oneof '%s'\", name, sv.Type().Field(oop.Field).Name)\n\t\t\t}\n\t\t\tfield.Set(nv)\n\t\t}\n\t\tif !dst.IsValid() {\n\t\t\treturn p.errorf(\"unknown field name %q in %v\", name, st)\n\t\t}\n\n\t\tif dst.Kind() == reflect.Map {\n\t\t\t// Consume any colon.\n\t\t\tif err := p.checkForColon(props, dst.Type()); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\t// Construct the map if it doesn't already exist.\n\t\t\tif dst.IsNil() {\n\t\t\t\tdst.Set(reflect.MakeMap(dst.Type()))\n\t\t\t}\n\t\t\tkey := reflect.New(dst.Type().Key()).Elem()\n\t\t\tval := reflect.New(dst.Type().Elem()).Elem()\n\n\t\t\t// The map entry should be this sequence of tokens:\n\t\t\t//\t< key : KEY value : VALUE >\n\t\t\t// However, implementations may omit key or value, and technically\n\t\t\t// we should support them in any order.  See b/28924776 for a time\n\t\t\t// this went wrong.\n\n\t\t\ttok := p.next()\n\t\t\tvar terminator string\n\t\t\tswitch tok.value {\n\t\t\tcase \"<\":\n\t\t\t\tterminator = \">\"\n\t\t\tcase \"{\":\n\t\t\t\tterminator = \"}\"\n\t\t\tdefault:\n\t\t\t\treturn p.errorf(\"expected '{' or '<', found %q\", tok.value)\n\t\t\t}\n\t\t\tfor {\n\t\t\t\ttok := p.next()\n\t\t\t\tif tok.err != nil {\n\t\t\t\t\treturn tok.err\n\t\t\t\t}\n\t\t\t\tif tok.value == terminator {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tswitch tok.value {\n\t\t\t\tcase \"key\":\n\t\t\t\t\tif err := p.consumeToken(\":\"); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tif err := p.readAny(key, props.mkeyprop); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tif err := p.consumeOptionalSeparator(); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\tcase \"value\":\n\t\t\t\t\tif err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tif err := p.readAny(val, props.mvalprop); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tif err := p.consumeOptionalSeparator(); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\tp.back()\n\t\t\t\t\treturn p.errorf(`expected \"key\", \"value\", or %q, found %q`, terminator, tok.value)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tdst.SetMapIndex(key, val)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Check that it's not already set if it's not a repeated field.\n\t\tif !props.Repeated && fieldSet[name] {\n\t\t\treturn p.errorf(\"non-repeated field %q was repeated\", name)\n\t\t}\n\n\t\tif err := p.checkForColon(props, dst.Type()); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Parse into the field.\n\t\tfieldSet[name] = true\n\t\tif err := p.readAny(dst, props); err != nil {\n\t\t\tif _, ok := err.(*RequiredNotSetError); !ok {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treqFieldErr = err\n\t\t}\n\t\tif props.Required {\n\t\t\treqCount--\n\t\t}\n\n\t\tif err := p.consumeOptionalSeparator(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\tif reqCount > 0 {\n\t\treturn p.missingRequiredFieldError(sv)\n\t}\n\treturn reqFieldErr\n}\n\n// consumeExtName consumes extension name or expanded Any type URL and the\n// following ']'. 
It returns the name or URL consumed.\nfunc (p *textParser) consumeExtName() (string, error) {\n\ttok := p.next()\n\tif tok.err != nil {\n\t\treturn \"\", tok.err\n\t}\n\n\t// If extension name or type url is quoted, it's a single token.\n\tif len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {\n\t\tname, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn name, p.consumeToken(\"]\")\n\t}\n\n\t// Consume everything up to \"]\"\n\tvar parts []string\n\tfor tok.value != \"]\" {\n\t\tparts = append(parts, tok.value)\n\t\ttok = p.next()\n\t\tif tok.err != nil {\n\t\t\treturn \"\", p.errorf(\"unrecognized type_url or extension name: %s\", tok.err)\n\t\t}\n\t}\n\treturn strings.Join(parts, \"\"), nil\n}\n\n// consumeOptionalSeparator consumes an optional semicolon or comma.\n// It is used in readStruct to provide backward compatibility.\nfunc (p *textParser) consumeOptionalSeparator() error {\n\ttok := p.next()\n\tif tok.err != nil {\n\t\treturn tok.err\n\t}\n\tif tok.value != \";\" && tok.value != \",\" {\n\t\tp.back()\n\t}\n\treturn nil\n}\n\nfunc (p *textParser) readAny(v reflect.Value, props *Properties) error {\n\ttok := p.next()\n\tif tok.err != nil {\n\t\treturn tok.err\n\t}\n\tif tok.value == \"\" {\n\t\treturn p.errorf(\"unexpected EOF\")\n\t}\n\n\tswitch fv := v; fv.Kind() {\n\tcase reflect.Slice:\n\t\tat := v.Type()\n\t\tif at.Elem().Kind() == reflect.Uint8 {\n\t\t\t// Special case for []byte\n\t\t\tif tok.value[0] != '\"' && tok.value[0] != '\\'' {\n\t\t\t\t// Deliberately written out here, as the error after\n\t\t\t\t// this switch statement would write \"invalid []byte: ...\",\n\t\t\t\t// which is not as user-friendly.\n\t\t\t\treturn p.errorf(\"invalid string: %v\", tok.value)\n\t\t\t}\n\t\t\tbytes := []byte(tok.unquoted)\n\t\t\tfv.Set(reflect.ValueOf(bytes))\n\t\t\treturn nil\n\t\t}\n\t\t// Repeated field.\n\t\tif tok.value == \"[\" {\n\t\t\t// Repeated field with list notation, like [1,2,3].\n\t\t\tfor {\n\t\t\t\tfv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))\n\t\t\t\terr := p.readAny(fv.Index(fv.Len()-1), props)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\ttok := p.next()\n\t\t\t\tif tok.err != nil {\n\t\t\t\t\treturn tok.err\n\t\t\t\t}\n\t\t\t\tif tok.value == \"]\" {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tif tok.value != \",\" {\n\t\t\t\t\treturn p.errorf(\"Expected ']' or ',' found %q\", tok.value)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t\t// One value of the repeated field.\n\t\tp.back()\n\t\tfv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))\n\t\treturn p.readAny(fv.Index(fv.Len()-1), props)\n\tcase reflect.Bool:\n\t\t// true/1/t/True or false/f/0/False.\n\t\tswitch tok.value {\n\t\tcase \"true\", \"1\", \"t\", \"True\":\n\t\t\tfv.SetBool(true)\n\t\t\treturn nil\n\t\tcase \"false\", \"0\", \"f\", \"False\":\n\t\t\tfv.SetBool(false)\n\t\t\treturn nil\n\t\t}\n\tcase reflect.Float32, reflect.Float64:\n\t\tv := tok.value\n\t\t// Ignore 'f' for compatibility with output generated by C++, but don't\n\t\t// remove 'f' when the value is \"-inf\" or \"inf\".\n\t\tif strings.HasSuffix(v, \"f\") && tok.value != \"-inf\" && tok.value != \"inf\" {\n\t\t\tv = v[:len(v)-1]\n\t\t}\n\t\tif f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {\n\t\t\tfv.SetFloat(f)\n\t\t\treturn nil\n\t\t}\n\tcase reflect.Int32:\n\t\tif x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {\n\t\t\tfv.SetInt(x)\n\t\t\treturn 
nil\n\t\t}\n\n\t\tif len(props.Enum) == 0 {\n\t\t\tbreak\n\t\t}\n\t\tm, ok := enumValueMaps[props.Enum]\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tx, ok := m[tok.value]\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tfv.SetInt(int64(x))\n\t\treturn nil\n\tcase reflect.Int64:\n\t\tif x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {\n\t\t\tfv.SetInt(x)\n\t\t\treturn nil\n\t\t}\n\n\tcase reflect.Ptr:\n\t\t// A basic field (indirected through pointer), or a repeated message/group\n\t\tp.back()\n\t\tfv.Set(reflect.New(fv.Type().Elem()))\n\t\treturn p.readAny(fv.Elem(), props)\n\tcase reflect.String:\n\t\tif tok.value[0] == '\"' || tok.value[0] == '\\'' {\n\t\t\tfv.SetString(tok.unquoted)\n\t\t\treturn nil\n\t\t}\n\tcase reflect.Struct:\n\t\tvar terminator string\n\t\tswitch tok.value {\n\t\tcase \"{\":\n\t\t\tterminator = \"}\"\n\t\tcase \"<\":\n\t\t\tterminator = \">\"\n\t\tdefault:\n\t\t\treturn p.errorf(\"expected '{' or '<', found %q\", tok.value)\n\t\t}\n\t\t// TODO: Handle nested messages which implement encoding.TextUnmarshaler.\n\t\treturn p.readStruct(fv, terminator)\n\tcase reflect.Uint32:\n\t\tif x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {\n\t\t\tfv.SetUint(x)\n\t\t\treturn nil\n\t\t}\n\tcase reflect.Uint64:\n\t\tif x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {\n\t\t\tfv.SetUint(x)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn p.errorf(\"invalid %v: %v\", v.Type(), tok.value)\n}\n\n// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb\n// before starting to unmarshal, so any existing data in pb is always removed.\n// If a required field is not set and no other error occurs,\n// UnmarshalText returns *RequiredNotSetError.\nfunc UnmarshalText(s string, pb Message) error {\n\tif um, ok := pb.(encoding.TextUnmarshaler); ok {\n\t\terr := um.UnmarshalText([]byte(s))\n\t\treturn err\n\t}\n\tpb.Reset()\n\tv := reflect.ValueOf(pb)\n\tif pe := newTextParser(s).readStruct(v.Elem(), \"\"); pe != nil {\n\t\treturn pe\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/text_parser_test.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto_test\n\nimport (\n\t\"math\"\n\t\"reflect\"\n\t\"testing\"\n\n\t. \"github.com/golang/protobuf/proto\"\n\tproto3pb \"github.com/golang/protobuf/proto/proto3_proto\"\n\t. 
\"github.com/golang/protobuf/proto/testdata\"\n)\n\ntype UnmarshalTextTest struct {\n\tin  string\n\terr string // if \"\", no error expected\n\tout *MyMessage\n}\n\nfunc buildExtStructTest(text string) UnmarshalTextTest {\n\tmsg := &MyMessage{\n\t\tCount: Int32(42),\n\t}\n\tSetExtension(msg, E_Ext_More, &Ext{\n\t\tData: String(\"Hello, world!\"),\n\t})\n\treturn UnmarshalTextTest{in: text, out: msg}\n}\n\nfunc buildExtDataTest(text string) UnmarshalTextTest {\n\tmsg := &MyMessage{\n\t\tCount: Int32(42),\n\t}\n\tSetExtension(msg, E_Ext_Text, String(\"Hello, world!\"))\n\tSetExtension(msg, E_Ext_Number, Int32(1729))\n\treturn UnmarshalTextTest{in: text, out: msg}\n}\n\nfunc buildExtRepStringTest(text string) UnmarshalTextTest {\n\tmsg := &MyMessage{\n\t\tCount: Int32(42),\n\t}\n\tif err := SetExtension(msg, E_Greeting, []string{\"bula\", \"hola\"}); err != nil {\n\t\tpanic(err)\n\t}\n\treturn UnmarshalTextTest{in: text, out: msg}\n}\n\nvar unMarshalTextTests = []UnmarshalTextTest{\n\t// Basic\n\t{\n\t\tin: \" count:42\\n  name:\\\"Dave\\\" \",\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tName:  String(\"Dave\"),\n\t\t},\n\t},\n\n\t// Empty quoted string\n\t{\n\t\tin: `count:42 name:\"\"`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tName:  String(\"\"),\n\t\t},\n\t},\n\n\t// Quoted string concatenation with double quotes\n\t{\n\t\tin: `count:42 name: \"My name is \"` + \"\\n\" + `\"elsewhere\"`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tName:  String(\"My name is elsewhere\"),\n\t\t},\n\t},\n\n\t// Quoted string concatenation with single quotes\n\t{\n\t\tin: \"count:42 name: 'My name is '\\n'elsewhere'\",\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tName:  String(\"My name is elsewhere\"),\n\t\t},\n\t},\n\n\t// Quoted string concatenations with mixed quotes\n\t{\n\t\tin: \"count:42 name: 'My name is '\\n\\\"elsewhere\\\"\",\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tName:  String(\"My name is elsewhere\"),\n\t\t},\n\t},\n\t{\n\t\tin: \"count:42 name: \\\"My name is \\\"\\n'elsewhere'\",\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tName:  String(\"My name is elsewhere\"),\n\t\t},\n\t},\n\n\t// Quoted string with escaped apostrophe\n\t{\n\t\tin: `count:42 name: \"HOLIDAY - New Year\\'s Day\"`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tName:  String(\"HOLIDAY - New Year's Day\"),\n\t\t},\n\t},\n\n\t// Quoted string with single quote\n\t{\n\t\tin: `count:42 name: 'Roger \"The Ramster\" Ramjet'`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tName:  String(`Roger \"The Ramster\" Ramjet`),\n\t\t},\n\t},\n\n\t// Quoted string with all the accepted special characters from the C++ test\n\t{\n\t\tin: `count:42 name: ` + \"\\\"\\\\\\\"A string with \\\\' characters \\\\n and \\\\r newlines and \\\\t tabs and \\\\001 slashes \\\\\\\\ and  multiple   spaces\\\"\",\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tName:  String(\"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and  multiple   spaces\"),\n\t\t},\n\t},\n\n\t// Quoted string with quoted backslash\n\t{\n\t\tin: `count:42 name: \"\\\\'xyz\"`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tName:  String(`\\'xyz`),\n\t\t},\n\t},\n\n\t// Quoted string with UTF-8 bytes.\n\t{\n\t\tin: \"count:42 name: '\\303\\277\\302\\201\\xAB'\",\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tName:  String(\"\\303\\277\\302\\201\\xAB\"),\n\t\t},\n\t},\n\n\t// Bad quoted string\n\t{\n\t\tin:  `inner: < host: \"\\0\" >` + 
\"\\n\",\n\t\terr: `line 1.15: invalid quoted string \"\\0\": \\0 requires 2 following digits`,\n\t},\n\n\t// Number too large for int64\n\t{\n\t\tin:  \"count: 1 others { key: 123456789012345678901 }\",\n\t\terr: \"line 1.23: invalid int64: 123456789012345678901\",\n\t},\n\n\t// Number too large for int32\n\t{\n\t\tin:  \"count: 1234567890123\",\n\t\terr: \"line 1.7: invalid int32: 1234567890123\",\n\t},\n\n\t// Number in hexadecimal\n\t{\n\t\tin: \"count: 0x2beef\",\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(0x2beef),\n\t\t},\n\t},\n\n\t// Number in octal\n\t{\n\t\tin: \"count: 024601\",\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(024601),\n\t\t},\n\t},\n\n\t// Floating point number with \"f\" suffix\n\t{\n\t\tin: \"count: 4 others:< weight: 17.0f >\",\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(4),\n\t\t\tOthers: []*OtherMessage{\n\t\t\t\t{\n\t\t\t\t\tWeight: Float32(17),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n\n\t// Floating point positive infinity\n\t{\n\t\tin: \"count: 4 bigfloat: inf\",\n\t\tout: &MyMessage{\n\t\t\tCount:    Int32(4),\n\t\t\tBigfloat: Float64(math.Inf(1)),\n\t\t},\n\t},\n\n\t// Floating point negative infinity\n\t{\n\t\tin: \"count: 4 bigfloat: -inf\",\n\t\tout: &MyMessage{\n\t\t\tCount:    Int32(4),\n\t\t\tBigfloat: Float64(math.Inf(-1)),\n\t\t},\n\t},\n\n\t// Number too large for float32\n\t{\n\t\tin:  \"others:< weight: 12345678901234567890123456789012345678901234567890 >\",\n\t\terr: \"line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890\",\n\t},\n\n\t// Number posing as a quoted string\n\t{\n\t\tin:  `inner: < host: 12 >` + \"\\n\",\n\t\terr: `line 1.15: invalid string: 12`,\n\t},\n\n\t// Quoted string posing as int32\n\t{\n\t\tin:  `count: \"12\"`,\n\t\terr: `line 1.7: invalid int32: \"12\"`,\n\t},\n\n\t// Quoted string posing a float32\n\t{\n\t\tin:  `others:< weight: \"17.4\" >`,\n\t\terr: `line 1.17: invalid float32: \"17.4\"`,\n\t},\n\n\t// Enum\n\t{\n\t\tin: `count:42 bikeshed: BLUE`,\n\t\tout: &MyMessage{\n\t\t\tCount:    Int32(42),\n\t\t\tBikeshed: MyMessage_BLUE.Enum(),\n\t\t},\n\t},\n\n\t// Repeated field\n\t{\n\t\tin: `count:42 pet: \"horsey\" pet:\"bunny\"`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tPet:   []string{\"horsey\", \"bunny\"},\n\t\t},\n\t},\n\n\t// Repeated field with list notation\n\t{\n\t\tin: `count:42 pet: [\"horsey\", \"bunny\"]`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tPet:   []string{\"horsey\", \"bunny\"},\n\t\t},\n\t},\n\n\t// Repeated message with/without colon and <>/{}\n\t{\n\t\tin: `count:42 others:{} others{} others:<> others:{}`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tOthers: []*OtherMessage{\n\t\t\t\t{},\n\t\t\t\t{},\n\t\t\t\t{},\n\t\t\t\t{},\n\t\t\t},\n\t\t},\n\t},\n\n\t// Missing colon for inner message\n\t{\n\t\tin: `count:42 inner < host: \"cauchy.syd\" >`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tInner: &InnerMessage{\n\t\t\t\tHost: String(\"cauchy.syd\"),\n\t\t\t},\n\t\t},\n\t},\n\n\t// Missing colon for string field\n\t{\n\t\tin:  `name \"Dave\"`,\n\t\terr: `line 1.5: expected ':', found \"\\\"Dave\\\"\"`,\n\t},\n\n\t// Missing colon for int32 field\n\t{\n\t\tin:  `count 42`,\n\t\terr: `line 1.6: expected ':', found \"42\"`,\n\t},\n\n\t// Missing required field\n\t{\n\t\tin:  `name: \"Pawel\"`,\n\t\terr: `proto: required field \"testdata.MyMessage.count\" not set`,\n\t\tout: &MyMessage{\n\t\t\tName: String(\"Pawel\"),\n\t\t},\n\t},\n\n\t// Missing required field in a required submessage\n\t{\n\t\tin:  `count: 42 we_must_go_deeper < 
leo_finally_won_an_oscar <> >`,\n\t\terr: `proto: required field \"testdata.InnerMessage.host\" not set`,\n\t\tout: &MyMessage{\n\t\t\tCount:          Int32(42),\n\t\t\tWeMustGoDeeper: &RequiredInnerMessage{LeoFinallyWonAnOscar: &InnerMessage{}},\n\t\t},\n\t},\n\n\t// Repeated non-repeated field\n\t{\n\t\tin:  `name: \"Rob\" name: \"Russ\"`,\n\t\terr: `line 1.12: non-repeated field \"name\" was repeated`,\n\t},\n\n\t// Group\n\t{\n\t\tin: `count: 17 SomeGroup { group_field: 12 }`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(17),\n\t\t\tSomegroup: &MyMessage_SomeGroup{\n\t\t\t\tGroupField: Int32(12),\n\t\t\t},\n\t\t},\n\t},\n\n\t// Semicolon between fields\n\t{\n\t\tin: `count:3;name:\"Calvin\"`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(3),\n\t\t\tName:  String(\"Calvin\"),\n\t\t},\n\t},\n\t// Comma between fields\n\t{\n\t\tin: `count:4,name:\"Ezekiel\"`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(4),\n\t\t\tName:  String(\"Ezekiel\"),\n\t\t},\n\t},\n\n\t// Boolean false\n\t{\n\t\tin: `count:42 inner { host: \"example.com\" connected: false }`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tInner: &InnerMessage{\n\t\t\t\tHost:      String(\"example.com\"),\n\t\t\t\tConnected: Bool(false),\n\t\t\t},\n\t\t},\n\t},\n\t// Boolean true\n\t{\n\t\tin: `count:42 inner { host: \"example.com\" connected: true }`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tInner: &InnerMessage{\n\t\t\t\tHost:      String(\"example.com\"),\n\t\t\t\tConnected: Bool(true),\n\t\t\t},\n\t\t},\n\t},\n\t// Boolean 0\n\t{\n\t\tin: `count:42 inner { host: \"example.com\" connected: 0 }`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tInner: &InnerMessage{\n\t\t\t\tHost:      String(\"example.com\"),\n\t\t\t\tConnected: Bool(false),\n\t\t\t},\n\t\t},\n\t},\n\t// Boolean 1\n\t{\n\t\tin: `count:42 inner { host: \"example.com\" connected: 1 }`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tInner: &InnerMessage{\n\t\t\t\tHost:      String(\"example.com\"),\n\t\t\t\tConnected: Bool(true),\n\t\t\t},\n\t\t},\n\t},\n\t// Boolean f\n\t{\n\t\tin: `count:42 inner { host: \"example.com\" connected: f }`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tInner: &InnerMessage{\n\t\t\t\tHost:      String(\"example.com\"),\n\t\t\t\tConnected: Bool(false),\n\t\t\t},\n\t\t},\n\t},\n\t// Boolean t\n\t{\n\t\tin: `count:42 inner { host: \"example.com\" connected: t }`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tInner: &InnerMessage{\n\t\t\t\tHost:      String(\"example.com\"),\n\t\t\t\tConnected: Bool(true),\n\t\t\t},\n\t\t},\n\t},\n\t// Boolean False\n\t{\n\t\tin: `count:42 inner { host: \"example.com\" connected: False }`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tInner: &InnerMessage{\n\t\t\t\tHost:      String(\"example.com\"),\n\t\t\t\tConnected: Bool(false),\n\t\t\t},\n\t\t},\n\t},\n\t// Boolean True\n\t{\n\t\tin: `count:42 inner { host: \"example.com\" connected: True }`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tInner: &InnerMessage{\n\t\t\t\tHost:      String(\"example.com\"),\n\t\t\t\tConnected: Bool(true),\n\t\t\t},\n\t\t},\n\t},\n\n\t// Extension\n\tbuildExtStructTest(`count: 42 [testdata.Ext.more]:<data:\"Hello, world!\" >`),\n\tbuildExtStructTest(`count: 42 [testdata.Ext.more] {data:\"Hello, world!\"}`),\n\tbuildExtDataTest(`count: 42 [testdata.Ext.text]:\"Hello, world!\" [testdata.Ext.number]:1729`),\n\tbuildExtRepStringTest(`count: 42 [testdata.greeting]:\"bula\" [testdata.greeting]:\"hola\"`),\n\n\t// Big all-in-one\n\t{\n\t\tin: \"count:42  # Meaning\\n\" 
+\n\t\t\t`name:\"Dave\" ` +\n\t\t\t`quote:\"\\\"I didn't want to go.\\\"\" ` +\n\t\t\t`pet:\"bunny\" ` +\n\t\t\t`pet:\"kitty\" ` +\n\t\t\t`pet:\"horsey\" ` +\n\t\t\t`inner:<` +\n\t\t\t`  host:\"footrest.syd\" ` +\n\t\t\t`  port:7001 ` +\n\t\t\t`  connected:true ` +\n\t\t\t`> ` +\n\t\t\t`others:<` +\n\t\t\t`  key:3735928559 ` +\n\t\t\t`  value:\"\\x01A\\a\\f\" ` +\n\t\t\t`> ` +\n\t\t\t`others:<` +\n\t\t\t\"  weight:58.9  # Atomic weight of Co\\n\" +\n\t\t\t`  inner:<` +\n\t\t\t`    host:\"lesha.mtv\" ` +\n\t\t\t`    port:8002 ` +\n\t\t\t`  >` +\n\t\t\t`>`,\n\t\tout: &MyMessage{\n\t\t\tCount: Int32(42),\n\t\t\tName:  String(\"Dave\"),\n\t\t\tQuote: String(`\"I didn't want to go.\"`),\n\t\t\tPet:   []string{\"bunny\", \"kitty\", \"horsey\"},\n\t\t\tInner: &InnerMessage{\n\t\t\t\tHost:      String(\"footrest.syd\"),\n\t\t\t\tPort:      Int32(7001),\n\t\t\t\tConnected: Bool(true),\n\t\t\t},\n\t\t\tOthers: []*OtherMessage{\n\t\t\t\t{\n\t\t\t\t\tKey:   Int64(3735928559),\n\t\t\t\t\tValue: []byte{0x1, 'A', '\\a', '\\f'},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tWeight: Float32(58.9),\n\t\t\t\t\tInner: &InnerMessage{\n\t\t\t\t\t\tHost: String(\"lesha.mtv\"),\n\t\t\t\t\t\tPort: Int32(8002),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t},\n}\n\nfunc TestUnmarshalText(t *testing.T) {\n\tfor i, test := range unMarshalTextTests {\n\t\tpb := new(MyMessage)\n\t\terr := UnmarshalText(test.in, pb)\n\t\tif test.err == \"\" {\n\t\t\t// We don't expect failure.\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"Test %d: Unexpected error: %v\", i, err)\n\t\t\t} else if !reflect.DeepEqual(pb, test.out) {\n\t\t\t\tt.Errorf(\"Test %d: Incorrect populated \\nHave: %v\\nWant: %v\",\n\t\t\t\t\ti, pb, test.out)\n\t\t\t}\n\t\t} else {\n\t\t\t// We do expect failure.\n\t\t\tif err == nil {\n\t\t\t\tt.Errorf(\"Test %d: Didn't get expected error: %v\", i, test.err)\n\t\t\t} else if err.Error() != test.err {\n\t\t\t\tt.Errorf(\"Test %d: Incorrect error.\\nHave: %v\\nWant: %v\",\n\t\t\t\t\ti, err.Error(), test.err)\n\t\t\t} else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) {\n\t\t\t\tt.Errorf(\"Test %d: Incorrect populated \\nHave: %v\\nWant: %v\",\n\t\t\t\t\ti, pb, test.out)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestUnmarshalTextCustomMessage(t *testing.T) {\n\tmsg := &textMessage{}\n\tif err := UnmarshalText(\"custom\", msg); err != nil {\n\t\tt.Errorf(\"Unexpected error from custom unmarshal: %v\", err)\n\t}\n\tif UnmarshalText(\"not custom\", msg) == nil {\n\t\tt.Errorf(\"Didn't get expected error from custom unmarshal\")\n\t}\n}\n\n// Regression test; this caused a panic.\nfunc TestRepeatedEnum(t *testing.T) {\n\tpb := new(RepeatedEnum)\n\tif err := UnmarshalText(\"color: RED\", pb); err != nil {\n\t\tt.Fatal(err)\n\t}\n\texp := &RepeatedEnum{\n\t\tColor: []RepeatedEnum_Color{RepeatedEnum_RED},\n\t}\n\tif !Equal(pb, exp) {\n\t\tt.Errorf(\"Incorrect populated \\nHave: %v\\nWant: %v\", pb, exp)\n\t}\n}\n\nfunc TestProto3TextParsing(t *testing.T) {\n\tm := new(proto3pb.Message)\n\tconst in = `name: \"Wallace\" true_scotsman: true`\n\twant := &proto3pb.Message{\n\t\tName:         \"Wallace\",\n\t\tTrueScotsman: true,\n\t}\n\tif err := UnmarshalText(in, m); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !Equal(m, want) {\n\t\tt.Errorf(\"\\n got %v\\nwant %v\", m, want)\n\t}\n}\n\nfunc TestMapParsing(t *testing.T) {\n\tm := new(MessageWithMap)\n\tconst in = `name_mapping:<key:1234 value:\"Feist\"> name_mapping:<key:1 value:\"Beatles\">` +\n\t\t`msg_mapping:<key:-4, value:<f: 2.0>,>` + // 
separating commas are okay\n\t\t`msg_mapping<key:-2 value<f: 4.0>>` + // no colon after \"value\"\n\t\t`msg_mapping:<value:<f: 5.0>>` + // omitted key\n\t\t`msg_mapping:<key:1>` + // omitted value\n\t\t`byte_mapping:<key:true value:\"so be it\">` +\n\t\t`byte_mapping:<>` // omitted key and value\n\twant := &MessageWithMap{\n\t\tNameMapping: map[int32]string{\n\t\t\t1:    \"Beatles\",\n\t\t\t1234: \"Feist\",\n\t\t},\n\t\tMsgMapping: map[int64]*FloatingPoint{\n\t\t\t-4: {F: Float64(2.0)},\n\t\t\t-2: {F: Float64(4.0)},\n\t\t\t0:  {F: Float64(5.0)},\n\t\t\t1:  nil,\n\t\t},\n\t\tByteMapping: map[bool][]byte{\n\t\t\tfalse: nil,\n\t\t\ttrue:  []byte(\"so be it\"),\n\t\t},\n\t}\n\tif err := UnmarshalText(in, m); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !Equal(m, want) {\n\t\tt.Errorf(\"\\n got %v\\nwant %v\", m, want)\n\t}\n}\n\nfunc TestOneofParsing(t *testing.T) {\n\tconst in = `name:\"Shrek\"`\n\tm := new(Communique)\n\twant := &Communique{Union: &Communique_Name{\"Shrek\"}}\n\tif err := UnmarshalText(in, m); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !Equal(m, want) {\n\t\tt.Errorf(\"\\n got %v\\nwant %v\", m, want)\n\t}\n\n\tconst inOverwrite = `name:\"Shrek\" number:42`\n\tm = new(Communique)\n\ttestErr := \"line 1.13: field 'number' would overwrite already parsed oneof 'Union'\"\n\tif err := UnmarshalText(inOverwrite, m); err == nil {\n\t\tt.Errorf(\"TestOneofParsing: Didn't get expected error: %v\", testErr)\n\t} else if err.Error() != testErr {\n\t\tt.Errorf(\"TestOneofParsing: Incorrect error.\\nHave: %v\\nWant: %v\",\n\t\t\terr.Error(), testErr)\n\t}\n\n}\n\nvar benchInput string\n\nfunc init() {\n\tbenchInput = \"count: 4\\n\"\n\tfor i := 0; i < 1000; i++ {\n\t\tbenchInput += \"pet: \\\"fido\\\"\\n\"\n\t}\n\n\t// Check it is valid input.\n\tpb := new(MyMessage)\n\terr := UnmarshalText(benchInput, pb)\n\tif err != nil {\n\t\tpanic(\"Bad benchmark input: \" + err.Error())\n\t}\n}\n\nfunc BenchmarkUnmarshalText(b *testing.B) {\n\tpb := new(MyMessage)\n\tfor i := 0; i < b.N; i++ {\n\t\tUnmarshalText(benchInput, pb)\n\t}\n\tb.SetBytes(int64(len(benchInput)))\n}\n"
  },
  {
    "path": "vendor/github.com/golang/protobuf/proto/text_test.go",
    "content": "// Go support for Protocol Buffers - Google's data interchange format\n//\n// Copyright 2010 The Go Authors.  All rights reserved.\n// https://github.com/golang/protobuf\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage proto_test\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"io/ioutil\"\n\t\"math\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/golang/protobuf/proto\"\n\n\tproto3pb \"github.com/golang/protobuf/proto/proto3_proto\"\n\tpb \"github.com/golang/protobuf/proto/testdata\"\n)\n\n// textMessage implements the methods that allow it to marshal and unmarshal\n// itself as text.\ntype textMessage struct {\n}\n\nfunc (*textMessage) MarshalText() ([]byte, error) {\n\treturn []byte(\"custom\"), nil\n}\n\nfunc (*textMessage) UnmarshalText(bytes []byte) error {\n\tif string(bytes) != \"custom\" {\n\t\treturn errors.New(\"expected 'custom'\")\n\t}\n\treturn nil\n}\n\nfunc (*textMessage) Reset()         {}\nfunc (*textMessage) String() string { return \"\" }\nfunc (*textMessage) ProtoMessage()  {}\n\nfunc newTestMessage() *pb.MyMessage {\n\tmsg := &pb.MyMessage{\n\t\tCount: proto.Int32(42),\n\t\tName:  proto.String(\"Dave\"),\n\t\tQuote: proto.String(`\"I didn't want to go.\"`),\n\t\tPet:   []string{\"bunny\", \"kitty\", \"horsey\"},\n\t\tInner: &pb.InnerMessage{\n\t\t\tHost:      proto.String(\"footrest.syd\"),\n\t\t\tPort:      proto.Int32(7001),\n\t\t\tConnected: proto.Bool(true),\n\t\t},\n\t\tOthers: []*pb.OtherMessage{\n\t\t\t{\n\t\t\t\tKey:   proto.Int64(0xdeadbeef),\n\t\t\t\tValue: []byte{1, 65, 7, 12},\n\t\t\t},\n\t\t\t{\n\t\t\t\tWeight: proto.Float32(6.022),\n\t\t\t\tInner: &pb.InnerMessage{\n\t\t\t\t\tHost: proto.String(\"lesha.mtv\"),\n\t\t\t\t\tPort: proto.Int32(8002),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tBikeshed: pb.MyMessage_BLUE.Enum(),\n\t\tSomegroup: &pb.MyMessage_SomeGroup{\n\t\t\tGroupField: proto.Int32(8),\n\t\t},\n\t\t// One normally wouldn't do this.\n\t\t// This is an undeclared tag 13, as a varint (wire type 0) with value 4.\n\t\tXXX_unrecognized: []byte{13<<3 | 0, 4},\n\t}\n\text := 
&pb.Ext{\n\t\tData: proto.String(\"Big gobs for big rats\"),\n\t}\n\tif err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil {\n\t\tpanic(err)\n\t}\n\tgreetings := []string{\"adg\", \"easy\", \"cow\"}\n\tif err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Add an unknown extension. We marshal a pb.Ext, and fake the ID.\n\tb, err := proto.Marshal(&pb.Ext{Data: proto.String(\"3G skiing\")})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tb = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...)\n\tproto.SetRawExtension(msg, 201, b)\n\n\t// Extensions can be plain fields, too, so let's test that.\n\tb = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19)\n\tproto.SetRawExtension(msg, 202, b)\n\n\treturn msg\n}\n\nconst text = `count: 42\nname: \"Dave\"\nquote: \"\\\"I didn't want to go.\\\"\"\npet: \"bunny\"\npet: \"kitty\"\npet: \"horsey\"\ninner: <\n  host: \"footrest.syd\"\n  port: 7001\n  connected: true\n>\nothers: <\n  key: 3735928559\n  value: \"\\001A\\007\\014\"\n>\nothers: <\n  weight: 6.022\n  inner: <\n    host: \"lesha.mtv\"\n    port: 8002\n  >\n>\nbikeshed: BLUE\nSomeGroup {\n  group_field: 8\n}\n/* 2 unknown bytes */\n13: 4\n[testdata.Ext.more]: <\n  data: \"Big gobs for big rats\"\n>\n[testdata.greeting]: \"adg\"\n[testdata.greeting]: \"easy\"\n[testdata.greeting]: \"cow\"\n/* 13 unknown bytes */\n201: \"\\t3G skiing\"\n/* 3 unknown bytes */\n202: 19\n`\n\nfunc TestMarshalText(t *testing.T) {\n\tbuf := new(bytes.Buffer)\n\tif err := proto.MarshalText(buf, newTestMessage()); err != nil {\n\t\tt.Fatalf(\"proto.MarshalText: %v\", err)\n\t}\n\ts := buf.String()\n\tif s != text {\n\t\tt.Errorf(\"Got:\\n===\\n%v===\\nExpected:\\n===\\n%v===\\n\", s, text)\n\t}\n}\n\nfunc TestMarshalTextCustomMessage(t *testing.T) {\n\tbuf := new(bytes.Buffer)\n\tif err := proto.MarshalText(buf, &textMessage{}); err != nil {\n\t\tt.Fatalf(\"proto.MarshalText: %v\", err)\n\t}\n\ts := buf.String()\n\tif s != \"custom\" {\n\t\tt.Errorf(\"Got %q, expected %q\", s, \"custom\")\n\t}\n}\nfunc TestMarshalTextNil(t *testing.T) {\n\twant := \"<nil>\"\n\ttests := []proto.Message{nil, (*pb.MyMessage)(nil)}\n\tfor i, test := range tests {\n\t\tbuf := new(bytes.Buffer)\n\t\tif err := proto.MarshalText(buf, test); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif got := buf.String(); got != want {\n\t\t\tt.Errorf(\"%d: got %q want %q\", i, got, want)\n\t\t}\n\t}\n}\n\nfunc TestMarshalTextUnknownEnum(t *testing.T) {\n\t// The Color enum only specifies values 0-2.\n\tm := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()}\n\tgot := m.String()\n\tconst want = `bikeshed:3 `\n\tif got != want {\n\t\tt.Errorf(\"\\n got %q\\nwant %q\", got, want)\n\t}\n}\n\nfunc TestTextOneof(t *testing.T) {\n\ttests := []struct {\n\t\tm    proto.Message\n\t\twant string\n\t}{\n\t\t// zero message\n\t\t{&pb.Communique{}, ``},\n\t\t// scalar field\n\t\t{&pb.Communique{Union: &pb.Communique_Number{4}}, `number:4`},\n\t\t// message field\n\t\t{&pb.Communique{Union: &pb.Communique_Msg{\n\t\t\t&pb.Strings{StringField: proto.String(\"why hello!\")},\n\t\t}}, `msg:<string_field:\"why hello!\" >`},\n\t\t// bad oneof (should not panic)\n\t\t{&pb.Communique{Union: &pb.Communique_Msg{nil}}, `msg:/* nil */`},\n\t}\n\tfor _, test := range tests {\n\t\tgot := strings.TrimSpace(test.m.String())\n\t\tif got != test.want {\n\t\t\tt.Errorf(\"\\n got %s\\nwant %s\", got, test.want)\n\t\t}\n\t}\n}\n\nfunc BenchmarkMarshalTextBuffered(b *testing.B) {\n\tbuf := new(bytes.Buffer)\n\tm := 
newTestMessage()\n\tfor i := 0; i < b.N; i++ {\n\t\tbuf.Reset()\n\t\tproto.MarshalText(buf, m)\n\t}\n}\n\nfunc BenchmarkMarshalTextUnbuffered(b *testing.B) {\n\tw := ioutil.Discard\n\tm := newTestMessage()\n\tfor i := 0; i < b.N; i++ {\n\t\tproto.MarshalText(w, m)\n\t}\n}\n\nfunc compact(src string) string {\n\t// s/[ \\n]+/ /g; s/ $//;\n\tdst := make([]byte, len(src))\n\tspace, comment := false, false\n\tj := 0\n\tfor i := 0; i < len(src); i++ {\n\t\tif strings.HasPrefix(src[i:], \"/*\") {\n\t\t\tcomment = true\n\t\t\ti++\n\t\t\tcontinue\n\t\t}\n\t\tif comment && strings.HasPrefix(src[i:], \"*/\") {\n\t\t\tcomment = false\n\t\t\ti++\n\t\t\tcontinue\n\t\t}\n\t\tif comment {\n\t\t\tcontinue\n\t\t}\n\t\tc := src[i]\n\t\tif c == ' ' || c == '\\n' {\n\t\t\tspace = true\n\t\t\tcontinue\n\t\t}\n\t\tif j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') {\n\t\t\tspace = false\n\t\t}\n\t\tif c == '{' {\n\t\t\tspace = false\n\t\t}\n\t\tif space {\n\t\t\tdst[j] = ' '\n\t\t\tj++\n\t\t\tspace = false\n\t\t}\n\t\tdst[j] = c\n\t\tj++\n\t}\n\tif space {\n\t\tdst[j] = ' '\n\t\tj++\n\t}\n\treturn string(dst[0:j])\n}\n\nvar compactText = compact(text)\n\nfunc TestCompactText(t *testing.T) {\n\ts := proto.CompactTextString(newTestMessage())\n\tif s != compactText {\n\t\tt.Errorf(\"Got:\\n===\\n%v===\\nExpected:\\n===\\n%v\\n===\\n\", s, compactText)\n\t}\n}\n\nfunc TestStringEscaping(t *testing.T) {\n\ttestCases := []struct {\n\t\tin  *pb.Strings\n\t\tout string\n\t}{\n\t\t{\n\t\t\t// Test data from C++ test (TextFormatTest.StringEscape).\n\t\t\t// Single divergence: we don't escape apostrophes.\n\t\t\t&pb.Strings{StringField: proto.String(\"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and  multiple   spaces\")},\n\t\t\t\"string_field: \\\"\\\\\\\"A string with ' characters \\\\n and \\\\r newlines and \\\\t tabs and \\\\001 slashes \\\\\\\\ and  multiple   spaces\\\"\\n\",\n\t\t},\n\t\t{\n\t\t\t// Test data from the same C++ test.\n\t\t\t&pb.Strings{StringField: proto.String(\"\\350\\260\\267\\346\\255\\214\")},\n\t\t\t\"string_field: \\\"\\\\350\\\\260\\\\267\\\\346\\\\255\\\\214\\\"\\n\",\n\t\t},\n\t\t{\n\t\t\t// Some UTF-8.\n\t\t\t&pb.Strings{StringField: proto.String(\"\\x00\\x01\\xff\\x81\")},\n\t\t\t`string_field: \"\\000\\001\\377\\201\"` + \"\\n\",\n\t\t},\n\t}\n\n\tfor i, tc := range testCases {\n\t\tvar buf bytes.Buffer\n\t\tif err := proto.MarshalText(&buf, tc.in); err != nil {\n\t\t\tt.Errorf(\"proto.MarsalText: %v\", err)\n\t\t\tcontinue\n\t\t}\n\t\ts := buf.String()\n\t\tif s != tc.out {\n\t\t\tt.Errorf(\"#%d: Got:\\n%s\\nExpected:\\n%s\\n\", i, s, tc.out)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Check round-trip.\n\t\tpb := new(pb.Strings)\n\t\tif err := proto.UnmarshalText(s, pb); err != nil {\n\t\t\tt.Errorf(\"#%d: UnmarshalText: %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !proto.Equal(pb, tc.in) {\n\t\t\tt.Errorf(\"#%d: Round-trip failed:\\nstart: %v\\n  end: %v\", i, tc.in, pb)\n\t\t}\n\t}\n}\n\n// A limitedWriter accepts some output before it fails.\n// This is a proxy for something like a nearly-full or imminently-failing disk,\n// or a network connection that is about to die.\ntype limitedWriter struct {\n\tb     bytes.Buffer\n\tlimit int\n}\n\nvar outOfSpace = errors.New(\"proto: insufficient space\")\n\nfunc (w *limitedWriter) Write(p []byte) (n int, err error) {\n\tvar avail = w.limit - w.b.Len()\n\tif avail <= 0 {\n\t\treturn 0, outOfSpace\n\t}\n\tif len(p) <= avail {\n\t\treturn w.b.Write(p)\n\t}\n\tn, _ = 
w.b.Write(p[:avail])\n\treturn n, outOfSpace\n}\n\nfunc TestMarshalTextFailing(t *testing.T) {\n\t// Try lots of different sizes to exercise more error code-paths.\n\tfor lim := 0; lim < len(text); lim++ {\n\t\tbuf := new(limitedWriter)\n\t\tbuf.limit = lim\n\t\terr := proto.MarshalText(buf, newTestMessage())\n\t\t// We expect a certain error, but also some partial results in the buffer.\n\t\tif err != outOfSpace {\n\t\t\tt.Errorf(\"Got:\\n===\\n%v===\\nExpected:\\n===\\n%v===\\n\", err, outOfSpace)\n\t\t}\n\t\ts := buf.b.String()\n\t\tx := text[:buf.limit]\n\t\tif s != x {\n\t\t\tt.Errorf(\"Got:\\n===\\n%v===\\nExpected:\\n===\\n%v===\\n\", s, x)\n\t\t}\n\t}\n}\n\nfunc TestFloats(t *testing.T) {\n\ttests := []struct {\n\t\tf    float64\n\t\twant string\n\t}{\n\t\t{0, \"0\"},\n\t\t{4.7, \"4.7\"},\n\t\t{math.Inf(1), \"inf\"},\n\t\t{math.Inf(-1), \"-inf\"},\n\t\t{math.NaN(), \"nan\"},\n\t}\n\tfor _, test := range tests {\n\t\tmsg := &pb.FloatingPoint{F: &test.f}\n\t\tgot := strings.TrimSpace(msg.String())\n\t\twant := `f:` + test.want\n\t\tif got != want {\n\t\t\tt.Errorf(\"f=%f: got %q, want %q\", test.f, got, want)\n\t\t}\n\t}\n}\n\nfunc TestRepeatedNilText(t *testing.T) {\n\tm := &pb.MessageList{\n\t\tMessage: []*pb.MessageList_Message{\n\t\t\tnil,\n\t\t\t&pb.MessageList_Message{\n\t\t\t\tName: proto.String(\"Horse\"),\n\t\t\t},\n\t\t\tnil,\n\t\t},\n\t}\n\twant := `Message <nil>\nMessage {\n  name: \"Horse\"\n}\nMessage <nil>\n`\n\tif s := proto.MarshalTextString(m); s != want {\n\t\tt.Errorf(\" got: %s\\nwant: %s\", s, want)\n\t}\n}\n\nfunc TestProto3Text(t *testing.T) {\n\ttests := []struct {\n\t\tm    proto.Message\n\t\twant string\n\t}{\n\t\t// zero message\n\t\t{&proto3pb.Message{}, ``},\n\t\t// zero message except for an empty byte slice\n\t\t{&proto3pb.Message{Data: []byte{}}, ``},\n\t\t// trivial case\n\t\t{&proto3pb.Message{Name: \"Rob\", HeightInCm: 175}, `name:\"Rob\" height_in_cm:175`},\n\t\t// empty map\n\t\t{&pb.MessageWithMap{}, ``},\n\t\t// non-empty map; map format is the same as a repeated struct,\n\t\t// and they are sorted by key (numerically for numeric keys).\n\t\t{\n\t\t\t&pb.MessageWithMap{NameMapping: map[int32]string{\n\t\t\t\t-1:      \"Negatory\",\n\t\t\t\t7:       \"Lucky\",\n\t\t\t\t1234:    \"Feist\",\n\t\t\t\t6345789: \"Otis\",\n\t\t\t}},\n\t\t\t`name_mapping:<key:-1 value:\"Negatory\" > ` +\n\t\t\t\t`name_mapping:<key:7 value:\"Lucky\" > ` +\n\t\t\t\t`name_mapping:<key:1234 value:\"Feist\" > ` +\n\t\t\t\t`name_mapping:<key:6345789 value:\"Otis\" >`,\n\t\t},\n\t\t// map with nil value; not well-defined, but we shouldn't crash\n\t\t{\n\t\t\t&pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{7: nil}},\n\t\t\t`msg_mapping:<key:7 >`,\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tgot := strings.TrimSpace(test.m.String())\n\t\tif got != test.want {\n\t\t\tt.Errorf(\"\\n got %s\\nwant %s\", got, test.want)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/.dockerignore",
    "content": ".git\nbuild\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/.gitattributes",
    "content": "CHANGELOG.md merge=union\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/.gitignore",
    "content": ".*.swp\ndist/*\n/build/*\n/*.conf\nkapacitor_linux*\nkapacitord_linux*\n/*.tick\n*~\n*#\nkapacitor*.rpm\nkapacitor*.deb\nkapacitor*.tar\nkapacitor*.zip\n*.pyc\n*.test\n/test-logs\n*.prof\n\n# Ignore any built binaries\n/kapacitor\n/kapacitord\n/tickfmt\n/tickdoc\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/BLOB_STORE_DESIGN.md",
    "content": "# Blob Store\n\nThe blob store is a mechanism to store arbitrary data in Kapacitor.\nThe data stored is immutable and opaque to Kapacitor.\n\nData is stored as blobs where each blob has a unique ID.\nA tagging system is used to refer various blobs within the store.\nA blob may be tagged with a given name.\nA blob may be retrieved by its ID or a tag name.\nWhen retrieving a blob via a tag name, the most recently associated blob is returned for that tag.\nTags may be updated, meaning they can be modified to point at a different blob.\nThe history of a tag to blob associations are preserved.\n\nThere are no specific limits on the size of a blob, and blobs can be streamed in and out of the store.\n\n## Uses\n\nThe following details the various uses of the Kapacitor blob store.\n\n### Snapshots\n\nKapacitor will periodically snapshot the state of a running task. (Currently only implemented for UDFs).\nWhen a task is started its previous snapshot or a named snapshot is restored.\n\nKapacitor tasks construct a [DAG](https://en.wikipedia.org/wiki/Directed_acyclic_graph) of the data pipeline.\nEach step in this DAG is called a node.\nSnapshots are associated with a single node within a single task.\nAll nodes are assigned IDs based on the DAG structure.\nWhen the DAG changes the previous snapshots are considered invalid an are no longer used to restore task state.\n\n### UDFs\n\nUDFs can explicitly save and request blobs from the store via the protobuf socket connection with Kapacitor.\nA common use case is to load and store trained model data.\nHowever you use the blob store within your UDF is up to you.\n\n\n## Design\n\nThe blob store will use content addressable IDs(i.e. shasum of the content) and be exposed via the HTTP API of Kapacitor.\n\nBlobs can be created, named and deleted.\nCreating a blob will accept only the content of the blob data and return the ID of the blob.\nNaming a blob associates a specified name to the content of the blob.\nA naming history is recorded, allowing the users to determine the \"version\" history for a given name.\nDeleting a blob removes it from the store.\n\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/CHANGELOG.md",
    "content": "# Changelog\n\n## Unreleased\n\n### Features\n- [#1413](https://github.com/influxdata/kapacitor/issues/1413): Add subscriptions modes to InfluxDB subscriptions.\n- [#1436](https://github.com/influxdata/kapacitor/issues/1436): Add linear fill support for QueryNode.\n- [#1345](https://github.com/influxdata/kapacitor/issues/1345): Add MQTT Alert Handler\n- [#1390](https://github.com/influxdata/kapacitor/issues/1390): Add built in functions to convert timestamps to integers\n- [#1425](https://github.com/influxdata/kapacitor/pull/1425): BREAKING: Change over internal API to use message passing semantics.\n    The breaking change is that the Combine and Flatten nodes previously, but erroneously, operated across batch boundaries; this has been fixed.\n- [#1497](https://github.com/influxdata/kapacitor/pull/1497): Add support for Docker Swarm autoscaling services.\n- [#1485](https://github.com/influxdata/kapacitor/issues/1485): Add bools field types to UDFs.\n\n### Bugfixes\n\n- [#1400](https://github.com/influxdata/kapacitor/issues/1400): Allow for `.yml` file extensions in `define-topic-handler`\n- [#1402](https://github.com/influxdata/kapacitor/pull/1402): Fix http server error logging.\n- [#1500](https://github.com/influxdata/kapacitor/pull/1500): Fix bugs with stopping running UDF agent.\n- [#1470](https://github.com/influxdata/kapacitor/pull/1470): Fix error messages for missing fields which are arguments to functions are not clear\n- [#1516](https://github.com/influxdata/kapacitor/pull/1516): Fix bad PagerDuty test the required server info.\n\n## v1.3.3 [2017-08-11]\n\n### Bugfixes\n- [#1520](https://github.com/influxdata/kapacitor/pull/1520): Expose pprof without authentication if enabled\n\n## v1.3.2 [2017-08-08]\n\n### Bugfixes\n- [#1512](https://github.com/influxdata/kapacitor/pull/1512): Use details field from alert node in PagerDuty.\n\n## v1.3.1 [2017-06-02]\n\n### Bugfixes\n\n- [#1415](https://github.com/influxdata/kapacitor/pull/1415): Proxy from environment for HTTP request to slack\n- [#1414](https://github.com/influxdata/kapacitor/pull/1414): Fix derivative node preserving fields from previous point in stream tasks.\n\n## v1.3.0 [2017-05-22]\n\n### Release Notes\n\nThe v1.3.0 release has two major features.\n\n1. Addition of scraping and discovering for Prometheus style data collection.\n2. 
Updates to the Alert Topic system\n\nHere is a quick example of how to configure Kapacitor to scrape discovered targets.\nFirst configure a discoverer; here we use the file-discovery discoverer.\nNext configure a scraper to use that discoverer.\n\n>NOTE: The scraping and discovery features are released under technical preview,\nmeaning that the configuration or API around the feature may change in a future release.\n\n```\n# Configure file discoverer\n[[file-discovery]]\n enabled = true\n id = \"discover_files\"\n refresh-interval = \"10s\"\n ##### This will look for prometheus json files\n ##### File format is here https://prometheus.io/docs/operating/configuration/#%3Cfile_sd_config%3E\n files = [\"/tmp/prom/*.json\"]\n\n# Configure scraper\n[[scraper]]\n enabled = true\n name = \"node_exporter\"\n discoverer-id = \"discover_files\"\n discoverer-service = \"file-discovery\"\n db = \"prometheus\"\n rp = \"autogen\"\n type = \"prometheus\"\n scheme = \"http\"\n metrics-path = \"/metrics\"\n scrape-interval = \"2s\"\n scrape-timeout = \"10s\"\n```\n\nAdd the above snippet to your kapacitor.conf file.\n\nCreate the below snippet as the file `/tmp/prom/localhost.json`:\n\n```\n[{\n \"targets\": [\"localhost:9100\"]\n}]\n```\n\nStart the Prometheus node_exporter locally.\n\nNow start up Kapacitor and it will discover the `localhost:9100` node_exporter target and begin scraping it for metrics.\nFor more details on the scraping and discovery systems see the full documentation [here](https://docs.influxdata.com/kapacitor/v1.3/scraping).\n\nThe second major feature of this release is a set of changes to the alert topic system.\nThe previous release introduced this new system as a technical preview; with this release the alerting service has been simplified.\nAlert handlers now only ever have a single action and belong to a single topic.\n\nThe handler definition has been simplified as a result.\nHere are some example alert handlers using the new structure:\n\n```yaml\nid: my_handler\nkind: pagerDuty\noptions:\n  serviceKey: XXX\n```\n\n```yaml\nid: aggregate_by_1m\nkind: aggregate\noptions:\n  interval: 1m\n  topic: aggregated\n```\n\n```yaml\nid: publish_to_system\nkind: publish\noptions:\n  topics: [ system ]\n```\n\nTo define a handler now you must specify which topic the handler belongs to.\nFor example, to define the above aggregate handler on the system topic use this command:\n\n```sh\nkapacitor define-handler system aggregate_by_1m.yaml\n```\n\nFor more details on the alerting system see the full documentation [here](https://docs.influxdata.com/kapacitor/v1.3/alerts).\n\n### Bugfixes\n\n- [#1396](https://github.com/influxdata/kapacitor/pull/1396): Fix broken ENV var config overrides for the kubernetes section.\n- [#1397](https://github.com/influxdata/kapacitor/pull/1397): Update default configuration file to include sections for each discoverer service.\n\n## v1.3.0-rc4 [2017-05-19]\n\n### Bugfixes\n\n- [#1379](https://github.com/influxdata/kapacitor/issues/1379): Copy batch points slice before modification, fixes potential panics and data corruption.\n- [#1394](https://github.com/influxdata/kapacitor/pull/1394): Use the Prometheus metric name as the measurement name by default for scrape data.\n- [#1392](https://github.com/influxdata/kapacitor/pull/1392): Fix possible deadlock for scraper configuration updating.\n\n## v1.3.0-rc3 [2017-05-18]\n\n### Bugfixes\n\n- [#1369](https://github.com/influxdata/kapacitor/issues/1369): Fix panic with concurrent writes to same points in state tracking nodes.\n- 
[#1387](https://github.com/influxdata/kapacitor/pull/1387): Simplify static-discovery configuration.\n- [#1378](https://github.com/influxdata/kapacitor/issues/1378): Fix panic in InfluxQL node with missing field.\n\n## v1.3.0-rc2 [2017-05-11]\n\n### Bugfixes\n\n- [#1370](https://github.com/influxdata/kapacitor/issues/1370): Fix missing working_cardinality stats on stateDuration and stateCount nodes.\n\n## v1.3.0-rc1 [2017-05-08]\n\n### Features\n\n- [#1299](https://github.com/influxdata/kapacitor/pull/1299): Allow the Sensu handler to be specified.\n- [#1284](https://github.com/influxdata/kapacitor/pull/1284): Add type signatures to Kapacitor functions.\n- [#1203](https://github.com/influxdata/kapacitor/issues/1203): Add `isPresent` operator for verifying whether a value is present (part of [#1284](https://github.com/influxdata/kapacitor/pull/1284)).\n- [#1354](https://github.com/influxdata/kapacitor/pull/1354): Add Kubernetes scraping support.\n- [#1359](https://github.com/influxdata/kapacitor/pull/1359): Add groupBy exclude and add dropOriginalFieldName to flatten.\n- [#1360](https://github.com/influxdata/kapacitor/pull/1360): Add KapacitorLoopback node to be able to send data from a task back into Kapacitor.\n\n### Bugfixes\n\n- [#1329](https://github.com/influxdata/kapacitor/issues/1329): BREAKING: A bug was fixed around missing fields in the derivative node.\n    The behavior of the node changes slightly in order to provide a consistent fix to the bug.\n    The breaking change is that the time of the points returned is now the right-hand (current) point time, instead of the left-hand (previous) point time.\n- [#1353](https://github.com/influxdata/kapacitor/issues/1353): Fix panic in scraping TargetManager.\n- [#1238](https://github.com/influxdata/kapacitor/pull/1238): Use ProxyFromEnvironment for all outgoing HTTP traffic.\n\n## v1.3.0-beta2 [2017-05-01]\n\n### Features\n\n- [#117](https://github.com/influxdata/kapacitor/issues/117): Add headers to alert POST requests.\n\n### Bugfixes\n\n- [#1294](https://github.com/influxdata/kapacitor/issues/1294): Fix bug where batch queries would be missing all fields after the first nil field.\n- [#1343](https://github.com/influxdata/kapacitor/issues/1343): BREAKING: The UDF agent Go API has changed; the agent package is now self-contained.\n\n## v1.3.0-beta1 [2017-04-29]\n\n### Features\n\n- [#1322](https://github.com/influxdata/kapacitor/pull/1322): TLS configuration in Slack service for Mattermost compatibility\n- [#1330](https://github.com/influxdata/kapacitor/issues/1330): Generic HTTP Post node\n- [#1159](https://github.com/influxdata/kapacitor/pulls/1159): Go version 1.7.4 -> 1.7.5\n- [#1175](https://github.com/influxdata/kapacitor/pull/1175): BREAKING: Add generic error counters to every node type.\n    Renamed `query_errors` to `errors` in batch node.\n    Renamed `eval_errors` to `errors` in eval node.\n- [#922](https://github.com/influxdata/kapacitor/issues/922): Expose server specific information in alert templates.\n- [#1162](https://github.com/influxdata/kapacitor/pulls/1162): Add Pushover integration.\n- [#1221](https://github.com/influxdata/kapacitor/pull/1221): Add `working_cardinality` stat to each node type that tracks the number of groups per node.\n- [#1211](https://github.com/influxdata/kapacitor/issues/1211): Add StateDuration node.\n- [#1209](https://github.com/influxdata/kapacitor/issues/1209): BREAKING: Refactor the Alerting service.\n    The change is completely breaking for the 
technical preview alerting service, a.k.a. the new alert topic handler features.\n    The change boils down to simplifying how you define and interact with topics.\n    Alert handlers now only ever have a single action and belong to a single topic.\n    An automatic migration from old to new handler definitions will be performed during startup.\n    See the updated API docs.\n- [#1286](https://github.com/influxdata/kapacitor/issues/1286): Default HipChat URL should be blank\n- [#507](https://github.com/influxdata/kapacitor/issues/507): Add API endpoint for performing Kapacitor database backups.\n- [#1132](https://github.com/influxdata/kapacitor/issues/1132): Add source for Sensu alert as a parameter\n- [#1346](https://github.com/influxdata/kapacitor/pull/1346): Add discovery and scraping services.\n\n### Bugfixes\n\n- [#1133](https://github.com/influxdata/kapacitor/issues/1133): Fix case-sensitivity for Telegram `parseMode` value.\n- [#1147](https://github.com/influxdata/kapacitor/issues/1147): Fix pprof debug endpoint\n- [#1164](https://github.com/influxdata/kapacitor/pull/1164): Fix hang in config API to update a config section.\n    Now if the service update process takes too long the request will time out and return an error.\n    Previously the request would block forever.\n- [#1165](https://github.com/influxdata/kapacitor/issues/1165): Make the alerta auth token prefix configurable and default it to Bearer.\n- [#1184](https://github.com/influxdata/kapacitor/pull/1184): Fix logrotate file to correctly rotate error log.\n- [#1200](https://github.com/influxdata/kapacitor/pull/1200): Fix bug with alert duration being incorrect after restoring alert state.\n- [#1199](https://github.com/influxdata/kapacitor/pull/1199): BREAKING: Fix inconsistency with JSON data from alerts.\n    The alert handlers Alerta, Log, OpsGenie, PagerDuty, Post and VictorOps allow extra opaque data to be attached to alert notifications.\n    That opaque data was inconsistent and this change fixes that.\n    Depending on how that data was consumed this could result in a breaking change. Since the original behavior was inconsistent,\n    we decided it would be best to fix the issue now and make it consistent for all future builds.\n    Specifically, in the JSON result data the old key `Series` is now always `series`, and the old key `Err` is now always `error`, instead of only for some of the outputs.\n- [#1181](https://github.com/influxdata/kapacitor/pull/1181): Fix bug parsing dbrp values with quotes.\n- [#1228](https://github.com/influxdata/kapacitor/pull/1228): Fix panic on loading replay files without a file extension.\n- [#1192](https://github.com/influxdata/kapacitor/issues/1192): Fix bug in Default Node not updating batch tags and groupID.\n    Also empty string on a tag value is now a sufficient condition for the default conditions to be applied.\n    See [#1233](https://github.com/influxdata/kapacitor/pull/1233) for more information.\n- [#1068](https://github.com/influxdata/kapacitor/issues/1068): Fix dot view syntax to use xlabels and not create invalid quotes.\n- [#1295](https://github.com/influxdata/kapacitor/issues/1295): Fix corruption of recordings list after deleting all recordings.\n- [#1237](https://github.com/influxdata/kapacitor/issues/1237): Fix missing \"vars\" key when listing tasks.\n- [#1271](https://github.com/influxdata/kapacitor/issues/1271): Fix bug where aggregates would not be able to change type.\n- [#1261](https://github.com/influxdata/kapacitor/issues/1261): Fix panic when the process 
cannot stat the data dir.\n\n## v1.2.1 [2017-04-13]\n\n### Bugfixes\n\n- [#1323](https://github.com/influxdata/kapacitor/pull/1323): Fix issue where credentials to InfluxDB could not be updated dynamically.\n\n## v1.2.0 [2017-01-23]\n\n### Release Notes\n\nA new system for working with alerts has been introduced.\nThis alerting system allows you to configure topics for alert events and then configure handlers for various topics.\nThis way alert generation is decoupled from alert handling.\n\nExisting TICKscripts will continue to work without modification.\n\nTo use this new alerting system remove any explicit alert handlers from your TICKscript and specify a topic.\nThen configure the handlers for the topic.\n\n```\nstream\n    |from()\n      .measurement('cpu')\n      .groupBy('host')\n    |alert()\n      // Specify the topic for the alert\n      .topic('cpu')\n      .info(lambda: \"value\" > 60)\n      .warn(lambda: \"value\" > 70)\n      .crit(lambda: \"value\" > 80)\n      // No handlers are configured in the script, they are instead defined on the topic via the API.\n```\n\nThe API exposes endpoints to query the state of each alert and endpoints for configuring alert handlers.\nSee the [API docs](https://docs.influxdata.com/kapacitor/latest/api/api/) for more details.\nThe kapacitor CLI has been updated with commands for defining alert handlers.\n\nThis release introduces a new feature where you can window based off the number of points instead of their time.\nFor example:\n\n```\nstream\n    |from()\n        .measurement('my-measurement')\n    // Emit window for every 10 points with 100 points per window.\n    |window()\n        .periodCount(100)\n        .everyCount(10)\n    |mean('value')\n    |alert()\n         .crit(lambda: \"mean\" > 100)\n         .slack()\n         .channel('#alerts')\n```\n\n\nWith this change alert nodes will have an anonymous topic created for them.\nThis topic is managed like all other topics preserving state etc. 
across restarts.\nAs a result, existing alert nodes will now remember the state of alerts after restarts and disabling/enabling a task.\n\n>NOTE: The new alerting features are being released under technical preview.\nThis means breaking changes may be made in later releases until the feature is considered complete.\nSee the [API docs on technical preview](https://docs.influxdata.com/kapacitor/v1.2/api/api/#technical-preview) for specifics of how this affects the API.\n\n### Features\n\n- [#1110](https://github.com/influxdata/kapacitor/pull/1110): Add new query property for aligning group by intervals to start times.\n- [#1095](https://github.com/influxdata/kapacitor/pull/1095): Add new alert API, with support for configuring handlers and topics.\n- [#1052](https://github.com/influxdata/kapacitor/issues/1052): Move alerta api token to header and add option to skip TLS verification.\n- [#929](https://github.com/influxdata/kapacitor/pull/929): Add SNMP trap service for alerting.\n- [#913](https://github.com/influxdata/kapacitor/issues/913): Add fillPeriod option to Window node, so that the first emit waits till the period has elapsed before emitting.\n- [#898](https://github.com/influxdata/kapacitor/issues/898): Now when the Window node's every value is zero, the window will be emitted immediately for each new point.\n- [#744](https://github.com/influxdata/kapacitor/issues/744): Preserve alert state across restarts and disable/enable actions.\n- [#327](https://github.com/influxdata/kapacitor/issues/327): You can now window based on count in addition to time.\n- [#251](https://github.com/influxdata/kapacitor/issues/251): Enable markdown in slack attachments.\n\n\n### Bugfixes\n\n- [#1100](https://github.com/influxdata/kapacitor/issues/1100): Fix issue with the Union node buffering more points than necessary.\n- [#1087](https://github.com/influxdata/kapacitor/issues/1087): Fix panic during close of failed startup when connecting to InfluxDB.\n- [#1045](https://github.com/influxdata/kapacitor/issues/1045): Fix panic during replays.\n- [#1043](https://github.com/influxdata/kapacitor/issues/1043): logrotate.d ignores kapacitor configuration due to bad file mode.\n- [#872](https://github.com/influxdata/kapacitor/issues/872): Fix panic during failed aggregate results.\n\n## v1.1.1 [2016-12-02]\n\n### Release Notes\n\nNo changes to Kapacitor, only upgrading to go 1.7.4 for security patches.\n\n## v1.1.0 [2016-10-07]\n\n### Release Notes\n\nNew K8sAutoscale node that allows you to automatically scale Kubernetes deployments driven by any metrics Kapacitor consumes.\nFor example, to scale a deployment `myapp` based on requests per second:\n\n```\n// The target requests per second per host\nvar target = 100.0\n\nstream\n    |from()\n        .measurement('requests')\n        .where(lambda: \"deployment\" == 'myapp')\n    // Compute the moving average of the last 5 minutes\n    |movingAverage('requests', 5*60)\n        .as('mean_requests_per_second')\n    |k8sAutoscale()\n        .resourceName('app')\n        .kind('deployments')\n        .min(4)\n        .max(100)\n        // Compute the desired number of replicas based on target.\n        .replicas(lambda: int(ceil(\"mean_requests_per_second\" / target)))\n```\n\n\nNew API endpoints have been added to be able to configure InfluxDB clusters and alert handlers dynamically without needing to restart the Kapacitor daemon.\nAlong with the ability to dynamically configure a service, API endpoints have been added to test the configurable services.\nSee the [API docs](https://docs.influxdata.com/kapacitor/latest/api/api/) for more details.\n
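\nAs an illustration, here is a minimal Go sketch that updates the password of a configured InfluxDB cluster through this API (the config section/element path and the {\"set\": ...} action shape follow the API docs linked above; the host, cluster name, and password here are placeholders):\n\n```go\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net/http\"\n)\n\nfunc main() {\n\t// Override one option of the \"localhost\" InfluxDB cluster; no restart required.\n\tbody := bytes.NewBufferString(`{\"set\": {\"password\": \"secret\"}}`)\n\tresp, err := http.Post(\"http://localhost:9092/kapacitor/v1/config/influxdb/localhost\", \"application/json\", body)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer resp.Body.Close()\n\tfmt.Println(resp.Status)\n}\n```\n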
\n>NOTE: The `connect_errors` stat from the query node was removed since the client changed; all errors are now counted in the `query_errors` stat.\n\n### Features\n\n- [#931](https://github.com/influxdata/kapacitor/issues/931): Add a Kubernetes autoscaler node. You can now autoscale your Kubernetes deployments via Kapacitor.\n- [#928](https://github.com/influxdata/kapacitor/issues/928): Add new API endpoint for dynamically overriding sections of the configuration.\n- [#980](https://github.com/influxdata/kapacitor/pull/980): Upgrade to using go 1.7\n- [#957](https://github.com/influxdata/kapacitor/issues/957): Add API endpoints for testing service integrations.\n- [#958](https://github.com/influxdata/kapacitor/issues/958): Add support for Slack icon emojis and custom usernames.\n- [#991](https://github.com/influxdata/kapacitor/pull/991): Bring Kapacitor up to parity with available InfluxQL functions in 1.1\n\n### Bugfixes\n\n- [#984](https://github.com/influxdata/kapacitor/issues/984): Fix bug where keeping a list of fields that were not referenced in the eval expressions would cause an error.\n- [#955](https://github.com/influxdata/kapacitor/issues/955): Fix the number of subscriptions statistic.\n- [#999](https://github.com/influxdata/kapacitor/issues/999): Fix inconsistency with InfluxDB by adding config option to set a default retention policy.\n- [#1018](https://github.com/influxdata/kapacitor/pull/1018): Sort and dynamically adjust column width in CLI output. Fixes #785\n- [#1019](https://github.com/influxdata/kapacitor/pull/1019): Add missing strLength function.\n\n## v1.0.2 [2016-10-06]\n\n### Release Notes\n\n### Features\n\n### Bugfixes\n\n- [#951](https://github.com/influxdata/kapacitor/pull/951): Fix bug where errors saving cluster/server ID files were ignored.\n- [#954](https://github.com/influxdata/kapacitor/pull/954): Create data_dir on startup if it does not exist.\n\n## v1.0.1 [2016-09-26]\n\n### Release Notes\n\n### Features\n\n- [#873](https://github.com/influxdata/kapacitor/pull/873): Add TCP alert handler\n- [#869](https://github.com/influxdata/kapacitor/issues/869): Add ability to set alert message as a field\n- [#854](https://github.com/influxdata/kapacitor/issues/854): Add `.create` property to InfluxDBOut node, which when set will create the database\n    and retention policy on task start.\n- [#909](https://github.com/influxdata/kapacitor/pull/909): Allow duration / duration in TICKscript.\n- [#777](https://github.com/influxdata/kapacitor/issues/777): Add support for string manipulation functions.\n- [#886](https://github.com/influxdata/kapacitor/issues/886): Add ability to set specific HTTP port and hostname per configured InfluxDB cluster.\n\n### Bugfixes\n\n- [#889](https://github.com/influxdata/kapacitor/issues/889): Fix some typos in the default config file\n- [#914](https://github.com/influxdata/kapacitor/pull/914): Change |log() output to be in JSON format so its structure is self-documenting.\n- [#915](https://github.com/influxdata/kapacitor/pull/915): Fix issue with TMax and the Holt-Winters method.\n- [#927](https://github.com/influxdata/kapacitor/pull/927): Fix bug with TMax and group by time.\n\n## v1.0.0 [2016-09-02]\n\n### Release Notes\n\nFinal release of v1.0.0.\n\n## v1.0.0-rc3 [2016-09-01]\n\n### Release Notes\n\n### Features\n\n### Bugfixes\n\n- [#842](https://github.com/influxdata/kapacitor/issues/842): Fix side-effecting modification in batch WhereNode.\n\n## v1.0.0-rc2 
[2016-08-29]\n\n### Release Notes\n\n### Features\n\n- [#827](https://github.com/influxdata/kapacitor/issues/827): Bring Kapacitor up to parity with available InfluxQL functions in 1.0\n\n### Bugfixes\n\n- [#763](https://github.com/influxdata/kapacitor/issues/763): Fix NaNs being returned from the `sigma` stateful function.\n- [#468](https://github.com/influxdata/kapacitor/issues/468): Fix tickfmt munging escaped slashes in regexes.\n\n## v1.0.0-rc1 [2016-08-22]\n\n### Release Notes\n\n#### Alert reset expressions\n\nKapacitor now supports alert reset expressions.\nThis way when an alert enters a state, it can only be lowered in severity if its reset expression evaluates to true.\n\nExample:\n\n```go\nstream\n    |from()\n      .measurement('cpu')\n      .where(lambda: \"host\" == 'serverA')\n      .groupBy('host')\n    |alert()\n      .info(lambda: \"value\" > 60)\n      .infoReset(lambda: \"value\" < 50)\n      .warn(lambda: \"value\" > 70)\n      .warnReset(lambda: \"value\" < 60)\n      .crit(lambda: \"value\" > 80)\n      .critReset(lambda: \"value\" < 70)\n```\n\nFor example, given the following values:\n\n    61 73 64 85 62 56 47\n\nThe corresponding alert states are:\n\n    INFO WARNING WARNING CRITICAL INFO INFO OK\n\n### Features\n\n- [#740](https://github.com/influxdata/kapacitor/pull/740): Support reset expressions to prevent an alert from being lowered in severity. Thanks @minhdanh!\n- [#670](https://github.com/influxdata/kapacitor/issues/670): Add ability to suppress OK recovery alert events.\n- [#804](https://github.com/influxdata/kapacitor/pull/804): Add API endpoint for refreshing subscriptions.\n    Also fixes issue where subs were not relinked if the sub was deleted.\n    UDP listen ports are closed when a database is dropped.\n\n### Bugfixes\n\n- [#783](https://github.com/influxdata/kapacitor/pull/783): Fix panic when revoking tokens not already defined.\n- [#784](https://github.com/influxdata/kapacitor/pull/784): Fix several issues with comment formatting in TICKscript.\n- [#786](https://github.com/influxdata/kapacitor/issues/786): Deleting tags now updates the group by dimensions if needed.\n- [#772](https://github.com/influxdata/kapacitor/issues/772): Delete task snapshot data when a task is deleted.\n- [#797](https://github.com/influxdata/kapacitor/issues/797): Fix panic from race condition in task master.\n- [#811](https://github.com/influxdata/kapacitor/pull/811): Fix bug where subscriptions + tokens would not work with more than one InfluxDB cluster.\n- [#812](https://github.com/influxdata/kapacitor/issues/812): Upgrade to use protobuf version 3.0.0\n\n## v1.0.0-beta4 [2016-07-27]\n\n### Release Notes\n\n#### Group By Fields\n\nKapacitor now supports grouping by fields.\nFirst convert a field into a tag using the EvalNode.\nThen group by the new tag.\n\nExample:\n\n```go\nstream\n    |from()\n        .measurement('alerts')\n    // Convert field 'level' to tag.\n    |eval(lambda: string(\"level\"))\n        .as('level')\n        .tags('level')\n    // Group by new tag 'level'.\n    |groupBy('alert', 'level')\n    |...\n```\n\nNote the field `level` is now removed from the point since `.keep` was not used.\nSee the [docs](https://docs.influxdata.com/kapacitor/v1.0/nodes/eval_node/#tags) for more details on how `.tags` works.\n\n\n#### Delete Fields or Tags\n\nAs a companion to being able to create new tags, you can now delete tags or fields.\n\n\nExample:\n\n```go\nstream\n    |from()\n        .measurement('alerts')\n    |delete()\n        // Remove the field `extra` 
and tag `uuid` from all points.\n        .field('extra')\n        .tag('uuid')\n    |...\n```\n\n\n\n### Features\n\n- [#702](https://github.com/influxdata/kapacitor/pull/702): Add plumbing for authentication backends.\n- [#624](https://github.com/influxdata/kapacitor/issue/624): BREAKING: Add ability to GroupBy fields. First use EvalNode to create a tag from a field and then group by the new tag.\n    Also allows for grouping by measurement.\n    The breaking change is that the group ID format has changed to allow for the measurement name.\n- [#759](https://github.com/influxdata/kapacitor/pull/759): Add mechanism for token based subscription auth.\n- [#745](https://github.com/influxdata/kapacitor/pull/745): Add if function for TICKscript, for example: `if(\"value\" > 6, 1, 2)`.\n\n### Bugfixes\n\n- [#710](https://github.com/influxdata/kapacitor/pull/710): Fix infinite loop when parsing unterminated regex in TICKscript.\n- [#711](https://github.com/influxdata/kapacitor/issues/711): Fix where database name with quotes breaks subscription startup logic.\n- [#719](https://github.com/influxdata/kapacitor/pull/719): Fix panic on replay.\n- [#723](https://github.com/influxdata/kapacitor/pull/723): BREAKING: Search for valid configuration on startup in ~/.kapacitor and /etc/kapacitor/.\n    This is so that the -config CLI flag is not required if the configuration is found in a standard location.\n    The configuration file being used is always logged to STDERR.\n- [#298](https://github.com/influxdata/kapacitor/issues/298): BREAKING: Change alert level evaluation so each level is independent and not required to be a subset of the previous level.\n    The breaking change is that expression evaluation order changed.\n    As a result stateful expressions that relied on that order are broken.\n- [#749](https://github.com/influxdata/kapacitor/issues/749): Fix issue with tasks with empty DAG.\n- [#718](https://github.com/influxdata/kapacitor/issues/718): Fix broken extra expressions for deadman's switch.\n- [#752](https://github.com/influxdata/kapacitor/issues/752): Fix various bugs relating to the `fill` operation on a JoinNode.\n    Fill with batches and fill when using the `on` property were broken.\n    Also changes the DefaultNode to set defaults for nil fields.\n\n## v1.0.0-beta3 [2016-07-09]\n\n### Release Notes\n\n### Features\n\n- [#662](https://github.com/influxdata/kapacitor/pull/662): Add `-skipVerify` flag to `kapacitor` CLI tool to skip SSL verification.\n- [#680](https://github.com/influxdata/kapacitor/pull/680): Add Telegram Alerting option, thanks @burdandrei!\n- [#46](https://github.com/influxdata/kapacitor/issues/46): Can now create combinations of points within the same stream.\n  This is kind of like a join, but instead it joins a stream with itself.\n- [#669](https://github.com/influxdata/kapacitor/pull/669): Add size function to humanize byte sizes. thanks @jsvisa!\n- [#697](https://github.com/influxdata/kapacitor/pull/697): Can now flatten a set of points into a single point, creating dynamically named fields.\n- [#698](https://github.com/influxdata/kapacitor/pull/698): Join delimiter can be specified.\n- [#695](https://github.com/influxdata/kapacitor/pull/695): Bash completion filters by enabled/disabled status. Thanks @bbczeuz!\n- [#706](https://github.com/influxdata/kapacitor/pull/706): Package UDF agents\n- [#707](https://github.com/influxdata/kapacitor/pull/707): Add size field to BeginBatch struct of UDF protocol. 
Provides hint as to size of incoming batch.\n\n### Bugfixes\n\n- [#656](https://github.com/influxdata/kapacitor/pull/656): Fix issues where an expression could not be passed as a function parameter in TICKscript.\n- [#627](https://github.com/influxdata/kapacitor/issues/627): Fix where InfluxQL functions that returned a batch could drop tags.\n- [#674](https://github.com/influxdata/kapacitor/issues/674): Fix panic with Join On and batches.\n- [#665](https://github.com/influxdata/kapacitor/issues/665): BREAKING: Fix file mode not being correct for Alert.Log files.\n  Breaking change is that integer numbers prefixed with a 0 in TICKscript are interpreted as octal numbers.\n- [#667](https://github.com/influxdata/kapacitor/issues/667): Align deadman timestamps to interval.\n\n## v1.0.0-beta2 [2016-06-17]\n\n### Release Notes\n\n### Features\n\n- [#636](https://github.com/influxdata/kapacitor/pull/636): Change HTTP logs to be in Common Log format.\n- [#652](https://github.com/influxdata/kapacitor/pull/652): Add optional replay ID to the task API so that you can get information about a task inside a running replay.\n\n### Bugfixes\n\n- [#621](https://github.com/influxdata/kapacitor/pull/621): Fix obscure error about single vs double quotes.\n- [#623](https://github.com/influxdata/kapacitor/pull/623): Fix issues with recording metadata missing data url.\n- [#631](https://github.com/influxdata/kapacitor/issues/631): Fix issues with using iterative lambda expressions in an EvalNode.\n- [#628](https://github.com/influxdata/kapacitor/issues/628): BREAKING: Change `kapacitord config` to not search default location for configuration files but rather require the `-config` option.\n    Since the `kapacitord run` command behaves this way they should be consistent.\n    Fix issue with `kapacitord config > kapacitor.conf` when the output file was a default location for the config.\n- [#626](https://github.com/influxdata/kapacitor/issues/626): Fix issues when changing the ID of an enabled task.\n- [#624](https://github.com/influxdata/kapacitor/pull/624): Fix issues where you could get a read error on a closed UDF socket.\n- [#651](https://github.com/influxdata/kapacitor/pull/651): Fix issues where an error during a batch replay would hang because the task wouldn't stop.\n- [#650](https://github.com/influxdata/kapacitor/pull/650): BREAKING: The default retention policy name was changed to `autogen` in InfluxDB.\n    This changes Kapacitor to use `autogen` for the default retention policy for the stats.\n    You may need to update your task DBRPs to use `autogen` instead of `default`.\n\n\n## v1.0.0-beta1 [2016-06-06]\n\n### Release Notes\n\n#### Template Tasks\n\nThe ability to create and use template tasks has been added.\nYou can define a template for a task and reuse that template across multiple tasks.\n\nA simple example:\n\n```go\n// Which measurement to consume\nvar measurement string\n// Optional where filter\nvar where_filter = lambda: TRUE\n// Optional list of group by dimensions\nvar groups = [*]\n// Which field to process\nvar field string\n// Warning criteria, has access to 'mean' field\nvar warn lambda\n// Critical criteria, has access to 'mean' field\nvar crit lambda\n// How much data to window\nvar window = 5m\n// The slack channel for alerts\nvar slack_channel = '#alerts'\n\nstream\n    |from()\n        .measurement(measurement)\n        .where(where_filter)\n        .groupBy(groups)\n    |window()\n        .period(window)\n        .every(window)\n    |mean(field)\n    |alert()\n         
.warn(warn)\n         .crit(crit)\n         .slack()\n         .channel(slack_channel)\n```\n\nThen you can define the template like so:\n\n```\nkapacitor define-template generic_mean_alert -tick path/to/above/script.tick -type stream\n```\n\nNext define a task that uses the template:\n\n```\nkapacitor define cpu_alert -template generic_mean_alert -vars cpu_vars.json -dbrp telegraf.default\n```\n\nWhere `cpu_vars.json` would look like this:\n\n```json\n{\n    \"measurement\": {\"type\" : \"string\", \"value\" : \"cpu\" },\n    \"where_filter\": {\"type\": \"lambda\", \"value\": \"\\\"cpu\\\" == 'cpu-total'\"},\n    \"groups\": {\"type\": \"list\", \"value\": [{\"type\":\"string\", \"value\":\"host\"},{\"type\":\"string\", \"value\":\"dc\"}]},\n    \"field\": {\"type\" : \"string\", \"value\" : \"usage_idle\" },\n    \"warn\": {\"type\" : \"lambda\", \"value\" : \" \\\"mean\\\" < 30.0\" },\n    \"crit\": {\"type\" : \"lambda\", \"value\" : \" \\\"mean\\\" < 10.0\" },\n    \"window\": {\"type\" : \"duration\", \"value\" : \"1m\" },\n    \"slack_channel\": {\"type\" : \"string\", \"value\" : \"#alerts_testing\" }\n}\n```\n\n\n#### Live Replays\n\nWith this release you can now replay data directly against a task from InfluxDB without having to first create a recording.\nReplay the queries defined in the batch task `cpu_alert` for the past 10 hours:\n```sh\nkapacitor replay-live batch -task cpu_alert -past 10h\n```\n\nOr for a stream task, use a query directly:\n\n```sh\nkapacitor replay-live query -task cpu_alert -query 'SELECT usage_idle FROM telegraf.\"default\".cpu WHERE time > now() - 10h'\n```\n\n#### HTTP based subscriptions\n\nNow InfluxDB and Kapacitor support HTTP/S based subscriptions.\nThis means that Kapacitor need only listen on a single port for the HTTP service, greatly simplifying configuration and setup.\n\nIn order to start using HTTP subscriptions change the `subscription-protocol` option for your configured InfluxDB clusters.\n\nFor example:\n\n```\n[[influxdb]]\n  enabled = true\n  urls = [\"http://localhost:8086\",]\n  subscription-protocol = \"http\"\n  # or to use https\n  #subscription-protocol = \"https\"\n```\n\nOn startup Kapacitor will detect the change and recreate the subscriptions in InfluxDB to use the HTTP protocol.\n\n>NOTE: While HTTP itself is a TCP transport such that packet loss shouldn't be an issue, if Kapacitor starts to slow down for whatever reason, InfluxDB will drop the subscription writes to Kapacitor.\nIn order to know if subscription writes are being dropped you should monitor the measurement `_internal.monitor.subscriber` for the field `writeFailures`.\n\n#### Holt-Winters Forecasting\n\nThis release contains a new Holt-Winters InfluxQL function.\n\nWith this forecasting method one can now define an alert based on forecasted future values.\n\nFor example, the following TICKscript will take the last 30 days of disk usage stats and, using Holt-Winters, forecast the next 7 days.\nIf the forecasted value crosses a threshold an alert is triggered.\n\nThe result is that Kapacitor will now alert you 7 days in advance of a disk filling up.\nThis assumes slow growth, but by changing the vars in the script you could check for shorter growth intervals.\n\n```go\n// The interval on which to aggregate the disk usage\nvar growth_interval = 1d\n// The number of `growth_interval`s to forecast into the future\nvar forecast_count = 7\n// The amount of historical data to use for the fit\nvar history = 30d\n\n// The critical threshold on used_percent\nvar 
threshold = 90.0\n\nbatch\n    |query('''\n    SELECT max(used_percent) as used_percent\n    FROM \"telegraf\".\"default\".\"disk\"\n''')\n        .period(history)\n        .every(growth_interval)\n        .align()\n        .groupBy(time(growth_interval), *)\n    |holtWinters('used_percent', forecast_count, 0, growth_interval)\n        .as('used_percent')\n    |max('used_percent')\n        .as('used_percent')\n    |alert()\n         // Trigger alert if the forecasted disk usage is greater than threshold\n        .crit(lambda: \"used_percent\" > threshold)\n```\n\n\n### Features\n\n- [#283](https://github.com/influxdata/kapacitor/issues/283): Add live replays.\n- [#500](https://github.com/influxdata/kapacitor/issues/500): Support Float, Integer, String and Boolean types.\n- [#82](https://github.com/influxdata/kapacitor/issues/82): Multiple services for PagerDuty alert. thanks @savagegus!\n- [#558](https://github.com/influxdata/kapacitor/pull/558): Preserve fields as well as tags on selector InfluxQL functions.\n- [#259](https://github.com/influxdata/kapacitor/issues/259): Template Tasks have been added.\n- [#562](https://github.com/influxdata/kapacitor/pull/562): HTTP based subscriptions.\n- [#595](https://github.com/influxdata/kapacitor/pull/595): Support counting and summing empty batches to 0.\n- [#596](https://github.com/influxdata/kapacitor/pull/596): Support new group by time offset i.e. time(30s, 5s)\n- [#416](https://github.com/influxdata/kapacitor/issues/416): Track ingress counts by database, retention policy, and measurement. Expose stats via cli.\n- [#586](https://github.com/influxdata/kapacitor/pull/586): Add spread stateful function. thanks @upccup!\n- [#600](https://github.com/influxdata/kapacitor/pull/600): Close http response after handler alert post, thanks @jsvisa!\n- [#606](https://github.com/influxdata/kapacitor/pull/606): Add Holt-Winters forecasting method.\n- [#605](https://github.com/influxdata/kapacitor/pull/605): BREAKING: StatsNode for batch edge now counts the number of points in a batch instead of counting batches as a whole.\n    This is only breaking if you have a deadman switch configured on a batch edge.\n- [#611](https://github.com/influxdata/kapacitor/pull/611): Add bash completion to the kapacitor CLI tool.\n\n\n### Bugfixes\n\n- [#540](https://github.com/influxdata/kapacitor/issues/540): Fixes bug with log level API endpoint.\n- [#521](https://github.com/influxdata/kapacitor/issues/521): EvalNode now honors groups.\n- [#561](https://github.com/influxdata/kapacitor/issues/561): Fixes bug where lambda expressions would return an error about types with nested binary expressions.\n- [#555](https://github.com/influxdata/kapacitor/issues/555): Fixes bug where \"time\" functions didn't work in lambda expressions.\n- [#570](https://github.com/influxdata/kapacitor/issues/570): Removes panic in SMTP service on failed close connection.\n- [#587](https://github.com/influxdata/kapacitor/issues/587): Allow number literals without leading zeros.\n- [#584](https://github.com/influxdata/kapacitor/issues/584): Do not block during startup to send usage stats.\n- [#553](https://github.com/influxdata/kapacitor/issues/553): Periodically check if new InfluxDB DBRPs have been created.\n- [#602](https://github.com/influxdata/kapacitor/issues/602): Fix missing To property on email alert handler.\n- [#581](https://github.com/influxdata/kapacitor/issues/581): Record/Replay batch tasks get cluster info from task not API.\n- [#613](https://github.com/influxdata/kapacitor/issues/613): 
BREAKING: Allow the ID of templates and tasks to be updated via the PATCH method.\n    The breaking change is that PATCH requests now return a 200 with the template or task definition, where before they returned 204.\n\n## v0.13.1 [2016-05-13]\n\n### Release Notes\n\n>**Breaking changes may require special upgrade steps from versions <= 0.12, please read the 0.13.0 release notes**\n\nAlong with the API changes of 0.13.0, validation logic was added to task IDs, but this was not well documented.\nThis minor release remedies that.\n\nAll IDs (tasks, recordings, replays) must match this regex `^[-\\._\\p{L}0-9]+$`, which is essentially numbers, unicode letters, '-', '.' and '_'.\n\nIf you have existing tasks which do not match this pattern they should continue to function normally.\n\n### Features\n\n### Bugfixes\n\n- [#545](https://github.com/influxdata/kapacitor/issues/545): Fixes inconsistency with API docs for creating a task.\n- [#544](https://github.com/influxdata/kapacitor/issues/544): Fixes issues with existing tasks and invalid names.\n- [#543](https://github.com/influxdata/kapacitor/issues/543): Fixes default values not being set correctly in API calls.\n\n\n## v0.13.0 [2016-05-11]\n\n### Release Notes\n\n>**Breaking changes may require special upgrade steps please read below.**\n\n#### Upgrade Steps\n\nChanges to how and where task data is stored have been made.\nIn order to safely upgrade to version 0.13 you need to follow these steps:\n\n1. Upgrade InfluxDB to version 0.13 first.\n2. Update all TICKscripts to use the new `|` and `@` operators. Once Kapacitor no longer issues any `DEPRECATION` warnings you are ready to begin the upgrade.\nThe upgrade will work without this step but tasks using the old syntax cannot be enabled until modified to use the new syntax.\n3. Upgrade the Kapacitor binary/package.\n4. Configure new database location. By default the location `/var/lib/kapacitor/kapacitor.db` is chosen for package installs or `./kapacitor.db` for manual installs.\nDo **not** remove the configuration for the location of the old task.db database file since it is still needed to do the migration.\n\n    ```\n    [storage]\n    boltdb = \"/var/lib/kapacitor/kapacitor.db\"\n    ```\n\n5. Restart Kapacitor. At this point Kapacitor will migrate all existing data to the new database file.\nIf any errors occur Kapacitor will log them and fail to start up. 
This way if Kapacitor starts up you can be sure the migration was a success and can continue normal operation.\nThe old database is opened in read only mode so that existing data cannot be corrupted.\nIt's recommended to start Kapacitor in debug logging mode for the migration so you can follow the details of the migration process.\n\nAt this point you may remove the configuration for the old `task` `dir` and restart Kapacitor to ensure everything is working.\nKapacitor will attempt the migration on every startup while the old configuration and db file exist, but will skip any data that was already migrated.\n\n\n#### API Changes\n\nWith this release the API has been updated to what we believe will be the stable version for a 1.0 release.\nSmall changes may still be made but the significant work to create a RESTful HTTP API is complete.\nMany breaking changes were introduced; see the [client/API.md](http://github.com/influxdata/kapacitor/blob/master/client/API.md) doc for details on how the API works now.\n\n#### CLI Changes\n\nAlong with the API changes, breaking changes were also made to the `kapacitor` CLI command.\nHere is a breakdown of the CLI changes:\n\n* Everything has an ID now: tasks, recordings, even replays.\n    The `name` used before to define a task is now its `ID`.\n    As such, instead of using `-name` and `-id` to refer to tasks and recordings,\n    the flags have been changed to `-task` and `-recording` accordingly.\n* Replays can be listed and deleted like tasks and recordings.\n* Replays default to `fast` clock mode.\n* The record and replay commands now have a `-no-wait` option to start but not wait for the recording/replay to complete.\n* Listing recordings and replays displays the status of the respective action.\n* Record and Replay commands now have an optional flag `-replay-id`/`-recording-id` to specify the ID of the replay or recording.\n    If not set then a random ID will be chosen like the previous behavior.\n\n#### Notable features\n\nUDFs can now be managed externally to Kapacitor via Unix sockets.\nA process or container can be launched independently of Kapacitor, exposing a socket.\nOn startup Kapacitor will connect to the socket and begin communication.\n\nExample UDF config for a socket-based UDF:\n\n```\n[udf]\n[udf.functions]\n    [udf.functions.myCustomUDF]\n       socket = \"/path/to/socket\"\n       timeout = \"10s\"\n```\n\nAlert data can now be consumed directly from within TICKscripts.\nFor example, let's say we want to store all data that triggered an alert in InfluxDB with a tag `level` containing the level string value (i.e. CRITICAL).\n\n```javascript\n...\n    |alert()\n        .warn(...)\n        .crit(...)\n        .levelTag('level')\n        // and/or use a field\n        //.levelField('level')\n        // Also tag the data with the alert ID\n        .idTag('id')\n        // and/or use a field\n        //.idField('id')\n    |influxDBOut()\n        .database('alerts')\n        ...\n```\n\n\n### Features\n\n- [#360](https://github.com/influxdata/kapacitor/pull/360): Forking tasks by measurement in order to improve performance\n- [#386](https://github.com/influxdata/kapacitor/issues/386): Adds official Go HTTP client package.\n- [#399](https://github.com/influxdata/kapacitor/issues/399): Allow disabling of subscriptions.\n- [#417](https://github.com/influxdata/kapacitor/issues/417): UDFs can be connected over a Unix socket. 
This enables UDFs from across Docker containers.\n- [#451](https://github.com/influxdata/kapacitor/issues/451): StreamNode supports `|groupBy` and `|where` methods.\n- [#93](https://github.com/influxdata/kapacitor/issues/93): AlertNode now outputs data to child nodes. The output data can have either a tag or field indicating the alert level.\n- [#281](https://github.com/influxdata/kapacitor/issues/281): AlertNode now has an `.all()` property that specifies that all points in a batch must match the criteria in order to trigger an alert.\n- [#384](https://github.com/influxdata/kapacitor/issues/384): Add `elapsed` function to compute the time difference between subsequent points.\n- [#230](https://github.com/influxdata/kapacitor/issues/230): Alert.StateChangesOnly now accepts optional duration arg. An alert will be triggered for every interval even if the state has not changed.\n- [#426](https://github.com/influxdata/kapacitor/issues/426): Add `skip-format` query parameter to the `GET /task` endpoint so that returned TICKscript content is left unmodified from the user input.\n- [#388](https://github.com/influxdata/kapacitor/issues/388): The duration of an alert is now tracked and exposed as part of the alert data as well as can be set as a field via `.durationField('duration')`.\n- [#486](https://github.com/influxdata/kapacitor/pull/486): Default config file location.\n- [#461](https://github.com/influxdata/kapacitor/pull/461): Make Alerta `event` property configurable.\n- [#491](https://github.com/influxdata/kapacitor/pull/491): BREAKING: Rewrote stateful expressions in order to improve performance. The only breaking change is short-circuit evaluation for booleans - for example, in ``lambda: \"bool_value\" && (count() > 100)``, if \"bool_value\" is false we won't evaluate \"count\".\n- [#504](https://github.com/influxdata/kapacitor/pull/504): BREAKING: Many changes to the API and underlying storage system. This release requires a special upgrade process.\n- [#511](https://github.com/influxdata/kapacitor/pull/511): Adds DefaultNode for providing default values for missing fields or tags.\n- [#285](https://github.com/influxdata/kapacitor/pull/285): Track created, modified and last enabled dates on tasks.\n- [#533](https://github.com/influxdata/kapacitor/pull/533): Add useful statistics for nodes.\n\n### Bugfixes\n\n- [#499](https://github.com/influxdata/kapacitor/issues/499): Fix panic in InfluxQL nodes if field is missing or incorrect type.\n- [#441](https://github.com/influxdata/kapacitor/issues/441): Fix panic in UDF code.\n- [#429](https://github.com/influxdata/kapacitor/issues/429): BREAKING: Change TICKscript parser to be left-associative on equal precedence operators. For example, previously the statement `(1+2-3*4/5)` was evaluated as `(1+(2-(3*(4/5))))`,\n    which is not the typical/expected behavior. 
Now using left-associative parsing the statement is evaluated as `((1+2)-((3*4)/5))`.\n- [#456](https://github.com/influxdata/kapacitor/pull/456): Fixes Alerta integration to let server set status, fix `rawData` attribute and set default severity to `indeterminate`.\n- [#425](https://github.com/influxdata/kapacitor/pull/425): BREAKING: Preserving tags on influxql simple selectors - first, last, max, min, percentile\n- [#423](https://github.com/influxdata/kapacitor/issues/423): Recording stream queries with group by now correctly saves data in time order not group by order.\n- [#331](https://github.com/influxdata/kapacitor/issues/331): Fix panic when missing `.as()` for JoinNode.\n- [#523](https://github.com/influxdata/kapacitor/pull/523): JoinNode will now emit join sets as soon as they are ready. If multiple joinable sets arrive in the same tolerance window then each will be emitted (previously the first points were dropped).\n- [#537](https://github.com/influxdata/kapacitor/issues/537): Fix panic in alert node when batch is empty.\n\n## v0.12.0 [2016-04-04]\n\n### Release Notes\n\nNew TICKscript syntax that uses different operators for chaining methods vs property methods vs UDF methods.\n\n* A chaining method is a method that creates a new node in the pipeline. Uses the `|` operator.\n* A property method is a method that changes a property on a node. Uses the `.` operator.\n* A UDF method is a method that calls out to a UDF. Uses the `@` operator.\n\nFor example, below the `from`, `mean`, and `alert` methods create new nodes,\nthe `detectAnomalies` method calls a UDF,\nand the other methods modify the nodes as property methods.\n\n```javascript\nstream\n    |from()\n        .measurement('cpu')\n        .where(lambda: \"cpu\" == 'cpu-total')\n    |mean('usage_idle')\n        .as('value')\n    @detectAnomalies()\n        .field('mean')\n    |alert()\n        .crit(lambda: \"anomaly_score\" > 10)\n        .log('/tmp/cpu.log')\n```\n\nWith this change a new binary, `tickfmt`, is provided with Kapacitor which will\nformat a TICKscript file according to a common standard.\n\n\n### Features\n\n- [#299](https://github.com/influxdata/kapacitor/issues/299): Changes TICKscript chaining method operators and adds `tickfmt` binary.\n- [#389](https://github.com/influxdata/kapacitor/pull/389): Adds benchmarks to Kapacitor for basic use cases.\n- [#390](https://github.com/influxdata/kapacitor/issues/390): BREAKING: Remove old `.mapReduce` functions.\n- [#381](https://github.com/influxdata/kapacitor/pull/381): Add enable/disable/delete/reload of tasks by glob.\n- [#401](https://github.com/influxdata/kapacitor/issues/401): Add `.align()` property to BatchNode so you can align query start and stop times.\n\n### Bugfixes\n\n- [#378](https://github.com/influxdata/kapacitor/issues/378): Fix issue where derivative would divide by zero.\n- [#387](https://github.com/influxdata/kapacitor/issues/387): Add `.quiet()` option to EvalNode so errors can be suppressed if expected.\n- [#400](https://github.com/influxdata/kapacitor/issues/400): All query/connection errors are counted and reported in BatchNode stats.\n- [#412](https://github.com/influxdata/kapacitor/pull/412): Fix issues with batch queries dropping points because of nil fields.\n- [#413](https://github.com/influxdata/kapacitor/pull/413): Allow disambiguation between \".groupBy\" and \"|groupBy\".\n\n\n## v0.11.0 [2016-03-22]\n\n### Release Notes\n\nKapacitor is now using the functions from the new query engine in InfluxDB core.\nAlong with this change is a 
change in the TICKscript API so that using the InfluxQL functions is easier.\nSimply call the desired method directly; there is no need to call `.mapReduce` explicitly.\nThis change now hides the mapReduce aspect and handles it internally.\nUsing `.mapReduce` is officially deprecated in this release and will be removed in the next major release.\nWe feel that this change improves the readability of TICKscripts and exposes fewer implementation details\nto the end user.\nUpdating your existing TICKscripts is simple.\nIf previously you had code like this:\n\n```javascript\nstream.from()...\n    .window()...\n    .mapReduce(influxql.count('value'))\n```\nthen update it to look like this:\n\n```javascript\nstream.from()...\n    .window()...\n    .count('value')\n```\n\nA simple regex could fix all your existing scripts.
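\n\nFor example, here is a rough sketch of such a rewrite using Go's regexp package (a hypothetical helper; it assumes the function arguments contain no nested parentheses):\n\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n)\n\n// mapReduceCall matches calls of the form `.mapReduce(influxql.<fn>(<args>))`.\n// Scripts with nested parentheses in the arguments would need a real parser.\nvar mapReduceCall = regexp.MustCompile(`\\.mapReduce\\(influxql\\.(\\w+)\\((.*?)\\)\\)`)\n\nfunc main() {\n\tscript := \"stream.from().window().mapReduce(influxql.count('value'))\"\n\tfmt.Println(mapReduceCall.ReplaceAllString(script, \".$1($2)\"))\n\t// Output: stream.from().window().count('value')\n}\n```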
\n\nKapacitor now exposes more internal metrics for determining the performance of a given task.\nThe internal statistics include a new measurement named `node` that contains any stats a node provides, tagged by the task, node, task type and kind of node (i.e. window vs union).\nAll nodes provide an averaged execution time for the node.\nThese stats are also available in the DOT output of the Kapacitor show command.\n\nSignificant performance improvements have also been added.\nIn some cases Kapacitor throughput has improved by 4X.\n\nKapacitor can now connect to different InfluxDB clusters.\nMultiple InfluxDB config sections can be defined and one will be marked as default.\nTo upgrade, convert an `influxdb` config section.\n\nFrom this:\n\n```\n[influxdb]\n  enabled = true\n  ...\n```\n\nto this:\n\n```\n[[influxdb]]\n  enabled = true\n  default = true\n  name = \"localhost\"\n  ...\n```\n\nVarious improvements to joining features have been implemented.\nWith #144 you can now join streams with differing group by dimensions.\n\nIf you previously configured Email, Slack or HipChat globally, you must now also set the `state-changes-only` option to true if you want to preserve the original behavior.\nFor example:\n\n```\n[slack]\n   enabled = true\n   global = true\n   state-changes-only = true\n```\n\n### Features\n- [#236](https://github.com/influxdata/kapacitor/issues/236): Implement batched group by\n- [#231](https://github.com/influxdata/kapacitor/pull/231): Add ShiftNode so values can be shifted in time for joining/comparisons.\n- [#190](https://github.com/influxdata/kapacitor/issues/190): BREAKING: Deadman's switch now triggers off emitted counts and is grouped by the original grouping of the data.\n    The breaking change is that the 'collected' stat is no longer output for `.stats` and has been replaced by `emitted`.\n- [#145](https://github.com/influxdata/kapacitor/issues/145): The InfluxDB Out Node now writes data to InfluxDB in buffers.\n- [#215](https://github.com/influxdata/kapacitor/issues/215): Add performance metrics to nodes for average execution times and node throughput values.\n- [#144](https://github.com/influxdata/kapacitor/issues/144): Can now join streams with differing dimensions using the join.On property.\n- [#249](https://github.com/influxdata/kapacitor/issues/249): Can now use InfluxQL functions directly instead of via the MapReduce method. Example `stream.from().count()`.\n- [#233](https://github.com/influxdata/kapacitor/issues/233): BREAKING: Now you can use multiple InfluxDB clusters. The config changes to make this possible are breaking. See notes above for changes.\n- [#302](https://github.com/influxdata/kapacitor/issues/302): Can now use .Time in alert message.\n- [#239](https://github.com/influxdata/kapacitor/issues/239): Support more detailed TLS config when connecting to an InfluxDB host.\n- [#323](https://github.com/influxdata/kapacitor/pull/323): Stats for task execution are provided via JSON HTTP request instead of just DOT string. Thanks @yosiat!\n- [#358](https://github.com/influxdata/kapacitor/issues/358): Improved logging. Adds LogNode so any data in a pipeline can be logged.\n- [#366](https://github.com/influxdata/kapacitor/issues/366): HttpOutNode now allows chaining methods.\n\n\n### Bugfixes\n- [#199](https://github.com/influxdata/kapacitor/issues/199): BREAKING: Various fixes for the Alerta integration.\n    The `event` property has been removed from the Alerta node and is now set as the value of the alert ID.\n- [#232](https://github.com/influxdata/kapacitor/issues/232): Better error message for alert integrations. Better error message for VictorOps 404 response.\n- [#231](https://github.com/influxdata/kapacitor/issues/231): Fix window logic when there were gaps in the data stream longer than the window's `every` value.\n- [#213](https://github.com/influxdata/kapacitor/issues/213): Add SourceStreamNode so that you must always first call `.from` on the `stream` object before filtering it, so as to not create TICKscripts that are confusing to understand.\n- [#255](https://github.com/influxdata/kapacitor/issues/255): Add OPTIONS handler for task delete method so it can be preflighted.\n- [#258](https://github.com/influxdata/kapacitor/issues/258): Fix UDP internal metrics, change subscriptions to use clusterID.\n- [#240](https://github.com/influxdata/kapacitor/issues/240): BREAKING: Fix issues with Sensu integration. The breaking change is that the config no longer takes a `url` but rather a `host` option, since the communication is raw TCP rather than HTTP.\n- [#270](https://github.com/influxdata/kapacitor/issues/270): The HTTP server will now gracefully stop.\n- [#300](https://github.com/influxdata/kapacitor/issues/300): Add OPTIONS method to /recording endpoint for deletes.\n- [#304](https://github.com/influxdata/kapacitor/issues/304): Fix panic when recording a query without an InfluxDB instance configured.\n- [#289](https://github.com/influxdata/kapacitor/issues/289): Add better error handling to batch node.\n- [#142](https://github.com/influxdata/kapacitor/issues/142): Fixes bug when defining multiple influxdb hosts.\n- [#266](https://github.com/influxdata/kapacitor/issues/266): Fixes error log for HipChat that is not an error.\n- [#333](https://github.com/influxdata/kapacitor/issues/333): Fixes hang when replaying with .stats node. Fixes issues with batch and stats.\n- [#340](https://github.com/influxdata/kapacitor/issues/340): BREAKING: Decouples global setting for alert handlers from the state changes only setting.\n- [#348](https://github.com/influxdata/kapacitor/issues/348): config.go: refactor to simplify structure and fix support for array elements\n- [#362](https://github.com/influxdata/kapacitor/issues/362): Fix bug with join tolerance and batches.\n\n## v0.10.1 [2016-02-08]\n\n### Release Notes\n\nThis is a bug fix release that fixes many issues related to the recent 0.10.0 release.\nThe few additional features are focused on usability improvements from recent feedback.\n\nImproved UDFs, lots of bug fixes and improvements on the API. 
There was a breaking change to the UDF protobuf messages, see #176.\n\nThere was a breaking change to the `define` command, see [#173](https://github.com/influxdata/kapacitor/issues/173) below.\n\n### Features\n\n- [#176](https://github.com/influxdata/kapacitor/issues/176): BREAKING: Improved UDFs and groups. Now it is easy to deal with groups from the UDF process.\n    There is a breaking change in the BeginBatch protobuf message for this change.\n- [#196](https://github.com/influxdata/kapacitor/issues/196): Adds a 'details' property to the alert node so that the email body can be defined. See also [#75](https://github.com/influxdata/kapacitor/issues/75).\n- [#132](https://github.com/influxdata/kapacitor/issues/132): Make it so multiple calls to `where` simply `AND` expressions together instead of replacing or creating extra nodes in the pipeline.\n- [#173](https://github.com/influxdata/kapacitor/issues/173): BREAKING: Added a `-no-reload` flag to the define command in the CLI. Now, if the task is enabled, define will automatically reload it unless `-no-reload` is passed.\n- [#194](https://github.com/influxdata/kapacitor/pull/194): Adds Talk integration for alerts. Thanks @wutaizeng!\n- [#320](https://github.com/influxdata/kapacitor/pull/320): Upgrade to go 1.6\n\n### Bugfixes\n\n- [#177](https://github.com/influxdata/kapacitor/issues/177): Fix panic for show command on batch tasks.\n- [#185](https://github.com/influxdata/kapacitor/issues/185): Fix panic in define command with invalid dbrp value.\n- [#195](https://github.com/influxdata/kapacitor/issues/195): Fix panic in where node.\n- [#208](https://github.com/influxdata/kapacitor/issues/208): Add default stats dbrp to default subscription excludes.\n- [#203](https://github.com/influxdata/kapacitor/issues/203): Fix hang when deleting an invalid batch task.\n- [#182](https://github.com/influxdata/kapacitor/issues/182): Fix missing/incorrect Content-Type headers for various HTTP endpoints.\n- [#187](https://github.com/influxdata/kapacitor/issues/187): Retry connecting to InfluxDB on startup for up to 5 minutes by default.\n\n## v0.10.0 [2016-01-26]\n\n### Release Notes\n\nThis release marks the next major release of Kapacitor.\nWith this release you can now run your own custom code for processing data within Kapacitor.\nSee [udf/agent/README.md](https://github.com/influxdata/kapacitor/blob/master/udf/agent/README.md) for more details.\n\nWith the addition of UDFs it is now possible to run custom anomaly detection algorithms suited to your needs.\nThere are simple examples of how to use UDFs in [udf/agent/examples](https://github.com/influxdata/kapacitor/tree/master/udf/agent/examples/).\n\nThe version has jumped significantly so that it is in line with other projects in the TICK stack.\nThis way you can easily tell which versions of Telegraf, InfluxDB, Chronograf and Kapacitor work together.\n\nSee the note on a breaking change in the HTTP API below (#163).\n\n\n### Features\n- [#137](https://github.com/influxdata/kapacitor/issues/137): Add deadman's switch. Can be set up via TICKscript and globally via configuration.\n- [#72](https://github.com/influxdata/kapacitor/issues/72): Add support for User Defined Functions (UDFs).\n- [#139](https://github.com/influxdata/kapacitor/issues/139): Alerta.io support. Thanks @md14454!\n- [#85](https://github.com/influxdata/kapacitor/issues/85): Sensu support using JIT clients. 
Thanks @sstarcher!\n- [#141](https://github.com/influxdata/kapacitor/issues/141): Time of day expressions for silencing alerts.\n\n### Bugfixes\n- [#153](https://github.com/influxdata/kapacitor/issues/153): Fix panic if referencing a non-existent field in a MapReduce function.\n- [#138](https://github.com/influxdata/kapacitor/issues/138): Change over to influxdata github org.\n- [#164](https://github.com/influxdata/kapacitor/issues/164): Update imports etc. from InfluxDB as per the new meta store/client changes.\n- [#163](https://github.com/influxdata/kapacitor/issues/163): BREAKING CHANGE: Removed the 'api/v1' pathing from the HTTP API so that Kapacitor is\n    path compatible with InfluxDB. While this is a breaking change, the kapacitor cli has been updated accordingly and you will not experience any disruptions unless you\n    were calling the HTTP API directly.\n- [#147](https://github.com/influxdata/kapacitor/issues/147): Compress .tar archives from builds.\n\n## v0.2.4 [2016-01-07]\n\n### Release Notes\n\n### Features\n- [#118](https://github.com/influxdata/kapacitor/issues/118): Can now define multiple handlers of the same type on an AlertNode.\n- [#119](https://github.com/influxdata/kapacitor/issues/119): HipChat support. Thanks @ericiles! *2\n- [#113](https://github.com/influxdata/kapacitor/issues/113): OpsGenie support. Thanks @ericiles!\n- [#107](https://github.com/influxdata/kapacitor/issues/107): Enable TICKscript variables to be defined and then referenced from lambda expressions.\n        Also fixes various bugs around using regexes.\n\n### Bugfixes\n- [#124](https://github.com/influxdata/kapacitor/issues/124): Fix panic when there is an error starting a task.\n- [#122](https://github.com/influxdata/kapacitor/issues/122): Fixes panic when using WhereNode.\n- [#128](https://github.com/influxdata/kapacitor/issues/128): Fix not sending emails when using recipient list from config.\n\n## v0.2.3 [2015-12-22]\n\n### Release Notes\n\nBugfix #106 made a breaking change to the internal HTTP API. This was to facilitate integration testing and overall better design.\nNow POSTing a recording request will start the recording and immediately return. If you want to wait until it is complete, do\na GET for the recording info and it will block until it's complete. The kapacitor cli has been updated accordingly.\n\n### Features\n- [#96](https://github.com/influxdata/kapacitor/issues/96): Use KAPACITOR_URL env var for setting the kapacitord url in the client.\n- [#109](https://github.com/influxdata/kapacitor/pull/109): Add throughput counts to DOT format in `kapacitor show` command, if task is executing.\n\n### Bugfixes\n- [#102](https://github.com/influxdata/kapacitor/issues/102): Fix race when starting/stopping timeTicker in batch.go\n- [#106](https://github.com/influxdata/kapacitor/pull/106): Fix hang when replaying stream recording.\n\n\n## v0.2.2 [2015-12-16]\n\n### Release Notes\n\nSome bug fixes, including one that caused Kapacitor to deadlock.\n\n### Features\n- [#83](https://github.com/influxdata/kapacitor/pull/83): Use enterprise usage client, remove deprecated enterprise register and reporting features.\n\n### Bugfixes\n\n- [#86](https://github.com/influxdata/kapacitor/issues/86): Fix deadlock from errors in tasks. Also fixes issue where task failures did not get logged.\n- [#95](https://github.com/influxdata/kapacitor/pull/95): Fix race in bolt usage when starting enabled tasks at startup.\n\n## v0.2.0 [2015-12-8]\n\n### Release Notes\n\nMajor public release.\n\n\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/CONTRIBUTING.md",
    "content": "Contributing to Kapacitor\n=========================\n\nBug reports\n---------------\nBefore you file an issue, please search existing issues in case it has already been filed, or perhaps even fixed.\nIf you file an issue, please include the following.\n* Full details of your operating system (or distribution) e.g. 64-bit Ubuntu 14.04.\n* The version of Kapacitor you are running\n* Whether you installed it using a pre-built package, or built it from source.\n* A small test case, if applicable, that demonstrates the issues.\n\nRemember the golden rule of bug reports: **The easier you make it for us to reproduce the problem, the faster it will get fixed.**\nIf you have never written a bug report before, or if you want to brush up on your bug reporting skills, we recommend reading [Simon Tatham's essay \"How to Report Bugs Effectively.\"](http://www.chiark.greenend.org.uk/~sgtatham/bugs.html)\n\nPlease note that issues are *not the place to file general questions* such as \"how do I use InfluxDB with Kapacitor?\" Questions of this nature should be sent to the [Google Group](https://groups.google.com/forum/#!forum/influxdb), not filed as issues. Issues like this will be closed.\n\nFeature requests\n---------------\nWe really like to receive feature requests, as it helps us prioritize our work.\nPlease be clear about your requirements, as incomplete feature requests may simply be closed if we don't understand what you would like to see added to Kapacitor.\n\nContributing to the source code\n---------------\n\nKapacitor follows standard Go project structure.\nThis means that all your go development are done in `$GOPATH/src`.\nGOPATH can be any directory under which InfluxDB and all its dependencies will be cloned.\nFor more details on recommended go project's structure, see [How to Write Go Code](http://golang.org/doc/code.html) and\n[Go: Best Practices for Production Environments](http://peter.bourgon.org/go-in-production/), or you can just follow the steps below.\n\nSubmitting a pull request\n------------\nTo submit a pull request you should fork the Kapacitor repository, and make your change on a feature branch of your fork.\nThen generate a pull request from your branch against *master* of the Kapacitor repository.\nInclude in your pull request details of your change -- the why *and* the how -- as well as the testing your performed.\nAlso, be sure to run the test suite with your change in place. 
Changes that cause tests to fail cannot be merged.\n\nThere will usually be some back and forth as we finalize the change, but once that completes it may be merged.\n\nTo assist in review for the PR, please add the following to your pull request comment:\n\n```md\n- [ ] CHANGELOG.md updated\n- [ ] Rebased/mergable\n- [ ] Tests pass\n- [ ] Sign [CLA](http://influxdb.com/community/cla.html) (if not already signed)\n```\n\nUse of third-party packages\n---------------------------\nA third-party package is defined as one that is not part of the standard Go distribution.\nGenerally speaking we prefer to minimize our use of third-party packages, and avoid them unless absolutely necessary.\nWe'll often write a little bit of code rather than pull in a third-party package.\nSo to maximise the chance your change will be accepted by us, use only the standard libraries, or the third-party packages we have decided to use.\n\nFor rationale, check out the post [The Case Against Third Party Libraries](http://blog.gopheracademy.com/advent-2014/case-against-3pl/).\n\nSigning the CLA\n---------------\n\nIf you are going to be contributing back to Kapacitor please take a second to sign our CLA, which can be found\n[on our website](http://influxdb.com/community/cla.html).\n\nInstalling Go\n-------------\n\nKapacitor typically requires the latest version of Go.\n\nTo install Go, see https://golang.org/dl/\n\nGetting the source\n------\nSet up the project structure and fetch the repo like so:\n\n    mkdir $HOME/go\n    export GOPATH=$HOME/go\n    go get github.com/influxdata/kapacitor\n\nYou can add the line `export GOPATH=$HOME/go` to your bash/zsh profile so it is set for every shell, instead of having to run it manually every time.\n\nCloning a fork\n-------------\nIf you wish to work with a fork of Kapacitor, your own fork for example, you must still follow the directory structure above.\nBut instead of cloning the main repo, clone your fork. Follow the steps below to work with a fork:\n\n    export GOPATH=$HOME/go\n    mkdir -p $GOPATH/src/github.com/influxdata\n    cd $GOPATH/src/github.com/influxdata\n    git clone git@github.com:<username>/kapacitor\n\nRetaining the directory structure `$GOPATH/src/github.com/influxdata` is necessary so that Go imports work correctly.\n\nPre-commit checks\n-------------\n\nWe have a pre-commit hook to make sure code is formatted properly and vetted before you commit any changes. We strongly recommend using the pre-commit hook to guard against accidentally committing unformatted code. To use the pre-commit hook, run the following:\n\n    cd $GOPATH/src/github.com/influxdata/kapacitor\n    cp .hooks/pre-commit .git/hooks/\n\nIn case the commit is rejected because it's not formatted, you can run\nthe following to format and vet the code:\n\n```\ngo fmt ./...\ngo vet ./...\n```\n\nFor more information on `go vet`, [read the GoDoc](https://godoc.org/golang.org/x/tools/cmd/vet).\n\nBuild and Test\n--------------\n\nMake sure you have Go installed and the project structure as shown above. To then build the project, execute the following commands:\n\n```bash\ncd $GOPATH/src/github.com/influxdata/kapacitor\ngo build ./cmd/kapacitor\ngo build ./cmd/kapacitord\n```\nKapacitor builds two binaries, named `kapacitor` and `kapacitord`.\n\nTo run the tests, execute the following command:\n\n```bash\ngo test $(go list ./... 
| grep -v /vendor/)\n```\n\nDependencies\n------------\n\nKapacitor vendors all dependencies.\nKapacitor uses the golang [dep](https://github.com/golang/dep) tool.\n\nInstall the dep tool:\n\n```\ngo get -u github.com/golang/dep\n```\n\nSee the dep help for usage and documentation.\n\nKapacitor commits vendored deps into the repo; as a result, always run `dep prune` after any `dep ensure` operation.\nThis helps keep the amount of code committed to a minimum.\n\n\nGenerating Code\n---------------\n\nKapacitor uses generated code.\nThe generated code is committed to the repository, so normally it is not necessary to regenerate it.\nBut if you modify one of the templates for code generation, you must re-run the generate commands.\n\nGo provides a consistent command for generating all necessary code:\n\n```bash\ngo generate ./...\n```\n\nFor the generate command to succeed, you will need a few dependencies installed on your system.\nThese dependencies are already vendored in the code and can be installed from there.\n\n* tmpl -- A utility used to generate code from templates. Install via `go install ./vendor/github.com/benbjohnson/tmpl`\n* protoc + protoc-gen-go -- A protobuf compiler plus the protoc-gen-go extension.\n    You need version 3.0.0 of protoc.\n    To install the go plugin run `go install ./vendor/github.com/golang/protobuf/protoc-gen-go`\n\nThe Build Script\n----------------\n\nThe above commands have all been encapsulated for you in a `build.py` script.\nThe script has flags for testing code, building binaries and complete distribution packages.\n\nTo build kapacitor use:\n\n```bash\n./build.py\n```\n\nTo run the tests use:\n\n```bash\n./build.py --test\n```\n\nIf you want to generate code run:\n\n```bash\n./build.py --generate\n```\n\nIf you want to build packages run:\n\n```bash\n./build.py --packages\n```\n\nThere are many more options available; see\n\n```bash\n./build.py --help\n```\n\n\nThe Build Script + Docker\n-------------------------\n\nKapacitor requires a few extra dependencies to perform certain build actions.\nSpecifically, to build packages or to regenerate any of the generated code, you will need a few extra tools.\nA `build.sh` script is provided that will run `build.py` in a docker container with all the needed dependencies installed with correct versions.\n\nAll you need is to have docker installed; then use the `./build.sh` command as if it were the `./build.py` command.\n\n\nProfiling\n---------\nWhen troubleshooting problems with CPU or memory the Go toolchain can be helpful. You can start Kapacitor with CPU or memory profiling turned on. For example:\n\n```sh\n# start kapacitord with profiling\n./kapacitord -cpuprofile kapacitord.prof\n# run task, replays whatever you're testing\n# Quit out of kapacitord and kapacitord.prof will then be written.\n# open up pprof to examine the profiling data.\ngo tool pprof ./kapacitord kapacitord.prof\n# once inside run \"web\", opens up browser with the CPU graph\n# can also run \"web <function name>\" to zoom in. 
Or \"list <function name>\" to see specific lines\n```\nNote that when you pass the binary to `go tool pprof` *you must specify the path to the binary*.\n\nContinuous Integration testing\n------------------------------\nKapacitor uses CircleCI for continuous integration testing.\n\nUseful links\n------------\n- [Useful techniques in Go](http://arslan.io/ten-useful-techniques-in-go)\n- [Go in production](http://peter.bourgon.org/go-in-production/)\n- [Principles of designing Go APIs with channels](https://inconshreveable.com/07-08-2014/principles-of-designing-go-apis-with-channels/)\n- [Common mistakes in Golang](http://soryy.com/blog/2014/common-mistakes-with-go-lang/). Especially this section `Loops, Closures, and Local Variables`\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/DESIGN.md",
    "content": "# Kapacitor Internal Design\n\nThis document is meant to layout both the high level design of Kapacitor as well and discuss the details\nof the implementation.\n\nIt should be accessible to someone wanting to contribute to Kapacitor.\n\n\n## Topic\n\n* Key Concepts\n* Data Flow -- How data flows through Kapacitor\n* TICKscript -- How TICKscript is implemented (not written yet)\n* kapacitord/kapacitor -- How the daemon ties all the pieces together. (not written yet)\n\n\n## Key Concepts\n\nKapacitor is a framework for processing time series data.\nIt follows a [flow based programing](https://en.wikipedia.org/wiki/Flow-based_programming) model.\nData flows from node to node and each node is a *black box* process that can manipulate the data in any way.\nThe data model used to transport data from node to node matches the schema used by InfluxDB, namely measurements, tags and fields.\nNodes can be arranged in a directed acyclic graph [(DAG)](https://en.wikipedia.org/wiki/Directed_acyclic_graph).\n\n### Tasks\n\nUsers define tasks for Kapacitor to run.\nA task defines a DAG of nodes that process the data.\nThis task is defined via a DSL named TICKscript.\nTo learn more about how to use and interact with Kapacitor see the [docs](https://docs.influxdata.com/kapacitor/).\n\nA task defines a potentially infinite amount of work to be done.\nThe amount work is determined by the data that is received by the task.\nOnce the source data stream is *closed* the task is complete.\nIt is normal for a task to never complete but rather run indefinitely.\nAs a result, tasks can be in one of three states, disabled, enabled not executing, and enabled executing.\nAn enabled task is not executing if it encountered an error or its data source was closed.\n\n## Data Flow\n\nData flows from node to node and each node is a black box that can process the data the however it sees fit.\nIn order for a system like this to work the transport method and data model needs to be well defined.\n\n### Models\n\nThe data model for transporting data has two types:\n\n* Stream -- Data points are passed as single entities.\n* Batch -- Data points are passed in groups of data.\n\nA batch consists of a type that describes the common attributes of all data points within the batch\nand a list of all the individual data points.\n\nA data point consists of a timestamp, a map of fields, and a map of tags.\nWhen data points are transfered as a stream not within the context of a batch they\nalso contain information on their scope, i.e database, retention policy and measurement.\nThis data model is schemaless in that the names of fields and tags are arbitrary and opaque to Kapacitor.\n\nLastly both batches and streamed data points contain information about the *group* they belong two if\nthe data set has been grouped by any number of dimensions. 
\n\n### Time\n\nTime is measured based on the timestamps of the data flowing through a node.\nIf data flow stops, so does time.\nIf a node performs a transformation dependent on time then it is always consistent based on a given data set.\n\n### Edges\n\nKapacitor models data transfer along *edges*.\nAn edge connects exactly two nodes and data flows from the *parent* node to the *child* node.\nThere are two actions performed on an edge:\n\n* Collect -- The parent presents a data point for the edge to consume.\n* Emit -- The child retrieves a data point from the edge.\n\nFrom the perspective of an *edge*, data is collected from a parent and then emitted to a child.\nFrom the perspective of a *node*, data is pulled off of any number of parent\nedges and collected into any number of child edges.\nNodes, not edges, control the flow of the data. Edges simply provide the transport mechanism,\nmeaning that if a child node stops pulling data from its parent edge, data flow stops.\n\nEdges are typed, meaning that a given edge only transports a certain type of data, i.e. streams or batches.\nNodes are said to *want* parent edges of a certain type and to *provide* child edges of a certain type.\nA node can want and provide the same or different types of edges. For example, the `WindowNode` wants a stream edge\nwhile providing a batch edge.\n\nModeling data flow through edges allows for the transport mechanism to be abstracted.\nIf the data is being transferred within the same process then it can be sent via in-memory structures;\nif the data is being transferred to another Kapacitor host it can be serialized and transferred accordingly.\n\nThe current implementation of an edge uses Go channels and can be found in `edge.go`.\nThere are three channels per edge instance, but only one is ever non-nil, depending on the type of the edge.\nDirect access to the channels is not provided; instead, wrapper methods for collecting and emitting the data are exposed.\nThis allows the edge to keep counts on throughput and to be aborted at any point.\nThe channels are currently unbuffered; this will probably need to change eventually, but for now the simplicity is useful.
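\n\nA minimal sketch of the stream case (hypothetical names; the real implementation in `edge.go` also handles batch edges and richer instrumentation):\n\n```go\npackage kapacitor\n\nimport (\n\t\"errors\"\n\t\"sync/atomic\"\n)\n\n// Point stands in for the stream data point type sketched earlier.\ntype Point struct {\n\tTags   map[string]string\n\tFields map[string]interface{}\n}\n\n// StreamEdge transports points from a parent node to a child node.\ntype StreamEdge struct {\n\tpoints    chan Point    // unbuffered transport channel\n\tcollected int64         // count of points collected from the parent\n\temitted   int64         // count of points emitted to the child\n\taborted   chan struct{} // closed to abort the edge at any point\n}\n\n// Collect is called by the parent node to hand a point to the edge.\nfunc (e *StreamEdge) Collect(p Point) error {\n\tselect {\n\tcase e.points <- p:\n\t\tatomic.AddInt64(&e.collected, 1)\n\t\treturn nil\n\tcase <-e.aborted:\n\t\treturn errors.New(\"edge aborted\")\n\t}\n}\n\n// Emit is called by the child node to pull the next point off the edge.\n// The second return value is false once the edge is closed or aborted.\nfunc (e *StreamEdge) Emit() (Point, bool) {\n\tselect {\n\tcase p, ok := <-e.points:\n\t\tif ok {\n\t\t\tatomic.AddInt64(&e.emitted, 1)\n\t\t}\n\t\treturn p, ok\n\tcase <-e.aborted:\n\t\treturn Point{}, false\n\t}\n}\n```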
\n\nPassing batch data can be accomplished in one of two ways.\nFirst, pass the data as a single object containing the complete batch and all points.\nSecond, pass marker objects that indicate the beginning and end of batches and stream individual points between the markers.\nThe marker objects can also contain the data common to the batch.\nCurrently the first option is used.\nThis has the advantage that fewer objects are passing through the channels.\nIt also works better with the current map-reduce functions in core, since they expect all the data in a single object.\nIt has the disadvantage that the whole batch has to be held in memory.\nIn some cases the entire batch does need to live in memory, but not in all.\nFor example, a node that is counting points per batch needs only to maintain a counter in memory and not the entire batch.\n\n### Source Mapping\n\nKapacitor can receive data from many different sources, including querying InfluxDB.\nThe type TaskMaster in `task_master.go` is responsible for managing which tasks are receiving which data.\n\nFor stream tasks this is done by having one global edge.\nAll sources (graphite, collectd, http, etc) write their data to the TaskMaster, which writes the data to the global *stream* edge.\nWhen a stream task is started it gets a *fork* of the global stream filtered down by the databases and retention policies it is allowed to access.\nThen the task can further process the data stream.\n\nIn the case of batch tasks, the TaskMaster manages starting the schedules for querying InfluxDB.\nThe results of the queries are passed to the root nodes of the task directly.\n\n\n### Windowing\n\nWindowing data is an important piece to creating pipelines.\nWindowing is concerned with how you can slice a data stream into multiple windows and is orthogonal to how batches are transferred.\nKapacitor handles windowing explicitly, by allowing the user to define a WindowNode\nthat has two parameters. First, the `period` is the length of the window in time.\nSecond, the `every` property defines how often a window should be emitted into the stream.\nThis allows for creating windows that overlap, have no overlap, or have gaps between the windows.\nAs a result the concept of a window does not exist inherently in the data stream; rather, windowing is the method of converting a stream of data into a batch of data.\n\nExample TICKscript:\n\n```javascript\nstream\n    .window()\n        .period(10s)\n        .every(5s)\n```\n\nThe above script slices the incoming stream into overlapping windows.\nEach window contains the last 10s of data and a new window is emitted every 5s.\n\n\n### Challenges\n\nChallenges with the current implementation:\n\n* For stream tasks: If a single node stops processing data, all nodes will eventually stop, including nodes from other tasks.\n    This is because of the global stream used to aggregate all incoming sources and the fact that the edges just block instead of dropping data.\n    This could be mitigated further by creating independent streams for each database retention policy pair, but this only provides isolation and not a solution.\n    We need a contract in place for what to do when a given node stops processing data.\n* Nodes are responsible for not creating deadlock in the way they read and write data from their parent and child edges.\n    For example the `JoinNode` has multiple parents and has to guarantee that the goroutines that are reading from the parents never block on each other.\n    Otherwise a deadlock can be created since a parent may be blocked writing to the JoinNode while the JoinNode is blocked reading from a different parent.\n    Since both parents could have a common ancestor the blocked parent will eventually block the ancestor which in turn will block the other parent.\n* Fragile: so far the smallest of changes to the way the system works almost always results in a deadlock, because of the order of processing data.\n* If data flow stops so does time. In many use cases this is exactly what you want, but in some cases you would still like the data in transit to be flushed out.\n    As for monitoring the throughput of tasks, this is possible out-of-band of the task, so even if the task stops processing data you can still trigger an event in a different task.\n\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/Dockerfile_build_ubuntu32",
    "content": "FROM 32bit/ubuntu:16.04\n\n# This dockerfile capabable  of the the minumum required\n# to run the tests and nothing else.\n\nMAINTAINER support@influxdb.com\n\nRUN apt-get -qq update && apt-get -qq install -y \\\n    wget \\\n    unzip \\\n    git \\\n    mercurial \\\n    build-essential \\\n    autoconf \\\n    automake \\\n    libtool \\\n    python-setuptools \\\n    zip \\\n    curl\n\n# Install protobuf3 protoc binary\nENV PROTO_VERSION 3.0.0\nRUN wget -q https://github.com/google/protobuf/releases/download/v${PROTO_VERSION}/protoc-${PROTO_VERSION}-linux-x86_32.zip\\\n    && unzip -j protoc-${PROTO_VERSION}-linux-x86_32.zip bin/protoc -d /bin \\\n    rm protoc-${PROTO_VERSION}-linux-x86_64.zip\n\n# Install protobuf3 python library\nRUN wget -q https://github.com/google/protobuf/releases/download/v${PROTO_VERSION}/protobuf-python-${PROTO_VERSION}.tar.gz \\\n    && tar -xf protobuf-python-${PROTO_VERSION}.tar.gz \\\n    && cd /protobuf-${PROTO_VERSION}/python \\\n    && python setup.py install \\\n    && rm -rf /protobuf-${PROTO_VERSION} protobuf-python-${PROTO_VERSION}.tar.gz\n\n# Install go\nENV GOPATH /root/go\nENV GO_VERSION 1.7.5\nENV GO_ARCH 386\nRUN wget -q https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \\\n   tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \\\n   rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz\nENV PATH /usr/local/go/bin:$PATH\n\nENV PROJECT_DIR $GOPATH/src/github.com/influxdata/kapacitor\nENV PATH $GOPATH/bin:$PATH\nRUN mkdir -p $PROJECT_DIR\nWORKDIR $PROJECT_DIR\n\nVOLUME $PROJECT_DIR\n\nENTRYPOINT [ \"/root/go/src/github.com/influxdata/kapacitor/build.py\" ]\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/Dockerfile_build_ubuntu64",
    "content": "FROM ubuntu:latest\n\n# This dockerfile is capabable of performing all\n# build/test/package/deploy actions needed for Kapacitor.\n\nMAINTAINER support@influxdb.com\n\nRUN apt-get -qq update && apt-get -qq install -y \\\n    python-software-properties \\\n    software-properties-common \\\n    wget \\\n    unzip \\\n    git \\\n    mercurial \\\n    make \\\n    ruby \\\n    ruby-dev \\\n    rpm \\\n    zip \\\n    python \\\n    python-boto \\\n    build-essential \\\n    autoconf \\\n    automake \\\n    libtool \\\n    python-setuptools \\\n    curl\n\nRUN gem install fpm\n\n# Install protobuf3 protoc binary\nENV PROTO_VERSION 3.0.0\nRUN wget -q https://github.com/google/protobuf/releases/download/v${PROTO_VERSION}/protoc-${PROTO_VERSION}-linux-x86_64.zip \\\n    && unzip -j protoc-${PROTO_VERSION}-linux-x86_64.zip bin/protoc -d /bin \\\n    rm protoc-${PROTO_VERSION}-linux-x86_64.zip\n\n# Install protobuf3 python library\nRUN wget -q https://github.com/google/protobuf/releases/download/v${PROTO_VERSION}/protobuf-python-${PROTO_VERSION}.tar.gz \\\n    && tar -xf protobuf-python-${PROTO_VERSION}.tar.gz \\\n    && cd /protobuf-${PROTO_VERSION}/python \\\n    && python setup.py install \\\n    && rm -rf /protobuf-${PROTO_VERSION} protobuf-python-${PROTO_VERSION}.tar.gz\n\n# Install go\nENV GOPATH /root/go\nENV GO_VERSION 1.7.5\nENV GO_ARCH amd64\nRUN wget -q https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz; \\\n   tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz ; \\\n   rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz\nENV PATH /usr/local/go/bin:$PATH\n\nENV PROJECT_DIR $GOPATH/src/github.com/influxdata/kapacitor\nENV PATH $GOPATH/bin:$PATH\nRUN mkdir -p $PROJECT_DIR\nWORKDIR $PROJECT_DIR\n\nVOLUME $PROJECT_DIR\nVOLUME /root/go/src\n\n# Configure local git\nRUN git config --global user.email \"support@influxdb.com\"\nRUN git config --global user.Name \"Docker Builder\"\n\nENTRYPOINT [ \"/root/go/src/github.com/influxdata/kapacitor/build.py\" ]\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/Gopkg.toml",
    "content": "required = [\"github.com/benbjohnson/tmpl\",\"github.com/golang/protobuf/protoc-gen-go\"]\n\n[[constraint]]\n  branch = \"master\"\n  name = \"github.com/davecgh/go-spew\"\n\n[[constraint]]\n  branch = \"master\"\n  name = \"github.com/evanphx/json-patch\"\n\n[[constraint]]\n  branch = \"master\"\n  name = \"github.com/ghodss/yaml\"\n\n[[constraint]]\n  branch = \"master\"\n  name = \"github.com/google/uuid\"\n\n[[constraint]]\n  name = \"github.com/influxdata/influxdb\"\n  version = \"~1.1.0\"\n\n[[constraint]]\n  branch = \"master\"\n  name = \"github.com/mitchellh/mapstructure\"\n\n[[constraint]]\n  branch = \"logger-targetmanager-wait\"\n  name = \"github.com/prometheus/prometheus\"\n  source = \"github.com/goller/prometheus\"\n\n[[constraint]]\n  branch = \"master\"\n  name = \"github.com/shurcooL/markdownfmt\"\n\n[[constraint]]\n  name = \"github.com/eclipse/paho.mqtt.golang\"\n  version = \"~1.0.0\"\n\n# Force the Azure projects to be a specific older version that Prometheus needs\n[[override]]\n  name = \"github.com/Azure/azure-sdk-for-go\"\n  revision = \"bd73d950fa4440dae889bd9917bff7cef539f86e\"\n\n[[override]]\n  name = \"github.com/Azure/go-autorest\"\n  revision = \"a2fdd780c9a50455cecd249b00bdc3eb73a78e31\"\n\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/LICENSE",
    "content": "The MIT License (MIT)\n\nCopyright (c) 2015 InfluxDB\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/LICENSE_OF_DEPENDENCIES.md",
    "content": "Dependencies\n============\n\n* github.com/BurntSushi/toml [WTFPL](https://github.com/BurntSushi/toml/blob/master/COPYING)\n* github.com/boltdb/bolt [MIT](https://github.com/boltdb/bolt/blob/master/LICENSE)\n* github.com/cenkalti/backoff [MIT](https://github.com/cenkalti/backoff/blob/master/LICENSE)\n* github.com/dgrijalva/jwt-go [MIT](https://github.com/dgrijalva/jwt-go/blob/master/LICENSE)\n* github.com/dustin/go-humanize [MIT](https://github.com/dustin/go-humanize/blob/master/LICENSE)\n* github.com/golang/protobuf [BSD](https://github.com/golang/protobuf/blob/master/LICENSE)\n* github.com/google/uuid [BSD](https://github.com/google/uuid/blob/master/LICENSE)\n* github.com/gorhill/cronexpr [APLv2](https://github.com/gorhill/cronexpr/blob/master/APLv2)\n* github.com/k-sone/snmpgo [MIT](https://github.com/k-sone/snmpgo/blob/master/LICENSE)\n* github.com/kimor79/gollectd [BSD](https://github.com/kimor79/gollectd/blob/master/LICENSE)\n* github.com/mattn/go-runewidth [MIT](https://github.com/mattn/go-runewidth/blob/master/README.mkd)\n* github.com/mitchellh/copystructure[MIT](https://github.com/mitchellh/copystructure/blob/master/LICENSE)\n* github.com/mitchellh/reflectwalk [MIT](https://github.com/mitchellh/reflectwalk/blob/master/LICENSE)\n* github.com/pkg/errors [BSD](https://github.com/pkg/errors/blob/master/LICENSE)\n* github.com/russross/blackfriday [BSD](https://github.com/russross/blackfriday/blob/master/LICENSE.txt)\n* github.com/serenize/snaker [MIT](https://github.com/serenize/snaker/blob/master/LICENSE.txt)\n* github.com/shurcooL/go [MIT](https://github.com/shurcooL/go/blob/master/README.md)\n* github.com/shurcooL/markdownfmt [MIT](https://github.com/shurcooL/markdownfmt/blob/master/README.md)\n* github.com/shurcooL/sanitized\\_anchor\\_name [MIT](https://github.com/shurcooL/sanitized_anchor_name/blob/master/LICENSE)\n* github.com/stretchr/testify [MIT](https://github.com/stretchr/testify/blob/master/LICENSE)\n* gopkg.in/gomail.v2 [MIT](https://github.com/go-gomail/gomail/blob/v2/LICENSE)\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/README.md",
    "content": "# Kapacitor [![Circle CI](https://circleci.com/gh/influxdata/kapacitor/tree/master.svg?style=svg&circle-token=78c97422cf89526309e502a290c230e8a463229f)](https://circleci.com/gh/influxdata/kapacitor/tree/master) [![Docker pulls](https://img.shields.io/docker/pulls/library/kapacitor.svg)](https://hub.docker.com/_/kapacitor/)\nOpen source framework for processing, monitoring, and alerting on time series data\n\n# Installation\n\nKapacitor has two binaries:\n\n* kapacitor – a CLI program for calling the Kapacitor API.\n* kapacitord – the Kapacitor server daemon.\n\nYou can either download the binaries directly from the [downloads](https://influxdata.com/downloads/#kapacitor) page or go get them:\n\n```sh\ngo get github.com/influxdata/kapacitor/cmd/kapacitor\ngo get github.com/influxdata/kapacitor/cmd/kapacitord\n```\n\n# Configuration\nAn example configuration file can be found [here](https://github.com/influxdata/kapacitor/blob/master/etc/kapacitor/kapacitor.conf)\n\nKapacitor can also provide an example config for you using this command:\n\n```sh\nkapacitord config\n```\n\n\n# Getting Started\n\nThis README gives you a high level overview of what Kapacitor is and what its like to use it. As well as some details of how it works.\nTo get started using Kapacitor see [this guide](https://docs.influxdata.com/kapacitor/latest/introduction/getting_started/). After you finish the getting started exercise you can check out the [TICKscripts](https://github.com/influxdata/kapacitor/tree/master/examples/telegraf) for different Telegraf plugins.\n\n# Basic Example\n\nKapacitor use a DSL named [TICKscript](https://docs.influxdata.com/kapacitor/latest/tick/) to define tasks.\n\nA simple TICKscript that alerts on high cpu usage looks like this:\n\n```javascript\nstream\n    |from()\n        .measurement('cpu_usage_idle')\n        .groupBy('host')\n    |window()\n        .period(1m)\n        .every(1m)\n    |mean('value')\n    |eval(lambda: 100.0 - \"mean\")\n        .as('used')\n    |alert()\n        .message('{{ .Level}}: {{ .Name }}/{{ index .Tags \"host\" }} has high cpu usage: {{ index .Fields \"used\" }}')\n        .warn(lambda: \"used\" > 70.0)\n        .crit(lambda: \"used\" > 85.0)\n\n        // Send alert to hander of choice.\n\n        // Slack\n        .slack()\n        .channel('#alerts')\n\n        // VictorOps\n        .victorOps()\n        .routingKey('team_rocket')\n\n        // PagerDuty\n        .pagerDuty()\n```\n\nPlace the above script into a file `cpu_alert.tick` then run these commands to start the task:\n\n```sh\n# Define the task (assumes cpu data is in db 'telegraf')\nkapacitor define \\\n    cpu_alert \\\n    -type stream \\\n    -dbrp telegraf.default \\\n    -tick ./cpu_alert.tick\n# Start the task\nkapacitor enable cpu_alert\n```\n\nFor more complete examples see the [documentation](https://docs.influxdata.com/kapacitor/latest/examples/).\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/alert.go",
    "content": "package kapacitor\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"fmt\"\n\thtml \"html/template\"\n\t\"log\"\n\t\"os\"\n\t\"sync\"\n\ttext \"text/template\"\n\t\"time\"\n\n\t\"github.com/influxdata/kapacitor/alert\"\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/expvar\"\n\t\"github.com/influxdata/kapacitor/models\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n\talertservice \"github.com/influxdata/kapacitor/services/alert\"\n\t\"github.com/influxdata/kapacitor/services/hipchat\"\n\t\"github.com/influxdata/kapacitor/services/httppost\"\n\t\"github.com/influxdata/kapacitor/services/mqtt\"\n\t\"github.com/influxdata/kapacitor/services/opsgenie\"\n\t\"github.com/influxdata/kapacitor/services/pagerduty\"\n\t\"github.com/influxdata/kapacitor/services/pushover\"\n\t\"github.com/influxdata/kapacitor/services/sensu\"\n\t\"github.com/influxdata/kapacitor/services/slack\"\n\t\"github.com/influxdata/kapacitor/services/smtp\"\n\t\"github.com/influxdata/kapacitor/services/snmptrap\"\n\t\"github.com/influxdata/kapacitor/services/telegram\"\n\t\"github.com/influxdata/kapacitor/services/victorops\"\n\t\"github.com/influxdata/kapacitor/tick/ast\"\n\t\"github.com/influxdata/kapacitor/tick/stateful\"\n\t\"github.com/pkg/errors\"\n)\n\nconst (\n\tstatsAlertsTriggered = \"alerts_triggered\"\n\tstatsOKsTriggered    = \"oks_triggered\"\n\tstatsInfosTriggered  = \"infos_triggered\"\n\tstatsWarnsTriggered  = \"warns_triggered\"\n\tstatsCritsTriggered  = \"crits_triggered\"\n\tstatsEventsDropped   = \"events_dropped\"\n)\n\n// The newest state change is weighted 'weightDiff' times more than oldest state change.\nconst weightDiff = 1.5\n\n// Maximum weight applied to newest state change.\nconst maxWeight = 1.2\n\ntype AlertNode struct {\n\tnode\n\ta           *pipeline.AlertNode\n\ttopic       string\n\tanonTopic   string\n\thandlers    []alert.Handler\n\tlevels      []stateful.Expression\n\tscopePools  []stateful.ScopePool\n\tidTmpl      *text.Template\n\tmessageTmpl *text.Template\n\tdetailsTmpl *html.Template\n\n\talertsTriggered *expvar.Int\n\toksTriggered    *expvar.Int\n\tinfosTriggered  *expvar.Int\n\twarnsTriggered  *expvar.Int\n\tcritsTriggered  *expvar.Int\n\teventsDropped   *expvar.Int\n\n\tbufPool sync.Pool\n\n\tlevelResets  []stateful.Expression\n\tlrScopePools []stateful.ScopePool\n}\n\n// Create a new  AlertNode which caches the most recent item and exposes it over the HTTP API.\nfunc newAlertNode(et *ExecutingTask, n *pipeline.AlertNode, l *log.Logger) (an *AlertNode, err error) {\n\tan = &AlertNode{\n\t\tnode: node{Node: n, et: et, logger: l},\n\t\ta:    n,\n\t}\n\tan.node.runF = an.runAlert\n\n\tan.topic = n.Topic\n\t// Create anonymous topic name\n\tan.anonTopic = fmt.Sprintf(\"%s:%s:%s\", et.tm.ID(), et.Task.ID, an.Name())\n\n\t// Create buffer pool for the templates\n\tan.bufPool = sync.Pool{\n\t\tNew: func() interface{} {\n\t\t\treturn new(bytes.Buffer)\n\t\t},\n\t}\n\n\t// Parse templates\n\tan.idTmpl, err = text.New(\"id\").Parse(n.Id)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tan.messageTmpl, err = text.New(\"message\").Parse(n.Message)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tan.detailsTmpl, err = html.New(\"details\").Funcs(html.FuncMap{\n\t\t\"json\": func(v interface{}) html.JS {\n\n\t\t\ttmpBuffer := an.bufPool.Get().(*bytes.Buffer)\n\t\t\tdefer func() {\n\t\t\t\ttmpBuffer.Reset()\n\t\t\t\tan.bufPool.Put(tmpBuffer)\n\t\t\t}()\n\n\t\t\t_ = json.NewEncoder(tmpBuffer).Encode(v)\n\n\t\t\treturn 
html.JS(tmpBuffer.String())\n\t\t},\n\t}).Parse(n.Details)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, tcp := range n.TcpHandlers {\n\t\tc := alertservice.TCPHandlerConfig{\n\t\t\tAddress: tcp.Address,\n\t\t}\n\t\th := alertservice.NewTCPHandler(c, l)\n\t\tan.handlers = append(an.handlers, h)\n\t}\n\n\tfor _, email := range n.EmailHandlers {\n\t\tc := smtp.HandlerConfig{\n\t\t\tTo: email.ToList,\n\t\t}\n\t\th := et.tm.SMTPService.Handler(c, l)\n\t\tan.handlers = append(an.handlers, h)\n\t}\n\tif len(n.EmailHandlers) == 0 && (et.tm.SMTPService != nil && et.tm.SMTPService.Global()) {\n\t\tc := smtp.HandlerConfig{}\n\t\th := et.tm.SMTPService.Handler(c, l)\n\t\tan.handlers = append(an.handlers, h)\n\t}\n\t// If email has been configured with state changes only set it.\n\tif et.tm.SMTPService != nil &&\n\t\tet.tm.SMTPService.Global() &&\n\t\tet.tm.SMTPService.StateChangesOnly() {\n\t\tn.IsStateChangesOnly = true\n\t}\n\n\tfor _, e := range n.ExecHandlers {\n\t\tc := alertservice.ExecHandlerConfig{\n\t\t\tProg:      e.Command[0],\n\t\t\tArgs:      e.Command[1:],\n\t\t\tCommander: et.tm.Commander,\n\t\t}\n\t\th := alertservice.NewExecHandler(c, l)\n\t\tan.handlers = append(an.handlers, h)\n\t}\n\n\tfor _, log := range n.LogHandlers {\n\t\tc := alertservice.DefaultLogHandlerConfig()\n\t\tc.Path = log.FilePath\n\t\tif log.Mode != 0 {\n\t\t\tc.Mode = os.FileMode(log.Mode)\n\t\t}\n\t\th, err := alertservice.NewLogHandler(c, l)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to create log alert handler\")\n\t\t}\n\t\tan.handlers = append(an.handlers, h)\n\t}\n\n\tfor _, vo := range n.VictorOpsHandlers {\n\t\tc := victorops.HandlerConfig{\n\t\t\tRoutingKey: vo.RoutingKey,\n\t\t}\n\t\th := et.tm.VictorOpsService.Handler(c, l)\n\t\tan.handlers = append(an.handlers, h)\n\t}\n\tif len(n.VictorOpsHandlers) == 0 && (et.tm.VictorOpsService != nil && et.tm.VictorOpsService.Global()) {\n\t\tc := victorops.HandlerConfig{}\n\t\th := et.tm.VictorOpsService.Handler(c, l)\n\t\tan.handlers = append(an.handlers, h)\n\t}\n\n\tfor _, pd := range n.PagerDutyHandlers {\n\t\tc := pagerduty.HandlerConfig{\n\t\t\tServiceKey: pd.ServiceKey,\n\t\t}\n\t\th := et.tm.PagerDutyService.Handler(c, l)\n\t\tan.handlers = append(an.handlers, h)\n\t}\n\tif len(n.PagerDutyHandlers) == 0 && (et.tm.PagerDutyService != nil && et.tm.PagerDutyService.Global()) {\n\t\tc := pagerduty.HandlerConfig{}\n\t\th := et.tm.PagerDutyService.Handler(c, l)\n\t\tan.handlers = append(an.handlers, h)\n\t}\n\n\tfor _, s := range n.SensuHandlers {\n\t\tc := sensu.HandlerConfig{\n\t\t\tSource:   s.Source,\n\t\t\tHandlers: s.HandlersList,\n\t\t}\n\t\th, err := et.tm.SensuService.Handler(c, l)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to create sensu alert handler\")\n\t\t}\n\t\tan.handlers = append(an.handlers, h)\n\t}\n\n\tfor _, s := range n.SlackHandlers {\n\t\tc := slack.HandlerConfig{\n\t\t\tChannel:   s.Channel,\n\t\t\tUsername:  s.Username,\n\t\t\tIconEmoji: s.IconEmoji,\n\t\t}\n\t\th := et.tm.SlackService.Handler(c, l)\n\t\tan.handlers = append(an.handlers, h)\n\t}\n\tif len(n.SlackHandlers) == 0 && (et.tm.SlackService != nil && et.tm.SlackService.Global()) {\n\t\th := et.tm.SlackService.Handler(slack.HandlerConfig{}, l)\n\t\tan.handlers = append(an.handlers, h)\n\t}\n\t// If slack has been configured with state changes only set it.\n\tif et.tm.SlackService != nil &&\n\t\tet.tm.SlackService.Global() &&\n\t\tet.tm.SlackService.StateChangesOnly() {\n\t\tn.IsStateChangesOnly = true\n\t}\n\n\tfor _, t 
:= range n.TelegramHandlers {\n\t\tc := telegram.HandlerConfig{\n\t\t\tChatId:                t.ChatId,\n\t\t\tParseMode:             t.ParseMode,\n\t\t\tDisableWebPagePreview: t.IsDisableWebPagePreview,\n\t\t\tDisableNotification:   t.IsDisableNotification,\n\t\t}\n\t\th := et.tm.TelegramService.Handler(c, l)\n\t\tan.handlers = append(an.handlers, h)\n\t}\n\n\tfor _, s := range n.SNMPTrapHandlers {\n\t\tdataList := make([]snmptrap.Data, len(s.DataList))\n\t\tfor i, d := range s.DataList {\n\t\t\tdataList[i] = snmptrap.Data{\n\t\t\t\tOid:   d.Oid,\n\t\t\t\tType:  d.Type,\n\t\t\t\tValue: d.Value,\n\t\t\t}\n\t\t}\n\t\tc := snmptrap.HandlerConfig{\n\t\t\tTrapOid:  s.TrapOid,\n\t\t\tDataList: dataList,\n\t\t}\n\t\th, err := et.tm.SNMPTrapService.Handler(c, l)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to create SNMP handler\")\n\t\t}\n\t\tan.handlers = append(an.handlers, h)\n\t}\n\n\tif len(n.TelegramHandlers) == 0 && (et.tm.TelegramService != nil && et.tm.TelegramService.Global()) {\n\t\tc := telegram.HandlerConfig{}\n\t\th := et.tm.TelegramService.Handler(c, l)\n\t\tan.handlers = append(an.handlers, h)\n\t}\n\t// If telegram has been configured with state changes only set it.\n\tif et.tm.TelegramService != nil &&\n\t\tet.tm.TelegramService.Global() &&\n\t\tet.tm.TelegramService.StateChangesOnly() {\n\t\tn.IsStateChangesOnly = true\n\t}\n\n\tfor _, hc := range n.HipChatHandlers {\n\t\tc := hipchat.HandlerConfig{\n\t\t\tRoom:  hc.Room,\n\t\t\tToken: hc.Token,\n\t\t}\n\t\th := et.tm.HipChatService.Handler(c, l)\n\t\tan.handlers = append(an.handlers, h)\n\t}\n\tif len(n.HipChatHandlers) == 0 && (et.tm.HipChatService != nil && et.tm.HipChatService.Global()) {\n\t\tc := hipchat.HandlerConfig{}\n\t\th := et.tm.HipChatService.Handler(c, l)\n\t\tan.handlers = append(an.handlers, h)\n\t}\n\t// If HipChat has been configured with state changes only set it.\n\tif et.tm.HipChatService != nil &&\n\t\tet.tm.HipChatService.Global() &&\n\t\tet.tm.HipChatService.StateChangesOnly() {\n\t\tn.IsStateChangesOnly = true\n\t}\n\n\tfor _, a := range n.AlertaHandlers {\n\t\tc := et.tm.AlertaService.DefaultHandlerConfig()\n\t\tif a.Token != \"\" {\n\t\t\tc.Token = a.Token\n\t\t}\n\t\tif a.Resource != \"\" {\n\t\t\tc.Resource = a.Resource\n\t\t}\n\t\tif a.Event != \"\" {\n\t\t\tc.Event = a.Event\n\t\t}\n\t\tif a.Environment != \"\" {\n\t\t\tc.Environment = a.Environment\n\t\t}\n\t\tif a.Group != \"\" {\n\t\t\tc.Group = a.Group\n\t\t}\n\t\tif a.Value != \"\" {\n\t\t\tc.Value = a.Value\n\t\t}\n\t\tif a.Origin != \"\" {\n\t\t\tc.Origin = a.Origin\n\t\t}\n\t\tif len(a.Service) != 0 {\n\t\t\tc.Service = a.Service\n\t\t}\n\t\th, err := et.tm.AlertaService.Handler(c, l)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to create Alerta handler\")\n\t\t}\n\t\tan.handlers = append(an.handlers, h)\n\t}\n\n\tfor _, p := range n.PushoverHandlers {\n\t\tc := pushover.HandlerConfig{}\n\t\tif p.Device != \"\" {\n\t\t\tc.Device = p.Device\n\t\t}\n\t\tif p.Title != \"\" {\n\t\t\tc.Title = p.Title\n\t\t}\n\t\tif p.URL != \"\" {\n\t\t\tc.URL = p.URL\n\t\t}\n\t\tif p.URLTitle != \"\" {\n\t\t\tc.URLTitle = p.URLTitle\n\t\t}\n\t\tif p.Sound != \"\" {\n\t\t\tc.Sound = p.Sound\n\t\t}\n\t\th := et.tm.PushoverService.Handler(c, l)\n\t\tan.handlers = append(an.handlers, h)\n\t}\n\n\tfor _, p := range n.HTTPPostHandlers {\n\t\tc := httppost.HandlerConfig{\n\t\t\tURL:      p.URL,\n\t\t\tEndpoint: p.Endpoint,\n\t\t\tHeaders:  p.Headers,\n\t\t}\n\t\th := et.tm.HTTPPostService.Handler(c, l)\n\t\tan.handlers = 
append(an.handlers, h)\n\t}\n\n\tfor _, og := range n.OpsGenieHandlers {\n\t\tc := opsgenie.HandlerConfig{\n\t\t\tTeamsList:      og.TeamsList,\n\t\t\tRecipientsList: og.RecipientsList,\n\t\t}\n\t\th := et.tm.OpsGenieService.Handler(c, l)\n\t\tan.handlers = append(an.handlers, h)\n\t}\n\tif len(n.OpsGenieHandlers) == 0 && (et.tm.OpsGenieService != nil && et.tm.OpsGenieService.Global()) {\n\t\tc := opsgenie.HandlerConfig{}\n\t\th := et.tm.OpsGenieService.Handler(c, l)\n\t\tan.handlers = append(an.handlers, h)\n\t}\n\n\tfor range n.TalkHandlers {\n\t\th := et.tm.TalkService.Handler(l)\n\t\tan.handlers = append(an.handlers, h)\n\t}\n\n\tfor _, m := range n.MQTTHandlers {\n\t\tc := mqtt.HandlerConfig{\n\t\t\tBrokerName: m.BrokerName,\n\t\t\tTopic:      m.Topic,\n\t\t\tQoS:        mqtt.QoSLevel(m.Qos),\n\t\t\tRetained:   m.Retained,\n\t\t}\n\t\th := et.tm.MQTTService.Handler(c, l)\n\t\tan.handlers = append(an.handlers, h)\n\t}\n\t// Parse level expressions\n\tan.levels = make([]stateful.Expression, alert.Critical+1)\n\tan.scopePools = make([]stateful.ScopePool, alert.Critical+1)\n\n\tan.levelResets = make([]stateful.Expression, alert.Critical+1)\n\tan.lrScopePools = make([]stateful.ScopePool, alert.Critical+1)\n\n\tif n.Info != nil {\n\t\tstatefulExpression, expressionCompileError := stateful.NewExpression(n.Info.Expression)\n\t\tif expressionCompileError != nil {\n\t\t\treturn nil, fmt.Errorf(\"Failed to compile stateful expression for info: %s\", expressionCompileError)\n\t\t}\n\n\t\tan.levels[alert.Info] = statefulExpression\n\t\tan.scopePools[alert.Info] = stateful.NewScopePool(ast.FindReferenceVariables(n.Info.Expression))\n\t\tif n.InfoReset != nil {\n\t\t\tlstatefulExpression, lexpressionCompileError := stateful.NewExpression(n.InfoReset.Expression)\n\t\t\tif lexpressionCompileError != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Failed to compile stateful expression for infoReset: %s\", lexpressionCompileError)\n\t\t\t}\n\t\t\tan.levelResets[alert.Info] = lstatefulExpression\n\t\t\tan.lrScopePools[alert.Info] = stateful.NewScopePool(ast.FindReferenceVariables(n.InfoReset.Expression))\n\t\t}\n\t}\n\n\tif n.Warn != nil {\n\t\tstatefulExpression, expressionCompileError := stateful.NewExpression(n.Warn.Expression)\n\t\tif expressionCompileError != nil {\n\t\t\treturn nil, fmt.Errorf(\"Failed to compile stateful expression for warn: %s\", expressionCompileError)\n\t\t}\n\t\tan.levels[alert.Warning] = statefulExpression\n\t\tan.scopePools[alert.Warning] = stateful.NewScopePool(ast.FindReferenceVariables(n.Warn.Expression))\n\t\tif n.WarnReset != nil {\n\t\t\tlstatefulExpression, lexpressionCompileError := stateful.NewExpression(n.WarnReset.Expression)\n\t\t\tif lexpressionCompileError != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Failed to compile stateful expression for warnReset: %s\", lexpressionCompileError)\n\t\t\t}\n\t\t\tan.levelResets[alert.Warning] = lstatefulExpression\n\t\t\tan.lrScopePools[alert.Warning] = stateful.NewScopePool(ast.FindReferenceVariables(n.WarnReset.Expression))\n\t\t}\n\t}\n\n\tif n.Crit != nil {\n\t\tstatefulExpression, expressionCompileError := stateful.NewExpression(n.Crit.Expression)\n\t\tif expressionCompileError != nil {\n\t\t\treturn nil, fmt.Errorf(\"Failed to compile stateful expression for crit: %s\", expressionCompileError)\n\t\t}\n\t\tan.levels[alert.Critical] = statefulExpression\n\t\tan.scopePools[alert.Critical] = stateful.NewScopePool(ast.FindReferenceVariables(n.Crit.Expression))\n\t\tif n.CritReset != nil {\n\t\t\tlstatefulExpression, 
lexpressionCompileError := stateful.NewExpression(n.CritReset.Expression)\n\t\t\tif lexpressionCompileError != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"Failed to compile stateful expression for critReset: %s\", lexpressionCompileError)\n\t\t\t}\n\t\t\tan.levelResets[alert.Critical] = lstatefulExpression\n\t\t\tan.lrScopePools[alert.Critical] = stateful.NewScopePool(ast.FindReferenceVariables(n.CritReset.Expression))\n\t\t}\n\t}\n\n\t// Setup states\n\tif n.History < 2 {\n\t\tn.History = 2\n\t}\n\n\t// Configure flapping\n\tif n.UseFlapping {\n\t\tif n.FlapLow > 1 || n.FlapHigh > 1 {\n\t\t\treturn nil, errors.New(\"alert flap thresholds are percentages and should be between 0 and 1\")\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (n *AlertNode) runAlert([]byte) error {\n\t// Register delete hook\n\tif n.hasAnonTopic() {\n\t\tn.et.tm.registerDeleteHookForTask(n.et.Task.ID, deleteAlertHook(n.anonTopic))\n\n\t\t// Register Handlers on topic\n\t\tfor _, h := range n.handlers {\n\t\t\tn.et.tm.AlertService.RegisterAnonHandler(n.anonTopic, h)\n\t\t}\n\t\t// Restore anonTopic\n\t\tn.et.tm.AlertService.RestoreTopic(n.anonTopic)\n\t}\n\n\t// Setup stats\n\tn.alertsTriggered = &expvar.Int{}\n\tn.statMap.Set(statsAlertsTriggered, n.alertsTriggered)\n\n\tn.oksTriggered = &expvar.Int{}\n\tn.statMap.Set(statsOKsTriggered, n.oksTriggered)\n\n\tn.infosTriggered = &expvar.Int{}\n\tn.statMap.Set(statsInfosTriggered, n.infosTriggered)\n\n\tn.warnsTriggered = &expvar.Int{}\n\tn.statMap.Set(statsWarnsTriggered, n.warnsTriggered)\n\n\tn.critsTriggered = &expvar.Int{}\n\tn.statMap.Set(statsCritsTriggered, n.critsTriggered)\n\n\tn.eventsDropped = &expvar.Int{}\n\tn.statMap.Set(statsEventsDropped, n.eventsDropped)\n\n\t// Setup consumer\n\tconsumer := edge.NewGroupedConsumer(\n\t\tn.ins[0],\n\t\tn,\n\t)\n\tn.statMap.Set(statCardinalityGauge, consumer.CardinalityVar())\n\n\tif err := consumer.Consume(); err != nil {\n\t\treturn err\n\t}\n\n\t// Close the anonymous topic.\n\tn.et.tm.AlertService.CloseTopic(n.anonTopic)\n\n\t// Deregister Handlers on topic\n\tfor _, h := range n.handlers {\n\t\tn.et.tm.AlertService.DeregisterAnonHandler(n.anonTopic, h)\n\t}\n\treturn nil\n}\n\nfunc (n *AlertNode) NewGroup(group edge.GroupInfo, first edge.PointMeta) (edge.Receiver, error) {\n\tid, err := n.renderID(first.Name(), first.GroupID(), first.Tags())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tt := first.Time()\n\n\tstate := n.restoreEventState(id, t)\n\n\treturn edge.NewReceiverFromForwardReceiverWithStats(\n\t\tn.outs,\n\t\tedge.NewTimedForwardReceiver(\n\t\t\tn.timer,\n\t\t\tstate,\n\t\t),\n\t), nil\n}\n\nfunc (n *AlertNode) restoreEventState(id string, t time.Time) *alertState {\n\tstate := n.newAlertState()\n\tcurrentLevel, triggered := n.restoreEvent(id)\n\tif currentLevel != alert.OK {\n\t\t// Add initial event\n\t\tstate.addEvent(t, currentLevel)\n\t\t// Record triggered time\n\t\tstate.triggered(triggered)\n\t}\n\treturn state\n}\n\nfunc (n *AlertNode) newAlertState() *alertState {\n\treturn &alertState{\n\t\thistory: make([]alert.Level, n.a.History),\n\t\tn:       n,\n\t\tbuffer:  new(edge.BatchBuffer),\n\t}\n}\n\nfunc (n *AlertNode) restoreEvent(id string) (alert.Level, time.Time) {\n\tvar topicState, anonTopicState alert.EventState\n\tvar anonFound, topicFound bool\n\t// Check for previous state on anonTopic\n\tif n.hasAnonTopic() {\n\t\tif state, ok, err := n.et.tm.AlertService.EventState(n.anonTopic, id); err != nil {\n\t\t\tn.incrementErrorCount()\n\t\t\tn.logger.Printf(\"E! 
failed to get event state for anonymous topic %s, event %s: %v\", n.anonTopic, id, err)\n\t\t} else if ok {\n\t\t\tanonTopicState = state\n\t\t\tanonFound = true\n\t\t}\n\t}\n\t// Check for previous state on topic.\n\tif n.hasTopic() {\n\t\tif state, ok, err := n.et.tm.AlertService.EventState(n.topic, id); err != nil {\n\t\t\tn.incrementErrorCount()\n\t\t\tn.logger.Printf(\"E! failed to get event state for topic %s, event %s: %v\", n.topic, id, err)\n\t\t} else if ok {\n\t\t\ttopicState = state\n\t\t\ttopicFound = true\n\t\t}\n\t}\n\tif topicState.Level != anonTopicState.Level {\n\t\tif anonFound && topicFound {\n\t\t\t// Anon topic takes precedence\n\t\t\tif err := n.et.tm.AlertService.UpdateEvent(n.topic, anonTopicState); err != nil {\n\t\t\t\tn.incrementErrorCount()\n\t\t\t\tn.logger.Printf(\"E! failed to update topic %q event state for event %q\", n.topic, id)\n\t\t\t}\n\t\t} else if topicFound && n.hasAnonTopic() {\n\t\t\t// Update event state for the anonymous topic\n\t\t\tif err := n.et.tm.AlertService.UpdateEvent(n.anonTopic, topicState); err != nil {\n\t\t\t\tn.incrementErrorCount()\n\t\t\t\tn.logger.Printf(\"E! failed to update topic %q event state for event %q\", n.anonTopic, id)\n\t\t\t}\n\t\t} // else nothing was found, nothing to do\n\t}\n\tif anonFound {\n\t\treturn anonTopicState.Level, anonTopicState.Time\n\t}\n\treturn topicState.Level, topicState.Time\n}\n\nfunc deleteAlertHook(anonTopic string) deleteHook {\n\treturn func(tm *TaskMaster) {\n\t\ttm.AlertService.DeleteTopic(anonTopic)\n\t}\n}\n\nfunc (n *AlertNode) hasAnonTopic() bool {\n\treturn len(n.handlers) > 0\n}\nfunc (n *AlertNode) hasTopic() bool {\n\treturn n.topic != \"\"\n}\n\nfunc (n *AlertNode) handleEvent(event alert.Event) {\n\tn.alertsTriggered.Add(1)\n\tswitch event.State.Level {\n\tcase alert.OK:\n\t\tn.oksTriggered.Add(1)\n\tcase alert.Info:\n\t\tn.infosTriggered.Add(1)\n\tcase alert.Warning:\n\t\tn.warnsTriggered.Add(1)\n\tcase alert.Critical:\n\t\tn.critsTriggered.Add(1)\n\t}\n\tn.logger.Printf(\"D! %v alert triggered id:%s msg:%s data:%v\", event.State.Level, event.State.ID, event.State.Message, event.Data.Result.Series[0])\n\n\t// If we have anon handlers, emit event to the anonTopic\n\tif n.hasAnonTopic() {\n\t\tevent.Topic = n.anonTopic\n\t\terr := n.et.tm.AlertService.Collect(event)\n\t\tif err != nil {\n\t\t\tn.eventsDropped.Add(1)\n\t\t\tn.incrementErrorCount()\n\t\t\tn.logger.Println(\"E!\", err)\n\t\t}\n\t}\n\n\t// If we have a user-defined topic, emit the event to it.\n\tif n.hasTopic() {\n\t\tevent.Topic = n.topic\n\t\terr := n.et.tm.AlertService.Collect(event)\n\t\tif err != nil {\n\t\t\tn.eventsDropped.Add(1)\n\t\t\tn.incrementErrorCount()\n\t\t\tn.logger.Println(\"E!\", err)\n\t\t}\n\t}\n}\n\nfunc (n *AlertNode) determineLevel(p edge.FieldsTagsTimeGetter, currentLevel alert.Level) alert.Level {\n\tif higherLevel, found := n.findFirstMatchLevel(alert.Critical, currentLevel-1, p); found {\n\t\treturn higherLevel\n\t}\n\tif rse := n.levelResets[currentLevel]; rse != nil {\n\t\tif pass, err := EvalPredicate(rse, n.lrScopePools[currentLevel], p); err != nil {\n\t\t\tn.incrementErrorCount()\n\t\t\tn.logger.Printf(\"E! 
error evaluating reset expression for current level %v: %s\", currentLevel, err)\n\t\t} else if !pass {\n\t\t\treturn currentLevel\n\t\t}\n\t}\n\tif newLevel, found := n.findFirstMatchLevel(currentLevel, alert.OK, p); found {\n\t\treturn newLevel\n\t}\n\treturn alert.OK\n}\n\nfunc (n *AlertNode) findFirstMatchLevel(start alert.Level, stop alert.Level, p edge.FieldsTagsTimeGetter) (alert.Level, bool) {\n\tif stop < alert.OK {\n\t\tstop = alert.OK\n\t}\n\tfor l := start; l > stop; l-- {\n\t\tse := n.levels[l]\n\t\tif se == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif pass, err := EvalPredicate(se, n.scopePools[l], p); err != nil {\n\t\t\tn.incrementErrorCount()\n\t\t\tn.logger.Printf(\"E! error evaluating expression for level %v: %s\", alert.Level(l), err)\n\t\t\tcontinue\n\t\t} else if pass {\n\t\t\treturn alert.Level(l), true\n\t\t}\n\t}\n\treturn alert.OK, false\n}\n\nfunc (n *AlertNode) event(\n\tid, name string,\n\tgroup models.GroupID,\n\ttags models.Tags,\n\tfields models.Fields,\n\tlevel alert.Level,\n\tt time.Time,\n\td time.Duration,\n\tresult models.Result,\n) (alert.Event, error) {\n\tmsg, details, err := n.renderMessageAndDetails(id, name, t, group, tags, fields, level)\n\tif err != nil {\n\t\treturn alert.Event{}, err\n\t}\n\tevent := alert.Event{\n\t\tTopic: n.anonTopic,\n\t\tState: alert.EventState{\n\t\t\tID:       id,\n\t\t\tMessage:  msg,\n\t\t\tDetails:  details,\n\t\t\tTime:     t,\n\t\t\tDuration: d,\n\t\t\tLevel:    level,\n\t\t},\n\t\tData: alert.EventData{\n\t\t\tName:     name,\n\t\t\tTaskName: n.et.Task.ID,\n\t\t\tGroup:    string(group),\n\t\t\tTags:     tags,\n\t\t\tFields:   fields,\n\t\t\tResult:   result,\n\t\t},\n\t}\n\treturn event, nil\n}\n\ntype alertState struct {\n\tn *AlertNode\n\n\tbuffer *edge.BatchBuffer\n\n\thistory []alert.Level\n\tidx     int\n\n\tflapping bool\n\n\tchanged bool\n\t// Time when first alert was triggered\n\tfirstTriggered time.Time\n\t// Time when last alert was triggered.\n\t// Note: Alerts are not triggered for every event.\n\tlastTriggered time.Time\n\texpired       bool\n}\n\nfunc (a *alertState) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) {\n\treturn nil, a.buffer.BeginBatch(begin)\n}\n\nfunc (a *alertState) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) {\n\treturn nil, a.buffer.BatchPoint(bp)\n}\n\nfunc (a *alertState) EndBatch(end edge.EndBatchMessage) (edge.Message, error) {\n\treturn a.BufferedBatch(a.buffer.BufferedBatchMessage(end))\n}\n\nfunc (a *alertState) BufferedBatch(b edge.BufferedBatchMessage) (edge.Message, error) {\n\tbegin := b.Begin()\n\tid, err := a.n.renderID(begin.Name(), begin.GroupID(), begin.Tags())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(b.Points()) == 0 {\n\t\treturn nil, nil\n\t}\n\t// Keep track of lowest level for any point\n\tlowestLevel := alert.Critical\n\t// Keep track of highest level and point\n\thighestLevel := alert.OK\n\tvar highestPoint edge.BatchPointMessage\n\n\tcurrentLevel := a.currentLevel()\n\tfor _, bp := range b.Points() {\n\t\tl := a.n.determineLevel(bp, currentLevel)\n\t\tif l < lowestLevel {\n\t\t\tlowestLevel = l\n\t\t}\n\t\tif l > highestLevel || highestPoint == nil {\n\t\t\thighestLevel = l\n\t\t\thighestPoint = bp\n\t\t}\n\t}\n\n\t// Default the determined level to lowest.\n\tl := lowestLevel\n\t// Update determined level to highest if we don't care about all\n\tif !a.n.a.AllFlag {\n\t\tl = highestLevel\n\t}\n\t// Create alert Data\n\tt := highestPoint.Time()\n\tif a.n.a.AllFlag || l == alert.OK {\n\t\tt = 
begin.Time()\n\t}\n\n\ta.addEvent(t, l)\n\n\t// Trigger alert only if:\n\t//  l == OK and state.changed (aka recovery)\n\t//    OR\n\t//  l != OK and the flapping/state-changes-only checks pass\n\tif !(a.changed && l == alert.OK ||\n\t\t(l != alert.OK &&\n\t\t\t!((a.n.a.UseFlapping && a.flapping) ||\n\t\t\t\t(a.n.a.IsStateChangesOnly && !a.changed && !a.expired)))) {\n\t\treturn nil, nil\n\t}\n\n\ta.triggered(t)\n\n\t// Suppress the recovery event.\n\tif a.n.a.NoRecoveriesFlag && l == alert.OK {\n\t\treturn nil, nil\n\t}\n\n\tduration := a.duration()\n\tevent, err := a.n.event(id, begin.Name(), begin.GroupID(), begin.Tags(), highestPoint.Fields(), l, t, duration, b.ToResult())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ta.n.handleEvent(event)\n\n\t// Update tags or fields with event state\n\tif a.n.a.LevelTag != \"\" ||\n\t\ta.n.a.LevelField != \"\" ||\n\t\ta.n.a.IdTag != \"\" ||\n\t\ta.n.a.IdField != \"\" ||\n\t\ta.n.a.DurationField != \"\" ||\n\t\ta.n.a.MessageField != \"\" {\n\n\t\tb = b.ShallowCopy()\n\t\tpoints := make([]edge.BatchPointMessage, len(b.Points()))\n\t\tfor i, bp := range b.Points() {\n\t\t\tbp = bp.ShallowCopy()\n\t\t\ta.augmentTagsWithEventState(bp, event.State)\n\t\t\ta.augmentFieldsWithEventState(bp, event.State)\n\t\t\tpoints[i] = bp\n\t\t}\n\t\tb.SetPoints(points)\n\n\t\tnewBegin := begin.ShallowCopy()\n\t\ta.augmentTagsWithEventState(newBegin, event.State)\n\t\tb.SetBegin(newBegin)\n\t}\n\treturn b, nil\n}\n\nfunc (a *alertState) Point(p edge.PointMessage) (edge.Message, error) {\n\tid, err := a.n.renderID(p.Name(), p.GroupID(), p.Tags())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tl := a.n.determineLevel(p, a.currentLevel())\n\n\ta.addEvent(p.Time(), l)\n\n\tif (a.n.a.UseFlapping && a.flapping) || (a.n.a.IsStateChangesOnly && !a.changed && !a.expired) {\n\t\treturn nil, nil\n\t}\n\t// send alert if we are not OK or we are OK and state changed (i.e. recovery)\n\tif l != alert.OK || a.changed {\n\t\ta.triggered(p.Time())\n\t\t// Suppress the recovery event.\n\t\tif a.n.a.NoRecoveriesFlag && l == alert.OK {\n\t\t\treturn nil, nil\n\t\t}\n\t\t// Create an alert event\n\t\tduration := a.duration()\n\t\tevent, err := a.n.event(\n\t\t\tid,\n\t\t\tp.Name(),\n\t\t\tp.GroupID(),\n\t\t\tp.Tags(),\n\t\t\tp.Fields(),\n\t\t\tl,\n\t\t\tp.Time(),\n\t\t\tduration,\n\t\t\tp.ToResult(),\n\t\t)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\ta.n.handleEvent(event)\n\n\t\t// Prepare an augmented point to return\n\t\tp = p.ShallowCopy()\n\t\ta.augmentTagsWithEventState(p, event.State)\n\t\ta.augmentFieldsWithEventState(p, event.State)\n\t\treturn p, nil\n\t}\n\treturn nil, nil\n}\n\nfunc (a *alertState) augmentTagsWithEventState(p edge.TagSetter, eventState alert.EventState) {\n\tif a.n.a.LevelTag != \"\" || a.n.a.IdTag != \"\" {\n\t\ttags := p.Tags().Copy()\n\t\tif a.n.a.LevelTag != \"\" {\n\t\t\ttags[a.n.a.LevelTag] = eventState.Level.String()\n\t\t}\n\t\tif a.n.a.IdTag != \"\" {\n\t\t\ttags[a.n.a.IdTag] = eventState.ID\n\t\t}\n\t\tp.SetTags(tags)\n\t}\n}\n\nfunc (a *alertState) augmentFieldsWithEventState(p edge.FieldSetter, eventState alert.EventState) {\n\tif a.n.a.LevelField != \"\" || a.n.a.IdField != \"\" || a.n.a.DurationField != \"\" || a.n.a.MessageField != \"\" {\n\t\tfields := p.Fields().Copy()\n\t\tif a.n.a.LevelField != \"\" {\n\t\t\tfields[a.n.a.LevelField] = eventState.Level.String()\n\t\t}\n\t\tif a.n.a.MessageField != \"\" {\n\t\t\tfields[a.n.a.MessageField] = eventState.Message\n\t\t}\n\t\tif a.n.a.IdField != \"\" {\n\t\t\tfields[a.n.a.IdField] = 
eventState.ID\n\t\t}\n\t\tif a.n.a.DurationField != \"\" {\n\t\t\tfields[a.n.a.DurationField] = int64(eventState.Duration)\n\t\t}\n\t\tp.SetFields(fields)\n\t}\n}\n\nfunc (a *alertState) Barrier(b edge.BarrierMessage) (edge.Message, error) {\n\treturn b, nil\n}\nfunc (a *alertState) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) {\n\treturn d, nil\n}\n\n// Return the duration of the current alert state.\nfunc (a *alertState) duration() time.Duration {\n\treturn a.lastTriggered.Sub(a.firstTriggered)\n}\n\n// Record that the alert was triggered at time t.\nfunc (a *alertState) triggered(t time.Time) {\n\ta.lastTriggered = t\n\t// Check if we are being triggered for the first time since an alert.OK event.\n\t// If so, reset the firstTriggered time.\n\tp := a.idx - 1\n\tif p == -1 {\n\t\tp = len(a.history) - 1\n\t}\n\tif a.history[p] == alert.OK {\n\t\ta.firstTriggered = t\n\t}\n}\n\n// Record an event in the alert history.\nfunc (a *alertState) addEvent(t time.Time, level alert.Level) {\n\t// Check for changes\n\ta.changed = a.history[a.idx] != level\n\n\t// Add event to history\n\ta.idx = (a.idx + 1) % len(a.history)\n\ta.history[a.idx] = level\n\n\ta.updateFlapping()\n\ta.updateExpired(t)\n}\n\n// Return current level of this state\nfunc (a *alertState) currentLevel() alert.Level {\n\treturn a.history[a.idx]\n}\n\n// Compute the percentage change in the alert history.\nfunc (a *alertState) percentChange() float64 {\n\tl := len(a.history)\n\tchanges := 0.0\n\tweight := (maxWeight / weightDiff)\n\tstep := (maxWeight - weight) / float64(l-1)\n\tfor i := 0; i < l-1; i++ {\n\t\t// get current index\n\t\tc := (i + a.idx) % l\n\t\t// get previous index\n\t\tp := c - 1\n\t\t// check for wrap around\n\t\tif p < 0 {\n\t\t\tp = l - 1\n\t\t}\n\t\tif a.history[c] != a.history[p] {\n\t\t\tchanges += weight\n\t\t}\n\t\tweight += step\n\t}\n\n\tp := changes / float64(l-1)\n\treturn p\n}\n\nfunc (a *alertState) updateFlapping() {\n\tif !a.n.a.UseFlapping {\n\t\treturn\n\t}\n\tp := a.percentChange()\n\tif a.flapping && p < a.n.a.FlapLow {\n\t\ta.flapping = false\n\t} else if !a.flapping && p > a.n.a.FlapHigh {\n\t\ta.flapping = true\n\t}\n}\n\nfunc (a *alertState) updateExpired(t time.Time) {\n\ta.expired = !a.changed && a.n.a.StateChangesOnlyDuration != 0 && t.Sub(a.lastTriggered) >= a.n.a.StateChangesOnlyDuration\n}\n\ntype serverInfo struct {\n\tHostname  string\n\tClusterID string\n\tServerID  string\n}\n\n// Type containing information available to ID template.\ntype idInfo struct {\n\t// Measurement name\n\tName string\n\n\t// Task name\n\tTaskName string\n\n\t// Concatenation of all group-by tags of the form [key=value,]+.\n\t// If no groupBy is performed, equal to the literal 'nil'.\n\tGroup string\n\n\t// Map of tags\n\tTags map[string]string\n\n\tServerInfo serverInfo\n}\n\ntype messageInfo struct {\n\tidInfo\n\n\t// The ID of the alert.\n\tID string\n\n\t// Fields of alerting data point.\n\tFields map[string]interface{}\n\n\t// Alert Level, one of: INFO, WARNING, CRITICAL.\n\tLevel string\n\n\t// Time\n\tTime time.Time\n}\n\ntype detailsInfo struct {\n\tmessageInfo\n\t// The Message of the Alert\n\tMessage string\n}\n\nfunc (n *AlertNode) serverInfo() serverInfo {\n\treturn serverInfo{\n\t\tHostname:  n.et.tm.ServerInfo.Hostname(),\n\t\tClusterID: n.et.tm.ServerInfo.ClusterID().String(),\n\t\tServerID:  n.et.tm.ServerInfo.ServerID().String(),\n\t}\n}\n\nfunc (n *AlertNode) renderID(name string, group models.GroupID, tags models.Tags) (string, error) {\n\tg := string(group)\n\tif group == 
models.NilGroup {\n\t\tg = \"nil\"\n\t}\n\tinfo := idInfo{\n\t\tName:       name,\n\t\tTaskName:   n.et.Task.ID,\n\t\tGroup:      g,\n\t\tTags:       tags,\n\t\tServerInfo: n.serverInfo(),\n\t}\n\tid := n.bufPool.Get().(*bytes.Buffer)\n\tdefer func() {\n\t\tid.Reset()\n\t\tn.bufPool.Put(id)\n\t}()\n\n\terr := n.idTmpl.Execute(id, info)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn id.String(), nil\n}\n\nfunc (n *AlertNode) renderMessageAndDetails(id, name string, t time.Time, group models.GroupID, tags models.Tags, fields models.Fields, level alert.Level) (string, string, error) {\n\tg := string(group)\n\tif group == models.NilGroup {\n\t\tg = \"nil\"\n\t}\n\tminfo := messageInfo{\n\t\tidInfo: idInfo{\n\t\t\tName:       name,\n\t\t\tTaskName:   n.et.Task.ID,\n\t\t\tGroup:      g,\n\t\t\tTags:       tags,\n\t\t\tServerInfo: n.serverInfo(),\n\t\t},\n\t\tID:     id,\n\t\tFields: fields,\n\t\tLevel:  level.String(),\n\t\tTime:   t,\n\t}\n\n\t// Grab a buffer for the message template and the details template\n\ttmpBuffer := n.bufPool.Get().(*bytes.Buffer)\n\tdefer func() {\n\t\ttmpBuffer.Reset()\n\t\tn.bufPool.Put(tmpBuffer)\n\t}()\n\ttmpBuffer.Reset()\n\n\terr := n.messageTmpl.Execute(tmpBuffer, minfo)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tmsg := tmpBuffer.String()\n\tdinfo := detailsInfo{\n\t\tmessageInfo: minfo,\n\t\tMessage:     msg,\n\t}\n\n\t// Reuse the buffer, for the details template\n\ttmpBuffer.Reset()\n\terr = n.detailsTmpl.Execute(tmpBuffer, dinfo)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tdetails := tmpBuffer.String()\n\treturn msg, details, nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/autoscale.go",
    "content": "package kapacitor\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/expvar\"\n\t\"github.com/influxdata/kapacitor/models\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n\tk8s \"github.com/influxdata/kapacitor/services/k8s/client\"\n\tswarm \"github.com/influxdata/kapacitor/services/swarm/client\"\n\t\"github.com/influxdata/kapacitor/tick/ast\"\n\t\"github.com/influxdata/kapacitor/tick/stateful\"\n\t\"github.com/pkg/errors\"\n)\n\nconst (\n\tstatsAutoscaleIncreaseEventsCount = \"increase_events\"\n\tstatsAutoscaleDecreaseEventsCount = \"decrease_events\"\n\tstatsAutoscaleCooldownDropsCount  = \"cooldown_drops\"\n)\n\ntype resourceID interface {\n\tID() string\n}\n\ntype autoscaler interface {\n\tResourceIDFromTags(models.Tags) (resourceID, error)\n\tReplicas(id resourceID) (int, error)\n\tSetReplicas(id resourceID, replicas int) error\n\tSetResourceIDOnTags(id resourceID, tags models.Tags)\n}\n\ntype resourceState struct {\n\tlastIncrease time.Time\n\tlastDecrease time.Time\n\tcurrent      int\n}\n\ntype event struct {\n\tID  resourceID\n\tOld int\n\tNew int\n}\n\ntype AutoscaleNode struct {\n\tnode\n\n\ta autoscaler\n\n\treplicasExpr      stateful.Expression\n\treplicasScopePool stateful.ScopePool\n\n\tresourceStates map[string]resourceState\n\n\tincreaseCount      *expvar.Int\n\tdecreaseCount      *expvar.Int\n\tcooldownDropsCount *expvar.Int\n\n\tmin int\n\tmax int\n\n\tincreaseCooldown time.Duration\n\tdecreaseCooldown time.Duration\n\n\tcurrentField string\n}\n\n// Create a new AutoscaleNode which can trigger autoscale events.\nfunc newAutoscaleNode(\n\tet *ExecutingTask,\n\tl *log.Logger,\n\tn pipeline.Node,\n\ta autoscaler,\n\tmin,\n\tmax int,\n\tincreaseCooldown,\n\tdecreaseCooldown time.Duration,\n\tcurrentField string,\n\treplicas *ast.LambdaNode,\n) (*AutoscaleNode, error) {\n\tif min < 1 {\n\t\treturn nil, fmt.Errorf(\"minimum count must be >= 1, got %d\", min)\n\t}\n\t// Initialize the replicas lambda expression scope pool\n\treplicasExpr, err := stateful.NewExpression(replicas.Expression)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"invalid replicas expression\")\n\t}\n\treplicasScopePool := stateful.NewScopePool(ast.FindReferenceVariables(replicas.Expression))\n\tkn := &AutoscaleNode{\n\t\tnode:              node{Node: n, et: et, logger: l},\n\t\tresourceStates:    make(map[string]resourceState),\n\t\tmin:               min,\n\t\tmax:               max,\n\t\tincreaseCooldown:  increaseCooldown,\n\t\tdecreaseCooldown:  decreaseCooldown,\n\t\tcurrentField:      currentField,\n\t\ta:                 a,\n\t\treplicasExpr:      replicasExpr,\n\t\treplicasScopePool: replicasScopePool,\n\t}\n\tkn.node.runF = kn.runAutoscale\n\treturn kn, nil\n}\n\nfunc (n *AutoscaleNode) runAutoscale([]byte) error {\n\tn.increaseCount = &expvar.Int{}\n\tn.decreaseCount = &expvar.Int{}\n\tn.cooldownDropsCount = &expvar.Int{}\n\n\tn.statMap.Set(statsAutoscaleIncreaseEventsCount, n.increaseCount)\n\tn.statMap.Set(statsAutoscaleDecreaseEventsCount, n.decreaseCount)\n\tn.statMap.Set(statsAutoscaleCooldownDropsCount, n.cooldownDropsCount)\n\n\tconsumer := edge.NewGroupedConsumer(\n\t\tn.ins[0],\n\t\tn,\n\t)\n\tn.statMap.Set(statCardinalityGauge, consumer.CardinalityVar())\n\treturn consumer.Consume()\n}\n\nfunc (n *AutoscaleNode) NewGroup(group edge.GroupInfo, first edge.PointMeta) (edge.Receiver, error) {\n\treturn 
edge.NewReceiverFromForwardReceiverWithStats(\n\t\tn.outs,\n\t\tedge.NewTimedForwardReceiver(n.timer, n.newGroup()),\n\t), nil\n}\n\nfunc (n *AutoscaleNode) newGroup() *autoscaleGroup {\n\treturn &autoscaleGroup{\n\t\tn:    n,\n\t\texpr: n.replicasExpr.CopyReset(),\n\t}\n}\n\ntype autoscaleGroup struct {\n\tn *AutoscaleNode\n\n\texpr stateful.Expression\n\n\tbegin edge.BeginBatchMessage\n}\n\nfunc (g *autoscaleGroup) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) {\n\tg.begin = begin\n\treturn nil, nil\n}\n\nfunc (g *autoscaleGroup) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) {\n\tnp, err := g.n.handlePoint(g.begin.Name(), g.begin.Dimensions(), bp, g.expr)\n\tif err != nil {\n\t\tg.n.incrementErrorCount()\n\t\tg.n.logger.Println(\"E!\", err)\n\t}\n\treturn np, nil\n}\n\nfunc (g *autoscaleGroup) EndBatch(end edge.EndBatchMessage) (edge.Message, error) {\n\treturn nil, nil\n}\n\nfunc (g *autoscaleGroup) Point(p edge.PointMessage) (edge.Message, error) {\n\tnp, err := g.n.handlePoint(p.Name(), p.Dimensions(), p, g.expr)\n\tif err != nil {\n\t\tg.n.incrementErrorCount()\n\t\tg.n.logger.Println(\"E!\", err)\n\t}\n\treturn np, nil\n}\n\nfunc (g *autoscaleGroup) Barrier(b edge.BarrierMessage) (edge.Message, error) {\n\treturn b, nil\n}\nfunc (g *autoscaleGroup) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) {\n\treturn d, nil\n}\n\nfunc (n *AutoscaleNode) handlePoint(streamName string, dims models.Dimensions, p edge.FieldsTagsTimeGetter, expr stateful.Expression) (edge.PointMessage, error) {\n\tid, err := n.a.ResourceIDFromTags(p.Tags())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstate, ok := n.resourceStates[id.ID()]\n\tif !ok {\n\t\t// If we haven't seen this resource before, get its state\n\t\treplicas, err := n.a.Replicas(id)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"could not determine initial scale for %q\", id)\n\t\t}\n\t\tstate = resourceState{\n\t\t\tcurrent: replicas,\n\t\t}\n\t\tn.resourceStates[id.ID()] = state\n\t}\n\n\t// Eval the replicas expression\n\tnewReplicas, err := n.evalExpr(state.current, expr, p)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to evaluate the replicas expression\")\n\t}\n\n\t// Create the event\n\te := event{\n\t\tID:  id,\n\t\tOld: state.current,\n\t\tNew: newReplicas,\n\t}\n\t// Check bounds\n\tif n.max > 0 && e.New > n.max {\n\t\te.New = n.max\n\t}\n\tif e.New < n.min {\n\t\te.New = n.min\n\t}\n\n\t// Validate something changed\n\tif e.New == e.Old {\n\t\t// Nothing to do\n\t\treturn nil, nil\n\t}\n\n\t// Update local copy of state\n\tchange := e.New - e.Old\n\tstate.current = e.New\n\n\t// Check last change cooldown times\n\tt := p.Time()\n\tvar counter *expvar.Int\n\tswitch {\n\tcase change > 0:\n\t\tif t.Before(state.lastIncrease.Add(n.increaseCooldown)) {\n\t\t\t// Still hot, nothing to do\n\t\t\tn.cooldownDropsCount.Add(1)\n\t\t\treturn nil, nil\n\t\t}\n\t\tstate.lastIncrease = t\n\t\tcounter = n.increaseCount\n\tcase change < 0:\n\t\tif t.Before(state.lastDecrease.Add(n.decreaseCooldown)) {\n\t\t\t// Still hot, nothing to do\n\t\t\tn.cooldownDropsCount.Add(1)\n\t\t\treturn nil, nil\n\t\t}\n\t\tstate.lastDecrease = t\n\t\tcounter = n.decreaseCount\n\t}\n\n\t// We have a valid event to apply\n\tif err := n.applyEvent(e); err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to apply scaling event\")\n\t}\n\n\t// Only save the updated state if we were successful\n\tn.resourceStates[id.ID()] = state\n\n\t// Count event\n\tcounter.Add(1)\n\n\t// Create new tags for 
the point.\n\t// Leave room for the namespace,kind, and resource tags.\n\tnewTags := make(models.Tags, len(dims.TagNames)+3)\n\n\t// Copy group by tags\n\tfor _, d := range dims.TagNames {\n\t\tnewTags[d] = p.Tags()[d]\n\t}\n\tn.a.SetResourceIDOnTags(id, newTags)\n\n\t// Create point representing the event\n\treturn edge.NewPointMessage(\n\t\tstreamName, \"\", \"\",\n\t\tdims,\n\t\tmodels.Fields{\n\t\t\t\"old\": int64(e.Old),\n\t\t\t\"new\": int64(e.New),\n\t\t},\n\t\tnewTags,\n\t\tt,\n\t), nil\n}\n\nfunc (n *AutoscaleNode) applyEvent(e event) error {\n\tn.logger.Printf(\"D! setting replicas to %d was %d for %q\", e.New, e.Old, e.ID)\n\terr := n.a.SetReplicas(e.ID, e.New)\n\treturn errors.Wrapf(err, \"failed to set new replica count for %q\", e.ID)\n}\n\nfunc (n *AutoscaleNode) evalExpr(\n\tcurrent int,\n\texpr stateful.Expression,\n\tp edge.FieldsTagsTimeGetter,\n) (int, error) {\n\tvars := n.replicasScopePool.Get()\n\tdefer n.replicasScopePool.Put(vars)\n\n\t// Set the current replicas value on the scope if requested.\n\tif n.currentField != \"\" {\n\t\tvars.Set(n.currentField, current)\n\t}\n\n\t// Fill the scope with the rest of the values\n\terr := fillScope(vars, n.replicasScopePool.ReferenceVariables(), p)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\ti, err := expr.EvalInt(vars)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn int(i), err\n}\n\n////////////////////////////////////\n// K8s implementation of Autoscaler\n\ntype k8sAutoscaler struct {\n\tclient k8s.Client\n\n\tresourceName    string\n\tresourceNameTag string\n\n\tnamespaceTag string\n\tkindTag      string\n\tnameTag      string\n\n\tkind string\n\n\tnamespace string\n}\n\nfunc newK8sAutoscaleNode(et *ExecutingTask, n *pipeline.K8sAutoscaleNode, l *log.Logger) (*AutoscaleNode, error) {\n\tclient, err := et.tm.K8sService.Client(n.Cluster)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot use the k8sAutoscale node, could not create kubernetes client: %v\", err)\n\t}\n\ta := &k8sAutoscaler{\n\t\tclient:          client,\n\t\tresourceName:    n.ResourceName,\n\t\tresourceNameTag: n.ResourceNameTag,\n\t\tnamespaceTag:    n.NamespaceTag,\n\t\tkindTag:         n.KindTag,\n\t\tnameTag:         n.ResourceTag,\n\t\tkind:            n.Kind,\n\t\tnamespace:       n.Namespace,\n\t}\n\treturn newAutoscaleNode(\n\t\tet,\n\t\tl,\n\t\tn,\n\t\ta,\n\t\tint(n.Min),\n\t\tint(n.Max),\n\t\tn.IncreaseCooldown,\n\t\tn.DecreaseCooldown,\n\t\tn.CurrentField,\n\t\tn.Replicas,\n\t)\n}\n\ntype k8sResourceID struct {\n\tNamespace,\n\tKind,\n\tName string\n}\n\nfunc (id k8sResourceID) ID() string {\n\treturn id.Name\n}\n\nfunc (id k8sResourceID) String() string {\n\treturn fmt.Sprintf(\"%s/%s/%s\", id.Namespace, id.Kind, id.Name)\n}\n\nfunc (a *k8sAutoscaler) ResourceIDFromTags(tags models.Tags) (resourceID, error) {\n\t// Get the name of the resource\n\tvar name string\n\tswitch {\n\tcase a.resourceName != \"\":\n\t\tname = a.resourceName\n\tcase a.resourceNameTag != \"\":\n\t\tt, ok := tags[a.resourceNameTag]\n\t\tif ok {\n\t\t\tname = t\n\t\t}\n\tdefault:\n\t\treturn nil, errors.New(\"expected one of ResourceName or ResourceNameTag to be set\")\n\t}\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"could not determine the name of the resource\")\n\t}\n\tnamespace := a.namespace\n\tif namespace == \"\" {\n\t\tnamespace = k8s.NamespaceDefault\n\t}\n\treturn k8sResourceID{\n\t\tNamespace: namespace,\n\t\tKind:      a.kind,\n\t\tName:      name,\n\t}, nil\n}\n\nfunc (a *k8sAutoscaler) getScale(kid k8sResourceID) (*k8s.Scale, error) 
{\n\tscales := a.client.Scales(kid.Namespace)\n\tscale, err := scales.Get(kid.Kind, kid.Name)\n\treturn scale, err\n}\n\nfunc (a *k8sAutoscaler) Replicas(id resourceID) (int, error) {\n\tkid := id.(k8sResourceID)\n\tscale, err := a.getScale(kid)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn int(scale.Spec.Replicas), nil\n}\n\nfunc (a *k8sAutoscaler) SetReplicas(id resourceID, replicas int) error {\n\tkid := id.(k8sResourceID)\n\tscale, err := a.getScale(kid)\n\tif err != nil {\n\t\treturn err\n\t}\n\tscale.Spec.Replicas = int32(replicas)\n\tscales := a.client.Scales(kid.Namespace)\n\tif err := scales.Update(kid.Kind, scale); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (a *k8sAutoscaler) SetResourceIDOnTags(id resourceID, tags models.Tags) {\n\tkid := id.(k8sResourceID)\n\t// Set namespace,kind,resource tags\n\tif a.namespaceTag != \"\" {\n\t\ttags[a.namespaceTag] = kid.Namespace\n\t}\n\tif a.kindTag != \"\" {\n\t\ttags[a.kindTag] = kid.Kind\n\t}\n\tif a.nameTag != \"\" {\n\t\ttags[a.nameTag] = kid.Name\n\t}\n}\n\n/////////////////////////////////////////////\n// Docker Swarm implementation of Autoscaler\n\ntype swarmAutoscaler struct {\n\tclient swarm.Client\n\n\tserviceName          string\n\tserviceNameTag       string\n\toutputServiceNameTag string\n}\n\nfunc newSwarmAutoscaleNode(et *ExecutingTask, n *pipeline.SwarmAutoscaleNode, l *log.Logger) (*AutoscaleNode, error) {\n\tclient, err := et.tm.SwarmService.Client(n.Cluster)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot use the swarmAutoscale node, could not create swarm client: %v\", err)\n\t}\n\toutputServiceNameTag := n.OutputServiceNameTag\n\tif outputServiceNameTag == \"\" {\n\t\toutputServiceNameTag = n.ServiceNameTag\n\t}\n\ta := &swarmAutoscaler{\n\t\tclient:               client,\n\t\tserviceName:          n.ServiceName,\n\t\tserviceNameTag:       n.ServiceNameTag,\n\t\toutputServiceNameTag: outputServiceNameTag,\n\t}\n\treturn newAutoscaleNode(\n\t\tet,\n\t\tl,\n\t\tn,\n\t\ta,\n\t\tint(n.Min),\n\t\tint(n.Max),\n\t\tn.IncreaseCooldown,\n\t\tn.DecreaseCooldown,\n\t\tn.CurrentField,\n\t\tn.Replicas,\n\t)\n}\n\ntype swarmResourceID string\n\nfunc (id swarmResourceID) ID() string {\n\treturn string(id)\n}\n\nfunc (a *swarmAutoscaler) ResourceIDFromTags(tags models.Tags) (resourceID, error) {\n\t// Get the name of the resource\n\tvar name string\n\tswitch {\n\tcase a.serviceName != \"\":\n\t\tname = a.serviceName\n\tcase a.serviceNameTag != \"\":\n\t\tt, ok := tags[a.serviceNameTag]\n\t\tif ok {\n\t\t\tname = t\n\t\t}\n\tdefault:\n\t\treturn nil, errors.New(\"expected one of ServiceName or ServiceNameTag to be set\")\n\t}\n\tif name == \"\" {\n\t\treturn nil, errors.New(\"could not determine the name of the resource\")\n\t}\n\treturn swarmResourceID(name), nil\n}\n\nfunc (a *swarmAutoscaler) Replicas(id resourceID) (int, error) {\n\tsid := id.ID()\n\tservice, err := a.client.Service(sid)\n\tif err != nil {\n\t\treturn 0, errors.Wrapf(err, \"failed to get swarm service for %q\", id)\n\t}\n\treturn int(*service.Spec.Mode.Replicated.Replicas), nil\n\n}\n\nfunc (a *swarmAutoscaler) SetReplicas(id resourceID, replicas int) error {\n\tsid := id.ID()\n\tservice, err := a.client.Service(sid)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to get swarm service for %q\", id)\n\t}\n\t*service.Spec.Mode.Replicated.Replicas = uint64(replicas)\n\n\treturn a.client.UpdateService(service)\n}\n\nfunc (a *swarmAutoscaler) SetResourceIDOnTags(id resourceID, tags models.Tags) {\n\tif a.outputServiceNameTag != 
\"\" {\n\t\ttags[a.outputServiceNameTag] = id.ID()\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/batch.go",
    "content": "package kapacitor\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/gorhill/cronexpr\"\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/expvar\"\n\t\"github.com/influxdata/kapacitor/influxdb\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n\t\"github.com/pkg/errors\"\n)\n\nconst (\n\tstatsBatchesQueried = \"batches_queried\"\n\tstatsPointsQueried  = \"points_queried\"\n)\n\ntype BatchNode struct {\n\tnode\n\ts   *pipeline.BatchNode\n\tidx int\n}\n\nfunc newBatchNode(et *ExecutingTask, n *pipeline.BatchNode, l *log.Logger) (*BatchNode, error) {\n\tsn := &BatchNode{\n\t\tnode: node{Node: n, et: et, logger: l},\n\t\ts:    n,\n\t}\n\treturn sn, nil\n}\n\nfunc (n *BatchNode) linkChild(c Node) error {\n\n\t// add child\n\tif n.Provides() != c.Wants() {\n\t\treturn fmt.Errorf(\"cannot add child mismatched edges: %s -> %s\", n.Provides(), c.Wants())\n\t}\n\tn.children = append(n.children, c)\n\n\t// add parent\n\tc.addParent(n)\n\n\treturn nil\n}\n\nfunc (n *BatchNode) addParentEdge(in edge.StatsEdge) {\n\t// Pass edges down to children\n\tn.children[n.idx].addParentEdge(in)\n\tn.idx++\n}\n\nfunc (n *BatchNode) start([]byte) {\n}\n\nfunc (n *BatchNode) Wait() error {\n\treturn nil\n}\n\n// Return list of databases and retention policies\n// the batcher will query.\nfunc (n *BatchNode) DBRPs() ([]DBRP, error) {\n\tvar dbrps []DBRP\n\tfor _, b := range n.children {\n\t\td, err := b.(*QueryNode).DBRPs()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdbrps = append(dbrps, d...)\n\t}\n\treturn dbrps, nil\n}\n\nfunc (n *BatchNode) Count() int {\n\treturn len(n.children)\n}\n\nfunc (n *BatchNode) Start() {\n\tfor _, b := range n.children {\n\t\tb.(*QueryNode).Start()\n\t}\n}\n\nfunc (n *BatchNode) Abort() {\n\tfor _, b := range n.children {\n\t\tb.(*QueryNode).Abort()\n\t}\n}\n\ntype BatchQueries struct {\n\tQueries            []*Query\n\tCluster            string\n\tGroupByMeasurement bool\n}\n\nfunc (n *BatchNode) Queries(start, stop time.Time) ([]BatchQueries, error) {\n\tqueries := make([]BatchQueries, len(n.children))\n\tfor i, b := range n.children {\n\t\tqn := b.(*QueryNode)\n\t\tqs, err := qn.Queries(start, stop)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tqueries[i] = BatchQueries{\n\t\t\tQueries:            qs,\n\t\t\tCluster:            qn.Cluster(),\n\t\t\tGroupByMeasurement: qn.GroupByMeasurement(),\n\t\t}\n\t}\n\treturn queries, nil\n}\n\n// Do not add the source batch node to the dot output\n// since its not really an edge.\nfunc (n *BatchNode) edot(*bytes.Buffer, bool) {}\n\nfunc (n *BatchNode) collectedCount() (count int64) {\n\tfor _, child := range n.children {\n\t\tcount += child.collectedCount()\n\t}\n\treturn\n}\n\ntype QueryNode struct {\n\tnode\n\tb        *pipeline.QueryNode\n\tquery    *Query\n\tticker   ticker\n\tqueryMu  sync.Mutex\n\tqueryErr chan error\n\tclosing  chan struct{}\n\taborting chan struct{}\n\n\tbatchesQueried *expvar.Int\n\tpointsQueried  *expvar.Int\n\tbyName         bool\n}\n\nfunc newQueryNode(et *ExecutingTask, n *pipeline.QueryNode, l *log.Logger) (*QueryNode, error) {\n\tbn := &QueryNode{\n\t\tnode:     node{Node: n, et: et, logger: l},\n\t\tb:        n,\n\t\tclosing:  make(chan struct{}),\n\t\taborting: make(chan struct{}),\n\t\tbyName:   n.GroupByMeasurementFlag,\n\t}\n\tbn.node.runF = bn.runBatch\n\tbn.node.stopF = bn.stopBatch\n\n\t// Create query\n\tq, err := NewQuery(n.QueryStr)\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\tbn.query = q\n\t// Add in dimensions\n\terr = bn.query.Dimensions(n.Dimensions)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// Set offset alignment\n\tif n.AlignGroupFlag {\n\t\tbn.query.AlignGroup()\n\t}\n\t// Set fill\n\tswitch fill := n.Fill.(type) {\n\tcase string:\n\t\tswitch fill {\n\t\tcase \"null\":\n\t\t\tbn.query.Fill(influxql.NullFill, nil)\n\t\tcase \"none\":\n\t\t\tbn.query.Fill(influxql.NoFill, nil)\n\t\tcase \"previous\":\n\t\t\tbn.query.Fill(influxql.PreviousFill, nil)\n\t\tcase \"linear\":\n\t\t\tbn.query.Fill(influxql.LinearFill, nil)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unexpected fill option %s\", fill)\n\t\t}\n\tcase int64, float64:\n\t\tbn.query.Fill(influxql.NumberFill, fill)\n\t}\n\n\t// Determine schedule\n\tif n.Every != 0 && n.Cron != \"\" {\n\t\treturn nil, errors.New(\"must not set both 'every' and 'cron' properties\")\n\t}\n\tswitch {\n\tcase n.Every != 0:\n\t\tbn.ticker = newTimeTicker(n.Every, n.AlignFlag)\n\tcase n.Cron != \"\":\n\t\tvar err error\n\t\tbn.ticker, err = newCronTicker(n.Cron)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tdefault:\n\t\treturn nil, errors.New(\"must define one of 'every' or 'cron'\")\n\t}\n\n\treturn bn, nil\n}\n\nfunc (n *QueryNode) GroupByMeasurement() bool {\n\treturn n.byName\n}\n\n// Return list of databases and retention policies\n// the batcher will query.\nfunc (n *QueryNode) DBRPs() ([]DBRP, error) {\n\treturn n.query.DBRPs()\n}\n\nfunc (n *QueryNode) Start() {\n\tn.queryMu.Lock()\n\tdefer n.queryMu.Unlock()\n\tn.queryErr = make(chan error, 1)\n\tgo func() {\n\t\tn.queryErr <- n.doQuery(n.ins[0])\n\t}()\n}\n\nfunc (n *QueryNode) Abort() {\n\tclose(n.aborting)\n}\n\nfunc (n *QueryNode) Cluster() string {\n\treturn n.b.Cluster\n}\n\nfunc (n *QueryNode) Queries(start, stop time.Time) ([]*Query, error) {\n\tnow := time.Now()\n\tif stop.IsZero() {\n\t\tstop = now\n\t}\n\t// Crons are sensitive to timezones.\n\t// Make sure we are using local time.\n\tcurrent := start.Local()\n\tqueries := make([]*Query, 0)\n\tfor {\n\t\tcurrent = n.ticker.Next(current)\n\t\tif current.IsZero() || current.After(stop) {\n\t\t\tbreak\n\t\t}\n\t\tqstop := current.Add(-1 * n.b.Offset)\n\t\tif qstop.After(now) {\n\t\t\tbreak\n\t\t}\n\n\t\tq, err := n.query.Clone()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tq.SetStartTime(qstop.Add(-1 * n.b.Period))\n\t\tq.SetStopTime(qstop)\n\t\tqueries = append(queries, q)\n\t}\n\treturn queries, nil\n}\n\n// Query InfluxDB and collect batches on batch collector.\nfunc (n *QueryNode) doQuery(in edge.Edge) error {\n\tdefer in.Close()\n\tn.batchesQueried = &expvar.Int{}\n\tn.pointsQueried = &expvar.Int{}\n\n\tn.statMap.Set(statsBatchesQueried, n.batchesQueried)\n\tn.statMap.Set(statsPointsQueried, n.pointsQueried)\n\n\tif n.et.tm.InfluxDBService == nil {\n\t\treturn errors.New(\"InfluxDB not configured, cannot query InfluxDB for batch query\")\n\t}\n\n\tcon, err := n.et.tm.InfluxDBService.NewNamedClient(n.b.Cluster)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to get InfluxDB client\")\n\t}\n\ttickC := n.ticker.Start()\n\tfor {\n\t\tselect {\n\t\tcase <-n.closing:\n\t\t\treturn nil\n\t\tcase <-n.aborting:\n\t\t\treturn errors.New(\"batch doQuery aborted\")\n\t\tcase now := <-tickC:\n\t\t\tn.timer.Start()\n\t\t\t// Update times for query\n\t\t\tstop := now.Add(-1 * n.b.Offset)\n\t\t\tn.query.SetStartTime(stop.Add(-1 * n.b.Period))\n\t\t\tn.query.SetStopTime(stop)\n\n\t\t\tqStr := n.query.String()\n\t\t\tn.logger.Println(\"D! 
starting next batch query:\", qStr)\n\n\t\t\t// Execute query\n\t\t\tq := influxdb.Query{\n\t\t\t\tCommand: qStr,\n\t\t\t}\n\t\t\tresp, err := con.Query(q)\n\t\t\tif err != nil {\n\t\t\t\tn.incrementErrorCount()\n\t\t\t\tn.logger.Println(\"E!\", err)\n\t\t\t\tn.timer.Stop()\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// Collect batches\n\t\t\tfor _, res := range resp.Results {\n\t\t\t\tbatches, err := edge.ResultToBufferedBatches(res, n.byName)\n\t\t\t\tif err != nil {\n\t\t\t\t\tn.incrementErrorCount()\n\t\t\t\t\tn.logger.Println(\"E! failed to understand query result:\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfor _, bch := range batches {\n\t\t\t\t\t// Set stop time based off query bounds\n\t\t\t\t\tif bch.Begin().Time().IsZero() || !n.query.IsGroupedByTime() {\n\t\t\t\t\t\tbch.Begin().SetTime(stop)\n\t\t\t\t\t}\n\n\t\t\t\t\tn.batchesQueried.Add(1)\n\t\t\t\t\tn.pointsQueried.Add(int64(len(bch.Points())))\n\n\t\t\t\t\tn.timer.Pause()\n\t\t\t\t\tif err := in.Collect(bch); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tn.timer.Resume()\n\t\t\t\t}\n\t\t\t}\n\t\t\tn.timer.Stop()\n\t\t}\n\t}\n}\n\nfunc (n *QueryNode) runBatch([]byte) error {\n\terrC := make(chan error, 1)\n\tgo func() {\n\t\tdefer func() {\n\t\t\terr := recover()\n\t\t\tif err != nil {\n\t\t\t\terrC <- fmt.Errorf(\"%v\", err)\n\t\t\t}\n\t\t}()\n\t\tfor bt, ok := n.ins[0].Emit(); ok; bt, ok = n.ins[0].Emit() {\n\t\t\tfor _, child := range n.outs {\n\t\t\t\terr := child.Collect(bt)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrC <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\terrC <- nil\n\t}()\n\tvar queryErr error\n\tn.queryMu.Lock()\n\tif n.queryErr != nil {\n\t\tn.queryMu.Unlock()\n\t\tselect {\n\t\tcase queryErr = <-n.queryErr:\n\t\tcase <-n.aborting:\n\t\t\tqueryErr = errors.New(\"batch queryErr aborted\")\n\t\t}\n\t} else {\n\t\tn.queryMu.Unlock()\n\t}\n\n\tvar err error\n\tselect {\n\tcase err = <-errC:\n\tcase <-n.aborting:\n\t\terr = errors.New(\"batch run aborted\")\n\t}\n\tif queryErr != nil {\n\t\treturn queryErr\n\t}\n\treturn err\n}\n\nfunc (n *QueryNode) stopBatch() {\n\tif n.ticker != nil {\n\t\tn.ticker.Stop()\n\t}\n\tclose(n.closing)\n}\n\ntype ticker interface {\n\tStart() <-chan time.Time\n\tStop()\n\t// Return the next time the ticker will tick after now.\n\tNext(now time.Time) time.Time\n}\n\ntype timeTicker struct {\n\tevery     time.Duration\n\talign     bool\n\talignChan chan time.Time\n\tstopping  chan struct{}\n\tticker    *time.Ticker\n\tmu        sync.Mutex\n\twg        sync.WaitGroup\n}\n\nfunc newTimeTicker(every time.Duration, align bool) *timeTicker {\n\tt := &timeTicker{\n\t\talign: align,\n\t\tevery: every,\n\t}\n\tif align {\n\t\tt.alignChan = make(chan time.Time)\n\t\tt.stopping = make(chan struct{})\n\t}\n\treturn t\n}\n\nfunc (t *timeTicker) Start() <-chan time.Time {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\tif t.alignChan != nil {\n\t\tt.wg.Add(1)\n\t\tgo func() {\n\t\t\tdefer t.wg.Done()\n\t\t\t// Sleep until we are roughly aligned\n\t\t\tnow := time.Now()\n\t\t\tnext := now.Truncate(t.every).Add(t.every)\n\t\t\tafter := time.NewTicker(next.Sub(now))\n\t\t\tselect {\n\t\t\tcase <-after.C:\n\t\t\t\tafter.Stop()\n\t\t\tcase <-t.stopping:\n\t\t\t\tafter.Stop()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.ticker = time.NewTicker(t.every)\n\t\t\t// Send first event since we waited for it explicitly\n\t\t\tt.alignChan <- next\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-t.stopping:\n\t\t\t\t\treturn\n\t\t\t\tcase now := <-t.ticker.C:\n\t\t\t\t\tnow = now.Round(t.every)\n\t\t\t\t\tt.alignChan 
<- now\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t\treturn t.alignChan\n\t} else {\n\t\tt.ticker = time.NewTicker(t.every)\n\t\treturn t.ticker.C\n\t}\n}\n\nfunc (t *timeTicker) Stop() {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\tif t.ticker != nil {\n\t\tt.ticker.Stop()\n\t}\n\tif t.alignChan != nil {\n\t\tclose(t.stopping)\n\t}\n\tt.wg.Wait()\n}\n\nfunc (t *timeTicker) Next(now time.Time) time.Time {\n\tnext := now.Add(t.every)\n\tif t.align {\n\t\tnext = next.Round(t.every)\n\t}\n\treturn next\n}\n\ntype cronTicker struct {\n\texpr    *cronexpr.Expression\n\tticker  chan time.Time\n\tclosing chan struct{}\n\twg      sync.WaitGroup\n}\n\nfunc newCronTicker(cronExpr string) (*cronTicker, error) {\n\texpr, err := cronexpr.Parse(cronExpr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &cronTicker{\n\t\texpr:    expr,\n\t\tticker:  make(chan time.Time),\n\t\tclosing: make(chan struct{}),\n\t}, nil\n}\n\nfunc (c *cronTicker) Start() <-chan time.Time {\n\tc.wg.Add(1)\n\tgo func() {\n\t\tdefer c.wg.Done()\n\t\tfor {\n\t\t\tnow := time.Now()\n\t\t\tnext := c.expr.Next(now)\n\t\t\tdiff := next.Sub(now)\n\t\t\tselect {\n\t\t\tcase <-time.After(diff):\n\t\t\t\tc.ticker <- next\n\t\t\tcase <-c.closing:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\treturn c.ticker\n}\n\nfunc (c *cronTicker) Stop() {\n\tclose(c.closing)\n\tc.wg.Wait()\n}\n\nfunc (c *cronTicker) Next(now time.Time) time.Time {\n\treturn c.expr.Next(now)\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/build.py",
    "content": "#!/usr/bin/python2.7 -u\n\nimport sys\nimport os\nimport subprocess\nimport time\nfrom datetime import datetime\nimport shutil\nimport tempfile\nimport hashlib\nimport re\nimport logging\nimport argparse\n\n################\n#### Kapacitor Variables\n################\n\n# Enable Go vendoring\nos.environ[\"GO15VENDOREXPERIMENT\"] = \"1\"\n\n# PACKAGING VARIABLES\nPACKAGE_NAME = \"kapacitor\"\nINSTALL_ROOT_DIR = \"/usr/bin\"\nLOG_DIR = \"/var/log/kapacitor\"\nDATA_DIR = \"/var/lib/kapacitor\"\nSCRIPT_DIR = \"/usr/lib/kapacitor/scripts\"\n\nINIT_SCRIPT = \"scripts/init.sh\"\nSYSTEMD_SCRIPT = \"scripts/kapacitor.service\"\nPOSTINST_SCRIPT = \"scripts/post-install.sh\"\nPOSTUNINST_SCRIPT = \"scripts/post-uninstall.sh\"\nLOGROTATE_CONFIG = \"etc/logrotate.d/kapacitor\"\nBASH_COMPLETION_SH = \"usr/share/bash-completion/completions/kapacitor\"\nDEFAULT_CONFIG = \"etc/kapacitor/kapacitor.conf\"\nPREINST_SCRIPT = None\n\n# Default AWS S3 bucket for uploads\nDEFAULT_BUCKET = \"dl.influxdata.com/kapacitor/artifacts\"\n\n# META-PACKAGE VARIABLES\nPACKAGE_LICENSE = \"MIT\"\nPACKAGE_URL = \"github.com/influxdata/kapacitor\"\nMAINTAINER = \"support@influxdb.com\"\nVENDOR = \"InfluxData\"\nDESCRIPTION = \"Time series data processing engine\"\n\n# SCRIPT START\ngo_vet_command = \"go tool vet -composites=false\"\nprereqs = [ 'git', 'go' ]\noptional_prereqs = [ 'fpm', 'rpmbuild', 'gpg' ]\n\nfpm_common_args = \"-f -s dir --log error \\\n --vendor {} \\\n --url {} \\\n --after-install {} \\\n --after-remove {} \\\n --license {} \\\n --maintainer {} \\\n --config-files {} \\\n --config-files {} \\\n --directories {} \\\n --description \\\"{}\\\"\".format(\n        VENDOR,\n        PACKAGE_URL,\n        POSTINST_SCRIPT,\n        POSTUNINST_SCRIPT,\n        PACKAGE_LICENSE,\n        MAINTAINER,\n        DEFAULT_CONFIG,\n        LOGROTATE_CONFIG,\n        ' --directories '.join([\n                         LOG_DIR[1:],\n                         DATA_DIR[1:],\n                         SCRIPT_DIR[1:],\n                         os.path.dirname(SCRIPT_DIR[1:]),\n                         os.path.dirname(DEFAULT_CONFIG),\n                    ]),\n        DESCRIPTION)\n\ntargets = {\n    'kapacitor' : './cmd/kapacitor',\n    'kapacitord' : './cmd/kapacitord',\n    'tickfmt' : './tick/cmd/tickfmt'\n}\n\nsupported_builds = {\n    'darwin': [ \"amd64\", \"i386\" ],\n    'linux': [ \"amd64\", \"i386\", \"armhf\", \"arm64\", \"armel\", \"static_i386\", \"static_amd64\" ],\n    'windows': [ \"amd64\", \"i386\" ]\n}\n\nsupported_packages = {\n    \"darwin\": [ \"tar\"],\n    \"linux\": [ \"deb\", \"rpm\", \"tar\"],\n    # experimental\n    \"windows\": [ \"zip\" ]\n}\n\n################\n#### Kapacitor Functions\n################\n\ndef print_banner():\n    logging.info(\"\"\"\n\n'##:::'##::::'###::::'########:::::'###:::::'######::'####:'########::'#######::'########::\n ##::'##::::'## ##::: ##.... ##:::'## ##:::'##... ##:. ##::... ##..::'##.... ##: ##.... ##:\n ##:'##::::'##:. ##:: ##:::: ##::'##:. ##:: ##:::..::: ##::::: ##:::: ##:::: ##: ##:::: ##:\n #####::::'##:::. ##: ########::'##:::. ##: ##:::::::: ##::::: ##:::: ##:::: ##: ########::\n ##. ##::: #########: ##.....::: #########: ##:::::::: ##::::: ##:::: ##:::: ##: ##.. ##:::\n ##:. ##:: ##.... ##: ##:::::::: ##.... ##: ##::: ##:: ##::::: ##:::: ##:::: ##: ##::. ##::\n ##::. ##: ##:::: ##: ##:::::::: ##:::: ##:. ######::'####:::: ##::::. #######:: ##:::. 
##:\n..::::..::..:::::..::..:::::::::..:::::..:::......:::....:::::..::::::.......:::..:::::..::\n Build Script\n\"\"\")\n\ndef create_package_fs(build_root):\n    \"\"\"Create a filesystem structure to mimic the package filesystem.\n    \"\"\"\n    logging.debug(\"Creating a filesystem hierarchy from directory: {}\".format(build_root))\n    # Using [1:] for the path names due to them being absolute\n    # (will overwrite previous paths, per 'os.path.join' documentation)\n    os.makedirs(os.path.join(build_root, INSTALL_ROOT_DIR[1:]))\n    os.makedirs(os.path.join(build_root, LOG_DIR[1:]))\n    os.makedirs(os.path.join(build_root, DATA_DIR[1:]))\n    os.makedirs(os.path.join(build_root, SCRIPT_DIR[1:]))\n    os.makedirs(os.path.join(build_root, os.path.dirname(DEFAULT_CONFIG)))\n    os.makedirs(os.path.join(build_root, os.path.dirname(LOGROTATE_CONFIG)))\n    os.makedirs(os.path.join(build_root, os.path.dirname(BASH_COMPLETION_SH)))\n\ndef package_scripts(build_root, config_only=False):\n    \"\"\"Copy the necessary scripts and configuration files to the package\n    filesystem.\n    \"\"\"\n    if config_only:\n        logging.info(\"Copying configuration to build directory.\")\n        conf_name = os.path.basename(DEFAULT_CONFIG)\n        shutil.copyfile(DEFAULT_CONFIG, os.path.join(build_root, conf_name))\n        os.chmod(os.path.join(build_root, conf_name), 0o644)\n    else:\n        logging.info(\"Copying scripts and configuration to build directory\")\n        shutil.copy(INIT_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], INIT_SCRIPT.split('/')[1]))\n        shutil.copy(SYSTEMD_SCRIPT, os.path.join(build_root, SCRIPT_DIR[1:], SYSTEMD_SCRIPT.split('/')[1]))\n        shutil.copy(LOGROTATE_CONFIG, os.path.join(build_root, LOGROTATE_CONFIG))\n        shutil.copy(BASH_COMPLETION_SH, os.path.join(build_root, BASH_COMPLETION_SH))\n        shutil.copy(DEFAULT_CONFIG, os.path.join(build_root, DEFAULT_CONFIG))\n        os.chmod(os.path.join(build_root, LOGROTATE_CONFIG), 0o644)\n\ndef run_generate():\n    \"\"\"Run 'go generate' to rebuild any static assets.\n    \"\"\"\n    logging.info(\"Running generate...\")\n    run(\"go install ./vendor/github.com/golang/protobuf/protoc-gen-go\")\n    run(\"go install ./vendor/github.com/benbjohnson/tmpl\")\n    generate_cmd = [\"go\", \"generate\"]\n    generate_cmd.extend(go_list())\n    p = subprocess.Popen(generate_cmd)\n    code = p.wait()\n    if code == 0:\n        logging.info(\"Generate succeeded.\")\n        return True\n    else:\n        logging.error(\"Generate failed.\")\n        return False\n\ndef go_get():\n    \"\"\"\n    Retrieve build dependencies or restore pinned dependencies.\n    \"\"\"\n    # Nothing to do, all dependencies are vendored.\n    return True\n\ndef check_nochanges():\n    \"\"\"\n    Check that there are no changes\n    \"\"\"\n    changes = run(\"git status --porcelain\").strip()\n    if len(changes) > 0:\n        logging.error(\"There are un-committed changes in your local branch, --no-uncommited was given, cannot continue\")\n        logging.debug(\"Changes:\\n{}\".format(changes))\n        return False\n    return True\n\n\ndef run_tests(race, parallel, timeout, no_vet):\n    \"\"\"Run the Go test suite on binary output.\n    \"\"\"\n    logging.info(\"Starting tests...\")\n    if race:\n        logging.info(\"Race is enabled.\")\n    if parallel is not None:\n        logging.info(\"Using parallel: {}\".format(parallel))\n    if timeout is not None:\n        logging.info(\"Using timeout: 
{}\".format(timeout))\n    out = run(\"go fmt {}\".format(' '.join(go_list())))\n    if len(out) > 0:\n        logging.error(\"Code not formatted. Please use 'go fmt ./...' to fix formatting errors.\")\n        logging.error(\"{}\".format(out))\n        return False\n    if not no_vet:\n        vet_cmd = go_vet_command + \" {}\".format(\" \".join(go_list(relative=True)))\n        out = run(vet_cmd)\n        if len(out) > 0:\n            logging.error(\"Go vet failed. Please run '{}' and fix any errors.\".format(vet_cmd))\n            logging.error(\"{}\".format(out))\n            return False\n    else:\n        logging.info(\"Skipping 'go vet' call...\")\n    test_command = \"go test -v\"\n    if race:\n        test_command += \" -race\"\n    if parallel is not None:\n        test_command += \" -parallel {}\".format(parallel)\n    if timeout is not None:\n        test_command += \" -timeout {}\".format(timeout)\n    test_command += \" {}\".format(' '.join(go_list()))\n    logging.info(\"Running tests...\")\n    output = run(test_command, printOutput=logging.getLogger().getEffectiveLevel() == logging.DEBUG)\n    return True\n\ndef package_udfs(version, dist_dir):\n    \"\"\"\n    Create packages for UDF agents\n    \"\"\"\n    logging.info(\"Packaging UDF agents\")\n    packages = package_python_udf(version, dist_dir)\n    return packages\n\ndef package_python_udf(version, dist_dir):\n    \"\"\"\n    Bundle python sources for UDF agent\n    \"\"\"\n    logging.debug(\"Packaging python UDF agent\")\n\n    # Update python package version\n    version_file = './udf/agent/py/kapacitor/udf/__init__.py'\n    with open(version_file, 'w') as f:\n        f.write('VERSION = \"{}\"\\n'.format(version))\n\n    # Create tar of python sources\n    fname = \"python-kapacitor_udf-{}.tar.gz\".format(version)\n    outfile = os.path.join(dist_dir, fname)\n\n    tar_cmd = ['tar', '-cz', '-C', './udf/agent/py', '--transform', 's/^./kapacitor_udf-{}/'.format(version), '-f']\n    tar_cmd.append(outfile)\n    exclude_list = ['*.pyc', '*.pyo', '__pycache__']\n    for e in exclude_list:\n        tar_cmd.append('--exclude='+e)\n    tar_cmd.append('./')\n    p = subprocess.Popen(tar_cmd)\n    code = p.wait()\n    if code != 0:\n        logging.error(\"Python UDF tar failed.\")\n        sys.exit(1)\n\n    # Revert version file\n    version_file = './udf/agent/py/kapacitor/udf/__init__.py'\n    with open(version_file, 'w') as f:\n        f.write('VERSION = \"\"\\n')\n\n    return [outfile]\n\n\n################\n#### All Kapacitor-specific content above this line\n################\n\ndef run(command, allow_failure=False, shell=False, printOutput=False):\n    \"\"\"\n    Run shell command (convenience wrapper around subprocess).\n\n    If printOutput is True then the output is sent to STDOUT and not returned\n    \"\"\"\n    out = None\n    logging.debug(\"{}\".format(command))\n    try:\n        cmd = command\n        if not shell:\n            cmd = command.split()\n\n        stdout = subprocess.PIPE\n        stderr = subprocess.STDOUT\n        if printOutput:\n            stdout = None\n\n        p = subprocess.Popen(cmd, shell=shell, stdout=stdout, stderr=stderr)\n        out, _ = p.communicate()\n        if out is not None:\n            out = out.decode('utf-8').strip()\n        if p.returncode != 0:\n            if allow_failure:\n                logging.warn(u\"Command '{}' failed with error: {}\".format(command, out))\n                return None\n            else:\n                logging.error(u\"Command '{}' 
failed with error: {}\".format(command, out))\n                sys.exit(1)\n    except OSError as e:\n        if allow_failure:\n            logging.warn(\"Command '{}' failed with error: {}\".format(command, e))\n            return out\n        else:\n            logging.error(\"Command '{}' failed with error: {}\".format(command, e))\n            sys.exit(1)\n    else:\n        return out\n\ndef create_temp_dir(prefix = None):\n    \"\"\" Create temporary directory with optional prefix.\n    \"\"\"\n    if prefix is None:\n        return tempfile.mkdtemp(prefix=\"{}-build.\".format(PACKAGE_NAME))\n    else:\n        return tempfile.mkdtemp(prefix=prefix)\n\ndef increment_minor_version(version):\n    \"\"\"Return the version with the minor version incremented and patch\n    version set to zero.\n    \"\"\"\n    ver_list = version.split('.')\n    if len(ver_list) != 3:\n        logging.warn(\"Could not determine how to increment version '{}', will just use provided version.\".format(version))\n        return version\n    ver_list[1] = str(int(ver_list[1]) + 1)\n    ver_list[2] = str(0)\n    inc_version = '.'.join(ver_list)\n    logging.debug(\"Incremented version from '{}' to '{}'.\".format(version, inc_version))\n    return inc_version\n\ndef get_current_version_tag():\n    \"\"\"Retrieve the raw git version tag.\n    \"\"\"\n    version = run(\"git describe --always --tags --abbrev=0\")\n    return version\n\ndef get_current_version():\n    \"\"\"Parse version information from git tag output.\n    \"\"\"\n    version_tag = get_current_version_tag()\n    # Remove leading 'v'\n    if version_tag[0] == 'v':\n        version_tag = version_tag[1:]\n    # Replace any '-'/'_' with '~'\n    if '-' in version_tag:\n        version_tag = version_tag.replace(\"-\",\"~\")\n    if '_' in version_tag:\n        version_tag = version_tag.replace(\"_\",\"~\")\n    return version_tag\n\ndef get_current_commit(short=False):\n    \"\"\"Retrieve the current git commit.\n    \"\"\"\n    command = None\n    if short:\n        command = \"git log --pretty=format:'%h' -n 1\"\n    else:\n        command = \"git rev-parse HEAD\"\n    out = run(command)\n    return out.strip('\\'\\n\\r ')\n\ndef get_current_branch():\n    \"\"\"Retrieve the current git branch.\n    \"\"\"\n    command = \"git rev-parse --abbrev-ref HEAD\"\n    out = run(command)\n    return out.strip()\n\ndef local_changes():\n    \"\"\"Return True if there are local un-committed changes.\n    \"\"\"\n    output = run(\"git diff-files --ignore-submodules --\").strip()\n    if len(output) > 0:\n        return True\n    return False\n\ndef get_system_arch():\n    \"\"\"Retrieve current system architecture.\n    \"\"\"\n    arch = os.uname()[4]\n    if arch == \"x86_64\":\n        arch = \"amd64\"\n    elif arch == \"386\":\n        arch = \"i386\"\n    elif 'arm' in arch:\n        # Prevent uname from reporting full ARM arch (e.g. 'armv7l')\n        arch = \"arm\"\n    return arch\n\ndef get_system_platform():\n    \"\"\"Retrieve current system platform.\n    \"\"\"\n    if sys.platform.startswith(\"linux\"):\n        return \"linux\"\n    else:\n        return sys.platform\n\ndef get_go_version():\n    \"\"\"Retrieve version information for Go.\n    \"\"\"\n    out = run(\"go version\")\n    matches = re.search('go version go(\S+)', out)\n    if matches is not None:\n        return matches.groups()[0].strip()\n    return None\n\ndef check_path_for(b):\n    \"\"\"Check the user's path for the provided binary.\n    \"\"\"\n    def is_exe(fpath):\n  
      return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n    for path in os.environ[\"PATH\"].split(os.pathsep):\n        path = path.strip('\"')\n        full_path = os.path.join(path, b)\n        if os.path.isfile(full_path) and os.access(full_path, os.X_OK):\n            return full_path\n\ndef check_environ(build_dir = None):\n    \"\"\"Check environment for common Go variables.\n    \"\"\"\n    logging.info(\"Checking environment...\")\n    for v in [ \"GOPATH\", \"GOBIN\", \"GOROOT\" ]:\n        logging.debug(\"Using '{}' for {}\".format(os.environ.get(v), v))\n\n    cwd = os.getcwd()\n    if build_dir is None and os.environ.get(\"GOPATH\") and os.environ.get(\"GOPATH\") not in cwd:\n        logging.warn(\"Your current directory is not under your GOPATH. This may lead to build failures.\")\n    return True\n\ndef check_prereqs():\n    \"\"\"Check user path for required dependencies.\n    \"\"\"\n    logging.info(\"Checking for dependencies...\")\n    for req in prereqs:\n        if not check_path_for(req):\n            logging.error(\"Could not find dependency: {}\".format(req))\n            return False\n    return True\n\ndef upload_packages(packages, bucket_name=None, overwrite=False):\n    \"\"\"Upload provided package output to AWS S3.\n    \"\"\"\n    logging.debug(\"Uploading files to bucket '{}': {}\".format(bucket_name, packages))\n    try:\n        import boto\n        from boto.s3.key import Key\n        from boto.s3.connection import OrdinaryCallingFormat\n        logging.getLogger(\"boto\").setLevel(logging.WARNING)\n    except ImportError:\n        logging.warn(\"Cannot upload packages without 'boto' Python library!\")\n        return False\n    logging.info(\"Connecting to AWS S3...\")\n    # Up the number of attempts to 10 from default of 1\n    boto.config.add_section(\"Boto\")\n    boto.config.set(\"Boto\", \"metadata_service_num_attempts\", \"10\")\n    c = boto.connect_s3(calling_format=OrdinaryCallingFormat())\n    if bucket_name is None:\n        bucket_name = DEFAULT_BUCKET\n    bucket = c.get_bucket(bucket_name.split('/')[0])\n    for p in packages:\n        if '/' in bucket_name:\n            # Allow for nested paths within the bucket name (ex:\n            # bucket/folder). 
Assuming forward-slashes as path\n            # delimiter.\n            name = os.path.join('/'.join(bucket_name.split('/')[1:]),\n                                os.path.basename(p))\n        else:\n            name = os.path.basename(p)\n        logging.debug(\"Using key: {}\".format(name))\n        if bucket.get_key(name) is None or overwrite:\n            logging.info(\"Uploading file {}\".format(name))\n            k = Key(bucket)\n            k.key = name\n            if overwrite:\n                n = k.set_contents_from_filename(p, replace=True)\n            else:\n                n = k.set_contents_from_filename(p, replace=False)\n            k.make_public()\n        else:\n            logging.warn(\"Not uploading file {}, as it already exists in the target bucket.\".format(name))\n    return True\n\ndef go_list(vendor=False, relative=False):\n    \"\"\"\n    Return a list of packages\n    If vendor is False vendor package are not included\n    If relative is True the package prefix defined by PACKAGE_URL is stripped\n    \"\"\"\n    p = subprocess.Popen([\"go\", \"list\", \"./...\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n    out, err = p.communicate()\n    packages = out.split('\\n')\n    if packages[-1] == '':\n        packages = packages[:-1]\n    if not vendor:\n        non_vendor = []\n        for p in packages:\n            if '/vendor/' not in p:\n                non_vendor.append(p)\n        packages = non_vendor\n    if relative:\n        relative_pkgs = []\n        for p in packages:\n            r = p.replace(PACKAGE_URL, '.')\n            if r != '.':\n                relative_pkgs.append(r)\n        packages = relative_pkgs\n    return packages\n\ndef build(version=None,\n          platform=None,\n          arch=None,\n          nightly=False,\n          race=False,\n          clean=False,\n          outdir=\".\",\n          tags=[],\n          static=False):\n    \"\"\"Build each target for the specified architecture and platform.\n    \"\"\"\n    logging.info(\"Starting build for {}/{}...\".format(platform, arch))\n    logging.info(\"Using Go version: {}\".format(get_go_version()))\n    logging.info(\"Using git branch: {}\".format(get_current_branch()))\n    logging.info(\"Using git commit: {}\".format(get_current_commit()))\n    if static:\n        logging.info(\"Using statically-compiled output.\")\n    if race:\n        logging.info(\"Race is enabled.\")\n    if len(tags) > 0:\n        logging.info(\"Using build tags: {}\".format(','.join(tags)))\n\n    logging.info(\"Sending build output to: {}\".format(outdir))\n    if not os.path.exists(outdir):\n        os.makedirs(outdir)\n    elif clean and outdir != '/' and outdir != \".\":\n        logging.info(\"Cleaning build directory '{}' before building.\".format(outdir))\n        shutil.rmtree(outdir)\n        os.makedirs(outdir)\n\n    logging.info(\"Using version '{}' for build.\".format(version))\n\n    tmp_build_dir = create_temp_dir()\n    for target, path in targets.items():\n        logging.info(\"Building target: {}\".format(target))\n        build_command = \"\"\n\n        # Handle static binary output\n        if static is True or \"static_\" in arch:\n            if \"static_\" in arch:\n                static = True\n                arch = arch.replace(\"static_\", \"\")\n            build_command += \"CGO_ENABLED=0 \"\n\n        # Handle variations in architecture output\n        if arch == \"i386\" or arch == \"i686\":\n            arch = \"386\"\n        elif \"arm\" in arch:\n            
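# Remember the specific ARM variant (armel/armhf/arm64) so the correct\n            # GOARM revision can be chosen below; only the 32-bit variants collapse\n            # to GOARCH=arm.\n            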
arch = \"arm\"\n        build_command += \"GOOS={} GOARCH={} \".format(platform, arch)\n\n        if \"arm\" in arch:\n            if arch == \"armel\":\n                build_command += \"GOARM=5 \"\n            elif arch == \"armhf\" or arch == \"arm\":\n                build_command += \"GOARM=6 \"\n            elif arch == \"arm64\":\n                # TODO(rossmcdonald) - Verify this is the correct setting for arm64\n                build_command += \"GOARM=7 \"\n            else:\n                logging.error(\"Invalid ARM architecture specified: {}\".format(arch))\n                logging.error(\"Please specify either 'armel', 'armhf', or 'arm64'.\")\n                return False\n        if platform == 'windows':\n            target = target + '.exe'\n        build_command += \"go build -o {} \".format(os.path.join(outdir, target))\n        if race:\n            build_command += \"-race \"\n        if len(tags) > 0:\n            build_command += \"-tags {} \".format(','.join(tags))\n        if \"1.4\" in get_go_version():\n            if static:\n                build_command += \"-ldflags=\\\"-s -X main.version {} -X main.branch {} -X main.commit {}\\\" \".format(version,\n                                                                                                                  get_current_branch(),\n                                                                                                                  get_current_commit())\n            else:\n                build_command += \"-ldflags=\\\"-X main.version {} -X main.branch {} -X main.commit {}\\\" \".format(version,\n                                                                                                               get_current_branch(),\n                                                                                                               get_current_commit())\n\n        else:\n            # Starting with Go 1.5, the linker flag arguments changed to 'name=value' from 'name value'\n            if static:\n                build_command += \"-ldflags=\\\"-s -X main.version={} -X main.branch={} -X main.commit={}\\\" \".format(version,\n                                                                                                                  get_current_branch(),\n                                                                                                                  get_current_commit())\n            else:\n                build_command += \"-ldflags=\\\"-X main.version={} -X main.branch={} -X main.commit={}\\\" \".format(version,\n                                                                                                               get_current_branch(),\n                                                                                                               get_current_commit())\n        if static:\n            build_command += \"-a -installsuffix cgo \"\n        build_command += path\n        start_time = datetime.utcnow()\n        run(build_command, shell=True)\n        end_time = datetime.utcnow()\n        logging.info(\"Time taken: {}s\".format((end_time - start_time).total_seconds()))\n    return True\n\ndef generate_md5_from_file(path):\n    \"\"\"Generate MD5 signature based on the contents of the file at path.\n    \"\"\"\n    m = hashlib.md5()\n    with open(path, 'rb') as f:\n        for chunk in iter(lambda: f.read(4096), b\"\"):\n            m.update(chunk)\n    return m.hexdigest()\n\ndef generate_sig_from_file(path):\n    \"\"\"Generate a detached GPG 
signature from the file at path.\n    \"\"\"\n    logging.debug(\"Generating GPG signature for file: {}\".format(path))\n    gpg_path = check_path_for('gpg')\n    if gpg_path is None:\n        logging.warn(\"gpg binary not found on path! Skipping signature creation.\")\n        return False\n    if os.environ.get(\"GNUPG_HOME\") is not None:\n        run('gpg --homedir {} --armor --yes --detach-sign {}'.format(os.environ.get(\"GNUPG_HOME\"), path))\n    else:\n        run('gpg --armor --detach-sign --yes {}'.format(path))\n    return True\n\ndef package(build_output, pkg_name, version, nightly=False, iteration=1, static=False, release=False):\n    \"\"\"Package the output of the build process.\n    \"\"\"\n    outfiles = []\n    tmp_build_dir = create_temp_dir()\n    logging.debug(\"Packaging for build output: {}\".format(build_output))\n    logging.info(\"Using temporary directory: {}\".format(tmp_build_dir))\n    try:\n        for platform in build_output:\n            # Create top-level folder displaying which platform (linux, etc)\n            os.makedirs(os.path.join(tmp_build_dir, platform))\n            for arch in build_output[platform]:\n                logging.info(\"Creating packages for {}/{}\".format(platform, arch))\n                # Create second-level directory displaying the architecture (amd64, etc)\n                current_location = build_output[platform][arch]\n\n                # Create directory tree to mimic file system of package\n                build_root = os.path.join(tmp_build_dir,\n                                          platform,\n                                          arch,\n                                          '{}-{}-{}'.format(PACKAGE_NAME, version, iteration))\n                os.makedirs(build_root)\n\n                # Copy packaging scripts to build directory\n                if platform == \"windows\" or static or \"static_\" in arch:\n                    # For windows and static builds, just copy\n                    # binaries to root of package (no other scripts or\n                    # directories)\n                    package_scripts(build_root, config_only=True)\n                else:\n                    create_package_fs(build_root)\n                    package_scripts(build_root)\n\n                for binary in targets:\n                    # Copy newly-built binaries to packaging directory\n                    if platform == 'windows':\n                        binary = binary + '.exe'\n                    if platform == 'windows' or static or \"static_\" in arch:\n                        # Where the binary should go in the package filesystem\n                        to = os.path.join(build_root, binary)\n                        # Where the binary currently is located\n                        fr = os.path.join(current_location, binary)\n                    else:\n                        # Where the binary currently is located\n                        fr = os.path.join(current_location, binary)\n                        # Where the binary should go in the package filesystem\n                        to = os.path.join(build_root, INSTALL_ROOT_DIR[1:], binary)\n                    shutil.copy(fr, to)\n\n                for package_type in supported_packages[platform]:\n                    # Package the directory structure for each package type for the platform\n                    logging.debug(\"Packaging directory '{}' as '{}'.\".format(build_root, package_type))\n                    name = pkg_name\n                    # Reset 
version, iteration, and current location on each run\n                    # since they may be modified below.\n                    package_version = version\n                    package_iteration = iteration\n                    if \"static_\" in arch:\n                        # Remove the \"static_\" from the displayed arch on the package\n                        package_arch = arch.replace(\"static_\", \"\")\n                    else:\n                        package_arch = arch\n                    if not release and not nightly:\n                        # For non-release builds, just use the commit hash as the version\n                        package_version = \"{}~{}\".format(version,\n                                                         get_current_commit(short=True))\n                        package_iteration = \"0\"\n                    package_build_root = build_root\n                    current_location = build_output[platform][arch]\n\n                    if package_type in ['zip', 'tar']:\n                        # For tars and zips, start the packaging one folder above\n                        # the build root (to include the package name)\n                        package_build_root = os.path.join('/', '/'.join(build_root.split('/')[:-1]))\n                        if nightly:\n                            if static or \"static_\" in arch:\n                                name = '{}-static-nightly_{}_{}'.format(name,\n                                                                        platform,\n                                                                        package_arch)\n                            else:\n                                name = '{}-nightly_{}_{}'.format(name,\n                                                                 platform,\n                                                                 package_arch)\n                        else:\n                            if static or \"static_\" in arch:\n                                name = '{}-{}-static_{}_{}'.format(name,\n                                                                   package_version,\n                                                                   platform,\n                                                                   package_arch)\n                            else:\n                                name = '{}-{}_{}_{}'.format(name,\n                                                            package_version,\n                                                            platform,\n                                                            package_arch)\n                        current_location = os.path.join(os.getcwd(), current_location)\n                        if package_type == 'tar':\n                            tar_command = \"cd {} && tar -cvzf {}.tar.gz ./*\".format(package_build_root, name)\n                            run(tar_command, shell=True)\n                            run(\"mv {}.tar.gz {}\".format(os.path.join(package_build_root, name), current_location), shell=True)\n                            outfile = os.path.join(current_location, name + \".tar.gz\")\n                            outfiles.append(outfile)\n                        elif package_type == 'zip':\n                            zip_command = \"cd {} && zip -r {}.zip ./*\".format(package_build_root, name)\n                            run(zip_command, shell=True)\n                            run(\"mv {}.zip {}\".format(os.path.join(package_build_root, name), current_location), 
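# note: like the tar branch above, this assumes the generated paths contain no spaces\n                            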
shell=True)\n                            outfile = os.path.join(current_location, name + \".zip\")\n                            outfiles.append(outfile)\n                    elif static or \"static_\" in arch:\n                        # Static builds are distributed as archives only; skip\n                        # the system package formats for them.\n                        logging.info(\"Skipping package type '{}' for static builds.\".format(package_type))\n                    else:\n                        fpm_command = \"fpm {} --name {} -a {} -t {} --version {} --iteration {} -C {} -p {} \".format(\n                            fpm_common_args,\n                            name,\n                            package_arch,\n                            package_type,\n                            package_version,\n                            package_iteration,\n                            package_build_root,\n                            current_location)\n                        if package_type == \"rpm\":\n                            fpm_command += \"--depends coreutils --rpm-posttrans {}\".format(POSTINST_SCRIPT)\n                        out = run(fpm_command, shell=True)\n                        matches = re.search(':path=>\"(.*)\"', out)\n                        outfile = None\n                        if matches is not None:\n                            outfile = matches.groups()[0]\n                        if outfile is None:\n                            logging.warn(\"Could not determine output from packaging output!\")\n                        else:\n                            if nightly:\n                                # Strip the nightly version from the package name\n                                new_outfile = outfile.replace(\"{}-{}\".format(package_version, package_iteration), \"nightly\")\n                                os.rename(outfile, new_outfile)\n                                outfile = new_outfile\n                            else:\n                                if package_type == 'rpm':\n                                    # rpm converts any dashes to underscores\n                                    package_version = package_version.replace(\"-\", \"_\")\n                                new_outfile = outfile.replace(\"{}-{}\".format(package_version, package_iteration), package_version)\n                                os.rename(outfile, new_outfile)\n                                outfile = new_outfile\n                            outfiles.append(os.path.join(os.getcwd(), outfile))\n        logging.debug(\"Produced package files: {}\".format(outfiles))\n        return outfiles\n    finally:\n        # Cleanup\n        shutil.rmtree(tmp_build_dir)\n\ndef main(args):\n    global PACKAGE_NAME\n\n    if args.release and args.nightly:\n        logging.error(\"Cannot be both a nightly and a release.\")\n        return 1\n\n    if args.nightly:\n        args.version = increment_minor_version(args.version)\n        args.version = \"{}~n{}\".format(args.version,\n                                       datetime.utcnow().strftime(\"%Y%m%d%H%M\"))\n        args.iteration = 0\n\n    # Validate version\n    if not re.match(r'^[-\\d\\w\\.]+', args.version):\n        logging.error(\"Invalid version {}\".format(args.version))\n        return 1\n\n    # Pre-build checks\n    check_environ()\n    if not check_prereqs():\n        return 1\n    if args.build_tags is None:\n        args.build_tags = []\n    else:\n        args.build_tags = args.build_tags.split(',')\n\n    orig_commit = get_current_commit(short=True)\n    orig_branch = get_current_branch()\n\n    if args.platform not in supported_builds and args.platform != 'all':\n        logging.error(\"Invalid build platform: {}\".format(args.platform))\n        return 1\n\n    build_output = {}\n\n    if args.branch != orig_branch and args.commit != orig_commit:\n        logging.error(\"Can only specify one branch or commit to build from.\")\n        return 1\n    elif args.branch != orig_branch:\n        logging.info(\"Moving to git branch: {}\".format(args.branch))\n        run(\"git checkout {}\".format(args.branch))\n    elif args.commit != orig_commit:\n        logging.info(\"Moving to git commit: {}\".format(args.commit))\n        run(\"git checkout {}\".format(args.commit))\n\n    if not args.no_get:\n        if not go_get():\n            return 1\n\n    if args.generate:\n        if not run_generate():\n            return 1\n\n    if args.no_uncommitted:\n        if not check_nochanges():\n            return 1\n\n    if args.test:\n        if not run_tests(args.race, args.parallel, args.timeout, args.no_vet):\n            return 1\n\n    platforms = []\n    single_build = True\n    if args.platform == 'all':\n        platforms = supported_builds.keys()\n        single_build = False\n    else:\n        platforms = [args.platform]\n\n    for platform in platforms:\n        build_output[platform] = {}\n        archs = []\n        if args.arch == \"all\":\n            single_build = False\n            archs = supported_builds.get(platform)\n        else:\n            archs = [args.arch]\n\n        for arch in archs:\n            od = args.outdir\n            if not single_build:\n                od = os.path.join(args.outdir, platform, arch)\n            if not build(version=args.version,\n                         platform=platform,\n                         arch=arch,\n                         nightly=args.nightly,\n                         race=args.race,\n                         clean=args.clean,\n                         outdir=od,\n                         tags=args.build_tags,\n                         static=args.static):\n                return 1\n            build_output[platform][arch] = od\n\n    # Build packages\n    if args.package:\n        if not check_path_for(\"fpm\"):\n            logging.error(\"FPM ruby gem required for packaging. 
Stopping.\")\n            return 1\n        packages = package(build_output,\n                           args.name,\n                           args.version,\n                           nightly=args.nightly,\n                           iteration=args.iteration,\n                           static=args.static,\n                           release=args.release)\n\n        if args.package_udfs:\n            packages += package_udfs(args.version, args.outdir)\n\n        if args.sign:\n            logging.debug(\"Generating GPG signatures for packages: {}\".format(packages))\n            sigs = [] # retain signatures so they can be uploaded with packages\n            for p in packages:\n                if generate_sig_from_file(p):\n                    sigs.append(p + '.asc')\n                else:\n                    logging.error(\"Creation of signature for package [{}] failed!\".format(p))\n                    return 1\n            packages += sigs\n        if args.upload:\n            logging.debug(\"Files staged for upload: {}\".format(packages))\n            if args.nightly:\n                args.upload_overwrite = True\n            if not upload_packages(packages, bucket_name=args.bucket, overwrite=args.upload_overwrite):\n                return 1\n        logging.info(\"Packages created:\")\n        for p in packages:\n            logging.info(\"{} (MD5={})\".format(p.split('/')[-1:][0],\n                                              generate_md5_from_file(p)))\n\n\n    if orig_branch != get_current_branch():\n        logging.info(\"Moving back to original git branch: {}\".format(args.branch))\n        run(\"git checkout {}\".format(orig_branch))\n\n    return 0\n\nif __name__ == '__main__':\n    LOG_LEVEL = logging.INFO\n    if '--debug' in sys.argv[1:]:\n        LOG_LEVEL = logging.DEBUG\n    log_format = '[%(levelname)s] %(funcName)s: %(message)s'\n    logging.basicConfig(level=LOG_LEVEL,\n                        format=log_format)\n\n    parser = argparse.ArgumentParser(description='InfluxDB build and packaging script.')\n    parser.add_argument('--verbose','-v','--debug',\n                        action='store_true',\n                        help='Use debug output')\n    parser.add_argument('--outdir', '-o',\n                        metavar='<output directory>',\n                        default='./build/',\n                        type=os.path.abspath,\n                        help='Output directory')\n    parser.add_argument('--name', '-n',\n                        metavar='<name>',\n                        default=PACKAGE_NAME,\n                        type=str,\n                        help='Name to use for package name (when package is specified)')\n    parser.add_argument('--arch',\n                        metavar='<amd64|i386|armhf|arm64|armel|all>',\n                        type=str,\n                        default=get_system_arch(),\n                        help='Target architecture for build output')\n    parser.add_argument('--platform',\n                        metavar='<linux|darwin|windows|all>',\n                        type=str,\n                        default=get_system_platform(),\n                        help='Target platform for build output')\n    parser.add_argument('--branch',\n                        metavar='<branch>',\n                        type=str,\n                        default=get_current_branch(),\n                        help='Build from a specific branch')\n    parser.add_argument('--commit',\n                        metavar='<commit>',\n        
                type=str,\n                        default=get_current_commit(short=True),\n                        help='Build from a specific commit')\n    parser.add_argument('--version',\n                        metavar='<version>',\n                        type=str,\n                        default=get_current_version(),\n                        help='Version information to apply to build output (ex: 0.12.0)')\n    parser.add_argument('--iteration',\n                        metavar='<package iteration>',\n                        type=str,\n                        default=\"1\",\n                        help='Package iteration to apply to build output (defaults to 1)')\n    parser.add_argument('--stats',\n                        action='store_true',\n                        help='Emit build metrics (requires InfluxDB Python client)')\n    parser.add_argument('--stats-server',\n                        metavar='<hostname:port>',\n                        type=str,\n                        help='Send build stats to InfluxDB using provided hostname and port')\n    parser.add_argument('--stats-db',\n                        metavar='<database name>',\n                        type=str,\n                        help='Send build stats to InfluxDB using provided database name')\n    parser.add_argument('--nightly',\n                        action='store_true',\n                        help='Mark build output as nightly build (will incremement the minor version)')\n    parser.add_argument('--update',\n                        action='store_true',\n                        help='Update build dependencies prior to building')\n    parser.add_argument('--package',\n                        action='store_true',\n                        help='Package binary output')\n    parser.add_argument('--package-udfs',\n                        action='store_true',\n                        help='Package UDF agents')\n    parser.add_argument('--release',\n                        action='store_true',\n                        help='Mark build output as release')\n    parser.add_argument('--clean',\n                        action='store_true',\n                        help='Clean output directory before building')\n    parser.add_argument('--no-get',\n                        action='store_true',\n                        help='Do not retrieve pinned dependencies when building')\n    parser.add_argument('--no-uncommitted',\n                        action='store_true',\n                        help='Fail if uncommitted changes exist in the working directory')\n    parser.add_argument('--upload',\n                        action='store_true',\n                        help='Upload output packages to AWS S3')\n    parser.add_argument('--upload-overwrite','-w',\n                        action='store_true',\n                        help='Upload output packages to AWS S3')\n    parser.add_argument('--bucket',\n                        metavar='<S3 bucket name>',\n                        type=str,\n                        default=DEFAULT_BUCKET,\n                        help='Destination bucket for uploads')\n    parser.add_argument('--generate',\n                        action='store_true',\n                        help='Run \"go generate\" before building')\n    parser.add_argument('--build-tags',\n                        metavar='<tags>',\n                        help='Optional build tags to use for compilation')\n    parser.add_argument('--static',\n                        action='store_true',\n                        
help='Create statically-compiled binary output')\n    parser.add_argument('--sign',\n                        action='store_true',\n                        help='Create GPG detached signatures for packages (when package is specified)')\n    parser.add_argument('--test',\n                        action='store_true',\n                        help='Run tests (does not produce build output)')\n    parser.add_argument('--no-vet',\n                        action='store_true',\n                        help='Do not run \"go vet\" when running tests')\n    parser.add_argument('--race',\n                        action='store_true',\n                        help='Enable race flag for build output')\n    parser.add_argument('--parallel',\n                        metavar='<num threads>',\n                        type=int,\n                        help='Number of tests to run simultaneously')\n    parser.add_argument('--timeout',\n                        metavar='<timeout>',\n                        type=str,\n                        help='Timeout for tests before failing')\n    args = parser.parse_args()\n    print_banner()\n    sys.exit(main(args))\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/build.sh",
    "content": "#!/bin/bash\n# Run the build utility via Docker\n\nset -e\n\n# Make sure our working dir is the dir of the script\nDIR=$(cd $(dirname ${BASH_SOURCE[0]}) && pwd)\ncd $DIR\n\n# Unique number for this build\nBUILD_NUM=${BUILD_NUM-$RANDOM}\n# Home dir of the docker user\nHOME_DIR=/root\n\nimagename=kapacitor-builder-img-$BUILD_NUM\ndataname=kapacitor-data-$BUILD_NUM\n\n# Build new docker image\ndocker build -f Dockerfile_build_ubuntu64 -t $imagename $DIR\n\n# Build new docker image\ndocker build -f Dockerfile_build_ubuntu64 -t influxdata/kapacitor-builder $DIR\n\n# Create data volume with code\ndocker create \\\n    --name $dataname \\\n    -v \"$HOME_DIR/go/src/github.com/influxdata/kapacitor\" \\\n    $imagename /bin/true\ndocker cp \"$DIR/\" \"$dataname:$HOME_DIR/go/src/github.com/influxdata/\"\n\necho \"Running build.py\"\n# Run docker\ndocker run \\\n    --rm \\\n    --volumes-from $dataname \\\n    -e AWS_ACCESS_KEY_ID=\"$AWS_ACCESS_KEY_ID\" \\\n    -e AWS_SECRET_ACCESS_KEY=\"$AWS_SECRET_ACCESS_KEY\" \\\n    $imagename \\\n    \"$@\"\n\ndocker cp \"$dataname:$HOME_DIR/go/src/github.com/influxdata/kapacitor/build\" \\\n    ./\ndocker rm -v $dataname\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/circle-test.sh",
    "content": "#!/bin/bash\n#\n# This is the InfluxDB test script for CircleCI, it is a light wrapper around ./test.sh.\n\n# Exit if any command fails\nset -e\n\n# Get dir of script and make it is our working directory.\nDIR=$(cd $(dirname \"${BASH_SOURCE[0]}\") && pwd)\ncd $DIR\n\n\nexport NO_UNCOMMITTED=true\nexport BUILD_NUM=$CIRCLE_BUILD_NUM\n\n# Get number of test environments.\ncount=$(./test.sh count)\n# Check that we aren't wasting CircleCI nodes.\nif [ $CIRCLE_NODE_TOTAL -gt $count ]\nthen\n    echo \"More CircleCI nodes allocated than tests environments to run!\"\n    exit 1\nfi\n\n# Map CircleCI nodes to test environments.\ntests=$(seq 0 $((count - 1)))\nfor i in $tests\ndo\n    mine=$(( $i % $CIRCLE_NODE_TOTAL ))\n    if [ $mine -eq $CIRCLE_NODE_INDEX ]\n    then\n        echo \"Running test env index: $i\"\n        ./test.sh $i\n    fi\ndone\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/circle.yml",
    "content": "machine:\n    services:\n        - docker\n\ndependencies:\n    pre:\n      # setup ipv6\n      - sudo sysctl -w net.ipv6.conf.lo.disable_ipv6=0 net.ipv6.conf.default.disable_ipv6=0 net.ipv6.conf.all.disable_ipv6=0\n    cache_directories:\n        - \"~/docker\"\n    override:\n      - ./test.sh save\n\ntest:\n    override:\n        - bash circle-test.sh:\n            parallel: true\n\ndeployment:\n    release:\n        tag: /v[0-9]+(\\.[0-9]+){2}(-(rc|beta)[0-9]+)?/\n        commands:\n            - ./build.sh --debug --clean --generate --package --package-udfs --upload --bucket=dl.influxdata.com/kapacitor/releases --platform=all --arch=all --release\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/combine.go",
    "content": "package kapacitor\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/models\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n\t\"github.com/influxdata/kapacitor/tick/ast\"\n\t\"github.com/influxdata/kapacitor/tick/stateful\"\n)\n\ntype CombineNode struct {\n\tnode\n\tc *pipeline.CombineNode\n\n\texpressions []stateful.Expression\n\tscopePools  []stateful.ScopePool\n\n\tcombination combination\n}\n\n// Create a new CombineNode, which combines a stream with itself dynamically.\nfunc newCombineNode(et *ExecutingTask, n *pipeline.CombineNode, l *log.Logger) (*CombineNode, error) {\n\tcn := &CombineNode{\n\t\tc:           n,\n\t\tnode:        node{Node: n, et: et, logger: l},\n\t\tcombination: combination{max: n.Max},\n\t}\n\n\t// Create stateful expressions\n\tcn.expressions = make([]stateful.Expression, len(n.Lambdas))\n\tcn.scopePools = make([]stateful.ScopePool, len(n.Lambdas))\n\tfor i, lambda := range n.Lambdas {\n\t\tstatefulExpr, err := stateful.NewExpression(lambda.Expression)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Failed to compile %v expression: %v\", i, err)\n\t\t}\n\t\tcn.expressions[i] = statefulExpr\n\t\tcn.scopePools[i] = stateful.NewScopePool(ast.FindReferenceVariables(lambda.Expression))\n\t}\n\tcn.node.runF = cn.runCombine\n\treturn cn, nil\n}\n\nfunc (n *CombineNode) runCombine([]byte) error {\n\tconsumer := edge.NewGroupedConsumer(\n\t\tn.ins[0],\n\t\tn,\n\t)\n\tn.statMap.Set(statCardinalityGauge, consumer.CardinalityVar())\n\treturn consumer.Consume()\n}\n\nfunc (n *CombineNode) NewGroup(group edge.GroupInfo, first edge.PointMeta) (edge.Receiver, error) {\n\texpressions := make([]stateful.Expression, len(n.expressions))\n\tfor i, expr := range n.expressions {\n\t\texpressions[i] = expr.CopyReset()\n\t}\n\treturn &combineBuffer{\n\t\tn:           n,\n\t\ttime:        first.Time(),\n\t\tname:        first.Name(),\n\t\tgroupInfo:   group,\n\t\texpressions: expressions,\n\t\tc:           n.combination,\n\t}, nil\n}\n\ntype combineBuffer struct {\n\tn           *CombineNode\n\ttime        time.Time\n\tname        string\n\tgroupInfo   edge.GroupInfo\n\tpoints      []edge.FieldsTagsTimeSetter\n\texpressions []stateful.Expression\n\tc           combination\n\n\tbegin edge.BeginBatchMessage\n}\n\nfunc (b *combineBuffer) BeginBatch(begin edge.BeginBatchMessage) error {\n\tb.n.timer.Start()\n\tdefer b.n.timer.Stop()\n\n\tb.name = begin.Name()\n\tb.time = time.Time{}\n\tif s := begin.SizeHint(); s > cap(b.points) {\n\t\tb.points = make([]edge.FieldsTagsTimeSetter, 0, s)\n\t}\n\treturn nil\n}\n\nfunc (b *combineBuffer) BatchPoint(bp edge.BatchPointMessage) error {\n\tb.n.timer.Start()\n\tdefer b.n.timer.Stop()\n\tbp = bp.ShallowCopy()\n\treturn b.addPoint(bp)\n}\n\nfunc (b *combineBuffer) EndBatch(end edge.EndBatchMessage) error {\n\tb.n.timer.Start()\n\tdefer b.n.timer.Stop()\n\tif err := b.combine(); err != nil {\n\t\treturn err\n\t}\n\tb.points = b.points[0:0]\n\treturn nil\n}\n\nfunc (b *combineBuffer) Point(p edge.PointMessage) error {\n\tb.n.timer.Start()\n\tdefer b.n.timer.Stop()\n\tp = p.ShallowCopy()\n\treturn b.addPoint(p)\n}\n\nfunc (b *combineBuffer) addPoint(p edge.FieldsTagsTimeSetter) error {\n\tt := p.Time().Round(b.n.c.Tolerance)\n\tp.SetTime(t)\n\tif t.Equal(b.time) {\n\t\tb.points = append(b.points, p)\n\t} else {\n\t\tif err := b.combine(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tb.time = t\n\t\tb.points = b.points[0:1]\n\t\tb.points[0] = 
p\n\t}\n\treturn nil\n}\n\nfunc (b *combineBuffer) Barrier(barrier edge.BarrierMessage) error {\n\treturn edge.Forward(b.n.outs, barrier)\n}\nfunc (b *combineBuffer) DeleteGroup(d edge.DeleteGroupMessage) error {\n\treturn edge.Forward(b.n.outs, d)\n}\n\n// Combine a set of points into all their combinations.\nfunc (b *combineBuffer) combine() error {\n\tif len(b.points) == 0 {\n\t\treturn nil\n\t}\n\n\tl := len(b.expressions)\n\n\t// Compute matching result for all points\n\tmatches := make([]map[int]bool, l)\n\tfor i := 0; i < l; i++ {\n\t\tmatches[i] = make(map[int]bool, len(b.points))\n\t}\n\tfor idx, p := range b.points {\n\t\tfor i := range b.expressions {\n\t\t\tmatched, err := EvalPredicate(b.expressions[i], b.n.scopePools[i], p)\n\t\t\tif err != nil {\n\t\t\t\tb.n.incrementErrorCount()\n\t\t\t\tb.n.logger.Println(\"E! evaluating lambda expression:\", err)\n\t\t\t}\n\t\t\tmatches[i][idx] = matched\n\t\t}\n\t}\n\n\tp := edge.NewPointMessage(\n\t\tb.name, \"\", \"\",\n\t\tb.groupInfo.Dimensions,\n\t\tnil,\n\t\tnil,\n\t\ttime.Time{},\n\t)\n\n\tdimensions := p.Dimensions().ToSet()\n\tset := make([]edge.FieldsTagsTimeSetter, l)\n\treturn b.c.Do(len(b.points), l, func(indices []int) error {\n\t\tvalid := true\n\t\tfor s := 0; s < l; s++ {\n\t\t\tfound := false\n\t\t\tfor i := range indices {\n\t\t\t\tif matches[s][indices[i]] {\n\t\t\t\t\tset[s] = b.points[indices[i]]\n\t\t\t\t\tindices = append(indices[0:i], indices[i+1:]...)\n\t\t\t\t\tfound = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !found {\n\t\t\t\tvalid = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif valid {\n\t\t\tfields, tags, t := b.merge(set, dimensions)\n\n\t\t\tnp := p.ShallowCopy()\n\t\t\tnp.SetFields(fields)\n\t\t\tnp.SetTags(tags)\n\t\t\tnp.SetTime(t.Round(b.n.c.Tolerance))\n\n\t\t\tb.n.timer.Pause()\n\t\t\terr := edge.Forward(b.n.outs, np)\n\t\t\tb.n.timer.Resume()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n}\n\n// Merge a set of points into a single point.\nfunc (b *combineBuffer) merge(points []edge.FieldsTagsTimeSetter, dimensions map[string]bool) (models.Fields, models.Tags, time.Time) {\n\tfields := make(models.Fields, len(points[0].Fields())*len(points))\n\ttags := make(models.Tags, len(points[0].Tags())*len(points))\n\n\tfor i, p := range points {\n\t\tfor field, value := range p.Fields() {\n\t\t\tfields[b.n.c.Names[i]+b.n.c.Delimiter+field] = value\n\t\t}\n\t\tfor tag, value := range p.Tags() {\n\t\t\tif !dimensions[tag] {\n\t\t\t\ttags[b.n.c.Names[i]+b.n.c.Delimiter+tag] = value\n\t\t\t} else {\n\t\t\t\ttags[tag] = value\n\t\t\t}\n\t\t}\n\t}\n\n\treturn fields, tags, points[0].Time()\n}\n\n// Type for performing actions on a set of combinations.\ntype combination struct {\n\tmax int64\n}\n\n// Do action for each combination, based on combinatorial logic n choose k.\n// If n choose k > max an error is returned\nfunc (c combination) Do(n, k int, f func(indices []int) error) error {\n\tif count := c.Count(int64(n), int64(k)); count > c.max {\n\t\treturn fmt.Errorf(\"refusing to perform combination as total combinations %d exceeds max combinations %d\", count, c.max)\n\t} else if count == -1 {\n\t\t// Nothing to do\n\t\treturn nil\n\t}\n\n\tindices := make([]int, k)\n\tindicesCopy := make([]int, k)\n\tfor i := 0; i < k; i++ {\n\t\tindices[i] = i\n\t}\n\tcopy(indicesCopy, indices)\n\tif err := f(indicesCopy); err != nil {\n\t\treturn err\n\t}\n\tfor {\n\t\ti := k - 1\n\t\tfor ; i >= 0; i-- {\n\t\t\tif indices[i] != i+n-k {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif i == -1 
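/* no index can advance further: every combination has been visited */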
{\n\t\t\treturn nil\n\t\t}\n\t\tindices[i]++\n\t\tfor j := i + 1; j < k; j++ {\n\t\t\tindices[j] = indices[j-1] + 1\n\t\t}\n\t\tcopy(indicesCopy, indices)\n\t\tif err := f(indicesCopy); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}\n\n// Count the number of possible combinations of n choose k.\nfunc (c combination) Count(n, k int64) int64 {\n\tif n < k {\n\t\treturn -1\n\t}\n\tcount := int64(1)\n\tfor i := int64(0); i < k; i++ {\n\t\tcount = (count * (n - i)) / (i + 1)\n\t}\n\treturn count\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/combine_test.go",
    "content": "package kapacitor\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc Test_Combination_Count(t *testing.T) {\n\tc := combination{max: 1e9}\n\ttestCases := []struct {\n\t\tn, k, exp int64\n\t}{\n\t\t{\n\t\t\tn:   1,\n\t\t\tk:   0,\n\t\t\texp: 1,\n\t\t},\n\t\t{\n\t\t\tn:   1,\n\t\t\tk:   1,\n\t\t\texp: 1,\n\t\t},\n\t\t{\n\t\t\tn:   2,\n\t\t\tk:   1,\n\t\t\texp: 2,\n\t\t},\n\t\t{\n\t\t\tn:   5,\n\t\t\tk:   2,\n\t\t\texp: 10,\n\t\t},\n\t\t{\n\t\t\tn:   5,\n\t\t\tk:   3,\n\t\t\texp: 10,\n\t\t},\n\t\t{\n\t\t\tn:   52,\n\t\t\tk:   5,\n\t\t\texp: 2598960,\n\t\t},\n\t}\n\tfor _, tc := range testCases {\n\t\tif exp, got := tc.exp, c.Count(tc.n, tc.k); exp != got {\n\t\t\tt.Errorf(\"unexpected combination count for %d choose %d: got %d exp %d\", tc.n, tc.k, got, exp)\n\t\t}\n\t}\n}\nfunc Test_Combination_Do(t *testing.T) {\n\tc := combination{max: 1e9}\n\ttestCases := []struct {\n\t\tn, k int\n\t\texp  [][]int\n\t}{\n\t\t{\n\t\t\tn:   1,\n\t\t\tk:   1,\n\t\t\texp: [][]int{{0}},\n\t\t},\n\t\t{\n\t\t\tn: 5,\n\t\t\tk: 2,\n\t\t\texp: [][]int{\n\t\t\t\t{0, 1},\n\t\t\t\t{0, 2},\n\t\t\t\t{0, 3},\n\t\t\t\t{0, 4},\n\t\t\t\t{1, 2},\n\t\t\t\t{1, 3},\n\t\t\t\t{1, 4},\n\t\t\t\t{2, 3},\n\t\t\t\t{2, 4},\n\t\t\t\t{3, 4},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tn: 5,\n\t\t\tk: 3,\n\t\t\texp: [][]int{\n\t\t\t\t{0, 1, 2},\n\t\t\t\t{0, 1, 3},\n\t\t\t\t{0, 1, 4},\n\t\t\t\t{0, 2, 3},\n\t\t\t\t{0, 2, 4},\n\t\t\t\t{0, 3, 4},\n\t\t\t\t{1, 2, 3},\n\t\t\t\t{1, 2, 4},\n\t\t\t\t{1, 3, 4},\n\t\t\t\t{2, 3, 4},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tn: 7,\n\t\t\tk: 5,\n\t\t\texp: [][]int{\n\t\t\t\t{0, 1, 2, 3, 4},\n\t\t\t\t{0, 1, 2, 3, 5},\n\t\t\t\t{0, 1, 2, 3, 6},\n\t\t\t\t{0, 1, 2, 4, 5},\n\t\t\t\t{0, 1, 2, 4, 6},\n\t\t\t\t{0, 1, 2, 5, 6},\n\t\t\t\t{0, 1, 3, 4, 5},\n\t\t\t\t{0, 1, 3, 4, 6},\n\t\t\t\t{0, 1, 3, 5, 6},\n\t\t\t\t{0, 1, 4, 5, 6},\n\t\t\t\t{0, 2, 3, 4, 5},\n\t\t\t\t{0, 2, 3, 4, 6},\n\t\t\t\t{0, 2, 3, 5, 6},\n\t\t\t\t{0, 2, 4, 5, 6},\n\t\t\t\t{0, 3, 4, 5, 6},\n\t\t\t\t{1, 2, 3, 4, 5},\n\t\t\t\t{1, 2, 3, 4, 6},\n\t\t\t\t{1, 2, 3, 5, 6},\n\t\t\t\t{1, 2, 4, 5, 6},\n\t\t\t\t{1, 3, 4, 5, 6},\n\t\t\t\t{2, 3, 4, 5, 6},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tc := range testCases {\n\t\ti := 0\n\t\tc.Do(tc.n, tc.k, func(indices []int) error {\n\t\t\tif i == len(tc.exp) {\n\t\t\t\tt.Fatalf(\"too many combinations returned for %d choose %d: got %v\", tc.n, tc.k, indices)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(tc.exp[i], indices) {\n\t\t\t\tt.Errorf(\"unexpected combination set for %d choose %d index %d: got %v exp %v\", tc.n, tc.k, i, indices, tc.exp[i])\n\t\t\t}\n\t\t\ti++\n\t\t\treturn nil\n\t\t})\n\t\tif i != len(tc.exp) {\n\t\t\tt.Errorf(\"not enough combinations returned for %d choose %d\", tc.n, tc.k)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/default.go",
    "content": "package kapacitor\n\nimport (\n\t\"log\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/expvar\"\n\t\"github.com/influxdata/kapacitor/models\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n)\n\nconst (\n\tstatsFieldsDefaulted = \"fields_defaulted\"\n\tstatsTagsDefaulted   = \"tags_defaulted\"\n)\n\ntype DefaultNode struct {\n\tnode\n\td *pipeline.DefaultNode\n\n\tfieldsDefaulted *expvar.Int\n\ttagsDefaulted   *expvar.Int\n}\n\n// Create a new  DefaultNode which applies a transformation func to each point in a stream and returns a single point.\nfunc newDefaultNode(et *ExecutingTask, n *pipeline.DefaultNode, l *log.Logger) (*DefaultNode, error) {\n\tdn := &DefaultNode{\n\t\tnode:            node{Node: n, et: et, logger: l},\n\t\td:               n,\n\t\tfieldsDefaulted: new(expvar.Int),\n\t\ttagsDefaulted:   new(expvar.Int),\n\t}\n\tdn.node.runF = dn.runDefault\n\treturn dn, nil\n}\n\nfunc (n *DefaultNode) runDefault(snapshot []byte) error {\n\tn.statMap.Set(statsFieldsDefaulted, n.fieldsDefaulted)\n\tn.statMap.Set(statsTagsDefaulted, n.tagsDefaulted)\n\n\tconsumer := edge.NewConsumerWithReceiver(\n\t\tn.ins[0],\n\t\tedge.NewReceiverFromForwardReceiverWithStats(\n\t\t\tn.outs,\n\t\t\tedge.NewTimedForwardReceiver(n.timer, n),\n\t\t),\n\t)\n\treturn consumer.Consume()\n}\n\nfunc (n *DefaultNode) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) {\n\tbegin = begin.ShallowCopy()\n\t_, tags := n.setDefaults(nil, begin.Tags())\n\tbegin.SetTags(tags)\n\treturn begin, nil\n}\n\nfunc (n *DefaultNode) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) {\n\tbp = bp.ShallowCopy()\n\tfields, tags := n.setDefaults(bp.Fields(), bp.Tags())\n\tbp.SetFields(fields)\n\tbp.SetTags(tags)\n\treturn bp, nil\n}\n\nfunc (n *DefaultNode) EndBatch(end edge.EndBatchMessage) (edge.Message, error) {\n\treturn end, nil\n}\n\nfunc (n *DefaultNode) Point(p edge.PointMessage) (edge.Message, error) {\n\tp = p.ShallowCopy()\n\tfields, tags := n.setDefaults(p.Fields(), p.Tags())\n\tp.SetFields(fields)\n\tp.SetTags(tags)\n\treturn p, nil\n}\n\nfunc (n *DefaultNode) Barrier(b edge.BarrierMessage) (edge.Message, error) {\n\treturn b, nil\n}\nfunc (n *DefaultNode) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) {\n\treturn d, nil\n}\n\nfunc (n *DefaultNode) setDefaults(fields models.Fields, tags models.Tags) (models.Fields, models.Tags) {\n\tnewFields := fields\n\tfieldsCopied := false\n\tfor field, value := range n.d.Fields {\n\t\tif v := fields[field]; v == nil {\n\t\t\tif !fieldsCopied {\n\t\t\t\tnewFields = newFields.Copy()\n\t\t\t\tfieldsCopied = true\n\t\t\t}\n\t\t\tn.fieldsDefaulted.Add(1)\n\t\t\tnewFields[field] = value\n\t\t}\n\t}\n\tnewTags := tags\n\ttagsCopied := false\n\tfor tag, value := range n.d.Tags {\n\t\tif v := tags[tag]; v == \"\" {\n\t\t\tif !tagsCopied {\n\t\t\t\tnewTags = newTags.Copy()\n\t\t\t\ttagsCopied = true\n\t\t\t}\n\t\t\tn.tagsDefaulted.Add(1)\n\t\t\tnewTags[tag] = value\n\t\t}\n\t}\n\treturn newFields, newTags\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/delete.go",
    "content": "package kapacitor\n\nimport (\n\t\"log\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/expvar\"\n\t\"github.com/influxdata/kapacitor/models\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n)\n\nconst (\n\tstatsFieldsDeleted = \"fields_deleted\"\n\tstatsTagsDeleted   = \"tags_deleted\"\n)\n\ntype DeleteNode struct {\n\tnode\n\td *pipeline.DeleteNode\n\n\tfieldsDeleted *expvar.Int\n\ttagsDeleted   *expvar.Int\n\n\ttags map[string]bool\n}\n\n// Create a new  DeleteNode which applies a transformation func to each point in a stream and returns a single point.\nfunc newDeleteNode(et *ExecutingTask, n *pipeline.DeleteNode, l *log.Logger) (*DeleteNode, error) {\n\ttags := make(map[string]bool)\n\tfor _, tag := range n.Tags {\n\t\ttags[tag] = true\n\t}\n\n\tdn := &DeleteNode{\n\t\tnode:          node{Node: n, et: et, logger: l},\n\t\td:             n,\n\t\tfieldsDeleted: new(expvar.Int),\n\t\ttagsDeleted:   new(expvar.Int),\n\t\ttags:          tags,\n\t}\n\tdn.node.runF = dn.runDelete\n\treturn dn, nil\n}\n\nfunc (n *DeleteNode) runDelete(snapshot []byte) error {\n\tn.statMap.Set(statsFieldsDeleted, n.fieldsDeleted)\n\tn.statMap.Set(statsTagsDeleted, n.tagsDeleted)\n\tconsumer := edge.NewConsumerWithReceiver(\n\t\tn.ins[0],\n\t\tedge.NewReceiverFromForwardReceiverWithStats(\n\t\t\tn.outs,\n\t\t\tedge.NewTimedForwardReceiver(n.timer, n),\n\t\t),\n\t)\n\treturn consumer.Consume()\n}\n\nfunc (n *DeleteNode) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) {\n\tbegin = begin.ShallowCopy()\n\t_, tags := n.doDeletes(nil, begin.Tags())\n\tbegin.SetTags(tags)\n\treturn begin, nil\n}\n\nfunc (n *DeleteNode) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) {\n\tbp = bp.ShallowCopy()\n\tfields, tags := n.doDeletes(bp.Fields(), bp.Tags())\n\tbp.SetFields(fields)\n\tbp.SetTags(tags)\n\treturn bp, nil\n}\n\nfunc (n *DeleteNode) EndBatch(end edge.EndBatchMessage) (edge.Message, error) {\n\treturn end, nil\n}\n\nfunc (n *DeleteNode) Point(p edge.PointMessage) (edge.Message, error) {\n\tp = p.ShallowCopy()\n\tfields, tags := n.doDeletes(p.Fields(), p.Tags())\n\tp.SetFields(fields)\n\tp.SetTags(tags)\n\tdims := p.Dimensions()\n\tif n.checkForDeletedDimension(dims) {\n\t\tp.SetDimensions(n.deleteDimensions(dims))\n\t}\n\treturn p, nil\n}\n\nfunc (n *DeleteNode) Barrier(b edge.BarrierMessage) (edge.Message, error) {\n\treturn b, nil\n}\nfunc (n *DeleteNode) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) {\n\treturn d, nil\n}\n\n// checkForDeletedDimension checks if we deleted a group by dimension\nfunc (n *DeleteNode) checkForDeletedDimension(dimensions models.Dimensions) bool {\n\tfor _, dim := range dimensions.TagNames {\n\t\tif n.tags[dim] {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (n *DeleteNode) deleteDimensions(dims models.Dimensions) models.Dimensions {\n\tnewTagNames := make([]string, 0, len(dims.TagNames)-1)\n\tfor _, dim := range dims.TagNames {\n\t\tif !n.tags[dim] {\n\t\t\tnewTagNames = append(newTagNames, dim)\n\t\t}\n\t}\n\treturn models.Dimensions{\n\t\tTagNames: newTagNames,\n\t\tByName:   dims.ByName,\n\t}\n}\n\nfunc (n *DeleteNode) doDeletes(fields models.Fields, tags models.Tags) (models.Fields, models.Tags) {\n\tnewFields := fields\n\tfieldsCopied := false\n\tfor _, field := range n.d.Fields {\n\t\tif _, ok := fields[field]; ok {\n\t\t\tif !fieldsCopied {\n\t\t\t\tnewFields = newFields.Copy()\n\t\t\t\tfieldsCopied = true\n\t\t\t}\n\t\t\tn.fieldsDeleted.Add(1)\n\t\t\tdelete(newFields, 
field)\n\t\t}\n\t}\n\tnewTags := tags\n\ttagsCopied := false\n\tfor _, tag := range n.d.Tags {\n\t\tif _, ok := tags[tag]; ok {\n\t\t\tif !tagsCopied {\n\t\t\t\tnewTags = newTags.Copy()\n\t\t\t\ttagsCopied = true\n\t\t\t}\n\t\t\tn.tagsDeleted.Add(1)\n\t\t\tdelete(newTags, tag)\n\t\t}\n\t}\n\treturn newFields, newTags\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/derivative.go",
    "content": "package kapacitor\n\nimport (\n\t\"log\"\n\t\"time\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/models\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n)\n\ntype DerivativeNode struct {\n\tnode\n\td *pipeline.DerivativeNode\n}\n\n// Create a new derivative node.\nfunc newDerivativeNode(et *ExecutingTask, n *pipeline.DerivativeNode, l *log.Logger) (*DerivativeNode, error) {\n\tdn := &DerivativeNode{\n\t\tnode: node{Node: n, et: et, logger: l},\n\t\td:    n,\n\t}\n\t// Create stateful expressions\n\tdn.node.runF = dn.runDerivative\n\treturn dn, nil\n}\n\nfunc (n *DerivativeNode) runDerivative([]byte) error {\n\tconsumer := edge.NewGroupedConsumer(\n\t\tn.ins[0],\n\t\tn,\n\t)\n\tn.statMap.Set(statCardinalityGauge, consumer.CardinalityVar())\n\treturn consumer.Consume()\n}\n\nfunc (n *DerivativeNode) NewGroup(group edge.GroupInfo, first edge.PointMeta) (edge.Receiver, error) {\n\treturn edge.NewReceiverFromForwardReceiverWithStats(\n\t\tn.outs,\n\t\tedge.NewTimedForwardReceiver(n.timer, n.newGroup()),\n\t), nil\n}\n\nfunc (n *DerivativeNode) newGroup() *derivativeGroup {\n\treturn &derivativeGroup{\n\t\tn: n,\n\t}\n}\n\ntype derivativeGroup struct {\n\tn        *DerivativeNode\n\tprevious edge.FieldsTagsTimeGetter\n}\n\nfunc (g *derivativeGroup) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) {\n\tif s := begin.SizeHint(); s > 0 {\n\t\tbegin = begin.ShallowCopy()\n\t\tbegin.SetSizeHint(s - 1)\n\t}\n\tg.previous = nil\n\treturn begin, nil\n}\n\nfunc (g *derivativeGroup) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) {\n\tnp := bp.ShallowCopy()\n\temit := g.doDerivative(bp, np)\n\tif emit {\n\t\treturn np, nil\n\t}\n\treturn nil, nil\n}\n\nfunc (g *derivativeGroup) EndBatch(end edge.EndBatchMessage) (edge.Message, error) {\n\treturn end, nil\n}\n\nfunc (g *derivativeGroup) Point(p edge.PointMessage) (edge.Message, error) {\n\tnp := p.ShallowCopy()\n\temit := g.doDerivative(p, np)\n\tif emit {\n\t\treturn np, nil\n\t}\n\treturn nil, nil\n}\n\n// doDerivative computes the derivative with respect to g.previous and p.\n// The resulting derivative value will be set on n.\nfunc (g *derivativeGroup) doDerivative(p edge.FieldsTagsTimeGetter, n edge.FieldsTagsTimeSetter) bool {\n\tvar prevFields, currFields models.Fields\n\tvar prevTime, currTime time.Time\n\tif g.previous != nil {\n\t\tprevFields = g.previous.Fields()\n\t\tprevTime = g.previous.Time()\n\t}\n\tcurrFields = p.Fields()\n\tcurrTime = p.Time()\n\tvalue, store, emit := g.n.derivative(\n\t\tprevFields, currFields,\n\t\tprevTime, currTime,\n\t)\n\tif store {\n\t\tg.previous = p\n\t}\n\tif !emit {\n\t\treturn false\n\t}\n\n\tfields := n.Fields().Copy()\n\tfields[g.n.d.As] = value\n\tn.SetFields(fields)\n\treturn true\n}\n\nfunc (g *derivativeGroup) Barrier(b edge.BarrierMessage) (edge.Message, error) {\n\treturn b, nil\n}\nfunc (g *derivativeGroup) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) {\n\treturn d, nil\n}\n\n// derivative calculates the derivative between prev and cur.\n// Return is the resulting derivative, whether the current point should be\n// stored as previous, and whether the point result should be emitted.\nfunc (n *DerivativeNode) derivative(prev, curr models.Fields, prevTime, currTime time.Time) (float64, bool, bool) {\n\tf1, ok := numToFloat(curr[n.d.Field])\n\tif !ok {\n\t\tn.incrementErrorCount()\n\t\tn.logger.Printf(\"E! 
cannot apply derivative to type %T\", curr[n.d.Field])\n\t\treturn 0, false, false\n\t}\n\n\tf0, ok := numToFloat(prev[n.d.Field])\n\tif !ok {\n\t\t// The only time this will fail to parse is if there is no previous.\n\t\t// Because we only return `store=true` if current parses successfully, we will\n\t\t// never get a previous which doesn't parse.\n\t\treturn 0, true, false\n\t}\n\n\telapsed := float64(currTime.Sub(prevTime))\n\tif elapsed == 0 {\n\t\tn.incrementErrorCount()\n\t\tn.logger.Printf(\"E! cannot perform derivative: elapsed time was 0\")\n\t\treturn 0, true, false\n\t}\n\tdiff := f1 - f0\n\t// Drop negative values for non-negative derivatives\n\tif n.d.NonNegativeFlag && diff < 0 {\n\t\treturn 0, true, false\n\t}\n\n\tvalue := diff / (elapsed / float64(n.d.Unit))\n\treturn value, true, true\n}\n\nfunc numToFloat(num interface{}) (float64, bool) {\n\tswitch n := num.(type) {\n\tcase int64:\n\t\treturn float64(n), true\n\tcase float64:\n\t\treturn n, true\n\tdefault:\n\t\treturn 0, false\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/doc.go",
    "content": "/*\n\tA data pipeline processing engine.\n\n\tSee the README for more complete examples and guides.\n\n\tCode Organization:\n\n\tThe pipeline package provides an API for how nodes can be connected to form a pipeline.\n\tThe individual implementations of each node exist in this kapacitor package.\n\tThe reason for the separation is to keep the exported API from the pipeline package\n\tclean as it is consumed via the TICKscripts (a DSL for Kapacitor).\n\n\tOther Concepts:\n\n\tStream vs Batch -- Use of the word 'stream'  indicates data arrives a single data point at a time.\n\tUse of the word 'batch' indicates data arrives in sets or batches or data points.\n\n\tTask -- A task represents a concrete workload to perform.\n\tIt consists of a pipeline and an identifying name.\n\tBasic CRUD operations can be performed on tasks.\n\n\tTask Master -- Responsible for executing a task in a specific environment.\n\n\tReplay -- Replays static datasets against tasks.\n*/\npackage kapacitor\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/edge.go",
    "content": "package kapacitor\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/expvar\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n\t\"github.com/influxdata/kapacitor/server/vars\"\n)\n\nconst (\n\tstatCollected = \"collected\"\n\tstatEmitted   = \"emitted\"\n\n\tdefaultEdgeBufferSize = 1000\n)\n\nvar ErrAborted = errors.New(\"edged aborted\")\n\ntype Edge struct {\n\tedge.StatsEdge\n\n\tmu     sync.Mutex\n\tclosed bool\n\n\tstatsKey string\n\tstatMap  *expvar.Map\n\tlogger   *log.Logger\n}\n\nfunc newEdge(taskName, parentName, childName string, t pipeline.EdgeType, size int, logService LogService) edge.StatsEdge {\n\te := edge.NewStatsEdge(edge.NewChannelEdge(t, defaultEdgeBufferSize))\n\ttags := map[string]string{\n\t\t\"task\":   taskName,\n\t\t\"parent\": parentName,\n\t\t\"child\":  childName,\n\t\t\"type\":   t.String(),\n\t}\n\tkey, sm := vars.NewStatistic(\"edges\", tags)\n\tsm.Set(statCollected, e.CollectedVar())\n\tsm.Set(statEmitted, e.EmittedVar())\n\tname := fmt.Sprintf(\"%s|%s->%s\", taskName, parentName, childName)\n\treturn &Edge{\n\t\tStatsEdge: e,\n\t\tstatsKey:  key,\n\t\tstatMap:   sm,\n\t\tlogger:    logService.NewLogger(fmt.Sprintf(\"[edge:%s] \", name), log.LstdFlags),\n\t}\n}\n\nfunc (e *Edge) Close() error {\n\te.mu.Lock()\n\tdefer e.mu.Unlock()\n\tif e.closed {\n\t\treturn nil\n\t}\n\te.closed = true\n\tvars.DeleteStatistic(e.statsKey)\n\te.logger.Printf(\"D! closing c: %d e: %d\",\n\t\te.Collected(),\n\t\te.Emitted(),\n\t)\n\treturn e.StatsEdge.Close()\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/eval.go",
    "content": "package kapacitor\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/expvar\"\n\t\"github.com/influxdata/kapacitor/models\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n\t\"github.com/influxdata/kapacitor/tick/ast\"\n\t\"github.com/influxdata/kapacitor/tick/stateful\"\n)\n\ntype EvalNode struct {\n\tnode\n\te           *pipeline.EvalNode\n\texpressions []stateful.Expression\n\trefVarList  [][]string\n\tscopePool   stateful.ScopePool\n\ttags        map[string]bool\n\n\tevalErrors *expvar.Int\n}\n\n// Create a new  EvalNode which applies a transformation func to each point in a stream and returns a single point.\nfunc newEvalNode(et *ExecutingTask, n *pipeline.EvalNode, l *log.Logger) (*EvalNode, error) {\n\tif len(n.AsList) != len(n.Lambdas) {\n\t\treturn nil, errors.New(\"must provide one name per expression via the 'As' property\")\n\t}\n\ten := &EvalNode{\n\t\tnode: node{Node: n, et: et, logger: l},\n\t\te:    n,\n\t}\n\n\t// Create stateful expressions\n\ten.expressions = make([]stateful.Expression, len(n.Lambdas))\n\ten.refVarList = make([][]string, len(n.Lambdas))\n\texpressions := make([]ast.Node, len(n.Lambdas))\n\tfor i, lambda := range n.Lambdas {\n\t\texpressions[i] = lambda.Expression\n\t\tstatefulExpr, err := stateful.NewExpression(lambda.Expression)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Failed to compile %v expression: %v\", i, err)\n\t\t}\n\t\ten.expressions[i] = statefulExpr\n\t\trefVars := ast.FindReferenceVariables(lambda.Expression)\n\t\ten.refVarList[i] = refVars\n\t}\n\t// Create a single pool for the combination of all expressions\n\ten.scopePool = stateful.NewScopePool(ast.FindReferenceVariables(expressions...))\n\n\t// Create map of tags\n\tif l := len(n.TagsList); l > 0 {\n\t\ten.tags = make(map[string]bool, l)\n\t\tfor _, tag := range n.TagsList {\n\t\t\ten.tags[tag] = true\n\t\t}\n\t}\n\n\ten.node.runF = en.runEval\n\treturn en, nil\n}\n\nfunc (n *EvalNode) runEval(snapshot []byte) error {\n\tconsumer := edge.NewGroupedConsumer(\n\t\tn.ins[0],\n\t\tn,\n\t)\n\tn.statMap.Set(statCardinalityGauge, consumer.CardinalityVar())\n\n\treturn consumer.Consume()\n\n}\n\nfunc (n *EvalNode) NewGroup(group edge.GroupInfo, first edge.PointMeta) (edge.Receiver, error) {\n\treturn edge.NewReceiverFromForwardReceiverWithStats(\n\t\tn.outs,\n\t\tedge.NewTimedForwardReceiver(n.timer, n.newGroup()),\n\t), nil\n}\n\nfunc (n *EvalNode) newGroup() *evalGroup {\n\texpressions := make([]stateful.Expression, len(n.expressions))\n\tfor i, exp := range n.expressions {\n\t\texpressions[i] = exp.CopyReset()\n\t}\n\treturn &evalGroup{\n\t\tn:           n,\n\t\texpressions: expressions,\n\t}\n}\n\nfunc (n *EvalNode) eval(expressions []stateful.Expression, p edge.FieldsTagsTimeSetter) error {\n\n\tvars := n.scopePool.Get()\n\tdefer n.scopePool.Put(vars)\n\n\tfor i, expr := range expressions {\n\t\terr := fillScope(vars, n.refVarList[i], p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tv, err := expr.Eval(vars)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tname := n.e.AsList[i]\n\t\tvars.Set(name, v)\n\t}\n\tfields := p.Fields()\n\ttags := p.Tags()\n\tnewTags := tags\n\tif len(n.tags) > 0 {\n\t\tnewTags = newTags.Copy()\n\t\tfor tag := range n.tags {\n\t\t\tv, err := vars.Get(tag)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif s, ok := v.(string); !ok {\n\t\t\t\treturn fmt.Errorf(\"result of a tag expression must be of type string, got %T\", v)\n\t\t\t} else 
{\n\t\t\t\tnewTags[tag] = s\n\t\t\t}\n\t\t}\n\t}\n\tvar newFields models.Fields\n\tif n.e.KeepFlag {\n\t\tif l := len(n.e.KeepList); l != 0 {\n\t\t\tnewFields = make(models.Fields, l)\n\t\t\tfor _, f := range n.e.KeepList {\n\t\t\t\t// Try the vars scope first\n\t\t\t\tif vars.Has(f) {\n\t\t\t\t\tv, err := vars.Get(f)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\tnewFields[f] = v\n\t\t\t\t} else if v, ok := fields[f]; ok {\n\t\t\t\t\t// Try the raw fields next, since it may not have been a referenced var.\n\t\t\t\t\tnewFields[f] = v\n\t\t\t\t} else {\n\t\t\t\t\treturn fmt.Errorf(\"cannot keep field %q, field does not exist\", f)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tnewFields = make(models.Fields, len(fields)+len(n.e.AsList))\n\t\t\tfor f, v := range fields {\n\t\t\t\tnewFields[f] = v\n\t\t\t}\n\t\t\tfor _, f := range n.e.AsList {\n\t\t\t\tv, err := vars.Get(f)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tnewFields[f] = v\n\t\t\t}\n\t\t}\n\t} else {\n\t\tnewFields = make(models.Fields, len(n.e.AsList)-len(n.tags))\n\t\tfor _, f := range n.e.AsList {\n\t\t\tif n.tags[f] {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tv, err := vars.Get(f)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tnewFields[f] = v\n\t\t}\n\t}\n\tp.SetFields(newFields)\n\tp.SetTags(newTags)\n\treturn nil\n}\n\ntype evalGroup struct {\n\tn           *EvalNode\n\texpressions []stateful.Expression\n}\n\nfunc (g *evalGroup) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) {\n\tbegin = begin.ShallowCopy()\n\tbegin.SetSizeHint(0)\n\treturn begin, nil\n}\n\nfunc (g *evalGroup) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) {\n\tbp = bp.ShallowCopy()\n\tif g.doEval(bp) {\n\t\treturn bp, nil\n\t}\n\treturn nil, nil\n}\n\nfunc (g *evalGroup) EndBatch(end edge.EndBatchMessage) (edge.Message, error) {\n\treturn end, nil\n}\n\nfunc (g *evalGroup) Point(p edge.PointMessage) (edge.Message, error) {\n\tp = p.ShallowCopy()\n\tif g.doEval(p) {\n\t\treturn p, nil\n\t}\n\treturn nil, nil\n}\n\nfunc (g *evalGroup) doEval(p edge.FieldsTagsTimeSetter) bool {\n\terr := g.n.eval(g.expressions, p)\n\tif err != nil {\n\t\tg.n.incrementErrorCount()\n\t\tif !g.n.e.QuietFlag {\n\t\t\tg.n.logger.Println(\"E!\", err)\n\t\t}\n\t\t// Skip bad point\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc (g *evalGroup) Barrier(b edge.BarrierMessage) (edge.Message, error) {\n\treturn b, nil\n}\nfunc (g *evalGroup) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) {\n\treturn d, nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/expr.go",
    "content": "package kapacitor\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/tick/ast\"\n\t\"github.com/influxdata/kapacitor/tick/stateful\"\n)\n\n// EvalPredicate - Evaluate a given expression as a boolean predicate against a set of fields and tags\nfunc EvalPredicate(se stateful.Expression, scopePool stateful.ScopePool, p edge.FieldsTagsTimeGetter) (bool, error) {\n\tvars := scopePool.Get()\n\tdefer scopePool.Put(vars)\n\terr := fillScope(vars, scopePool.ReferenceVariables(), p)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// for function signature check\n\tif _, err := se.Type(vars); err != nil {\n\t\treturn false, err\n\t}\n\n\treturn se.EvalBool(vars)\n}\n\n// fillScope - given a scope and reference variables, we fill the exact variables from the now, fields and tags.\nfunc fillScope(vars *stateful.Scope, referenceVariables []string, p edge.FieldsTagsTimeGetter) error {\n\tnow := p.Time()\n\tfields := p.Fields()\n\ttags := p.Tags()\n\tfor _, refVariableName := range referenceVariables {\n\t\tif refVariableName == \"time\" {\n\t\t\tvars.Set(\"time\", now.Local())\n\t\t\tcontinue\n\t\t}\n\n\t\t// Support the error with tags/fields collision\n\t\tvar fieldValue interface{}\n\t\tvar isFieldExists bool\n\t\tvar tagValue interface{}\n\t\tvar isTagExists bool\n\n\t\tif fieldValue, isFieldExists = fields[refVariableName]; isFieldExists {\n\t\t\tvars.Set(refVariableName, fieldValue)\n\t\t}\n\n\t\tif tagValue, isTagExists = tags[refVariableName]; isTagExists {\n\t\t\tif isFieldExists {\n\t\t\t\treturn fmt.Errorf(\"cannot have field and tags with same name %q\", refVariableName)\n\t\t\t}\n\t\t\tvars.Set(refVariableName, tagValue)\n\t\t}\n\t\tif !isFieldExists && !isTagExists {\n\t\t\tif !vars.Has(refVariableName) {\n\t\t\t\tvars.Set(refVariableName, ast.MissingValue)\n\t\t\t}\n\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/flatten.go",
    "content": "package kapacitor\n\nimport (\n\t\"bytes\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/models\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n)\n\ntype FlattenNode struct {\n\tnode\n\tf *pipeline.FlattenNode\n\n\tbufPool sync.Pool\n}\n\n// Create a new FlattenNode, which takes pairs from parent streams combines them into a single point.\nfunc newFlattenNode(et *ExecutingTask, n *pipeline.FlattenNode, l *log.Logger) (*FlattenNode, error) {\n\tfn := &FlattenNode{\n\t\tf:    n,\n\t\tnode: node{Node: n, et: et, logger: l},\n\t\tbufPool: sync.Pool{\n\t\t\tNew: func() interface{} { return &bytes.Buffer{} },\n\t\t},\n\t}\n\tfn.node.runF = fn.runFlatten\n\treturn fn, nil\n}\n\nfunc (n *FlattenNode) runFlatten([]byte) error {\n\tconsumer := edge.NewGroupedConsumer(\n\t\tn.ins[0],\n\t\tn,\n\t)\n\tn.statMap.Set(statCardinalityGauge, consumer.CardinalityVar())\n\treturn consumer.Consume()\n}\n\nfunc (n *FlattenNode) NewGroup(group edge.GroupInfo, first edge.PointMeta) (edge.Receiver, error) {\n\tt := first.Time().Round(n.f.Tolerance)\n\treturn &flattenBuffer{\n\t\tn:         n,\n\t\ttime:      t,\n\t\tname:      first.Name(),\n\t\tgroupInfo: group,\n\t}, nil\n}\n\ntype flattenBuffer struct {\n\tn         *FlattenNode\n\ttime      time.Time\n\tname      string\n\tgroupInfo edge.GroupInfo\n\tpoints    []edge.FieldsTagsTimeGetter\n}\n\nfunc (b *flattenBuffer) BeginBatch(begin edge.BeginBatchMessage) error {\n\tb.n.timer.Start()\n\tdefer b.n.timer.Stop()\n\n\tb.name = begin.Name()\n\tb.time = time.Time{}\n\tif s := begin.SizeHint(); s > cap(b.points) {\n\t\tb.points = make([]edge.FieldsTagsTimeGetter, 0, s)\n\t}\n\n\tbegin = begin.ShallowCopy()\n\tbegin.SetSizeHint(0)\n\tb.n.timer.Pause()\n\terr := edge.Forward(b.n.outs, begin)\n\tb.n.timer.Resume()\n\treturn err\n}\n\nfunc (b *flattenBuffer) BatchPoint(bp edge.BatchPointMessage) error {\n\tb.n.timer.Start()\n\tdefer b.n.timer.Stop()\n\n\tt := bp.Time().Round(b.n.f.Tolerance)\n\tbp = bp.ShallowCopy()\n\tbp.SetTime(t)\n\n\tt, fields, err := b.addPoint(bp)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(fields) == 0 {\n\t\treturn nil\n\t}\n\n\treturn b.emitBatchPoint(t, fields)\n}\n\nfunc (b *flattenBuffer) emitBatchPoint(t time.Time, fields models.Fields) error {\n\t// Emit batch point\n\tflatP := edge.NewBatchPointMessage(\n\t\tfields,\n\t\tb.groupInfo.Tags,\n\t\tt,\n\t)\n\tb.n.timer.Pause()\n\terr := edge.Forward(b.n.outs, flatP)\n\tb.n.timer.Resume()\n\treturn err\n}\n\nfunc (b *flattenBuffer) EndBatch(end edge.EndBatchMessage) error {\n\tb.n.timer.Start()\n\tdefer b.n.timer.Stop()\n\n\tif len(b.points) > 0 {\n\t\tfields, err := b.n.flatten(b.points)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := b.emitBatchPoint(b.time, fields); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tb.points = b.points[0:0]\n\t}\n\n\tb.n.timer.Pause()\n\terr := edge.Forward(b.n.outs, end)\n\tb.n.timer.Resume()\n\treturn err\n}\n\nfunc (b *flattenBuffer) Point(p edge.PointMessage) error {\n\tb.n.timer.Start()\n\tdefer b.n.timer.Stop()\n\n\tt := p.Time().Round(b.n.f.Tolerance)\n\tp = p.ShallowCopy()\n\tp.SetTime(t)\n\n\tt, fields, err := b.addPoint(p)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(fields) == 0 {\n\t\treturn nil\n\t}\n\n\t// Emit point\n\tflatP := edge.NewPointMessage(\n\t\tb.name, \"\", \"\",\n\t\tb.groupInfo.Dimensions,\n\t\tfields,\n\t\tb.groupInfo.Tags,\n\t\tt,\n\t)\n\tb.n.timer.Pause()\n\terr = edge.Forward(b.n.outs, 
flatP)\n\tb.n.timer.Resume()\n\treturn err\n}\n\nfunc (b *flattenBuffer) addPoint(p edge.FieldsTagsTimeGetter) (next time.Time, fields models.Fields, err error) {\n\tt := p.Time()\n\tif !t.Equal(b.time) {\n\t\tif len(b.points) > 0 {\n\t\t\tfields, err = b.n.flatten(b.points)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tnext = b.time\n\t\t\tb.points = b.points[0:0]\n\t\t}\n\t\t// Update buffer with new time\n\t\tb.time = t\n\t}\n\tb.points = append(b.points, p)\n\treturn\n}\n\nfunc (b *flattenBuffer) Barrier(barrier edge.BarrierMessage) error {\n\treturn edge.Forward(b.n.outs, barrier)\n}\nfunc (b *flattenBuffer) DeleteGroup(d edge.DeleteGroupMessage) error {\n\treturn edge.Forward(b.n.outs, d)\n}\n\nfunc (n *FlattenNode) flatten(points []edge.FieldsTagsTimeGetter) (models.Fields, error) {\n\tfields := make(models.Fields)\n\tif len(points) == 0 {\n\t\treturn fields, nil\n\t}\n\tfieldPrefix := n.bufPool.Get().(*bytes.Buffer)\n\tdefer n.bufPool.Put(fieldPrefix)\nPOINTS:\n\tfor _, p := range points {\n\t\ttags := p.Tags()\n\t\tfor i, tag := range n.f.Dimensions {\n\t\t\tif v, ok := tags[tag]; ok {\n\t\t\t\tif i > 0 {\n\t\t\t\t\tfieldPrefix.WriteString(n.f.Delimiter)\n\t\t\t\t}\n\t\t\t\tfieldPrefix.WriteString(v)\n\t\t\t} else {\n\t\t\t\tn.incrementErrorCount()\n\t\t\t\tn.logger.Printf(\"E! point missing tag %q for flatten operation\", tag)\n\t\t\t\tcontinue POINTS\n\t\t\t}\n\t\t}\n\t\tl := fieldPrefix.Len()\n\t\tfor fname, value := range p.Fields() {\n\t\t\tif !n.f.DropOriginalFieldNameFlag {\n\t\t\t\tif l > 0 {\n\t\t\t\t\tfieldPrefix.WriteString(n.f.Delimiter)\n\t\t\t\t}\n\t\t\t\tfieldPrefix.WriteString(fname)\n\t\t\t}\n\t\t\tfields[fieldPrefix.String()] = value\n\t\t\tfieldPrefix.Truncate(l)\n\t\t}\n\t\tfieldPrefix.Reset()\n\t}\n\treturn fields, nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/gobuild.sh",
    "content": "#!/bin/bash\n# This script run inside the Dockerfile_build_ubuntu64_git container and\n# gets the latests Go source code and compiles it.\n# Then passes control over to the normal build.py script\n\nset -e\n\ncd /go/src\ngit fetch --all\ngit checkout $GO_CHECKOUT\n# Merge in recent changes if we are on a branch\n# if we checked out a tag just ignore the error\ngit pull || true\n./make.bash\n\n# Run normal build.py\ncd \"$PROJECT_DIR\"\nexec ./build.py \"$@\"\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/group_by.go",
    "content": "package kapacitor\n\nimport (\n\t\"log\"\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/expvar\"\n\t\"github.com/influxdata/kapacitor/models\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n\t\"github.com/influxdata/kapacitor/tick/ast\"\n)\n\ntype GroupByNode struct {\n\tnode\n\tg *pipeline.GroupByNode\n\n\tbyName   bool\n\ttagNames []string\n\n\tbegin      edge.BeginBatchMessage\n\tdimensions models.Dimensions\n\n\tallDimensions bool\n\n\tmu       sync.RWMutex\n\tlastTime time.Time\n\tgroups   map[models.GroupID]edge.BufferedBatchMessage\n}\n\n// Create a new GroupByNode which splits the stream dynamically based on the specified dimensions.\nfunc newGroupByNode(et *ExecutingTask, n *pipeline.GroupByNode, l *log.Logger) (*GroupByNode, error) {\n\tgn := &GroupByNode{\n\t\tnode:   node{Node: n, et: et, logger: l},\n\t\tg:      n,\n\t\tgroups: make(map[models.GroupID]edge.BufferedBatchMessage),\n\t}\n\tgn.node.runF = gn.runGroupBy\n\n\tgn.allDimensions, gn.tagNames = determineTagNames(n.Dimensions, n.ExcludedDimensions)\n\tgn.byName = n.ByMeasurementFlag\n\treturn gn, nil\n}\n\nfunc (n *GroupByNode) runGroupBy([]byte) error {\n\tvalueF := func() int64 {\n\t\tn.mu.RLock()\n\t\tl := len(n.groups)\n\t\tn.mu.RUnlock()\n\t\treturn int64(l)\n\t}\n\tn.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF))\n\n\tconsumer := edge.NewConsumerWithReceiver(\n\t\tn.ins[0],\n\t\tn,\n\t)\n\treturn consumer.Consume()\n}\n\nfunc (n *GroupByNode) Point(p edge.PointMessage) error {\n\tp = p.ShallowCopy()\n\tn.timer.Start()\n\tdims := p.Dimensions()\n\tdims.ByName = dims.ByName || n.byName\n\tdims.TagNames = computeTagNames(p.Tags(), n.allDimensions, n.tagNames, n.g.ExcludedDimensions)\n\tp.SetDimensions(dims)\n\tn.timer.Stop()\n\tif err := edge.Forward(n.outs, p); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (n *GroupByNode) BeginBatch(begin edge.BeginBatchMessage) error {\n\tn.timer.Start()\n\tdefer n.timer.Stop()\n\n\tn.emit(begin.Time())\n\n\tn.begin = begin\n\tn.dimensions = begin.Dimensions()\n\tn.dimensions.ByName = n.dimensions.ByName || n.byName\n\n\treturn nil\n}\n\nfunc (n *GroupByNode) BatchPoint(bp edge.BatchPointMessage) error {\n\tn.timer.Start()\n\tdefer n.timer.Stop()\n\n\tn.dimensions.TagNames = computeTagNames(bp.Tags(), n.allDimensions, n.tagNames, n.g.ExcludedDimensions)\n\tgroupID := models.ToGroupID(n.begin.Name(), bp.Tags(), n.dimensions)\n\tgroup, ok := n.groups[groupID]\n\tif !ok {\n\t\t// Create new begin message\n\t\tnewBegin := n.begin.ShallowCopy()\n\t\tnewBegin.SetTagsAndDimensions(bp.Tags(), n.dimensions)\n\n\t\t// Create buffer for group batch\n\t\tgroup = edge.NewBufferedBatchMessage(\n\t\t\tnewBegin,\n\t\t\tmake([]edge.BatchPointMessage, 0, newBegin.SizeHint()),\n\t\t\tedge.NewEndBatchMessage(),\n\t\t)\n\t\tn.mu.Lock()\n\t\tn.groups[groupID] = group\n\t\tn.mu.Unlock()\n\t}\n\tgroup.SetPoints(append(group.Points(), bp))\n\n\treturn nil\n}\n\nfunc (n *GroupByNode) EndBatch(end edge.EndBatchMessage) error {\n\treturn nil\n}\n\nfunc (n *GroupByNode) Barrier(b edge.BarrierMessage) error {\n\tn.timer.Start()\n\terr := n.emit(b.Time())\n\tn.timer.Stop()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn edge.Forward(n.outs, b)\n}\nfunc (n *GroupByNode) DeleteGroup(d edge.DeleteGroupMessage) error {\n\treturn edge.Forward(n.outs, d)\n}\n\n// emit sends all groups before time t to children nodes.\n// The node timer must be started when calling this method.\nfunc (n 
*GroupByNode) emit(t time.Time) error {\n\t// TODO: ensure this time comparison works with barrier messages\n\tif !t.Equal(n.lastTime) {\n\t\tn.lastTime = t\n\t\t// Emit all groups\n\t\tfor id, group := range n.groups {\n\t\t\t// Update SizeHint since we know the final point count\n\t\t\tgroup.Begin().SetSizeHint(len(group.Points()))\n\t\t\t// Sort points since we didn't guarantee insertion order was sorted\n\t\t\tsort.Sort(edge.BatchPointMessages(group.Points()))\n\t\t\t// Send group batch to all children\n\t\t\tn.timer.Pause()\n\t\t\tif err := edge.Forward(n.outs, group); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tn.timer.Resume()\n\t\t\tn.mu.Lock()\n\t\t\t// Remove from group\n\t\t\tdelete(n.groups, id)\n\t\t\tn.mu.Unlock()\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc determineTagNames(dimensions []interface{}, excluded []string) (allDimensions bool, realDimensions []string) {\n\tfor _, dim := range dimensions {\n\t\tswitch d := dim.(type) {\n\t\tcase string:\n\t\t\trealDimensions = append(realDimensions, d)\n\t\tcase *ast.StarNode:\n\t\t\tallDimensions = true\n\t\t}\n\t}\n\tsort.Strings(realDimensions)\n\trealDimensions = filterExcludedTagNames(realDimensions, excluded)\n\treturn\n}\n\nfunc filterExcludedTagNames(tagNames, excluded []string) []string {\n\tfiltered := tagNames[0:0]\n\tfor _, t := range tagNames {\n\t\tfound := false\n\t\tfor _, x := range excluded {\n\t\t\tif x == t {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tfiltered = append(filtered, t)\n\t\t}\n\t}\n\treturn filtered\n}\n\nfunc computeTagNames(tags models.Tags, allDimensions bool, tagNames, excluded []string) []string {\n\tif allDimensions {\n\t\treturn filterExcludedTagNames(models.SortedKeys(tags), excluded)\n\t}\n\treturn tagNames\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/http_out.go",
    "content": "package kapacitor\n\nimport (\n\t\"encoding/json\"\n\t\"log\"\n\t\"net/http\"\n\t\"path\"\n\t\"sync\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/models\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n\t\"github.com/influxdata/kapacitor/services/httpd\"\n)\n\ntype HTTPOutNode struct {\n\tnode\n\tc *pipeline.HTTPOutNode\n\n\tendpoint string\n\n\tmu      sync.RWMutex\n\troutes  []httpd.Route\n\tresult  *models.Result\n\tindexes []*httpOutGroup\n}\n\n// Create a new  HTTPOutNode which caches the most recent item and exposes it over the HTTP API.\nfunc newHTTPOutNode(et *ExecutingTask, n *pipeline.HTTPOutNode, l *log.Logger) (*HTTPOutNode, error) {\n\thn := &HTTPOutNode{\n\t\tnode:   node{Node: n, et: et, logger: l},\n\t\tc:      n,\n\t\tresult: new(models.Result),\n\t}\n\tet.registerOutput(hn.c.Endpoint, hn)\n\thn.node.runF = hn.runOut\n\thn.node.stopF = hn.stopOut\n\treturn hn, nil\n}\n\nfunc (n *HTTPOutNode) Endpoint() string {\n\treturn n.endpoint\n}\n\nfunc (n *HTTPOutNode) runOut([]byte) error {\n\thndl := func(w http.ResponseWriter, req *http.Request) {\n\t\tn.mu.RLock()\n\t\tdefer n.mu.RUnlock()\n\n\t\tif b, err := json.Marshal(n.result); err != nil {\n\t\t\thttpd.HttpError(\n\t\t\t\tw,\n\t\t\t\terr.Error(),\n\t\t\t\ttrue,\n\t\t\t\thttp.StatusInternalServerError,\n\t\t\t)\n\t\t} else {\n\t\t\t_, _ = w.Write(b)\n\t\t}\n\t}\n\n\tp := path.Join(\"/tasks/\", n.et.Task.ID, n.c.Endpoint)\n\n\tr := []httpd.Route{{\n\t\tMethod:      \"GET\",\n\t\tPattern:     p,\n\t\tHandlerFunc: hndl,\n\t}}\n\n\tn.endpoint = n.et.tm.HTTPDService.URL() + p\n\tn.mu.Lock()\n\tn.routes = r\n\tn.mu.Unlock()\n\n\terr := n.et.tm.HTTPDService.AddRoutes(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconsumer := edge.NewGroupedConsumer(\n\t\tn.ins[0],\n\t\tn,\n\t)\n\tn.statMap.Set(statCardinalityGauge, consumer.CardinalityVar())\n\n\treturn consumer.Consume()\n}\n\n// Update the result structure with a row.\nfunc (n *HTTPOutNode) updateResultWithRow(idx int, row *models.Row) {\n\tn.mu.Lock()\n\tdefer n.mu.Unlock()\n\tif idx >= len(n.result.Series) {\n\t\tn.incrementErrorCount()\n\t\tn.logger.Printf(\"E! 
index out of range for row update %d\", idx)\n\t\treturn\n\t}\n\tn.result.Series[idx] = row\n}\n\nfunc (n *HTTPOutNode) stopOut() {\n\tn.mu.Lock()\n\tdefer n.mu.Unlock()\n\tn.et.tm.HTTPDService.DelRoutes(n.routes)\n}\n\nfunc (n *HTTPOutNode) NewGroup(group edge.GroupInfo, first edge.PointMeta) (edge.Receiver, error) {\n\treturn edge.NewReceiverFromForwardReceiverWithStats(\n\t\tn.outs,\n\t\tedge.NewTimedForwardReceiver(n.timer, n.newGroup(group.ID)),\n\t), nil\n}\n\nfunc (n *HTTPOutNode) newGroup(groupID models.GroupID) *httpOutGroup {\n\tn.mu.Lock()\n\tdefer n.mu.Unlock()\n\n\tidx := len(n.result.Series)\n\tn.result.Series = append(n.result.Series, nil)\n\tg := &httpOutGroup{\n\t\tn:      n,\n\t\tidx:    idx,\n\t\tbuffer: new(edge.BatchBuffer),\n\t}\n\tn.indexes = append(n.indexes, g)\n\treturn g\n}\nfunc (n *HTTPOutNode) deleteGroup(idx int) {\n\tn.mu.Lock()\n\tdefer n.mu.Unlock()\n\n\tfor _, g := range n.indexes[idx+1:] {\n\t\tg.idx--\n\t}\n\tn.indexes = append(n.indexes[0:idx], n.indexes[idx+1:]...)\n\tn.result.Series = append(n.result.Series[0:idx], n.result.Series[idx+1:]...)\n}\n\ntype httpOutGroup struct {\n\tn      *HTTPOutNode\n\tid     models.GroupID\n\tidx    int\n\tbuffer *edge.BatchBuffer\n}\n\nfunc (g *httpOutGroup) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) {\n\treturn nil, g.buffer.BeginBatch(begin)\n}\n\nfunc (g *httpOutGroup) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) {\n\treturn nil, g.buffer.BatchPoint(bp)\n}\n\nfunc (g *httpOutGroup) EndBatch(end edge.EndBatchMessage) (edge.Message, error) {\n\treturn g.BufferedBatch(g.buffer.BufferedBatchMessage(end))\n}\n\nfunc (g *httpOutGroup) BufferedBatch(batch edge.BufferedBatchMessage) (edge.Message, error) {\n\trow := batch.ToRow()\n\tg.n.updateResultWithRow(g.idx, row)\n\treturn batch, nil\n}\n\nfunc (g *httpOutGroup) Point(p edge.PointMessage) (edge.Message, error) {\n\trow := p.ToRow()\n\tg.n.updateResultWithRow(g.idx, row)\n\treturn p, nil\n}\n\nfunc (g *httpOutGroup) Barrier(b edge.BarrierMessage) (edge.Message, error) {\n\treturn b, nil\n}\nfunc (g *httpOutGroup) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) {\n\tg.n.deleteGroup(g.idx)\n\treturn d, nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/http_post.go",
    "content": "package kapacitor\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net/http\"\n\t\"sync\"\n\n\t\"github.com/influxdata/kapacitor/bufpool\"\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/models\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n\t\"github.com/influxdata/kapacitor/services/httppost\"\n)\n\ntype HTTPPostNode struct {\n\tnode\n\tc        *pipeline.HTTPPostNode\n\tendpoint *httppost.Endpoint\n\tmu       sync.RWMutex\n\tbp       *bufpool.Pool\n}\n\n// Create a new  HTTPPostNode which submits received items via POST to an HTTP endpoint\nfunc newHTTPPostNode(et *ExecutingTask, n *pipeline.HTTPPostNode, l *log.Logger) (*HTTPPostNode, error) {\n\n\thn := &HTTPPostNode{\n\t\tnode: node{Node: n, et: et, logger: l},\n\t\tc:    n,\n\t\tbp:   bufpool.New(),\n\t}\n\n\t// Should only ever be 0 or 1 from validation of n\n\tif len(n.URLs) == 1 {\n\t\te := httppost.NewEndpoint(n.URLs[0], nil, httppost.BasicAuth{})\n\t\thn.endpoint = e\n\t}\n\n\t// Should only ever be 0 or 1 from validation of n\n\tif len(n.Endpoints) == 1 {\n\t\tendpointName := n.Endpoints[0]\n\t\te, ok := et.tm.HTTPPostService.Endpoint(endpointName)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"endpoint '%s' does not exist\", endpointName)\n\t\t}\n\t\thn.endpoint = e\n\t}\n\n\thn.node.runF = hn.runPost\n\treturn hn, nil\n}\n\nfunc (n *HTTPPostNode) runPost([]byte) error {\n\tconsumer := edge.NewGroupedConsumer(\n\t\tn.ins[0],\n\t\tn,\n\t)\n\tn.statMap.Set(statCardinalityGauge, consumer.CardinalityVar())\n\n\treturn consumer.Consume()\n\n}\n\nfunc (n *HTTPPostNode) NewGroup(group edge.GroupInfo, first edge.PointMeta) (edge.Receiver, error) {\n\tg := &httpPostGroup{\n\t\tn:      n,\n\t\tbuffer: new(edge.BatchBuffer),\n\t}\n\treturn edge.NewReceiverFromForwardReceiverWithStats(\n\t\tn.outs,\n\t\tedge.NewTimedForwardReceiver(n.timer, g),\n\t), nil\n}\n\ntype httpPostGroup struct {\n\tn      *HTTPPostNode\n\tbuffer *edge.BatchBuffer\n}\n\nfunc (g *httpPostGroup) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) {\n\treturn nil, g.buffer.BeginBatch(begin)\n}\n\nfunc (g *httpPostGroup) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) {\n\treturn nil, g.buffer.BatchPoint(bp)\n}\n\nfunc (g *httpPostGroup) EndBatch(end edge.EndBatchMessage) (edge.Message, error) {\n\treturn g.BufferedBatch(g.buffer.BufferedBatchMessage(end))\n}\n\nfunc (g *httpPostGroup) BufferedBatch(batch edge.BufferedBatchMessage) (edge.Message, error) {\n\trow := batch.ToRow()\n\tg.n.postRow(row)\n\treturn batch, nil\n}\n\nfunc (g *httpPostGroup) Point(p edge.PointMessage) (edge.Message, error) {\n\trow := p.ToRow()\n\tg.n.postRow(row)\n\treturn p, nil\n}\n\nfunc (g *httpPostGroup) Barrier(b edge.BarrierMessage) (edge.Message, error) {\n\treturn b, nil\n}\nfunc (g *httpPostGroup) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) {\n\treturn d, nil\n}\n\nfunc (n *HTTPPostNode) postRow(row *models.Row) {\n\tresult := new(models.Result)\n\tresult.Series = []*models.Row{row}\n\n\tbody := n.bp.Get()\n\tdefer n.bp.Put(body)\n\terr := json.NewEncoder(body).Encode(result)\n\tif err != nil {\n\t\tn.incrementErrorCount()\n\t\tn.logger.Printf(\"E! failed to marshal row data json: %v\", err)\n\t\treturn\n\t}\n\treq, err := n.endpoint.NewHTTPRequest(body)\n\tif err != nil {\n\t\tn.incrementErrorCount()\n\t\tn.logger.Printf(\"E! 
failed to create HTTP request: %v\", err)\n\t\treturn\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tfor k, v := range n.c.Headers {\n\t\treq.Header.Set(k, v)\n\t}\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\tn.incrementErrorCount()\n\t\tn.logger.Printf(\"E! failed to POST row data: %v\", err)\n\t\treturn\n\t}\n\tresp.Body.Close()\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/influxdb_out.go",
    "content": "package kapacitor\n\nimport (\n\t\"bytes\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/expvar\"\n\t\"github.com/influxdata/kapacitor/influxdb\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n\t\"github.com/pkg/errors\"\n)\n\nconst (\n\tstatsInfluxDBPointsWritten = \"points_written\"\n\tstatsInfluxDBWriteErrors   = \"write_errors\"\n)\n\ntype InfluxDBOutNode struct {\n\tnode\n\ti  *pipeline.InfluxDBOutNode\n\twb *writeBuffer\n\n\tpointsWritten *expvar.Int\n\twriteErrors   *expvar.Int\n\n\tbatchBuffer *edge.BatchBuffer\n}\n\nfunc newInfluxDBOutNode(et *ExecutingTask, n *pipeline.InfluxDBOutNode, l *log.Logger) (*InfluxDBOutNode, error) {\n\tif et.tm.InfluxDBService == nil {\n\t\treturn nil, errors.New(\"no InfluxDB cluster configured cannot use the InfluxDBOutNode\")\n\t}\n\tcli, err := et.tm.InfluxDBService.NewNamedClient(n.Cluster)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to get InfluxDB client\")\n\t}\n\tin := &InfluxDBOutNode{\n\t\tnode:        node{Node: n, et: et, logger: l},\n\t\ti:           n,\n\t\twb:          newWriteBuffer(int(n.Buffer), n.FlushInterval, cli),\n\t\tbatchBuffer: new(edge.BatchBuffer),\n\t}\n\tin.node.runF = in.runOut\n\tin.node.stopF = in.stopOut\n\tin.wb.i = in\n\treturn in, nil\n}\n\nfunc (n *InfluxDBOutNode) runOut([]byte) error {\n\tn.pointsWritten = &expvar.Int{}\n\tn.writeErrors = &expvar.Int{}\n\n\tn.statMap.Set(statsInfluxDBPointsWritten, n.pointsWritten)\n\tn.statMap.Set(statsInfluxDBWriteErrors, n.writeErrors)\n\n\t// Start the write buffer\n\tn.wb.start()\n\n\t// Create the database and retention policy\n\tif n.i.CreateFlag {\n\t\terr := func() error {\n\t\t\tcli, err := n.et.tm.InfluxDBService.NewNamedClient(n.i.Cluster)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tvar createDb bytes.Buffer\n\t\t\tcreateDb.WriteString(\"CREATE DATABASE \")\n\t\t\tcreateDb.WriteString(influxql.QuoteIdent(n.i.Database))\n\t\t\tif n.i.RetentionPolicy != \"\" {\n\t\t\t\tcreateDb.WriteString(\" WITH NAME \")\n\t\t\t\tcreateDb.WriteString(influxql.QuoteIdent(n.i.RetentionPolicy))\n\t\t\t}\n\t\t\t_, err = cli.Query(influxdb.Query{Command: createDb.String()})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\t\t}()\n\t\tif err != nil {\n\t\t\tn.incrementErrorCount()\n\t\t\tn.logger.Printf(\"E! 
failed to create database %q on cluster %q: %v\", n.i.Database, n.i.Cluster, err)\n\t\t}\n\t}\n\n\t// Setup consumer\n\tconsumer := edge.NewConsumerWithReceiver(\n\t\tn.ins[0],\n\t\tedge.NewReceiverFromForwardReceiverWithStats(\n\t\t\tn.outs,\n\t\t\tedge.NewTimedForwardReceiver(n.timer, n),\n\t\t),\n\t)\n\treturn consumer.Consume()\n}\n\nfunc (n *InfluxDBOutNode) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) {\n\treturn nil, n.batchBuffer.BeginBatch(begin)\n}\n\nfunc (n *InfluxDBOutNode) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) {\n\treturn nil, n.batchBuffer.BatchPoint(bp)\n}\n\nfunc (n *InfluxDBOutNode) EndBatch(end edge.EndBatchMessage) (edge.Message, error) {\n\treturn n.BufferedBatch(n.batchBuffer.BufferedBatchMessage(end))\n}\n\nfunc (n *InfluxDBOutNode) BufferedBatch(batch edge.BufferedBatchMessage) (edge.Message, error) {\n\tn.write(\"\", \"\", batch)\n\treturn batch, nil\n}\n\nfunc (n *InfluxDBOutNode) Point(p edge.PointMessage) (edge.Message, error) {\n\tbatch := edge.NewBufferedBatchMessage(\n\t\tedge.NewBeginBatchMessage(\n\t\t\tp.Name(),\n\t\t\tp.Tags(),\n\t\t\tp.Dimensions().ByName,\n\t\t\tp.Time(),\n\t\t\t1,\n\t\t),\n\t\t[]edge.BatchPointMessage{\n\t\t\tedge.NewBatchPointMessage(\n\t\t\t\tp.Fields(),\n\t\t\t\tp.Tags(),\n\t\t\t\tp.Time(),\n\t\t\t),\n\t\t},\n\t\tedge.NewEndBatchMessage(),\n\t)\n\tn.write(p.Database(), p.RetentionPolicy(), batch)\n\treturn p, nil\n}\n\nfunc (n *InfluxDBOutNode) Barrier(b edge.BarrierMessage) (edge.Message, error) {\n\treturn b, nil\n}\nfunc (n *InfluxDBOutNode) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) {\n\treturn d, nil\n}\n\nfunc (n *InfluxDBOutNode) stopOut() {\n\tn.wb.flush()\n\tn.wb.abort()\n}\n\nfunc (n *InfluxDBOutNode) write(db, rp string, batch edge.BufferedBatchMessage) error {\n\tif n.i.Database != \"\" {\n\t\tdb = n.i.Database\n\t}\n\tif n.i.RetentionPolicy != \"\" {\n\t\trp = n.i.RetentionPolicy\n\t}\n\tname := n.i.Measurement\n\tif name == \"\" {\n\t\tname = batch.Name()\n\t}\n\n\tpoints := make([]influxdb.Point, len(batch.Points()))\n\tfor j, p := range batch.Points() {\n\t\tvar tags map[string]string\n\t\tif len(n.i.Tags) > 0 {\n\t\t\ttags = make(map[string]string, len(p.Tags())+len(n.i.Tags))\n\t\t\tfor k, v := range p.Tags() {\n\t\t\t\ttags[k] = v\n\t\t\t}\n\t\t\tfor k, v := range n.i.Tags {\n\t\t\t\ttags[k] = v\n\t\t\t}\n\t\t} else {\n\t\t\ttags = p.Tags()\n\t\t}\n\t\tpoints[j] = influxdb.Point{\n\t\t\tName:   name,\n\t\t\tTags:   tags,\n\t\t\tFields: p.Fields(),\n\t\t\tTime:   p.Time(),\n\t\t}\n\t}\n\tbpc := influxdb.BatchPointsConfig{\n\t\tDatabase:         db,\n\t\tRetentionPolicy:  rp,\n\t\tWriteConsistency: n.i.WriteConsistency,\n\t\tPrecision:        n.i.Precision,\n\t}\n\tn.wb.enqueue(bpc, points)\n\treturn nil\n}\n\ntype writeBuffer struct {\n\tsize          int\n\tflushInterval time.Duration\n\terrC          chan error\n\tqueue         chan queueEntry\n\tbuffer        map[influxdb.BatchPointsConfig]influxdb.BatchPoints\n\n\tflushing chan struct{}\n\tflushed  chan struct{}\n\n\tstopping chan struct{}\n\twg       sync.WaitGroup\n\tcli      influxdb.Client\n\n\ti *InfluxDBOutNode\n}\n\ntype queueEntry struct {\n\tbpc    influxdb.BatchPointsConfig\n\tpoints []influxdb.Point\n}\n\nfunc newWriteBuffer(size int, flushInterval time.Duration, cli influxdb.Client) *writeBuffer {\n\treturn &writeBuffer{\n\t\tcli:           cli,\n\t\tsize:          size,\n\t\tflushInterval: flushInterval,\n\t\tflushing:      make(chan struct{}),\n\t\tflushed:       make(chan 
struct{}),\n\t\tqueue:         make(chan queueEntry),\n\t\tbuffer:        make(map[influxdb.BatchPointsConfig]influxdb.BatchPoints),\n\t\tstopping:      make(chan struct{}),\n\t}\n}\n\nfunc (w *writeBuffer) enqueue(bpc influxdb.BatchPointsConfig, points []influxdb.Point) {\n\tqe := queueEntry{\n\t\tbpc:    bpc,\n\t\tpoints: points,\n\t}\n\tselect {\n\tcase w.queue <- qe:\n\tcase <-w.stopping:\n\t}\n}\n\nfunc (w *writeBuffer) start() {\n\tw.wg.Add(1)\n\tgo w.run()\n}\n\nfunc (w *writeBuffer) flush() {\n\tw.flushing <- struct{}{}\n\t<-w.flushed\n}\n\nfunc (w *writeBuffer) abort() {\n\tclose(w.stopping)\n\tw.wg.Wait()\n}\n\nfunc (w *writeBuffer) run() {\n\tdefer w.wg.Done()\n\tflushTick := time.NewTicker(w.flushInterval)\n\tdefer flushTick.Stop()\n\tvar err error\n\tfor {\n\t\tselect {\n\t\tcase qe := <-w.queue:\n\t\t\t// Read incoming points off queue\n\t\t\tbp, ok := w.buffer[qe.bpc]\n\t\t\tif !ok {\n\t\t\t\tbp, err = influxdb.NewBatchPoints(qe.bpc)\n\t\t\t\tif err != nil {\n\t\t\t\t\tw.i.incrementErrorCount()\n\t\t\t\t\tw.i.logger.Println(\"E! failed to create InfluxDB batch points:\", err)\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tw.buffer[qe.bpc] = bp\n\t\t\t}\n\t\t\tbp.AddPoints(qe.points)\n\t\t\t// Check if we hit buffer size\n\t\t\tif len(bp.Points()) >= w.size {\n\t\t\t\terr = w.write(bp)\n\t\t\t\tif err != nil {\n\t\t\t\t\tw.i.incrementErrorCount()\n\t\t\t\t\tw.i.logger.Println(\"E! failed to write points to InfluxDB:\", err)\n\t\t\t\t}\n\t\t\t\tdelete(w.buffer, qe.bpc)\n\t\t\t}\n\t\tcase <-w.flushing:\n\t\t\t// Explicit flush called\n\t\t\tw.writeAll()\n\t\t\tw.flushed <- struct{}{}\n\t\tcase <-flushTick.C:\n\t\t\t// Flush all points after flush interval timeout\n\t\t\tw.writeAll()\n\t\tcase <-w.stopping:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (w *writeBuffer) writeAll() {\n\tfor bpc, bp := range w.buffer {\n\t\terr := w.write(bp)\n\t\tif err != nil {\n\t\t\tw.i.incrementErrorCount()\n\t\t\tw.i.logger.Println(\"E! failed to write points to InfluxDB:\", err)\n\t\t}\n\t\tdelete(w.buffer, bpc)\n\t}\n}\n\nfunc (w *writeBuffer) write(bp influxdb.BatchPoints) error {\n\terr := w.cli.Write(bp)\n\tif err != nil {\n\t\tw.i.writeErrors.Add(1)\n\t\treturn err\n\t}\n\tw.i.pointsWritten.Add(int64(len(bp.Points())))\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/influxql.gen.go",
    "content": "// Generated by tmpl\n// https://github.com/benbjohnson/tmpl\n//\n// DO NOT EDIT!\n// Source: influxql.gen.go.tmpl\n\npackage kapacitor\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/models\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n)\n\nfunc convertFloatPoint(\n\tname string,\n\tp edge.FieldsTagsTimeGetter,\n\tfield string,\n\tisSimpleSelector bool,\n\ttopBottomInfo *pipeline.TopBottomCallInfo,\n) (*influxql.FloatPoint, error) {\n\tvalue, ok := p.Fields()[field]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"field %s missing from point cannot aggregate\", field)\n\t}\n\ttyped, ok := value.(float64)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"field %s has wrong type: got %T exp float64\", field, value)\n\t}\n\tap := &influxql.FloatPoint{\n\t\tName:  name,\n\t\tTags:  influxql.NewTags(p.Tags()),\n\t\tTime:  p.Time().UnixNano(),\n\t\tValue: typed,\n\t}\n\tif topBottomInfo != nil {\n\t\t// We need to populate the Aux fields\n\t\tfloatPopulateAuxFieldsAndTags(ap, topBottomInfo.FieldsAndTags, p.Fields(), p.Tags())\n\t}\n\n\tif isSimpleSelector {\n\t\tap.Aux = []interface{}{p.Tags(), p.Fields()}\n\t}\n\n\treturn ap, nil\n}\n\ntype floatPointAggregator struct {\n\tfield            string\n\ttopBottomInfo    *pipeline.TopBottomCallInfo\n\tisSimpleSelector bool\n\taggregator       influxql.FloatPointAggregator\n}\n\nfunc floatPopulateAuxFieldsAndTags(ap *influxql.FloatPoint, fieldsAndTags []string, fields models.Fields, tags models.Tags) {\n\tap.Aux = make([]interface{}, len(fieldsAndTags))\n\tfor i, name := range fieldsAndTags {\n\t\tif f, ok := fields[name]; ok {\n\t\t\tap.Aux[i] = f\n\t\t} else {\n\t\t\tap.Aux[i] = tags[name]\n\t\t}\n\t}\n}\n\nfunc (a *floatPointAggregator) AggregatePoint(name string, p edge.FieldsTagsTimeGetter) error {\n\tap, err := convertFloatPoint(name, p, a.field, a.isSimpleSelector, a.topBottomInfo)\n\tif err != nil {\n\t\treturn nil\n\t}\n\ta.aggregator.AggregateFloat(ap)\n\treturn nil\n}\n\ntype floatPointEmitter struct {\n\tbaseReduceContext\n\temitter          influxql.FloatPointEmitter\n\tisSimpleSelector bool\n\tbyName           bool\n}\n\nfunc (e *floatPointEmitter) EmitPoint() (edge.PointMessage, error) {\n\tslice := e.emitter.Emit()\n\tif len(slice) != 1 {\n\t\treturn nil, nil\n\t}\n\tap := slice[0]\n\tvar t time.Time\n\tif e.pointTimes {\n\t\tif ap.Time == influxql.ZeroTime {\n\t\t\tt = e.time\n\t\t} else {\n\t\t\tt = time.Unix(0, ap.Time).UTC()\n\t\t}\n\t} else {\n\t\tt = e.time\n\t}\n\n\tvar fields models.Fields\n\tvar tags models.Tags\n\tif e.isSimpleSelector {\n\t\ttags = ap.Aux[0].(models.Tags)\n\t\tfields = ap.Aux[1].(models.Fields)\n\t\tif e.as != e.field {\n\t\t\tfields = fields.Copy()\n\t\t\tfields[e.as] = fields[e.field]\n\t\t\tdelete(fields, e.field)\n\t\t}\n\t} else {\n\t\ttags = e.groupInfo.Tags\n\t\tfields = map[string]interface{}{e.as: ap.Value}\n\t}\n\n\treturn edge.NewPointMessage(\n\t\te.name, \"\", \"\",\n\t\te.groupInfo.Dimensions,\n\t\tfields,\n\t\ttags,\n\t\tt,\n\t), nil\n}\n\nfunc (e *floatPointEmitter) EmitBatch() edge.BufferedBatchMessage {\n\tslice := e.emitter.Emit()\n\tbegin := edge.NewBeginBatchMessage(\n\t\te.name,\n\t\te.groupInfo.Tags,\n\t\te.groupInfo.Dimensions.ByName,\n\t\te.time,\n\t\tlen(slice),\n\t)\n\tpoints := make([]edge.BatchPointMessage, len(slice))\n\tvar t time.Time\n\tfor i, ap := range slice {\n\t\tif e.pointTimes {\n\t\t\tif ap.Time == influxql.ZeroTime {\n\t\t\t\tt = 
e.time\n\t\t\t} else {\n\t\t\t\tt = time.Unix(0, ap.Time).UTC()\n\t\t\t}\n\t\t} else {\n\t\t\tt = e.time\n\t\t}\n\t\tvar tags models.Tags\n\t\tif l := len(ap.Tags.KeyValues()); l > 0 {\n\t\t\t// Merge batch and point specific tags\n\t\t\ttags = make(models.Tags, len(e.groupInfo.Tags)+l)\n\t\t\tfor k, v := range e.groupInfo.Tags {\n\t\t\t\ttags[k] = v\n\t\t\t}\n\t\t\tfor k, v := range ap.Tags.KeyValues() {\n\t\t\t\ttags[k] = v\n\t\t\t}\n\t\t} else {\n\t\t\ttags = e.groupInfo.Tags\n\t\t}\n\t\tpoints[i] = edge.NewBatchPointMessage(\n\t\t\tmodels.Fields{e.as: ap.Value},\n\t\t\ttags,\n\t\t\tt,\n\t\t)\n\t\tif t.After(begin.Time()) {\n\t\t\tbegin.SetTime(t)\n\t\t}\n\t}\n\treturn edge.NewBufferedBatchMessage(\n\t\tbegin,\n\t\tpoints,\n\t\tedge.NewEndBatchMessage(),\n\t)\n}\n\nfunc convertIntegerPoint(\n\tname string,\n\tp edge.FieldsTagsTimeGetter,\n\tfield string,\n\tisSimpleSelector bool,\n\ttopBottomInfo *pipeline.TopBottomCallInfo,\n) (*influxql.IntegerPoint, error) {\n\tvalue, ok := p.Fields()[field]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"field %s missing from point cannot aggregate\", field)\n\t}\n\ttyped, ok := value.(int64)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"field %s has wrong type: got %T exp int64\", field, value)\n\t}\n\tap := &influxql.IntegerPoint{\n\t\tName:  name,\n\t\tTags:  influxql.NewTags(p.Tags()),\n\t\tTime:  p.Time().UnixNano(),\n\t\tValue: typed,\n\t}\n\tif topBottomInfo != nil {\n\t\t// We need to populate the Aux fields\n\t\tintegerPopulateAuxFieldsAndTags(ap, topBottomInfo.FieldsAndTags, p.Fields(), p.Tags())\n\t}\n\n\tif isSimpleSelector {\n\t\tap.Aux = []interface{}{p.Tags(), p.Fields()}\n\t}\n\n\treturn ap, nil\n}\n\ntype integerPointAggregator struct {\n\tfield            string\n\ttopBottomInfo    *pipeline.TopBottomCallInfo\n\tisSimpleSelector bool\n\taggregator       influxql.IntegerPointAggregator\n}\n\nfunc integerPopulateAuxFieldsAndTags(ap *influxql.IntegerPoint, fieldsAndTags []string, fields models.Fields, tags models.Tags) {\n\tap.Aux = make([]interface{}, len(fieldsAndTags))\n\tfor i, name := range fieldsAndTags {\n\t\tif f, ok := fields[name]; ok {\n\t\t\tap.Aux[i] = f\n\t\t} else {\n\t\t\tap.Aux[i] = tags[name]\n\t\t}\n\t}\n}\n\nfunc (a *integerPointAggregator) AggregatePoint(name string, p edge.FieldsTagsTimeGetter) error {\n\tap, err := convertIntegerPoint(name, p, a.field, a.isSimpleSelector, a.topBottomInfo)\n\tif err != nil {\n\t\treturn nil\n\t}\n\ta.aggregator.AggregateInteger(ap)\n\treturn nil\n}\n\ntype integerPointEmitter struct {\n\tbaseReduceContext\n\temitter          influxql.IntegerPointEmitter\n\tisSimpleSelector bool\n\tbyName           bool\n}\n\nfunc (e *integerPointEmitter) EmitPoint() (edge.PointMessage, error) {\n\tslice := e.emitter.Emit()\n\tif len(slice) != 1 {\n\t\treturn nil, nil\n\t}\n\tap := slice[0]\n\tvar t time.Time\n\tif e.pointTimes {\n\t\tif ap.Time == influxql.ZeroTime {\n\t\t\tt = e.time\n\t\t} else {\n\t\t\tt = time.Unix(0, ap.Time).UTC()\n\t\t}\n\t} else {\n\t\tt = e.time\n\t}\n\n\tvar fields models.Fields\n\tvar tags models.Tags\n\tif e.isSimpleSelector {\n\t\ttags = ap.Aux[0].(models.Tags)\n\t\tfields = ap.Aux[1].(models.Fields)\n\t\tif e.as != e.field {\n\t\t\tfields = fields.Copy()\n\t\t\tfields[e.as] = fields[e.field]\n\t\t\tdelete(fields, e.field)\n\t\t}\n\t} else {\n\t\ttags = e.groupInfo.Tags\n\t\tfields = map[string]interface{}{e.as: ap.Value}\n\t}\n\n\treturn edge.NewPointMessage(\n\t\te.name, \"\", \"\",\n\t\te.groupInfo.Dimensions,\n\t\tfields,\n\t\ttags,\n\t\tt,\n\t), nil\n}\n\nfunc (e 
*integerPointEmitter) EmitBatch() edge.BufferedBatchMessage {\n\tslice := e.emitter.Emit()\n\tbegin := edge.NewBeginBatchMessage(\n\t\te.name,\n\t\te.groupInfo.Tags,\n\t\te.groupInfo.Dimensions.ByName,\n\t\te.time,\n\t\tlen(slice),\n\t)\n\tpoints := make([]edge.BatchPointMessage, len(slice))\n\tvar t time.Time\n\tfor i, ap := range slice {\n\t\tif e.pointTimes {\n\t\t\tif ap.Time == influxql.ZeroTime {\n\t\t\t\tt = e.time\n\t\t\t} else {\n\t\t\t\tt = time.Unix(0, ap.Time).UTC()\n\t\t\t}\n\t\t} else {\n\t\t\tt = e.time\n\t\t}\n\t\tvar tags models.Tags\n\t\tif l := len(ap.Tags.KeyValues()); l > 0 {\n\t\t\t// Merge batch and point specific tags\n\t\t\ttags = make(models.Tags, len(e.groupInfo.Tags)+l)\n\t\t\tfor k, v := range e.groupInfo.Tags {\n\t\t\t\ttags[k] = v\n\t\t\t}\n\t\t\tfor k, v := range ap.Tags.KeyValues() {\n\t\t\t\ttags[k] = v\n\t\t\t}\n\t\t} else {\n\t\t\ttags = e.groupInfo.Tags\n\t\t}\n\t\tpoints[i] = edge.NewBatchPointMessage(\n\t\t\tmodels.Fields{e.as: ap.Value},\n\t\t\ttags,\n\t\t\tt,\n\t\t)\n\t\tif t.After(begin.Time()) {\n\t\t\tbegin.SetTime(t)\n\t\t}\n\t}\n\treturn edge.NewBufferedBatchMessage(\n\t\tbegin,\n\t\tpoints,\n\t\tedge.NewEndBatchMessage(),\n\t)\n}\n\nfunc convertStringPoint(\n\tname string,\n\tp edge.FieldsTagsTimeGetter,\n\tfield string,\n\tisSimpleSelector bool,\n\ttopBottomInfo *pipeline.TopBottomCallInfo,\n) (*influxql.StringPoint, error) {\n\tvalue, ok := p.Fields()[field]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"field %s missing from point cannot aggregate\", field)\n\t}\n\ttyped, ok := value.(string)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"field %s has wrong type: got %T exp string\", field, value)\n\t}\n\tap := &influxql.StringPoint{\n\t\tName:  name,\n\t\tTags:  influxql.NewTags(p.Tags()),\n\t\tTime:  p.Time().UnixNano(),\n\t\tValue: typed,\n\t}\n\tif topBottomInfo != nil {\n\t\t// We need to populate the Aux fields\n\t\tstringPopulateAuxFieldsAndTags(ap, topBottomInfo.FieldsAndTags, p.Fields(), p.Tags())\n\t}\n\n\tif isSimpleSelector {\n\t\tap.Aux = []interface{}{p.Tags(), p.Fields()}\n\t}\n\n\treturn ap, nil\n}\n\ntype stringPointAggregator struct {\n\tfield            string\n\ttopBottomInfo    *pipeline.TopBottomCallInfo\n\tisSimpleSelector bool\n\taggregator       influxql.StringPointAggregator\n}\n\nfunc stringPopulateAuxFieldsAndTags(ap *influxql.StringPoint, fieldsAndTags []string, fields models.Fields, tags models.Tags) {\n\tap.Aux = make([]interface{}, len(fieldsAndTags))\n\tfor i, name := range fieldsAndTags {\n\t\tif f, ok := fields[name]; ok {\n\t\t\tap.Aux[i] = f\n\t\t} else {\n\t\t\tap.Aux[i] = tags[name]\n\t\t}\n\t}\n}\n\nfunc (a *stringPointAggregator) AggregatePoint(name string, p edge.FieldsTagsTimeGetter) error {\n\tap, err := convertStringPoint(name, p, a.field, a.isSimpleSelector, a.topBottomInfo)\n\tif err != nil {\n\t\treturn nil\n\t}\n\ta.aggregator.AggregateString(ap)\n\treturn nil\n}\n\ntype stringPointEmitter struct {\n\tbaseReduceContext\n\temitter          influxql.StringPointEmitter\n\tisSimpleSelector bool\n\tbyName           bool\n}\n\nfunc (e *stringPointEmitter) EmitPoint() (edge.PointMessage, error) {\n\tslice := e.emitter.Emit()\n\tif len(slice) != 1 {\n\t\treturn nil, nil\n\t}\n\tap := slice[0]\n\tvar t time.Time\n\tif e.pointTimes {\n\t\tif ap.Time == influxql.ZeroTime {\n\t\t\tt = e.time\n\t\t} else {\n\t\t\tt = time.Unix(0, ap.Time).UTC()\n\t\t}\n\t} else {\n\t\tt = e.time\n\t}\n\n\tvar fields models.Fields\n\tvar tags models.Tags\n\tif e.isSimpleSelector {\n\t\ttags = ap.Aux[0].(models.Tags)\n\t\tfields = 
ap.Aux[1].(models.Fields)\n\t\tif e.as != e.field {\n\t\t\tfields = fields.Copy()\n\t\t\tfields[e.as] = fields[e.field]\n\t\t\tdelete(fields, e.field)\n\t\t}\n\t} else {\n\t\ttags = e.groupInfo.Tags\n\t\tfields = map[string]interface{}{e.as: ap.Value}\n\t}\n\n\treturn edge.NewPointMessage(\n\t\te.name, \"\", \"\",\n\t\te.groupInfo.Dimensions,\n\t\tfields,\n\t\ttags,\n\t\tt,\n\t), nil\n}\n\nfunc (e *stringPointEmitter) EmitBatch() edge.BufferedBatchMessage {\n\tslice := e.emitter.Emit()\n\tbegin := edge.NewBeginBatchMessage(\n\t\te.name,\n\t\te.groupInfo.Tags,\n\t\te.groupInfo.Dimensions.ByName,\n\t\te.time,\n\t\tlen(slice),\n\t)\n\tpoints := make([]edge.BatchPointMessage, len(slice))\n\tvar t time.Time\n\tfor i, ap := range slice {\n\t\tif e.pointTimes {\n\t\t\tif ap.Time == influxql.ZeroTime {\n\t\t\t\tt = e.time\n\t\t\t} else {\n\t\t\t\tt = time.Unix(0, ap.Time).UTC()\n\t\t\t}\n\t\t} else {\n\t\t\tt = e.time\n\t\t}\n\t\tvar tags models.Tags\n\t\tif l := len(ap.Tags.KeyValues()); l > 0 {\n\t\t\t// Merge batch and point specific tags\n\t\t\ttags = make(models.Tags, len(e.groupInfo.Tags)+l)\n\t\t\tfor k, v := range e.groupInfo.Tags {\n\t\t\t\ttags[k] = v\n\t\t\t}\n\t\t\tfor k, v := range ap.Tags.KeyValues() {\n\t\t\t\ttags[k] = v\n\t\t\t}\n\t\t} else {\n\t\t\ttags = e.groupInfo.Tags\n\t\t}\n\t\tpoints[i] = edge.NewBatchPointMessage(\n\t\t\tmodels.Fields{e.as: ap.Value},\n\t\t\ttags,\n\t\t\tt,\n\t\t)\n\t\tif t.After(begin.Time()) {\n\t\t\tbegin.SetTime(t)\n\t\t}\n\t}\n\treturn edge.NewBufferedBatchMessage(\n\t\tbegin,\n\t\tpoints,\n\t\tedge.NewEndBatchMessage(),\n\t)\n}\n\nfunc convertBooleanPoint(\n\tname string,\n\tp edge.FieldsTagsTimeGetter,\n\tfield string,\n\tisSimpleSelector bool,\n\ttopBottomInfo *pipeline.TopBottomCallInfo,\n) (*influxql.BooleanPoint, error) {\n\tvalue, ok := p.Fields()[field]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"field %s missing from point cannot aggregate\", field)\n\t}\n\ttyped, ok := value.(bool)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"field %s has wrong type: got %T exp bool\", field, value)\n\t}\n\tap := &influxql.BooleanPoint{\n\t\tName:  name,\n\t\tTags:  influxql.NewTags(p.Tags()),\n\t\tTime:  p.Time().UnixNano(),\n\t\tValue: typed,\n\t}\n\tif topBottomInfo != nil {\n\t\t// We need to populate the Aux fields\n\t\tbooleanPopulateAuxFieldsAndTags(ap, topBottomInfo.FieldsAndTags, p.Fields(), p.Tags())\n\t}\n\n\tif isSimpleSelector {\n\t\tap.Aux = []interface{}{p.Tags(), p.Fields()}\n\t}\n\n\treturn ap, nil\n}\n\ntype booleanPointAggregator struct {\n\tfield            string\n\ttopBottomInfo    *pipeline.TopBottomCallInfo\n\tisSimpleSelector bool\n\taggregator       influxql.BooleanPointAggregator\n}\n\nfunc booleanPopulateAuxFieldsAndTags(ap *influxql.BooleanPoint, fieldsAndTags []string, fields models.Fields, tags models.Tags) {\n\tap.Aux = make([]interface{}, len(fieldsAndTags))\n\tfor i, name := range fieldsAndTags {\n\t\tif f, ok := fields[name]; ok {\n\t\t\tap.Aux[i] = f\n\t\t} else {\n\t\t\tap.Aux[i] = tags[name]\n\t\t}\n\t}\n}\n\nfunc (a *booleanPointAggregator) AggregatePoint(name string, p edge.FieldsTagsTimeGetter) error {\n\tap, err := convertBooleanPoint(name, p, a.field, a.isSimpleSelector, a.topBottomInfo)\n\tif err != nil {\n\t\treturn nil\n\t}\n\ta.aggregator.AggregateBoolean(ap)\n\treturn nil\n}\n\ntype booleanPointEmitter struct {\n\tbaseReduceContext\n\temitter          influxql.BooleanPointEmitter\n\tisSimpleSelector bool\n\tbyName           bool\n}\n\nfunc (e *booleanPointEmitter) EmitPoint() (edge.PointMessage, error) {\n\tslice 
:= e.emitter.Emit()\n\tif len(slice) != 1 {\n\t\treturn nil, nil\n\t}\n\tap := slice[0]\n\tvar t time.Time\n\tif e.pointTimes {\n\t\tif ap.Time == influxql.ZeroTime {\n\t\t\tt = e.time\n\t\t} else {\n\t\t\tt = time.Unix(0, ap.Time).UTC()\n\t\t}\n\t} else {\n\t\tt = e.time\n\t}\n\n\tvar fields models.Fields\n\tvar tags models.Tags\n\tif e.isSimpleSelector {\n\t\ttags = ap.Aux[0].(models.Tags)\n\t\tfields = ap.Aux[1].(models.Fields)\n\t\tif e.as != e.field {\n\t\t\tfields = fields.Copy()\n\t\t\tfields[e.as] = fields[e.field]\n\t\t\tdelete(fields, e.field)\n\t\t}\n\t} else {\n\t\ttags = e.groupInfo.Tags\n\t\tfields = map[string]interface{}{e.as: ap.Value}\n\t}\n\n\treturn edge.NewPointMessage(\n\t\te.name, \"\", \"\",\n\t\te.groupInfo.Dimensions,\n\t\tfields,\n\t\ttags,\n\t\tt,\n\t), nil\n}\n\nfunc (e *booleanPointEmitter) EmitBatch() edge.BufferedBatchMessage {\n\tslice := e.emitter.Emit()\n\tbegin := edge.NewBeginBatchMessage(\n\t\te.name,\n\t\te.groupInfo.Tags,\n\t\te.groupInfo.Dimensions.ByName,\n\t\te.time,\n\t\tlen(slice),\n\t)\n\tpoints := make([]edge.BatchPointMessage, len(slice))\n\tvar t time.Time\n\tfor i, ap := range slice {\n\t\tif e.pointTimes {\n\t\t\tif ap.Time == influxql.ZeroTime {\n\t\t\t\tt = e.time\n\t\t\t} else {\n\t\t\t\tt = time.Unix(0, ap.Time).UTC()\n\t\t\t}\n\t\t} else {\n\t\t\tt = e.time\n\t\t}\n\t\tvar tags models.Tags\n\t\tif l := len(ap.Tags.KeyValues()); l > 0 {\n\t\t\t// Merge batch and point specific tags\n\t\t\ttags = make(models.Tags, len(e.groupInfo.Tags)+l)\n\t\t\tfor k, v := range e.groupInfo.Tags {\n\t\t\t\ttags[k] = v\n\t\t\t}\n\t\t\tfor k, v := range ap.Tags.KeyValues() {\n\t\t\t\ttags[k] = v\n\t\t\t}\n\t\t} else {\n\t\t\ttags = e.groupInfo.Tags\n\t\t}\n\t\tpoints[i] = edge.NewBatchPointMessage(\n\t\t\tmodels.Fields{e.as: ap.Value},\n\t\t\ttags,\n\t\t\tt,\n\t\t)\n\t\tif t.After(begin.Time()) {\n\t\t\tbegin.SetTime(t)\n\t\t}\n\t}\n\treturn edge.NewBufferedBatchMessage(\n\t\tbegin,\n\t\tpoints,\n\t\tedge.NewEndBatchMessage(),\n\t)\n}\n\n// floatReduceContext uses composition to implement the reduceContext interface\ntype floatReduceContext struct {\n\tfloatPointAggregator\n\tfloatPointEmitter\n}\n\n// floatIntegerReduceContext uses composition to implement the reduceContext interface\ntype floatIntegerReduceContext struct {\n\tfloatPointAggregator\n\tintegerPointEmitter\n}\n\n// floatStringReduceContext uses composition to implement the reduceContext interface\ntype floatStringReduceContext struct {\n\tfloatPointAggregator\n\tstringPointEmitter\n}\n\n// floatBooleanReduceContext uses composition to implement the reduceContext interface\ntype floatBooleanReduceContext struct {\n\tfloatPointAggregator\n\tbooleanPointEmitter\n}\n\n// integerFloatReduceContext uses composition to implement the reduceContext interface\ntype integerFloatReduceContext struct {\n\tintegerPointAggregator\n\tfloatPointEmitter\n}\n\n// integerReduceContext uses composition to implement the reduceContext interface\ntype integerReduceContext struct {\n\tintegerPointAggregator\n\tintegerPointEmitter\n}\n\n// integerStringReduceContext uses composition to implement the reduceContext interface\ntype integerStringReduceContext struct {\n\tintegerPointAggregator\n\tstringPointEmitter\n}\n\n// integerBooleanReduceContext uses composition to implement the reduceContext interface\ntype integerBooleanReduceContext struct {\n\tintegerPointAggregator\n\tbooleanPointEmitter\n}\n\n// stringFloatReduceContext uses composition to implement the reduceContext interface\ntype stringFloatReduceContext 
struct {\n\tstringPointAggregator\n\tfloatPointEmitter\n}\n\n// stringIntegerReduceContext uses composition to implement the reduceContext interface\ntype stringIntegerReduceContext struct {\n\tstringPointAggregator\n\tintegerPointEmitter\n}\n\n// stringReduceContext uses composition to implement the reduceContext interface\ntype stringReduceContext struct {\n\tstringPointAggregator\n\tstringPointEmitter\n}\n\n// stringBooleanReduceContext uses composition to implement the reduceContext interface\ntype stringBooleanReduceContext struct {\n\tstringPointAggregator\n\tbooleanPointEmitter\n}\n\n// booleanFloatReduceContext uses composition to implement the reduceContext interface\ntype booleanFloatReduceContext struct {\n\tbooleanPointAggregator\n\tfloatPointEmitter\n}\n\n// booleanIntegerReduceContext uses composition to implement the reduceContext interface\ntype booleanIntegerReduceContext struct {\n\tbooleanPointAggregator\n\tintegerPointEmitter\n}\n\n// booleanStringReduceContext uses composition to implement the reduceContext interface\ntype booleanStringReduceContext struct {\n\tbooleanPointAggregator\n\tstringPointEmitter\n}\n\n// booleanReduceContext uses composition to implement the reduceContext interface\ntype booleanReduceContext struct {\n\tbooleanPointAggregator\n\tbooleanPointEmitter\n}\n\nfunc determineReduceContextCreateFn(method string, kind reflect.Kind, rc pipeline.ReduceCreater) (fn createReduceContextFunc, err error) {\n\tswitch kind {\n\n\tcase reflect.Float64:\n\t\tswitch {\n\n\t\tcase rc.CreateFloatReducer != nil:\n\t\t\tfn = func(c baseReduceContext) reduceContext {\n\t\t\t\ta, e := rc.CreateFloatReducer()\n\t\t\t\treturn &floatReduceContext{\n\t\t\t\t\tfloatPointAggregator: floatPointAggregator{\n\t\t\t\t\t\tfield:            c.field,\n\t\t\t\t\t\ttopBottomInfo:    rc.TopBottomCallInfo,\n\t\t\t\t\t\tisSimpleSelector: rc.IsSimpleSelector,\n\t\t\t\t\t\taggregator:       a,\n\t\t\t\t\t},\n\t\t\t\t\tfloatPointEmitter: floatPointEmitter{\n\t\t\t\t\t\tbaseReduceContext: c,\n\t\t\t\t\t\temitter:           e,\n\t\t\t\t\t\tisSimpleSelector:  rc.IsSimpleSelector,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase rc.CreateFloatIntegerReducer != nil:\n\t\t\tfn = func(c baseReduceContext) reduceContext {\n\t\t\t\ta, e := rc.CreateFloatIntegerReducer()\n\t\t\t\treturn &floatIntegerReduceContext{\n\t\t\t\t\tfloatPointAggregator: floatPointAggregator{\n\t\t\t\t\t\tfield:            c.field,\n\t\t\t\t\t\ttopBottomInfo:    rc.TopBottomCallInfo,\n\t\t\t\t\t\tisSimpleSelector: rc.IsSimpleSelector,\n\t\t\t\t\t\taggregator:       a,\n\t\t\t\t\t},\n\t\t\t\t\tintegerPointEmitter: integerPointEmitter{\n\t\t\t\t\t\tbaseReduceContext: c,\n\t\t\t\t\t\temitter:           e,\n\t\t\t\t\t\tisSimpleSelector:  rc.IsSimpleSelector,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase rc.CreateFloatStringReducer != nil:\n\t\t\tfn = func(c baseReduceContext) reduceContext {\n\t\t\t\ta, e := rc.CreateFloatStringReducer()\n\t\t\t\treturn &floatStringReduceContext{\n\t\t\t\t\tfloatPointAggregator: floatPointAggregator{\n\t\t\t\t\t\tfield:            c.field,\n\t\t\t\t\t\ttopBottomInfo:    rc.TopBottomCallInfo,\n\t\t\t\t\t\tisSimpleSelector: rc.IsSimpleSelector,\n\t\t\t\t\t\taggregator:       a,\n\t\t\t\t\t},\n\t\t\t\t\tstringPointEmitter: stringPointEmitter{\n\t\t\t\t\t\tbaseReduceContext: c,\n\t\t\t\t\t\temitter:           e,\n\t\t\t\t\t\tisSimpleSelector:  rc.IsSimpleSelector,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase rc.CreateFloatBooleanReducer != nil:\n\t\t\tfn = func(c baseReduceContext) reduceContext 
{\n\t\t\t\ta, e := rc.CreateFloatBooleanReducer()\n\t\t\t\treturn &floatBooleanReduceContext{\n\t\t\t\t\tfloatPointAggregator: floatPointAggregator{\n\t\t\t\t\t\tfield:            c.field,\n\t\t\t\t\t\ttopBottomInfo:    rc.TopBottomCallInfo,\n\t\t\t\t\t\tisSimpleSelector: rc.IsSimpleSelector,\n\t\t\t\t\t\taggregator:       a,\n\t\t\t\t\t},\n\t\t\t\t\tbooleanPointEmitter: booleanPointEmitter{\n\t\t\t\t\t\tbaseReduceContext: c,\n\t\t\t\t\t\temitter:           e,\n\t\t\t\t\t\tisSimpleSelector:  rc.IsSimpleSelector,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"cannot apply %s to float64 field\", method)\n\t\t}\n\n\tcase reflect.Int64:\n\t\tswitch {\n\n\t\tcase rc.CreateIntegerFloatReducer != nil:\n\t\t\tfn = func(c baseReduceContext) reduceContext {\n\t\t\t\ta, e := rc.CreateIntegerFloatReducer()\n\t\t\t\treturn &integerFloatReduceContext{\n\t\t\t\t\tintegerPointAggregator: integerPointAggregator{\n\t\t\t\t\t\tfield:            c.field,\n\t\t\t\t\t\ttopBottomInfo:    rc.TopBottomCallInfo,\n\t\t\t\t\t\tisSimpleSelector: rc.IsSimpleSelector,\n\t\t\t\t\t\taggregator:       a,\n\t\t\t\t\t},\n\t\t\t\t\tfloatPointEmitter: floatPointEmitter{\n\t\t\t\t\t\tbaseReduceContext: c,\n\t\t\t\t\t\temitter:           e,\n\t\t\t\t\t\tisSimpleSelector:  rc.IsSimpleSelector,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase rc.CreateIntegerReducer != nil:\n\t\t\tfn = func(c baseReduceContext) reduceContext {\n\t\t\t\ta, e := rc.CreateIntegerReducer()\n\t\t\t\treturn &integerReduceContext{\n\t\t\t\t\tintegerPointAggregator: integerPointAggregator{\n\t\t\t\t\t\tfield:            c.field,\n\t\t\t\t\t\ttopBottomInfo:    rc.TopBottomCallInfo,\n\t\t\t\t\t\tisSimpleSelector: rc.IsSimpleSelector,\n\t\t\t\t\t\taggregator:       a,\n\t\t\t\t\t},\n\t\t\t\t\tintegerPointEmitter: integerPointEmitter{\n\t\t\t\t\t\tbaseReduceContext: c,\n\t\t\t\t\t\temitter:           e,\n\t\t\t\t\t\tisSimpleSelector:  rc.IsSimpleSelector,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase rc.CreateIntegerStringReducer != nil:\n\t\t\tfn = func(c baseReduceContext) reduceContext {\n\t\t\t\ta, e := rc.CreateIntegerStringReducer()\n\t\t\t\treturn &integerStringReduceContext{\n\t\t\t\t\tintegerPointAggregator: integerPointAggregator{\n\t\t\t\t\t\tfield:            c.field,\n\t\t\t\t\t\ttopBottomInfo:    rc.TopBottomCallInfo,\n\t\t\t\t\t\tisSimpleSelector: rc.IsSimpleSelector,\n\t\t\t\t\t\taggregator:       a,\n\t\t\t\t\t},\n\t\t\t\t\tstringPointEmitter: stringPointEmitter{\n\t\t\t\t\t\tbaseReduceContext: c,\n\t\t\t\t\t\temitter:           e,\n\t\t\t\t\t\tisSimpleSelector:  rc.IsSimpleSelector,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase rc.CreateIntegerBooleanReducer != nil:\n\t\t\tfn = func(c baseReduceContext) reduceContext {\n\t\t\t\ta, e := rc.CreateIntegerBooleanReducer()\n\t\t\t\treturn &integerBooleanReduceContext{\n\t\t\t\t\tintegerPointAggregator: integerPointAggregator{\n\t\t\t\t\t\tfield:            c.field,\n\t\t\t\t\t\ttopBottomInfo:    rc.TopBottomCallInfo,\n\t\t\t\t\t\tisSimpleSelector: rc.IsSimpleSelector,\n\t\t\t\t\t\taggregator:       a,\n\t\t\t\t\t},\n\t\t\t\t\tbooleanPointEmitter: booleanPointEmitter{\n\t\t\t\t\t\tbaseReduceContext: c,\n\t\t\t\t\t\temitter:           e,\n\t\t\t\t\t\tisSimpleSelector:  rc.IsSimpleSelector,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"cannot apply %s to int64 field\", method)\n\t\t}\n\n\tcase reflect.String:\n\t\tswitch {\n\n\t\tcase rc.CreateStringFloatReducer != nil:\n\t\t\tfn = func(c baseReduceContext) reduceContext {\n\t\t\t\ta, e := 
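/* a is the aggregator half, e the emitter half */ 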
rc.CreateStringFloatReducer()\n\t\t\t\treturn &stringFloatReduceContext{\n\t\t\t\t\tstringPointAggregator: stringPointAggregator{\n\t\t\t\t\t\tfield:            c.field,\n\t\t\t\t\t\ttopBottomInfo:    rc.TopBottomCallInfo,\n\t\t\t\t\t\tisSimpleSelector: rc.IsSimpleSelector,\n\t\t\t\t\t\taggregator:       a,\n\t\t\t\t\t},\n\t\t\t\t\tfloatPointEmitter: floatPointEmitter{\n\t\t\t\t\t\tbaseReduceContext: c,\n\t\t\t\t\t\temitter:           e,\n\t\t\t\t\t\tisSimpleSelector:  rc.IsSimpleSelector,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase rc.CreateStringIntegerReducer != nil:\n\t\t\tfn = func(c baseReduceContext) reduceContext {\n\t\t\t\ta, e := rc.CreateStringIntegerReducer()\n\t\t\t\treturn &stringIntegerReduceContext{\n\t\t\t\t\tstringPointAggregator: stringPointAggregator{\n\t\t\t\t\t\tfield:            c.field,\n\t\t\t\t\t\ttopBottomInfo:    rc.TopBottomCallInfo,\n\t\t\t\t\t\tisSimpleSelector: rc.IsSimpleSelector,\n\t\t\t\t\t\taggregator:       a,\n\t\t\t\t\t},\n\t\t\t\t\tintegerPointEmitter: integerPointEmitter{\n\t\t\t\t\t\tbaseReduceContext: c,\n\t\t\t\t\t\temitter:           e,\n\t\t\t\t\t\tisSimpleSelector:  rc.IsSimpleSelector,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase rc.CreateStringReducer != nil:\n\t\t\tfn = func(c baseReduceContext) reduceContext {\n\t\t\t\ta, e := rc.CreateStringReducer()\n\t\t\t\treturn &stringReduceContext{\n\t\t\t\t\tstringPointAggregator: stringPointAggregator{\n\t\t\t\t\t\tfield:            c.field,\n\t\t\t\t\t\ttopBottomInfo:    rc.TopBottomCallInfo,\n\t\t\t\t\t\tisSimpleSelector: rc.IsSimpleSelector,\n\t\t\t\t\t\taggregator:       a,\n\t\t\t\t\t},\n\t\t\t\t\tstringPointEmitter: stringPointEmitter{\n\t\t\t\t\t\tbaseReduceContext: c,\n\t\t\t\t\t\temitter:           e,\n\t\t\t\t\t\tisSimpleSelector:  rc.IsSimpleSelector,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase rc.CreateStringBooleanReducer != nil:\n\t\t\tfn = func(c baseReduceContext) reduceContext {\n\t\t\t\ta, e := rc.CreateStringBooleanReducer()\n\t\t\t\treturn &stringBooleanReduceContext{\n\t\t\t\t\tstringPointAggregator: stringPointAggregator{\n\t\t\t\t\t\tfield:            c.field,\n\t\t\t\t\t\ttopBottomInfo:    rc.TopBottomCallInfo,\n\t\t\t\t\t\tisSimpleSelector: rc.IsSimpleSelector,\n\t\t\t\t\t\taggregator:       a,\n\t\t\t\t\t},\n\t\t\t\t\tbooleanPointEmitter: booleanPointEmitter{\n\t\t\t\t\t\tbaseReduceContext: c,\n\t\t\t\t\t\temitter:           e,\n\t\t\t\t\t\tisSimpleSelector:  rc.IsSimpleSelector,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"cannot apply %s to string field\", method)\n\t\t}\n\n\tcase reflect.Bool:\n\t\tswitch {\n\n\t\tcase rc.CreateBooleanFloatReducer != nil:\n\t\t\tfn = func(c baseReduceContext) reduceContext {\n\t\t\t\ta, e := rc.CreateBooleanFloatReducer()\n\t\t\t\treturn &booleanFloatReduceContext{\n\t\t\t\t\tbooleanPointAggregator: booleanPointAggregator{\n\t\t\t\t\t\tfield:            c.field,\n\t\t\t\t\t\ttopBottomInfo:    rc.TopBottomCallInfo,\n\t\t\t\t\t\tisSimpleSelector: rc.IsSimpleSelector,\n\t\t\t\t\t\taggregator:       a,\n\t\t\t\t\t},\n\t\t\t\t\tfloatPointEmitter: floatPointEmitter{\n\t\t\t\t\t\tbaseReduceContext: c,\n\t\t\t\t\t\temitter:           e,\n\t\t\t\t\t\tisSimpleSelector:  rc.IsSimpleSelector,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase rc.CreateBooleanIntegerReducer != nil:\n\t\t\tfn = func(c baseReduceContext) reduceContext {\n\t\t\t\ta, e := rc.CreateBooleanIntegerReducer()\n\t\t\t\treturn &booleanIntegerReduceContext{\n\t\t\t\t\tbooleanPointAggregator: booleanPointAggregator{\n\t\t\t\t\t\tfield:            
c.field,\n\t\t\t\t\t\ttopBottomInfo:    rc.TopBottomCallInfo,\n\t\t\t\t\t\tisSimpleSelector: rc.IsSimpleSelector,\n\t\t\t\t\t\taggregator:       a,\n\t\t\t\t\t},\n\t\t\t\t\tintegerPointEmitter: integerPointEmitter{\n\t\t\t\t\t\tbaseReduceContext: c,\n\t\t\t\t\t\temitter:           e,\n\t\t\t\t\t\tisSimpleSelector:  rc.IsSimpleSelector,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase rc.CreateBooleanStringReducer != nil:\n\t\t\tfn = func(c baseReduceContext) reduceContext {\n\t\t\t\ta, e := rc.CreateBooleanStringReducer()\n\t\t\t\treturn &booleanStringReduceContext{\n\t\t\t\t\tbooleanPointAggregator: booleanPointAggregator{\n\t\t\t\t\t\tfield:            c.field,\n\t\t\t\t\t\ttopBottomInfo:    rc.TopBottomCallInfo,\n\t\t\t\t\t\tisSimpleSelector: rc.IsSimpleSelector,\n\t\t\t\t\t\taggregator:       a,\n\t\t\t\t\t},\n\t\t\t\t\tstringPointEmitter: stringPointEmitter{\n\t\t\t\t\t\tbaseReduceContext: c,\n\t\t\t\t\t\temitter:           e,\n\t\t\t\t\t\tisSimpleSelector:  rc.IsSimpleSelector,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\n\t\tcase rc.CreateBooleanReducer != nil:\n\t\t\tfn = func(c baseReduceContext) reduceContext {\n\t\t\t\ta, e := rc.CreateBooleanReducer()\n\t\t\t\treturn &booleanReduceContext{\n\t\t\t\t\tbooleanPointAggregator: booleanPointAggregator{\n\t\t\t\t\t\tfield:            c.field,\n\t\t\t\t\t\ttopBottomInfo:    rc.TopBottomCallInfo,\n\t\t\t\t\t\tisSimpleSelector: rc.IsSimpleSelector,\n\t\t\t\t\t\taggregator:       a,\n\t\t\t\t\t},\n\t\t\t\t\tbooleanPointEmitter: booleanPointEmitter{\n\t\t\t\t\t\tbaseReduceContext: c,\n\t\t\t\t\t\temitter:           e,\n\t\t\t\t\t\tisSimpleSelector:  rc.IsSimpleSelector,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"cannot apply %s to bool field\", method)\n\t\t}\n\n\tdefault:\n\t\terr = fmt.Errorf(\"invalid field kind: %v\", kind)\n\t}\n\treturn\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/influxql.gen.go.tmpl",
    "content": "package kapacitor\n\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\t\"reflect\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/kapacitor/models\"\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n)\n\n{{/* Define typed Aggregate/Emit types */}}\n{{range .}}\n\nfunc convert{{.Name}}Point(\n\tname string,\n\tp edge.FieldsTagsTimeGetter,\n\tfield string,\n\tisSimpleSelector bool,\n\ttopBottomInfo *pipeline.TopBottomCallInfo,\n) (*influxql.{{.Name}}Point, error) {\n\tvalue, ok := p.Fields()[field]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"field %s missing from point cannot aggregate\", field)\n\t}\n\ttyped, ok := value.({{.Type}})\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"field %s has wrong type: got %T exp {{.Type}}\", field, value)\n\t}\n\tap := &influxql.{{.Name}}Point{\n\t\tName:  name,\n\t\tTags:  influxql.NewTags(p.Tags()),\n\t\tTime:  p.Time().UnixNano(),\n\t\tValue: typed,\n\t}\n\tif topBottomInfo != nil {\n\t\t// We need to populate the Aux fields\n\t\t{{.name}}PopulateAuxFieldsAndTags(ap, topBottomInfo.FieldsAndTags, p.Fields(), p.Tags())\n\t}\n\n\tif isSimpleSelector {\n\t\tap.Aux = []interface{}{ p.Tags(), p.Fields() }\n\t}\n\n\treturn ap, nil\n}\n\ntype {{.name}}PointAggregator struct {\n\tfield         string\n\ttopBottomInfo *pipeline.TopBottomCallInfo\n\tisSimpleSelector bool\n\taggregator influxql.{{.Name}}PointAggregator\n}\n\nfunc {{.name}}PopulateAuxFieldsAndTags(ap *influxql.{{.Name}}Point, fieldsAndTags []string, fields models.Fields, tags models.Tags) {\n\tap.Aux = make([]interface{}, len(fieldsAndTags))\n\tfor i, name := range fieldsAndTags {\n\t\tif f, ok := fields[name]; ok {\n\t\t\tap.Aux[i] = f\n\t\t} else {\n\t\t\tap.Aux[i] = tags[name]\n\t\t}\n\t}\n}\n\nfunc (a *{{.name}}PointAggregator) AggregatePoint(name string, p edge.FieldsTagsTimeGetter) error {\n\tap, err := convert{{.Name}}Point(name, p, a.field, a.isSimpleSelector, a.topBottomInfo)\n\tif err != nil {\n\t\treturn nil\n\t}\n\ta.aggregator.Aggregate{{.Name}}(ap)\n\treturn nil\n}\n\ntype {{.name}}PointEmitter struct {\n\tbaseReduceContext\n\temitter influxql.{{.Name}}PointEmitter\n\tisSimpleSelector bool\n    byName bool\n}\n\nfunc (e *{{.name}}PointEmitter) EmitPoint() (edge.PointMessage, error) {\n\tslice := e.emitter.Emit()\n\tif len(slice) != 1 {\n\t\treturn nil, nil\n\t}\n\tap := slice[0]\n\tvar t time.Time\n\tif e.pointTimes {\n\t\tif ap.Time == influxql.ZeroTime {\n\t\t\tt = e.time\n\t\t} else {\n\t\t\tt = time.Unix(0, ap.Time).UTC()\n\t\t}\n\t} else {\n\t\tt = e.time\n\t}\n\n\tvar fields models.Fields\n\tvar tags models.Tags\n\tif e.isSimpleSelector {\n\t\ttags = ap.Aux[0].(models.Tags)\n\t\tfields = ap.Aux[1].(models.Fields)\n\t\tif e.as != e.field {\n\t\t\tfields = fields.Copy()\n\t\t\tfields[e.as] = fields[e.field]\n\t\t\tdelete(fields, e.field)\n\t\t}\n\t} else {\n\t\ttags = e.groupInfo.Tags\n\t\tfields = map[string]interface{}{e.as: ap.Value}\n\t}\n\n\treturn edge.NewPointMessage(\n\t\te.name, \"\", \"\",\n\t\te.groupInfo.Dimensions,\n\t\tfields,\n\t\ttags,\n\t\tt,\n\t), nil\n}\n\nfunc (e *{{.name}}PointEmitter) EmitBatch() edge.BufferedBatchMessage {\n\tslice := e.emitter.Emit()\n\tbegin := edge.NewBeginBatchMessage(\n\t\te.name,\n\t\te.groupInfo.Tags,\n\t\te.groupInfo.Dimensions.ByName,\n\t\te.time,\n\t\tlen(slice),\n\t)\n\tpoints := make([]edge.BatchPointMessage, len(slice))\n\tvar t time.Time\n\tfor i, ap := range slice {\n\t\tif e.pointTimes {\n\t\t\tif ap.Time == influxql.ZeroTime {\n\t\t\t\tt = e.time\n\t\t\t} 
else {\n\t\t\t\tt = time.Unix(0, ap.Time).UTC()\n\t\t\t}\n\t\t} else {\n\t\t\tt = e.time\n\t\t}\n\t\tvar tags models.Tags\n\t\tif l := len(ap.Tags.KeyValues()); l > 0 {\n\t\t\t// Merge batch and point specific tags\n\t\t\ttags = make(models.Tags, len(e.groupInfo.Tags)+l)\n\t\t\tfor k, v := range e.groupInfo.Tags {\n\t\t\t\ttags[k] = v\n\t\t\t}\n\t\t\tfor k, v := range ap.Tags.KeyValues() {\n\t\t\t\ttags[k] = v\n\t\t\t}\n\t\t} else {\n\t\t\ttags = e.groupInfo.Tags\n\t\t}\n\t\tpoints[i] = edge.NewBatchPointMessage(\n\t\t\tmodels.Fields{e.as: ap.Value},\n\t\t\ttags,\n\t\t\tt,\n\t\t)\n\t\tif t.After(begin.Time()) {\n\t\t\tbegin.SetTime(t)\n\t\t}\n\t}\n\treturn edge.NewBufferedBatchMessage(\n\t\tbegin,\n\t\tpoints,\n\t\tedge.NewEndBatchMessage(),\n\t)\n}\n\n{{end}}\n\n{{/* Define composite types for reduceContext */}}\n{{with $types := .}}\n{{range $a := $types}}\n{{range $e := $types}}\n\n// {{$a.name}}{{if ne $a.Name $e.Name}}{{$e.Name}}{{end}}ReduceContext uses composition to implement the reduceContext interface\ntype {{$a.name}}{{if ne $a.Name $e.Name}}{{$e.Name}}{{end}}ReduceContext struct {\n    {{$a.name}}PointAggregator\n    {{$e.name}}PointEmitter\n}\n\n{{end}}{{end}}\n\n\n{{/* Define switch cases for reduceContext construction */}}\n\nfunc determineReduceContextCreateFn(method string, kind reflect.Kind, rc pipeline.ReduceCreater) (fn createReduceContextFunc, err error) {\n\tswitch kind {\n{{range $a := $types}}\n\tcase {{.Kind}}:\n\t\tswitch {\n{{range $e := $types}}\n\t\tcase rc.Create{{$a.Name}}{{if ne $a.Name $e.Name}}{{$e.Name}}{{end}}Reducer != nil:\n\t\t\tfn = func(c baseReduceContext) reduceContext {\n\t\t\t\ta, e := rc.Create{{$a.Name}}{{if ne $a.Name $e.Name}}{{$e.Name}}{{end}}Reducer()\n\t\t\t\treturn &{{$a.name}}{{if ne $a.Name $e.Name}}{{$e.Name}}{{end}}ReduceContext{\n\t\t\t\t\t{{$a.name}}PointAggregator: {{$a.name}}PointAggregator{\n\t\t\t\t\t\tfield:      c.field,\n\t\t\t\t\t\ttopBottomInfo: rc.TopBottomCallInfo,\n\t\t\t\t\t\tisSimpleSelector: rc.IsSimpleSelector,\n\t\t\t\t\t\taggregator: a,\n\t\t\t\t\t},\n\t\t\t\t\t{{$e.name}}PointEmitter: {{$e.name}}PointEmitter{\n\t\t\t\t\t\tbaseReduceContext: c,\n\t\t\t\t\t\temitter:           e,\n\t\t\t\t\t\tisSimpleSelector: rc.IsSimpleSelector,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n{{end}}\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"cannot apply %s to {{$a.Type}} field\", method)\n\t\t}\n{{end}}\n\tdefault:\n\t\terr = fmt.Errorf(\"invalid field kind: %v\", kind)\n\t}\n\treturn\n}\n{{end}}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/influxql.go",
    "content": "package kapacitor\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"reflect\"\n\t\"time\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/models\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n\t\"github.com/pkg/errors\"\n)\n\n// tmpl -- go get github.com/benbjohnson/tmpl\n//go:generate tmpl -data=@tmpldata.json influxql.gen.go.tmpl\n\ntype createReduceContextFunc func(c baseReduceContext) reduceContext\n\ntype InfluxQLNode struct {\n\tnode\n\tn                      *pipeline.InfluxQLNode\n\tcreateFn               createReduceContextFunc\n\tisStreamTransformation bool\n\n\tcurrentKind reflect.Kind\n}\n\nfunc newInfluxQLNode(et *ExecutingTask, n *pipeline.InfluxQLNode, l *log.Logger) (*InfluxQLNode, error) {\n\tm := &InfluxQLNode{\n\t\tnode: node{Node: n, et: et, logger: l},\n\t\tn:    n,\n\t\tisStreamTransformation: n.ReduceCreater.IsStreamTransformation,\n\t}\n\tm.node.runF = m.runInfluxQL\n\treturn m, nil\n}\n\ntype reduceContext interface {\n\tAggregatePoint(name string, p edge.FieldsTagsTimeGetter) error\n\tEmitPoint() (edge.PointMessage, error)\n\tEmitBatch() edge.BufferedBatchMessage\n}\n\ntype baseReduceContext struct {\n\tas            string\n\tfield         string\n\tname          string\n\tgroupInfo     edge.GroupInfo\n\ttime          time.Time\n\tpointTimes    bool\n\ttopBottomInfo *pipeline.TopBottomCallInfo\n}\n\nfunc (n *InfluxQLNode) runInfluxQL([]byte) error {\n\tconsumer := edge.NewGroupedConsumer(\n\t\tn.ins[0],\n\t\tn,\n\t)\n\tn.statMap.Set(statCardinalityGauge, consumer.CardinalityVar())\n\treturn consumer.Consume()\n}\n\nfunc (n *InfluxQLNode) NewGroup(group edge.GroupInfo, first edge.PointMeta) (edge.Receiver, error) {\n\treturn edge.NewReceiverFromForwardReceiverWithStats(\n\t\tn.outs,\n\t\tedge.NewTimedForwardReceiver(n.timer, n.newGroup(first)),\n\t), nil\n}\n\nfunc (n *InfluxQLNode) newGroup(first edge.PointMeta) edge.ForwardReceiver {\n\tbc := baseReduceContext{\n\t\tas:         n.n.As,\n\t\tfield:      n.n.Field,\n\t\tname:       first.Name(),\n\t\tgroupInfo:  first.GroupInfo(),\n\t\ttime:       first.Time(),\n\t\tpointTimes: n.n.PointTimes || n.isStreamTransformation,\n\t}\n\tg := influxqlGroup{\n\t\tn:  n,\n\t\tbc: bc,\n\t}\n\tif n.isStreamTransformation {\n\t\treturn &influxqlStreamingTransformGroup{\n\t\t\tinfluxqlGroup: g,\n\t\t}\n\t}\n\treturn &g\n}\n\ntype influxqlGroup struct {\n\tn *InfluxQLNode\n\n\tbc baseReduceContext\n\trc reduceContext\n\n\tbatchSize int\n\tname      string\n\tbegin     edge.BeginBatchMessage\n}\n\nfunc (g *influxqlGroup) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) {\n\tg.begin = begin\n\tg.batchSize = 0\n\tg.bc.time = begin.Time()\n\tg.rc = nil\n\treturn nil, nil\n}\n\nfunc (g *influxqlGroup) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) {\n\tif g.rc == nil {\n\t\tif err := g.realizeReduceContextFromFields(bp.Fields()); err != nil {\n\t\t\tg.n.incrementErrorCount()\n\t\t\tg.n.logger.Println(\"E!\", err)\n\t\t\treturn nil, nil\n\t\t}\n\t}\n\tg.batchSize++\n\tif err := g.rc.AggregatePoint(g.begin.Name(), bp); err != nil {\n\t\tg.n.incrementErrorCount()\n\t\tg.n.logger.Println(\"E! 
failed to aggregate point in batch:\", err)\n\t}\n\treturn nil, nil\n}\n\nfunc (g *influxqlGroup) EndBatch(end edge.EndBatchMessage) (edge.Message, error) {\n\tif g.batchSize == 0 && !g.n.n.ReduceCreater.IsEmptyOK {\n\t\t// Do not call Emit on the reducer since it can't handle empty batches.\n\t\treturn nil, nil\n\t}\n\tif g.rc == nil {\n\t\t// Assume float64 type since we do not have any data.\n\t\tif err := g.realizeReduceContext(reflect.Float64); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tm, err := g.n.emit(g.rc)\n\tif err != nil {\n\t\tg.n.incrementErrorCount()\n\t\tg.n.logger.Println(\"E! failed to emit batch:\", err)\n\t\treturn nil, nil\n\t}\n\treturn m, nil\n}\n\nfunc (g *influxqlGroup) Point(p edge.PointMessage) (edge.Message, error) {\n\tif p.Time().Equal(g.bc.time) {\n\t\tg.aggregatePoint(p)\n\t} else {\n\t\t// Time has elapsed, emit current context\n\t\tvar msg edge.Message\n\t\tif g.rc != nil {\n\t\t\tm, err := g.n.emit(g.rc)\n\t\t\tif err != nil {\n\t\t\t\tg.n.incrementErrorCount()\n\t\t\t\tg.n.logger.Println(\"E! failed to emit stream:\", err)\n\t\t\t}\n\t\t\tmsg = m\n\t\t}\n\n\t\t// Reset context\n\t\tg.bc.name = p.Name()\n\t\tg.bc.time = p.Time()\n\t\tg.rc = nil\n\n\t\t// Aggregate the current point\n\t\tg.aggregatePoint(p)\n\n\t\treturn msg, nil\n\t}\n\treturn nil, nil\n}\n\nfunc (g *influxqlGroup) aggregatePoint(p edge.PointMessage) {\n\tif g.rc == nil {\n\t\tif err := g.realizeReduceContextFromFields(p.Fields()); err != nil {\n\t\t\tg.n.incrementErrorCount()\n\t\t\tg.n.logger.Println(\"E!\", err)\n\t\t\treturn\n\t\t}\n\t}\n\terr := g.rc.AggregatePoint(p.Name(), p)\n\tif err != nil {\n\t\tg.n.incrementErrorCount()\n\t\tg.n.logger.Println(\"E! failed to aggregate point:\", err)\n\t}\n}\n\nfunc (g *influxqlGroup) getFieldKind(fields models.Fields) (reflect.Kind, error) {\n\tf, exists := fields[g.bc.field]\n\tif !exists {\n\t\treturn reflect.Invalid, fmt.Errorf(\"field %q missing from point\", g.bc.field)\n\t}\n\n\treturn reflect.TypeOf(f).Kind(), nil\n}\nfunc (g *influxqlGroup) realizeReduceContextFromFields(fields models.Fields) error {\n\tk, err := g.getFieldKind(fields)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn g.realizeReduceContext(k)\n}\n\nfunc (g *influxqlGroup) realizeReduceContext(kind reflect.Kind) error {\n\tcreateFn, err := g.n.getCreateFn(kind)\n\tif err != nil {\n\t\treturn err\n\t}\n\tg.rc = createFn(g.bc)\n\treturn nil\n}\n\nfunc (g *influxqlGroup) Barrier(b edge.BarrierMessage) (edge.Message, error) {\n\treturn b, nil\n}\nfunc (g *influxqlGroup) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) {\n\treturn d, nil\n}\n\ntype influxqlStreamingTransformGroup struct {\n\tinfluxqlGroup\n}\n\nfunc (g *influxqlStreamingTransformGroup) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) {\n\tg.begin = begin.ShallowCopy()\n\tg.begin.SetSizeHint(0)\n\tg.bc.time = begin.Time()\n\tg.rc = nil\n\treturn begin, nil\n}\n\nfunc (g *influxqlStreamingTransformGroup) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) {\n\tif g.rc == nil {\n\t\tif err := g.realizeReduceContextFromFields(bp.Fields()); err != nil {\n\t\t\tg.n.incrementErrorCount()\n\t\t\tg.n.logger.Println(\"E!\", err)\n\t\t\treturn nil, nil\n\t\t}\n\t}\n\tif err := g.rc.AggregatePoint(g.begin.Name(), bp); err != nil {\n\t\tg.n.incrementErrorCount()\n\t\tg.n.logger.Println(\"E! failed to aggregate batch point:\", err)\n\t}\n\tif ep, err := g.rc.EmitPoint(); err != nil {\n\t\tg.n.incrementErrorCount()\n\t\tg.n.logger.Println(\"E! 
failed to emit batch point:\", err)\n\t} else if ep != nil {\n\t\treturn edge.NewBatchPointMessage(\n\t\t\tep.Fields(),\n\t\t\tep.Tags(),\n\t\t\tep.Time(),\n\t\t), nil\n\t}\n\treturn nil, nil\n}\n\nfunc (g *influxqlStreamingTransformGroup) EndBatch(end edge.EndBatchMessage) (edge.Message, error) {\n\treturn end, nil\n}\n\nfunc (g *influxqlStreamingTransformGroup) Point(p edge.PointMessage) (edge.Message, error) {\n\tif g.rc == nil {\n\t\tif err := g.realizeReduceContextFromFields(p.Fields()); err != nil {\n\t\t\tg.n.incrementErrorCount()\n\t\t\tg.n.logger.Println(\"E!\", err)\n\t\t\t// Skip point\n\t\t\treturn nil, nil\n\t\t}\n\t}\n\terr := g.rc.AggregatePoint(p.Name(), p)\n\tif err != nil {\n\t\tg.n.incrementErrorCount()\n\t\tg.n.logger.Println(\"E! failed to aggregate point:\", err)\n\t}\n\n\tm, err := g.n.emit(g.rc)\n\tif err != nil {\n\t\tg.n.incrementErrorCount()\n\t\tg.n.logger.Println(\"E! failed to emit stream:\", err)\n\t\treturn nil, nil\n\t}\n\treturn m, nil\n}\n\nfunc (g *influxqlStreamingTransformGroup) Barrier(b edge.BarrierMessage) (edge.Message, error) {\n\treturn b, nil\n}\n\nfunc (n *InfluxQLNode) getCreateFn(kind reflect.Kind) (createReduceContextFunc, error) {\n\tchanged := n.currentKind != kind\n\tif !changed && n.createFn != nil {\n\t\treturn n.createFn, nil\n\t}\n\tn.currentKind = kind\n\tcreateFn, err := determineReduceContextCreateFn(n.n.Method, kind, n.n.ReduceCreater)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"invalid influxql func %s with field %s\", n.n.Method, n.n.Field)\n\t}\n\tn.createFn = createFn\n\treturn n.createFn, nil\n}\n\nfunc (n *InfluxQLNode) emit(context reduceContext) (edge.Message, error) {\n\tswitch n.Provides() {\n\tcase pipeline.StreamEdge:\n\t\treturn context.EmitPoint()\n\tcase pipeline.BatchEdge:\n\t\treturn context.EmitBatch(), nil\n\t}\n\treturn nil, nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/join.go",
    "content": "package kapacitor\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/expvar\"\n\t\"github.com/influxdata/kapacitor/models\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n\t\"github.com/pkg/errors\"\n)\n\ntype JoinNode struct {\n\tnode\n\tj         *pipeline.JoinNode\n\tfill      influxql.FillOption\n\tfillValue interface{}\n\n\tgroupsMu sync.RWMutex\n\tgroups   map[models.GroupID]*joinGroup\n\n\t// Represents the lower bound of times per group per source\n\tlowMarks map[srcGroup]time.Time\n\n\t// Buffer for caching points that need to be matched with specific points.\n\tmatchGroupsBuffer map[models.GroupID][]srcPoint\n\t// Buffer for caching specific points until their match arrivces.\n\tspecificGroupsBuffer map[models.GroupID][]srcPoint\n\n\treported    map[int]bool\n\tallReported bool\n}\n\n// Create a new JoinNode, which takes pairs from parent streams combines them into a single point.\nfunc newJoinNode(et *ExecutingTask, n *pipeline.JoinNode, l *log.Logger) (*JoinNode, error) {\n\tjn := &JoinNode{\n\t\tj:                    n,\n\t\tnode:                 node{Node: n, et: et, logger: l},\n\t\tgroups:               make(map[models.GroupID]*joinGroup),\n\t\tmatchGroupsBuffer:    make(map[models.GroupID][]srcPoint),\n\t\tspecificGroupsBuffer: make(map[models.GroupID][]srcPoint),\n\t\tlowMarks:             make(map[srcGroup]time.Time),\n\t\treported:             make(map[int]bool),\n\t}\n\t// Set fill\n\tswitch fill := n.Fill.(type) {\n\tcase string:\n\t\tswitch fill {\n\t\tcase \"null\":\n\t\t\tjn.fill = influxql.NullFill\n\t\tcase \"none\":\n\t\t\tjn.fill = influxql.NoFill\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"unexpected fill option %s\", fill)\n\t\t}\n\tcase int64, float64:\n\t\tjn.fill = influxql.NumberFill\n\t\tjn.fillValue = fill\n\tdefault:\n\t\tjn.fill = influxql.NoFill\n\t}\n\tjn.node.runF = jn.runJoin\n\treturn jn, nil\n}\n\nfunc (n *JoinNode) runJoin([]byte) error {\n\tconsumer := edge.NewMultiConsumerWithStats(n.ins, n)\n\tvalueF := func() int64 {\n\t\tn.groupsMu.RLock()\n\t\tl := len(n.groups)\n\t\tn.groupsMu.RUnlock()\n\t\treturn int64(l)\n\t}\n\tn.statMap.Set(statCardinalityGauge, expvar.NewIntFuncGauge(valueF))\n\n\treturn consumer.Consume()\n}\n\nfunc (n *JoinNode) BufferedBatch(src int, batch edge.BufferedBatchMessage) error {\n\treturn n.doMessage(src, batch)\n}\n\nfunc (n *JoinNode) Point(src int, p edge.PointMessage) error {\n\treturn n.doMessage(src, p)\n}\n\nfunc (n *JoinNode) Barrier(src int, b edge.BarrierMessage) error {\n\treturn edge.Forward(n.outs, b)\n}\n\nfunc (n *JoinNode) Finish() error {\n\t// No more points are coming signal all groups to finish up.\n\tfor _, group := range n.groups {\n\t\tif err := group.Finish(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\ntype messageMeta interface {\n\tedge.Message\n\tedge.PointMeta\n}\ntype srcPoint struct {\n\tSrc int\n\tMsg messageMeta\n}\n\nfunc (n *JoinNode) doMessage(src int, m messageMeta) error {\n\tn.timer.Start()\n\tdefer n.timer.Stop()\n\tif len(n.j.Dimensions) > 0 {\n\t\t// Match points with their group based on join dimensions.\n\t\tn.matchPoints(srcPoint{Src: src, Msg: m})\n\t} else {\n\t\t// Just send point on to group, we are not joining on specific dimensions.\n\t\tgroup := n.getOrCreateGroup(m.GroupID())\n\t\tgroup.Collect(src, m)\n\t}\n\treturn nil\n}\n\n// The purpose of this method is to match more specific points\n// 
with the less specific points as they arrive.\n//\n// Where 'more specific' means that a point has more dimensions than the join.on dimensions.\nfunc (n *JoinNode) matchPoints(p srcPoint) {\n\t// Specific points may be sent to the joinset without a matching point, but not the other way around.\n\t// This is because the specific points have the needed specific tag data.\n\t// The joinset will later handle the fill inner/outer join operations.\n\n\tif !n.allReported {\n\t\tn.reported[p.Src] = true\n\t\tn.allReported = len(n.reported) == len(n.ins)\n\t}\n\tt := p.Msg.Time().Round(n.j.Tolerance)\n\n\tgroupId := models.ToGroupID(\n\t\tp.Msg.Name(),\n\t\tp.Msg.GroupInfo().Tags,\n\t\tmodels.Dimensions{\n\t\t\tByName:   p.Msg.Dimensions().ByName,\n\t\t\tTagNames: n.j.Dimensions,\n\t\t},\n\t)\n\t// Update current srcGroup lowMark\n\tsrcG := srcGroup{src: p.Src, groupId: groupId}\n\tn.lowMarks[srcG] = t\n\n\t// Determine lowMark, the oldest time per parent per group.\n\tvar lowMark time.Time\n\tif n.allReported {\n\t\tfor s := 0; s < len(n.ins); s++ {\n\t\t\tsg := srcGroup{src: s, groupId: groupId}\n\t\t\tif lm := n.lowMarks[sg]; lowMark.IsZero() || lm.Before(lowMark) {\n\t\t\t\tlowMark = lm\n\t\t\t}\n\t\t}\n\t}\n\n\t// Check for cached specific points that can now be sent alone.\n\tif n.allReported {\n\t\t// Send all cached specific points that won't match anymore.\n\t\tvar i int\n\t\tbuf := n.specificGroupsBuffer[groupId]\n\t\tl := len(buf)\n\t\tfor i = 0; i < l; i++ {\n\t\t\tst := buf[i].Msg.Time().Round(n.j.Tolerance)\n\t\t\tif st.Before(lowMark) {\n\t\t\t\t// Send point by itself since it won't get a match.\n\t\t\t\tn.sendSpecificPoint(buf[i])\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\t// Remove all sent points.\n\t\tn.specificGroupsBuffer[groupId] = buf[i:]\n\t}\n\n\tif len(p.Msg.Dimensions().TagNames) > len(n.j.Dimensions) {\n\t\t// We have a specific point and three options:\n\t\t// 1. Find the cached match point and send both to group.\n\t\t// 2. Cache the specific point for later.\n\t\t// 3. 
Send the specific point alone if it is no longer possible that a match will arrive.\n\n\t\t// Search for a match.\n\t\t// Also purge any old match points.\n\t\tmatches := n.matchGroupsBuffer[groupId]\n\t\tmatched := false\n\t\tvar i int\n\t\tl := len(matches)\n\t\tfor i = 0; i < l; i++ {\n\t\t\tmatch := matches[i]\n\t\t\tpt := match.Msg.Time().Round(n.j.Tolerance)\n\t\t\tif pt.Equal(t) {\n\t\t\t\t// Option 1, send both points\n\t\t\t\tn.sendMatchPoint(p, match)\n\t\t\t\tmatched = true\n\t\t\t}\n\t\t\tif !pt.Before(lowMark) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif n.allReported {\n\t\t\t// Can't trust lowMark until all parents have reported.\n\t\t\t// Remove any unneeded match points.\n\t\t\tn.matchGroupsBuffer[groupId] = matches[i:]\n\t\t}\n\n\t\t// If the point didn't match that leaves us with options 2 and 3.\n\t\tif !matched {\n\t\t\tif n.allReported && t.Before(lowMark) {\n\t\t\t\t// Option 3\n\t\t\t\t// Send this specific point by itself since it won't get a match.\n\t\t\t\tn.sendSpecificPoint(p)\n\t\t\t} else {\n\t\t\t\t// Option 2\n\t\t\t\t// Cache this point for when its match arrives.\n\t\t\t\tn.specificGroupsBuffer[groupId] = append(n.specificGroupsBuffer[groupId], p)\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// Cache match point.\n\t\tn.matchGroupsBuffer[groupId] = append(n.matchGroupsBuffer[groupId], p)\n\n\t\t// Send all specific points that match, to the group.\n\t\tvar i int\n\t\tbuf := n.specificGroupsBuffer[groupId]\n\t\tl := len(buf)\n\t\tfor i = 0; i < l; i++ {\n\t\t\tst := buf[i].Msg.Time().Round(n.j.Tolerance)\n\t\t\tif st.Equal(t) {\n\t\t\t\tn.sendMatchPoint(buf[i], p)\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\t// Remove all sent points\n\t\tn.specificGroupsBuffer[groupId] = buf[i:]\n\t}\n}\n\n// Add the specific tags from the specific point to the matched point\n// and then send both on to the group.\nfunc (n *JoinNode) sendMatchPoint(specific, matched srcPoint) {\n\tvar newMatched messageMeta\n\tswitch msg := matched.Msg.(type) {\n\tcase edge.BufferedBatchMessage:\n\t\tb := msg.ShallowCopy()\n\t\tb.SetBegin(b.Begin().ShallowCopy())\n\t\tb.Begin().SetTags(specific.Msg.GroupInfo().Tags)\n\t\tnewMatched = b\n\tcase edge.PointMessage:\n\t\tp := msg.ShallowCopy()\n\t\tinfo := specific.Msg.GroupInfo()\n\t\tp.SetTagsAndDimensions(info.Tags, info.Dimensions)\n\t\tnewMatched = p\n\t}\n\tgroup := n.getOrCreateGroup(specific.Msg.GroupID())\n\t// Collect specific point\n\tgroup.Collect(specific.Src, specific.Msg)\n\t// Collect new matched point\n\tgroup.Collect(matched.Src, newMatched)\n}\n\n// Send only the specific point to the group\nfunc (n *JoinNode) sendSpecificPoint(specific srcPoint) {\n\tgroup := n.getOrCreateGroup(specific.Msg.GroupID())\n\tgroup.Collect(specific.Src, specific.Msg)\n}\n\n// safely get the group for the point or create one if it doesn't exist.\nfunc (n *JoinNode) getOrCreateGroup(groupID models.GroupID) *joinGroup {\n\tgroup := n.groups[groupID]\n\tif group == nil {\n\t\tgroup = n.newGroup(len(n.ins))\n\t\tn.groupsMu.Lock()\n\t\tn.groups[groupID] = group\n\t\tn.groupsMu.Unlock()\n\t}\n\treturn group\n}\n\nfunc (n *JoinNode) newGroup(count int) *joinGroup {\n\treturn &joinGroup{\n\t\tn:    n,\n\t\tsets: make(map[time.Time][]*joinset),\n\t\thead: make([]time.Time, count),\n\t}\n}\n\n// handles emitting joined sets once enough data has arrived from parents.\ntype joinGroup struct {\n\tn *JoinNode\n\n\tsets       map[time.Time][]*joinset\n\thead       []time.Time\n\toldestTime time.Time\n}\n\nfunc (g *joinGroup) Finish() error {\n\treturn 
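/* drain every buffered set, ready or not */ 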
g.emitAll()\n}\n\n// Collect a point from a given parent.\n// emit the oldest set if we have collected enough data.\nfunc (g *joinGroup) Collect(src int, p timeMessage) error {\n\tt := p.Time().Round(g.n.j.Tolerance)\n\tif t.Before(g.oldestTime) || g.oldestTime.IsZero() {\n\t\tg.oldestTime = t\n\t}\n\n\tvar set *joinset\n\tsets := g.sets[t]\n\tif len(sets) == 0 {\n\t\tset = g.newJoinset(t)\n\t\tsets = append(sets, set)\n\t\tg.sets[t] = sets\n\t}\n\tfor i := 0; i < len(sets); i++ {\n\t\tif !sets[i].Has(src) {\n\t\t\tset = sets[i]\n\t\t\tbreak\n\t\t}\n\t}\n\tif set == nil {\n\t\tset = g.newJoinset(t)\n\t\tsets = append(sets, set)\n\t\tg.sets[t] = sets\n\t}\n\tset.Set(src, p)\n\n\t// Update head\n\tg.head[src] = t\n\n\tonlyReadySets := false\n\tfor _, t := range g.head {\n\t\tif !t.After(g.oldestTime) {\n\t\t\tonlyReadySets = true\n\t\t\tbreak\n\t\t}\n\t}\n\terr := g.emit(onlyReadySets)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (g *joinGroup) newJoinset(t time.Time) *joinset {\n\treturn newJoinset(\n\t\tg.n,\n\t\tg.n.j.StreamName,\n\t\tg.n.fill,\n\t\tg.n.fillValue,\n\t\tg.n.j.Names,\n\t\tg.n.j.Delimiter,\n\t\tg.n.j.Tolerance,\n\t\tt,\n\t\tg.n.logger,\n\t)\n}\n\n// emit a set and update the oldestTime.\nfunc (g *joinGroup) emit(onlyReadySets bool) error {\n\tsets := g.sets[g.oldestTime]\n\ti := 0\n\tfor ; i < len(sets); i++ {\n\t\tif sets[i].Ready() || !onlyReadySets {\n\t\t\terr := g.emitJoinedSet(sets[i])\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tif i == len(sets) {\n\t\tdelete(g.sets, g.oldestTime)\n\t} else {\n\t\tg.sets[g.oldestTime] = sets[i:]\n\t}\n\n\tg.oldestTime = time.Time{}\n\tfor t := range g.sets {\n\t\tif g.oldestTime.IsZero() || t.Before(g.oldestTime) {\n\t\t\tg.oldestTime = t\n\t\t}\n\t}\n\treturn nil\n}\n\n// emit sets until we have none left.\nfunc (g *joinGroup) emitAll() error {\n\tvar lastErr error\n\tfor len(g.sets) > 0 {\n\t\terr := g.emit(false)\n\t\tif err != nil {\n\t\t\tlastErr = err\n\t\t}\n\t}\n\treturn lastErr\n}\n\n// emit a single joined set\nfunc (g *joinGroup) emitJoinedSet(set *joinset) error {\n\tif set.name == \"\" {\n\t\tset.name = set.First().(edge.NameGetter).Name()\n\t}\n\tswitch g.n.Wants() {\n\tcase pipeline.StreamEdge:\n\t\tp, err := set.JoinIntoPoint()\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to join into point\")\n\t\t}\n\t\tif p != nil {\n\t\t\tif err := edge.Forward(g.n.outs, p); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\tcase pipeline.BatchEdge:\n\t\tb, err := set.JoinIntoBatch()\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to join into batch\")\n\t\t}\n\t\tif b != nil {\n\t\t\tif err := edge.Forward(g.n.outs, b); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n// A groupId and its parent\ntype srcGroup struct {\n\tsrc     int\n\tgroupId models.GroupID\n}\n\n// represents a set of points or batches from the same joined time\ntype joinset struct {\n\tj         *JoinNode\n\tname      string\n\tfill      influxql.FillOption\n\tfillValue interface{}\n\tprefixes  []string\n\tdelimiter string\n\n\ttime      time.Time\n\ttolerance time.Duration\n\tvalues    []edge.Message\n\n\texpected int\n\tsize     int\n\tfinished int\n\n\tfirst int\n\n\tlogger *log.Logger\n}\n\nfunc newJoinset(\n\tn *JoinNode,\n\tname string,\n\tfill influxql.FillOption,\n\tfillValue interface{},\n\tprefixes []string,\n\tdelimiter string,\n\ttolerance time.Duration,\n\ttime time.Time,\n\tl *log.Logger,\n) *joinset {\n\texpected := 
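/* one value slot per joined parent */ 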
len(prefixes)\n\treturn &joinset{\n\t\tj:         n,\n\t\tname:      name,\n\t\tfill:      fill,\n\t\tfillValue: fillValue,\n\t\tprefixes:  prefixes,\n\t\tdelimiter: delimiter,\n\t\texpected:  expected,\n\t\tvalues:    make([]edge.Message, expected),\n\t\tfirst:     expected,\n\t\ttime:      time,\n\t\ttolerance: tolerance,\n\t\tlogger:    l,\n\t}\n}\n\nfunc (js *joinset) Ready() bool {\n\treturn js.size == js.expected\n}\n\nfunc (js *joinset) Has(i int) bool {\n\treturn js.values[i] != nil\n}\n\n// add a point to the set from a given parent index.\nfunc (js *joinset) Set(i int, v edge.Message) {\n\tif i < js.first {\n\t\tjs.first = i\n\t}\n\tjs.values[i] = v\n\tjs.size++\n}\n\n// a valid point in the set\nfunc (js *joinset) First() edge.Message {\n\treturn js.values[js.first]\n}\n\n// join all points into a single point\nfunc (js *joinset) JoinIntoPoint() (edge.PointMessage, error) {\n\tfirst, ok := js.First().(edge.PointMessage)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"unexpected type of first value %T\", js.First())\n\t}\n\tfirstFields := first.Fields()\n\tfields := make(models.Fields, js.size*len(firstFields))\n\tfor i, v := range js.values {\n\t\tif v == nil {\n\t\t\tswitch js.fill {\n\t\t\tcase influxql.NullFill:\n\t\t\t\tfor k := range firstFields {\n\t\t\t\t\tfields[js.prefixes[i]+js.delimiter+k] = nil\n\t\t\t\t}\n\t\t\tcase influxql.NumberFill:\n\t\t\t\tfor k := range firstFields {\n\t\t\t\t\tfields[js.prefixes[i]+js.delimiter+k] = js.fillValue\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\t// inner join no valid point possible\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t} else {\n\t\t\tp, ok := v.(edge.FieldGetter)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"unexpected type %T\", v)\n\t\t\t}\n\t\t\tfor k, v := range p.Fields() {\n\t\t\t\tfields[js.prefixes[i]+js.delimiter+k] = v\n\t\t\t}\n\t\t}\n\t}\n\tnp := edge.NewPointMessage(\n\t\tjs.name, \"\", \"\",\n\t\tfirst.Dimensions(),\n\t\tfields,\n\t\tfirst.GroupInfo().Tags,\n\t\tjs.time,\n\t)\n\treturn np, nil\n}\n\n// join all batches the set into a single batch\nfunc (js *joinset) JoinIntoBatch() (edge.BufferedBatchMessage, error) {\n\tfirst, ok := js.First().(edge.BufferedBatchMessage)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"unexpected type of first value %T\", js.First())\n\t}\n\tnewBegin := edge.NewBeginBatchMessage(\n\t\tjs.name,\n\t\tfirst.Tags(),\n\t\tfirst.Dimensions().ByName,\n\t\tjs.time,\n\t\t0,\n\t)\n\tnewPoints := make([]edge.BatchPointMessage, 0, len(first.Points()))\n\tempty := make([]bool, js.expected)\n\temptyCount := 0\n\tindexes := make([]int, js.expected)\n\tvar fieldNames []string\n\nBATCH_POINT:\n\tfor emptyCount < js.expected {\n\t\tset := make([]edge.BatchPointMessage, js.expected)\n\t\tsetTime := time.Time{}\n\t\tcount := 0\n\t\tfor i, batch := range js.values {\n\t\t\tif empty[i] {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif batch == nil {\n\t\t\t\temptyCount++\n\t\t\t\tempty[i] = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tb, ok := batch.(edge.BufferedBatchMessage)\n\t\t\tif !ok {\n\t\t\t\treturn nil, fmt.Errorf(\"unexpected type of batch value %T\", batch)\n\t\t\t}\n\t\t\tif indexes[i] == len(b.Points()) {\n\t\t\t\temptyCount++\n\t\t\t\tempty[i] = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbp := b.Points()[indexes[i]]\n\t\t\tt := bp.Time().Round(js.tolerance)\n\t\t\tif setTime.IsZero() {\n\t\t\t\tsetTime = t\n\t\t\t}\n\t\t\tif t.Before(setTime) {\n\t\t\t\t// We need to backup\n\t\t\t\tsetTime = t\n\t\t\t\tfor j := range set {\n\t\t\t\t\tif set[j] != nil {\n\t\t\t\t\t\tindexes[j]--\n\t\t\t\t\t}\n\t\t\t\t\tset[j] = 
nil\n\t\t\t\t}\n\t\t\t\tset[i] = bp\n\t\t\t\tindexes[i]++\n\t\t\t\tcount = 1\n\t\t\t} else if t.Equal(setTime) {\n\t\t\t\tif fieldNames == nil {\n\t\t\t\t\tfor k := range bp.Fields() {\n\t\t\t\t\t\tfieldNames = append(fieldNames, k)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tset[i] = bp\n\t\t\t\tindexes[i]++\n\t\t\t\tcount++\n\t\t\t}\n\t\t}\n\t\t// we didn't get any points from any group so we must be empty;\n\t\t// skip this set\n\t\tif count == 0 {\n\t\t\tcontinue\n\t\t}\n\t\t// Join all batch points in set\n\t\tfields := make(models.Fields, js.expected*len(fieldNames))\n\t\tfor i, bp := range set {\n\t\t\tif bp == nil {\n\t\t\t\tswitch js.fill {\n\t\t\t\tcase influxql.NullFill:\n\t\t\t\t\tfor _, k := range fieldNames {\n\t\t\t\t\t\tfields[js.prefixes[i]+js.delimiter+k] = nil\n\t\t\t\t\t}\n\t\t\t\tcase influxql.NumberFill:\n\t\t\t\t\tfor _, k := range fieldNames {\n\t\t\t\t\t\tfields[js.prefixes[i]+js.delimiter+k] = js.fillValue\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\t// inner join: no valid point possible\n\t\t\t\t\tcontinue BATCH_POINT\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfor k, v := range bp.Fields() {\n\t\t\t\t\tfields[js.prefixes[i]+js.delimiter+k] = v\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tbp := edge.NewBatchPointMessage(\n\t\t\tfields,\n\t\t\tnewBegin.Tags(),\n\t\t\tsetTime,\n\t\t)\n\t\tnewPoints = append(newPoints, bp)\n\t}\n\tnewBegin.SetSizeHint(len(newPoints))\n\treturn edge.NewBufferedBatchMessage(\n\t\tnewBegin,\n\t\tnewPoints,\n\t\tedge.NewEndBatchMessage(),\n\t), nil\n}\n\ntype durationVar struct {\n\texpvar.Int\n}\n\nfunc (d *durationVar) String() string {\n\treturn time.Duration(d.IntValue()).String()\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/kapacitor_loopback.go",
    "content": "package kapacitor\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/expvar\"\n\t\"github.com/influxdata/kapacitor/models\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n)\n\nconst (\n\tstatsKapacitorLoopbackPointsWritten = \"points_written\"\n)\n\ntype KapacitorLoopbackNode struct {\n\tnode\n\tk *pipeline.KapacitorLoopbackNode\n\n\tpointsWritten *expvar.Int\n\n\tbegin edge.BeginBatchMessage\n}\n\nfunc newKapacitorLoopbackNode(et *ExecutingTask, n *pipeline.KapacitorLoopbackNode, l *log.Logger) (*KapacitorLoopbackNode, error) {\n\tkn := &KapacitorLoopbackNode{\n\t\tnode: node{Node: n, et: et, logger: l},\n\t\tk:    n,\n\t}\n\tkn.node.runF = kn.runOut\n\t// Check that a loop has not been created within this task\n\tfor _, dbrp := range et.Task.DBRPs {\n\t\tif dbrp.Database == n.Database && dbrp.RetentionPolicy == n.RetentionPolicy {\n\t\t\treturn nil, fmt.Errorf(\"loop detected on dbrp: %v\", dbrp)\n\t\t}\n\t}\n\treturn kn, nil\n}\n\nfunc (n *KapacitorLoopbackNode) runOut([]byte) error {\n\tn.pointsWritten = &expvar.Int{}\n\tn.statMap.Set(statsInfluxDBPointsWritten, n.pointsWritten)\n\n\tconsumer := edge.NewConsumerWithReceiver(\n\t\tn.ins[0],\n\t\tn,\n\t)\n\treturn consumer.Consume()\n}\n\nfunc (n *KapacitorLoopbackNode) Point(p edge.PointMessage) error {\n\tn.timer.Start()\n\tdefer n.timer.Stop()\n\n\tp = p.ShallowCopy()\n\n\tif n.k.Database != \"\" {\n\t\tp.SetDatabase(n.k.Database)\n\t}\n\tif n.k.RetentionPolicy != \"\" {\n\t\tp.SetRetentionPolicy(n.k.RetentionPolicy)\n\t}\n\tif n.k.Measurement != \"\" {\n\t\tp.SetName(n.k.Measurement)\n\t}\n\tif len(n.k.Tags) > 0 {\n\t\ttags := p.Tags().Copy()\n\t\tfor k, v := range n.k.Tags {\n\t\t\ttags[k] = v\n\t\t}\n\t\tp.SetTags(tags)\n\t}\n\n\tn.timer.Pause()\n\terr := n.et.tm.WriteKapacitorPoint(p)\n\tn.timer.Resume()\n\n\tif err != nil {\n\t\tn.incrementErrorCount()\n\t\tn.logger.Println(\"E! failed to write point over loopback\")\n\t} else {\n\t\tn.pointsWritten.Add(1)\n\t}\n\treturn nil\n}\n\nfunc (n *KapacitorLoopbackNode) BeginBatch(begin edge.BeginBatchMessage) error {\n\tn.begin = begin\n\treturn nil\n}\n\nfunc (n *KapacitorLoopbackNode) BatchPoint(bp edge.BatchPointMessage) error {\n\ttags := bp.Tags()\n\tif len(n.k.Tags) > 0 {\n\t\ttags = bp.Tags().Copy()\n\t\tfor k, v := range n.k.Tags {\n\t\t\ttags[k] = v\n\t\t}\n\t}\n\tp := edge.NewPointMessage(\n\t\tn.begin.Name(),\n\t\tn.k.Database,\n\t\tn.k.RetentionPolicy,\n\t\tmodels.Dimensions{},\n\t\tbp.Fields(),\n\t\ttags,\n\t\tbp.Time(),\n\t)\n\n\tn.timer.Pause()\n\terr := n.et.tm.WriteKapacitorPoint(p)\n\tn.timer.Resume()\n\n\tif err != nil {\n\t\tn.incrementErrorCount()\n\t\tn.logger.Println(\"E! failed to write point over loopback\")\n\t} else {\n\t\tn.pointsWritten.Add(1)\n\t}\n\treturn nil\n}\nfunc (n *KapacitorLoopbackNode) EndBatch(edge.EndBatchMessage) error {\n\treturn nil\n}\nfunc (n *KapacitorLoopbackNode) Barrier(edge.BarrierMessage) error {\n\treturn nil\n}\nfunc (n *KapacitorLoopbackNode) DeleteGroup(edge.DeleteGroupMessage) error {\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/list-deps",
    "content": "#!/bin/bash\n\n# Make sure we are in the dir of the script\nDIR=$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\ncd $DIR\n\n# List all dependent packages and whether they have been vendored.\ndeps() {\n    local package packages allDeps paths\n\n    # Get the current package\n    package=$(go list .)\n    # Get the sub packages excluding vendored packages\n    packages=$(go list ./... | grep -v \"^$package/vendor\")\n    allDeps=$(go list -f '{{ join .Deps \"\\n\"}}' $packages)\n\n    for dep in $allDeps\n    do\n        # Skip standard lib deps\n        paths=(${dep//\\// })\n        if ! [[ \"${paths[0]}\" =~ .*\\..* ]]\n        then\n            continue\n        fi\n        # Skip deps from within current package\n        if [[ \"$dep\" =~ ^$package ]]\n        then\n            if [[ \"$dep\" =~ ^$package/vendor ]]\n            then\n                # Rewrite vendored deps as normal deps\n                dep=\"v ${dep/$package\\/vendor\\//}\"\n            else\n                continue\n            fi\n        else\n            dep=\"n $dep\"\n        fi\n\n        echo $dep\n    done\n}\n\n\n# Deduplicate and sort the output\ndeps | sort -k 2 -u\n\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/log.go",
    "content": "package kapacitor\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"strings\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n\t\"github.com/influxdata/wlog\"\n)\n\ntype LogNode struct {\n\tnode\n\n\tkey string\n\tbuf bytes.Buffer\n\tenc *json.Encoder\n\n\tbatchBuffer *edge.BatchBuffer\n}\n\n// Create a new  LogNode which logs all data it receives\nfunc newLogNode(et *ExecutingTask, n *pipeline.LogNode, l *log.Logger) (*LogNode, error) {\n\tlevel, ok := wlog.StringToLevel[strings.ToUpper(n.Level)]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"invalid log level %s\", n.Level)\n\t}\n\tnn := &LogNode{\n\t\tnode:        node{Node: n, et: et, logger: l},\n\t\tkey:         fmt.Sprintf(\"%c! %s\", wlog.ReverseLevels[level], n.Prefix),\n\t\tbatchBuffer: new(edge.BatchBuffer),\n\t}\n\tnn.enc = json.NewEncoder(&nn.buf)\n\tnn.node.runF = nn.runLog\n\treturn nn, nil\n}\n\nfunc (n *LogNode) runLog([]byte) error {\n\tconsumer := edge.NewConsumerWithReceiver(\n\t\tn.ins[0],\n\t\tedge.NewReceiverFromForwardReceiverWithStats(\n\t\t\tn.outs,\n\t\t\tedge.NewTimedForwardReceiver(n.timer, n),\n\t\t),\n\t)\n\treturn consumer.Consume()\n\n}\n\nfunc (n *LogNode) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) {\n\treturn nil, n.batchBuffer.BeginBatch(begin)\n}\n\nfunc (n *LogNode) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) {\n\treturn nil, n.batchBuffer.BatchPoint(bp)\n}\n\nfunc (n *LogNode) EndBatch(end edge.EndBatchMessage) (edge.Message, error) {\n\treturn n.BufferedBatch(n.batchBuffer.BufferedBatchMessage(end))\n}\n\nfunc (n *LogNode) BufferedBatch(batch edge.BufferedBatchMessage) (edge.Message, error) {\n\tn.buf.Reset()\n\tif err := n.enc.Encode(batch); err != nil {\n\t\tn.incrementErrorCount()\n\t\tn.logger.Println(\"E!\", err)\n\t\treturn batch, nil\n\t}\n\tn.logger.Println(n.key, n.buf.String())\n\treturn batch, nil\n}\n\nfunc (n *LogNode) Point(p edge.PointMessage) (edge.Message, error) {\n\tn.buf.Reset()\n\tif err := n.enc.Encode(p); err != nil {\n\t\tn.incrementErrorCount()\n\t\tn.logger.Println(\"E!\", err)\n\t\treturn p, nil\n\t}\n\tn.logger.Println(n.key, n.buf.String())\n\treturn p, nil\n}\n\nfunc (n *LogNode) Barrier(b edge.BarrierMessage) (edge.Message, error) {\n\treturn b, nil\n}\nfunc (n *LogNode) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) {\n\treturn d, nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/metaclient.go",
    "content": "package kapacitor\n\nimport (\n\t\"errors\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/services/meta\"\n)\n\ntype NoopMetaClient struct{}\n\nfunc (m *NoopMetaClient) WaitForLeader(d time.Duration) error {\n\treturn nil\n}\nfunc (m *NoopMetaClient) CreateDatabase(name string) (*meta.DatabaseInfo, error) {\n\treturn nil, nil\n}\nfunc (m *NoopMetaClient) CreateDatabaseWithRetentionPolicy(name string, rpi *meta.RetentionPolicySpec) (*meta.DatabaseInfo, error) {\n\treturn nil, nil\n}\nfunc (m *NoopMetaClient) CreateRetentionPolicy(database string, rpi *meta.RetentionPolicySpec) (*meta.RetentionPolicyInfo, error) {\n\treturn nil, nil\n}\nfunc (m *NoopMetaClient) Database(name string) *meta.DatabaseInfo {\n\treturn &meta.DatabaseInfo{\n\t\tName: name,\n\t}\n}\nfunc (m *NoopMetaClient) RetentionPolicy(database, name string) (*meta.RetentionPolicyInfo, error) {\n\treturn nil, nil\n}\nfunc (m *NoopMetaClient) Authenticate(username, password string) (ui *meta.UserInfo, err error) {\n\treturn nil, errors.New(\"not authenticated\")\n}\nfunc (m *NoopMetaClient) Users() ([]meta.UserInfo, error) {\n\treturn nil, errors.New(\"no user\")\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/node.go",
    "content": "package kapacitor\n\nimport (\n\t\"bytes\"\n\t\"expvar\"\n\t\"fmt\"\n\t\"log\"\n\t\"runtime\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\tkexpvar \"github.com/influxdata/kapacitor/expvar\"\n\t\"github.com/influxdata/kapacitor/models\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n\t\"github.com/influxdata/kapacitor/server/vars\"\n\t\"github.com/influxdata/kapacitor/timer\"\n\t\"github.com/pkg/errors\"\n)\n\nconst (\n\tstatErrorCount       = \"errors\"\n\tstatCardinalityGauge = \"working_cardinality\"\n\tstatAverageExecTime  = \"avg_exec_time_ns\"\n)\n\n// A node that can be  in an executor.\ntype Node interface {\n\tpipeline.Node\n\n\taddParentEdge(edge.StatsEdge)\n\n\tinit()\n\n\t// start the node and its children\n\tstart(snapshot []byte)\n\tstop()\n\n\t// snapshot running state\n\tsnapshot() ([]byte, error)\n\trestore(snapshot []byte) error\n\n\t// wait for the node to finish processing and return any errors\n\tWait() error\n\n\t// link specified child\n\tlinkChild(c Node) error\n\taddParent(p Node)\n\n\t// close children edges\n\tcloseChildEdges()\n\t// abort parent edges\n\tabortParentEdges()\n\n\t// executing dot\n\tedot(buf *bytes.Buffer, labels bool)\n\n\tnodeStatsByGroup() map[models.GroupID]nodeStats\n\n\tcollectedCount() int64\n\n\temittedCount() int64\n\n\tincrementErrorCount()\n\n\tstats() map[string]interface{}\n}\n\n//implementation of Node\ntype node struct {\n\tpipeline.Node\n\tet         *ExecutingTask\n\tparents    []Node\n\tchildren   []Node\n\trunF       func(snapshot []byte) error\n\tstopF      func()\n\terrCh      chan error\n\terr        error\n\tfinishedMu sync.Mutex\n\tfinished   bool\n\tins        []edge.StatsEdge\n\touts       []edge.StatsEdge\n\tlogger     *log.Logger\n\ttimer      timer.Timer\n\tstatsKey   string\n\tstatMap    *kexpvar.Map\n\n\tnodeErrors *kexpvar.Int\n}\n\nfunc (n *node) addParentEdge(e edge.StatsEdge) {\n\tn.ins = append(n.ins, e)\n}\n\nfunc (n *node) abortParentEdges() {\n\tfor _, in := range n.ins {\n\t\tin.Abort()\n\t}\n}\n\nfunc (n *node) init() {\n\ttags := map[string]string{\n\t\t\"task\": n.et.Task.ID,\n\t\t\"node\": n.Name(),\n\t\t\"type\": n.et.Task.Type.String(),\n\t\t\"kind\": n.Desc(),\n\t}\n\tn.statsKey, n.statMap = vars.NewStatistic(\"nodes\", tags)\n\tavgExecVar := &MaxDuration{}\n\tn.statMap.Set(statAverageExecTime, avgExecVar)\n\tn.nodeErrors = &kexpvar.Int{}\n\tn.statMap.Set(statErrorCount, n.nodeErrors)\n\tn.statMap.Set(statCardinalityGauge, kexpvar.NewIntFuncGauge(nil))\n\tn.timer = n.et.tm.TimingService.NewTimer(avgExecVar)\n\tn.errCh = make(chan error, 1)\n}\n\nfunc (n *node) start(snapshot []byte) {\n\tgo func() {\n\t\tvar err error\n\t\tdefer func() {\n\t\t\t// Always close children edges\n\t\t\tn.closeChildEdges()\n\t\t\t// Propagate error up\n\t\t\tif err != nil {\n\t\t\t\t// Handle panic in runF\n\t\t\t\tr := recover()\n\t\t\t\tif r != nil {\n\t\t\t\t\ttrace := make([]byte, 512)\n\t\t\t\t\tn := runtime.Stack(trace, false)\n\t\t\t\t\terr = fmt.Errorf(\"%s: Trace:%s\", r, string(trace[:n]))\n\t\t\t\t}\n\t\t\t\tn.abortParentEdges()\n\t\t\t\tn.logger.Println(\"E!\", err)\n\t\t\t\terr = errors.Wrap(err, n.Name())\n\t\t\t}\n\t\t\tn.errCh <- err\n\t\t}()\n\t\t// Run node\n\t\terr = n.runF(snapshot)\n\t}()\n}\n\nfunc (n *node) stop() {\n\tif n.stopF != nil {\n\t\tn.stopF()\n\t}\n\tvars.DeleteStatistic(n.statsKey)\n}\n\n// no-op snapshot\nfunc (n *node) snapshot() (b []byte, err error) { return }\n\n// no-op restore\nfunc (n *node) restore([]byte) error { 
return nil }\n\nfunc (n *node) Wait() error {\n\tn.finishedMu.Lock()\n\tdefer n.finishedMu.Unlock()\n\tif !n.finished {\n\t\tn.finished = true\n\t\tn.err = <-n.errCh\n\t}\n\treturn n.err\n}\n\nfunc (n *node) addChild(c Node) (edge.StatsEdge, error) {\n\tif n.Provides() != c.Wants() {\n\t\treturn nil, fmt.Errorf(\"cannot add child mismatched edges: %s:%s -> %s:%s\", n.Name(), n.Provides(), c.Name(), c.Wants())\n\t}\n\tif n.Provides() == pipeline.NoEdge {\n\t\treturn nil, fmt.Errorf(\"cannot add child no edge expected: %s:%s -> %s:%s\", n.Name(), n.Provides(), c.Name(), c.Wants())\n\t}\n\tn.children = append(n.children, c)\n\n\tedge := newEdge(n.et.Task.ID, n.Name(), c.Name(), n.Provides(), defaultEdgeBufferSize, n.et.tm.LogService)\n\tif edge == nil {\n\t\treturn nil, fmt.Errorf(\"unknown edge type %s\", n.Provides())\n\t}\n\tc.addParentEdge(edge)\n\treturn edge, nil\n}\n\nfunc (n *node) addParent(p Node) {\n\tn.parents = append(n.parents, p)\n}\n\nfunc (n *node) linkChild(c Node) error {\n\t// add child\n\tedge, err := n.addChild(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// add parent\n\tc.addParent(n)\n\n\t// store edge to child\n\tn.outs = append(n.outs, edge)\n\treturn nil\n}\n\nfunc (n *node) closeChildEdges() {\n\tfor _, child := range n.outs {\n\t\tchild.Close()\n\t}\n}\n\nfunc (n *node) edot(buf *bytes.Buffer, labels bool) {\n\tif labels {\n\t\t// Print all stats on node.\n\t\tbuf.WriteString(\n\t\t\tfmt.Sprintf(\"\\n%s [xlabel=\\\"\",\n\t\t\t\tn.Name(),\n\t\t\t),\n\t\t)\n\t\ti := 0\n\t\tn.statMap.DoSorted(func(kv expvar.KeyValue) {\n\t\t\tif i != 0 {\n\t\t\t\t// NOTE: A literal \\r indicates a right justified newline in graphviz syntax.\n\t\t\t\tbuf.WriteString(`\\r`)\n\t\t\t}\n\t\t\ti++\n\t\t\tvar s string\n\t\t\tif sv, ok := kv.Value.(kexpvar.StringVar); ok {\n\t\t\t\ts = sv.StringValue()\n\t\t\t} else {\n\t\t\t\ts = kv.Value.String()\n\t\t\t}\n\t\t\tbuf.WriteString(\n\t\t\t\tfmt.Sprintf(\"%s=%s\",\n\t\t\t\t\tkv.Key,\n\t\t\t\t\ts,\n\t\t\t\t),\n\t\t\t)\n\t\t})\n\t\tbuf.Write([]byte(\"\\\"];\\n\"))\n\n\t\tfor i, c := range n.children {\n\t\t\tbuf.Write([]byte(\n\t\t\t\tfmt.Sprintf(\"%s -> %s [label=\\\"processed=%d\\\"];\\n\",\n\t\t\t\t\tn.Name(),\n\t\t\t\t\tc.Name(),\n\t\t\t\t\tn.outs[i].Collected(),\n\t\t\t\t),\n\t\t\t))\n\t\t}\n\n\t} else {\n\t\t// Print all stats on node.\n\t\tbuf.Write([]byte(\n\t\t\tfmt.Sprintf(\"\\n%s [\",\n\t\t\t\tn.Name(),\n\t\t\t),\n\t\t))\n\t\tn.statMap.DoSorted(func(kv expvar.KeyValue) {\n\t\t\tvar s string\n\t\t\tif sv, ok := kv.Value.(kexpvar.StringVar); ok {\n\t\t\t\ts = sv.StringValue()\n\t\t\t} else {\n\t\t\t\ts = kv.Value.String()\n\t\t\t}\n\t\t\tbuf.Write([]byte(\n\t\t\t\tfmt.Sprintf(\"%s=\\\"%s\\\" \",\n\t\t\t\t\tkv.Key,\n\t\t\t\t\ts,\n\t\t\t\t),\n\t\t\t))\n\t\t})\n\t\tbuf.Write([]byte(\"];\\n\"))\n\t\tfor i, c := range n.children {\n\t\t\tbuf.Write([]byte(\n\t\t\t\tfmt.Sprintf(\"%s -> %s [processed=\\\"%d\\\"];\\n\",\n\t\t\t\t\tn.Name(),\n\t\t\t\t\tc.Name(),\n\t\t\t\t\tn.outs[i].Collected(),\n\t\t\t\t),\n\t\t\t))\n\t\t}\n\t}\n}\n\n// node collected count is the sum of emitted counts of parent edges\nfunc (n *node) collectedCount() (count int64) {\n\tfor _, in := range n.ins {\n\t\tcount += in.Emitted()\n\t}\n\treturn\n}\n\n// node emitted count is the sum of collected counts of children edges\nfunc (n *node) emittedCount() (count int64) {\n\tfor _, out := range n.outs {\n\t\tcount += out.Collected()\n\t}\n\treturn\n}\n\n// node increment error count increments a node's error_count stat\nfunc (n *node) incrementErrorCount() 
{\n\tn.nodeErrors.Add(1)\n}\n\nfunc (n *node) stats() map[string]interface{} {\n\tstats := make(map[string]interface{})\n\n\tn.statMap.Do(func(kv expvar.KeyValue) {\n\t\tswitch v := kv.Value.(type) {\n\t\tcase kexpvar.IntVar:\n\t\t\tstats[kv.Key] = v.IntValue()\n\t\tcase kexpvar.FloatVar:\n\t\t\tstats[kv.Key] = v.FloatValue()\n\t\tdefault:\n\t\t\tstats[kv.Key] = v.String()\n\t\t}\n\t})\n\n\treturn stats\n}\n\n// Statistics for a node\ntype nodeStats struct {\n\tFields     models.Fields\n\tTags       models.Tags\n\tDimensions models.Dimensions\n}\n\n// Return a copy of the current node statistics.\n// If if no groups have been seen yet a NilGroup will be created with zero stats.\nfunc (n *node) nodeStatsByGroup() (stats map[models.GroupID]nodeStats) {\n\t// Get the counts for just one output.\n\tstats = make(map[models.GroupID]nodeStats)\n\tif len(n.outs) > 0 {\n\t\tn.outs[0].ReadGroupStats(func(g *edge.GroupStats) {\n\t\t\tstats[g.GroupInfo.ID] = nodeStats{\n\t\t\t\tFields: models.Fields{\n\t\t\t\t\t// A node's emitted count is the collected count of its output.\n\t\t\t\t\t\"emitted\": g.Collected,\n\t\t\t\t},\n\t\t\t\tTags:       g.GroupInfo.Tags,\n\t\t\t\tDimensions: g.GroupInfo.Dimensions,\n\t\t\t}\n\t\t})\n\t}\n\tif len(stats) == 0 {\n\t\t// If we have no groups/stats add nil group with emitted = 0\n\t\tstats[\"\"] = nodeStats{\n\t\t\tFields: models.Fields{\n\t\t\t\t\"emitted\": int64(0),\n\t\t\t},\n\t\t}\n\t}\n\treturn\n}\n\n// MaxDuration is a 64-bit int variable representing a duration in nanoseconds,that satisfies the expvar.Var interface.\n// When setting a value it will only be set if it is greater than the current value.\ntype MaxDuration struct {\n\td      int64\n\tsetter timer.Setter\n}\n\nfunc (v *MaxDuration) String() string {\n\treturn `\"` + v.StringValue() + `\"`\n}\n\nfunc (v *MaxDuration) StringValue() string {\n\treturn time.Duration(v.IntValue()).String()\n}\n\nfunc (v *MaxDuration) IntValue() int64 {\n\treturn atomic.LoadInt64(&v.d)\n}\n\n// Set sets value if it is greater than current value.\n// If set was successful and a setter exists, will pass on value to setter.\nfunc (v *MaxDuration) Set(next int64) {\n\tfor {\n\t\tcur := v.IntValue()\n\t\tif next > cur {\n\t\t\tif atomic.CompareAndSwapInt64(&v.d, cur, next) {\n\t\t\t\tif v.setter != nil {\n\t\t\t\t\tv.setter.Set(next)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\treturn\n\t\t}\n\t}\n}\n"
  },
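  {
    "path": "examples/max_duration/main.go",
    "content": "// Package main is a minimal sketch (not part of upstream Kapacitor) showing\n// how the exported kapacitor.MaxDuration expvar behaves: Set only stores a\n// value when it exceeds the current maximum, so out-of-order timing samples\n// never shrink the reported duration. The example path and values here are\n// illustrative only.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/influxdata/kapacitor\"\n)\n\nfunc main() {\n\tvar d kapacitor.MaxDuration\n\td.Set(int64(5 * time.Millisecond)) // stored: first value becomes the max so far\n\td.Set(int64(2 * time.Millisecond)) // ignored: smaller than the current max\n\td.Set(int64(9 * time.Millisecond)) // stored: new max\n\n\tfmt.Println(d.StringValue()) // 9ms\n\tfmt.Println(d.String())      // \"9ms\" (quoted, expvar.Var form)\n}\n"
  },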
  {
    "path": "vendor/github.com/influxdata/kapacitor/noop.go",
    "content": "package kapacitor\n\nimport (\n\t\"log\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n)\n\ntype NoOpNode struct {\n\tnode\n}\n\n// Create a new  NoOpNode which does nothing with the data and just passes it through.\nfunc newNoOpNode(et *ExecutingTask, n *pipeline.NoOpNode, l *log.Logger) (*NoOpNode, error) {\n\tnn := &NoOpNode{\n\t\tnode: node{Node: n, et: et, logger: l},\n\t}\n\tnn.node.runF = nn.runNoOp\n\treturn nn, nil\n}\n\nfunc (n *NoOpNode) runNoOp([]byte) error {\n\tfor m, ok := n.ins[0].Emit(); ok; m, ok = n.ins[0].Emit() {\n\t\tif err := edge.Forward(n.outs, m); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/output.go",
    "content": "package kapacitor\n\n// An output of a pipeline. Still need to improve this interface to expose different types of outputs.\ntype Output interface {\n\tEndpoint() string\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/query.go",
    "content": "package kapacitor\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n\t\"github.com/influxdata/kapacitor/tick/ast\"\n\t\"github.com/pkg/errors\"\n)\n\ntype Query struct {\n\tstartTL         *influxql.TimeLiteral\n\tstopTL          *influxql.TimeLiteral\n\tgroupByTimeDL   *influxql.DurationLiteral\n\tgroupByOffsetDL *influxql.DurationLiteral\n\tstmt            *influxql.SelectStatement\n\talignGroup      bool\n}\n\nfunc NewQuery(queryString string) (*Query, error) {\n\tquery := &Query{}\n\t// Parse and validate query\n\tq, err := influxql.ParseQuery(queryString)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to parse InfluxQL query\")\n\t}\n\tif l := len(q.Statements); l != 1 {\n\t\treturn nil, fmt.Errorf(\"query must be a single select statement, got %d statements\", l)\n\t}\n\tvar ok bool\n\tquery.stmt, ok = q.Statements[0].(*influxql.SelectStatement)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"query is not a select statement %q\", q)\n\t}\n\n\t// Add in time condition nodes\n\tquery.startTL = &influxql.TimeLiteral{}\n\tstartExpr := &influxql.BinaryExpr{\n\t\tOp:  influxql.GTE,\n\t\tLHS: &influxql.VarRef{Val: \"time\"},\n\t\tRHS: query.startTL,\n\t}\n\n\tquery.stopTL = &influxql.TimeLiteral{}\n\tstopExpr := &influxql.BinaryExpr{\n\t\tOp:  influxql.LT,\n\t\tLHS: &influxql.VarRef{Val: \"time\"},\n\t\tRHS: query.stopTL,\n\t}\n\n\tif query.stmt.Condition != nil {\n\t\tquery.stmt.Condition = &influxql.BinaryExpr{\n\t\t\tOp:  influxql.AND,\n\t\t\tLHS: query.stmt.Condition,\n\t\t\tRHS: &influxql.BinaryExpr{\n\t\t\t\tOp:  influxql.AND,\n\t\t\t\tLHS: startExpr,\n\t\t\t\tRHS: stopExpr,\n\t\t\t},\n\t\t}\n\t} else {\n\t\tquery.stmt.Condition = &influxql.BinaryExpr{\n\t\t\tOp:  influxql.AND,\n\t\t\tLHS: startExpr,\n\t\t\tRHS: stopExpr,\n\t\t}\n\t}\n\treturn query, nil\n}\n\n// Return the db rp pairs of the query\nfunc (q *Query) DBRPs() ([]DBRP, error) {\n\tdbrps := make([]DBRP, len(q.stmt.Sources))\n\tfor i, s := range q.stmt.Sources {\n\t\tm, ok := s.(*influxql.Measurement)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"unknown query source %T\", s)\n\t\t}\n\t\tdbrps[i] = DBRP{\n\t\t\tDatabase:        m.Database,\n\t\t\tRetentionPolicy: m.RetentionPolicy,\n\t\t}\n\t}\n\treturn dbrps, nil\n}\n\n// Set the start time of the query\nfunc (q *Query) StartTime() time.Time {\n\treturn q.startTL.Val\n}\n\n// Set the stop time of the query\nfunc (q *Query) StopTime() time.Time {\n\treturn q.stopTL.Val\n}\n\n// Set the start time of the query\nfunc (q *Query) SetStartTime(s time.Time) {\n\tq.startTL.Val = s\n\tif q.alignGroup && q.groupByTimeDL != nil && q.groupByOffsetDL != nil {\n\t\tq.groupByOffsetDL.Val = s.Sub(time.Unix(0, 0)) % q.groupByTimeDL.Val\n\t}\n}\n\n// Set the stop time of the query\nfunc (q *Query) SetStopTime(s time.Time) {\n\tq.stopTL.Val = s\n}\n\n// Deep clone this query\nfunc (q *Query) Clone() (*Query, error) {\n\tn := &Query{\n\t\tstmt:       q.stmt.Clone(),\n\t\talignGroup: q.alignGroup,\n\t}\n\t// Find the start/stop time literals\n\tvar err error\n\tinfluxql.WalkFunc(n.stmt.Condition, func(qlNode influxql.Node) {\n\t\tif bn, ok := qlNode.(*influxql.BinaryExpr); ok {\n\t\t\tswitch bn.Op {\n\t\t\tcase influxql.GTE:\n\t\t\t\tif vf, ok := bn.LHS.(*influxql.VarRef); !ok || vf.Val != \"time\" {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif tl, ok := bn.RHS.(*influxql.TimeLiteral); ok {\n\t\t\t\t\t// We have a \"time\" >= 'time literal'\n\t\t\t\t\tif n.startTL == nil {\n\t\t\t\t\t\tn.startTL = tl\n\t\t\t\t\t} else {\n\t\t\t\t\t\terr = 
errors.New(\"invalid query, found multiple start time conditions\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\tcase influxql.LT:\n\t\t\t\tif vf, ok := bn.LHS.(*influxql.VarRef); !ok || vf.Val != \"time\" {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif tl, ok := bn.RHS.(*influxql.TimeLiteral); ok {\n\t\t\t\t\t// We have a \"time\" < 'time literal'\n\t\t\t\t\tif n.stopTL == nil {\n\t\t\t\t\t\tn.stopTL = tl\n\t\t\t\t\t} else {\n\t\t\t\t\t\terr = errors.New(\"invalid query, found multiple stop time conditions\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\tinfluxql.WalkFunc(n.stmt.Dimensions, func(qlNode influxql.Node) {\n\t\tif cn, ok := qlNode.(*influxql.Call); ok {\n\t\t\tif cn.Name == \"time\" {\n\t\t\t\tif dln, ok := cn.Args[0].(*influxql.DurationLiteral); ok {\n\t\t\t\t\tn.groupByTimeDL = &influxql.DurationLiteral{\n\t\t\t\t\t\tVal: dln.Val,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif don, ok := cn.Args[1].(*influxql.DurationLiteral); ok {\n\t\t\t\t\tn.groupByOffsetDL = &influxql.DurationLiteral{\n\t\t\t\t\t\tVal: don.Val,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\tif n.startTL == nil {\n\t\terr = errors.New(\"invalid query, missing start time condition\")\n\t}\n\tif n.stopTL == nil {\n\t\terr = errors.New(\"invalid query, missing stop time condition\")\n\t}\n\treturn n, err\n}\n\n// Set the dimensions on the query\nfunc (q *Query) Dimensions(dims []interface{}) error {\n\tq.stmt.Dimensions = q.stmt.Dimensions[:0]\n\tq.groupByTimeDL = nil\n\tq.groupByOffsetDL = &influxql.DurationLiteral{\n\t\tVal: 0,\n\t}\n\t// Add in dimensions\n\thasTime := false\n\tfor _, d := range dims {\n\t\tswitch dim := d.(type) {\n\t\tcase time.Duration:\n\t\t\tif hasTime {\n\t\t\t\treturn fmt.Errorf(\"groupBy cannot have more than one time dimension\")\n\t\t\t}\n\t\t\t// Add time dimension\n\t\t\thasTime = true\n\t\t\tq.groupByTimeDL = &influxql.DurationLiteral{\n\t\t\t\tVal: dim,\n\t\t\t}\n\t\t\tif q.alignGroup {\n\t\t\t\tq.SetStartTime(q.StartTime())\n\t\t\t}\n\t\t\tq.stmt.Dimensions = append(q.stmt.Dimensions,\n\t\t\t\t&influxql.Dimension{\n\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\tName: \"time\",\n\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\tq.groupByTimeDL,\n\t\t\t\t\t\t\tq.groupByOffsetDL,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t})\n\t\tcase string:\n\t\t\tq.stmt.Dimensions = append(q.stmt.Dimensions,\n\t\t\t\t&influxql.Dimension{\n\t\t\t\t\tExpr: &influxql.VarRef{\n\t\t\t\t\t\tVal: dim,\n\t\t\t\t\t},\n\t\t\t\t})\n\t\tcase *ast.StarNode:\n\t\t\tq.stmt.Dimensions = append(q.stmt.Dimensions,\n\t\t\t\t&influxql.Dimension{\n\t\t\t\t\tExpr: &influxql.Wildcard{},\n\t\t\t\t})\n\t\tcase TimeDimension:\n\t\t\tif hasTime {\n\t\t\t\treturn fmt.Errorf(\"groupBy cannot have more than one time dimension\")\n\t\t\t}\n\t\t\t// Add time dimension\n\t\t\thasTime = true\n\t\t\tq.groupByTimeDL = &influxql.DurationLiteral{\n\t\t\t\tVal: dim.Length,\n\t\t\t}\n\t\t\tq.groupByOffsetDL.Val = dim.Offset\n\t\t\tif q.alignGroup {\n\t\t\t\tq.SetStartTime(q.StartTime())\n\t\t\t}\n\t\t\tq.stmt.Dimensions = append(q.stmt.Dimensions,\n\t\t\t\t&influxql.Dimension{\n\t\t\t\t\tExpr: &influxql.Call{\n\t\t\t\t\t\tName: \"time\",\n\t\t\t\t\t\tArgs: []influxql.Expr{\n\t\t\t\t\t\t\tq.groupByTimeDL,\n\t\t\t\t\t\t\tq.groupByOffsetDL,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t})\n\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"invalid dimension type:%T, must be string or time.Duration\", d)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (q *Query) IsGroupedByTime() bool {\n\treturn q.groupByTimeDL != nil\n}\n\nfunc (q *Query) AlignGroup() {\n\tq.alignGroup = 
true\n}\n\nfunc (q *Query) Fill(option influxql.FillOption, value interface{}) {\n\tq.stmt.Fill = option\n\tq.stmt.FillValue = value\n}\n\nfunc (q *Query) String() string {\n\treturn q.stmt.String()\n}\n\ntype TimeDimension struct {\n\tLength time.Duration\n\tOffset time.Duration\n}\n\nfunc groupByTime(length time.Duration, offset ...time.Duration) (TimeDimension, error) {\n\tvar o time.Duration\n\tif l := len(offset); l == 1 {\n\t\to = offset[0]\n\n\t} else if l != 0 {\n\t\treturn TimeDimension{}, fmt.Errorf(\"time() function expects 1 or 2 args, got %d\", l+1)\n\t}\n\treturn TimeDimension{\n\t\tLength: length,\n\t\tOffset: o,\n\t}, nil\n}\n"
  },
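  {
    "path": "examples/query_window/main.go",
    "content": "// Package main is a minimal sketch (not part of upstream Kapacitor) of the\n// exported Query API from query.go: NewQuery injects \"time >= start AND\n// time < stop\" conditions whose literals are later filled in by\n// SetStartTime/SetStopTime, and Dimensions rewrites the GROUP BY clause.\n// The query text and times are illustrative only.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/influxdata/kapacitor\"\n)\n\nfunc main() {\n\tq, err := kapacitor.NewQuery(\"SELECT mean(usage) FROM telegraf.autogen.cpu\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Window the query to one concrete hour.\n\tstart := time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC)\n\tq.SetStartTime(start)\n\tq.SetStopTime(start.Add(time.Hour))\n\n\t// Group into 10m buckets and by the host tag.\n\tif err := q.Dimensions([]interface{}{10 * time.Minute, \"host\"}); err != nil {\n\t\tpanic(err)\n\t}\n\n\tfmt.Println(q.IsGroupedByTime()) // true\n\tfmt.Println(q.String())          // statement including the injected time bounds\n}\n"
  },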
  {
    "path": "vendor/github.com/influxdata/kapacitor/query_test.go",
    "content": "package kapacitor_test\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/kapacitor\"\n)\n\nfunc TestQuery_Clone(t *testing.T) {\n\ttestCases := []string{\n\t\t\"SELECT usage FROM telegraf.autogen.cpu\",\n\t\t\"SELECT mean(usage) FROM telegraf.autogen.cpu WHERE host = 'serverA'\",\n\t\t\"SELECT mean(usage) FROM telegraf.autogen.cpu WHERE host = 'serverA' AND dc = 'slc'\",\n\t\t\"SELECT mean(usage) FROM telegraf.autogen.cpu WHERE host = 'serverA' AND dc = 'slc' OR product = 'login'\",\n\t\t\"SELECT mean(usage) FROM telegraf.autogen.cpu WHERE host = 'serverA' AND (dc = 'slc' OR product = 'login')\",\n\t}\n\n\tequal := func(q0, q1 *kapacitor.Query) error {\n\t\tif got, exp := q0.String(), q1.String(); got != exp {\n\t\t\treturn fmt.Errorf(\"unequal query string: got %s exp %s\", got, exp)\n\t\t}\n\t\tif got, exp := q0.StartTime(), q1.StartTime(); got != exp {\n\t\t\treturn fmt.Errorf(\"unequal query start time: got %v exp %v\", got, exp)\n\t\t}\n\t\tif got, exp := q0.StopTime(), q1.StopTime(); got != exp {\n\t\t\treturn fmt.Errorf(\"unequal query stop time: got %v exp %v\", got, exp)\n\t\t}\n\t\tif got, exp := q0.IsGroupedByTime(), q1.IsGroupedByTime(); got != exp {\n\t\t\treturn fmt.Errorf(\"unequal query IsGroupedByTime: got %v exp %v\", got, exp)\n\t\t}\n\t\treturn nil\n\t}\n\tfor _, query := range testCases {\n\t\tq, err := kapacitor.NewQuery(query)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tclone, err := q.Clone()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif err := equal(clone, q); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\n\t\t// Modify original start time\n\t\tstart := time.Date(1975, 1, 1, 0, 0, 0, 0, time.UTC)\n\t\tq.SetStartTime(start)\n\n\t\tif err := equal(clone, q); err == nil {\n\t\t\tt.Errorf(\"equal after modification: got %v\", clone)\n\t\t}\n\n\t\t// Modify clone in the same way\n\t\tclone.SetStartTime(start)\n\t\tif err := equal(clone, q); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\n\t\t// Re-clone\n\t\tclone, err = q.Clone()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif err := equal(clone, q); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\n\t\t// Modify original stop time\n\t\tstop := time.Date(1975, 1, 2, 0, 0, 0, 0, time.UTC)\n\t\tq.SetStopTime(stop)\n\n\t\tif err := equal(clone, q); err == nil {\n\t\t\tt.Errorf(\"equal after modification: got %v\", clone)\n\t\t}\n\n\t\t// Modify clone in the same way\n\t\tclone.SetStopTime(stop)\n\t\tif err := equal(clone, q); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\n\t\t// Re-clone\n\t\tclone, err = q.Clone()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif err := equal(clone, q); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\n\t\t// Set dimensions\n\t\tq.Dimensions([]interface{}{time.Hour})\n\t\tif err := equal(clone, q); err == nil {\n\t\t\tt.Errorf(\"equal after modification: got %v\", clone)\n\t\t}\n\t\t// Set dimesions on the clone in the same way\n\t\tclone.Dimensions([]interface{}{time.Hour})\n\t\tif err := equal(clone, q); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\t// Re-clone\n\t\tclone, err = q.Clone()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif err := equal(clone, q); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\n\t\t// Set group align and dimensions\n\t\tq.AlignGroup()\n\t\tq.Dimensions([]interface{}{kapacitor.TimeDimension{\n\t\t\tLength: time.Minute,\n\t\t\tOffset: time.Second,\n\t\t}})\n\t\tif err := equal(clone, q); err == nil {\n\t\t\tt.Errorf(\"equal after modification: got %v\", clone)\n\t\t\treturn\n\t\t}\n\t\t// Set group align 
and dimesions on the clone in the same way\n\t\tclone.AlignGroup()\n\t\tclone.Dimensions([]interface{}{kapacitor.TimeDimension{\n\t\t\tLength: time.Minute,\n\t\t\tOffset: time.Second,\n\t\t}})\n\t\tif err := equal(clone, q); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t\t// Re-clone\n\t\tclone, err = q.Clone()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif err := equal(clone, q); err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}\n}\nfunc TestQuery_IsGroupedByTime(t *testing.T) {\n\tq, err := kapacitor.NewQuery(\"SELECT usage FROM telegraf.autogen.cpu\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tq.Dimensions([]interface{}{time.Hour})\n\tif !q.IsGroupedByTime() {\n\t\tt.Error(\"expected query to be grouped by time\")\n\t}\n\n\tq, err = kapacitor.NewQuery(\"SELECT usage FROM telegraf.autogen.cpu\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tq.Dimensions([]interface{}{kapacitor.TimeDimension{Length: time.Hour, Offset: time.Minute}})\n\tif !q.IsGroupedByTime() {\n\t\tt.Error(\"expected query to be grouped by time\")\n\t}\n\n\tq.Dimensions([]interface{}{\"host\"})\n\tif q.IsGroupedByTime() {\n\t\tt.Error(\"expected query to not be grouped by time\")\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/replay.go",
    "content": "package kapacitor\n\nimport (\n\t\"bufio\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"time\"\n\n\tdbmodels \"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/kapacitor/clock\"\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/models\"\n)\n\n// Replay stream data from a channel source.\nfunc ReplayStreamFromChan(clck clock.Clock, points <-chan edge.PointMessage, collector StreamCollector, recTime bool) <-chan error {\n\terrC := make(chan error, 1)\n\tgo func() {\n\t\terrC <- replayStreamFromChan(clck, points, collector, recTime)\n\t}()\n\treturn errC\n}\n\n// Replay stream data from an IO source.\nfunc ReplayStreamFromIO(clck clock.Clock, data io.ReadCloser, collector StreamCollector, recTime bool, precision string) <-chan error {\n\tallErrs := make(chan error, 2)\n\terrC := make(chan error, 1)\n\tpoints := make(chan edge.PointMessage)\n\tgo func() {\n\t\tallErrs <- replayStreamFromChan(clck, points, collector, recTime)\n\t}()\n\tgo func() {\n\t\tallErrs <- readPointsFromIO(data, points, precision)\n\t}()\n\tgo func() {\n\t\tfor i := 0; i < cap(allErrs); i++ {\n\t\t\terr := <-allErrs\n\t\t\tif err != nil {\n\t\t\t\terrC <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\terrC <- nil\n\t}()\n\treturn errC\n}\n\nfunc replayStreamFromChan(clck clock.Clock, points <-chan edge.PointMessage, collector StreamCollector, recTime bool) error {\n\tdefer collector.Close()\n\tstart := time.Time{}\n\tvar diff time.Duration\n\tzero := clck.Zero()\n\tfor p := range points {\n\t\tif start.IsZero() {\n\t\t\tstart = p.Time()\n\t\t\tdiff = zero.Sub(start)\n\t\t}\n\t\twaitTime := p.Time().Add(diff).UTC()\n\t\tif !recTime {\n\t\t\tp = p.ShallowCopy()\n\t\t\tp.SetTime(waitTime)\n\t\t}\n\t\tclck.Until(waitTime)\n\t\terr := collector.CollectPoint(p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc readPointsFromIO(data io.ReadCloser, points chan<- edge.PointMessage, precision string) error {\n\tdefer data.Close()\n\tdefer close(points)\n\n\tnow := time.Time{}\n\n\tin := bufio.NewScanner(data)\n\tfor in.Scan() {\n\t\tdb := in.Text()\n\t\tif !in.Scan() {\n\t\t\treturn fmt.Errorf(\"invalid replay file format, expected another line\")\n\t\t}\n\t\trp := in.Text()\n\t\tif !in.Scan() {\n\t\t\treturn fmt.Errorf(\"invalid replay file format, expected another line\")\n\t\t}\n\t\tmps, err := dbmodels.ParsePointsWithPrecision(\n\t\t\tin.Bytes(),\n\t\t\tnow,\n\t\t\tprecision,\n\t\t)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tmp := mps[0]\n\t\tp := edge.NewPointMessage(\n\t\t\tmp.Name(),\n\t\t\tdb,\n\t\t\trp,\n\t\t\tmodels.Dimensions{},\n\t\t\tmodels.Fields(mp.Fields()),\n\t\t\tmodels.Tags(mp.Tags().Map()),\n\t\t\tmp.Time().UTC(),\n\t\t)\n\t\tpoints <- p\n\t}\n\treturn nil\n}\n\n// Replay batch data from a channel source.\nfunc ReplayBatchFromChan(clck clock.Clock, batches []<-chan edge.BufferedBatchMessage, collectors []BatchCollector, recTime bool) <-chan error {\n\terrC := make(chan error, 1)\n\tif e, g := len(batches), len(collectors); e != g {\n\t\terrC <- fmt.Errorf(\"unexpected number of batch collectors. 
exp %d got %d\", e, g)\n\t\treturn errC\n\t}\n\n\tallErrs := make(chan error, len(batches))\n\tfor i := range batches {\n\t\tgo func(collector BatchCollector, batches <-chan edge.BufferedBatchMessage, clck clock.Clock, recTime bool) {\n\t\t\tallErrs <- replayBatchFromChan(clck, batches, collector, recTime)\n\t\t}(collectors[i], batches[i], clck, recTime)\n\t}\n\tgo func() {\n\t\t// Wait for each one to finish and report first error if any\n\t\tfor i := 0; i < cap(allErrs); i++ {\n\t\t\terr := <-allErrs\n\t\t\tif err != nil {\n\t\t\t\terrC <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\terrC <- nil\n\t}()\n\treturn errC\n\n}\n\n// Replay batch data from an IO source.\nfunc ReplayBatchFromIO(clck clock.Clock, data []io.ReadCloser, collectors []BatchCollector, recTime bool) <-chan error {\n\terrC := make(chan error, 1)\n\tif e, g := len(data), len(collectors); e != g {\n\t\terrC <- fmt.Errorf(\"unexpected number of batch collectors. exp %d got %d\", e, g)\n\t\treturn errC\n\t}\n\n\tallErrs := make(chan error, len(data)*2)\n\tfor i := range data {\n\t\tbatches := make(chan edge.BufferedBatchMessage)\n\t\tgo func(collector BatchCollector, batches <-chan edge.BufferedBatchMessage, clck clock.Clock, recTime bool) {\n\t\t\tallErrs <- replayBatchFromChan(clck, batches, collector, recTime)\n\t\t}(collectors[i], batches, clck, recTime)\n\t\tgo func(data io.ReadCloser, batches chan<- edge.BufferedBatchMessage) {\n\t\t\tallErrs <- readBatchFromIO(data, batches)\n\t\t}(data[i], batches)\n\t}\n\tgo func() {\n\t\t// Wait for each one to finish and report first error if any\n\t\tfor i := 0; i < cap(allErrs); i++ {\n\t\t\terr := <-allErrs\n\t\t\tif err != nil {\n\t\t\t\terrC <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\terrC <- nil\n\t}()\n\treturn errC\n}\n\n// Replay the batch data from a single source\nfunc replayBatchFromChan(clck clock.Clock, batches <-chan edge.BufferedBatchMessage, collector BatchCollector, recTime bool) error {\n\tdefer collector.Close()\n\n\t// Find relative times\n\tvar start, tmax time.Time\n\tvar diff time.Duration\n\tzero := clck.Zero()\n\n\tfor b := range batches {\n\t\tif len(b.Points()) == 0 {\n\t\t\t// Emit empty batch\n\t\t\tif b.Begin().Time().IsZero() {\n\t\t\t\t// Set tmax to last batch if not set.\n\t\t\t\tb.Begin().SetTime(tmax)\n\t\t\t} else {\n\t\t\t\ttmax = b.Begin().Time().UTC()\n\t\t\t\tb.Begin().SetTime(tmax)\n\t\t\t}\n\t\t\tif err := collector.CollectBatch(b); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tpoints := b.Points()\n\t\tif start.IsZero() {\n\t\t\tstart = points[0].Time()\n\t\t\tdiff = zero.Sub(start)\n\t\t}\n\t\tvar lastTime time.Time\n\t\tif !recTime {\n\t\t\tfor i := range points {\n\t\t\t\tpoints[i].SetTime(points[i].Time().Add(diff).UTC())\n\t\t\t}\n\t\t\tlastTime = points[len(points)-1].Time()\n\t\t} else {\n\t\t\tlastTime = points[len(points)-1].Time().Add(diff).UTC()\n\t\t}\n\t\tclck.Until(lastTime)\n\t\tif lpt := points[len(points)-1].Time(); b.Begin().Time().Before(lpt) {\n\t\t\tb.Begin().SetTime(lpt)\n\t\t}\n\t\ttmax = b.Begin().Time().UTC()\n\t\tb.Begin().SetTime(tmax)\n\t\tif err := collector.CollectBatch(b); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// Replay the batch data from a single source\nfunc readBatchFromIO(data io.ReadCloser, batches chan<- edge.BufferedBatchMessage) error {\n\tdefer close(batches)\n\tdefer data.Close()\n\tdec := edge.NewBufferedBatchMessageDecoder(data)\n\tfor dec.More() {\n\t\tb, err := dec.Decode()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(b.Points()) == 
0 {\n\t\t\t// do nothing\n\t\t\tcontinue\n\t\t}\n\t\tbatches <- b\n\t}\n\treturn nil\n}\n\nfunc WritePointForRecording(w io.Writer, p edge.PointMessage, precision string) error {\n\tif _, err := fmt.Fprintf(w, \"%s\\n%s\\n\", p.Database(), p.RetentionPolicy()); err != nil {\n\t\treturn err\n\t}\n\tif _, err := w.Write(p.Bytes(precision)); err != nil {\n\t\treturn err\n\t}\n\tif _, err := w.Write([]byte(\"\\n\")); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc WriteBatchForRecording(w io.Writer, b edge.BufferedBatchMessage) error {\n\tenc := json.NewEncoder(w)\n\terr := enc.Encode(b)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n"
  },
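  {
    "path": "examples/recording_format/main.go",
    "content": "// Package main is a minimal sketch (not part of upstream Kapacitor) of the\n// stream recording format produced by WritePointForRecording in replay.go:\n// the database, retention policy, and line-protocol point on three successive\n// lines, which is the layout readPointsFromIO parses back. All values,\n// including the \"s\" precision, are illustrative only.\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/influxdata/kapacitor\"\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/models\"\n)\n\nfunc main() {\n\tp := edge.NewPointMessage(\n\t\t\"cpu\",      // measurement\n\t\t\"telegraf\", // database\n\t\t\"autogen\",  // retention policy\n\t\tmodels.Dimensions{},\n\t\tmodels.Fields{\"usage\": 42.0},\n\t\tmodels.Tags{\"host\": \"serverA\"},\n\t\ttime.Unix(0, 0).UTC(),\n\t)\n\n\tvar buf bytes.Buffer\n\tif err := kapacitor.WritePointForRecording(&buf, p, \"s\"); err != nil {\n\t\tpanic(err)\n\t}\n\t// Expected shape:\n\t//   telegraf\n\t//   autogen\n\t//   cpu,host=serverA usage=42 0\n\tfmt.Print(buf.String())\n}\n"
  },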
  {
    "path": "vendor/github.com/influxdata/kapacitor/result.go",
    "content": "package kapacitor\n\nimport (\n\t\"encoding/json\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"time\"\n\n\t\"github.com/influxdata/influxdb/influxql\"\n)\n\n// The result from an output.\ntype Result influxql.Result\n\n// Unmarshal a Result object from JSON.\nfunc ResultFromJSON(in io.Reader) (r Result) {\n\tb, err := ioutil.ReadAll(in)\n\tif err != nil {\n\t\tr.Err = err\n\t\treturn\n\t}\n\n\t_ = json.Unmarshal(b, &r)\n\t// Convert all times to time.Time\n\tConvertResultTimes(&r)\n\treturn\n}\n\nfunc ConvertResultTimes(r *Result) {\n\tfor _, series := range r.Series {\n\t\tfor i, v := range series.Values {\n\t\t\tfor j, c := range series.Columns {\n\t\t\t\tif c == \"time\" {\n\t\t\t\t\ttStr, ok := v[j].(string)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tt, err := time.Parse(time.RFC3339, tStr)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tseries.Values[i][j] = t\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
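  {
    "path": "examples/result_from_json/main.go",
    "content": "// Package main is a minimal sketch (not part of upstream Kapacitor) of\n// ResultFromJSON from result.go: it unmarshals an InfluxQL result body and,\n// via ConvertResultTimes, rewrites RFC3339 strings in \"time\" columns into\n// time.Time values in place. The JSON payload is illustrative only.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/influxdata/kapacitor\"\n)\n\nfunc main() {\n\tbody := `{\"series\":[{\"name\":\"cpu\",\"columns\":[\"time\",\"usage\"],\"values\":[[\"2017-01-01T00:00:00Z\",42]]}]}`\n\tr := kapacitor.ResultFromJSON(strings.NewReader(body))\n\tif r.Err != nil {\n\t\tpanic(r.Err)\n\t}\n\n\t// The \"time\" column has been converted from a string to time.Time.\n\tts := r.Series[0].Values[0][0].(time.Time)\n\tfmt.Println(ts.UTC(), r.Series[0].Values[0][1])\n}\n"
  },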
  {
    "path": "vendor/github.com/influxdata/kapacitor/sample.go",
    "content": "package kapacitor\n\nimport (\n\t\"errors\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/models\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n)\n\ntype SampleNode struct {\n\tnode\n\ts *pipeline.SampleNode\n\n\tcounts   map[models.GroupID]int64\n\tduration time.Duration\n}\n\n// Create a new  SampleNode which filters data from a source.\nfunc newSampleNode(et *ExecutingTask, n *pipeline.SampleNode, l *log.Logger) (*SampleNode, error) {\n\tsn := &SampleNode{\n\t\tnode:     node{Node: n, et: et, logger: l},\n\t\ts:        n,\n\t\tcounts:   make(map[models.GroupID]int64),\n\t\tduration: n.Duration,\n\t}\n\tsn.node.runF = sn.runSample\n\tif n.Duration == 0 && n.N == 0 {\n\t\treturn nil, errors.New(\"invalid sample rate: must be positive integer or duration\")\n\t}\n\treturn sn, nil\n}\n\nfunc (n *SampleNode) runSample([]byte) error {\n\tconsumer := edge.NewGroupedConsumer(\n\t\tn.ins[0],\n\t\tn,\n\t)\n\tn.statMap.Set(statCardinalityGauge, consumer.CardinalityVar())\n\treturn consumer.Consume()\n}\n\nfunc (n *SampleNode) NewGroup(group edge.GroupInfo, first edge.PointMeta) (edge.Receiver, error) {\n\treturn edge.NewReceiverFromForwardReceiverWithStats(\n\t\tn.outs,\n\t\tedge.NewTimedForwardReceiver(n.timer, n.newGroup()),\n\t), nil\n}\nfunc (n *SampleNode) newGroup() *sampleGroup {\n\treturn &sampleGroup{\n\t\tn: n,\n\t}\n}\n\ntype sampleGroup struct {\n\tn *SampleNode\n\n\tcount int64\n}\n\nfunc (g *sampleGroup) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) {\n\tg.count = 0\n\treturn begin, nil\n}\n\nfunc (g *sampleGroup) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) {\n\tkeep := g.n.shouldKeep(g.count, bp.Time())\n\tg.count++\n\tif keep {\n\t\treturn bp, nil\n\t}\n\treturn nil, nil\n}\n\nfunc (g *sampleGroup) EndBatch(end edge.EndBatchMessage) (edge.Message, error) {\n\treturn end, nil\n}\n\nfunc (g *sampleGroup) Point(p edge.PointMessage) (edge.Message, error) {\n\tkeep := g.n.shouldKeep(g.count, p.Time())\n\tg.count++\n\tif keep {\n\t\treturn p, nil\n\t}\n\treturn nil, nil\n}\n\nfunc (g *sampleGroup) Barrier(b edge.BarrierMessage) (edge.Message, error) {\n\treturn b, nil\n}\nfunc (g *sampleGroup) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) {\n\treturn d, nil\n}\n\nfunc (n *SampleNode) shouldKeep(count int64, t time.Time) bool {\n\tif n.duration != 0 {\n\t\tkeepTime := t.Truncate(n.duration)\n\t\treturn t.Equal(keepTime)\n\t} else {\n\t\treturn count%n.s.N == 0\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/shift.go",
    "content": "package kapacitor\n\nimport (\n\t\"errors\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n)\n\ntype ShiftNode struct {\n\tnode\n\ts *pipeline.ShiftNode\n\n\tshift time.Duration\n}\n\n// Create a new  ShiftNode which shifts points and batches in time.\nfunc newShiftNode(et *ExecutingTask, n *pipeline.ShiftNode, l *log.Logger) (*ShiftNode, error) {\n\tsn := &ShiftNode{\n\t\tnode:  node{Node: n, et: et, logger: l},\n\t\ts:     n,\n\t\tshift: n.Shift,\n\t}\n\tsn.node.runF = sn.runShift\n\tif n.Shift == 0 {\n\t\treturn nil, errors.New(\"invalid shift value: must be non zero duration\")\n\t}\n\treturn sn, nil\n}\n\nfunc (n *ShiftNode) runShift([]byte) error {\n\tconsumer := edge.NewConsumerWithReceiver(\n\t\tn.ins[0],\n\t\tedge.NewReceiverFromForwardReceiverWithStats(\n\t\t\tn.outs,\n\t\t\tedge.NewTimedForwardReceiver(n.timer, n),\n\t\t),\n\t)\n\treturn consumer.Consume()\n}\n\nfunc (n *ShiftNode) doShift(t edge.TimeSetter) {\n\tt.SetTime(t.Time().Add(n.shift))\n}\n\nfunc (n *ShiftNode) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) {\n\tbegin = begin.ShallowCopy()\n\tn.doShift(begin)\n\treturn begin, nil\n}\n\nfunc (n *ShiftNode) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) {\n\tbp = bp.ShallowCopy()\n\tn.doShift(bp)\n\treturn bp, nil\n}\n\nfunc (n *ShiftNode) EndBatch(end edge.EndBatchMessage) (edge.Message, error) {\n\treturn end, nil\n}\n\nfunc (n *ShiftNode) Point(p edge.PointMessage) (edge.Message, error) {\n\tp = p.ShallowCopy()\n\tn.doShift(p)\n\treturn p, nil\n}\n\nfunc (n *ShiftNode) Barrier(b edge.BarrierMessage) (edge.Message, error) {\n\treturn b, nil\n}\nfunc (n *ShiftNode) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) {\n\treturn d, nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/state_tracking.go",
    "content": "package kapacitor\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n\t\"github.com/influxdata/kapacitor/tick/ast\"\n\t\"github.com/influxdata/kapacitor/tick/stateful\"\n)\n\ntype stateTracker interface {\n\ttrack(t time.Time, inState bool) interface{}\n\treset()\n}\n\ntype stateTrackingGroup struct {\n\tn *StateTrackingNode\n\tstateful.Expression\n\ttracker stateTracker\n}\n\ntype StateTrackingNode struct {\n\tnode\n\tas string\n\n\texpr      stateful.Expression\n\tscopePool stateful.ScopePool\n\n\tnewTracker func() stateTracker\n}\n\nfunc (n *StateTrackingNode) runStateTracking(_ []byte) error {\n\tconsumer := edge.NewGroupedConsumer(\n\t\tn.ins[0],\n\t\tn,\n\t)\n\tn.statMap.Set(statCardinalityGauge, consumer.CardinalityVar())\n\treturn consumer.Consume()\n}\n\nfunc (n *StateTrackingNode) NewGroup(group edge.GroupInfo, first edge.PointMeta) (edge.Receiver, error) {\n\treturn edge.NewReceiverFromForwardReceiverWithStats(\n\t\tn.outs,\n\t\tedge.NewTimedForwardReceiver(n.timer, n.newGroup()),\n\t), nil\n}\n\nfunc (n *StateTrackingNode) newGroup() *stateTrackingGroup {\n\t// Create a new tracking group\n\tg := &stateTrackingGroup{\n\t\tn: n,\n\t}\n\n\tg.Expression = n.expr.CopyReset()\n\n\tg.tracker = n.newTracker()\n\treturn g\n}\n\nfunc (g *stateTrackingGroup) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) {\n\tg.tracker.reset()\n\treturn begin, nil\n}\n\nfunc (g *stateTrackingGroup) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) {\n\tbp = bp.ShallowCopy()\n\terr := g.track(bp)\n\tif err != nil {\n\t\tg.n.incrementErrorCount()\n\t\tg.n.logger.Println(\"E! error while evaluating expression:\", err)\n\t\treturn nil, nil\n\t}\n\treturn bp, nil\n}\n\nfunc (g *stateTrackingGroup) EndBatch(end edge.EndBatchMessage) (edge.Message, error) {\n\treturn end, nil\n}\n\nfunc (g *stateTrackingGroup) Point(p edge.PointMessage) (edge.Message, error) {\n\tp = p.ShallowCopy()\n\terr := g.track(p)\n\tif err != nil {\n\t\tg.n.incrementErrorCount()\n\t\tg.n.logger.Println(\"E! 
error while evaluating expression:\", err)\n\t\treturn nil, nil\n\t}\n\treturn p, nil\n}\n\nfunc (g *stateTrackingGroup) track(p edge.FieldsTagsTimeSetter) error {\n\tpass, err := EvalPredicate(g.Expression, g.n.scopePool, p)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfields := p.Fields().Copy()\n\tfields[g.n.as] = g.tracker.track(p.Time(), pass)\n\tp.SetFields(fields)\n\treturn nil\n}\n\nfunc (g *stateTrackingGroup) Barrier(b edge.BarrierMessage) (edge.Message, error) {\n\treturn b, nil\n}\nfunc (g *stateTrackingGroup) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) {\n\treturn d, nil\n}\n\ntype stateDurationTracker struct {\n\tsd *pipeline.StateDurationNode\n\n\tstartTime time.Time\n}\n\nfunc (sdt *stateDurationTracker) reset() {\n\tsdt.startTime = time.Time{}\n}\n\nfunc (sdt *stateDurationTracker) track(t time.Time, inState bool) interface{} {\n\tif !inState {\n\t\tsdt.startTime = time.Time{}\n\t\treturn float64(-1)\n\t}\n\n\tif sdt.startTime.IsZero() {\n\t\tsdt.startTime = t\n\t}\n\treturn float64(t.Sub(sdt.startTime)) / float64(sdt.sd.Unit)\n}\n\nfunc newStateDurationNode(et *ExecutingTask, sd *pipeline.StateDurationNode, l *log.Logger) (*StateTrackingNode, error) {\n\tif sd.Lambda == nil {\n\t\treturn nil, fmt.Errorf(\"nil expression passed to StateDurationNode\")\n\t}\n\t// Validate lambda expression\n\texpr, err := stateful.NewExpression(sd.Lambda.Expression)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tn := &StateTrackingNode{\n\t\tnode:       node{Node: sd, et: et, logger: l},\n\t\tas:         sd.As,\n\t\tnewTracker: func() stateTracker { return &stateDurationTracker{sd: sd} },\n\t\texpr:       expr,\n\t\tscopePool:  stateful.NewScopePool(ast.FindReferenceVariables(sd.Lambda.Expression)),\n\t}\n\tn.node.runF = n.runStateTracking\n\treturn n, nil\n}\n\ntype stateCountTracker struct {\n\tcount int64\n}\n\nfunc (sct *stateCountTracker) reset() {\n\tsct.count = 0\n}\n\nfunc (sct *stateCountTracker) track(t time.Time, inState bool) interface{} {\n\tif !inState {\n\t\tsct.count = 0\n\t\treturn int64(-1)\n\t}\n\n\tsct.count++\n\treturn sct.count\n}\n\nfunc newStateCountNode(et *ExecutingTask, sc *pipeline.StateCountNode, l *log.Logger) (*StateTrackingNode, error) {\n\tif sc.Lambda == nil {\n\t\treturn nil, fmt.Errorf(\"nil expression passed to StateCountNode\")\n\t}\n\t// Validate lambda expression\n\texpr, err := stateful.NewExpression(sc.Lambda.Expression)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tn := &StateTrackingNode{\n\t\tnode:       node{Node: sc, et: et, logger: l},\n\t\tas:         sc.As,\n\t\tnewTracker: func() stateTracker { return &stateCountTracker{} },\n\t\texpr:       expr,\n\t\tscopePool:  stateful.NewScopePool(ast.FindReferenceVariables(sc.Lambda.Expression)),\n\t}\n\tn.node.runF = n.runStateTracking\n\treturn n, nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/stats.go",
    "content": "package kapacitor\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n)\n\ntype StatsNode struct {\n\tnode\n\ts       *pipeline.StatsNode\n\ten      Node\n\tclosing chan struct{}\n\tclosed  bool\n\tmu      sync.Mutex\n}\n\n// Create a new  FromNode which filters data from a source.\nfunc newStatsNode(et *ExecutingTask, n *pipeline.StatsNode, l *log.Logger) (*StatsNode, error) {\n\t// Lookup the executing node for stats.\n\ten := et.lookup[n.SourceNode.ID()]\n\tif en == nil {\n\t\treturn nil, fmt.Errorf(\"no node found for %s\", n.SourceNode.Name())\n\t}\n\tsn := &StatsNode{\n\t\tnode:    node{Node: n, et: et, logger: l},\n\t\ts:       n,\n\t\ten:      en,\n\t\tclosing: make(chan struct{}),\n\t}\n\tsn.node.runF = sn.runStats\n\tsn.node.stopF = sn.stopStats\n\treturn sn, nil\n}\n\nfunc (n *StatsNode) runStats([]byte) error {\n\tif n.s.AlignFlag {\n\t\t// Wait till we are roughly aligned with the interval.\n\t\tnow := time.Now()\n\t\tnext := now.Truncate(n.s.Interval).Add(n.s.Interval)\n\t\tafter := time.NewTicker(next.Sub(now))\n\t\tselect {\n\t\tcase <-after.C:\n\t\t\tafter.Stop()\n\t\tcase <-n.closing:\n\t\t\tafter.Stop()\n\t\t\treturn nil\n\t\t}\n\t\tif err := n.emit(now); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tticker := time.NewTicker(n.s.Interval)\n\tdefer ticker.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-n.closing:\n\t\t\treturn nil\n\t\tcase now := <-ticker.C:\n\t\t\tif err := n.emit(now); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n}\n\n// Emit a set of stats data points.\nfunc (n *StatsNode) emit(now time.Time) error {\n\tn.timer.Start()\n\tdefer n.timer.Stop()\n\n\tname := \"stats\"\n\tt := now.UTC()\n\tif n.s.AlignFlag {\n\t\tt = t.Round(n.s.Interval)\n\t}\n\tstats := n.en.nodeStatsByGroup()\n\tfor _, stat := range stats {\n\t\tpoint := edge.NewPointMessage(\n\t\t\tname, \"\", \"\",\n\t\t\tstat.Dimensions,\n\t\t\tstat.Fields,\n\t\t\tstat.Tags,\n\t\t\tt,\n\t\t)\n\t\tn.timer.Pause()\n\t\tfor _, out := range n.outs {\n\t\t\terr := out.Collect(point)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tn.timer.Resume()\n\t}\n\treturn nil\n}\n\nfunc (n *StatsNode) stopStats() {\n\tn.mu.Lock()\n\tdefer n.mu.Unlock()\n\tif !n.closed {\n\t\tn.closed = true\n\t\tclose(n.closing)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/stream.go",
    "content": "package kapacitor\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/models\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n\t\"github.com/influxdata/kapacitor/tick/ast\"\n\t\"github.com/influxdata/kapacitor/tick/stateful\"\n)\n\ntype StreamNode struct {\n\tnode\n\ts *pipeline.StreamNode\n}\n\n// Create a new  StreamNode which copies all data to children\nfunc newStreamNode(et *ExecutingTask, n *pipeline.StreamNode, l *log.Logger) (*StreamNode, error) {\n\tsn := &StreamNode{\n\t\tnode: node{Node: n, et: et, logger: l},\n\t\ts:    n,\n\t}\n\tsn.node.runF = sn.runSourceStream\n\treturn sn, nil\n}\n\nfunc (n *StreamNode) runSourceStream([]byte) error {\n\tfor m, ok := n.ins[0].Emit(); ok; m, ok = n.ins[0].Emit() {\n\t\tfor _, child := range n.outs {\n\t\t\terr := child.Collect(m)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\ntype FromNode struct {\n\tnode\n\ts             *pipeline.FromNode\n\texpression    stateful.Expression\n\tscopePool     stateful.ScopePool\n\ttagNames      []string\n\tallDimensions bool\n\tdb            string\n\trp            string\n\tname          string\n}\n\n// Create a new  FromNode which filters data from a source.\nfunc newFromNode(et *ExecutingTask, n *pipeline.FromNode, l *log.Logger) (*FromNode, error) {\n\tsn := &FromNode{\n\t\tnode: node{Node: n, et: et, logger: l},\n\t\ts:    n,\n\t\tdb:   n.Database,\n\t\trp:   n.RetentionPolicy,\n\t\tname: n.Measurement,\n\t}\n\tsn.node.runF = sn.runStream\n\tsn.allDimensions, sn.tagNames = determineTagNames(n.Dimensions, nil)\n\n\tif n.Lambda != nil {\n\t\texpr, err := stateful.NewExpression(n.Lambda.Expression)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Failed to compile from expression: %v\", err)\n\t\t}\n\n\t\tsn.expression = expr\n\t\tsn.scopePool = stateful.NewScopePool(ast.FindReferenceVariables(n.Lambda.Expression))\n\t}\n\n\treturn sn, nil\n}\n\nfunc (n *FromNode) runStream([]byte) error {\n\tconsumer := edge.NewConsumerWithReceiver(\n\t\tn.ins[0],\n\t\tedge.NewReceiverFromForwardReceiverWithStats(\n\t\t\tn.outs,\n\t\t\tedge.NewTimedForwardReceiver(n.timer, n),\n\t\t),\n\t)\n\treturn consumer.Consume()\n}\nfunc (n *FromNode) BeginBatch(edge.BeginBatchMessage) (edge.Message, error) {\n\treturn nil, errors.New(\"from does not support batch data\")\n}\nfunc (n *FromNode) BatchPoint(edge.BatchPointMessage) (edge.Message, error) {\n\treturn nil, errors.New(\"from does not support batch data\")\n}\nfunc (n *FromNode) EndBatch(edge.EndBatchMessage) (edge.Message, error) {\n\treturn nil, errors.New(\"from does not support batch data\")\n}\n\nfunc (n *FromNode) Point(p edge.PointMessage) (edge.Message, error) {\n\tif n.matches(p) {\n\t\tp = p.ShallowCopy()\n\t\tif n.s.Truncate != 0 {\n\t\t\tp.SetTime(p.Time().Truncate(n.s.Truncate))\n\t\t}\n\t\tif n.s.Round != 0 {\n\t\t\tp.SetTime(p.Time().Round(n.s.Round))\n\t\t}\n\t\tp.SetDimensions(models.Dimensions{\n\t\t\tByName:   n.s.GroupByMeasurementFlag,\n\t\t\tTagNames: computeTagNames(p.Tags(), n.allDimensions, n.tagNames, nil),\n\t\t})\n\t\treturn p, nil\n\t}\n\treturn nil, nil\n}\n\nfunc (n *FromNode) Barrier(b edge.BarrierMessage) (edge.Message, error) {\n\treturn b, nil\n}\nfunc (n *FromNode) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) {\n\treturn d, nil\n}\n\nfunc (n *FromNode) matches(p edge.PointMessage) bool {\n\tif n.db != \"\" && p.Database() != n.db {\n\t\treturn false\n\t}\n\tif n.rp != \"\" && 
p.RetentionPolicy() != n.rp {\n\t\treturn false\n\t}\n\tif n.name != \"\" && p.Name() != n.name {\n\t\treturn false\n\t}\n\tif n.expression != nil {\n\t\tif pass, err := EvalPredicate(n.expression, n.scopePool, p); err != nil {\n\t\t\tn.incrementErrorCount()\n\t\t\tn.logger.Println(\"E! error while evaluating WHERE expression:\", err)\n\t\t\treturn false\n\t\t} else {\n\t\t\treturn pass\n\t\t}\n\t}\n\treturn true\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/task.go",
    "content": "package kapacitor\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"math/rand\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n)\n\n// The type of a task\ntype TaskType int\n\nconst (\n\tStreamTask TaskType = iota\n\tBatchTask\n)\n\nfunc (t TaskType) String() string {\n\tswitch t {\n\tcase StreamTask:\n\t\treturn \"stream\"\n\tcase BatchTask:\n\t\treturn \"batch\"\n\tdefault:\n\t\treturn \"unknown\"\n\t}\n}\n\nfunc (t TaskType) MarshalText() ([]byte, error) {\n\treturn []byte(t.String()), nil\n}\n\nfunc (t *TaskType) UnmarshalText(text []byte) error {\n\tswitch string(text) {\n\tcase \"stream\":\n\t\t*t = StreamTask\n\tcase \"batch\":\n\t\t*t = BatchTask\n\tdefault:\n\t\treturn fmt.Errorf(\"unknown task type %s\", string(text))\n\t}\n\treturn nil\n}\n\ntype DBRP struct {\n\tDatabase        string `json:\"db\"`\n\tRetentionPolicy string `json:\"rp\"`\n}\n\nfunc CreateDBRPMap(dbrps []DBRP) map[DBRP]bool {\n\tdbMap := make(map[DBRP]bool, len(dbrps))\n\tfor _, dbrp := range dbrps {\n\t\tdbMap[dbrp] = true\n\t}\n\treturn dbMap\n}\n\nfunc (d DBRP) String() string {\n\treturn fmt.Sprintf(\"%q.%q\", d.Database, d.RetentionPolicy)\n}\n\n// The complete definition of a task, its id, pipeline and type.\ntype Task struct {\n\tID               string\n\tPipeline         *pipeline.Pipeline\n\tType             TaskType\n\tDBRPs            []DBRP\n\tSnapshotInterval time.Duration\n}\n\nfunc (t *Task) Dot() []byte {\n\treturn t.Pipeline.Dot(t.ID)\n}\n\n// returns all the measurements from a FromNode\nfunc (t *Task) Measurements() []string {\n\tmeasurements := make([]string, 0)\n\n\t_ = t.Pipeline.Walk(func(node pipeline.Node) error {\n\t\tswitch streamNode := node.(type) {\n\t\tcase *pipeline.FromNode:\n\t\t\tmeasurements = append(measurements, streamNode.Measurement)\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn measurements\n}\n\n// ----------------------------------\n// ExecutingTask\n\n// A task that is ready for execution.\ntype ExecutingTask struct {\n\ttm      *TaskMaster\n\tTask    *Task\n\tsource  Node\n\toutputs map[string]Output\n\t// node lookup from pipeline.ID -> Node\n\tlookup   map[pipeline.ID]Node\n\tnodes    []Node\n\tstopping chan struct{}\n\twg       sync.WaitGroup\n\tlogger   *log.Logger\n\n\t// Mutex for throughput var\n\ttmu        sync.RWMutex\n\tthroughput float64\n}\n\n// Create a new  task from a defined kapacitor.\nfunc NewExecutingTask(tm *TaskMaster, t *Task) (*ExecutingTask, error) {\n\tl := tm.LogService.NewLogger(fmt.Sprintf(\"[task:%s] \", t.ID), log.LstdFlags)\n\tet := &ExecutingTask{\n\t\ttm:      tm,\n\t\tTask:    t,\n\t\toutputs: make(map[string]Output),\n\t\tlookup:  make(map[pipeline.ID]Node),\n\t\tlogger:  l,\n\t}\n\terr := et.link()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn et, nil\n}\n\n// walks the entire pipeline applying function f.\nfunc (et *ExecutingTask) walk(f func(n Node) error) error {\n\tfor _, n := range et.nodes {\n\t\terr := f(n)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// walks the entire pipeline in reverse order applying function f.\nfunc (et *ExecutingTask) rwalk(f func(n Node) error) error {\n\tfor i := len(et.nodes) - 1; i >= 0; i-- {\n\t\terr := f(et.nodes[i])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// Link all the nodes together based on the task pipeline.\nfunc (et *ExecutingTask) link() error {\n\n\t// Walk Pipeline and create equivalent executing nodes\n\terr := 
et.Task.Pipeline.Walk(func(n pipeline.Node) error {\n\t\tl := et.tm.LogService.NewLogger(\n\t\t\tfmt.Sprintf(\"[%s:%s] \", et.Task.ID, n.Name()),\n\t\t\tlog.LstdFlags,\n\t\t)\n\t\ten, err := et.createNode(n, l)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tet.lookup[n.ID()] = en\n\t\t// Save the walk order\n\t\tet.nodes = append(et.nodes, en)\n\t\t// Duplicate the Edges\n\t\tfor _, p := range n.Parents() {\n\t\t\tep := et.lookup[p.ID()]\n\t\t\terr := ep.linkChild(en)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// The first node is always the source node\n\tet.source = et.nodes[0]\n\treturn nil\n}\n\n// Start the task.\nfunc (et *ExecutingTask) start(ins []edge.StatsEdge, snapshot *TaskSnapshot) error {\n\n\tfor _, in := range ins {\n\t\tet.source.addParentEdge(in)\n\t}\n\tvalidSnapshot := false\n\tif snapshot != nil {\n\t\terr := et.walk(func(n Node) error {\n\t\t\t_, ok := snapshot.NodeSnapshots[n.Name()]\n\t\t\tif !ok {\n\t\t\t\treturn fmt.Errorf(\"task pipeline changed, not using snapshot\")\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t\tvalidSnapshot = err == nil\n\t}\n\n\terr := et.walk(func(n Node) error {\n\t\tif validSnapshot {\n\t\t\tn.start(snapshot.NodeSnapshots[n.Name()])\n\t\t} else {\n\t\t\tn.start(nil)\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tet.stopping = make(chan struct{})\n\tif et.Task.SnapshotInterval > 0 {\n\t\tet.wg.Add(1)\n\t\tgo et.runSnapshotter()\n\t}\n\t// Start calcThroughput\n\tet.wg.Add(1)\n\tgo et.calcThroughput()\n\treturn nil\n}\n\nfunc (et *ExecutingTask) stop() (err error) {\n\tclose(et.stopping)\n\t_ = et.walk(func(n Node) error {\n\t\tn.stop()\n\t\te := n.Wait()\n\t\tif e != nil {\n\t\t\terr = e\n\t\t}\n\t\treturn nil\n\t})\n\tet.wg.Wait()\n\treturn\n}\n\nvar ErrWrongTaskType = errors.New(\"wrong task type\")\n\n// Instruct source batch node to start querying and sending batches of data\nfunc (et *ExecutingTask) StartBatching() error {\n\tif et.Task.Type != BatchTask {\n\t\treturn ErrWrongTaskType\n\t}\n\n\tbatcher := et.source.(*BatchNode)\n\n\terr := et.checkDBRPs(batcher)\n\tif err != nil {\n\t\tbatcher.Abort()\n\t\treturn err\n\t}\n\n\tbatcher.Start()\n\treturn nil\n}\n\nfunc (et *ExecutingTask) BatchCount() (int, error) {\n\tif et.Task.Type != BatchTask {\n\t\treturn 0, ErrWrongTaskType\n\t}\n\n\tbatcher := et.source.(*BatchNode)\n\treturn batcher.Count(), nil\n}\n\n// Get the batch queries that the batcher will run between times `start` and `stop`.\nfunc (et *ExecutingTask) BatchQueries(start, stop time.Time) ([]BatchQueries, error) {\n\tif et.Task.Type != BatchTask {\n\t\treturn nil, ErrWrongTaskType\n\t}\n\n\tbatcher := et.source.(*BatchNode)\n\n\terr := et.checkDBRPs(batcher)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn batcher.Queries(start, stop)\n}\n\n// Check that the task allows access to DBRPs\nfunc (et *ExecutingTask) checkDBRPs(batcher *BatchNode) error {\n\tdbMap := CreateDBRPMap(et.Task.DBRPs)\n\tdbrps, err := batcher.DBRPs()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, dbrp := range dbrps {\n\t\tif !dbMap[dbrp] {\n\t\t\treturn fmt.Errorf(\"batch query is not allowed to request data from %v\", dbrp)\n\t\t}\n\t}\n\treturn nil\n}\n\n// Stop all stats nodes\nfunc (et *ExecutingTask) StopStats() {\n\t_ = et.walk(func(n Node) error {\n\t\tif s, ok := n.(*StatsNode); ok {\n\t\t\ts.stopStats()\n\t\t}\n\t\treturn nil\n\t})\n}\n\n// Wait till the task finishes and return any error\nfunc (et *ExecutingTask) Wait() 
error {\n\treturn et.rwalk(func(n Node) error {\n\t\treturn n.Wait()\n\t})\n}\n\n// Get a named output.\nfunc (et *ExecutingTask) GetOutput(name string) (Output, error) {\n\tif o, ok := et.outputs[name]; ok {\n\t\treturn o, nil\n\t} else {\n\t\treturn nil, fmt.Errorf(\"unknown output %s\", name)\n\t}\n}\n\n// Register a named output.\nfunc (et *ExecutingTask) registerOutput(name string, o Output) {\n\tet.outputs[name] = o\n}\n\ntype ExecutionStats struct {\n\tTaskStats map[string]interface{}\n\tNodeStats map[string]map[string]interface{}\n}\n\nfunc (et *ExecutingTask) ExecutionStats() (ExecutionStats, error) {\n\texecutionStats := ExecutionStats{\n\t\tTaskStats: make(map[string]interface{}),\n\t\tNodeStats: make(map[string]map[string]interface{}),\n\t}\n\n\t// Fill the task stats\n\texecutionStats.TaskStats[\"throughput\"] = et.getThroughput()\n\n\t// Fill the nodes stats\n\terr := et.walk(func(node Node) error {\n\t\tnodeStats := node.stats()\n\n\t\t// Add collected and emitted\n\t\tnodeStats[\"collected\"] = node.collectedCount()\n\t\tnodeStats[\"emitted\"] = node.emittedCount()\n\n\t\texecutionStats.NodeStats[node.Name()] = nodeStats\n\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\treturn executionStats, err\n\t}\n\n\treturn executionStats, nil\n}\n\n// Return a graphviz .dot formatted byte array.\n// Label edges with relevant execution information.\nfunc (et *ExecutingTask) EDot(labels bool) []byte {\n\n\tvar buf bytes.Buffer\n\n\tbuf.WriteString(\"digraph \")\n\tbuf.WriteString(et.Task.ID)\n\tbuf.WriteString(\" {\\n\")\n\t// Write graph attributes\n\tunit := \"points\"\n\tif et.Task.Type == BatchTask {\n\t\tunit = \"batches\"\n\t}\n\tbuf.WriteString(\"graph [\")\n\tif labels {\n\t\tbuf.WriteString(\n\t\t\tfmt.Sprintf(\"label=\\\"Throughput: %0.2f %s/s\\\" forcelabels=true pad=\\\"0.8,0.5\\\"\",\n\t\t\t\tet.getThroughput(),\n\t\t\t\tunit,\n\t\t\t),\n\t\t)\n\t} else {\n\t\tbuf.WriteString(\n\t\t\tfmt.Sprintf(\"throughput=\\\"%0.2f %s/s\\\"\",\n\t\t\t\tet.getThroughput(),\n\t\t\t\tunit,\n\t\t\t),\n\t\t)\n\t}\n\tbuf.WriteString(\"];\\n\")\n\n\t_ = et.walk(func(n Node) error {\n\t\tn.edot(&buf, labels)\n\t\treturn nil\n\t})\n\tbuf.Write([]byte(\"}\"))\n\n\treturn buf.Bytes()\n}\n\n// Return the current throughput value.\nfunc (et *ExecutingTask) getThroughput() float64 {\n\tet.tmu.RLock()\n\tdefer et.tmu.RUnlock()\n\treturn et.throughput\n}\n\nfunc (et *ExecutingTask) calcThroughput() {\n\tdefer et.wg.Done()\n\tvar previous int64\n\tlast := time.Now()\n\tticker := time.NewTicker(time.Second)\n\tdefer ticker.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tcurrent := et.source.collectedCount()\n\t\t\tnow := time.Now()\n\t\t\telapsed := float64(now.Sub(last)) / float64(time.Second)\n\n\t\t\tet.tmu.Lock()\n\t\t\tet.throughput = float64(current-previous) / elapsed\n\t\t\tet.tmu.Unlock()\n\n\t\t\tlast = now\n\t\t\tprevious = current\n\n\t\tcase <-et.stopping:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// Create a node from a given pipeline node.\nfunc (et *ExecutingTask) createNode(p pipeline.Node, l *log.Logger) (n Node, err error) {\n\tswitch t := p.(type) {\n\tcase *pipeline.FromNode:\n\t\tn, err = newFromNode(et, t, l)\n\tcase *pipeline.StreamNode:\n\t\tn, err = newStreamNode(et, t, l)\n\tcase *pipeline.BatchNode:\n\t\tn, err = newBatchNode(et, t, l)\n\tcase *pipeline.QueryNode:\n\t\tn, err = newQueryNode(et, t, l)\n\tcase *pipeline.WindowNode:\n\t\tn, err = newWindowNode(et, t, l)\n\tcase *pipeline.HTTPOutNode:\n\t\tn, err = newHTTPOutNode(et, t, l)\n\tcase *pipeline.HTTPPostNode:\n\t\tn, 
err = newHTTPPostNode(et, t, l)\n\tcase *pipeline.InfluxDBOutNode:\n\t\tn, err = newInfluxDBOutNode(et, t, l)\n\tcase *pipeline.KapacitorLoopbackNode:\n\t\tn, err = newKapacitorLoopbackNode(et, t, l)\n\tcase *pipeline.AlertNode:\n\t\tn, err = newAlertNode(et, t, l)\n\tcase *pipeline.GroupByNode:\n\t\tn, err = newGroupByNode(et, t, l)\n\tcase *pipeline.UnionNode:\n\t\tn, err = newUnionNode(et, t, l)\n\tcase *pipeline.JoinNode:\n\t\tn, err = newJoinNode(et, t, l)\n\tcase *pipeline.FlattenNode:\n\t\tn, err = newFlattenNode(et, t, l)\n\tcase *pipeline.EvalNode:\n\t\tn, err = newEvalNode(et, t, l)\n\tcase *pipeline.WhereNode:\n\t\tn, err = newWhereNode(et, t, l)\n\tcase *pipeline.SampleNode:\n\t\tn, err = newSampleNode(et, t, l)\n\tcase *pipeline.DerivativeNode:\n\t\tn, err = newDerivativeNode(et, t, l)\n\tcase *pipeline.UDFNode:\n\t\tn, err = newUDFNode(et, t, l)\n\tcase *pipeline.StatsNode:\n\t\tn, err = newStatsNode(et, t, l)\n\tcase *pipeline.ShiftNode:\n\t\tn, err = newShiftNode(et, t, l)\n\tcase *pipeline.NoOpNode:\n\t\tn, err = newNoOpNode(et, t, l)\n\tcase *pipeline.InfluxQLNode:\n\t\tn, err = newInfluxQLNode(et, t, l)\n\tcase *pipeline.LogNode:\n\t\tn, err = newLogNode(et, t, l)\n\tcase *pipeline.DefaultNode:\n\t\tn, err = newDefaultNode(et, t, l)\n\tcase *pipeline.DeleteNode:\n\t\tn, err = newDeleteNode(et, t, l)\n\tcase *pipeline.CombineNode:\n\t\tn, err = newCombineNode(et, t, l)\n\tcase *pipeline.K8sAutoscaleNode:\n\t\tn, err = newK8sAutoscaleNode(et, t, l)\n\tcase *pipeline.SwarmAutoscaleNode:\n\t\tn, err = newSwarmAutoscaleNode(et, t, l)\n\tcase *pipeline.StateDurationNode:\n\t\tn, err = newStateDurationNode(et, t, l)\n\tcase *pipeline.StateCountNode:\n\t\tn, err = newStateCountNode(et, t, l)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown pipeline node type %T\", p)\n\t}\n\tif err == nil && n != nil {\n\t\tn.init()\n\t}\n\treturn n, err\n}\n\ntype TaskSnapshot struct {\n\tNodeSnapshots map[string][]byte\n}\n\nfunc (et *ExecutingTask) Snapshot() (*TaskSnapshot, error) {\n\tsnapshot := &TaskSnapshot{\n\t\tNodeSnapshots: make(map[string][]byte),\n\t}\n\terr := et.walk(func(n Node) error {\n\t\tdata, err := n.snapshot()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tsnapshot.NodeSnapshots[n.Name()] = data\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn snapshot, nil\n}\n\nfunc (et *ExecutingTask) runSnapshotter() {\n\tdefer et.wg.Done()\n\t// Wait random duration to splay snapshot events across interval\n\tselect {\n\tcase <-time.After(time.Duration(rand.Float64() * float64(et.Task.SnapshotInterval))):\n\tcase <-et.stopping:\n\t\treturn\n\t}\n\tticker := time.NewTicker(et.Task.SnapshotInterval)\n\tdefer ticker.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\tsnapshot, err := et.Snapshot()\n\t\t\tif err != nil {\n\t\t\t\tet.logger.Println(\"E! failed to snapshot task\", et.Task.ID, err)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tsize := 0\n\t\t\tfor _, data := range snapshot.NodeSnapshots {\n\t\t\t\tsize += len(data)\n\t\t\t}\n\t\t\t// Only save the snapshot if it has content\n\t\t\tif size > 0 {\n\t\t\t\terr = et.tm.TaskStore.SaveSnapshot(et.Task.ID, snapshot)\n\t\t\t\tif err != nil {\n\t\t\t\t\tet.logger.Println(\"E! failed to save task snapshot\", et.Task.ID, err)\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-et.stopping:\n\t\t\treturn\n\t\t}\n\t}\n}\n"
  },
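  {
    "path": "examples/task_metadata/main.go",
    "content": "// Package main is a minimal sketch (not part of upstream Kapacitor) of the\n// small task metadata helpers in task.go: TaskType round-trips through its\n// text form, DBRP prints as a quoted \"db\".\"rp\" pair, and CreateDBRPMap builds\n// the set used for DBRP access checks. Values are illustrative only.\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/influxdata/kapacitor\"\n)\n\nfunc main() {\n\tvar tt kapacitor.TaskType\n\tif err := tt.UnmarshalText([]byte(\"batch\")); err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(tt) // batch\n\n\tdbrp := kapacitor.DBRP{Database: \"telegraf\", RetentionPolicy: \"autogen\"}\n\tfmt.Println(dbrp) // \"telegraf\".\"autogen\"\n\n\tallowed := kapacitor.CreateDBRPMap([]kapacitor.DBRP{dbrp})\n\tfmt.Println(allowed[dbrp]) // true\n}\n"
  },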
  {
    "path": "vendor/github.com/influxdata/kapacitor/task_master.go",
    "content": "package kapacitor\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n\n\timodels \"github.com/influxdata/influxdb/models\"\n\t\"github.com/influxdata/kapacitor/alert\"\n\t\"github.com/influxdata/kapacitor/command\"\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/expvar\"\n\t\"github.com/influxdata/kapacitor/influxdb\"\n\t\"github.com/influxdata/kapacitor/models\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n\t\"github.com/influxdata/kapacitor/server/vars\"\n\talertservice \"github.com/influxdata/kapacitor/services/alert\"\n\t\"github.com/influxdata/kapacitor/services/alerta\"\n\t\"github.com/influxdata/kapacitor/services/hipchat\"\n\t\"github.com/influxdata/kapacitor/services/httpd\"\n\t\"github.com/influxdata/kapacitor/services/httppost\"\n\tk8s \"github.com/influxdata/kapacitor/services/k8s/client\"\n\t\"github.com/influxdata/kapacitor/services/mqtt\"\n\t\"github.com/influxdata/kapacitor/services/opsgenie\"\n\t\"github.com/influxdata/kapacitor/services/pagerduty\"\n\t\"github.com/influxdata/kapacitor/services/pushover\"\n\t\"github.com/influxdata/kapacitor/services/sensu\"\n\t\"github.com/influxdata/kapacitor/services/slack\"\n\t\"github.com/influxdata/kapacitor/services/smtp\"\n\t\"github.com/influxdata/kapacitor/services/snmptrap\"\n\tswarm \"github.com/influxdata/kapacitor/services/swarm/client\"\n\t\"github.com/influxdata/kapacitor/services/telegram\"\n\t\"github.com/influxdata/kapacitor/services/victorops\"\n\t\"github.com/influxdata/kapacitor/tick\"\n\t\"github.com/influxdata/kapacitor/tick/stateful\"\n\t\"github.com/influxdata/kapacitor/timer\"\n\t\"github.com/influxdata/kapacitor/udf\"\n)\n\nconst (\n\tstatPointsReceived = \"points_received\"\n\tMainTaskMaster     = \"main\"\n)\n\ntype LogService interface {\n\tNewLogger(prefix string, flag int) *log.Logger\n}\n\ntype UDFService interface {\n\tList() []string\n\tInfo(name string) (udf.Info, bool)\n\tCreate(name, taskID, nodeID string, l *log.Logger, abortCallback func()) (udf.Interface, error)\n}\n\nvar ErrTaskMasterClosed = errors.New(\"TaskMaster is closed\")\nvar ErrTaskMasterOpen = errors.New(\"TaskMaster is open\")\n\ntype deleteHook func(*TaskMaster)\n\n// An execution framework for  a set of tasks.\ntype TaskMaster struct {\n\t// Unique id for this task master instance\n\tid string\n\n\tServerInfo vars.Infoer\n\n\tHTTPDService interface {\n\t\tAddRoutes([]httpd.Route) error\n\t\tDelRoutes([]httpd.Route)\n\t\tURL() string\n\t}\n\tTaskStore interface {\n\t\tSaveSnapshot(id string, snapshot *TaskSnapshot) error\n\t\tHasSnapshot(id string) bool\n\t\tLoadSnapshot(id string) (*TaskSnapshot, error)\n\t}\n\tDeadmanService pipeline.DeadmanService\n\n\tUDFService UDFService\n\n\tAlertService interface {\n\t\talertservice.AnonHandlerRegistrar\n\t\talertservice.Events\n\t\talertservice.TopicPersister\n\t}\n\tInfluxDBService interface {\n\t\tNewNamedClient(name string) (influxdb.Client, error)\n\t}\n\tSMTPService interface {\n\t\tGlobal() bool\n\t\tStateChangesOnly() bool\n\t\tHandler(smtp.HandlerConfig, *log.Logger) alert.Handler\n\t}\n\tMQTTService interface {\n\t\tHandler(mqtt.HandlerConfig, *log.Logger) alert.Handler\n\t}\n\n\tOpsGenieService interface {\n\t\tGlobal() bool\n\t\tHandler(opsgenie.HandlerConfig, *log.Logger) alert.Handler\n\t}\n\tVictorOpsService interface {\n\t\tGlobal() bool\n\t\tHandler(victorops.HandlerConfig, *log.Logger) alert.Handler\n\t}\n\tPagerDutyService interface {\n\t\tGlobal() bool\n\t\tHandler(pagerduty.HandlerConfig, 
PushoverService interface {\n\t\tHandler(pushover.HandlerConfig, *log.Logger) alert.Handler\n\t}\n\tHTTPPostService interface {\n\t\tHandler(httppost.HandlerConfig, *log.Logger) alert.Handler\n\t\tEndpoint(string) (*httppost.Endpoint, bool)\n\t}\n\tSlackService interface {\n\t\tGlobal() bool\n\t\tStateChangesOnly() bool\n\t\tHandler(slack.HandlerConfig, *log.Logger) alert.Handler\n\t}\n\tSNMPTrapService interface {\n\t\tHandler(snmptrap.HandlerConfig, *log.Logger) (alert.Handler, error)\n\t}\n\tTelegramService interface {\n\t\tGlobal() bool\n\t\tStateChangesOnly() bool\n\t\tHandler(telegram.HandlerConfig, *log.Logger) alert.Handler\n\t}\n\tHipChatService interface {\n\t\tGlobal() bool\n\t\tStateChangesOnly() bool\n\t\tHandler(hipchat.HandlerConfig, *log.Logger) alert.Handler\n\t}\n\tAlertaService interface {\n\t\tDefaultHandlerConfig() alerta.HandlerConfig\n\t\tHandler(alerta.HandlerConfig, *log.Logger) (alert.Handler, error)\n\t}\n\tSensuService interface {\n\t\tHandler(sensu.HandlerConfig, *log.Logger) (alert.Handler, error)\n\t}\n\tTalkService interface {\n\t\tHandler(*log.Logger) alert.Handler\n\t}\n\tTimingService interface {\n\t\tNewTimer(timer.Setter) timer.Timer\n\t}\n\tK8sService interface {\n\t\tClient(string) (k8s.Client, error)\n\t}\n\tSwarmService interface {\n\t\tClient(string) (swarm.Client, error)\n\t}\n\tLogService LogService\n\n\tCommander command.Commander\n\n\tDefaultRetentionPolicy string\n\n\t// Incoming streams\n\twritePointsIn StreamCollector\n\twritesClosed  bool\n\twritesMu      sync.RWMutex\n\n\t// Forks of incoming streams\n\t// We are mapping from (db, rp, measurement) to map of task ids to their edges\n\t// The outer map (from dbrp&measurement) is for fast access on forkPoint,\n\t// while the inner map is for handling fork deletions better (see taskToForkKeys)\n\tforks map[forkKey]map[string]edge.Edge\n\n\t// Stats for number of points each fork has received\n\tforkStats map[forkKey]*expvar.Int\n\n\t// Map from task id to its fork keys; used to handle deletes, since\n\t// deletes have only the task id and are called after the task is deleted from TaskMaster.tasks\n\ttaskToForkKeys map[string][]forkKey\n\n\t// Set of incoming batches\n\tbatches map[string][]BatchCollector\n\n\t// Executing tasks\n\ttasks map[string]*ExecutingTask\n\n\t// DeleteHooks for tasks\n\tdeleteHooks map[string][]deleteHook\n\n\tlogger *log.Logger\n\n\tclosed  bool\n\tdrained bool\n\tmu      sync.RWMutex\n\twg      sync.WaitGroup\n}\n\ntype forkKey struct {\n\tDatabase        string\n\tRetentionPolicy string\n\tMeasurement     string\n}\n\n// Create a new TaskMaster with the given id.\nfunc NewTaskMaster(id string, info vars.Infoer, l LogService) *TaskMaster {\n\treturn &TaskMaster{\n\t\tid:             id,\n\t\tforks:          make(map[forkKey]map[string]edge.Edge),\n\t\tforkStats:      make(map[forkKey]*expvar.Int),\n\t\ttaskToForkKeys: make(map[string][]forkKey),\n\t\tbatches:        make(map[string][]BatchCollector),\n\t\ttasks:          make(map[string]*ExecutingTask),\n\t\tdeleteHooks:    make(map[string][]deleteHook),\n\t\tLogService:     l,\n\t\tServerInfo:     info,\n\t\tlogger:         l.NewLogger(fmt.Sprintf(\"[task_master:%s] \", id), log.LstdFlags),\n\t\tclosed:         true,\n\t\tTimingService:  noOpTimingService{},\n\t}\n}\n\n// Returns a new TaskMaster instance with the same services as the current one.\nfunc (tm *TaskMaster) New(id string) *TaskMaster {\n\tn := NewTaskMaster(id, tm.ServerInfo, tm.LogService)\n\t
n.DefaultRetentionPolicy = tm.DefaultRetentionPolicy\n\tn.HTTPDService = tm.HTTPDService\n\tn.TaskStore = tm.TaskStore\n\tn.DeadmanService = tm.DeadmanService\n\tn.UDFService = tm.UDFService\n\tn.AlertService = tm.AlertService\n\tn.InfluxDBService = tm.InfluxDBService\n\tn.SMTPService = tm.SMTPService\n\tn.MQTTService = tm.MQTTService\n\tn.OpsGenieService = tm.OpsGenieService\n\tn.VictorOpsService = tm.VictorOpsService\n\tn.PagerDutyService = tm.PagerDutyService\n\tn.PushoverService = tm.PushoverService\n\tn.HTTPPostService = tm.HTTPPostService\n\tn.SlackService = tm.SlackService\n\tn.TelegramService = tm.TelegramService\n\tn.SNMPTrapService = tm.SNMPTrapService\n\tn.HipChatService = tm.HipChatService\n\tn.AlertaService = tm.AlertaService\n\tn.SensuService = tm.SensuService\n\tn.TalkService = tm.TalkService\n\tn.TimingService = tm.TimingService\n\tn.K8sService = tm.K8sService\n\tn.SwarmService = tm.SwarmService\n\tn.Commander = tm.Commander\n\treturn n\n}\n\nfunc (tm *TaskMaster) ID() string {\n\treturn tm.id\n}\n\nfunc (tm *TaskMaster) Open() (err error) {\n\ttm.mu.Lock()\n\tdefer tm.mu.Unlock()\n\tif !tm.closed {\n\t\treturn ErrTaskMasterOpen\n\t}\n\ttm.closed = false\n\ttm.drained = false\n\ttm.writePointsIn, err = tm.stream(\"write_points\")\n\tif err != nil {\n\t\ttm.closed = true\n\t\treturn\n\t}\n\ttm.logger.Println(\"I! opened\")\n\treturn\n}\n\nfunc (tm *TaskMaster) StopTasks() {\n\ttm.mu.Lock()\n\tdefer tm.mu.Unlock()\n\tfor _, et := range tm.tasks {\n\t\t_ = tm.stopTask(et.Task.ID)\n\t}\n}\n\nfunc (tm *TaskMaster) Close() error {\n\ttm.mu.Lock()\n\tclosed := tm.closed\n\ttm.mu.Unlock()\n\n\tif closed {\n\t\treturn ErrTaskMasterClosed\n\t}\n\n\ttm.Drain()\n\n\ttm.mu.Lock()\n\tdefer tm.mu.Unlock()\n\ttm.closed = true\n\tfor _, et := range tm.tasks {\n\t\t_ = tm.stopTask(et.Task.ID)\n\t}\n\ttm.logger.Println(\"I! closed\")\n\treturn nil\n}\n\nfunc (tm *TaskMaster) Drain() {\n\ttm.waitForForks()\n\ttm.mu.Lock()\n\tdefer tm.mu.Unlock()\n\n\tfor id := range tm.taskToForkKeys {\n\t\ttm.delFork(id)\n\t}\n}\n\n// Create a new template in the context of a TaskMaster\nfunc (tm *TaskMaster) NewTemplate(\n\tid,\n\tscript string,\n\ttt TaskType,\n) (*Template, error) {\n\tt := &Template{\n\t\tid: id,\n\t}\n\tscope := tm.CreateTICKScope()\n\n\tvar srcEdge pipeline.EdgeType\n\tswitch tt {\n\tcase StreamTask:\n\t\tsrcEdge = pipeline.StreamEdge\n\tcase BatchTask:\n\t\tsrcEdge = pipeline.BatchEdge\n\t}\n\n\ttp, err := pipeline.CreateTemplatePipeline(script, srcEdge, scope, tm.DeadmanService)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tt.tp = tp\n\treturn t, nil\n}\n\n// Create a new task in the context of a TaskMaster\nfunc (tm *TaskMaster) NewTask(\n\tid,\n\tscript string,\n\ttt TaskType,\n\tdbrps []DBRP,\n\tsnapshotInterval time.Duration,\n\tvars map[string]tick.Var,\n) (*Task, error) {\n\tt := &Task{\n\t\tID:               id,\n\t\tType:             tt,\n\t\tDBRPs:            dbrps,\n\t\tSnapshotInterval: snapshotInterval,\n\t}\n\tscope := tm.CreateTICKScope()\n\n\tvar srcEdge pipeline.EdgeType\n\tswitch tt {\n\tcase StreamTask:\n\t\tsrcEdge = pipeline.StreamEdge\n\tcase BatchTask:\n\t\tsrcEdge = pipeline.BatchEdge\n\t}\n\n\tp, err := pipeline.CreatePipeline(script, srcEdge, scope, tm.DeadmanService, vars)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// A task will always have a stream or batch node.\n\t// If it doesn't have anything more, then the task does nothing with the data.\n\tif p.Len() <= 1 {\n\t\treturn nil, fmt.Errorf(\"task does nothing\")\n\t}\n\tt.Pipeline = p\n\treturn t, nil\n}\n\nfunc (tm *TaskMaster) waitForForks() {\n\ttm.mu.Lock()\n\tdrained := 
tm.drained\n\ttm.mu.Unlock()\n\n\tif drained {\n\t\treturn\n\t}\n\n\ttm.mu.Lock()\n\ttm.drained = true\n\ttm.mu.Unlock()\n\n\ttm.writesMu.Lock()\n\ttm.writesClosed = true\n\ttm.writesMu.Unlock()\n\n\t// Close the write points in stream\n\ttm.writePointsIn.Close()\n\n\t// Don't hold the lock while we wait\n\ttm.wg.Wait()\n}\n\nfunc (tm *TaskMaster) CreateTICKScope() *stateful.Scope {\n\tscope := stateful.NewScope()\n\tscope.Set(\"time\", groupByTime)\n\t// Add dynamic methods to the scope for UDFs\n\tif tm.UDFService != nil {\n\t\tfor _, f := range tm.UDFService.List() {\n\t\t\tf := f\n\t\t\tinfo, _ := tm.UDFService.Info(f)\n\t\t\tscope.SetDynamicMethod(\n\t\t\t\tf,\n\t\t\t\tfunc(self interface{}, args ...interface{}) (interface{}, error) {\n\t\t\t\t\tparent, ok := self.(pipeline.Node)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\treturn nil, fmt.Errorf(\"cannot call %s on %T\", f, self)\n\t\t\t\t\t}\n\t\t\t\t\tudf := pipeline.NewUDF(\n\t\t\t\t\t\tparent,\n\t\t\t\t\t\tf,\n\t\t\t\t\t\tinfo.Wants,\n\t\t\t\t\t\tinfo.Provides,\n\t\t\t\t\t\tinfo.Options,\n\t\t\t\t\t)\n\t\t\t\t\treturn udf, nil\n\t\t\t\t},\n\t\t\t)\n\t\t}\n\t}\n\treturn scope\n}\n\nfunc (tm *TaskMaster) StartTask(t *Task) (*ExecutingTask, error) {\n\ttm.mu.Lock()\n\tdefer tm.mu.Unlock()\n\tif tm.closed {\n\t\treturn nil, errors.New(\"task master is closed cannot start a task\")\n\t}\n\ttm.logger.Println(\"D! Starting task:\", t.ID)\n\tet, err := NewExecutingTask(tm, t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar ins []edge.StatsEdge\n\tswitch et.Task.Type {\n\tcase StreamTask:\n\t\te, err := tm.newFork(et.Task.ID, et.Task.DBRPs, et.Task.Measurements())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tins = []edge.StatsEdge{e}\n\tcase BatchTask:\n\t\tcount, err := et.BatchCount()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tins = make([]edge.StatsEdge, count)\n\t\tfor i := 0; i < count; i++ {\n\t\t\tin := newEdge(t.ID, \"batch\", fmt.Sprintf(\"batch%d\", i), pipeline.BatchEdge, defaultEdgeBufferSize, tm.LogService)\n\t\t\tins[i] = in\n\t\t\ttm.batches[t.ID] = append(tm.batches[t.ID], &batchCollector{edge: in})\n\t\t}\n\t}\n\n\tvar snapshot *TaskSnapshot\n\tif tm.TaskStore.HasSnapshot(t.ID) {\n\t\tsnapshot, err = tm.TaskStore.LoadSnapshot(t.ID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\terr = et.start(ins, snapshot)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttm.tasks[et.Task.ID] = et\n\ttm.logger.Println(\"I! Started task:\", t.ID)\n\ttm.logger.Println(\"D!\", string(t.Dot()))\n\n\treturn et, nil\n}\n\nfunc (tm *TaskMaster) BatchCollectors(id string) []BatchCollector {\n\treturn tm.batches[id]\n}\n\nfunc (tm *TaskMaster) StopTask(id string) error {\n\ttm.mu.Lock()\n\tdefer tm.mu.Unlock()\n\treturn tm.stopTask(id)\n}\n\nfunc (tm *TaskMaster) DeleteTask(id string) error {\n\ttm.mu.Lock()\n\tdefer tm.mu.Unlock()\n\tif err := tm.stopTask(id); err != nil {\n\t\treturn err\n\t}\n\ttm.deleteTask(id)\n\treturn nil\n}\n\n// internal stopTask function. The caller must have acquired\n// the lock in order to call this function\nfunc (tm *TaskMaster) stopTask(id string) (err error) {\n\tif et, ok := tm.tasks[id]; ok {\n\n\t\tdelete(tm.tasks, id)\n\n\t\tswitch et.Task.Type {\n\t\tcase StreamTask:\n\t\t\ttm.delFork(id)\n\t\tcase BatchTask:\n\t\t\tdelete(tm.batches, id)\n\t\t}\n\n\t\terr = et.stop()\n\t\tif err != nil {\n\t\t\ttm.logger.Println(\"E! Stopped task:\", id, err)\n\t\t} else {\n\t\t\ttm.logger.Println(\"I! Stopped task:\", id)\n\t\t}\n\t}\n\treturn\n}\n\n// internal deleteTask function. 
The caller must have acquired\n// the lock in order to call this function\nfunc (tm *TaskMaster) deleteTask(id string) {\n\thooks := tm.deleteHooks[id]\n\tfor _, deleteHook := range hooks {\n\t\tdeleteHook(tm)\n\t}\n}\n\nfunc (tm *TaskMaster) registerDeleteHookForTask(id string, hook deleteHook) {\n\ttm.mu.Lock()\n\tdefer tm.mu.Unlock()\n\ttm.deleteHooks[id] = append(tm.deleteHooks[id], hook)\n}\n\nfunc (tm *TaskMaster) IsExecuting(id string) bool {\n\ttm.mu.RLock()\n\tdefer tm.mu.RUnlock()\n\t_, executing := tm.tasks[id]\n\treturn executing\n}\n\nfunc (tm *TaskMaster) ExecutionStats(id string) (ExecutionStats, error) {\n\ttm.mu.RLock()\n\tdefer tm.mu.RUnlock()\n\ttask, executing := tm.tasks[id]\n\tif !executing {\n\t\treturn ExecutionStats{}, nil\n\t}\n\n\treturn task.ExecutionStats()\n}\n\nfunc (tm *TaskMaster) ExecutingDot(id string, labels bool) string {\n\ttm.mu.RLock()\n\tdefer tm.mu.RUnlock()\n\tet, executing := tm.tasks[id]\n\tif executing {\n\t\treturn string(et.EDot(labels))\n\t}\n\treturn \"\"\n}\n\nfunc (tm *TaskMaster) Stream(name string) (StreamCollector, error) {\n\ttm.mu.Lock()\n\tdefer tm.mu.Unlock()\n\treturn tm.stream(name)\n}\n\nfunc (tm *TaskMaster) stream(name string) (StreamCollector, error) {\n\tif tm.closed {\n\t\treturn nil, ErrTaskMasterClosed\n\t}\n\tin := newEdge(fmt.Sprintf(\"task_master:%s\", tm.id), name, \"stream\", pipeline.StreamEdge, defaultEdgeBufferSize, tm.LogService)\n\tse := &streamEdge{edge: in}\n\ttm.wg.Add(1)\n\tgo func() {\n\t\tdefer tm.wg.Done()\n\t\ttm.runForking(se)\n\t}()\n\treturn se, nil\n}\n\ntype StreamCollector interface {\n\tCollectPoint(edge.PointMessage) error\n\tClose() error\n}\n\ntype StreamEdge interface {\n\tCollectPoint(edge.PointMessage) error\n\tEmitPoint() (edge.PointMessage, bool)\n\tClose() error\n}\n\ntype streamEdge struct {\n\tedge edge.Edge\n}\n\nfunc (s *streamEdge) CollectPoint(p edge.PointMessage) error {\n\treturn s.edge.Collect(p)\n}\nfunc (s *streamEdge) EmitPoint() (edge.PointMessage, bool) {\n\tm, ok := s.edge.Emit()\n\tif !ok {\n\t\treturn nil, false\n\t}\n\tp, ok := m.(edge.PointMessage)\n\tif !ok {\n\t\tpanic(\"impossible to receive non PointMessage message\")\n\t}\n\treturn p, true\n}\nfunc (s *streamEdge) Close() error {\n\treturn s.edge.Close()\n}\n\nfunc (tm *TaskMaster) runForking(in StreamEdge) {\n\tfor p, ok := in.EmitPoint(); ok; p, ok = in.EmitPoint() {\n\t\ttm.forkPoint(p)\n\t}\n}\n\nfunc (tm *TaskMaster) forkPoint(p edge.PointMessage) {\n\ttm.mu.RLock()\n\tlocked := true\n\tdefer func() {\n\t\tif locked {\n\t\t\ttm.mu.RUnlock()\n\t\t}\n\t}()\n\n\t// Create the fork keys - which is (db, rp, measurement)\n\tkey := forkKey{\n\t\tDatabase:        p.Database(),\n\t\tRetentionPolicy: p.RetentionPolicy(),\n\t\tMeasurement:     p.Name(),\n\t}\n\n\t// If we have empty measurement in this db,rp we need to send it all\n\t// the points\n\temptyMeasurementKey := forkKey{\n\t\tDatabase:        p.Database(),\n\t\tRetentionPolicy: p.RetentionPolicy(),\n\t\tMeasurement:     \"\",\n\t}\n\n\t// Merge the results to the forks map\n\tfor _, edge := range tm.forks[key] {\n\t\t_ = edge.Collect(p)\n\t}\n\n\tfor _, edge := range tm.forks[emptyMeasurementKey] {\n\t\t_ = edge.Collect(p)\n\t}\n\n\tc, ok := tm.forkStats[key]\n\tif !ok {\n\t\t// Release read lock\n\t\ttm.mu.RUnlock()\n\t\tlocked = false\n\n\t\t// Get write lock\n\t\ttm.mu.Lock()\n\t\t// Now with write lock check again\n\t\tc, ok = tm.forkStats[key]\n\t\tif !ok {\n\t\t\t// Create statistics\n\t\t\tc = &expvar.Int{}\n\t\t\ttm.forkStats[key] = 
c\n\t\t}\n\t\ttm.mu.Unlock()\n\n\t\ttags := map[string]string{\n\t\t\t\"task_master\":      tm.id,\n\t\t\t\"database\":         key.Database,\n\t\t\t\"retention_policy\": key.RetentionPolicy,\n\t\t\t\"measurement\":      key.Measurement,\n\t\t}\n\t\t_, statMap := vars.NewStatistic(\"ingress\", tags)\n\t\tstatMap.Set(statPointsReceived, c)\n\t}\n\tc.Add(1)\n}\n\nfunc (tm *TaskMaster) WritePoints(database, retentionPolicy string, consistencyLevel imodels.ConsistencyLevel, points []imodels.Point) error {\n\ttm.writesMu.RLock()\n\tdefer tm.writesMu.RUnlock()\n\tif tm.writesClosed {\n\t\treturn ErrTaskMasterClosed\n\t}\n\tif retentionPolicy == \"\" {\n\t\tretentionPolicy = tm.DefaultRetentionPolicy\n\t}\n\tfor _, mp := range points {\n\t\tp := edge.NewPointMessage(\n\t\t\tmp.Name(),\n\t\t\tdatabase,\n\t\t\tretentionPolicy,\n\t\t\tmodels.Dimensions{},\n\t\t\tmodels.Fields(mp.Fields()),\n\t\t\tmodels.Tags(mp.Tags().Map()),\n\t\t\tmp.Time(),\n\t\t)\n\t\terr := tm.writePointsIn.CollectPoint(p)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (tm *TaskMaster) WriteKapacitorPoint(p edge.PointMessage) error {\n\ttm.writesMu.RLock()\n\tdefer tm.writesMu.RUnlock()\n\tif tm.writesClosed {\n\t\treturn ErrTaskMasterClosed\n\t}\n\tp = p.ShallowCopy()\n\tp.SetDimensions(models.Dimensions{})\n\treturn tm.writePointsIn.CollectPoint(p)\n}\n\nfunc (tm *TaskMaster) NewFork(taskName string, dbrps []DBRP, measurements []string) (edge.StatsEdge, error) {\n\ttm.mu.Lock()\n\tdefer tm.mu.Unlock()\n\treturn tm.newFork(taskName, dbrps, measurements)\n}\n\nfunc forkKeys(dbrps []DBRP, measurements []string) []forkKey {\n\tkeys := make([]forkKey, 0)\n\n\tfor _, dbrp := range dbrps {\n\t\tfor _, measurement := range measurements {\n\t\t\tkey := forkKey{\n\t\t\t\tRetentionPolicy: dbrp.RetentionPolicy,\n\t\t\t\tDatabase:        dbrp.Database,\n\t\t\t\tMeasurement:     measurement,\n\t\t\t}\n\n\t\t\tkeys = append(keys, key)\n\t\t}\n\t}\n\n\treturn keys\n}\n\n// internal newFork, must have acquired lock before calling.\nfunc (tm *TaskMaster) newFork(taskName string, dbrps []DBRP, measurements []string) (edge.StatsEdge, error) {\n\tif tm.closed {\n\t\treturn nil, ErrTaskMasterClosed\n\t}\n\n\te := newEdge(taskName, \"stream\", \"stream0\", pipeline.StreamEdge, defaultEdgeBufferSize, tm.LogService)\n\n\tfor _, key := range forkKeys(dbrps, measurements) {\n\t\ttm.taskToForkKeys[taskName] = append(tm.taskToForkKeys[taskName], key)\n\n\t\t// Fetch the tasksMap for this key, creating it if it doesn't exist\n\t\ttasksMap, ok := tm.forks[key]\n\t\tif !ok {\n\t\t\ttasksMap = make(map[string]edge.Edge)\n\t\t}\n\n\t\t// Add the edge to the task map\n\t\ttasksMap[taskName] = e\n\n\t\t// update the task map in the forks\n\t\ttm.forks[key] = tasksMap\n\t}\n\n\treturn e, nil\n}\n\nfunc (tm *TaskMaster) DelFork(id string) {\n\ttm.mu.Lock()\n\tdefer tm.mu.Unlock()\n\ttm.delFork(id)\n}\n\n// internal delFork function, must have lock to call\nfunc (tm *TaskMaster) delFork(id string) {\n\n\t// Mark whether we already closed the edge, because the edge is replicated\n\t// across its fork keys (db,rp,measurement)\n\tisEdgeClosed := false\n\n\t// Find the fork keys\n\tfor _, key := range tm.taskToForkKeys[id] {\n\n\t\t// check if the edge exists\n\t\tedge, ok := tm.forks[key][id]\n\t\tif ok {\n\n\t\t\t// Only close the edge if we didn't already close it\n\t\t\tif edge != nil && !isEdgeClosed {\n\t\t\t\tisEdgeClosed = true\n\t\t\t\tedge.Close()\n\t\t\t}\n\n\t\t\t// remove the task from the fork map\n\t\t\tdelete(tm.forks[key], id)\n\t\t}\n\t}\n\n\t
// remove mapping from task id to its keys\n\tdelete(tm.taskToForkKeys, id)\n}\n\nfunc (tm *TaskMaster) SnapshotTask(id string) (*TaskSnapshot, error) {\n\ttm.mu.RLock()\n\tet, ok := tm.tasks[id]\n\ttm.mu.RUnlock()\n\n\tif ok {\n\t\treturn et.Snapshot()\n\t}\n\treturn nil, fmt.Errorf(\"task %s is not running or does not exist\", id)\n}\n\ntype noOpTimingService struct{}\n\nfunc (noOpTimingService) NewTimer(timer.Setter) timer.Timer {\n\treturn timer.NewNoOp()\n}\n\ntype TaskMasterLookup struct {\n\tsync.Mutex\n\ttaskMasters map[string]*TaskMaster\n}\n\nfunc NewTaskMasterLookup() *TaskMasterLookup {\n\treturn &TaskMasterLookup{\n\t\ttaskMasters: make(map[string]*TaskMaster),\n\t}\n}\n\nfunc (tml *TaskMasterLookup) Get(id string) *TaskMaster {\n\ttml.Lock()\n\tdefer tml.Unlock()\n\treturn tml.taskMasters[id]\n}\n\nfunc (tml *TaskMasterLookup) Main() *TaskMaster {\n\treturn tml.Get(MainTaskMaster)\n}\n\nfunc (tml *TaskMasterLookup) Set(tm *TaskMaster) {\n\ttml.Lock()\n\tdefer tml.Unlock()\n\ttml.taskMasters[tm.id] = tm\n}\n\nfunc (tml *TaskMasterLookup) Delete(tm *TaskMaster) {\n\ttml.Lock()\n\tdefer tml.Unlock()\n\tdelete(tml.taskMasters, tm.id)\n}\n\ntype BatchCollector interface {\n\tCollectBatch(edge.BufferedBatchMessage) error\n\tClose() error\n}\n\ntype batchCollector struct {\n\tedge edge.Edge\n}\n\nfunc (c *batchCollector) CollectBatch(batch edge.BufferedBatchMessage) error {\n\treturn c.edge.Collect(batch)\n}\nfunc (c *batchCollector) Close() error {\n\treturn c.edge.Close()\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/template.go",
    "content": "package kapacitor\n\nimport (\n\t\"github.com/influxdata/kapacitor/pipeline\"\n\t\"github.com/influxdata/kapacitor/tick\"\n)\n\ntype Template struct {\n\tid string\n\ttp *pipeline.TemplatePipeline\n}\n\nfunc (t *Template) Vars() map[string]tick.Var {\n\treturn t.tp.Vars()\n}\n\nfunc (t *Template) Dot() string {\n\treturn string(t.tp.Dot(t.id))\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/test.sh",
    "content": "#!/bin/bash\n#\n# This is the Kapacitor test script.\n# This script can run tests in different environments.\n# # Usage: ./test.sh <environment_index>\n# Corresponding environments for environment_index:\n#      0: normal 64bit tests\n#      1: race enabled 64bit tests\n#      2: normal 32bit tests\n#      count: print the number of test environments\n#      *: to run all tests in parallel containers\n#\n# Logs from the test runs will be saved in OUTPUT_DIR, which defaults to ./test-logs\n#\n\nset -eo pipefail\n\n# Get dir of script and make it is our working directory.\nDIR=$(cd $(dirname \"${BASH_SOURCE[0]}\") && pwd)\ncd $DIR\n\n# Unique number for this build\nBUILD_NUM=${BUILD_NUM-$RANDOM}\n# Index for which test environment to use\nENVIRONMENT_INDEX=$1\n# Set the default OUTPUT_DIR\nOUTPUT_DIR=${OUTPUT_DIR-./test-logs}\n# Set the default DOCKER_SAVE_DIR\nDOCKER_SAVE_DIR=${DOCKER_SAVE_DIR-$HOME/docker}\n# Set default parallelism\nPARALLELISM=${PARALLELISM-1}\n# Set default timeout\nTIMEOUT=${TIMEOUT-480s}\n# No uncommitted changes\nNO_UNCOMMITTED=${NO_UNCOMMITTED-false}\n# Home dir of the docker user\nHOME_DIR=/root\n\nno_uncomitted_arg=\"$no_uncommitted_arg\"\nif [ ! $NO_UNCOMMITTED ]\nthen\n    no_uncomitted_arg=\"\"\nfi\n\n# Update this value if you add a new test environment.\nENV_COUNT=3\n\n# Default return code 0\nrc=0\n\n# Convert dockerfile name to valid docker image tag name.\nfunction filename2imagename {\n    echo ${1/Dockerfile/kapacitor}\n}\n\n# Run a test in a docker container\n# Usage: run_test_docker <Dockerfile> <env_name>\nfunction run_test_docker {\n    local dockerfile=$1\n    local imagename=$(filename2imagename \"$dockerfile\")\n    shift\n    local name=$1\n    shift\n    local logfile=\"$OUTPUT_DIR/${name}.log\"\n\n    imagename=\"$imagename-$BUILD_NUM\"\n    dataname=\"kapacitor-data-$BUILD_NUM\"\n\n    echo \"Building docker image $imagename\"\n    docker build -f \"$dockerfile\" -t \"$imagename\" .\n\n    echo \"Running test in docker $name with args $@\"\n\n    # Create data volume with code\n    docker create \\\n        --name $dataname \\\n        -v \"$HOME_DIR/go/src/github.com/influxdata/kapacitor\" \\\n        $imagename /bin/true\n    docker cp \"$DIR/\" \"$dataname:$HOME_DIR/go/src/github.com/influxdata/\"\n\n    # Run tests in docker\n    docker run \\\n         --rm \\\n         --volumes-from $dataname \\\n         -e \"GORACE=$GORACE\" \\\n         -e \"AWS_ACCESS_KEY_ID=$AWS_ACCESS_KEY_ID\" \\\n         -e \"AWS_SECRET_ACCESS_KEY=$AWS_SECRET_ACCESS_KEY\" \\\n         \"$imagename\" \\\n         \"--parallel=$PARALLELISM\" \\\n         \"--timeout=$TIMEOUT\" \\\n         \"$@\" \\\n         2>&1 | tee \"$logfile\"\n\n    # Copy results back out\n    docker cp \\\n        \"$dataname:$HOME_DIR/go/src/github.com/influxdata/kapacitor/build\" \\\n        ./\n\n    # Remove the data and builder containers\n    docker rm -v $dataname\n}\n\nif [ ! 
-d \"$OUTPUT_DIR\" ]\nthen\n    mkdir -p \"$OUTPUT_DIR\"\nfi\n\n# Run the tests.\ncase $ENVIRONMENT_INDEX in\n    0)\n        # 64 bit tests\n        run_test_docker Dockerfile_build_ubuntu64 test_64bit --test --generate $no_uncommitted_arg\n        rc=$?\n        ;;\n    1)\n        # 64 bit race tests\n        GORACE=\"halt_on_error=1\"\n        run_test_docker Dockerfile_build_ubuntu64 test_64bit_race --test --generate $no_uncommitted_arg --race\n        rc=$?\n        ;;\n    2)\n        # 32 bit tests\n        run_test_docker Dockerfile_build_ubuntu32 test_32bit --test --generate $no_uncommitted_arg --arch=i386\n        rc=$?\n        ;;\n    \"count\")\n        echo $ENV_COUNT\n        ;;\n    *)\n        echo \"No individual test environment specified running tests for all $ENV_COUNT environments.\"\n        # Run all test environments\n        pids=()\n        for t in $(seq 0 \"$(($ENV_COUNT - 1))\")\n        do\n            $0 $t 2>&1 > /dev/null &\n            # add PID to list\n            pids+=($!)\n        done\n\n        echo \"Started all tests. Follow logs in ${OUTPUT_DIR}. Waiting...\"\n\n        # Wait for all tests to finish\n        for pid in \"${pids[@]}\"\n        do\n            wait $pid\n            rc=$(($? + $rc))\n        done\n\n        # Check if all tests passed\n        if [ $rc -eq 0 ]\n        then\n            echo \"All test have passed\"\n        else\n            echo \"Some tests failed check logs in $OUTPUT_DIR for results\"\n        fi\n        ;;\nesac\n\nexit $rc\n\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/tmpldata.json",
    "content": "[\n\t{\n\t\t\"Name\":\"Float\",\n\t\t\"name\":\"float\",\n\t\t\"Type\":\"float64\",\n\t\t\"Kind\":\"reflect.Float64\",\n\t\t\"Nil\":\"0\",\n\t\t\"Zero\":\"float64(0)\"\n\t},\n\t{\n\t\t\"Name\":\"Integer\",\n\t\t\"name\":\"integer\",\n\t\t\"Type\":\"int64\",\n\t\t\"Kind\":\"reflect.Int64\",\n\t\t\"Nil\":\"0\",\n\t\t\"Zero\":\"int64(0)\"\n\t},\n\t{\n\t\t\"Name\":\"String\",\n\t\t\"name\":\"string\",\n\t\t\"Type\":\"string\",\n\t\t\"Kind\":\"reflect.String\",\n\t\t\"Nil\":\"\\\"\\\"\",\n\t\t\"Zero\":\"\\\"\\\"\"\n\t},\n\t{\n\t\t\"Name\":\"Boolean\",\n\t\t\"name\":\"boolean\",\n\t\t\"Type\":\"bool\",\n\t\t\"Kind\":\"reflect.Bool\",\n\t\t\"Nil\":\"false\",\n\t\t\"Zero\":\"false\"\n\t}\n]\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/udf/agent/README.md",
    "content": "# UDF Agents and Servers\n\nA UDF is a User Defined Function, meaning that you can write your own functions/algorithms and plug them into Kapacitor.\nYour custom function runs in its own process and Kapacitor communicates with it via a defined protocol, see [udf.proto](https://github.com/influxdata/kapacitor/blob/master/udf/udf.proto).\nTo facilitate working with the protocol several `agents` have been written in various languages that abstract the protocol communication through an interface in the respective languages.\nYou can find those agent implementations in this directory and subdirectories based on language name.\n\n\nExample uses of the agents can be found in the `examples` directory.\nThese examples are working examples and are executed as part of the testing suite,\nsee [server_test.go](https://github.com/influxdata/kapacitor/blob/master/cmd/kapacitord/run/server_test.go).\n\n## Child process vs Socket\n\nThere are two approaches for writing UDFs.\n\n* A child process based approach where Kapacitor spawns a child process and communicates over STDIN/STDOUT.\n* A socket based approach where you start the UDF process externally and Kapacitor connects to it over a socket.\n\nFor the socket based approach there will only ever be one instance of your UDF process running.\nEach use of the UDF in a TICKscript will be a new connection the socket.\nWhere as each use of a process based UDF means a new child process is spawned for each.\n\n## Design\n\nThe protocol for communicating with Kapacitor consists of Request and Response messages.\nThe agents wrap the communication and serialization and expose an interface that needs to be implemented to handle each request/response.\nIn addition to the request/response paradigm agents provide a way to stream data back to Kapacitor.\nYour UDF is in control of when new points or batches are sent back to Kapacitor.\n\n\n### Agents and Servers\n\nThere are two main objects provided in the current implementations, an `Agent` and a `Server`.\nThe `Agent` is responsible for managing the communication over input and output streams.\nThe `Server` is responsible for accepting new connections and creating new `Agents` to handle those new connections.\n\nBoth process based and socket based UDFs will need to use an `Agent` to handle the communication/serialization aspects of the protocol.\nOnly socket based UDFs need use the `Server`.\n\n## Writing an Agent for a new Language\n\nThe UDF protocol is designed to be simple and consists of reading and writing protocol buffer messages.\n\nIn order to write a UDF in the language of your choice your language must have protocol buffer support and be able to read and write to a socket.\n\nThe basic steps are:\n\n0. Add the language to the `udf/io.go` generate comment so the udf.proto code exists for your language.\n1. Implement a Varint encoder/decoder, this is trivial see the python implementation.\n2. Implement a method for reading and writing streamed protobuf messages. See `udf.proto` for more details.\n3. Create an interface for handling each of the request/responses.\n4. Write a loop for reading from an input stream and calling the handler interface, and write responses to an output stream.\n5. Provide an thread safe mechanism for writing points and batches to the output stream independent of the handler interface.\n    This is easily accomplished with a synchronized write method, see the python implementation.\n6. Implement the examples using your new agent.\n7. 
For process based UDFs it is expected that the process terminates after STDIN is closed and the remaining requests have been processed.\nAfter STDIN is closed, the agent process can continue to send Responses to Kapacitor as long as a keepalive timeout does not occur.\nOnce a keepalive timeout is reached, and after a 2*keepalive_time grace period, if the process has not terminated, it will be forcefully terminated.\n\n## Docker\n\nIt is expected that the examples can run inside the test suite.\nSince generating protocol buffer code for the different languages requires different plugins and libraries, we make use of Docker to provide the necessary environment.\nThis makes testing the code easier as the developer does not have to install each supported language locally.\n\n
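## Example: a socket based UDF server in Go\n\nFor illustration, here is a minimal sketch of a socket based UDF server built from this package's `Server`, `Accepter`, `Agent`, and `Handler` types (the `Response_Point` oneof wrapper comes from the generated `udf.pb.go`).\nThe `mirrorHandler` below keeps no state and simply streams every point back to Kapacitor unchanged; a real UDF would do its work in these methods, and the socket path is just an example.\n\n```go\npackage main\n\nimport (\n\t\"log\"\n\t\"net\"\n\t\"os\"\n\n\t\"github.com/influxdata/kapacitor/udf/agent\"\n)\n\n// mirrorHandler implements agent.Handler and echoes every point back to Kapacitor.\ntype mirrorHandler struct {\n\tagent *agent.Agent\n}\n\nfunc (h *mirrorHandler) Info() (*agent.InfoResponse, error) {\n\treturn &agent.InfoResponse{\n\t\tWants:    agent.EdgeType_STREAM,\n\t\tProvides: agent.EdgeType_STREAM,\n\t\tOptions:  map[string]*agent.OptionInfo{},\n\t}, nil\n}\n\nfunc (h *mirrorHandler) Init(r *agent.InitRequest) (*agent.InitResponse, error) {\n\treturn &agent.InitResponse{Success: true}, nil\n}\n\n// This handler keeps no state, so snapshot and restore are trivial.\nfunc (h *mirrorHandler) Snapshot() (*agent.SnapshotResponse, error) {\n\treturn &agent.SnapshotResponse{}, nil\n}\n\nfunc (h *mirrorHandler) Restore(r *agent.RestoreRequest) (*agent.RestoreResponse, error) {\n\treturn &agent.RestoreResponse{Success: true}, nil\n}\n\nfunc (h *mirrorHandler) BeginBatch(b *agent.BeginBatch) error { return nil }\n\nfunc (h *mirrorHandler) Point(p *agent.Point) error {\n\t// Stream the point straight back to Kapacitor.\n\th.agent.Responses <- &agent.Response{\n\t\tMessage: &agent.Response_Point{Point: p},\n\t}\n\treturn nil\n}\n\nfunc (h *mirrorHandler) EndBatch(b *agent.EndBatch) error { return nil }\n\n// Stop closes the Responses channel, which allows Agent.Wait to return.\nfunc (h *mirrorHandler) Stop() {\n\tclose(h.agent.Responses)\n}\n\n// accepter creates a new Agent for each connection, using the\n// connection as both the in and out streams of the Agent.\ntype accepter struct{}\n\nfunc (accepter) Accept(conn net.Conn) {\n\ta := agent.New(conn, conn)\n\ta.Handler = &mirrorHandler{agent: a}\n\tif err := a.Start(); err != nil {\n\t\tlog.Println(\"E!\", err)\n\t\treturn\n\t}\n\tgo func() {\n\t\tif err := a.Wait(); err != nil {\n\t\t\tlog.Println(\"E!\", err)\n\t\t}\n\t}()\n}\n\nfunc main() {\n\t// Kapacitor connects to this socket path via its UDF configuration.\n\tl, err := net.Listen(\"unix\", \"/tmp/mirror.sock\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\ts := agent.NewServer(l, accepter{})\n\ts.StopOnSignals(os.Interrupt)\n\tif err := s.Serve(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n```\n"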
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/udf/agent/agent.go",
    "content": "package agent\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"sync\"\n)\n\n// The Agent calls the appropriate methods on the Handler as it receives requests over a socket.\n//\n// Returning an error from any method will cause the Agent to stop and an ErrorResponse to be sent.\n// Some *Response objects (like SnapshotResponse) allow for returning their own error within the object itself.\n// These types of errors will not stop the Agent and Kapacitor will deal with them appropriately.\n//\n// The Handler is called from a single goroutine, meaning methods will not be called concurrently.\n//\n// To write Points/Batches back to the Agent/Kapacitor use the Agent.Responses channel.\ntype Handler interface {\n\t// Return the InfoResponse. Describing the properties of this Handler\n\tInfo() (*InfoResponse, error)\n\t// Initialize the Handler with the provided options.\n\tInit(*InitRequest) (*InitResponse, error)\n\t// Create a snapshot of the running state of the handler.\n\tSnapshot() (*SnapshotResponse, error)\n\t// Restore a previous snapshot.\n\tRestore(*RestoreRequest) (*RestoreResponse, error)\n\n\t// A batch has begun.\n\tBeginBatch(*BeginBatch) error\n\t// A point has arrived.\n\tPoint(*Point) error\n\t// The batch is complete.\n\tEndBatch(*EndBatch) error\n\n\t// Gracefully stop the Handler.\n\t// No other methods will be called.\n\tStop()\n}\n\n// Go implementation of a Kapacitor UDF agent.\n// This agent is responsible for reading and writing\n// messages over a socket.\n//\n// The Agent requires a Handler object in order to fulfill requests.\ntype Agent struct {\n\tin  io.ReadCloser\n\tout io.WriteCloser\n\n\toutGroup     sync.WaitGroup\n\toutResponses chan *Response\n\n\tresponses chan *Response\n\t// A channel for writing Responses, specifically Batch and Point responses.\n\tResponses chan<- *Response\n\n\twriteErrC chan error\n\treadErrC  chan error\n\n\t// The handler for requests.\n\tHandler Handler\n}\n\n// Create a new Agent is the provided in/out objects.\n// To create an Agent that reads from STDIN/STDOUT of the process use New(os.Stdin, os.Stdout)\nfunc New(in io.ReadCloser, out io.WriteCloser) *Agent {\n\ts := &Agent{\n\t\tin:           in,\n\t\tout:          out,\n\t\toutResponses: make(chan *Response),\n\t\tresponses:    make(chan *Response),\n\t}\n\ts.Responses = s.responses\n\treturn s\n}\n\n// Start the Agent, you must set an Handler on the agent before starting.\nfunc (a *Agent) Start() error {\n\tif a.Handler == nil {\n\t\treturn errors.New(\"must set a Handler on the agent before starting\")\n\t}\n\n\ta.readErrC = make(chan error, 1)\n\ta.writeErrC = make(chan error, 1)\n\ta.outGroup.Add(1)\n\tgo func() {\n\t\tdefer a.outGroup.Done()\n\t\terr := a.readLoop()\n\t\tif err != nil {\n\t\t\ta.outResponses <- &Response{\n\t\t\t\tMessage: &Response_Error{\n\t\t\t\t\tError: &ErrorResponse{Error: err.Error()},\n\t\t\t\t},\n\t\t\t}\n\t\t}\n\t\ta.readErrC <- err\n\t}()\n\tgo func() {\n\t\ta.writeErrC <- a.writeLoop()\n\t}()\n\n\ta.outGroup.Add(1)\n\tgo func() {\n\t\tdefer a.outGroup.Done()\n\t\ta.forwardResponses()\n\t}()\n\n\treturn nil\n}\n\n// Wait for the Agent to terminate.\n// The Agent will not terminate till the Responses channel is closed.\n// You will need to close this channel externally, typically in the Stop method for the Handler.\n// The Agent will terminate if the In reader is closed or an error occurs.\nfunc (a *Agent) Wait() error {\n\ta.outGroup.Wait()\n\tclose(a.outResponses)\n\tfor a.readErrC != nil || a.writeErrC != nil 
{\n\t\tselect {\n\t\tcase err := <-a.readErrC:\n\t\t\ta.readErrC = nil\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"read error: %s\", err)\n\t\t\t}\n\t\tcase err := <-a.writeErrC:\n\t\t\ta.writeErrC = nil\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"write error: %s\", err)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (a *Agent) readLoop() error {\n\tdefer a.Handler.Stop()\n\tdefer a.in.Close()\n\tin := bufio.NewReader(a.in)\n\tvar buf []byte\n\trequest := &Request{}\n\tfor {\n\t\terr := ReadMessage(&buf, in, request)\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Hand message to handler\n\t\tvar res *Response\n\t\tswitch msg := request.Message.(type) {\n\t\tcase *Request_Info:\n\t\t\tinfo, err := a.Handler.Info()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tres = &Response{}\n\t\t\tres.Message = &Response_Info{\n\t\t\t\tInfo: info,\n\t\t\t}\n\t\tcase *Request_Init:\n\t\t\tinit, err := a.Handler.Init(msg.Init)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tres = &Response{}\n\t\t\tres.Message = &Response_Init{\n\t\t\t\tInit: init,\n\t\t\t}\n\t\tcase *Request_Keepalive:\n\t\t\tres = &Response{\n\t\t\t\tMessage: &Response_Keepalive{\n\t\t\t\t\tKeepalive: &KeepaliveResponse{\n\t\t\t\t\t\tTime: msg.Keepalive.Time,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\tcase *Request_Snapshot:\n\t\t\tsnapshot, err := a.Handler.Snapshot()\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tres = &Response{}\n\t\t\tres.Message = &Response_Snapshot{\n\t\t\t\tSnapshot: snapshot,\n\t\t\t}\n\t\tcase *Request_Restore:\n\t\t\trestore, err := a.Handler.Restore(msg.Restore)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tres = &Response{}\n\t\t\tres.Message = &Response_Restore{\n\t\t\t\tRestore: restore,\n\t\t\t}\n\t\tcase *Request_Begin:\n\t\t\terr := a.Handler.BeginBatch(msg.Begin)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase *Request_Point:\n\t\t\terr := a.Handler.Point(msg.Point)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tcase *Request_End:\n\t\t\terr := a.Handler.EndBatch(msg.End)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif res != nil {\n\t\t\ta.outResponses <- res\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (a *Agent) writeLoop() error {\n\tdefer a.out.Close()\n\tfor response := range a.outResponses {\n\t\terr := WriteMessage(response, a.out)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (a *Agent) forwardResponses() {\n\tfor r := range a.responses {\n\t\ta.outResponses <- r\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/udf/agent/io.go",
    "content": "package agent\n\nimport (\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com/golang/protobuf/proto\"\n)\n\n//go:generate protoc --go_out=./ --python_out=./py/kapacitor/udf/ udf.proto\n\n// Interface for reading messages\n// If you have an io.Reader\n// wrap your reader in a bufio Reader\n// to stasify this interface.\n//\n// Example:\n// brr := bufio.NewReader(reader)\ntype ByteReadReader interface {\n\tio.Reader\n\tio.ByteReader\n}\n\n// Write the message to the io.Writer with a varint size header.\nfunc WriteMessage(msg proto.Message, w io.Writer) error {\n\t// marshal message\n\tdata, err := proto.Marshal(msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvarint := make([]byte, binary.MaxVarintLen32)\n\tn := binary.PutUvarint(varint, uint64(len(data)))\n\n\t_, err = w.Write(varint[:n])\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = w.Write(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\n// Read a message from io.ByteReader by first reading a varint size,\n// and then reading and decoding the message object.\n// If buf is not big enough a new buffer will be allocated to replace buf.\nfunc ReadMessage(buf *[]byte, r ByteReadReader, msg proto.Message) error {\n\tsize, err := binary.ReadUvarint(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif cap(*buf) < int(size) {\n\t\t*buf = make([]byte, size)\n\t}\n\tb := (*buf)[:size]\n\tread := uint64(0)\n\n\tfor read != size {\n\t\tn, err := r.Read(b[read:])\n\t\tif err == io.EOF {\n\t\t\treturn fmt.Errorf(\"unexpected EOF, expected %d more bytes\", size)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tread += uint64(n)\n\t}\n\terr = proto.Unmarshal(b, msg)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/udf/agent/io_test.go",
    "content": "package agent_test\n\nimport (\n\t\"bytes\"\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/influxdata/kapacitor/udf/agent\"\n)\n\nfunc TestMessage_ReadWrite(t *testing.T) {\n\treq := &agent.Request{}\n\treq.Message = &agent.Request_Keepalive{\n\t\tKeepalive: &agent.KeepaliveRequest{\n\t\t\tTime: 42,\n\t\t},\n\t}\n\n\tvar buf bytes.Buffer\n\n\terr := agent.WriteMessage(req, &buf)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tnreq := &agent.Request{}\n\tvar b []byte\n\terr = agent.ReadMessage(&b, &buf, nreq)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !reflect.DeepEqual(req, nreq) {\n\t\tt.Errorf(\"unexpected request: \\ngot %v\\nexp %v\", nreq, req)\n\t}\n}\n\nfunc TestMessage_ReadWriteMultiple(t *testing.T) {\n\treq := &agent.Request{}\n\treq.Message = &agent.Request_Keepalive{\n\t\tKeepalive: &agent.KeepaliveRequest{\n\t\t\tTime: 42,\n\t\t},\n\t}\n\n\tvar buf bytes.Buffer\n\n\tvar count int = 1e4\n\tfor i := 0; i < count; i++ {\n\t\terr := agent.WriteMessage(req, &buf)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\tnreq := &agent.Request{}\n\tvar b []byte\n\n\tfor i := 0; i < count; i++ {\n\t\terr := agent.ReadMessage(&b, &buf, nreq)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\tif !reflect.DeepEqual(req, nreq) {\n\t\t\tt.Fatalf(\"unexpected request: i:%d \\ngot %v\\nexp %v\", i, nreq, req)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/udf/agent/server.go",
    "content": "package agent\n\nimport (\n\t\"net\"\n\t\"os\"\n\t\"os/signal\"\n\t\"sync\"\n)\n\n// A server accepts connections on a listener and\n// spawns new Agents for each connection.\ntype Server struct {\n\tlistener net.Listener\n\taccepter Accepter\n\n\tmu       sync.Mutex\n\tstopped  bool\n\tstopping chan struct{}\n\n\twg sync.WaitGroup\n}\n\ntype Accepter interface {\n\t// Accept new connections from the listener and handle them accordingly.\n\t// The typical action is to create a new Agent with the connection as both its in and out objects.\n\tAccept(net.Conn)\n}\n\n// Create a new server.\nfunc NewServer(l net.Listener, a Accepter) *Server {\n\treturn &Server{\n\t\tlistener: l,\n\t\taccepter: a,\n\t\tstopping: make(chan struct{}),\n\t}\n}\n\n// Server starts the server and blocks.\nfunc (s *Server) Serve() error {\n\ts.mu.Lock()\n\tif s.stopped {\n\t\ts.mu.Unlock()\n\t\treturn nil\n\t}\n\ts.wg.Add(1)\n\ts.mu.Unlock()\n\n\tdefer s.wg.Done()\n\treturn s.run()\n}\n\n// Stop closes the listener and stops all server activity.\nfunc (s *Server) Stop() {\n\ts.mu.Lock()\n\tif s.stopped {\n\t\ts.mu.Unlock()\n\t\treturn\n\t}\n\ts.stopped = true\n\ts.listener.Close()\n\ts.mu.Unlock()\n\n\tclose(s.stopping)\n\ts.wg.Wait()\n}\n\n// StopOnSignals registers a signal handler to stop the Server for the given signals.\nfunc (s *Server) StopOnSignals(signals ...os.Signal) {\n\ts.mu.Lock()\n\tif s.stopped {\n\t\ts.mu.Unlock()\n\t\treturn\n\t}\n\ts.wg.Add(1)\n\ts.mu.Unlock()\n\n\tc := make(chan os.Signal)\n\tsignal.Notify(c, signals...)\n\tgo func() {\n\t\tdefer s.wg.Done()\n\t\tselect {\n\t\tcase <-s.stopping:\n\t\tcase <-c:\n\t\t\ts.Stop()\n\t\t}\n\t}()\n}\n\nfunc (s *Server) run() error {\n\tconns := make(chan net.Conn)\n\terrC := make(chan error, 1)\n\ts.wg.Add(1)\n\tgo func() {\n\t\tdefer s.wg.Done()\n\t\tfor {\n\t\t\tconn, err := s.listener.Accept()\n\t\t\tif err != nil {\n\t\t\t\terrC <- err\n\t\t\t}\n\t\t\tconns <- conn\n\t\t}\n\t}()\n\tfor {\n\t\tselect {\n\t\tcase <-s.stopping:\n\t\t\treturn nil\n\t\tcase err := <-errC:\n\t\t\ts.mu.Lock()\n\t\t\tstopped := s.stopped\n\t\t\ts.mu.Unlock()\n\t\t\tif stopped {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn err\n\t\tcase conn := <-conns:\n\t\t\ts.accepter.Accept(conn)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/udf/agent/udf.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: udf.proto\n// DO NOT EDIT!\n\n/*\nPackage agent is a generated protocol buffer package.\n\nIt is generated from these files:\n\tudf.proto\n\nIt has these top-level messages:\n\tInfoRequest\n\tInfoResponse\n\tOptionInfo\n\tInitRequest\n\tOption\n\tOptionValue\n\tInitResponse\n\tSnapshotRequest\n\tSnapshotResponse\n\tRestoreRequest\n\tRestoreResponse\n\tKeepaliveRequest\n\tKeepaliveResponse\n\tErrorResponse\n\tBeginBatch\n\tPoint\n\tEndBatch\n\tRequest\n\tResponse\n*/\npackage agent\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport fmt \"fmt\"\nimport math \"math\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = fmt.Errorf\nvar _ = math.Inf\n\n// This is a compile-time assertion to ensure that this generated file\n// is compatible with the proto package it is being compiled against.\n// A compilation error at this line likely means your copy of the\n// proto package needs to be updated.\nconst _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package\n\ntype EdgeType int32\n\nconst (\n\tEdgeType_STREAM EdgeType = 0\n\tEdgeType_BATCH  EdgeType = 1\n)\n\nvar EdgeType_name = map[int32]string{\n\t0: \"STREAM\",\n\t1: \"BATCH\",\n}\nvar EdgeType_value = map[string]int32{\n\t\"STREAM\": 0,\n\t\"BATCH\":  1,\n}\n\nfunc (x EdgeType) String() string {\n\treturn proto.EnumName(EdgeType_name, int32(x))\n}\nfunc (EdgeType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }\n\ntype ValueType int32\n\nconst (\n\tValueType_BOOL     ValueType = 0\n\tValueType_INT      ValueType = 1\n\tValueType_DOUBLE   ValueType = 2\n\tValueType_STRING   ValueType = 3\n\tValueType_DURATION ValueType = 4\n)\n\nvar ValueType_name = map[int32]string{\n\t0: \"BOOL\",\n\t1: \"INT\",\n\t2: \"DOUBLE\",\n\t3: \"STRING\",\n\t4: \"DURATION\",\n}\nvar ValueType_value = map[string]int32{\n\t\"BOOL\":     0,\n\t\"INT\":      1,\n\t\"DOUBLE\":   2,\n\t\"STRING\":   3,\n\t\"DURATION\": 4,\n}\n\nfunc (x ValueType) String() string {\n\treturn proto.EnumName(ValueType_name, int32(x))\n}\nfunc (ValueType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }\n\n// Request that the process return information about available Options.\ntype InfoRequest struct {\n}\n\nfunc (m *InfoRequest) Reset()                    { *m = InfoRequest{} }\nfunc (m *InfoRequest) String() string            { return proto.CompactTextString(m) }\nfunc (*InfoRequest) ProtoMessage()               {}\nfunc (*InfoRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }\n\ntype InfoResponse struct {\n\tWants    EdgeType               `protobuf:\"varint,1,opt,name=wants,enum=agent.EdgeType\" json:\"wants,omitempty\"`\n\tProvides EdgeType               `protobuf:\"varint,2,opt,name=provides,enum=agent.EdgeType\" json:\"provides,omitempty\"`\n\tOptions  map[string]*OptionInfo `protobuf:\"bytes,3,rep,name=options\" json:\"options,omitempty\" protobuf_key:\"bytes,1,opt,name=key\" protobuf_val:\"bytes,2,opt,name=value\"`\n}\n\nfunc (m *InfoResponse) Reset()                    { *m = InfoResponse{} }\nfunc (m *InfoResponse) String() string            { return proto.CompactTextString(m) }\nfunc (*InfoResponse) ProtoMessage()               {}\nfunc (*InfoResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }\n\nfunc (m *InfoResponse) GetOptions() map[string]*OptionInfo {\n\tif m != nil {\n\t\treturn m.Options\n\t}\n\treturn nil\n}\n\ntype OptionInfo struct 
{\n\tValueTypes []ValueType `protobuf:\"varint,1,rep,name=valueTypes,enum=agent.ValueType\" json:\"valueTypes,omitempty\"`\n}\n\nfunc (m *OptionInfo) Reset()                    { *m = OptionInfo{} }\nfunc (m *OptionInfo) String() string            { return proto.CompactTextString(m) }\nfunc (*OptionInfo) ProtoMessage()               {}\nfunc (*OptionInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }\n\n// Request that the process initialize itself with the provided options.\ntype InitRequest struct {\n\tOptions []*Option `protobuf:\"bytes,1,rep,name=options\" json:\"options,omitempty\"`\n\tTaskID  string    `protobuf:\"bytes,2,opt,name=taskID\" json:\"taskID,omitempty\"`\n\tNodeID  string    `protobuf:\"bytes,3,opt,name=nodeID\" json:\"nodeID,omitempty\"`\n}\n\nfunc (m *InitRequest) Reset()                    { *m = InitRequest{} }\nfunc (m *InitRequest) String() string            { return proto.CompactTextString(m) }\nfunc (*InitRequest) ProtoMessage()               {}\nfunc (*InitRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }\n\nfunc (m *InitRequest) GetOptions() []*Option {\n\tif m != nil {\n\t\treturn m.Options\n\t}\n\treturn nil\n}\n\ntype Option struct {\n\tName   string         `protobuf:\"bytes,1,opt,name=name\" json:\"name,omitempty\"`\n\tValues []*OptionValue `protobuf:\"bytes,2,rep,name=values\" json:\"values,omitempty\"`\n}\n\nfunc (m *Option) Reset()                    { *m = Option{} }\nfunc (m *Option) String() string            { return proto.CompactTextString(m) }\nfunc (*Option) ProtoMessage()               {}\nfunc (*Option) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }\n\nfunc (m *Option) GetValues() []*OptionValue {\n\tif m != nil {\n\t\treturn m.Values\n\t}\n\treturn nil\n}\n\ntype OptionValue struct {\n\tType ValueType `protobuf:\"varint,1,opt,name=type,enum=agent.ValueType\" json:\"type,omitempty\"`\n\t// Types that are valid to be assigned to Value:\n\t//\t*OptionValue_BoolValue\n\t//\t*OptionValue_IntValue\n\t//\t*OptionValue_DoubleValue\n\t//\t*OptionValue_StringValue\n\t//\t*OptionValue_DurationValue\n\tValue isOptionValue_Value `protobuf_oneof:\"value\"`\n}\n\nfunc (m *OptionValue) Reset()                    { *m = OptionValue{} }\nfunc (m *OptionValue) String() string            { return proto.CompactTextString(m) }\nfunc (*OptionValue) ProtoMessage()               {}\nfunc (*OptionValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }\n\ntype isOptionValue_Value interface {\n\tisOptionValue_Value()\n}\n\ntype OptionValue_BoolValue struct {\n\tBoolValue bool `protobuf:\"varint,2,opt,name=boolValue,oneof\"`\n}\ntype OptionValue_IntValue struct {\n\tIntValue int64 `protobuf:\"varint,3,opt,name=intValue,oneof\"`\n}\ntype OptionValue_DoubleValue struct {\n\tDoubleValue float64 `protobuf:\"fixed64,4,opt,name=doubleValue,oneof\"`\n}\ntype OptionValue_StringValue struct {\n\tStringValue string `protobuf:\"bytes,5,opt,name=stringValue,oneof\"`\n}\ntype OptionValue_DurationValue struct {\n\tDurationValue int64 `protobuf:\"varint,6,opt,name=durationValue,oneof\"`\n}\n\nfunc (*OptionValue_BoolValue) isOptionValue_Value()     {}\nfunc (*OptionValue_IntValue) isOptionValue_Value()      {}\nfunc (*OptionValue_DoubleValue) isOptionValue_Value()   {}\nfunc (*OptionValue_StringValue) isOptionValue_Value()   {}\nfunc (*OptionValue_DurationValue) isOptionValue_Value() {}\n\nfunc (m *OptionValue) GetValue() isOptionValue_Value {\n\tif m != nil {\n\t\treturn m.Value\n\t}\n\treturn nil\n}\n\nfunc 
(m *OptionValue) GetBoolValue() bool {\n\tif x, ok := m.GetValue().(*OptionValue_BoolValue); ok {\n\t\treturn x.BoolValue\n\t}\n\treturn false\n}\n\nfunc (m *OptionValue) GetIntValue() int64 {\n\tif x, ok := m.GetValue().(*OptionValue_IntValue); ok {\n\t\treturn x.IntValue\n\t}\n\treturn 0\n}\n\nfunc (m *OptionValue) GetDoubleValue() float64 {\n\tif x, ok := m.GetValue().(*OptionValue_DoubleValue); ok {\n\t\treturn x.DoubleValue\n\t}\n\treturn 0\n}\n\nfunc (m *OptionValue) GetStringValue() string {\n\tif x, ok := m.GetValue().(*OptionValue_StringValue); ok {\n\t\treturn x.StringValue\n\t}\n\treturn \"\"\n}\n\nfunc (m *OptionValue) GetDurationValue() int64 {\n\tif x, ok := m.GetValue().(*OptionValue_DurationValue); ok {\n\t\treturn x.DurationValue\n\t}\n\treturn 0\n}\n\n// XXX_OneofFuncs is for the internal use of the proto package.\nfunc (*OptionValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {\n\treturn _OptionValue_OneofMarshaler, _OptionValue_OneofUnmarshaler, _OptionValue_OneofSizer, []interface{}{\n\t\t(*OptionValue_BoolValue)(nil),\n\t\t(*OptionValue_IntValue)(nil),\n\t\t(*OptionValue_DoubleValue)(nil),\n\t\t(*OptionValue_StringValue)(nil),\n\t\t(*OptionValue_DurationValue)(nil),\n\t}\n}\n\nfunc _OptionValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {\n\tm := msg.(*OptionValue)\n\t// value\n\tswitch x := m.Value.(type) {\n\tcase *OptionValue_BoolValue:\n\t\tt := uint64(0)\n\t\tif x.BoolValue {\n\t\t\tt = 1\n\t\t}\n\t\tb.EncodeVarint(2<<3 | proto.WireVarint)\n\t\tb.EncodeVarint(t)\n\tcase *OptionValue_IntValue:\n\t\tb.EncodeVarint(3<<3 | proto.WireVarint)\n\t\tb.EncodeVarint(uint64(x.IntValue))\n\tcase *OptionValue_DoubleValue:\n\t\tb.EncodeVarint(4<<3 | proto.WireFixed64)\n\t\tb.EncodeFixed64(math.Float64bits(x.DoubleValue))\n\tcase *OptionValue_StringValue:\n\t\tb.EncodeVarint(5<<3 | proto.WireBytes)\n\t\tb.EncodeStringBytes(x.StringValue)\n\tcase *OptionValue_DurationValue:\n\t\tb.EncodeVarint(6<<3 | proto.WireVarint)\n\t\tb.EncodeVarint(uint64(x.DurationValue))\n\tcase nil:\n\tdefault:\n\t\treturn fmt.Errorf(\"OptionValue.Value has unexpected type %T\", x)\n\t}\n\treturn nil\n}\n\nfunc _OptionValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {\n\tm := msg.(*OptionValue)\n\tswitch tag {\n\tcase 2: // value.boolValue\n\t\tif wire != proto.WireVarint {\n\t\t\treturn true, proto.ErrInternalBadWireType\n\t\t}\n\t\tx, err := b.DecodeVarint()\n\t\tm.Value = &OptionValue_BoolValue{x != 0}\n\t\treturn true, err\n\tcase 3: // value.intValue\n\t\tif wire != proto.WireVarint {\n\t\t\treturn true, proto.ErrInternalBadWireType\n\t\t}\n\t\tx, err := b.DecodeVarint()\n\t\tm.Value = &OptionValue_IntValue{int64(x)}\n\t\treturn true, err\n\tcase 4: // value.doubleValue\n\t\tif wire != proto.WireFixed64 {\n\t\t\treturn true, proto.ErrInternalBadWireType\n\t\t}\n\t\tx, err := b.DecodeFixed64()\n\t\tm.Value = &OptionValue_DoubleValue{math.Float64frombits(x)}\n\t\treturn true, err\n\tcase 5: // value.stringValue\n\t\tif wire != proto.WireBytes {\n\t\t\treturn true, proto.ErrInternalBadWireType\n\t\t}\n\t\tx, err := b.DecodeStringBytes()\n\t\tm.Value = &OptionValue_StringValue{x}\n\t\treturn true, err\n\tcase 6: // value.durationValue\n\t\tif wire != proto.WireVarint {\n\t\t\treturn true, proto.ErrInternalBadWireType\n\t\t}\n\t\tx, err := b.DecodeVarint()\n\t\tm.Value = 
&OptionValue_DurationValue{int64(x)}\n\t\treturn true, err\n\tdefault:\n\t\treturn false, nil\n\t}\n}\n\nfunc _OptionValue_OneofSizer(msg proto.Message) (n int) {\n\tm := msg.(*OptionValue)\n\t// value\n\tswitch x := m.Value.(type) {\n\tcase *OptionValue_BoolValue:\n\t\tn += proto.SizeVarint(2<<3 | proto.WireVarint)\n\t\tn += 1\n\tcase *OptionValue_IntValue:\n\t\tn += proto.SizeVarint(3<<3 | proto.WireVarint)\n\t\tn += proto.SizeVarint(uint64(x.IntValue))\n\tcase *OptionValue_DoubleValue:\n\t\tn += proto.SizeVarint(4<<3 | proto.WireFixed64)\n\t\tn += 8\n\tcase *OptionValue_StringValue:\n\t\tn += proto.SizeVarint(5<<3 | proto.WireBytes)\n\t\tn += proto.SizeVarint(uint64(len(x.StringValue)))\n\t\tn += len(x.StringValue)\n\tcase *OptionValue_DurationValue:\n\t\tn += proto.SizeVarint(6<<3 | proto.WireVarint)\n\t\tn += proto.SizeVarint(uint64(x.DurationValue))\n\tcase nil:\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"proto: unexpected type %T in oneof\", x))\n\t}\n\treturn n\n}\n\n// Respond to Kapacitor whether initialization was successful.\ntype InitResponse struct {\n\tSuccess bool   `protobuf:\"varint,1,opt,name=success\" json:\"success,omitempty\"`\n\tError   string `protobuf:\"bytes,2,opt,name=error\" json:\"error,omitempty\"`\n}\n\nfunc (m *InitResponse) Reset()                    { *m = InitResponse{} }\nfunc (m *InitResponse) String() string            { return proto.CompactTextString(m) }\nfunc (*InitResponse) ProtoMessage()               {}\nfunc (*InitResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }\n\n// Request that the process provide a snapshot of its state.\ntype SnapshotRequest struct {\n}\n\nfunc (m *SnapshotRequest) Reset()                    { *m = SnapshotRequest{} }\nfunc (m *SnapshotRequest) String() string            { return proto.CompactTextString(m) }\nfunc (*SnapshotRequest) ProtoMessage()               {}\nfunc (*SnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }\n\n// Respond to Kapacitor with a serialized snapshot of the running state.\ntype SnapshotResponse struct {\n\tSnapshot []byte `protobuf:\"bytes,1,opt,name=snapshot,proto3\" json:\"snapshot,omitempty\"`\n}\n\nfunc (m *SnapshotResponse) Reset()                    { *m = SnapshotResponse{} }\nfunc (m *SnapshotResponse) String() string            { return proto.CompactTextString(m) }\nfunc (*SnapshotResponse) ProtoMessage()               {}\nfunc (*SnapshotResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }\n\n// Request that the process restore its state from a snapshot.\ntype RestoreRequest struct {\n\tSnapshot []byte `protobuf:\"bytes,1,opt,name=snapshot,proto3\" json:\"snapshot,omitempty\"`\n}\n\nfunc (m *RestoreRequest) Reset()                    { *m = RestoreRequest{} }\nfunc (m *RestoreRequest) String() string            { return proto.CompactTextString(m) }\nfunc (*RestoreRequest) ProtoMessage()               {}\nfunc (*RestoreRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }\n\n// Respond with success or failure to a RestoreRequest\ntype RestoreResponse struct {\n\tSuccess bool   `protobuf:\"varint,1,opt,name=success\" json:\"success,omitempty\"`\n\tError   string `protobuf:\"bytes,2,opt,name=error\" json:\"error,omitempty\"`\n}\n\nfunc (m *RestoreResponse) Reset()                    { *m = RestoreResponse{} }\nfunc (m *RestoreResponse) String() string            { return proto.CompactTextString(m) }\nfunc (*RestoreResponse) ProtoMessage()               {}\nfunc (*RestoreResponse) Descriptor() 
([]byte, []int) { return fileDescriptor0, []int{10} }\n\n// Request that the process respond with a Keepalive to verify it is responding.\ntype KeepaliveRequest struct {\n\t// The number of nanoseconds since the epoch.\n\t// Used only for debugging keepalive requests.\n\tTime int64 `protobuf:\"varint,1,opt,name=time\" json:\"time,omitempty\"`\n}\n\nfunc (m *KeepaliveRequest) Reset()                    { *m = KeepaliveRequest{} }\nfunc (m *KeepaliveRequest) String() string            { return proto.CompactTextString(m) }\nfunc (*KeepaliveRequest) ProtoMessage()               {}\nfunc (*KeepaliveRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }\n\n// Respond to KeepaliveRequest\ntype KeepaliveResponse struct {\n\t// The number of nanoseconds since the epoch.\n\t// Used only for debugging keepalive requests.\n\tTime int64 `protobuf:\"varint,1,opt,name=time\" json:\"time,omitempty\"`\n}\n\nfunc (m *KeepaliveResponse) Reset()                    { *m = KeepaliveResponse{} }\nfunc (m *KeepaliveResponse) String() string            { return proto.CompactTextString(m) }\nfunc (*KeepaliveResponse) ProtoMessage()               {}\nfunc (*KeepaliveResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }\n\n// Sent from the process to Kapacitor indicating an error has occurred.\n// If an ErrorResponse is received, Kapacitor will terminate the process.\ntype ErrorResponse struct {\n\tError string `protobuf:\"bytes,1,opt,name=error\" json:\"error,omitempty\"`\n}\n\nfunc (m *ErrorResponse) Reset()                    { *m = ErrorResponse{} }\nfunc (m *ErrorResponse) String() string            { return proto.CompactTextString(m) }\nfunc (*ErrorResponse) ProtoMessage()               {}\nfunc (*ErrorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }\n\n// Indicates the beginning of a batch.\n// All subsequent points should be considered\n// part of the batch until EndBatch arrives.\n// This includes grouping. 
Batches of\n// differing groups may not be interleaved.\n//\n// All the meta data but tmax is provided,\n// since tmax may not be known at\n// the beginning of a batch.\n//\n// Size is the number of points in the batch.\n// If size is 0 then the batch has an undetermined size.\ntype BeginBatch struct {\n\tName   string            `protobuf:\"bytes,1,opt,name=name\" json:\"name,omitempty\"`\n\tGroup  string            `protobuf:\"bytes,2,opt,name=group\" json:\"group,omitempty\"`\n\tTags   map[string]string `protobuf:\"bytes,3,rep,name=tags\" json:\"tags,omitempty\" protobuf_key:\"bytes,1,opt,name=key\" protobuf_val:\"bytes,2,opt,name=value\"`\n\tSize   int64             `protobuf:\"varint,4,opt,name=size\" json:\"size,omitempty\"`\n\tByName bool              `protobuf:\"varint,5,opt,name=byName\" json:\"byName,omitempty\"`\n}\n\nfunc (m *BeginBatch) Reset()                    { *m = BeginBatch{} }\nfunc (m *BeginBatch) String() string            { return proto.CompactTextString(m) }\nfunc (*BeginBatch) ProtoMessage()               {}\nfunc (*BeginBatch) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }\n\nfunc (m *BeginBatch) GetTags() map[string]string {\n\tif m != nil {\n\t\treturn m.Tags\n\t}\n\treturn nil\n}\n\n// Message containing information about a single data point.\n// Can be sent on its own or bookended by BeginBatch and EndBatch messages.\ntype Point struct {\n\tTime            int64              `protobuf:\"varint,1,opt,name=time\" json:\"time,omitempty\"`\n\tName            string             `protobuf:\"bytes,2,opt,name=name\" json:\"name,omitempty\"`\n\tDatabase        string             `protobuf:\"bytes,3,opt,name=database\" json:\"database,omitempty\"`\n\tRetentionPolicy string             `protobuf:\"bytes,4,opt,name=retentionPolicy\" json:\"retentionPolicy,omitempty\"`\n\tGroup           string             `protobuf:\"bytes,5,opt,name=group\" json:\"group,omitempty\"`\n\tDimensions      []string           `protobuf:\"bytes,6,rep,name=dimensions\" json:\"dimensions,omitempty\"`\n\tTags            map[string]string  `protobuf:\"bytes,7,rep,name=tags\" json:\"tags,omitempty\" protobuf_key:\"bytes,1,opt,name=key\" protobuf_val:\"bytes,2,opt,name=value\"`\n\tFieldsDouble    map[string]float64 `protobuf:\"bytes,8,rep,name=fieldsDouble\" json:\"fieldsDouble,omitempty\" protobuf_key:\"bytes,1,opt,name=key\" protobuf_val:\"fixed64,2,opt,name=value\"`\n\tFieldsInt       map[string]int64   `protobuf:\"bytes,9,rep,name=fieldsInt\" json:\"fieldsInt,omitempty\" protobuf_key:\"bytes,1,opt,name=key\" protobuf_val:\"varint,2,opt,name=value\"`\n\tFieldsString    map[string]string  `protobuf:\"bytes,10,rep,name=fieldsString\" json:\"fieldsString,omitempty\" protobuf_key:\"bytes,1,opt,name=key\" protobuf_val:\"bytes,2,opt,name=value\"`\n\tFieldsBool      map[string]bool    `protobuf:\"bytes,12,rep,name=fieldsBool\" json:\"fieldsBool,omitempty\" protobuf_key:\"bytes,1,opt,name=key\" protobuf_val:\"varint,2,opt,name=value\"`\n\tByName          bool               `protobuf:\"varint,11,opt,name=byName\" json:\"byName,omitempty\"`\n}\n\nfunc (m *Point) Reset()                    { *m = Point{} }\nfunc (m *Point) String() string            { return proto.CompactTextString(m) }\nfunc (*Point) ProtoMessage()               {}\nfunc (*Point) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }\n\nfunc (m *Point) GetTags() map[string]string {\n\tif m != nil {\n\t\treturn m.Tags\n\t}\n\treturn nil\n}\n\nfunc (m *Point) GetFieldsDouble() map[string]float64 {\n\tif m != 
nil {\n\t\treturn m.FieldsDouble\n\t}\n\treturn nil\n}\n\nfunc (m *Point) GetFieldsInt() map[string]int64 {\n\tif m != nil {\n\t\treturn m.FieldsInt\n\t}\n\treturn nil\n}\n\nfunc (m *Point) GetFieldsString() map[string]string {\n\tif m != nil {\n\t\treturn m.FieldsString\n\t}\n\treturn nil\n}\n\nfunc (m *Point) GetFieldsBool() map[string]bool {\n\tif m != nil {\n\t\treturn m.FieldsBool\n\t}\n\treturn nil\n}\n\n// Indicates the end of a batch and contains\n// all meta data associated with the batch.\n// The same meta information is provided for\n// ease of use with the addition of tmax since it\n// may not be known at BeginBatch.\ntype EndBatch struct {\n\tName   string            `protobuf:\"bytes,1,opt,name=name\" json:\"name,omitempty\"`\n\tGroup  string            `protobuf:\"bytes,2,opt,name=group\" json:\"group,omitempty\"`\n\tTmax   int64             `protobuf:\"varint,3,opt,name=tmax\" json:\"tmax,omitempty\"`\n\tTags   map[string]string `protobuf:\"bytes,4,rep,name=tags\" json:\"tags,omitempty\" protobuf_key:\"bytes,1,opt,name=key\" protobuf_val:\"bytes,2,opt,name=value\"`\n\tByName bool              `protobuf:\"varint,5,opt,name=byName\" json:\"byName,omitempty\"`\n}\n\nfunc (m *EndBatch) Reset()                    { *m = EndBatch{} }\nfunc (m *EndBatch) String() string            { return proto.CompactTextString(m) }\nfunc (*EndBatch) ProtoMessage()               {}\nfunc (*EndBatch) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }\n\nfunc (m *EndBatch) GetTags() map[string]string {\n\tif m != nil {\n\t\treturn m.Tags\n\t}\n\treturn nil\n}\n\n// Request message wrapper -- sent from Kapacitor to process\ntype Request struct {\n\t// Types that are valid to be assigned to Message:\n\t//\t*Request_Info\n\t//\t*Request_Init\n\t//\t*Request_Keepalive\n\t//\t*Request_Snapshot\n\t//\t*Request_Restore\n\t//\t*Request_Begin\n\t//\t*Request_Point\n\t//\t*Request_End\n\tMessage isRequest_Message `protobuf_oneof:\"message\"`\n}\n\nfunc (m *Request) Reset()                    { *m = Request{} }\nfunc (m *Request) String() string            { return proto.CompactTextString(m) }\nfunc (*Request) ProtoMessage()               {}\nfunc (*Request) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }\n\ntype isRequest_Message interface {\n\tisRequest_Message()\n}\n\ntype Request_Info struct {\n\tInfo *InfoRequest `protobuf:\"bytes,1,opt,name=info,oneof\"`\n}\ntype Request_Init struct {\n\tInit *InitRequest `protobuf:\"bytes,2,opt,name=init,oneof\"`\n}\ntype Request_Keepalive struct {\n\tKeepalive *KeepaliveRequest `protobuf:\"bytes,3,opt,name=keepalive,oneof\"`\n}\ntype Request_Snapshot struct {\n\tSnapshot *SnapshotRequest `protobuf:\"bytes,4,opt,name=snapshot,oneof\"`\n}\ntype Request_Restore struct {\n\tRestore *RestoreRequest `protobuf:\"bytes,5,opt,name=restore,oneof\"`\n}\ntype Request_Begin struct {\n\tBegin *BeginBatch `protobuf:\"bytes,16,opt,name=begin,oneof\"`\n}\ntype Request_Point struct {\n\tPoint *Point `protobuf:\"bytes,17,opt,name=point,oneof\"`\n}\ntype Request_End struct {\n\tEnd *EndBatch `protobuf:\"bytes,18,opt,name=end,oneof\"`\n}\n\nfunc (*Request_Info) isRequest_Message()      {}\nfunc (*Request_Init) isRequest_Message()      {}\nfunc (*Request_Keepalive) isRequest_Message() {}\nfunc (*Request_Snapshot) isRequest_Message()  {}\nfunc (*Request_Restore) isRequest_Message()   {}\nfunc (*Request_Begin) isRequest_Message()     {}\nfunc (*Request_Point) isRequest_Message()     {}\nfunc (*Request_End) isRequest_Message()       {}\n\nfunc (m *Request) 
GetMessage() isRequest_Message {\n\tif m != nil {\n\t\treturn m.Message\n\t}\n\treturn nil\n}\n\nfunc (m *Request) GetInfo() *InfoRequest {\n\tif x, ok := m.GetMessage().(*Request_Info); ok {\n\t\treturn x.Info\n\t}\n\treturn nil\n}\n\nfunc (m *Request) GetInit() *InitRequest {\n\tif x, ok := m.GetMessage().(*Request_Init); ok {\n\t\treturn x.Init\n\t}\n\treturn nil\n}\n\nfunc (m *Request) GetKeepalive() *KeepaliveRequest {\n\tif x, ok := m.GetMessage().(*Request_Keepalive); ok {\n\t\treturn x.Keepalive\n\t}\n\treturn nil\n}\n\nfunc (m *Request) GetSnapshot() *SnapshotRequest {\n\tif x, ok := m.GetMessage().(*Request_Snapshot); ok {\n\t\treturn x.Snapshot\n\t}\n\treturn nil\n}\n\nfunc (m *Request) GetRestore() *RestoreRequest {\n\tif x, ok := m.GetMessage().(*Request_Restore); ok {\n\t\treturn x.Restore\n\t}\n\treturn nil\n}\n\nfunc (m *Request) GetBegin() *BeginBatch {\n\tif x, ok := m.GetMessage().(*Request_Begin); ok {\n\t\treturn x.Begin\n\t}\n\treturn nil\n}\n\nfunc (m *Request) GetPoint() *Point {\n\tif x, ok := m.GetMessage().(*Request_Point); ok {\n\t\treturn x.Point\n\t}\n\treturn nil\n}\n\nfunc (m *Request) GetEnd() *EndBatch {\n\tif x, ok := m.GetMessage().(*Request_End); ok {\n\t\treturn x.End\n\t}\n\treturn nil\n}\n\n// XXX_OneofFuncs is for the internal use of the proto package.\nfunc (*Request) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {\n\treturn _Request_OneofMarshaler, _Request_OneofUnmarshaler, _Request_OneofSizer, []interface{}{\n\t\t(*Request_Info)(nil),\n\t\t(*Request_Init)(nil),\n\t\t(*Request_Keepalive)(nil),\n\t\t(*Request_Snapshot)(nil),\n\t\t(*Request_Restore)(nil),\n\t\t(*Request_Begin)(nil),\n\t\t(*Request_Point)(nil),\n\t\t(*Request_End)(nil),\n\t}\n}\n\nfunc _Request_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {\n\tm := msg.(*Request)\n\t// message\n\tswitch x := m.Message.(type) {\n\tcase *Request_Info:\n\t\tb.EncodeVarint(1<<3 | proto.WireBytes)\n\t\tif err := b.EncodeMessage(x.Info); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase *Request_Init:\n\t\tb.EncodeVarint(2<<3 | proto.WireBytes)\n\t\tif err := b.EncodeMessage(x.Init); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase *Request_Keepalive:\n\t\tb.EncodeVarint(3<<3 | proto.WireBytes)\n\t\tif err := b.EncodeMessage(x.Keepalive); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase *Request_Snapshot:\n\t\tb.EncodeVarint(4<<3 | proto.WireBytes)\n\t\tif err := b.EncodeMessage(x.Snapshot); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase *Request_Restore:\n\t\tb.EncodeVarint(5<<3 | proto.WireBytes)\n\t\tif err := b.EncodeMessage(x.Restore); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase *Request_Begin:\n\t\tb.EncodeVarint(16<<3 | proto.WireBytes)\n\t\tif err := b.EncodeMessage(x.Begin); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase *Request_Point:\n\t\tb.EncodeVarint(17<<3 | proto.WireBytes)\n\t\tif err := b.EncodeMessage(x.Point); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase *Request_End:\n\t\tb.EncodeVarint(18<<3 | proto.WireBytes)\n\t\tif err := b.EncodeMessage(x.End); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase nil:\n\tdefault:\n\t\treturn fmt.Errorf(\"Request.Message has unexpected type %T\", x)\n\t}\n\treturn nil\n}\n\nfunc _Request_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {\n\tm := msg.(*Request)\n\tswitch tag {\n\tcase 1: // message.info\n\t\tif wire != proto.WireBytes {\n\t\t\treturn true, 
proto.ErrInternalBadWireType\n\t\t}\n\t\tmsg := new(InfoRequest)\n\t\terr := b.DecodeMessage(msg)\n\t\tm.Message = &Request_Info{msg}\n\t\treturn true, err\n\tcase 2: // message.init\n\t\tif wire != proto.WireBytes {\n\t\t\treturn true, proto.ErrInternalBadWireType\n\t\t}\n\t\tmsg := new(InitRequest)\n\t\terr := b.DecodeMessage(msg)\n\t\tm.Message = &Request_Init{msg}\n\t\treturn true, err\n\tcase 3: // message.keepalive\n\t\tif wire != proto.WireBytes {\n\t\t\treturn true, proto.ErrInternalBadWireType\n\t\t}\n\t\tmsg := new(KeepaliveRequest)\n\t\terr := b.DecodeMessage(msg)\n\t\tm.Message = &Request_Keepalive{msg}\n\t\treturn true, err\n\tcase 4: // message.snapshot\n\t\tif wire != proto.WireBytes {\n\t\t\treturn true, proto.ErrInternalBadWireType\n\t\t}\n\t\tmsg := new(SnapshotRequest)\n\t\terr := b.DecodeMessage(msg)\n\t\tm.Message = &Request_Snapshot{msg}\n\t\treturn true, err\n\tcase 5: // message.restore\n\t\tif wire != proto.WireBytes {\n\t\t\treturn true, proto.ErrInternalBadWireType\n\t\t}\n\t\tmsg := new(RestoreRequest)\n\t\terr := b.DecodeMessage(msg)\n\t\tm.Message = &Request_Restore{msg}\n\t\treturn true, err\n\tcase 16: // message.begin\n\t\tif wire != proto.WireBytes {\n\t\t\treturn true, proto.ErrInternalBadWireType\n\t\t}\n\t\tmsg := new(BeginBatch)\n\t\terr := b.DecodeMessage(msg)\n\t\tm.Message = &Request_Begin{msg}\n\t\treturn true, err\n\tcase 17: // message.point\n\t\tif wire != proto.WireBytes {\n\t\t\treturn true, proto.ErrInternalBadWireType\n\t\t}\n\t\tmsg := new(Point)\n\t\terr := b.DecodeMessage(msg)\n\t\tm.Message = &Request_Point{msg}\n\t\treturn true, err\n\tcase 18: // message.end\n\t\tif wire != proto.WireBytes {\n\t\t\treturn true, proto.ErrInternalBadWireType\n\t\t}\n\t\tmsg := new(EndBatch)\n\t\terr := b.DecodeMessage(msg)\n\t\tm.Message = &Request_End{msg}\n\t\treturn true, err\n\tdefault:\n\t\treturn false, nil\n\t}\n}\n\nfunc _Request_OneofSizer(msg proto.Message) (n int) {\n\tm := msg.(*Request)\n\t// message\n\tswitch x := m.Message.(type) {\n\tcase *Request_Info:\n\t\ts := proto.Size(x.Info)\n\t\tn += proto.SizeVarint(1<<3 | proto.WireBytes)\n\t\tn += proto.SizeVarint(uint64(s))\n\t\tn += s\n\tcase *Request_Init:\n\t\ts := proto.Size(x.Init)\n\t\tn += proto.SizeVarint(2<<3 | proto.WireBytes)\n\t\tn += proto.SizeVarint(uint64(s))\n\t\tn += s\n\tcase *Request_Keepalive:\n\t\ts := proto.Size(x.Keepalive)\n\t\tn += proto.SizeVarint(3<<3 | proto.WireBytes)\n\t\tn += proto.SizeVarint(uint64(s))\n\t\tn += s\n\tcase *Request_Snapshot:\n\t\ts := proto.Size(x.Snapshot)\n\t\tn += proto.SizeVarint(4<<3 | proto.WireBytes)\n\t\tn += proto.SizeVarint(uint64(s))\n\t\tn += s\n\tcase *Request_Restore:\n\t\ts := proto.Size(x.Restore)\n\t\tn += proto.SizeVarint(5<<3 | proto.WireBytes)\n\t\tn += proto.SizeVarint(uint64(s))\n\t\tn += s\n\tcase *Request_Begin:\n\t\ts := proto.Size(x.Begin)\n\t\tn += proto.SizeVarint(16<<3 | proto.WireBytes)\n\t\tn += proto.SizeVarint(uint64(s))\n\t\tn += s\n\tcase *Request_Point:\n\t\ts := proto.Size(x.Point)\n\t\tn += proto.SizeVarint(17<<3 | proto.WireBytes)\n\t\tn += proto.SizeVarint(uint64(s))\n\t\tn += s\n\tcase *Request_End:\n\t\ts := proto.Size(x.End)\n\t\tn += proto.SizeVarint(18<<3 | proto.WireBytes)\n\t\tn += proto.SizeVarint(uint64(s))\n\t\tn += s\n\tcase nil:\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"proto: unexpected type %T in oneof\", x))\n\t}\n\treturn n\n}\n\n// Response message wrapper -- sent from process to Kapacitor\ntype Response struct {\n\t// Types that are valid to be assigned to 
Message:\n\t//\t*Response_Info\n\t//\t*Response_Init\n\t//\t*Response_Keepalive\n\t//\t*Response_Snapshot\n\t//\t*Response_Restore\n\t//\t*Response_Error\n\t//\t*Response_Begin\n\t//\t*Response_Point\n\t//\t*Response_End\n\tMessage isResponse_Message `protobuf_oneof:\"message\"`\n}\n\nfunc (m *Response) Reset()                    { *m = Response{} }\nfunc (m *Response) String() string            { return proto.CompactTextString(m) }\nfunc (*Response) ProtoMessage()               {}\nfunc (*Response) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }\n\ntype isResponse_Message interface {\n\tisResponse_Message()\n}\n\ntype Response_Info struct {\n\tInfo *InfoResponse `protobuf:\"bytes,1,opt,name=info,oneof\"`\n}\ntype Response_Init struct {\n\tInit *InitResponse `protobuf:\"bytes,2,opt,name=init,oneof\"`\n}\ntype Response_Keepalive struct {\n\tKeepalive *KeepaliveResponse `protobuf:\"bytes,3,opt,name=keepalive,oneof\"`\n}\ntype Response_Snapshot struct {\n\tSnapshot *SnapshotResponse `protobuf:\"bytes,4,opt,name=snapshot,oneof\"`\n}\ntype Response_Restore struct {\n\tRestore *RestoreResponse `protobuf:\"bytes,5,opt,name=restore,oneof\"`\n}\ntype Response_Error struct {\n\tError *ErrorResponse `protobuf:\"bytes,6,opt,name=error,oneof\"`\n}\ntype Response_Begin struct {\n\tBegin *BeginBatch `protobuf:\"bytes,16,opt,name=begin,oneof\"`\n}\ntype Response_Point struct {\n\tPoint *Point `protobuf:\"bytes,17,opt,name=point,oneof\"`\n}\ntype Response_End struct {\n\tEnd *EndBatch `protobuf:\"bytes,18,opt,name=end,oneof\"`\n}\n\nfunc (*Response_Info) isResponse_Message()      {}\nfunc (*Response_Init) isResponse_Message()      {}\nfunc (*Response_Keepalive) isResponse_Message() {}\nfunc (*Response_Snapshot) isResponse_Message()  {}\nfunc (*Response_Restore) isResponse_Message()   {}\nfunc (*Response_Error) isResponse_Message()     {}\nfunc (*Response_Begin) isResponse_Message()     {}\nfunc (*Response_Point) isResponse_Message()     {}\nfunc (*Response_End) isResponse_Message()       {}\n\nfunc (m *Response) GetMessage() isResponse_Message {\n\tif m != nil {\n\t\treturn m.Message\n\t}\n\treturn nil\n}\n\nfunc (m *Response) GetInfo() *InfoResponse {\n\tif x, ok := m.GetMessage().(*Response_Info); ok {\n\t\treturn x.Info\n\t}\n\treturn nil\n}\n\nfunc (m *Response) GetInit() *InitResponse {\n\tif x, ok := m.GetMessage().(*Response_Init); ok {\n\t\treturn x.Init\n\t}\n\treturn nil\n}\n\nfunc (m *Response) GetKeepalive() *KeepaliveResponse {\n\tif x, ok := m.GetMessage().(*Response_Keepalive); ok {\n\t\treturn x.Keepalive\n\t}\n\treturn nil\n}\n\nfunc (m *Response) GetSnapshot() *SnapshotResponse {\n\tif x, ok := m.GetMessage().(*Response_Snapshot); ok {\n\t\treturn x.Snapshot\n\t}\n\treturn nil\n}\n\nfunc (m *Response) GetRestore() *RestoreResponse {\n\tif x, ok := m.GetMessage().(*Response_Restore); ok {\n\t\treturn x.Restore\n\t}\n\treturn nil\n}\n\nfunc (m *Response) GetError() *ErrorResponse {\n\tif x, ok := m.GetMessage().(*Response_Error); ok {\n\t\treturn x.Error\n\t}\n\treturn nil\n}\n\nfunc (m *Response) GetBegin() *BeginBatch {\n\tif x, ok := m.GetMessage().(*Response_Begin); ok {\n\t\treturn x.Begin\n\t}\n\treturn nil\n}\n\nfunc (m *Response) GetPoint() *Point {\n\tif x, ok := m.GetMessage().(*Response_Point); ok {\n\t\treturn x.Point\n\t}\n\treturn nil\n}\n\nfunc (m *Response) GetEnd() *EndBatch {\n\tif x, ok := m.GetMessage().(*Response_End); ok {\n\t\treturn x.End\n\t}\n\treturn nil\n}\n\n// XXX_OneofFuncs is for the internal use of the proto package.\nfunc (*Response) 
XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {\n\treturn _Response_OneofMarshaler, _Response_OneofUnmarshaler, _Response_OneofSizer, []interface{}{\n\t\t(*Response_Info)(nil),\n\t\t(*Response_Init)(nil),\n\t\t(*Response_Keepalive)(nil),\n\t\t(*Response_Snapshot)(nil),\n\t\t(*Response_Restore)(nil),\n\t\t(*Response_Error)(nil),\n\t\t(*Response_Begin)(nil),\n\t\t(*Response_Point)(nil),\n\t\t(*Response_End)(nil),\n\t}\n}\n\nfunc _Response_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {\n\tm := msg.(*Response)\n\t// message\n\tswitch x := m.Message.(type) {\n\tcase *Response_Info:\n\t\tb.EncodeVarint(1<<3 | proto.WireBytes)\n\t\tif err := b.EncodeMessage(x.Info); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase *Response_Init:\n\t\tb.EncodeVarint(2<<3 | proto.WireBytes)\n\t\tif err := b.EncodeMessage(x.Init); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase *Response_Keepalive:\n\t\tb.EncodeVarint(3<<3 | proto.WireBytes)\n\t\tif err := b.EncodeMessage(x.Keepalive); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase *Response_Snapshot:\n\t\tb.EncodeVarint(4<<3 | proto.WireBytes)\n\t\tif err := b.EncodeMessage(x.Snapshot); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase *Response_Restore:\n\t\tb.EncodeVarint(5<<3 | proto.WireBytes)\n\t\tif err := b.EncodeMessage(x.Restore); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase *Response_Error:\n\t\tb.EncodeVarint(6<<3 | proto.WireBytes)\n\t\tif err := b.EncodeMessage(x.Error); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase *Response_Begin:\n\t\tb.EncodeVarint(16<<3 | proto.WireBytes)\n\t\tif err := b.EncodeMessage(x.Begin); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase *Response_Point:\n\t\tb.EncodeVarint(17<<3 | proto.WireBytes)\n\t\tif err := b.EncodeMessage(x.Point); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase *Response_End:\n\t\tb.EncodeVarint(18<<3 | proto.WireBytes)\n\t\tif err := b.EncodeMessage(x.End); err != nil {\n\t\t\treturn err\n\t\t}\n\tcase nil:\n\tdefault:\n\t\treturn fmt.Errorf(\"Response.Message has unexpected type %T\", x)\n\t}\n\treturn nil\n}\n\nfunc _Response_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {\n\tm := msg.(*Response)\n\tswitch tag {\n\tcase 1: // message.info\n\t\tif wire != proto.WireBytes {\n\t\t\treturn true, proto.ErrInternalBadWireType\n\t\t}\n\t\tmsg := new(InfoResponse)\n\t\terr := b.DecodeMessage(msg)\n\t\tm.Message = &Response_Info{msg}\n\t\treturn true, err\n\tcase 2: // message.init\n\t\tif wire != proto.WireBytes {\n\t\t\treturn true, proto.ErrInternalBadWireType\n\t\t}\n\t\tmsg := new(InitResponse)\n\t\terr := b.DecodeMessage(msg)\n\t\tm.Message = &Response_Init{msg}\n\t\treturn true, err\n\tcase 3: // message.keepalive\n\t\tif wire != proto.WireBytes {\n\t\t\treturn true, proto.ErrInternalBadWireType\n\t\t}\n\t\tmsg := new(KeepaliveResponse)\n\t\terr := b.DecodeMessage(msg)\n\t\tm.Message = &Response_Keepalive{msg}\n\t\treturn true, err\n\tcase 4: // message.snapshot\n\t\tif wire != proto.WireBytes {\n\t\t\treturn true, proto.ErrInternalBadWireType\n\t\t}\n\t\tmsg := new(SnapshotResponse)\n\t\terr := b.DecodeMessage(msg)\n\t\tm.Message = &Response_Snapshot{msg}\n\t\treturn true, err\n\tcase 5: // message.restore\n\t\tif wire != proto.WireBytes {\n\t\t\treturn true, proto.ErrInternalBadWireType\n\t\t}\n\t\tmsg := new(RestoreResponse)\n\t\terr := b.DecodeMessage(msg)\n\t\tm.Message = &Response_Restore{msg}\n\t\treturn true, err\n\tcase 
6: // message.error\n\t\tif wire != proto.WireBytes {\n\t\t\treturn true, proto.ErrInternalBadWireType\n\t\t}\n\t\tmsg := new(ErrorResponse)\n\t\terr := b.DecodeMessage(msg)\n\t\tm.Message = &Response_Error{msg}\n\t\treturn true, err\n\tcase 16: // message.begin\n\t\tif wire != proto.WireBytes {\n\t\t\treturn true, proto.ErrInternalBadWireType\n\t\t}\n\t\tmsg := new(BeginBatch)\n\t\terr := b.DecodeMessage(msg)\n\t\tm.Message = &Response_Begin{msg}\n\t\treturn true, err\n\tcase 17: // message.point\n\t\tif wire != proto.WireBytes {\n\t\t\treturn true, proto.ErrInternalBadWireType\n\t\t}\n\t\tmsg := new(Point)\n\t\terr := b.DecodeMessage(msg)\n\t\tm.Message = &Response_Point{msg}\n\t\treturn true, err\n\tcase 18: // message.end\n\t\tif wire != proto.WireBytes {\n\t\t\treturn true, proto.ErrInternalBadWireType\n\t\t}\n\t\tmsg := new(EndBatch)\n\t\terr := b.DecodeMessage(msg)\n\t\tm.Message = &Response_End{msg}\n\t\treturn true, err\n\tdefault:\n\t\treturn false, nil\n\t}\n}\n\nfunc _Response_OneofSizer(msg proto.Message) (n int) {\n\tm := msg.(*Response)\n\t// message\n\tswitch x := m.Message.(type) {\n\tcase *Response_Info:\n\t\ts := proto.Size(x.Info)\n\t\tn += proto.SizeVarint(1<<3 | proto.WireBytes)\n\t\tn += proto.SizeVarint(uint64(s))\n\t\tn += s\n\tcase *Response_Init:\n\t\ts := proto.Size(x.Init)\n\t\tn += proto.SizeVarint(2<<3 | proto.WireBytes)\n\t\tn += proto.SizeVarint(uint64(s))\n\t\tn += s\n\tcase *Response_Keepalive:\n\t\ts := proto.Size(x.Keepalive)\n\t\tn += proto.SizeVarint(3<<3 | proto.WireBytes)\n\t\tn += proto.SizeVarint(uint64(s))\n\t\tn += s\n\tcase *Response_Snapshot:\n\t\ts := proto.Size(x.Snapshot)\n\t\tn += proto.SizeVarint(4<<3 | proto.WireBytes)\n\t\tn += proto.SizeVarint(uint64(s))\n\t\tn += s\n\tcase *Response_Restore:\n\t\ts := proto.Size(x.Restore)\n\t\tn += proto.SizeVarint(5<<3 | proto.WireBytes)\n\t\tn += proto.SizeVarint(uint64(s))\n\t\tn += s\n\tcase *Response_Error:\n\t\ts := proto.Size(x.Error)\n\t\tn += proto.SizeVarint(6<<3 | proto.WireBytes)\n\t\tn += proto.SizeVarint(uint64(s))\n\t\tn += s\n\tcase *Response_Begin:\n\t\ts := proto.Size(x.Begin)\n\t\tn += proto.SizeVarint(16<<3 | proto.WireBytes)\n\t\tn += proto.SizeVarint(uint64(s))\n\t\tn += s\n\tcase *Response_Point:\n\t\ts := proto.Size(x.Point)\n\t\tn += proto.SizeVarint(17<<3 | proto.WireBytes)\n\t\tn += proto.SizeVarint(uint64(s))\n\t\tn += s\n\tcase *Response_End:\n\t\ts := proto.Size(x.End)\n\t\tn += proto.SizeVarint(18<<3 | proto.WireBytes)\n\t\tn += proto.SizeVarint(uint64(s))\n\t\tn += s\n\tcase nil:\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"proto: unexpected type %T in oneof\", x))\n\t}\n\treturn n\n}\n\nfunc init() {\n\tproto.RegisterType((*InfoRequest)(nil), \"agent.InfoRequest\")\n\tproto.RegisterType((*InfoResponse)(nil), \"agent.InfoResponse\")\n\tproto.RegisterType((*OptionInfo)(nil), \"agent.OptionInfo\")\n\tproto.RegisterType((*InitRequest)(nil), \"agent.InitRequest\")\n\tproto.RegisterType((*Option)(nil), \"agent.Option\")\n\tproto.RegisterType((*OptionValue)(nil), \"agent.OptionValue\")\n\tproto.RegisterType((*InitResponse)(nil), \"agent.InitResponse\")\n\tproto.RegisterType((*SnapshotRequest)(nil), \"agent.SnapshotRequest\")\n\tproto.RegisterType((*SnapshotResponse)(nil), \"agent.SnapshotResponse\")\n\tproto.RegisterType((*RestoreRequest)(nil), \"agent.RestoreRequest\")\n\tproto.RegisterType((*RestoreResponse)(nil), \"agent.RestoreResponse\")\n\tproto.RegisterType((*KeepaliveRequest)(nil), \"agent.KeepaliveRequest\")\n\tproto.RegisterType((*KeepaliveResponse)(nil), 
\"agent.KeepaliveResponse\")\n\tproto.RegisterType((*ErrorResponse)(nil), \"agent.ErrorResponse\")\n\tproto.RegisterType((*BeginBatch)(nil), \"agent.BeginBatch\")\n\tproto.RegisterType((*Point)(nil), \"agent.Point\")\n\tproto.RegisterType((*EndBatch)(nil), \"agent.EndBatch\")\n\tproto.RegisterType((*Request)(nil), \"agent.Request\")\n\tproto.RegisterType((*Response)(nil), \"agent.Response\")\n\tproto.RegisterEnum(\"agent.EdgeType\", EdgeType_name, EdgeType_value)\n\tproto.RegisterEnum(\"agent.ValueType\", ValueType_name, ValueType_value)\n}\n\nfunc init() { proto.RegisterFile(\"udf.proto\", fileDescriptor0) }\n\nvar fileDescriptor0 = []byte{\n\t// 1151 bytes of a gzipped FileDescriptorProto\n\t0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xc4, 0x57, 0xdd, 0x72, 0xdb, 0xc4,\n\t0x17, 0xaf, 0x22, 0xcb, 0x96, 0x8e, 0x9d, 0x58, 0xde, 0xe6, 0x9f, 0xea, 0x1f, 0x3a, 0x99, 0x20,\n\t0x68, 0x9b, 0x84, 0x62, 0xc0, 0xc0, 0x50, 0x3a, 0x05, 0x26, 0xc6, 0x86, 0x78, 0x68, 0xe3, 0x8e,\n\t0xe2, 0xf6, 0x5e, 0x8e, 0x36, 0xae, 0x26, 0x8e, 0x64, 0x24, 0x39, 0x60, 0xae, 0x78, 0x1c, 0x1e,\n\t0x80, 0x87, 0xe0, 0x82, 0x27, 0x61, 0x86, 0x77, 0x60, 0xbf, 0xb4, 0x5a, 0xd9, 0x86, 0x4c, 0x99,\n\t0xce, 0x70, 0xa7, 0x3d, 0xe7, 0x77, 0xbe, 0xcf, 0x9e, 0xb3, 0x02, 0x6b, 0x1e, 0x5c, 0xb4, 0x67,\n\t0x49, 0x9c, 0xc5, 0xc8, 0xf0, 0x27, 0x38, 0xca, 0xdc, 0x4d, 0xa8, 0x0f, 0xa2, 0x8b, 0xd8, 0xc3,\n\t0xdf, 0xcf, 0x71, 0x9a, 0xb9, 0x7f, 0x6a, 0xd0, 0xe0, 0xe7, 0x74, 0x16, 0x47, 0x29, 0x46, 0xf7,\n\t0xc0, 0xf8, 0xc1, 0x8f, 0xb2, 0xd4, 0xd1, 0xf6, 0xb5, 0x83, 0xad, 0x4e, 0xb3, 0xcd, 0xc4, 0xda,\n\t0xfd, 0x60, 0x82, 0x47, 0x8b, 0x19, 0xf6, 0x38, 0x17, 0xbd, 0x07, 0x26, 0x51, 0x7b, 0x1d, 0x06,\n\t0x38, 0x75, 0x36, 0xd6, 0x23, 0x25, 0x00, 0x3d, 0x86, 0x5a, 0x3c, 0xcb, 0x42, 0xa2, 0xdf, 0xd1,\n\t0xf7, 0xf5, 0x83, 0x7a, 0x67, 0x5f, 0x60, 0x55, 0xcb, 0xed, 0x21, 0x87, 0xf4, 0xa3, 0x2c, 0x59,\n\t0x78, 0xb9, 0xc0, 0xee, 0x33, 0x68, 0xa8, 0x0c, 0x64, 0x83, 0x7e, 0x89, 0x17, 0xcc, 0x3b, 0xcb,\n\t0xa3, 0x9f, 0xe8, 0x01, 0x18, 0xd7, 0xfe, 0x74, 0x8e, 0x99, 0x1f, 0xf5, 0x4e, 0x4b, 0xe8, 0xe6,\n\t0x52, 0xcc, 0x02, 0xe7, 0x3f, 0xde, 0x78, 0xa4, 0xb9, 0x5f, 0x02, 0x14, 0x0c, 0xf4, 0x21, 0x00,\n\t0x63, 0x51, 0x7f, 0x69, 0xc4, 0x3a, 0x89, 0xc3, 0x16, 0xf2, 0x2f, 0x73, 0x86, 0xa7, 0x60, 0xdc,\n\t0x0b, 0x9a, 0xbe, 0x30, 0x13, 0xe9, 0x23, 0xb6, 0x65, 0x64, 0x1a, 0x8b, 0x6c, 0xb3, 0x64, 0x5d,\n\t0x86, 0x81, 0x76, 0xa0, 0x9a, 0xf9, 0xe9, 0xe5, 0xa0, 0xc7, 0xbc, 0xb4, 0x3c, 0x71, 0xa2, 0xf4,\n\t0x28, 0x0e, 0x30, 0xa1, 0xeb, 0x9c, 0xce, 0x4f, 0xee, 0x09, 0x54, 0xb9, 0x0a, 0x84, 0xa0, 0x12,\n\t0xf9, 0x57, 0x58, 0x44, 0xcc, 0xbe, 0xd1, 0x11, 0x54, 0x99, 0x4f, 0x34, 0xf7, 0xd4, 0x2a, 0x2a,\n\t0x59, 0x65, 0x9e, 0x7b, 0x02, 0xe1, 0xfe, 0xa1, 0x41, 0x5d, 0xa1, 0xa3, 0x77, 0xa1, 0x92, 0x91,\n\t0x50, 0x44, 0x7d, 0x57, 0xa3, 0x65, 0x5c, 0xb4, 0x07, 0xd6, 0x38, 0x8e, 0xa7, 0x2f, 0x65, 0x62,\n\t0xcd, 0x93, 0x5b, 0x5e, 0x41, 0x42, 0x77, 0xc1, 0x0c, 0xa3, 0x8c, 0xb3, 0xa9, 0xe7, 0x3a, 0x61,\n\t0x4b, 0x0a, 0x72, 0xa1, 0x1e, 0xc4, 0xf3, 0xf1, 0x14, 0x73, 0x40, 0x85, 0x00, 0x34, 0x02, 0x50,\n\t0x89, 0x14, 0x93, 0x66, 0x49, 0x18, 0x4d, 0x38, 0xc6, 0xa0, 0xe1, 0x51, 0x8c, 0x42, 0x44, 0xf7,\n\t0x61, 0x33, 0x98, 0x27, 0xbe, 0x74, 0xde, 0xa9, 0x0a, 0x53, 0x65, 0x72, 0xb7, 0x26, 0x5a, 0x80,\n\t0x94, 0xb7, 0xc1, 0xcb, 0x23, 0xba, 0xd9, 0x81, 0x5a, 0x3a, 0x3f, 0x3f, 0xc7, 0x29, 0xef, 0x67,\n\t0xd3, 0xcb, 0x8f, 0x68, 0x1b, 0x0c, 0x9c, 0x24, 0x71, 0x22, 0xea, 0xc1, 0x0f, 0x6e, 0x0b, 0x9a,\n\t0x67, 0x91, 0x3f, 0x4b, 0x5f, 0xc5, 0x79, 0x89, 0xdd, 0x36, 0xd8, 0x05, 0x49, 
0xa8, 0xdd, 0x05,\n\t0x33, 0x15, 0x34, 0xa6, 0xb7, 0xe1, 0xc9, 0xb3, 0xfb, 0x10, 0xb6, 0x08, 0x2e, 0x8b, 0x13, 0x9c,\n\t0x37, 0xc9, 0x3f, 0xa1, 0x8f, 0xa1, 0x29, 0xd1, 0xff, 0xd2, 0xe7, 0xfb, 0x60, 0x7f, 0x87, 0xf1,\n\t0xcc, 0x9f, 0x86, 0xd7, 0xd2, 0x24, 0x69, 0x9a, 0x2c, 0x14, 0x4d, 0xa3, 0x7b, 0xec, 0xdb, 0x7d,\n\t0x00, 0x2d, 0x05, 0x27, 0x8c, 0xad, 0x03, 0xde, 0x83, 0xcd, 0x3e, 0xd5, 0x2c, 0x41, 0xd2, 0xae,\n\t0xa6, 0xda, 0xfd, 0x5d, 0x03, 0xe8, 0xe2, 0x49, 0x18, 0x75, 0xfd, 0xec, 0xfc, 0xd5, 0xda, 0x3e,\n\t0x25, 0x82, 0x93, 0x24, 0x9e, 0xcf, 0x72, 0x87, 0xd9, 0x01, 0x7d, 0x40, 0x6c, 0xfa, 0x93, 0x7c,\n\t0x16, 0xbc, 0x25, 0x3a, 0xb0, 0x50, 0xd5, 0x1e, 0x11, 0x2e, 0x1f, 0x03, 0x0c, 0x48, 0x55, 0xa7,\n\t0xe1, 0x4f, 0xbc, 0x8f, 0x88, 0x93, 0xf4, 0x9b, 0x5e, 0x9c, 0xf1, 0xe2, 0x94, 0x1a, 0x34, 0x58,\n\t0x92, 0xc4, 0x69, 0xf7, 0x33, 0xb0, 0xa4, 0xf8, 0x9a, 0x61, 0xb1, 0xad, 0x0e, 0x0b, 0x4b, 0x9d,\n\t0x0c, 0xbf, 0x54, 0xc1, 0x78, 0x1e, 0x93, 0x16, 0x5e, 0x97, 0x13, 0x19, 0xdd, 0x86, 0x12, 0x1d,\n\t0xa9, 0x6b, 0xe0, 0x67, 0xfe, 0xd8, 0x4f, 0xb1, 0xb8, 0xbd, 0xf2, 0x8c, 0x0e, 0xa0, 0x99, 0xe0,\n\t0x8c, 0xc4, 0x45, 0x7a, 0xf4, 0x79, 0x3c, 0x0d, 0xcf, 0x17, 0xcc, 0x7b, 0xcb, 0x5b, 0x26, 0x17,\n\t0x39, 0x32, 0xd4, 0x1c, 0xed, 0x01, 0x04, 0xc4, 0x6e, 0x94, 0xb2, 0xd9, 0x52, 0x25, 0x99, 0xb2,\n\t0x3c, 0x85, 0x42, 0x26, 0x00, 0xcf, 0x61, 0x8d, 0xe5, 0x70, 0x47, 0xe4, 0x90, 0xf9, 0xbf, 0x92,\n\t0xbe, 0x2e, 0x34, 0x2e, 0x42, 0x3c, 0x0d, 0xd2, 0x1e, 0xbb, 0x7e, 0x8e, 0xc9, 0x64, 0xf6, 0x4a,\n\t0x32, 0xdf, 0x28, 0x00, 0x2e, 0x5b, 0x92, 0x41, 0x9f, 0x83, 0xc5, 0xcf, 0x83, 0x28, 0x73, 0xac,\n\t0x52, 0xe1, 0x54, 0x05, 0x84, 0xcb, 0xa5, 0x0b, 0x74, 0x61, 0xfe, 0x8c, 0xdd, 0x6c, 0x07, 0xfe,\n\t0xd6, 0x3c, 0x07, 0x94, 0xcc, 0x73, 0x12, 0x7a, 0x02, 0xc0, 0xcf, 0x5d, 0x32, 0x81, 0x9c, 0x06,\n\t0xd3, 0x70, 0x77, 0x8d, 0x06, 0xca, 0xe6, 0xf2, 0x0a, 0x5e, 0xe9, 0x95, 0xfa, 0x1b, 0xe9, 0x95,\n\t0xdd, 0xaf, 0xa0, 0xb5, 0x92, 0xb0, 0x9b, 0x14, 0x68, 0xaa, 0x82, 0x27, 0xb0, 0x55, 0x4e, 0xd8,\n\t0x4d, 0xd2, 0xfa, 0x5a, 0xf3, 0x4a, 0xc2, 0x5e, 0xcb, 0xff, 0x2f, 0xa0, 0xb9, 0x94, 0xaf, 0x9b,\n\t0xc4, 0x4d, 0xf5, 0xaa, 0xfc, 0xa6, 0x81, 0xd9, 0x8f, 0x82, 0xd7, 0xbd, 0xf7, 0xf4, 0x5e, 0x5d,\n\t0xf9, 0x3f, 0xf2, 0x7d, 0xe1, 0xb1, 0x6f, 0xf4, 0xbe, 0xe8, 0xe3, 0x0a, 0x2b, 0xe9, 0xff, 0xf3,\n\t0x37, 0x84, 0x50, 0xbe, 0xd2, 0xca, 0x6f, 0xfc, 0xd6, 0xff, 0xac, 0x43, 0x2d, 0x1f, 0x9a, 0x07,\n\t0x50, 0x09, 0xc9, 0xab, 0x80, 0x09, 0x16, 0x3b, 0x55, 0x79, 0x2d, 0x91, 0xc5, 0xc3, 0x10, 0x1c,\n\t0x19, 0x66, 0xe2, 0xc5, 0x51, 0x20, 0xe5, 0xc3, 0x80, 0x23, 0xc3, 0x0c, 0x11, 0xc7, 0x2e, 0xf3,\n\t0xa1, 0xcb, 0x02, 0xaf, 0x77, 0xee, 0x08, 0xf8, 0xf2, 0xd0, 0xa6, 0x0b, 0x56, 0x62, 0xd1, 0x27,\n\t0xca, 0xd2, 0xa8, 0x30, 0xb9, 0xfc, 0x92, 0x2f, 0x2d, 0x28, 0xba, 0x78, 0x73, 0x24, 0xfa, 0x08,\n\t0x6a, 0x09, 0x5f, 0x27, 0x2c, 0x41, 0xf5, 0xce, 0xff, 0x84, 0x50, 0x79, 0x25, 0x11, 0x99, 0x1c,\n\t0x87, 0x0e, 0xc1, 0x18, 0xd3, 0xd1, 0xeb, 0xd8, 0xa5, 0xe7, 0x53, 0x31, 0x8e, 0x09, 0x98, 0x23,\n\t0xc8, 0xd3, 0xc1, 0x98, 0xd1, 0xcb, 0xe6, 0xb4, 0x18, 0xb4, 0xa1, 0x5e, 0x40, 0x8a, 0x62, 0x4c,\n\t0xf4, 0x0e, 0xe8, 0x38, 0x0a, 0x1c, 0xc4, 0x30, 0xcd, 0xa5, 0x8a, 0x12, 0x18, 0xe5, 0x76, 0x2d,\n\t0xa8, 0x5d, 0x91, 0x95, 0x46, 0x98, 0xee, 0xaf, 0x3a, 0x98, 0x72, 0xd5, 0x1c, 0x96, 0x6a, 0x70,\n\t0x7b, 0xcd, 0x3b, 0x51, 0x16, 0xe1, 0xb0, 0x54, 0x84, 0xdb, 0xa5, 0x22, 0xa8, 0x50, 0x52, 0x85,\n\t0x47, 0xab, 0x55, 0x70, 0x56, 0xab, 0x20, 0x85, 0x94, 0x32, 0x7c, 0xba, 0x52, 0x86, 0x3b, 0x2b,\n\t0x65, 0x90, 0x72, 0x45, 0x1d, 0x3a, 0xcb, 0x75, 0xd8, 0x59, 0xae, 
0x83, 0x14, 0x92, 0x85, 0x78,\n\t0x98, 0x6f, 0xd9, 0x2a, 0x93, 0xd8, 0xce, 0x33, 0xa7, 0xae, 0x62, 0x9a, 0x65, 0x06, 0xfa, 0xcf,\n\t0xcb, 0x76, 0xf4, 0x36, 0x99, 0x01, 0xe2, 0xa9, 0x8f, 0x00, 0xaa, 0x67, 0x23, 0xaf, 0x7f, 0xfc,\n\t0xcc, 0xbe, 0x85, 0x2c, 0x30, 0xba, 0xc7, 0xa3, 0xaf, 0x4f, 0x6c, 0xed, 0xa8, 0x07, 0x96, 0x7c,\n\t0x57, 0x22, 0x13, 0x2a, 0xdd, 0xe1, 0xf0, 0x29, 0x41, 0xd4, 0x40, 0x1f, 0x9c, 0x8e, 0x6c, 0x8d,\n\t0x8a, 0xf5, 0x86, 0x2f, 0xba, 0x4f, 0xfb, 0xf6, 0x86, 0x50, 0x31, 0x38, 0xfd, 0xd6, 0xd6, 0x51,\n\t0x03, 0xcc, 0xde, 0x0b, 0xef, 0x78, 0x34, 0x18, 0x9e, 0xda, 0x95, 0x71, 0x95, 0xfd, 0xbf, 0x7c,\n\t0xfc, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0x71, 0xb7, 0xac, 0x48, 0xcc, 0x0c, 0x00, 0x00,\n}\n"
  },
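  {
    "path": "vendor/github.com/influxdata/kapacitor/udf/agent/oneof_usage_example.go",
    "content": "// +build ignore\n\n// Illustrative sketch, not part of the generated package: shows how the\n// generated oneof wrapper types in udf.pb.go are used. Building a Request\n// means wrapping the concrete message in its *Request_* struct; reading it\n// back goes through the typed Get* accessors, which return nil when a\n// different variant is set. The point data below is made up for the demo.\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/influxdata/kapacitor/udf/agent\"\n)\n\nfunc main() {\n\t// Wrap a Point in the Request oneof.\n\treq := &agent.Request{\n\t\tMessage: &agent.Request_Point{\n\t\t\tPoint: &agent.Point{\n\t\t\t\tName:         \"cpu\",\n\t\t\t\tTime:         1,\n\t\t\t\tFieldsDouble: map[string]float64{\"value\": 0.5},\n\t\t\t},\n\t\t},\n\t}\n\n\t// The typed accessor returns the Point only if that variant is set.\n\tif p := req.GetPoint(); p != nil {\n\t\tfmt.Println(\"point:\", p.Name)\n\t}\n\t// Accessors for the other variants return nil.\n\tfmt.Println(\"begin is nil:\", req.GetBegin() == nil)\n}\n"
  },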
  {
    "path": "vendor/github.com/influxdata/kapacitor/udf/agent/udf.proto",
    "content": "syntax = \"proto3\";\n\npackage agent;\n\n//------------------------------------------------------\n// RPC Messages for Kapacitor to communicate with\n// a child process or socket for data processing.\n//\n// Messages are streamed by writing a varint header\n// that contains the length of the following message.\n//\n// To decode the stream read a varint, then read\n// the determined size and decode as a protobuf message.\n// There is not footer so the next varint if any begins\n// right after the previous message.\n//\n//------------------------------------------------------\n// Management messages\n//\n// *Request messages are sent to the UDF from Kapacitor.\n// *Response messages are sent to Kapacitor from the UDF.\n//\n// While there is an obvious request/response structure for communicating,\n// there is a loose coupling between request and response.\n// Meaning that ordering or synchronizing STDIN and STDOUT in anyway\n// is not necessary.\n// For example if Kapacitor requests a snapshot and the\n// UDF is in the middle of writing a previous response or\n// data points those can continue. Eventually Kapacitor will receive\n// the snapshot response and act accordingly.\n//\n// A KeepaliveRequest/KeepaliveResponse system is used to ensure that\n// the process is responsive. Every time that a KeepaliveRequest is sent\n// a KeepaliveResponse must be returned within a timeout.\n// If the timeout is reached than the process is considered dead and will be terminated/restarted.\n//\n// It is recommend to disable buffering on the input and output sockets.\n// Some languages like python will automatically buffer the STDIN and STDOUT sockets.\n// To disable this behavior use the -u flag on the python interpreter.\n\n// Request that the process return information about available Options.\nmessage InfoRequest {\n}\n\nenum EdgeType {\n    STREAM  = 0;\n    BATCH   = 1;\n}\n\nmessage InfoResponse {\n    EdgeType wants = 1;\n    EdgeType provides = 2;\n    map<string, OptionInfo> options = 3;\n}\n\nenum ValueType {\n    BOOL     = 0;\n    INT      = 1;\n    DOUBLE   = 2;\n    STRING   = 3;\n    DURATION = 4;\n}\n\nmessage OptionInfo {\n    repeated ValueType valueTypes = 1;\n}\n\n// Request that the process initialize itself with the provided options.\nmessage InitRequest {\n    repeated Option options = 1;\n    string taskID           = 2;\n    string nodeID           = 3;\n}\n\nmessage Option {\n    string               name   = 1;\n    repeated OptionValue values = 2;\n}\n\nmessage OptionValue {\n    ValueType type = 1;\n    oneof value {\n        bool   boolValue     = 2;\n        int64  intValue      = 3;\n        double doubleValue   = 4;\n        string stringValue   = 5;\n        int64  durationValue = 6;\n    }\n}\n\n// Respond to Kapacitor whether initialization was successful.\nmessage InitResponse {\n    bool   success = 1;\n    string error   = 2;\n}\n\n// Request that the process provide a snapshot of its state.\nmessage SnapshotRequest {\n}\n\n// Respond to Kapacitor with a serialized snapshot of the running state.\nmessage SnapshotResponse {\n    bytes snapshot = 1;\n}\n\n// Request that the process restore its state from a snapshot.\nmessage RestoreRequest {\n    bytes snapshot = 1;\n}\n\n// Respond with success or failure to a RestoreRequest\nmessage RestoreResponse {\n    bool   success = 1;\n    string error   = 2;\n}\n\n\n// Request that the process respond with a Keepalive to verify it is responding.\nmessage KeepaliveRequest {\n    // The number of nanoseconds 
since the epoch.\n    // Used only for debugging keepalive requests.\n    int64 time = 1;\n}\n\n// Respond to KeepaliveRequest\nmessage KeepaliveResponse {\n    // The number of nanoseconds since the epoch.\n    // Used only for debugging keepalive requests.\n    int64 time = 1;\n}\n\n// Sent from the process to Kapacitor indicating an error has occurred.\n// If an ErrorResponse is received, Kapacitor will terminate the process.\nmessage ErrorResponse {\n    string error = 1;\n}\n\n//------------------------------------------------------\n// Data flow messages\n//\n// Sent and received by both the process and Kapacitor\n\n\n// Indicates the beginning of a batch.\n// All subsequent points should be considered\n// part of the batch until EndBatch arrives.\n// This includes grouping. Batches of\n// differing groups may not be interleaved.\n//\n// All the meta data but tmax is provided,\n// since tmax may not be known at\n// the beginning of a batch.\n//\n// Size is the number of points in the batch.\n// If size is 0 then the batch has an undetermined size.\nmessage BeginBatch {\n    string             name   = 1;\n    string             group  = 2;\n    map<string,string> tags   = 3;\n    int64              size   = 4;\n    bool               byName = 5;\n}\n\n// Message containing information about a single data point.\n// Can be sent on its own or bookended by BeginBatch and EndBatch messages.\nmessage Point {\n    int64              time            = 1;\n    string             name            = 2;\n    string             database        = 3;\n    string             retentionPolicy = 4;\n    string             group           = 5;\n    repeated string    dimensions      = 6;\n    map<string,string> tags            = 7;\n    map<string,double> fieldsDouble    = 8;\n    map<string,int64>  fieldsInt       = 9;\n    map<string,string> fieldsString    = 10;\n    map<string,bool>   fieldsBool      = 12;\n    bool               byName          = 11;\n}\n\n// Indicates the end of a batch and contains\n// all meta data associated with the batch.\n// The same meta information is provided for\n// ease of use with the addition of tmax since it\n// may not be known at BeginBatch.\nmessage EndBatch {\n    string             name   = 1;\n    string             group  = 2;\n    int64              tmax   = 3;\n    map<string,string> tags   = 4;\n    bool               byName = 5;\n}\n\n//-----------------------------------------------------------\n// Wrapper messages\n//\n// All messages sent over STDIN will be Request messages.\n// All messages sent over STDOUT must be Response messages.\n\n\n// Request message wrapper -- sent from Kapacitor to process\nmessage Request {\n    oneof message {\n        // Management requests\n        InfoRequest      info      = 1;\n        InitRequest      init      = 2;\n        KeepaliveRequest keepalive = 3;\n        SnapshotRequest  snapshot  = 4;\n        RestoreRequest   restore   = 5;\n\n        // Data flow requests\n        BeginBatch begin = 16;\n        Point      point = 17;\n        EndBatch   end   = 18;\n    }\n}\n\n// Response message wrapper -- sent from process to Kapacitor\nmessage Response {\n    oneof message {\n        // Management responses\n        InfoResponse      info      = 1;\n        InitResponse      init      = 2;\n        KeepaliveResponse keepalive = 3;\n        SnapshotResponse  snapshot  = 4;\n        RestoreResponse   restore   = 5;\n        ErrorResponse     error     = 6;\n\n        // Data flow responses\n        BeginBatch begin = 
16;\n        Point      point = 17;\n        EndBatch   end   = 18;\n    }\n}\n\n"
  },
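  {
    "path": "vendor/github.com/influxdata/kapacitor/udf/agent/wire_framing_example.go",
    "content": "// +build ignore\n\n// Illustrative sketch of the varint-framed protocol described in udf.proto:\n// a minimal process-side loop that reads length-prefixed Request messages\n// from STDIN and writes length-prefixed Response messages to STDOUT.\n// It reuses agent.ReadMessage/agent.WriteMessage exactly as udf/server.go\n// does on the Kapacitor side; their signatures here are inferred from that\n// usage, and only keepalives are handled to keep the sketch short.\npackage main\n\nimport (\n\t\"bufio\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com/influxdata/kapacitor/udf/agent\"\n)\n\nfunc main() {\n\t// bufio.Reader supplies the io.ByteReader needed to decode the varint\n\t// length header that precedes every message on the stream.\n\tin := bufio.NewReader(os.Stdin)\n\tvar buf []byte\n\n\tfor {\n\t\treq := &agent.Request{}\n\t\terr := agent.ReadMessage(&buf, in, req)\n\t\tif err == io.EOF {\n\t\t\treturn // Kapacitor closed STDIN; exit cleanly.\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\t// Echo keepalives back; a real UDF would also handle\n\t\t// Info/Init/Point/Begin/End requests here.\n\t\tif k := req.GetKeepalive(); k != nil {\n\t\t\tresp := &agent.Response{\n\t\t\t\tMessage: &agent.Response_Keepalive{\n\t\t\t\t\tKeepalive: &agent.KeepaliveResponse{Time: k.Time},\n\t\t\t\t},\n\t\t\t}\n\t\t\tif err := agent.WriteMessage(resp, os.Stdout); err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },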
  {
    "path": "vendor/github.com/influxdata/kapacitor/udf/server.go",
    "content": "package udf\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/models\"\n\t\"github.com/influxdata/kapacitor/udf/agent\"\n)\n\nvar ErrServerStopped = errors.New(\"server already stopped\")\n\n// Server provides an implementation for the core communication with UDFs.\n// The Server provides only a partial implementation of udf.Interface as\n// it is expected that setup and teardown will be necessary to create a Server.\n// As such the Open and Close methods are not implemented.\n//\n// Once a Server is created and started the owner can send points or batches\n// to the UDF by writing them to the PointIn or BatchIn channels respectively,\n// and according to the type of UDF created.\n//\n// The Server may be Aborted at anytime for various reasons. It is the owner's responsibility\n// via the abortCallback to stop writing to the *In channels since no more selects on the channels\n// will be performed.\n//\n// Calling Stop on the Server should only be done once the owner has stopped writing to the *In channel,\n// at which point the remaining data will be processed and the UDF will be allowed to clean up.\n//\n// Callling Info returns information about available options the UDF has.\n//\n// Calling Init is required to process data.\n// The behavior is undefined if you send points/batches to the Server without calling Init.\ntype Server struct {\n\n\t// If the processes is Aborted (via Keepalive timeout, etc.)\n\t// then no more data will be read off the *In channels.\n\t//\n\t// Optional callback if the process aborts.\n\t// It is the owners response\n\tabortCallback func()\n\tabortOnce     sync.Once\n\n\t// If abort fails after sometime this will be called\n\tkillCallback func()\n\n\tinMsg chan edge.Message\n\n\toutMsg chan edge.Message\n\n\tstopped  bool\n\tstopping chan struct{}\n\taborted  bool\n\taborting chan struct{}\n\t// The first error that occurred or nil\n\terr   error\n\terrMu sync.Mutex\n\n\trequests      chan *agent.Request\n\trequestsGroup sync.WaitGroup\n\n\tkeepalive        chan int64\n\tkeepaliveTimeout time.Duration\n\n\ttaskID string\n\tnodeID string\n\n\tin  agent.ByteReadReader\n\tout io.WriteCloser\n\n\t// Group for waiting on read/write goroutines\n\tioGroup sync.WaitGroup\n\n\tmu     sync.Mutex\n\tlogger *log.Logger\n\n\tresponseBuf []byte\n\n\tinfoResponse     chan *agent.Response\n\tinitResponse     chan *agent.Response\n\tsnapshotResponse chan *agent.Response\n\trestoreResponse  chan *agent.Response\n\n\t// Buffer up batch messages\n\tbegin  *agent.BeginBatch\n\tpoints []edge.BatchPointMessage\n}\n\nfunc NewServer(\n\ttaskID, nodeID string,\n\tin agent.ByteReadReader,\n\tout io.WriteCloser,\n\tl *log.Logger,\n\ttimeout time.Duration,\n\tabortCallback func(),\n\tkillCallback func(),\n) *Server {\n\ts := &Server{\n\t\ttaskID:           taskID,\n\t\tnodeID:           nodeID,\n\t\tin:               in,\n\t\tout:              out,\n\t\tlogger:           l,\n\t\trequests:         make(chan *agent.Request),\n\t\tkeepalive:        make(chan int64, 1),\n\t\tkeepaliveTimeout: timeout,\n\t\tabortCallback:    abortCallback,\n\t\tkillCallback:     killCallback,\n\t\tinMsg:            make(chan edge.Message),\n\t\toutMsg:           make(chan edge.Message),\n\t\tinfoResponse:     make(chan *agent.Response, 1),\n\t\tinitResponse:     make(chan *agent.Response, 1),\n\t\tsnapshotResponse: make(chan *agent.Response, 1),\n\t\trestoreResponse:  make(chan 
*agent.Response, 1),\n\t}\n\n\treturn s\n}\n\nfunc (s *Server) In() chan<- edge.Message {\n\treturn s.inMsg\n}\nfunc (s *Server) Out() <-chan edge.Message {\n\treturn s.outMsg\n}\n\nfunc (s *Server) setError(err error) {\n\ts.errMu.Lock()\n\tdefer s.errMu.Unlock()\n\tif s.err == nil {\n\t\ts.err = err\n\t}\n}\n\n// Start the Server\nfunc (s *Server) Start() error {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\ts.stopped = false\n\ts.stopping = make(chan struct{})\n\ts.aborted = false\n\ts.aborting = make(chan struct{})\n\n\ts.ioGroup.Add(1)\n\tgo func() {\n\t\terr := s.writeData()\n\t\tif err != nil {\n\t\t\ts.setError(err)\n\t\t\tdefer s.abort()\n\t\t}\n\t\ts.ioGroup.Done()\n\t}()\n\ts.ioGroup.Add(1)\n\tgo func() {\n\t\terr := s.readData()\n\t\tif err != nil {\n\t\t\ts.setError(err)\n\t\t\tdefer s.abort()\n\t\t}\n\t\ts.ioGroup.Done()\n\t}()\n\n\ts.requestsGroup.Add(2)\n\tgo s.runKeepalive()\n\tgo s.watchKeepalive()\n\n\treturn nil\n}\n\n// Abort the server.\n// Data in-flight will not be processed.\n// Give a reason for aborting via the err parameter.\nfunc (s *Server) Abort(err error) {\n\ts.setError(err)\n\ts.abort()\n}\n\nfunc (s *Server) abort() {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\tif s.aborted {\n\t\treturn\n\t}\n\ts.aborted = true\n\tclose(s.aborting)\n\tif s.abortCallback != nil {\n\t\ts.abortOnce.Do(s.abortCallback)\n\t}\n\t_ = s.stop()\n}\n\n// Wait for all IO to stop on the in/out objects.\nfunc (s *Server) WaitIO() {\n\ts.ioGroup.Wait()\n}\n\n// Stop the Server cleanly.\n//\n// Calling Stop should only be done once the owner has stopped writing to the *In channel,\n// at which point the remaining data will be processed and the subprocess will be allowed to exit cleanly.\nfunc (s *Server) Stop() error {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\treturn s.stop()\n}\n\n// internal stop function; you must acquire the lock before calling\nfunc (s *Server) stop() error {\n\n\tif s.stopped {\n\t\treturn s.err\n\t}\n\ts.stopped = true\n\n\tclose(s.stopping)\n\n\ts.requestsGroup.Wait()\n\n\tclose(s.requests)\n\n\tclose(s.inMsg)\n\n\ts.ioGroup.Wait()\n\n\t// Return the error that occurred first\n\ts.errMu.Lock()\n\tdefer s.errMu.Unlock()\n\treturn s.err\n}\n\ntype Info struct {\n\tWants    agent.EdgeType\n\tProvides agent.EdgeType\n\tOptions  map[string]*agent.OptionInfo\n}\n\n// Get information about the process, available options etc.\n// Info need not be called every time a process is started.\nfunc (s *Server) Info() (Info, error) {\n\tinfo := Info{}\n\treq := &agent.Request{Message: &agent.Request_Info{\n\t\tInfo: &agent.InfoRequest{},\n\t}}\n\n\tresp, err := s.doRequestResponse(req, s.infoResponse)\n\tif err != nil {\n\t\treturn info, err\n\t}\n\tri := resp.Message.(*agent.Response_Info).Info\n\tinfo.Options = ri.Options\n\tinfo.Wants = ri.Wants\n\tinfo.Provides = ri.Provides\n\n\treturn info, nil\n}\n\n// Initialize the process with a set of Options.\n// Calling Init is required even if you do not have any specific Options, just pass nil.\nfunc (s *Server) Init(options []*agent.Option) error {\n\treq := &agent.Request{Message: &agent.Request_Init{\n\t\tInit: &agent.InitRequest{\n\t\t\tOptions: options,\n\t\t\tTaskID:  s.taskID,\n\t\t\tNodeID:  s.nodeID,\n\t\t},\n\t}}\n\tresp, err := s.doRequestResponse(req, s.initResponse)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinit := resp.Message.(*agent.Response_Init).Init\n\tif !init.Success {\n\t\treturn fmt.Errorf(\"failed to initialize process: %s\", init.Error)\n\t}\n\treturn nil\n}\n\n// Request a snapshot from the process.\nfunc (s 
*Server) Snapshot() ([]byte, error) {\n\treq := &agent.Request{Message: &agent.Request_Snapshot{\n\t\tSnapshot: &agent.SnapshotRequest{},\n\t}}\n\tresp, err := s.doRequestResponse(req, s.snapshotResponse)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsnapshot := resp.Message.(*agent.Response_Snapshot).Snapshot.Snapshot\n\treturn snapshot, nil\n}\n\n// Request to restore a snapshot.\nfunc (s *Server) Restore(snapshot []byte) error {\n\treq := &agent.Request{Message: &agent.Request_Restore{\n\t\tRestore: &agent.RestoreRequest{Snapshot: snapshot},\n\t}}\n\tresp, err := s.doRequestResponse(req, s.restoreResponse)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trestore := resp.Message.(*agent.Response_Restore).Restore\n\tif !restore.Success {\n\t\treturn fmt.Errorf(\"error restoring snapshot: %s\", restore.Error)\n\t}\n\treturn nil\n}\n\nfunc (s *Server) doRequestResponse(req *agent.Request, respC chan *agent.Response) (*agent.Response, error) {\n\terr := func() error {\n\t\ts.mu.Lock()\n\t\tdefer s.mu.Unlock()\n\t\tif s.stopped {\n\t\t\tif s.err != nil {\n\t\t\t\treturn s.err\n\t\t\t}\n\t\t\treturn ErrServerStopped\n\t\t}\n\t\ts.requestsGroup.Add(1)\n\t\treturn nil\n\t}()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer s.requestsGroup.Done()\n\n\tselect {\n\tcase <-s.aborting:\n\t\treturn nil, s.err\n\tcase s.requests <- req:\n\t}\n\n\tselect {\n\tcase <-s.aborting:\n\t\treturn nil, s.err\n\tcase res := <-respC:\n\t\treturn res, nil\n\t}\n}\n\nfunc (s *Server) doResponse(response *agent.Response, respC chan *agent.Response) {\n\tselect {\n\tcase respC <- response:\n\tdefault:\n\t\ts.logger.Printf(\"E! received %T without requesting it\", response.Message)\n\t}\n}\n\n// Send a KeepaliveRequest on the specified interval.\nfunc (s *Server) runKeepalive() {\n\tdefer s.requestsGroup.Done()\n\tif s.keepaliveTimeout <= 0 {\n\t\treturn\n\t}\n\tticker := time.NewTicker(s.keepaliveTimeout / 2)\n\tdefer ticker.Stop()\n\tfor {\n\t\tselect {\n\t\tcase <-ticker.C:\n\t\t\treq := &agent.Request{Message: &agent.Request_Keepalive{\n\t\t\t\tKeepalive: &agent.KeepaliveRequest{\n\t\t\t\t\tTime: time.Now().UnixNano(),\n\t\t\t\t},\n\t\t\t}}\n\t\t\tselect {\n\t\t\tcase s.requests <- req:\n\t\t\tcase <-s.aborting:\n\t\t\t}\n\t\tcase <-s.stopping:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// Abort the process if a keepalive timeout is reached.\nfunc (s *Server) watchKeepalive() {\n\t// Defer functions are called LIFO.\n\t// We need to call s.abort after s.requestsGroup.Done, so we just set err here\n\t// and let this deferred function perform the abort.\n\tvar err error\n\tdefer func() {\n\t\tif err != nil {\n\t\t\ts.setError(err)\n\t\t\taborted := make(chan struct{})\n\t\t\tgo func() {\n\t\t\t\ttimeout := s.keepaliveTimeout * 2\n\t\t\t\tif timeout <= 0 {\n\t\t\t\t\ttimeout = time.Second\n\t\t\t\t}\n\t\t\t\ttime.Sleep(timeout)\n\t\t\t\tselect {\n\t\t\t\tcase <-aborted:\n\t\t\t\t\t// We aborted cleanly; the process is stopped.\n\t\t\t\tdefault:\n\t\t\t\t\t// We failed to abort; just kill it.\n\t\t\t\t\tif s.killCallback != nil {\n\t\t\t\t\t\ts.logger.Println(\"E! process not responding! 
killing\")\n\t\t\t\t\t\ts.killCallback()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t\ts.abort()\n\t\t\tclose(aborted)\n\t\t}\n\t}()\n\tdefer s.requestsGroup.Done()\n\t// If timeout is <= 0 then we don't ever timeout from keepalive,\n\t// but we need to receive from p.keepalive or handleResponse will block.\n\t// So we set a long timeout and then ignore it if its reached.\n\ttimeout := s.keepaliveTimeout\n\tif timeout <= 0 {\n\t\ttimeout = time.Hour\n\t}\n\tlast := time.Now().UnixNano()\n\tfor {\n\t\tselect {\n\t\tcase last = <-s.keepalive:\n\t\tcase <-time.After(timeout):\n\t\t\t// Ignore invalid timeout\n\t\t\tif s.keepaliveTimeout <= 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\terr = fmt.Errorf(\"keepalive timedout, last keepalive received was: %s\", time.Unix(0, last))\n\t\t\ts.logger.Println(\"E!\", err)\n\t\t\treturn\n\t\tcase <-s.stopping:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// Write Requests\nfunc (s *Server) writeData() error {\n\tdefer s.out.Close()\n\tvar begin edge.BeginBatchMessage\n\tfor {\n\t\tselect {\n\t\tcase m, ok := <-s.inMsg:\n\t\t\tif !ok {\n\t\t\t\ts.inMsg = nil\n\t\t\t}\n\t\t\tswitch msg := m.(type) {\n\t\t\tcase edge.PointMessage:\n\t\t\t\terr := s.writePoint(msg)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\tcase edge.BeginBatchMessage:\n\t\t\t\tbegin = msg\n\t\t\t\terr := s.writeBeginBatch(msg)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\tcase edge.BatchPointMessage:\n\t\t\t\terr := s.writeBatchPoint(begin.GroupID(), msg)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\tcase edge.EndBatchMessage:\n\t\t\t\terr := s.writeEndBatch(begin.Name(), begin.Time(), begin.GroupInfo(), msg)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\tcase edge.BufferedBatchMessage:\n\t\t\t\terr := s.writeBufferedBatch(msg)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\tcase req, ok := <-s.requests:\n\t\t\tif ok {\n\t\t\t\terr := s.writeRequest(req)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\ts.requests = nil\n\t\t\t}\n\t\tcase <-s.aborting:\n\t\t\treturn s.err\n\t\t}\n\t\tif s.inMsg == nil && s.requests == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (s *Server) writePoint(p edge.PointMessage) error {\n\tstrs, floats, ints, bools := s.fieldsToTypedMaps(p.Fields())\n\tudfPoint := &agent.Point{\n\t\tTime:            p.Time().UnixNano(),\n\t\tName:            p.Name(),\n\t\tDatabase:        p.Database(),\n\t\tRetentionPolicy: p.RetentionPolicy(),\n\t\tGroup:           string(p.GroupID()),\n\t\tDimensions:      p.Dimensions().TagNames,\n\t\tByName:          p.Dimensions().ByName,\n\t\tTags:            p.Tags(),\n\t\tFieldsDouble:    floats,\n\t\tFieldsInt:       ints,\n\t\tFieldsString:    strs,\n\t\tFieldsBool:      bools,\n\t}\n\treq := &agent.Request{\n\t\tMessage: &agent.Request_Point{Point: udfPoint},\n\t}\n\treturn s.writeRequest(req)\n}\n\nfunc (s *Server) fieldsToTypedMaps(fields models.Fields) (\n\tstrs map[string]string,\n\tfloats map[string]float64,\n\tints map[string]int64,\n\tbools map[string]bool,\n) {\n\tfor k, v := range fields {\n\t\tswitch value := v.(type) {\n\t\tcase string:\n\t\t\tif strs == nil {\n\t\t\t\tstrs = make(map[string]string)\n\t\t\t}\n\t\t\tstrs[k] = value\n\t\tcase float64:\n\t\t\tif floats == nil {\n\t\t\t\tfloats = make(map[string]float64)\n\t\t\t}\n\t\t\tfloats[k] = value\n\t\tcase int64:\n\t\t\tif ints == nil {\n\t\t\t\tints = make(map[string]int64)\n\t\t\t}\n\t\t\tints[k] = value\n\t\tcase bool:\n\t\t\tif bools == nil 
{\n\t\t\t\tbools = make(map[string]bool)\n\t\t\t}\n\t\t\tbools[k] = value\n\t\tdefault:\n\t\t\tpanic(\"unsupported field value type\")\n\t\t}\n\t}\n\treturn\n}\n\nfunc (s *Server) typeMapsToFields(\n\tstrs map[string]string,\n\tfloats map[string]float64,\n\tints map[string]int64,\n\tbools map[string]bool,\n) models.Fields {\n\tfields := make(models.Fields)\n\tfor k, v := range strs {\n\t\tfields[k] = v\n\t}\n\tfor k, v := range ints {\n\t\tfields[k] = v\n\t}\n\tfor k, v := range floats {\n\t\tfields[k] = v\n\t}\n\tfor k, v := range bools {\n\t\tfields[k] = v\n\t}\n\treturn fields\n}\n\nfunc (s *Server) writeBeginBatch(begin edge.BeginBatchMessage) error {\n\treq := &agent.Request{\n\t\tMessage: &agent.Request_Begin{\n\t\t\tBegin: &agent.BeginBatch{\n\t\t\t\tName:   begin.Name(),\n\t\t\t\tGroup:  string(begin.GroupID()),\n\t\t\t\tTags:   begin.Tags(),\n\t\t\t\tSize:   int64(begin.SizeHint()),\n\t\t\t\tByName: begin.Dimensions().ByName,\n\t\t\t}},\n\t}\n\treturn s.writeRequest(req)\n}\n\nfunc (s *Server) writeBatchPoint(group models.GroupID, bp edge.BatchPointMessage) error {\n\tstrs, floats, ints, bools := s.fieldsToTypedMaps(bp.Fields())\n\treq := &agent.Request{\n\t\tMessage: &agent.Request_Point{\n\t\t\tPoint: &agent.Point{\n\t\t\t\tTime:         bp.Time().UnixNano(),\n\t\t\t\tGroup:        string(group),\n\t\t\t\tTags:         bp.Tags(),\n\t\t\t\tFieldsDouble: floats,\n\t\t\t\tFieldsInt:    ints,\n\t\t\t\tFieldsString: strs,\n\t\t\t\tFieldsBool:   bools,\n\t\t\t},\n\t\t},\n\t}\n\treturn s.writeRequest(req)\n}\n\nfunc (s *Server) writeEndBatch(name string, tmax time.Time, groupInfo edge.GroupInfo, end edge.EndBatchMessage) error {\n\treq := &agent.Request{\n\t\tMessage: &agent.Request_End{\n\t\t\tEnd: &agent.EndBatch{\n\t\t\t\tName:  name,\n\t\t\t\tGroup: string(groupInfo.ID),\n\t\t\t\tTmax:  tmax.UnixNano(),\n\t\t\t\tTags:  groupInfo.Tags,\n\t\t\t},\n\t\t},\n\t}\n\treturn s.writeRequest(req)\n}\n\nfunc (s *Server) writeBufferedBatch(batch edge.BufferedBatchMessage) error {\n\tif err := s.writeBeginBatch(batch.Begin()); err != nil {\n\t\treturn err\n\t}\n\tfor _, bp := range batch.Points() {\n\t\tif err := s.writeBatchPoint(batch.GroupID(), bp); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn s.writeEndBatch(batch.Name(), batch.Time(), batch.GroupInfo(), batch.End())\n}\n\nfunc (s *Server) writeRequest(req *agent.Request) error {\n\terr := agent.WriteMessage(req, s.out)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"write error: %s\", err)\n\t}\n\treturn err\n}\n\n// Read Responses from STDOUT of the process.\nfunc (s *Server) readData() error {\n\tdefer func() {\n\t\tclose(s.outMsg)\n\t}()\n\tfor {\n\t\tresponse, err := s.readResponse()\n\t\tif err == io.EOF {\n\t\t\treturn nil\n\t\t}\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"read error: %s\", err)\n\t\t\treturn err\n\t\t}\n\t\terr = s.handleResponse(response)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n}\n\nfunc (s *Server) readResponse() (*agent.Response, error) {\n\tresponse := new(agent.Response)\n\terr := agent.ReadMessage(&s.responseBuf, s.in, response)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn response, nil\n}\n\nfunc (s *Server) handleResponse(response *agent.Response) error {\n\t// Always reset the keepalive timer since we received a response\n\tselect {\n\tcase s.keepalive <- time.Now().UnixNano():\n\tcase <-s.stopping:\n\t\t// No one is watching the keepalive anymore so we don't need to feed it,\n\t\t// but we still want to handle the response\n\tcase <-s.aborting:\n\t\treturn s.err\n\t}\n\t// 
handle response\n\tswitch msg := response.Message.(type) {\n\tcase *agent.Response_Keepalive:\n\t\t// Noop we already reset the keepalive timer\n\tcase *agent.Response_Info:\n\t\ts.doResponse(response, s.infoResponse)\n\tcase *agent.Response_Init:\n\t\ts.doResponse(response, s.initResponse)\n\tcase *agent.Response_Snapshot:\n\t\ts.doResponse(response, s.snapshotResponse)\n\tcase *agent.Response_Restore:\n\t\ts.doResponse(response, s.restoreResponse)\n\tcase *agent.Response_Error:\n\t\ts.logger.Println(\"E!\", msg.Error.Error)\n\t\treturn errors.New(msg.Error.Error)\n\tcase *agent.Response_Begin:\n\t\ts.begin = msg.Begin\n\t\ts.points = make([]edge.BatchPointMessage, 0, msg.Begin.Size)\n\tcase *agent.Response_Point:\n\t\tif s.points != nil {\n\t\t\tbp := edge.NewBatchPointMessage(\n\t\t\t\ts.typeMapsToFields(\n\t\t\t\t\tmsg.Point.FieldsString,\n\t\t\t\t\tmsg.Point.FieldsDouble,\n\t\t\t\t\tmsg.Point.FieldsInt,\n\t\t\t\t\tmsg.Point.FieldsBool,\n\t\t\t\t),\n\t\t\t\tmsg.Point.Tags,\n\t\t\t\ttime.Unix(0, msg.Point.Time).UTC(),\n\t\t\t)\n\t\t\ts.points = append(s.points, bp)\n\t\t} else {\n\t\t\tp := edge.NewPointMessage(\n\t\t\t\tmsg.Point.Name,\n\t\t\t\tmsg.Point.Database,\n\t\t\t\tmsg.Point.RetentionPolicy,\n\t\t\t\tmodels.Dimensions{ByName: msg.Point.ByName, TagNames: msg.Point.Dimensions},\n\t\t\t\ts.typeMapsToFields(\n\t\t\t\t\tmsg.Point.FieldsString,\n\t\t\t\t\tmsg.Point.FieldsDouble,\n\t\t\t\t\tmsg.Point.FieldsInt,\n\t\t\t\t\tmsg.Point.FieldsBool,\n\t\t\t\t),\n\t\t\t\tmsg.Point.Tags,\n\t\t\t\ttime.Unix(0, msg.Point.Time).UTC(),\n\t\t\t)\n\t\t\tselect {\n\t\t\tcase s.outMsg <- p:\n\t\t\tcase <-s.aborting:\n\t\t\t\treturn s.err\n\t\t\t}\n\t\t}\n\tcase *agent.Response_End:\n\t\tbegin := edge.NewBeginBatchMessage(\n\t\t\tmsg.End.Name,\n\t\t\tmsg.End.Tags,\n\t\t\ts.begin.ByName,\n\t\t\ttime.Unix(0, msg.End.Tmax).UTC(),\n\t\t\tlen(s.points),\n\t\t)\n\t\tbufferedBatch := edge.NewBufferedBatchMessage(\n\t\t\tbegin,\n\t\t\ts.points,\n\t\t\tedge.NewEndBatchMessage(),\n\t\t)\n\t\tselect {\n\t\tcase s.outMsg <- bufferedBatch:\n\t\tcase <-s.aborting:\n\t\t\treturn s.err\n\t\t}\n\t\ts.begin = nil\n\t\ts.points = nil\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"unexpected response message %T\", msg))\n\t}\n\treturn nil\n}\n"
  },
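  {
    "path": "examples/udf_typed_fields_sketch.md",
    "content": "# Sketch: typed field maps in the UDF protocol\n\nNOTE: this file is an illustrative sketch added alongside the vendored Kapacitor sources; it is not part of upstream Kapacitor.\n\nThe vendored `udf/server.go` converts a `models.Fields` map into four typed maps (string, float64, int64, bool) before sending a point over the UDF protocol, and reverses the conversion when reading a response. Below is a minimal, self-contained version of that split, using a plain `map[string]interface{}` in place of `models.Fields`:\n\n```go\npackage main\n\nimport \"fmt\"\n\n// fieldsToTypedMaps mirrors the split performed by udf.Server:\n// each supported value type goes into its own map, and a map is\n// only allocated once a value of that type is seen.\nfunc fieldsToTypedMaps(fields map[string]interface{}) (\n\tstrs map[string]string,\n\tfloats map[string]float64,\n\tints map[string]int64,\n\tbools map[string]bool,\n) {\n\tfor k, v := range fields {\n\t\tswitch value := v.(type) {\n\t\tcase string:\n\t\t\tif strs == nil {\n\t\t\t\tstrs = make(map[string]string)\n\t\t\t}\n\t\t\tstrs[k] = value\n\t\tcase float64:\n\t\t\tif floats == nil {\n\t\t\t\tfloats = make(map[string]float64)\n\t\t\t}\n\t\t\tfloats[k] = value\n\t\tcase int64:\n\t\t\tif ints == nil {\n\t\t\t\tints = make(map[string]int64)\n\t\t\t}\n\t\t\tints[k] = value\n\t\tcase bool:\n\t\t\tif bools == nil {\n\t\t\t\tbools = make(map[string]bool)\n\t\t\t}\n\t\t\tbools[k] = value\n\t\tdefault:\n\t\t\tpanic(\"unsupported field value type\")\n\t\t}\n\t}\n\treturn\n}\n\nfunc main() {\n\tfields := map[string]interface{}{\n\t\t\"msg\":   \"ok\",\n\t\t\"value\": 42.0,\n\t\t\"count\": int64(7),\n\t\t\"alive\": true,\n\t}\n\tstrs, floats, ints, bools := fieldsToTypedMaps(fields)\n\tfmt.Println(strs, floats, ints, bools)\n\t// map[msg:ok] map[value:42] map[count:7] map[alive:true]\n}\n```\n\nAn `int` (as opposed to `int64`) value hits the `panic` branch: only the four types above are representable in the protocol's `agent.Point` message.\n"
  },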
  {
    "path": "vendor/github.com/influxdata/kapacitor/udf/server_test.go",
    "content": "package udf_test\n\nimport (\n\t\"errors\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/models\"\n\t\"github.com/influxdata/kapacitor/udf\"\n\t\"github.com/influxdata/kapacitor/udf/agent\"\n\tudf_test \"github.com/influxdata/kapacitor/udf/test\"\n)\n\nfunc TestUDF_StartStop(t *testing.T) {\n\tu := udf_test.NewIO()\n\tl := log.New(os.Stderr, \"[TestUDF_StartStop] \", log.LstdFlags)\n\ts := udf.NewServer(\"testTask\", \"testNode\", u.Out(), u.In(), l, 0, nil, nil)\n\n\ts.Start()\n\n\tclose(u.Responses)\n\ts.Stop()\n\t// read all requests and wait till the chan is closed\n\tfor range u.Requests {\n\t}\n\tif err := <-u.ErrC; err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestUDF_StartInitStop(t *testing.T) {\n\tu := udf_test.NewIO()\n\tl := log.New(os.Stderr, \"[TestUDF_StartStop] \", log.LstdFlags)\n\ts := udf.NewServer(\"testTask\", \"testNode\", u.Out(), u.In(), l, 0, nil, nil)\n\tgo func() {\n\t\treq := <-u.Requests\n\t\t_, ok := req.Message.(*agent.Request_Init)\n\t\tif !ok {\n\t\t\tt.Errorf(\"expected init message got %T\", req.Message)\n\t\t}\n\t\tres := &agent.Response{\n\t\t\tMessage: &agent.Response_Init{\n\t\t\t\tInit: &agent.InitResponse{\n\t\t\t\t\tSuccess: true,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tu.Responses <- res\n\t\tclose(u.Responses)\n\t}()\n\n\ts.Start()\n\terr := s.Init(nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ts.Stop()\n\t// read all requests and wait till the chan is closed\n\tfor range u.Requests {\n\t}\n\tif err := <-u.ErrC; err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestUDF_StartInitAbort(t *testing.T) {\n\tu := udf_test.NewIO()\n\tl := log.New(os.Stderr, \"[TestUDF_StartInfoAbort] \", log.LstdFlags)\n\ts := udf.NewServer(\"testTask\", \"testNode\", u.Out(), u.In(), l, 0, nil, nil)\n\ts.Start()\n\texpErr := errors.New(\"explicit abort\")\n\tgo func() {\n\t\treq := <-u.Requests\n\t\t_, ok := req.Message.(*agent.Request_Init)\n\t\tif !ok {\n\t\t\tt.Error(\"expected init message\")\n\t\t}\n\t\ts.Abort(expErr)\n\t\tclose(u.Responses)\n\t}()\n\terr := s.Init(nil)\n\tif err != expErr {\n\t\tt.Fatal(\"expected explicit abort error\")\n\t}\n}\n\nfunc TestUDF_StartInfoStop(t *testing.T) {\n\tu := udf_test.NewIO()\n\tl := log.New(os.Stderr, \"[TestUDF_StartInfoStop] \", log.LstdFlags)\n\ts := udf.NewServer(\"testTask\", \"testNode\", u.Out(), u.In(), l, 0, nil, nil)\n\tgo func() {\n\t\treq := <-u.Requests\n\t\t_, ok := req.Message.(*agent.Request_Info)\n\t\tif !ok {\n\t\t\tt.Errorf(\"expected info message got %T\", req.Message)\n\t\t}\n\t\tres := &agent.Response{\n\t\t\tMessage: &agent.Response_Info{\n\t\t\t\tInfo: &agent.InfoResponse{\n\t\t\t\t\tWants:    agent.EdgeType_STREAM,\n\t\t\t\t\tProvides: agent.EdgeType_BATCH,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tu.Responses <- res\n\t\tclose(u.Responses)\n\t}()\n\ts.Start()\n\tinfo, err := s.Info()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif exp, got := agent.EdgeType_STREAM, info.Wants; got != exp {\n\t\tt.Errorf(\"unexpected info.Wants got %v exp %v\", got, exp)\n\t}\n\tif exp, got := agent.EdgeType_BATCH, info.Provides; got != exp {\n\t\tt.Errorf(\"unexpected info.Provides got %v exp %v\", got, exp)\n\t}\n\n\ts.Stop()\n\t// read all requests and wait till the chan is closed\n\tfor range u.Requests {\n\t}\n\tif err := <-u.ErrC; err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestUDF_StartInfoAbort(t *testing.T) {\n\tu := udf_test.NewIO()\n\tl := log.New(os.Stderr, \"[TestUDF_StartInfoAbort] \", 
log.LstdFlags)\n\ts := udf.NewServer(\"testTask\", \"testNode\", u.Out(), u.In(), l, 0, nil, nil)\n\ts.Start()\n\texpErr := errors.New(\"explicit abort\")\n\tgo func() {\n\t\treq := <-u.Requests\n\t\t_, ok := req.Message.(*agent.Request_Info)\n\t\tif !ok {\n\t\t\tt.Error(\"expected info message\")\n\t\t}\n\t\ts.Abort(expErr)\n\t\tclose(u.Responses)\n\t}()\n\t_, err := s.Info()\n\tif err != expErr {\n\t\tt.Fatal(\"expected ErrUDFProcessAborted\")\n\t}\n}\n\nfunc TestUDF_Keepalive(t *testing.T) {\n\tt.Parallel()\n\tu := udf_test.NewIO()\n\tl := log.New(os.Stderr, \"[TestUDF_Keepalive] \", log.LstdFlags)\n\ts := udf.NewServer(\"testTask\", \"testNode\", u.Out(), u.In(), l, time.Millisecond*100, nil, nil)\n\ts.Start()\n\ts.Init(nil)\n\treq := <-u.Requests\n\t_, ok := req.Message.(*agent.Request_Init)\n\tif !ok {\n\t\tt.Error(\"expected init message\")\n\t}\n\tselect {\n\tcase req = <-u.Requests:\n\tcase <-time.After(time.Second):\n\t\tt.Fatal(\"expected keepalive message\")\n\t}\n\tif req == nil {\n\t\tt.Fatal(\"expected keepalive message got nil, u was killed.\")\n\t}\n\t_, ok = req.Message.(*agent.Request_Keepalive)\n\tif !ok {\n\t\tt.Errorf(\"expected keepalive message got %T\", req.Message)\n\t}\n\n\tclose(u.Responses)\n\ts.Stop()\n\t// read all requests and wait till the chan is closed\n\tfor range u.Requests {\n\t}\n\tif err := <-u.ErrC; err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestUDF_MissedKeepalive(t *testing.T) {\n\tt.Parallel()\n\tabortCalled := make(chan struct{})\n\taborted := func() {\n\t\tclose(abortCalled)\n\t}\n\n\tu := udf_test.NewIO()\n\tl := log.New(os.Stderr, \"[TestUDF_MissedKeepalive] \", log.LstdFlags)\n\ts := udf.NewServer(\"testTask\", \"testNode\", u.Out(), u.In(), l, time.Millisecond*100, aborted, nil)\n\ts.Start()\n\n\t// Since the keepalive is missed, the process should abort on its own.\n\tfor range u.Requests {\n\t}\n\n\tselect {\n\tcase <-abortCalled:\n\tcase <-time.After(time.Second):\n\t\tt.Error(\"expected abort callback to be called\")\n\t}\n\n\tclose(u.Responses)\n\tif err := <-u.ErrC; err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestUDF_KillCallBack(t *testing.T) {\n\tt.Parallel()\n\ttimeout := time.Millisecond * 100\n\tabortCalled := make(chan struct{})\n\tkillCalled := make(chan struct{})\n\taborted := func() {\n\t\ttime.Sleep(timeout * 3)\n\t\tclose(abortCalled)\n\t}\n\tkill := func() {\n\t\tclose(killCalled)\n\t}\n\n\tu := udf_test.NewIO()\n\tl := log.New(os.Stderr, \"[TestUDF_MissedKeepalive] \", log.LstdFlags)\n\ts := udf.NewServer(\"testTask\", \"testNode\", u.Out(), u.In(), l, timeout, aborted, kill)\n\ts.Start()\n\n\t// Since the keepalive is missed, the process should abort on its own.\n\tfor range u.Requests {\n\t}\n\n\t// Since abort takes a long time killCallback should be called\n\tselect {\n\tcase <-killCalled:\n\tcase <-time.After(time.Second):\n\t\tt.Error(\"expected kill callback to be called\")\n\t}\n\n\tclose(u.Responses)\n\tif err := <-u.ErrC; err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestUDF_MissedKeepaliveInit(t *testing.T) {\n\tt.Parallel()\n\tabortCalled := make(chan struct{})\n\taborted := func() {\n\t\tclose(abortCalled)\n\t}\n\n\tu := udf_test.NewIO()\n\tl := log.New(os.Stderr, \"[TestUDF_MissedKeepaliveInit] \", log.LstdFlags)\n\ts := udf.NewServer(\"testTask\", \"testNode\", u.Out(), u.In(), l, time.Millisecond*100, aborted, nil)\n\ts.Start()\n\ts.Init(nil)\n\n\t// Since the keepalive is missed, the process should abort on its own.\n\tfor range u.Requests {\n\t}\n\n\tselect {\n\tcase <-abortCalled:\n\tcase 
<-time.After(time.Second):\n\t\tt.Error(\"expected abort callback to be called\")\n\t}\n\tclose(u.Responses)\n\tif err := <-u.ErrC; err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestUDF_MissedKeepaliveInfo(t *testing.T) {\n\tt.Parallel()\n\tabortCalled := make(chan struct{})\n\taborted := func() {\n\t\tclose(abortCalled)\n\t}\n\n\tu := udf_test.NewIO()\n\tl := log.New(os.Stderr, \"[TestUDF_MissedKeepaliveInfo] \", log.LstdFlags)\n\ts := udf.NewServer(\"testTask\", \"testNode\", u.Out(), u.In(), l, time.Millisecond*100, aborted, nil)\n\ts.Start()\n\ts.Info()\n\n\t// Since the keepalive is missed, the process should abort on its own.\n\tfor range u.Requests {\n\t}\n\n\tselect {\n\tcase <-abortCalled:\n\tcase <-time.After(time.Second):\n\t\tt.Error(\"expected abort callback to be called\")\n\t}\n\tclose(u.Responses)\n\tif err := <-u.ErrC; err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestUDF_SnapshotRestore(t *testing.T) {\n\tu := udf_test.NewIO()\n\tl := log.New(os.Stderr, \"[TestUDF_SnapshotRestore] \", log.LstdFlags)\n\ts := udf.NewServer(\"testTask\", \"testNode\", u.Out(), u.In(), l, 0, nil, nil)\n\tgo func() {\n\t\t// Init\n\t\treq := <-u.Requests\n\t\t_, ok := req.Message.(*agent.Request_Init)\n\t\tif !ok {\n\t\t\tt.Error(\"expected init message\")\n\t\t}\n\t\tu.Responses <- &agent.Response{\n\t\t\tMessage: &agent.Response_Init{\n\t\t\t\tInit: &agent.InitResponse{Success: true},\n\t\t\t},\n\t\t}\n\t\t// Snapshot\n\t\treq = <-u.Requests\n\t\tif req == nil {\n\t\t\tt.Fatal(\"expected snapshot message got nil\")\n\t\t}\n\t\t_, ok = req.Message.(*agent.Request_Snapshot)\n\t\tif !ok {\n\t\t\tt.Errorf(\"expected snapshot message got %T\", req.Message)\n\t\t}\n\t\tdata := []byte{42}\n\t\tu.Responses <- &agent.Response{\n\t\t\tMessage: &agent.Response_Snapshot{\n\t\t\t\tSnapshot: &agent.SnapshotResponse{Snapshot: data},\n\t\t\t},\n\t\t}\n\t\t// Restore\n\t\treq = <-u.Requests\n\t\tif req == nil {\n\t\t\tt.Fatal(\"expected restore message got nil\")\n\t\t}\n\t\trestore, ok := req.Message.(*agent.Request_Restore)\n\t\tif !ok {\n\t\t\tt.Errorf(\"expected restore message got %T\", req.Message)\n\t\t}\n\t\tif !reflect.DeepEqual(data, restore.Restore.Snapshot) {\n\t\t\tt.Errorf(\"unexpected restore snapshot got %v exp %v\", restore.Restore.Snapshot, data)\n\t\t}\n\t\tu.Responses <- &agent.Response{\n\t\t\tMessage: &agent.Response_Restore{\n\t\t\t\tRestore: &agent.RestoreResponse{Success: true},\n\t\t\t},\n\t\t}\n\t\tclose(u.Responses)\n\t}()\n\ts.Start()\n\ts.Init(nil)\n\tsnapshot, err := s.Snapshot()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = s.Restore(snapshot)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ts.Stop()\n\t// read all requests and wait till the chan is closed\n\tfor range u.Requests {\n\t}\n\tif err := <-u.ErrC; err != nil {\n\t\tt.Error(err)\n\t}\n}\nfunc TestUDF_StartInitPointStop(t *testing.T) {\n\tu := udf_test.NewIO()\n\tl := log.New(os.Stderr, \"[TestUDF_StartPointStop] \", log.LstdFlags)\n\ts := udf.NewServer(\"testTask\", \"testNode\", u.Out(), u.In(), l, 0, nil, nil)\n\tgo func() {\n\t\treq := <-u.Requests\n\t\t_, ok := req.Message.(*agent.Request_Init)\n\t\tif !ok {\n\t\t\tt.Errorf(\"expected init message got %T\", req.Message)\n\t\t}\n\t\tres := &agent.Response{\n\t\t\tMessage: &agent.Response_Init{\n\t\t\t\tInit: &agent.InitResponse{\n\t\t\t\t\tSuccess: true,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tu.Responses <- res\n\t\treq = <-u.Requests\n\t\tpt, ok := req.Message.(*agent.Request_Point)\n\t\tif !ok {\n\t\t\tt.Errorf(\"expected point message got %T\", 
req.Message)\n\t\t}\n\t\tres = &agent.Response{\n\t\t\tMessage: &agent.Response_Point{\n\t\t\t\tPoint: pt.Point,\n\t\t\t},\n\t\t}\n\t\tu.Responses <- res\n\t\tclose(u.Responses)\n\t}()\n\n\ts.Start()\n\terr := s.Init(nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Write point to server\n\tp := edge.NewPointMessage(\n\t\t\"test\",\n\t\t\"db\",\n\t\t\"rp\",\n\t\tmodels.Dimensions{},\n\t\tmodels.Fields{\"f1\": 1.0, \"f2\": 2.0},\n\t\tmodels.Tags{\"t1\": \"v1\", \"t2\": \"v2\"},\n\t\ttime.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC),\n\t)\n\ts.In() <- p\n\trp := <-s.Out()\n\tif !reflect.DeepEqual(rp, p) {\n\t\tt.Errorf(\"unexpected returned point got: %v exp %v\", rp, p)\n\t}\n\n\ts.Stop()\n\t// read all requests and wait till the chan is closed\n\tfor range u.Requests {\n\t}\n\tif err := <-u.ErrC; err != nil {\n\t\tt.Error(err)\n\t}\n}\nfunc TestUDF_StartInitBatchStop(t *testing.T) {\n\tu := udf_test.NewIO()\n\tl := log.New(os.Stderr, \"[TestUDF_StartPointStop] \", log.LstdFlags)\n\ts := udf.NewServer(\"testTask\", \"testNode\", u.Out(), u.In(), l, 0, nil, nil)\n\tgo func() {\n\t\treq := <-u.Requests\n\t\t_, ok := req.Message.(*agent.Request_Init)\n\t\tif !ok {\n\t\t\tt.Errorf(\"expected init message got %T\", req.Message)\n\t\t}\n\t\tres := &agent.Response{\n\t\t\tMessage: &agent.Response_Init{\n\t\t\t\tInit: &agent.InitResponse{\n\t\t\t\t\tSuccess: true,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tu.Responses <- res\n\t\t// Begin batch\n\t\treq = <-u.Requests\n\t\tbb, ok := req.Message.(*agent.Request_Begin)\n\t\tif !ok {\n\t\t\tt.Errorf(\"expected begin message got %T\", req.Message)\n\t\t}\n\t\tres = &agent.Response{\n\t\t\tMessage: &agent.Response_Begin{\n\t\t\t\tBegin: bb.Begin,\n\t\t\t},\n\t\t}\n\t\tu.Responses <- res\n\n\t\t// Point\n\t\treq = <-u.Requests\n\t\tpt, ok := req.Message.(*agent.Request_Point)\n\t\tif !ok {\n\t\t\tt.Errorf(\"expected point message got %T\", req.Message)\n\t\t}\n\t\tres = &agent.Response{\n\t\t\tMessage: &agent.Response_Point{\n\t\t\t\tPoint: pt.Point,\n\t\t\t},\n\t\t}\n\t\tu.Responses <- res\n\n\t\t// End batch\n\t\treq = <-u.Requests\n\t\teb, ok := req.Message.(*agent.Request_End)\n\t\tif !ok {\n\t\t\tt.Errorf(\"expected end message got %T\", req.Message)\n\t\t}\n\t\tres = &agent.Response{\n\t\t\tMessage: &agent.Response_End{\n\t\t\t\tEnd: eb.End,\n\t\t\t},\n\t\t}\n\t\tu.Responses <- res\n\t\tclose(u.Responses)\n\t}()\n\n\ts.Start()\n\terr := s.Init(nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Write point to server\n\tb := edge.NewBufferedBatchMessage(\n\t\tedge.NewBeginBatchMessage(\n\t\t\t\"test\",\n\t\t\tmodels.Tags{\"t1\": \"v1\"},\n\t\t\tfalse,\n\t\t\ttime.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t1,\n\t\t),\n\t\t[]edge.BatchPointMessage{\n\t\t\tedge.NewBatchPointMessage(\n\t\t\t\tmodels.Fields{\"f1\": 1.0, \"f2\": 2.0, \"f3\": int64(1), \"f4\": \"str\"},\n\t\t\t\tmodels.Tags{\"t1\": \"v1\", \"t2\": \"v2\"},\n\t\t\t\ttime.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t),\n\t\t},\n\t\tedge.NewEndBatchMessage(),\n\t)\n\ts.In() <- b\n\trb := <-s.Out()\n\tif !reflect.DeepEqual(b, rb) {\n\t\tt.Errorf(\"unexpected returned batch got: %v exp %v\", rb, b)\n\t}\n\n\ts.Stop()\n\t// read all requests and wait till the chan is closed\n\tfor range u.Requests {\n\t}\n\tif err := <-u.ErrC; err != nil {\n\t\tt.Error(err)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/udf/udf.go",
    "content": "package udf\n\nimport (\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/udf/agent\"\n)\n\n// Interface for communicating with a UDF\ntype Interface interface {\n\tOpen() error\n\tInfo() (Info, error)\n\tInit(options []*agent.Option) error\n\tAbort(err error)\n\tClose() error\n\n\tSnapshot() ([]byte, error)\n\tRestore(snapshot []byte) error\n\n\tIn() chan<- edge.Message\n\tOut() <-chan edge.Message\n}\n"
  },
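  {
    "path": "examples/udf_interface_lifecycle_sketch.md",
    "content": "# Sketch: driving a udf.Interface\n\nNOTE: this file is an illustrative sketch added alongside the vendored Kapacitor sources; it is not part of upstream Kapacitor.\n\nThe vendored `udf/udf.go` defines `udf.Interface`, the contract both `UDFProcess` and `UDFSocket` satisfy. Based on how `UDFNode.runUDF` (in the vendored `kapacitor/udf.go`) drives it, the call order is: `Open`, `Init`, stream messages through `In()`/`Out()`, then `Close` once writing has stopped. The `drive` helper below is hypothetical:\n\n```go\npackage main\n\nimport (\n\t\"log\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/udf\"\n)\n\n// drive shows the lifecycle order. Out() must be consumed concurrently\n// so that writes to In() cannot deadlock, and Close is only safe once\n// the caller has stopped writing to In().\nfunc drive(u udf.Interface, in <-chan edge.Message) error {\n\tif err := u.Open(); err != nil {\n\t\treturn err\n\t}\n\tif err := u.Init(nil); err != nil {\n\t\treturn err\n\t}\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tdefer close(done)\n\t\tfor m := range u.Out() {\n\t\t\tlog.Println(\"I! received message type\", m.Type())\n\t\t}\n\t}()\n\tfor m := range in {\n\t\tu.In() <- m\n\t}\n\tif err := u.Close(); err != nil {\n\t\treturn err\n\t}\n\t<-done\n\treturn nil\n}\n\nfunc main() {\n\t// A concrete udf.Interface (UDFProcess or UDFSocket) is required here;\n\t// see the vendored kapacitor/udf.go for the constructors.\n}\n```\n\n`Abort(err)` is the escape hatch: it unblocks pending calls with the given error when the subprocess or socket misbehaves.\n"
  },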
  {
    "path": "vendor/github.com/influxdata/kapacitor/udf.go",
    "content": "package kapacitor\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/cenkalti/backoff\"\n\t\"github.com/influxdata/kapacitor/command\"\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n\t\"github.com/influxdata/kapacitor/udf\"\n\t\"github.com/influxdata/kapacitor/udf/agent\"\n\t\"github.com/pkg/errors\"\n)\n\n// User defined function\ntype UDFNode struct {\n\tnode\n\tu       *pipeline.UDFNode\n\tudf     udf.Interface\n\taborted chan struct{}\n\n\twg      sync.WaitGroup\n\tmu      sync.Mutex\n\tstopped bool\n}\n\n// Create a new UDFNode that sends incoming data to child udf\nfunc newUDFNode(et *ExecutingTask, n *pipeline.UDFNode, l *log.Logger) (*UDFNode, error) {\n\tun := &UDFNode{\n\t\tnode:    node{Node: n, et: et, logger: l},\n\t\tu:       n,\n\t\taborted: make(chan struct{}),\n\t}\n\t// Create the UDF\n\tf, err := et.tm.UDFService.Create(\n\t\tn.UDFName,\n\t\tet.Task.ID,\n\t\tn.Name(),\n\t\tl,\n\t\tun.abortedCallback,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tun.udf = f\n\tun.node.runF = un.runUDF\n\tun.node.stopF = un.stopUDF\n\treturn un, nil\n}\n\nvar errNodeAborted = errors.New(\"node aborted\")\n\nfunc (n *UDFNode) stopUDF() {\n\tn.mu.Lock()\n\tdefer n.mu.Unlock()\n\tif !n.stopped {\n\t\tn.stopped = true\n\t\tif n.udf != nil {\n\t\t\tn.udf.Abort(errNodeAborted)\n\t\t}\n\t}\n}\n\nfunc (n *UDFNode) runUDF(snapshot []byte) (err error) {\n\tdefer func() {\n\t\tn.mu.Lock()\n\t\tdefer n.mu.Unlock()\n\t\t//Ignore stopped errors if the udf was stopped externally\n\t\tif n.stopped && (err == udf.ErrServerStopped || err == errNodeAborted) {\n\t\t\terr = nil\n\t\t}\n\t\tn.stopped = true\n\t}()\n\n\tif err := n.udf.Open(); err != nil {\n\t\treturn err\n\t}\n\tif err := n.udf.Init(n.u.Options); err != nil {\n\t\treturn err\n\t}\n\tif snapshot != nil {\n\t\tif err := n.udf.Restore(snapshot); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tforwardErr := make(chan error, 1)\n\tgo func() {\n\t\tout := n.udf.Out()\n\t\tfor m := range out {\n\t\t\tif err := edge.Forward(n.outs, m); err != nil {\n\t\t\t\tforwardErr <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tforwardErr <- nil\n\t}()\n\n\t// The abort callback needs to know when we are done writing\n\t// so we wrap in a wait group.\n\tn.wg.Add(1)\n\tgo func() {\n\t\tdefer n.wg.Done()\n\t\tin := n.udf.In()\n\t\tfor m, ok := n.ins[0].Emit(); ok; m, ok = n.ins[0].Emit() {\n\t\t\tn.timer.Start()\n\t\t\tselect {\n\t\t\tcase in <- m:\n\t\t\tcase <-n.aborted:\n\t\t\t\treturn\n\t\t\t}\n\t\t\tn.timer.Stop()\n\t\t}\n\t}()\n\n\t// wait till we are done writing\n\tn.wg.Wait()\n\n\t// Close the udf\n\tif err := n.udf.Close(); err != nil {\n\t\treturn err\n\t}\n\n\t// Wait/Return any error from the forwarding goroutine\n\treturn <-forwardErr\n}\n\nfunc (n *UDFNode) abortedCallback() {\n\tclose(n.aborted)\n\t// wait till we are done writing\n\tn.wg.Wait()\n}\n\nfunc (n *UDFNode) snapshot() ([]byte, error) {\n\treturn n.udf.Snapshot()\n}\n\n// UDFProcess wraps an external process and sends and receives data\n// over STDIN and STDOUT. 
Lines received over STDERR are logged\n// via normal Kapacitor logging.\ntype UDFProcess struct {\n\ttaskName string\n\tnodeName string\n\n\tserver    *udf.Server\n\tcommander command.Commander\n\tcmdSpec   command.Spec\n\tcmd       command.Command\n\n\tstderr io.Reader\n\n\t// Group for waiting on the process itself\n\tprocessGroup   sync.WaitGroup\n\tlogStdErrGroup sync.WaitGroup\n\n\tmu sync.Mutex\n\n\tlogger        *log.Logger\n\ttimeout       time.Duration\n\tabortCallback func()\n}\n\nfunc NewUDFProcess(\n\ttaskName, nodeName string,\n\tcommander command.Commander,\n\tcmdSpec command.Spec,\n\tl *log.Logger,\n\ttimeout time.Duration,\n\tabortCallback func(),\n) *UDFProcess {\n\treturn &UDFProcess{\n\t\ttaskName:      taskName,\n\t\tnodeName:      nodeName,\n\t\tcommander:     commander,\n\t\tcmdSpec:       cmdSpec,\n\t\tlogger:        l,\n\t\ttimeout:       timeout,\n\t\tabortCallback: abortCallback,\n\t}\n}\n\n// Open the UDFProcess\nfunc (p *UDFProcess) Open() error {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tcmd := p.commander.NewCommand(p.cmdSpec)\n\tstdin, err := cmd.StdinPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.stderr = stderr\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\tp.cmd = cmd\n\n\toutBuf := bufio.NewReader(stdout)\n\n\tp.server = udf.NewServer(\n\t\tp.taskName,\n\t\tp.nodeName,\n\t\toutBuf,\n\t\tstdin,\n\t\tp.logger,\n\t\tp.timeout,\n\t\tp.abortCallback,\n\t\tcmd.Kill,\n\t)\n\tif err := p.server.Start(); err != nil {\n\t\treturn err\n\t}\n\n\tp.logStdErrGroup.Add(1)\n\tgo p.logStdErr()\n\n\t// Wait for process to terminate\n\tp.processGroup.Add(1)\n\tgo func() {\n\t\t// First wait for the pipe read writes to finish\n\t\tp.logStdErrGroup.Wait()\n\t\tp.server.WaitIO()\n\t\terr := cmd.Wait()\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"process exited unexpectedly: %v\", err)\n\t\t\tdefer p.server.Abort(err)\n\t\t}\n\t\tp.processGroup.Done()\n\t}()\n\n\treturn nil\n}\n\n// Stop the UDFProcess cleanly.\n//\n// Calling Close should only be done once the owner has stopped writing to the *In channel,\n// at which point the remaining data will be processed and the subprocess will be allowed to exit cleanly.\nfunc (p *UDFProcess) Close() error {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\terr := p.server.Stop()\n\tp.processGroup.Wait()\n\treturn err\n}\n\n// Replay any lines from STDERR of the process to the Kapacitor log.\nfunc (p *UDFProcess) logStdErr() {\n\tdefer p.logStdErrGroup.Done()\n\tscanner := bufio.NewScanner(p.stderr)\n\tfor scanner.Scan() {\n\t\tp.logger.Println(\"I!P\", scanner.Text())\n\t}\n}\n\nfunc (p *UDFProcess) Abort(err error)                    { p.server.Abort(err) }\nfunc (p *UDFProcess) Init(options []*agent.Option) error { return p.server.Init(options) }\nfunc (p *UDFProcess) Snapshot() ([]byte, error)          { return p.server.Snapshot() }\nfunc (p *UDFProcess) Restore(snapshot []byte) error      { return p.server.Restore(snapshot) }\nfunc (p *UDFProcess) In() chan<- edge.Message            { return p.server.In() }\nfunc (p *UDFProcess) Out() <-chan edge.Message           { return p.server.Out() }\nfunc (p *UDFProcess) Info() (udf.Info, error)            { return p.server.Info() }\n\ntype UDFSocket struct {\n\ttaskName string\n\tnodeName string\n\n\tserver *udf.Server\n\tsocket Socket\n\n\tlogger        *log.Logger\n\ttimeout       time.Duration\n\tabortCallback 
func()\n}\n\ntype Socket interface {\n\tOpen() error\n\tClose() error\n\tIn() io.WriteCloser\n\tOut() io.Reader\n}\n\nfunc NewUDFSocket(\n\ttaskName, nodeName string,\n\tsocket Socket,\n\tl *log.Logger,\n\ttimeout time.Duration,\n\tabortCallback func(),\n) *UDFSocket {\n\treturn &UDFSocket{\n\t\ttaskName:      taskName,\n\t\tnodeName:      nodeName,\n\t\tsocket:        socket,\n\t\tlogger:        l,\n\t\ttimeout:       timeout,\n\t\tabortCallback: abortCallback,\n\t}\n}\n\nfunc (s *UDFSocket) Open() error {\n\terr := s.socket.Open()\n\tif err != nil {\n\t\treturn err\n\t}\n\tin := s.socket.In()\n\tout := s.socket.Out()\n\toutBuf := bufio.NewReader(out)\n\n\ts.server = udf.NewServer(\n\t\ts.taskName,\n\t\ts.nodeName,\n\t\toutBuf,\n\t\tin,\n\t\ts.logger,\n\t\ts.timeout,\n\t\ts.abortCallback,\n\t\tfunc() { s.socket.Close() },\n\t)\n\treturn s.server.Start()\n}\n\nfunc (s *UDFSocket) Close() error {\n\tif err := s.server.Stop(); err != nil {\n\t\t// Always close the socket\n\t\ts.socket.Close()\n\t\treturn errors.Wrap(err, \"stopping UDF server\")\n\t}\n\tif err := s.socket.Close(); err != nil {\n\t\treturn errors.Wrap(err, \"closing UDF socket connection\")\n\t}\n\treturn nil\n}\n\nfunc (s *UDFSocket) Abort(err error)                    { s.server.Abort(err) }\nfunc (s *UDFSocket) Init(options []*agent.Option) error { return s.server.Init(options) }\nfunc (s *UDFSocket) Snapshot() ([]byte, error)          { return s.server.Snapshot() }\nfunc (s *UDFSocket) Restore(snapshot []byte) error      { return s.server.Restore(snapshot) }\nfunc (s *UDFSocket) In() chan<- edge.Message            { return s.server.In() }\nfunc (s *UDFSocket) Out() <-chan edge.Message           { return s.server.Out() }\nfunc (s *UDFSocket) Info() (udf.Info, error)            { return s.server.Info() }\n\ntype socket struct {\n\tpath string\n\tconn *net.UnixConn\n}\n\nfunc NewSocketConn(path string) Socket {\n\treturn &socket{\n\t\tpath: path,\n\t}\n}\n\nfunc (s *socket) Open() error {\n\taddr, err := net.ResolveUnixAddr(\"unix\", s.path)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Connect to socket\n\tb := backoff.NewExponentialBackOff()\n\tb.MaxElapsedTime = time.Minute * 5\n\n\terr = backoff.Retry(func() error {\n\t\tconn, err := net.DialUnix(\"unix\", nil, addr)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ts.conn = conn\n\t\treturn nil\n\t},\n\t\tb,\n\t)\n\treturn err\n}\n\nfunc (s *socket) Close() error {\n\treturn s.conn.Close()\n}\n\ntype unixCloser struct {\n\t*net.UnixConn\n}\n\nfunc (u unixCloser) Close() error {\n\t// Only close the write end of the socket connection.\n\t// The socket connection as a whole will be closed later.\n\treturn u.CloseWrite()\n}\n\nfunc (s *socket) In() io.WriteCloser {\n\treturn unixCloser{s.conn}\n}\n\nfunc (s *socket) Out() io.Reader {\n\treturn s.conn\n}\n"
  },
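  {
    "path": "examples/udf_socket_wiring_sketch.md",
    "content": "# Sketch: wiring a UDFSocket\n\nNOTE: this file is an illustrative sketch added alongside the vendored Kapacitor sources; it is not part of upstream Kapacitor.\n\nThe vendored `kapacitor/udf.go` exposes `NewSocketConn` and `NewUDFSocket`. A minimal wiring, with a made-up socket path and keepalive timeout:\n\n```go\npackage main\n\nimport (\n\t\"log\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/influxdata/kapacitor\"\n)\n\nfunc main() {\n\tl := log.New(os.Stderr, \"[udf-example] \", log.LstdFlags)\n\n\t// NewSocketConn dials the unix socket with exponential backoff\n\t// (up to 5 minutes), so the UDF agent may start late.\n\tsock := kapacitor.NewSocketConn(\"/tmp/example_udf.sock\") // hypothetical path\n\n\tabort := func() { l.Println(\"E! UDF aborted\") }\n\tu := kapacitor.NewUDFSocket(\"exampleTask\", \"exampleNode\", sock, l, 10*time.Second, abort)\n\n\tif err := u.Open(); err != nil {\n\t\tl.Fatalln(\"E!\", err)\n\t}\n\tdefer u.Close()\n\n\tinfo, err := u.Info()\n\tif err != nil {\n\t\tl.Fatalln(\"E!\", err)\n\t}\n\tl.Printf(\"I! UDF wants %v, provides %v\", info.Wants, info.Provides)\n}\n```\n\nA zero timeout disables the keepalive check entirely, as the server tests above demonstrate by passing `0`.\n"
  },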
  {
    "path": "vendor/github.com/influxdata/kapacitor/udf_test.go",
    "content": "package kapacitor_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/kapacitor\"\n\t\"github.com/influxdata/kapacitor/command\"\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/models\"\n\t\"github.com/influxdata/kapacitor/udf\"\n\t\"github.com/influxdata/kapacitor/udf/agent\"\n\tudf_test \"github.com/influxdata/kapacitor/udf/test\"\n)\n\nfunc newUDFSocket(name string) (*kapacitor.UDFSocket, *udf_test.IO) {\n\tuio := udf_test.NewIO()\n\tl := log.New(os.Stderr, fmt.Sprintf(\"[%s] \", name), log.LstdFlags)\n\tu := kapacitor.NewUDFSocket(name, \"testNode\", newTestSocket(uio), l, 0, nil)\n\treturn u, uio\n}\n\nfunc newUDFProcess(name string) (*kapacitor.UDFProcess, *udf_test.IO) {\n\tuio := udf_test.NewIO()\n\tcmd := newTestCommander(uio)\n\tl := log.New(os.Stderr, fmt.Sprintf(\"[%s] \", name), log.LstdFlags)\n\tu := kapacitor.NewUDFProcess(name, \"testNode\", cmd, command.Spec{}, l, 0, nil)\n\treturn u, uio\n}\n\nfunc TestUDFSocket_OpenClose(t *testing.T) {\n\tu, uio := newUDFSocket(\"OpenClose\")\n\ttestUDF_OpenClose(u, uio, t)\n}\nfunc TestUDFProcess_OpenClose(t *testing.T) {\n\tu, uio := newUDFProcess(\"OpenClose\")\n\ttestUDF_OpenClose(u, uio, t)\n}\n\nfunc testUDF_OpenClose(u udf.Interface, uio *udf_test.IO, t *testing.T) {\n\tu.Open()\n\n\tclose(uio.Responses)\n\tu.Close()\n\t// read all requests and wait till the chan is closed\n\tfor range uio.Requests {\n\t}\n\tif err := <-uio.ErrC; err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestUDFSocket_WritePoint(t *testing.T) {\n\tu, uio := newUDFSocket(\"WritePoint\")\n\ttestUDF_WritePoint(u, uio, t)\n}\n\nfunc TestUDFProcess_WritePoint(t *testing.T) {\n\tu, uio := newUDFProcess(\"WritePoint\")\n\ttestUDF_WritePoint(u, uio, t)\n}\n\nfunc testUDF_WritePoint(u udf.Interface, uio *udf_test.IO, t *testing.T) {\n\tgo func() {\n\t\treq := <-uio.Requests\n\t\t_, ok := req.Message.(*agent.Request_Init)\n\t\tif !ok {\n\t\t\tt.Errorf(\"expected init message got %T\", req.Message)\n\t\t}\n\t\tres := &agent.Response{\n\t\t\tMessage: &agent.Response_Init{\n\t\t\t\tInit: &agent.InitResponse{\n\t\t\t\t\tSuccess: true,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tuio.Responses <- res\n\t\treq = <-uio.Requests\n\t\tpt, ok := req.Message.(*agent.Request_Point)\n\t\tif !ok {\n\t\t\tt.Errorf(\"expected point message got %T\", req.Message)\n\t\t}\n\t\tres = &agent.Response{\n\t\t\tMessage: &agent.Response_Point{\n\t\t\t\tPoint: pt.Point,\n\t\t\t},\n\t\t}\n\t\tuio.Responses <- res\n\t\tclose(uio.Responses)\n\t}()\n\n\terr := u.Open()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = u.Init(nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Write point to server\n\tp := edge.NewPointMessage(\n\t\t\"test\",\n\t\t\"db\",\n\t\t\"rp\",\n\t\tmodels.Dimensions{},\n\t\tmodels.Fields{\"f1\": 1.0, \"f2\": 2.0},\n\t\tmodels.Tags{\"t1\": \"v1\", \"t2\": \"v2\"},\n\t\ttime.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC),\n\t)\n\tu.In() <- p\n\trp := <-u.Out()\n\tif !reflect.DeepEqual(rp, p) {\n\t\tt.Errorf(\"unexpected returned point got: %v exp %v\", rp, p)\n\t}\n\n\tu.Close()\n\t// read all requests and wait till the chan is closed\n\tfor range uio.Requests {\n\t}\n\tif err := <-uio.ErrC; err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestUDFSocket_WriteBatch(t *testing.T) {\n\tu, uio := newUDFSocket(\"WriteBatch\")\n\ttestUDF_WriteBatch(u, uio, t)\n}\n\nfunc TestUDFProcess_WriteBatch(t *testing.T) {\n\tu, uio := 
newUDFProcess(\"WriteBatch\")\n\ttestUDF_WriteBatch(u, uio, t)\n}\n\nfunc testUDF_WriteBatch(u udf.Interface, uio *udf_test.IO, t *testing.T) {\n\tgo func() {\n\t\treq := <-uio.Requests\n\t\t_, ok := req.Message.(*agent.Request_Init)\n\t\tif !ok {\n\t\t\tt.Errorf(\"expected init message got %T\", req.Message)\n\t\t}\n\t\tres := &agent.Response{\n\t\t\tMessage: &agent.Response_Init{\n\t\t\t\tInit: &agent.InitResponse{\n\t\t\t\t\tSuccess: true,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tuio.Responses <- res\n\t\t// Begin batch\n\t\treq = <-uio.Requests\n\t\tbb, ok := req.Message.(*agent.Request_Begin)\n\t\tif !ok {\n\t\t\tt.Errorf(\"expected begin message got %T\", req.Message)\n\t\t}\n\t\tres = &agent.Response{\n\t\t\tMessage: &agent.Response_Begin{\n\t\t\t\tBegin: bb.Begin,\n\t\t\t},\n\t\t}\n\t\tuio.Responses <- res\n\n\t\t// Point\n\t\treq = <-uio.Requests\n\t\tpt, ok := req.Message.(*agent.Request_Point)\n\t\tif !ok {\n\t\t\tt.Errorf(\"expected point message got %T\", req.Message)\n\t\t}\n\t\tres = &agent.Response{\n\t\t\tMessage: &agent.Response_Point{\n\t\t\t\tPoint: pt.Point,\n\t\t\t},\n\t\t}\n\t\tuio.Responses <- res\n\n\t\t// End batch\n\t\treq = <-uio.Requests\n\t\teb, ok := req.Message.(*agent.Request_End)\n\t\tif !ok {\n\t\t\tt.Errorf(\"expected end message got %T\", req.Message)\n\t\t}\n\t\tres = &agent.Response{\n\t\t\tMessage: &agent.Response_End{\n\t\t\t\tEnd: eb.End,\n\t\t\t},\n\t\t}\n\t\tuio.Responses <- res\n\t\tclose(uio.Responses)\n\t}()\n\n\terr := u.Open()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = u.Init(nil)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Write point to server\n\tb := edge.NewBufferedBatchMessage(\n\t\tedge.NewBeginBatchMessage(\n\t\t\t\"test\",\n\t\t\tmodels.Tags{\"t1\": \"v1\"},\n\t\t\tfalse,\n\t\t\ttime.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t1,\n\t\t),\n\t\t[]edge.BatchPointMessage{\n\t\t\tedge.NewBatchPointMessage(\n\t\t\t\tmodels.Fields{\"f1\": 1.0, \"f2\": 2.0, \"f3\": int64(1), \"f4\": \"str\"},\n\t\t\t\tmodels.Tags{\"t1\": \"v1\", \"t2\": \"v2\"},\n\t\t\t\ttime.Date(1971, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t),\n\t\t},\n\t\tedge.NewEndBatchMessage(),\n\t)\n\tu.In() <- b\n\trb := <-u.Out()\n\tif !reflect.DeepEqual(b, rb) {\n\t\tt.Errorf(\"unexpected returned batch got: %v exp %v\", rb, b)\n\t}\n\n\tu.Close()\n\t// read all requests and wait till the chan is closed\n\tfor range uio.Requests {\n\t}\n\tif err := <-uio.ErrC; err != nil {\n\t\tt.Error(err)\n\t}\n}\n\ntype testCommander struct {\n\tuio *udf_test.IO\n}\n\nfunc newTestCommander(uio *udf_test.IO) command.Commander {\n\treturn &testCommander{\n\t\tuio: uio,\n\t}\n}\n\nfunc (c *testCommander) NewCommand(command.Spec) command.Command {\n\treturn c\n}\n\nfunc (c *testCommander) Start() error { return nil }\n\nfunc (c *testCommander) Wait() error { return nil }\n\nfunc (c *testCommander) Stdin(io.Reader)  {}\nfunc (c *testCommander) Stdout(io.Writer) {}\nfunc (c *testCommander) Stderr(io.Writer) {}\n\nfunc (c *testCommander) StdinPipe() (io.WriteCloser, error) {\n\treturn c.uio.In(), nil\n}\n\nfunc (c *testCommander) StdoutPipe() (io.Reader, error) {\n\treturn c.uio.Out(), nil\n}\n\nfunc (c *testCommander) StderrPipe() (io.Reader, error) {\n\treturn &bytes.Buffer{}, nil\n}\n\nfunc (c *testCommander) Kill() {\n\tc.uio.Kill()\n}\n\ntype testSocket struct {\n\tuio *udf_test.IO\n}\n\nfunc newTestSocket(uio *udf_test.IO) kapacitor.Socket {\n\treturn &testSocket{\n\t\tuio: uio,\n\t}\n}\nfunc (s *testSocket) Open() error {\n\treturn nil\n}\n\nfunc (s *testSocket) Close() error {\n\treturn 
nil\n}\n\nfunc (s *testSocket) In() io.WriteCloser {\n\treturn s.uio.In()\n}\n\nfunc (s *testSocket) Out() io.Reader {\n\treturn s.uio.Out()\n}\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/union.go",
    "content": "package kapacitor\n\nimport (\n\t\"log\"\n\t\"time\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n)\n\ntype UnionNode struct {\n\tnode\n\tu *pipeline.UnionNode\n\n\t// Buffer of points/batches from each source.\n\tsources [][]timeMessage\n\t// the low water marks for each source.\n\tlowMarks []time.Time\n\n\trename string\n}\n\ntype timeMessage interface {\n\tedge.Message\n\tedge.TimeGetter\n}\n\n// Create a new  UnionNode which combines all parent data streams into a single stream.\n// No transformation of any kind is performed.\nfunc newUnionNode(et *ExecutingTask, n *pipeline.UnionNode, l *log.Logger) (*UnionNode, error) {\n\tun := &UnionNode{\n\t\tu:      n,\n\t\tnode:   node{Node: n, et: et, logger: l},\n\t\trename: n.Rename,\n\t}\n\tun.node.runF = un.runUnion\n\treturn un, nil\n}\n\nfunc (n *UnionNode) runUnion([]byte) error {\n\t// Keep buffer of values from parents so they can be ordered.\n\n\tn.sources = make([][]timeMessage, len(n.ins))\n\tn.lowMarks = make([]time.Time, len(n.ins))\n\n\tconsumer := edge.NewMultiConsumerWithStats(n.ins, n)\n\treturn consumer.Consume()\n}\n\nfunc (n *UnionNode) BufferedBatch(src int, batch edge.BufferedBatchMessage) error {\n\tn.timer.Start()\n\tdefer n.timer.Stop()\n\n\tif n.rename != \"\" {\n\t\tbatch = batch.ShallowCopy()\n\t\tbatch.SetBegin(batch.Begin().ShallowCopy())\n\t\tbatch.Begin().SetName(n.rename)\n\t}\n\n\t// Add newest point to buffer\n\tn.sources[src] = append(n.sources[src], batch)\n\n\t// Emit the next values\n\treturn n.emitReady(false)\n}\n\nfunc (n *UnionNode) Point(src int, p edge.PointMessage) error {\n\tn.timer.Start()\n\tdefer n.timer.Stop()\n\tif n.rename != \"\" {\n\t\tp = p.ShallowCopy()\n\t\tp.SetName(n.rename)\n\t}\n\n\t// Add newest point to buffer\n\tn.sources[src] = append(n.sources[src], p)\n\n\t// Emit the next values\n\treturn n.emitReady(false)\n}\n\nfunc (n *UnionNode) Barrier(src int, b edge.BarrierMessage) error {\n\tn.timer.Start()\n\tdefer n.timer.Stop()\n\n\t// Add newest point to buffer\n\tn.sources[src] = append(n.sources[src], b)\n\n\t// Emit the next values\n\treturn n.emitReady(false)\n}\n\nfunc (n *UnionNode) Finish() error {\n\t// We are done, emit all buffered\n\treturn n.emitReady(true)\n}\n\nfunc (n *UnionNode) emitReady(drain bool) error {\n\temitted := true\n\t// Emit all points until nothing changes\n\tfor emitted {\n\t\temitted = false\n\t\t// Find low water mark\n\t\tvar mark time.Time\n\t\tvalidSources := 0\n\t\tfor i, values := range n.sources {\n\t\t\tsourceMark := n.lowMarks[i]\n\t\t\tif len(values) > 0 {\n\t\t\t\tt := values[0].Time()\n\t\t\t\tif mark.IsZero() || t.Before(mark) {\n\t\t\t\t\tmark = t\n\t\t\t\t}\n\t\t\t\tsourceMark = t\n\t\t\t}\n\t\t\tn.lowMarks[i] = sourceMark\n\t\t\tif !sourceMark.IsZero() {\n\t\t\t\tvalidSources++\n\t\t\t\t// Only consider the sourceMark if we are not draining\n\t\t\t\tif !drain && (mark.IsZero() || sourceMark.Before(mark)) {\n\t\t\t\t\tmark = sourceMark\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif !drain && validSources != len(n.sources) {\n\t\t\t// We can't continue processing until we have\n\t\t\t// at least one value from each parent.\n\t\t\t// Unless we are draining the buffer than we can continue.\n\t\t\treturn nil\n\t\t}\n\n\t\t// Emit all values that are at or below the mark.\n\t\tfor i, values := range n.sources {\n\t\t\tvar j int\n\t\t\tl := len(values)\n\t\t\tfor j = 0; j < l; j++ {\n\t\t\t\tif !values[j].Time().After(mark) {\n\t\t\t\t\terr := n.emit(values[j])\n\t\t\t\t\tif err != nil 
{\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t\t// Note that we emitted something\n\t\t\t\t\temitted = true\n\t\t\t\t} else {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Drop values that were emitted\n\t\t\tn.sources[i] = values[j:]\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (n *UnionNode) emit(m edge.Message) error {\n\tn.timer.Pause()\n\tdefer n.timer.Resume()\n\treturn edge.Forward(n.outs, m)\n}\n"
  },
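  {
    "path": "examples/union_low_water_mark_sketch.md",
    "content": "# Sketch: low-water-mark merge in UnionNode\n\nNOTE: this file is an illustrative sketch added alongside the vendored Kapacitor sources; it is not part of upstream Kapacitor.\n\nThe vendored `union.go` buffers messages per parent and only forwards those whose time is at or below the lowest head timestamp across all sources, so output stays time-ordered. A stripped-down version with plain ints standing in for timestamps (all names hypothetical):\n\n```go\npackage main\n\nimport \"fmt\"\n\n// emitReady mirrors the merge loop in UnionNode.emitReady: nothing is\n// emitted until every source has contributed a value, then everything\n// at or below the low water mark is forwarded in source order.\nfunc emitReady(sources [][]int, emit func(int)) {\n\tfor {\n\t\t// Find the low water mark: the smallest head value across sources.\n\t\tmark := 0\n\t\tfor i, values := range sources {\n\t\t\tif len(values) == 0 {\n\t\t\t\t// Ordering is not safe until every source has a value.\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif i == 0 || values[0] < mark {\n\t\t\t\tmark = values[0]\n\t\t\t}\n\t\t}\n\t\t// Emit and drop all buffered values at or below the mark.\n\t\tfor i, values := range sources {\n\t\t\tvar j int\n\t\t\tfor j = 0; j < len(values) && values[j] <= mark; j++ {\n\t\t\t\temit(values[j])\n\t\t\t}\n\t\t\tsources[i] = values[j:]\n\t\t}\n\t}\n}\n\nfunc main() {\n\tsources := [][]int{\n\t\t{1, 4, 7},\n\t\t{2, 3},\n\t\t{5},\n\t}\n\temitReady(sources, func(t int) { fmt.Print(t, \" \") })\n\tfmt.Println()\n\t// Prints: 1 2 3 (4, 5 and 7 stay buffered until more values arrive)\n}\n```\n\nThe real node additionally remembers each source's last seen time (`lowMarks`) and has a drain mode for `Finish`, so a quiet source does not block the merge forever once its progress is known.\n"
  },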
  {
    "path": "vendor/github.com/influxdata/kapacitor/update_tick_docs.sh",
    "content": "#!/bin/bash\n\n# To generate the tick docs we use a little utility similar\n# to godoc called tickdoc. It organizes the fields and method\n# of structs into property methods and chaining methods.\n\ndest=$1 # output path for the .md files\n\nif [ -z \"$dest\" ]\nthen\n    echo \"Usage: ./update_tick_docs.sh output_path\"\n    exit 1\nfi\n\ntickdoc -config tickdoc.conf ./pipeline $dest\n\n\n"
  },
  {
    "path": "vendor/github.com/influxdata/kapacitor/where.go",
    "content": "package kapacitor\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n\t\"github.com/influxdata/kapacitor/tick/ast\"\n\t\"github.com/influxdata/kapacitor/tick/stateful\"\n)\n\ntype WhereNode struct {\n\tnode\n\tw        *pipeline.WhereNode\n\tendpoint string\n\n\texpression stateful.Expression\n\tscopePool  stateful.ScopePool\n}\n\n// Create a new WhereNode which filters down the batch or stream by a condition\nfunc newWhereNode(et *ExecutingTask, n *pipeline.WhereNode, l *log.Logger) (wn *WhereNode, err error) {\n\twn = &WhereNode{\n\t\tnode: node{Node: n, et: et, logger: l},\n\t\tw:    n,\n\t}\n\n\texpr, err := stateful.NewExpression(n.Lambda.Expression)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to compile expression in where clause: %v\", err)\n\t}\n\twn.expression = expr\n\twn.scopePool = stateful.NewScopePool(ast.FindReferenceVariables(n.Lambda.Expression))\n\n\twn.runF = wn.runWhere\n\tif n.Lambda == nil {\n\t\treturn nil, errors.New(\"nil expression passed to WhereNode\")\n\t}\n\treturn\n}\n\nfunc (n *WhereNode) runWhere(snapshot []byte) error {\n\tconsumer := edge.NewGroupedConsumer(\n\t\tn.ins[0],\n\t\tn,\n\t)\n\tn.statMap.Set(statCardinalityGauge, consumer.CardinalityVar())\n\n\treturn consumer.Consume()\n}\n\nfunc (n *WhereNode) NewGroup(group edge.GroupInfo, first edge.PointMeta) (edge.Receiver, error) {\n\treturn edge.NewReceiverFromForwardReceiverWithStats(\n\t\tn.outs,\n\t\tedge.NewTimedForwardReceiver(n.timer, n.newGroup()),\n\t), nil\n}\n\nfunc (n *WhereNode) newGroup() *whereGroup {\n\treturn &whereGroup{\n\t\tn:    n,\n\t\texpr: n.expression.CopyReset(),\n\t}\n}\n\ntype whereGroup struct {\n\tn    *WhereNode\n\texpr stateful.Expression\n}\n\nfunc (g *whereGroup) BeginBatch(begin edge.BeginBatchMessage) (edge.Message, error) {\n\tbegin = begin.ShallowCopy()\n\tbegin.SetSizeHint(0)\n\treturn begin, nil\n}\n\nfunc (g *whereGroup) BatchPoint(bp edge.BatchPointMessage) (edge.Message, error) {\n\treturn g.doWhere(bp)\n}\n\nfunc (g *whereGroup) EndBatch(end edge.EndBatchMessage) (edge.Message, error) {\n\treturn end, nil\n}\n\nfunc (g *whereGroup) Point(p edge.PointMessage) (edge.Message, error) {\n\treturn g.doWhere(p)\n}\n\nfunc (g *whereGroup) doWhere(p edge.FieldsTagsTimeGetterMessage) (edge.Message, error) {\n\tpass, err := EvalPredicate(g.expr, g.n.scopePool, p)\n\tif err != nil {\n\t\tg.n.incrementErrorCount()\n\t\tg.n.logger.Println(\"E! error while evaluating expression:\", err)\n\t\treturn nil, nil\n\t}\n\tif pass {\n\t\treturn p, nil\n\t}\n\treturn nil, nil\n}\n\nfunc (g *whereGroup) Barrier(b edge.BarrierMessage) (edge.Message, error) {\n\treturn b, nil\n}\nfunc (g *whereGroup) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) {\n\treturn d, nil\n}\n"
  },
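  {
    "path": "examples/where_filter_sketch.md",
    "content": "# Sketch: error handling in the where filter\n\nNOTE: this file is an illustrative sketch added alongside the vendored Kapacitor sources; it is not part of upstream Kapacitor.\n\nIn the vendored `where.go`, `whereGroup.doWhere` evaluates the lambda per point and returns either the point (forward) or `nil` (drop); an evaluation error increments the node's error count and drops the point instead of failing the task. The same pattern with a plain predicate (all names hypothetical):\n\n```go\npackage main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n)\n\n// doWhere mirrors whereGroup.doWhere: a nil return drops the value,\n// and an evaluation error drops it too rather than stopping the task.\nfunc doWhere(v float64, pred func(float64) (bool, error)) *float64 {\n\tpass, err := pred(v)\n\tif err != nil {\n\t\tfmt.Println(\"E! error while evaluating expression:\", err)\n\t\treturn nil // drop the point, keep the task running\n\t}\n\tif pass {\n\t\treturn &v\n\t}\n\treturn nil\n}\n\nfunc main() {\n\tpred := func(v float64) (bool, error) {\n\t\tif v < 0 {\n\t\t\treturn false, errors.New(\"negative value\")\n\t\t}\n\t\treturn v > 10, nil\n\t}\n\tfor _, v := range []float64{5, 42, -1} {\n\t\tif p := doWhere(v, pred); p != nil {\n\t\t\tfmt.Println(\"forwarded:\", *p)\n\t\t}\n\t}\n\t// Only 42 is forwarded: 5 fails the predicate, -1 errors and is dropped.\n}\n```\n"
  },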
  {
    "path": "vendor/github.com/influxdata/kapacitor/window.go",
    "content": "package kapacitor\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"log\"\n\t\"time\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/models\"\n\t\"github.com/influxdata/kapacitor/pipeline\"\n)\n\ntype WindowNode struct {\n\tnode\n\tw *pipeline.WindowNode\n}\n\n// Create a new  WindowNode, which windows data for a period of time and emits the window.\nfunc newWindowNode(et *ExecutingTask, n *pipeline.WindowNode, l *log.Logger) (*WindowNode, error) {\n\tif n.Period == 0 && n.PeriodCount == 0 {\n\t\treturn nil, errors.New(\"window node must have either a non zero period or non zero period count\")\n\t}\n\twn := &WindowNode{\n\t\tw:    n,\n\t\tnode: node{Node: n, et: et, logger: l},\n\t}\n\twn.node.runF = wn.runWindow\n\treturn wn, nil\n}\n\nfunc (n *WindowNode) runWindow([]byte) error {\n\tconsumer := edge.NewGroupedConsumer(n.ins[0], n)\n\tn.statMap.Set(statCardinalityGauge, consumer.CardinalityVar())\n\treturn consumer.Consume()\n}\n\nfunc (n *WindowNode) NewGroup(group edge.GroupInfo, first edge.PointMeta) (edge.Receiver, error) {\n\tr, err := n.newWindow(group, first)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn edge.NewReceiverFromForwardReceiverWithStats(\n\t\tn.outs,\n\t\tedge.NewTimedForwardReceiver(n.timer, r),\n\t), nil\n}\n\nfunc (n *WindowNode) DeleteGroup(group models.GroupID) {\n\t// Nothing to do\n}\n\nfunc (n *WindowNode) newWindow(group edge.GroupInfo, first edge.PointMeta) (edge.ForwardReceiver, error) {\n\tswitch {\n\tcase n.w.Period != 0:\n\t\treturn newWindowByTime(\n\t\t\tfirst.Name(),\n\t\t\tfirst.Time(),\n\t\t\tgroup,\n\t\t\tn.w.Period,\n\t\t\tn.w.Every,\n\t\t\tn.w.AlignFlag,\n\t\t\tn.w.FillPeriodFlag,\n\t\t\tn.logger,\n\t\t), nil\n\tcase n.w.PeriodCount != 0:\n\t\treturn newWindowByCount(\n\t\t\tfirst.Name(),\n\t\t\tgroup,\n\t\t\tint(n.w.PeriodCount),\n\t\t\tint(n.w.EveryCount),\n\t\t\tn.w.FillPeriodFlag,\n\t\t\tn.logger,\n\t\t), nil\n\tdefault:\n\t\treturn nil, errors.New(\"unreachable code, window node should have a non-zero period or period count\")\n\t}\n}\n\ntype windowByTime struct {\n\tname  string\n\tgroup edge.GroupInfo\n\n\tnextEmit time.Time\n\n\tbuf *windowTimeBuffer\n\n\talign,\n\tfillPeriod bool\n\n\tperiod time.Duration\n\tevery  time.Duration\n\n\tlogger *log.Logger\n}\n\nfunc newWindowByTime(\n\tname string,\n\tt time.Time,\n\tgroup edge.GroupInfo,\n\tperiod,\n\tevery time.Duration,\n\talign,\n\tfillPeriod bool,\n\tlogger *log.Logger,\n\n) *windowByTime {\n\t// Determine nextEmit time.\n\tvar nextEmit time.Time\n\tif fillPeriod {\n\t\tnextEmit = t.Add(period)\n\t\tif align {\n\t\t\tfirstPeriod := nextEmit\n\t\t\t// Needs to be aligned with Every and be greater than now+Period\n\t\t\tnextEmit = nextEmit.Truncate(every)\n\t\t\tif !nextEmit.After(firstPeriod) {\n\t\t\t\t// This means we will drop the first few points\n\t\t\t\tnextEmit = nextEmit.Add(every)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tnextEmit = t.Add(every)\n\t\tif align {\n\t\t\tnextEmit = nextEmit.Truncate(every)\n\t\t}\n\t}\n\treturn &windowByTime{\n\t\tname:       name,\n\t\tgroup:      group,\n\t\tnextEmit:   nextEmit,\n\t\tbuf:        &windowTimeBuffer{logger: logger},\n\t\talign:      align,\n\t\tfillPeriod: fillPeriod,\n\t\tperiod:     period,\n\t\tevery:      every,\n\t\tlogger:     logger,\n\t}\n}\n\nfunc (w *windowByTime) BeginBatch(edge.BeginBatchMessage) (edge.Message, error) {\n\treturn nil, errors.New(\"window does not support batch data\")\n}\nfunc (w *windowByTime) BatchPoint(edge.BatchPointMessage) (edge.Message, error) 
{\n\treturn nil, errors.New(\"window does not support batch data\")\n}\nfunc (w *windowByTime) EndBatch(edge.EndBatchMessage) (edge.Message, error) {\n\treturn nil, errors.New(\"window does not support batch data\")\n}\nfunc (w *windowByTime) Barrier(b edge.BarrierMessage) (edge.Message, error) {\n\t//TODO(nathanielc): Implement barrier messages to flush window\n\treturn b, nil\n}\nfunc (w *windowByTime) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) {\n\treturn d, nil\n}\n\nfunc (w *windowByTime) Point(p edge.PointMessage) (msg edge.Message, err error) {\n\tif w.every == 0 {\n\t\t// Insert point before.\n\t\tw.buf.insert(p)\n\t\t// Since we are emitting every point we can use a right aligned window (oldest, now]\n\t\tif !p.Time().Before(w.nextEmit) {\n\t\t\t// purge old points\n\t\t\toldest := p.Time().Add(-1 * w.period)\n\t\t\tw.buf.purge(oldest, false)\n\n\t\t\t// get current batch\n\t\t\tmsg = w.batch(p.Time())\n\n\t\t\t// Next emit time is now\n\t\t\tw.nextEmit = p.Time()\n\t\t}\n\t} else {\n\t\t// Since more points can arrive with the same time we need to use a left aligned window [oldest, now).\n\t\tif !p.Time().Before(w.nextEmit) {\n\t\t\t// purge old points\n\t\t\toldest := w.nextEmit.Add(-1 * w.period)\n\t\t\tw.buf.purge(oldest, true)\n\n\t\t\t// get current batch\n\t\t\tmsg = w.batch(w.nextEmit)\n\n\t\t\t// Determine next emit time.\n\t\t\t// This is dependent on the current time not the last time we emitted.\n\t\t\tw.nextEmit = p.Time().Add(w.every)\n\t\t\tif w.align {\n\t\t\t\tw.nextEmit = w.nextEmit.Truncate(w.every)\n\t\t\t}\n\t\t}\n\t\t// Insert point after.\n\t\tw.buf.insert(p)\n\t}\n\treturn\n}\n\n// batch returns the current window buffer as a batch message.\n// TODO(nathanielc): A possible optimization could be to not buffer the data at all if we know that we do not have overlapping windows.\nfunc (w *windowByTime) batch(tmax time.Time) edge.BufferedBatchMessage {\n\tpoints := w.buf.points()\n\treturn edge.NewBufferedBatchMessage(\n\t\tedge.NewBeginBatchMessage(\n\t\t\tw.name,\n\t\t\tw.group.Tags,\n\t\t\tw.group.Dimensions.ByName,\n\t\t\ttmax,\n\t\t\tlen(points),\n\t\t),\n\t\tpoints,\n\t\tedge.NewEndBatchMessage(),\n\t)\n}\n\n// implements a purpose built ring buffer for the window of points\ntype windowTimeBuffer struct {\n\twindow []edge.PointMessage\n\tstart  int\n\tstop   int\n\tsize   int\n\tlogger *log.Logger\n}\n\n// Insert a single point into the buffer.\nfunc (b *windowTimeBuffer) insert(p edge.PointMessage) {\n\tif b.size == cap(b.window) {\n\t\t//Increase our buffer\n\t\tc := 2 * (b.size + 1)\n\t\tw := make([]edge.PointMessage, b.size+1, c)\n\t\tif b.size == 0 {\n\t\t\t//do nothing\n\t\t} else if b.stop > b.start {\n\t\t\tn := copy(w, b.window[b.start:b.stop])\n\t\t\tif n != b.size {\n\t\t\t\tpanic(fmt.Sprintf(\"did not copy all the data: copied: %d size: %d start: %d stop: %d\\n\", n, b.size, b.start, b.stop))\n\t\t\t}\n\t\t} else {\n\t\t\tn := 0\n\t\t\tn += copy(w, b.window[b.start:])\n\t\t\tn += copy(w[b.size-b.start:], b.window[:b.stop])\n\t\t\tif n != b.size {\n\t\t\t\tpanic(fmt.Sprintf(\"did not copy all the data: copied: %d size: %d start: %d stop: %d\\n\", n, b.size, b.start, b.stop))\n\t\t\t}\n\t\t}\n\t\tb.window = w\n\t\tb.start = 0\n\t\tb.stop = b.size\n\t}\n\n\t// Check if we need to wrap around\n\tif len(b.window) == cap(b.window) && b.stop == len(b.window) {\n\t\tb.stop = 0\n\t}\n\n\t// Insert point\n\tif b.stop == len(b.window) {\n\t\tb.window = append(b.window, p)\n\t} else {\n\t\tb.window[b.stop] = 
p\n\t}\n\tb.size++\n\tb.stop++\n}\n\n// Purge expired data from the window.\nfunc (b *windowTimeBuffer) purge(oldest time.Time, inclusive bool) {\n\tinclude := func(t time.Time) bool {\n\t\tif inclusive {\n\t\t\treturn !t.Before(oldest)\n\t\t}\n\t\treturn t.After(oldest)\n\t}\n\tl := len(b.window)\n\tif l == 0 {\n\t\treturn\n\t}\n\tif b.start < b.stop {\n\t\tfor ; b.start < b.stop; b.start++ {\n\t\t\tif include(b.window[b.start].Time()) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tb.size = b.stop - b.start\n\t} else {\n\t\tif include(b.window[l-1].Time()) {\n\t\t\tfor ; b.start < l; b.start++ {\n\t\t\t\tif include(b.window[b.start].Time()) {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tb.size = l - b.start + b.stop\n\t\t} else {\n\t\t\tfor b.start = 0; b.start < b.stop; b.start++ {\n\t\t\t\tif include(b.window[b.start].Time()) {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tb.size = b.stop - b.start\n\t\t}\n\t}\n}\n\n// Returns a copy of the current buffer.\n// TODO(nathanielc): Optimize this function use buffered vs unbuffered batch messages.\nfunc (b *windowTimeBuffer) points() []edge.BatchPointMessage {\n\tif b.size == 0 {\n\t\treturn nil\n\t}\n\tpoints := make([]edge.BatchPointMessage, b.size)\n\tif b.stop > b.start {\n\t\tfor i, p := range b.window[b.start:b.stop] {\n\t\t\tpoints[i] = edge.BatchPointFromPoint(p)\n\t\t}\n\t} else {\n\t\tj := 0\n\t\tl := len(b.window)\n\t\tfor i := b.start; i < l; i++ {\n\t\t\tp := b.window[i]\n\t\t\tpoints[j] = edge.BatchPointFromPoint(p)\n\t\t\tj++\n\t\t}\n\t\tfor i := 0; i < b.stop; i++ {\n\t\t\tp := b.window[i]\n\t\t\tpoints[j] = edge.BatchPointFromPoint(p)\n\t\t\tj++\n\t\t}\n\t}\n\treturn points\n}\n\ntype windowByCount struct {\n\tname  string\n\tgroup edge.GroupInfo\n\n\tbuf      []edge.BatchPointMessage\n\tstart    int\n\tstop     int\n\tperiod   int\n\tevery    int\n\tnextEmit int\n\tsize     int\n\tcount    int\n\n\tlogger *log.Logger\n}\n\nfunc newWindowByCount(\n\tname string,\n\tgroup edge.GroupInfo,\n\tperiod,\n\tevery int,\n\tfillPeriod bool,\n\tlogger *log.Logger) *windowByCount {\n\t// Determine the first nextEmit index\n\tnextEmit := every\n\tif fillPeriod {\n\t\tnextEmit = period\n\t}\n\treturn &windowByCount{\n\t\tname:     name,\n\t\tgroup:    group,\n\t\tbuf:      make([]edge.BatchPointMessage, period),\n\t\tperiod:   period,\n\t\tevery:    every,\n\t\tnextEmit: nextEmit,\n\t\tlogger:   logger,\n\t}\n}\nfunc (w *windowByCount) BeginBatch(edge.BeginBatchMessage) (edge.Message, error) {\n\treturn nil, errors.New(\"window does not support batch data\")\n}\nfunc (w *windowByCount) BatchPoint(edge.BatchPointMessage) (edge.Message, error) {\n\treturn nil, errors.New(\"window does not support batch data\")\n}\nfunc (w *windowByCount) EndBatch(edge.EndBatchMessage) (edge.Message, error) {\n\treturn nil, errors.New(\"window does not support batch data\")\n}\nfunc (w *windowByCount) Barrier(b edge.BarrierMessage) (edge.Message, error) {\n\t//TODO(nathanielc): Implement barrier messages to flush window\n\treturn b, nil\n}\nfunc (w *windowByCount) DeleteGroup(d edge.DeleteGroupMessage) (edge.Message, error) {\n\treturn d, nil\n}\n\nfunc (w *windowByCount) Point(p edge.PointMessage) (msg edge.Message, err error) {\n\tw.buf[w.stop] = edge.BatchPointFromPoint(p)\n\tw.stop = (w.stop + 1) % w.period\n\tif w.size == w.period {\n\t\tw.start = (w.start + 1) % w.period\n\t} else {\n\t\tw.size++\n\t}\n\tw.count++\n\t//Check if its time to emit\n\tif w.count == w.nextEmit {\n\t\tw.nextEmit += w.every\n\t\tmsg = w.batch()\n\t}\n\treturn\n}\n\nfunc (w 
*windowByCount) batch() edge.BufferedBatchMessage {\n\tpoints := w.points()\n\treturn edge.NewBufferedBatchMessage(\n\t\tedge.NewBeginBatchMessage(\n\t\t\tw.name,\n\t\t\tw.group.Tags,\n\t\t\tw.group.Dimensions.ByName,\n\t\t\tpoints[len(points)-1].Time(),\n\t\t\tlen(points),\n\t\t),\n\t\tpoints,\n\t\tedge.NewEndBatchMessage(),\n\t)\n}\n\n// Returns a copy of the current buffer.\nfunc (w *windowByCount) points() []edge.BatchPointMessage {\n\tif w.size == 0 {\n\t\treturn nil\n\t}\n\tpoints := make([]edge.BatchPointMessage, w.size)\n\tif w.stop > w.start {\n\t\tcopy(points, w.buf[w.start:w.stop])\n\t} else {\n\t\tj := 0\n\t\tl := len(w.buf)\n\t\tfor i := w.start; i < l; i++ {\n\t\t\tpoints[j] = w.buf[i]\n\t\t\tj++\n\t\t}\n\t\tfor i := 0; i < w.stop; i++ {\n\t\t\tpoints[j] = w.buf[i]\n\t\t\tj++\n\t\t}\n\t}\n\treturn points\n}\n"
  },
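  {
    "path": "examples/window_by_count_sketch.md",
    "content": "# Sketch: emit cadence of windowByCount\n\nNOTE: this file is an illustrative sketch added alongside the vendored Kapacitor sources; it is not part of upstream Kapacitor.\n\nIn the vendored `window.go`, `windowByCount` emits a batch whenever the running point count reaches `nextEmit`, which starts at `every` (or at `period` when `fillPeriod` is set) and then advances by `every`; the buffer never holds more than `period` points. A small walk-through of that arithmetic (the `emits` helper is hypothetical):\n\n```go\npackage main\n\nimport \"fmt\"\n\n// emits prints the point counts at which windowByCount would emit a\n// batch, along with the batch size, for the first n points.\nfunc emits(period, every, n int, fillPeriod bool) {\n\tnextEmit := every\n\tif fillPeriod {\n\t\tnextEmit = period\n\t}\n\tfor count := 1; count <= n; count++ {\n\t\tif count == nextEmit {\n\t\t\tnextEmit += every\n\t\t\tsize := count\n\t\t\tif size > period {\n\t\t\t\tsize = period // the ring buffer caps the batch at period points\n\t\t\t}\n\t\t\tfmt.Printf(\"emit at point %d: batch of %d\\n\", count, size)\n\t\t}\n\t}\n}\n\nfunc main() {\n\t// period=10, every=3: emits at 3, 6, 9, 12 with sizes 3, 6, 9, 10.\n\temits(10, 3, 12, false)\n\t// fillPeriod waits for a full window first: the first emit is at 10.\n\temits(10, 3, 12, true)\n}\n```\n\n`windowByTime` follows the same shape but keys `nextEmit` off point timestamps instead of counts, with optional truncation to align emits to `every`.\n"
  },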
  {
    "path": "vendor/github.com/influxdata/kapacitor/window_test.go",
    "content": "package kapacitor\n\nimport (\n\t\"log\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/influxdata/kapacitor/edge\"\n\t\"github.com/influxdata/kapacitor/models\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nvar logger = log.New(os.Stderr, \"[window] \", log.LstdFlags|log.Lshortfile)\n\nfunc TestWindowBufferByTime(t *testing.T) {\n\tassert := assert.New(t)\n\n\tbuf := &windowTimeBuffer{logger: logger}\n\n\tsize := 100\n\n\t// fill buffer\n\tfor i := 1; i <= size; i++ {\n\n\t\tt := time.Unix(int64(i), 0)\n\t\tp := edge.NewPointMessage(\n\t\t\t\"name\", \"db\", \"rp\",\n\t\t\tmodels.Dimensions{},\n\t\t\tnil,\n\t\t\tnil,\n\t\t\tt,\n\t\t)\n\t\tbuf.insert(p)\n\n\t\tassert.Equal(i, buf.size)\n\t\tassert.Equal(0, buf.start)\n\t\tassert.Equal(i, buf.stop)\n\t}\n\n\t// purge entire buffer\n\tfor i := 0; i <= size; i++ {\n\n\t\toldest := time.Unix(int64(i+1), 0).UTC()\n\t\tbuf.purge(oldest, true)\n\n\t\tassert.Equal(size-i, buf.size, \"i: %d\", i)\n\t\tassert.Equal(i, buf.start, \"i: %d\", i)\n\t\tassert.Equal(size, buf.stop, \"i: %d\", i)\n\n\t\tpoints := buf.points()\n\t\tif assert.Equal(size-i, len(points)) {\n\t\t\tfor _, p := range points {\n\t\t\t\tassert.True(!p.Time().Before(oldest), \"Point %s is not after oldest time %s\", p.Time(), oldest)\n\t\t\t}\n\t\t}\n\t}\n\n\tassert.Equal(0, buf.size)\n\n\t// fill buffer again\n\toldest := time.Unix(int64(size), 0).UTC()\n\tfor i := 1; i <= size*2; i++ {\n\n\t\tt := time.Unix(int64(i+size), 0)\n\t\tp := edge.NewPointMessage(\n\t\t\t\"name\", \"db\", \"rp\",\n\t\t\tmodels.Dimensions{},\n\t\t\tnil,\n\t\t\tnil,\n\t\t\tt,\n\t\t)\n\t\tbuf.insert(p)\n\n\t\tassert.Equal(i, buf.size)\n\n\t\tpoints := buf.points()\n\t\tif assert.Equal(i, len(points)) {\n\t\t\tfor _, p := range points {\n\t\t\t\tif assert.NotNil(p, \"i:%d\", i) {\n\t\t\t\t\tassert.True(!p.Time().Before(oldest), \"Point %s is not after oldest time %s\", p.Time(), oldest)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestWindowBufferByCount(t *testing.T) {\n\ttestCases := []struct {\n\t\tsize       int\n\t\tevery      int\n\t\tperiod     int\n\t\tfillPeriod bool\n\t}{\n\t\t{\n\t\t\tsize:   100,\n\t\t\tevery:  10,\n\t\t\tperiod: 10,\n\t\t},\n\t\t{\n\t\t\tsize:   100,\n\t\t\tevery:  3,\n\t\t\tperiod: 10,\n\t\t},\n\t\t{\n\t\t\tsize:   100,\n\t\t\tevery:  1,\n\t\t\tperiod: 2,\n\t\t},\n\t\t{\n\t\t\tsize:   100,\n\t\t\tevery:  1,\n\t\t\tperiod: 1,\n\t\t},\n\t\t{\n\t\t\tsize:   100,\n\t\t\tevery:  10,\n\t\t\tperiod: 5,\n\t\t},\n\t\t{\n\t\t\tsize:   100,\n\t\t\tevery:  1,\n\t\t\tperiod: 5,\n\t\t},\n\t}\n\tfor _, tc := range testCases {\n\t\tt.Logf(\"Starting test size %d period %d every %d\", tc.size, tc.period, tc.every)\n\t\tw := newWindowByCount(\n\t\t\t\"test\",\n\t\t\tedge.GroupInfo{},\n\t\t\ttc.period,\n\t\t\ttc.every,\n\t\t\ttc.fillPeriod,\n\t\t\tlogger,\n\t\t)\n\n\t\t// fill buffer\n\t\tfor i := 1; i <= tc.size; i++ {\n\t\t\tp := edge.NewPointMessage(\n\t\t\t\t\"name\", \"db\", \"rp\",\n\t\t\t\tmodels.Dimensions{},\n\t\t\t\tnil,\n\t\t\t\tnil,\n\t\t\t\ttime.Unix(int64(i), 0).UTC(),\n\t\t\t)\n\t\t\tmsg, err := w.Point(p)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\texpEmit := tc.every == 0 || i%tc.every == 0\n\t\t\tif tc.fillPeriod {\n\t\t\t\texpEmit = i > tc.period && expEmit\n\t\t\t}\n\t\t\tif expEmit && msg == nil {\n\t\t\t\tt.Errorf(\"%d unexpected nil forward message: got nil message, expected non nil message\", i)\n\t\t\t}\n\t\t\tif !expEmit && msg != nil {\n\t\t\t\tt.Errorf(\"%d unexpected forward message: got non-nil message %v, expected nil 
message\", i, msg)\n\t\t\t}\n\n\t\t\tsize := i\n\t\t\tif size > tc.period {\n\t\t\t\tsize = tc.period\n\t\t\t}\n\t\t\tif got, exp := w.size, size; got != exp {\n\t\t\t\tt.Errorf(\"%d unexpected size: got %d exp %d\", i, got, exp)\n\t\t\t}\n\t\t\tstart := (i - tc.period) % tc.period\n\t\t\tif start < 0 {\n\t\t\t\tstart = 0\n\t\t\t}\n\t\t\tif got, exp := w.start, start; got != exp {\n\t\t\t\tt.Errorf(\"%d unexpected start: got %d exp %d\", i, got, exp)\n\t\t\t}\n\t\t\tif got, exp := w.stop, i%tc.period; got != exp {\n\t\t\t\tt.Errorf(\"%d unexpected stop: got %d exp %d\", i, got, exp)\n\t\t\t}\n\n\t\t\tif msg != nil {\n\t\t\t\tif msg.Type() != edge.BufferedBatch {\n\t\t\t\t\tt.Fatalf(\"unexpected message type %v\", msg.Type())\n\t\t\t\t}\n\t\t\t\tb := msg.(edge.BufferedBatchMessage)\n\t\t\t\tl := i\n\t\t\t\tif l > tc.period {\n\t\t\t\t\tl = tc.period\n\t\t\t\t}\n\t\t\t\tpoints := b.Points()\n\t\t\t\tif got, exp := len(points), l; got != exp {\n\t\t\t\t\tt.Fatalf(\"%d unexpected number of points got %d exp %d\", i, got, exp)\n\t\t\t\t}\n\n\t\t\t\tfor j, p := range points {\n\t\t\t\t\tif got, exp := p.Time(), time.Unix(int64(i+j-len(points)+1), 0).UTC(); !got.Equal(exp) {\n\t\t\t\t\t\tt.Errorf(\"%d unexpected point[%d].Time: got %v exp %v\", i, j, got, exp)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
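The count-window semantics exercised by TestWindowBufferByCount above are easiest to see in a small worked trace. Below is a minimal, self-contained sketch; it is not part of the repository and mirrors only the expectations the test checks (emit every `every` points, keep at most the last `period` points), not the kapacitor implementation itself. Shown for period=3, every=2, fillPeriod=false:

```go
package main

import "fmt"

func main() {
	// Reproduce the emission pattern the test asserts for
	// period=3, every=2, fillPeriod=false over points 1..6.
	period, every := 3, 2
	var window []int
	for i := 1; i <= 6; i++ {
		window = append(window, i)
		if len(window) > period {
			window = window[1:] // keep at most `period` points
		}
		if i%every == 0 { // expEmit in the test above
			fmt.Println("emit", window)
		}
	}
	// Output:
	// emit [1 2]
	// emit [2 3 4]
	// emit [4 5 6]
}
```

Each emitted batch ends at the triggering point and reaches back at most `period` points, which is exactly what the point-timestamp loop in the test asserts.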
  {
    "path": "vendor/github.com/influxdata/wlog/LICENSE",
    "content": "The MIT License (MIT)\n\nCopyright (c) 2016 InfluxData\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "vendor/github.com/influxdata/wlog/README.md",
    "content": "# wlog\nSimple log level based Go logger.\nProvides an io.Writer that filters log messages based on a log level prefix.\nValid log levels are: DEBUG, INFO, WARN, ERROR, OFF.\nLog messages need to begin with a L! where L is one of D, I, W, or E.\n\n\n## Usage\n\nCreate a *log.Logger via wlog.New:\n\n```go\npackage main\n\nimport (\n    \"log\"\n    \"os\"\n\n    \"github.com/influxdata/wlog\"\n)\n\nfunc main() {\n    var logger *log.Logger\n    logger = wlog.New(os.Stderr, \"prefix\", log.LstdFlags)\n    logger.Println(\"I! initialized logger\")\n}\n```\n\nCreate a *log.Logger explicitly using wlog.Writer:\n\n```go\npackage main\n\nimport (\n    \"log\"\n    \"os\"\n\n    \"github.com/influxdata/wlog\"\n)\n\nfunc main() {\n    var logger *log.Logger\n    logger = log.New(wlog.NewWriter(os.Stderr), \"prefix\", log.LstdFlags)\n    logger.Println(\"I! initialized logger\")\n}\n```\n\nPrefix log messages with a log level char and the `!` delimiter.\n\n```go\nlogger.Println(\"D! this is a debug log\")\nlogger.Println(\"I! this is an info log\")\nlogger.Println(\"W! this is a warn log\")\nlogger.Println(\"E! this is an error log\")\n```\n\n\nThe log level can be changed via the SetLevel or the SetLevelFromName functions.\n\n\n```go\npackage main\n\nimport (\n    \"log\"\n    \"os\"\n\n    \"github.com/influxdata/wlog\"\n)\n\nfunc main() {\n    var logger *log.Logger\n    logger = wlog.New(os.Stderr, \"prefix\", log.LstdFlags)\n    wlog.SetLevel(wlog.DEBUG)\n    logger.Println(\"D! initialized logger\")\n    wlog.SetLevelFromName(\"INFO\")\n    logger.Println(\"D! this message will be dropped\")\n    logger.Println(\"I! this message will be printed\")\n}\n```\n\n"
  },
  {
    "path": "vendor/github.com/influxdata/wlog/writer.go",
    "content": "/*\n\tProvides an io.Writer that filters log messages based on a log level.\n\n\tValid log levels are: DEBUG, INFO, WARN, ERROR.\n\n\tLog messages need to begin with a L! where L is one of D, I, W, or E.\n\n\tExamples:\n\t\tlog.Println(\"D! this is a debug log\")\n\t\tlog.Println(\"I! this is an info log\")\n\t\tlog.Println(\"W! this is a warn log\")\n\t\tlog.Println(\"E! this is an error log\")\n\n\tSimply pass a instance of wlog.Writer to log.New or use the helper wlog.New function.\n\n\tThe log level can be changed via the SetLevel or the SetLevelFromName functions.\n*/\npackage wlog\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"strings\"\n\t\"sync\"\n)\n\ntype Level int\n\nconst (\n\t_ Level = iota\n\tDEBUG\n\tINFO\n\tWARN\n\tERROR\n\tOFF\n)\n\nconst Delimiter = '!'\n\nvar invalidMSG = []byte(\"log messages must have 'L!' prefix where L is one of 'D', 'I', 'W', 'E'\")\n\nvar Levels = map[byte]Level{\n\t'D': DEBUG,\n\t'I': INFO,\n\t'W': WARN,\n\t'E': ERROR,\n}\nvar ReverseLevels map[Level]byte\n\nfunc init() {\n\tReverseLevels = make(map[Level]byte, len(Levels))\n\tfor k, l := range Levels {\n\t\tReverseLevels[l] = k\n\t}\n}\n\n// The global and only log level. Log levels are not implemented per writer.\nvar logLevel = INFO\n\nvar mu sync.RWMutex\n\n// Set the current logging Level.\nfunc SetLevel(l Level) {\n\tmu.Lock()\n\tdefer mu.Unlock()\n\tlogLevel = l\n}\n\n// Retrieve the current logging Level.\nfunc LogLevel() Level {\n\tmu.RLock()\n\tdefer mu.RUnlock()\n\treturn logLevel\n}\n\n// name to Level mappings\nvar StringToLevel = map[string]Level{\n\t\"DEBUG\": DEBUG,\n\t\"INFO\":  INFO,\n\t\"WARN\":  WARN,\n\t\"ERROR\": ERROR,\n\t\"OFF\":   OFF,\n}\n\n// Set the log level via a string name. To set it directly use 'logLevel'.\nfunc SetLevelFromName(level string) error {\n\tl := StringToLevel[strings.ToUpper(level)]\n\tif l > 0 {\n\t\tSetLevel(l)\n\t} else {\n\t\treturn fmt.Errorf(\"invalid log level: %q\", level)\n\t}\n\treturn nil\n}\n\n// Implements io.Writer. 
Checks first byte of write for log level\n// and drops the log if necessary\ntype Writer struct {\n\tstart int\n\tw     io.Writer\n}\n\n// Create a new *log.Logger wrapping w in a wlog.Writer\nfunc New(w io.Writer, prefix string, flag int) *log.Logger {\n\treturn log.New(NewWriter(w), prefix, flag)\n}\n\n// Create a new wlog.Writer wrapping w.\nfunc NewWriter(w io.Writer) *Writer {\n\treturn &Writer{-1, w}\n}\n\n// Implements the io.Writer method.\nfunc (w *Writer) Write(buf []byte) (int, error) {\n\tif len(buf) > 0 {\n\t\tif w.start == -1 {\n\t\t\t// Find start of message index\n\t\t\tfor i, c := range buf {\n\t\t\t\tif c == Delimiter && i > 0 {\n\t\t\t\t\tl := buf[i-1]\n\t\t\t\t\tlevel := Levels[l]\n\t\t\t\t\tif level > 0 {\n\t\t\t\t\t\tw.start = i - 1\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif w.start == -1 {\n\t\t\t\tbuf = append(invalidMSG, buf...)\n\t\t\t\treturn w.w.Write(buf)\n\t\t\t}\n\t\t}\n\t\tl := Levels[buf[w.start]]\n\t\tif l >= LogLevel() {\n\t\t\treturn w.w.Write(buf)\n\t\t} else if l == 0 {\n\t\t\tbuf = append(invalidMSG, buf...)\n\t\t\treturn w.w.Write(buf)\n\t\t}\n\t}\n\treturn 0, nil\n}\n\n// StaticLevelWriter prefixes all log messages\n// with a static log level.\ntype StaticLevelWriter struct {\n\tlevelPrefix []byte\n\tw           io.Writer\n}\n\n// Create a writer that always append a static log prefix to all messages.\n// Usefult for supplying a *log.Logger to a package that doesn't\n// prefix log messages itself.\nfunc NewStaticLevelWriter(w io.Writer, level Level) *StaticLevelWriter {\n\tlevelPrefix := []byte{ReverseLevels[level], '!', ' '}\n\treturn &StaticLevelWriter{\n\t\tlevelPrefix: levelPrefix,\n\t\tw:           w,\n\t}\n}\n\nfunc (w *StaticLevelWriter) Write(buf []byte) (int, error) {\n\tbuf = append(w.levelPrefix, buf...)\n\treturn w.w.Write(buf)\n}\n"
  },
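One piece of the writer.go API above that the wlog README does not demonstrate is StaticLevelWriter. A minimal sketch of how it composes with the filtering Writer; the `"thirdparty "` logger prefix is only an illustration:

```go
package main

import (
	"log"
	"os"

	"github.com/influxdata/wlog"
)

func main() {
	// Filter everything below WARN.
	wlog.SetLevel(wlog.WARN)

	// A library that doesn't prefix its own messages gets every line
	// stamped with "W! " before the filtering writer sees it.
	w := wlog.NewStaticLevelWriter(wlog.NewWriter(os.Stderr), wlog.WARN)
	libLogger := log.New(w, "thirdparty ", log.LstdFlags)
	libLogger.Println("connection retry") // kept: stamped W!, WARN >= WARN
}
```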
  {
    "path": "vendor/github.com/matttproud/golang_protobuf_extensions/.travis.yml",
    "content": "language: go\n\n"
  },
  {
    "path": "vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. 
For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. 
Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"{}\"\n      replaced with your own identifying information. (Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright {yyyy} {name of copyright owner}\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE",
    "content": "Copyright 2012 Matt T. Proud (matt.proud@gmail.com)\n"
  },
  {
    "path": "vendor/github.com/matttproud/golang_protobuf_extensions/README.md",
    "content": "# Overview\nThis repository provides various Protocol Buffer extensions for the Go\nlanguage (golang), namely support for record length-delimited message\nstreaming.\n\n| Java                           | Go                    |\n| ------------------------------ | --------------------- |\n| MessageLite#parseDelimitedFrom | pbutil.ReadDelimited  |\n| MessageLite#writeDelimitedTo   | pbutil.WriteDelimited |\n\nBecause [Code Review 9102043](https://codereview.appspot.com/9102043/) is\ndestined to never be merged into mainline (i.e., never be promoted to formal\n[goprotobuf features](https://github.com/golang/protobuf)), this repository\nwill live here in the wild.\n\n# Documentation\nWe have [generated Go Doc documentation](http://godoc.org/github.com/matttproud/golang_protobuf_extensions/pbutil) here.\n\n# Testing\n[![Build Status](https://travis-ci.org/matttproud/golang_protobuf_extensions.png?branch=master)](https://travis-ci.org/matttproud/golang_protobuf_extensions)\n"
  },
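A minimal round-trip sketch of the two functions in the table above. It borrows the generated testdata messages that this package's own tests use; any generated proto.Message works the same way:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/proto/testdata"
	"github.com/matttproud/golang_protobuf_extensions/pbutil"
)

func main() {
	// Write two length-delimited records into one buffer...
	var buf bytes.Buffer
	in := &testdata.Strings{StringField: proto.String("hello")}
	for i := 0; i < 2; i++ {
		if _, err := pbutil.WriteDelimited(&buf, in); err != nil {
			panic(err)
		}
	}

	// ...and read them back one message at a time.
	for {
		out := &testdata.Strings{}
		if _, err := pbutil.ReadDelimited(&buf, out); err != nil {
			break // io.EOF once the stream is drained
		}
		fmt.Println(out.GetStringField()) // prints "hello" twice
	}
}
```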
  {
    "path": "vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go",
    "content": "// Copyright 2013 Matt T. Proud\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage pbutil\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\n\t. \"github.com/golang/protobuf/proto\"\n\t. \"github.com/golang/protobuf/proto/testdata\"\n)\n\nfunc TestWriteDelimited(t *testing.T) {\n\tt.Parallel()\n\tfor _, test := range []struct {\n\t\tmsg Message\n\t\tbuf []byte\n\t\tn   int\n\t\terr error\n\t}{\n\t\t{\n\t\t\tmsg: &Empty{},\n\t\t\tn:   1,\n\t\t\tbuf: []byte{0},\n\t\t},\n\t\t{\n\t\t\tmsg: &GoEnum{Foo: FOO_FOO1.Enum()},\n\t\t\tn:   3,\n\t\t\tbuf: []byte{2, 8, 1},\n\t\t},\n\t\t{\n\t\t\tmsg: &Strings{\n\t\t\t\tStringField: String(`This is my gigantic, unhappy string.  It exceeds\nthe encoding size of a single byte varint.  We are using it to fuzz test the\ncorrectness of the header decoding mechanisms, which may prove problematic.\nI expect it may.  Let's hope you enjoy testing as much as we do.`),\n\t\t\t},\n\t\t\tn: 271,\n\t\t\tbuf: []byte{141, 2, 10, 138, 2, 84, 104, 105, 115, 32, 105, 115, 32, 109,\n\t\t\t\t121, 32, 103, 105, 103, 97, 110, 116, 105, 99, 44, 32, 117, 110, 104,\n\t\t\t\t97, 112, 112, 121, 32, 115, 116, 114, 105, 110, 103, 46, 32, 32, 73,\n\t\t\t\t116, 32, 101, 120, 99, 101, 101, 100, 115, 10, 116, 104, 101, 32, 101,\n\t\t\t\t110, 99, 111, 100, 105, 110, 103, 32, 115, 105, 122, 101, 32, 111, 102,\n\t\t\t\t32, 97, 32, 115, 105, 110, 103, 108, 101, 32, 98, 121, 116, 101, 32,\n\t\t\t\t118, 97, 114, 105, 110, 116, 46, 32, 32, 87, 101, 32, 97, 114, 101, 32,\n\t\t\t\t117, 115, 105, 110, 103, 32, 105, 116, 32, 116, 111, 32, 102, 117, 122,\n\t\t\t\t122, 32, 116, 101, 115, 116, 32, 116, 104, 101, 10, 99, 111, 114, 114,\n\t\t\t\t101, 99, 116, 110, 101, 115, 115, 32, 111, 102, 32, 116, 104, 101, 32,\n\t\t\t\t104, 101, 97, 100, 101, 114, 32, 100, 101, 99, 111, 100, 105, 110, 103,\n\t\t\t\t32, 109, 101, 99, 104, 97, 110, 105, 115, 109, 115, 44, 32, 119, 104,\n\t\t\t\t105, 99, 104, 32, 109, 97, 121, 32, 112, 114, 111, 118, 101, 32, 112,\n\t\t\t\t114, 111, 98, 108, 101, 109, 97, 116, 105, 99, 46, 10, 73, 32, 101, 120,\n\t\t\t\t112, 101, 99, 116, 32, 105, 116, 32, 109, 97, 121, 46, 32, 32, 76, 101,\n\t\t\t\t116, 39, 115, 32, 104, 111, 112, 101, 32, 121, 111, 117, 32, 101, 110,\n\t\t\t\t106, 111, 121, 32, 116, 101, 115, 116, 105, 110, 103, 32, 97, 115, 32,\n\t\t\t\t109, 117, 99, 104, 32, 97, 115, 32, 119, 101, 32, 100, 111, 46},\n\t\t},\n\t} {\n\t\tvar buf bytes.Buffer\n\t\tif n, err := WriteDelimited(&buf, test.msg); n != test.n || err != test.err {\n\t\t\tt.Fatalf(\"WriteDelimited(buf, %#v) = %v, %v; want %v, %v\", test.msg, n, err, test.n, test.err)\n\t\t}\n\t\tif out := buf.Bytes(); !bytes.Equal(out, test.buf) {\n\t\t\tt.Fatalf(\"WriteDelimited(buf, %#v); buf = %v; want %v\", test.msg, out, test.buf)\n\t\t}\n\t}\n}\n\nfunc TestReadDelimited(t *testing.T) {\n\tt.Parallel()\n\tfor _, test := range []struct {\n\t\tbuf []byte\n\t\tmsg Message\n\t\tn   int\n\t\terr error\n\t}{\n\t\t{\n\t\t\tbuf: []byte{0},\n\t\t\tmsg: &Empty{},\n\t\t\tn:   
1,\n\t\t},\n\t\t{\n\t\t\tn:   3,\n\t\t\tbuf: []byte{2, 8, 1},\n\t\t\tmsg: &GoEnum{Foo: FOO_FOO1.Enum()},\n\t\t},\n\t\t{\n\t\t\tbuf: []byte{141, 2, 10, 138, 2, 84, 104, 105, 115, 32, 105, 115, 32, 109,\n\t\t\t\t121, 32, 103, 105, 103, 97, 110, 116, 105, 99, 44, 32, 117, 110, 104,\n\t\t\t\t97, 112, 112, 121, 32, 115, 116, 114, 105, 110, 103, 46, 32, 32, 73,\n\t\t\t\t116, 32, 101, 120, 99, 101, 101, 100, 115, 10, 116, 104, 101, 32, 101,\n\t\t\t\t110, 99, 111, 100, 105, 110, 103, 32, 115, 105, 122, 101, 32, 111, 102,\n\t\t\t\t32, 97, 32, 115, 105, 110, 103, 108, 101, 32, 98, 121, 116, 101, 32,\n\t\t\t\t118, 97, 114, 105, 110, 116, 46, 32, 32, 87, 101, 32, 97, 114, 101, 32,\n\t\t\t\t117, 115, 105, 110, 103, 32, 105, 116, 32, 116, 111, 32, 102, 117, 122,\n\t\t\t\t122, 32, 116, 101, 115, 116, 32, 116, 104, 101, 10, 99, 111, 114, 114,\n\t\t\t\t101, 99, 116, 110, 101, 115, 115, 32, 111, 102, 32, 116, 104, 101, 32,\n\t\t\t\t104, 101, 97, 100, 101, 114, 32, 100, 101, 99, 111, 100, 105, 110, 103,\n\t\t\t\t32, 109, 101, 99, 104, 97, 110, 105, 115, 109, 115, 44, 32, 119, 104,\n\t\t\t\t105, 99, 104, 32, 109, 97, 121, 32, 112, 114, 111, 118, 101, 32, 112,\n\t\t\t\t114, 111, 98, 108, 101, 109, 97, 116, 105, 99, 46, 10, 73, 32, 101, 120,\n\t\t\t\t112, 101, 99, 116, 32, 105, 116, 32, 109, 97, 121, 46, 32, 32, 76, 101,\n\t\t\t\t116, 39, 115, 32, 104, 111, 112, 101, 32, 121, 111, 117, 32, 101, 110,\n\t\t\t\t106, 111, 121, 32, 116, 101, 115, 116, 105, 110, 103, 32, 97, 115, 32,\n\t\t\t\t109, 117, 99, 104, 32, 97, 115, 32, 119, 101, 32, 100, 111, 46},\n\t\t\tmsg: &Strings{\n\t\t\t\tStringField: String(`This is my gigantic, unhappy string.  It exceeds\nthe encoding size of a single byte varint.  We are using it to fuzz test the\ncorrectness of the header decoding mechanisms, which may prove problematic.\nI expect it may.  Let's hope you enjoy testing as much as we do.`),\n\t\t\t},\n\t\t\tn: 271,\n\t\t},\n\t} {\n\t\tmsg := Clone(test.msg)\n\t\tmsg.Reset()\n\t\tif n, err := ReadDelimited(bytes.NewBuffer(test.buf), msg); n != test.n || err != test.err {\n\t\t\tt.Fatalf(\"ReadDelimited(%v, msg) = %v, %v; want %v, %v\", test.buf, n, err, test.n, test.err)\n\t\t}\n\t\tif !Equal(msg, test.msg) {\n\t\t\tt.Fatalf(\"ReadDelimited(%v, msg); msg = %v; want %v\", test.buf, msg, test.msg)\n\t\t}\n\t}\n}\n\nfunc TestEndToEndValid(t *testing.T) {\n\tt.Parallel()\n\tfor _, test := range [][]Message{\n\t\t{&Empty{}},\n\t\t{&GoEnum{Foo: FOO_FOO1.Enum()}, &Empty{}, &GoEnum{Foo: FOO_FOO1.Enum()}},\n\t\t{&GoEnum{Foo: FOO_FOO1.Enum()}},\n\t\t{&Strings{\n\t\t\tStringField: String(`This is my gigantic, unhappy string.  It exceeds\nthe encoding size of a single byte varint.  We are using it to fuzz test the\ncorrectness of the header decoding mechanisms, which may prove problematic.\nI expect it may.  
Let's hope you enjoy testing as much as we do.`),\n\t\t}},\n\t} {\n\t\tvar buf bytes.Buffer\n\t\tvar written int\n\t\tfor i, msg := range test {\n\t\t\tn, err := WriteDelimited(&buf, msg)\n\t\t\tif err != nil {\n\t\t\t\t// Assumption: TestReadDelimited and TestWriteDelimited are sufficient\n\t\t\t\t//             and inputs for this test are explicitly exercised there.\n\t\t\t\tt.Fatalf(\"WriteDelimited(buf, %v[%d]) = ?, %v; wanted ?, nil\", test, i, err)\n\t\t\t}\n\t\t\twritten += n\n\t\t}\n\t\tvar read int\n\t\tfor i, msg := range test {\n\t\t\tout := Clone(msg)\n\t\t\tout.Reset()\n\t\t\tn, _ := ReadDelimited(&buf, out)\n\t\t\t// Decide to do EOF checking?\n\t\t\tread += n\n\t\t\tif !Equal(out, msg) {\n\t\t\t\tt.Fatalf(\"out = %v; want %v[%d] = %#v\", out, test, i, msg)\n\t\t\t}\n\t\t}\n\t\tif read != written {\n\t\t\tt.Fatalf(\"%v read = %d; want %d\", test, read, written)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go",
    "content": "// Copyright 2013 Matt T. Proud\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage pbutil\n\nimport (\n\t\"encoding/binary\"\n\t\"errors\"\n\t\"io\"\n\n\t\"github.com/golang/protobuf/proto\"\n)\n\nvar errInvalidVarint = errors.New(\"invalid varint32 encountered\")\n\n// ReadDelimited decodes a message from the provided length-delimited stream,\n// where the length is encoded as 32-bit varint prefix to the message body.\n// It returns the total number of bytes read and any applicable error.  This is\n// roughly equivalent to the companion Java API's\n// MessageLite#parseDelimitedFrom.  As per the reader contract, this function\n// calls r.Read repeatedly as required until exactly one message including its\n// prefix is read and decoded (or an error has occurred).  The function never\n// reads more bytes from the stream than required.  The function never returns\n// an error if a message has been read and decoded correctly, even if the end\n// of the stream has been reached in doing so.  In that case, any subsequent\n// calls return (0, io.EOF).\nfunc ReadDelimited(r io.Reader, m proto.Message) (n int, err error) {\n\t// Per AbstractParser#parsePartialDelimitedFrom with\n\t// CodedInputStream#readRawVarint32.\n\tvar headerBuf [binary.MaxVarintLen32]byte\n\tvar bytesRead, varIntBytes int\n\tvar messageLength uint64\n\tfor varIntBytes == 0 { // i.e. no varint has been decoded yet.\n\t\tif bytesRead >= len(headerBuf) {\n\t\t\treturn bytesRead, errInvalidVarint\n\t\t}\n\t\t// We have to read byte by byte here to avoid reading more bytes\n\t\t// than required. Each read byte is appended to what we have\n\t\t// read before.\n\t\tnewBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1])\n\t\tif newBytesRead == 0 {\n\t\t\tif err != nil {\n\t\t\t\treturn bytesRead, err\n\t\t\t}\n\t\t\t// A Reader should not return (0, nil), but if it does,\n\t\t\t// it should be treated as no-op (according to the\n\t\t\t// Reader contract). So let's go on...\n\t\t\tcontinue\n\t\t}\n\t\tbytesRead += newBytesRead\n\t\t// Now present everything read so far to the varint decoder and\n\t\t// see if a varint can be decoded already.\n\t\tmessageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead])\n\t}\n\n\tmessageBuf := make([]byte, messageLength)\n\tnewBytesRead, err := io.ReadFull(r, messageBuf)\n\tbytesRead += newBytesRead\n\tif err != nil {\n\t\treturn bytesRead, err\n\t}\n\n\treturn bytesRead, proto.Unmarshal(messageBuf, m)\n}\n"
  },
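One consequence of the ReadDelimited contract above worth spelling out: a clean end of stream between records surfaces as io.EOF from the header read, while a record truncated mid-body surfaces as io.ErrUnexpectedEOF from the io.ReadFull of the message bytes. A minimal consumption loop built on that distinction; the `pbstream` package and `drain` helper are illustrative only, and the testdata message is borrowed from this vendor tree's tests:

```go
package pbstream

import (
	"io"

	"github.com/golang/protobuf/proto/testdata"
	"github.com/matttproud/golang_protobuf_extensions/pbutil"
)

// drain reads length-delimited records until the stream ends. A clean
// end between records is not an error; a record truncated mid-body
// (io.ErrUnexpectedEOF) or any other failure is reported to the caller.
func drain(r io.Reader) (count int, err error) {
	for {
		msg := &testdata.Strings{}
		if _, err := pbutil.ReadDelimited(r, msg); err != nil {
			if err == io.EOF {
				return count, nil // clean end of stream
			}
			return count, err // truncated or corrupt record
		}
		count++
	}
}
```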
  {
    "path": "vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode_test.go",
    "content": "// Copyright 2016 Matt T. Proud\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage pbutil\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"testing\"\n\t\"testing/iotest\"\n)\n\nfunc TestReadDelimitedIllegalVarint(t *testing.T) {\n\tt.Parallel()\n\tvar tests = []struct {\n\t\tin  []byte\n\t\tn   int\n\t\terr error\n\t}{\n\t\t{\n\t\t\tin:  []byte{255, 255, 255, 255, 255},\n\t\t\tn:   5,\n\t\t\terr: errInvalidVarint,\n\t\t},\n\t\t{\n\t\t\tin:  []byte{255, 255, 255, 255, 255, 255},\n\t\t\tn:   5,\n\t\t\terr: errInvalidVarint,\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tn, err := ReadDelimited(bytes.NewReader(test.in), nil)\n\t\tif got, want := n, test.n; got != want {\n\t\t\tt.Errorf(\"ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?\", test.in, got, want)\n\t\t}\n\t\tif got, want := err, test.err; got != want {\n\t\t\tt.Errorf(\"ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v\", test.in, got, want)\n\t\t}\n\t}\n}\n\nfunc TestReadDelimitedPrematureHeader(t *testing.T) {\n\tt.Parallel()\n\tvar data = []byte{128, 5} // 256 + 256 + 128\n\tn, err := ReadDelimited(bytes.NewReader(data[0:1]), nil)\n\tif got, want := n, 1; got != want {\n\t\tt.Errorf(\"ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?\", data[0:1], got, want)\n\t}\n\tif got, want := err, io.EOF; got != want {\n\t\tt.Errorf(\"ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v\", data[0:1], got, want)\n\t}\n}\n\nfunc TestReadDelimitedPrematureBody(t *testing.T) {\n\tt.Parallel()\n\tvar data = []byte{128, 5, 0, 0, 0} // 256 + 256 + 128\n\tn, err := ReadDelimited(bytes.NewReader(data[:]), nil)\n\tif got, want := n, 5; got != want {\n\t\tt.Errorf(\"ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?\", data, got, want)\n\t}\n\tif got, want := err, io.ErrUnexpectedEOF; got != want {\n\t\tt.Errorf(\"ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v\", data, got, want)\n\t}\n}\n\nfunc TestReadDelimitedPrematureHeaderIncremental(t *testing.T) {\n\tt.Parallel()\n\tvar data = []byte{128, 5} // 256 + 256 + 128\n\tn, err := ReadDelimited(iotest.OneByteReader(bytes.NewReader(data[0:1])), nil)\n\tif got, want := n, 1; got != want {\n\t\tt.Errorf(\"ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?\", data[0:1], got, want)\n\t}\n\tif got, want := err, io.EOF; got != want {\n\t\tt.Errorf(\"ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v\", data[0:1], got, want)\n\t}\n}\n\nfunc TestReadDelimitedPrematureBodyIncremental(t *testing.T) {\n\tt.Parallel()\n\tvar data = []byte{128, 5, 0, 0, 0} // 256 + 256 + 128\n\tn, err := ReadDelimited(iotest.OneByteReader(bytes.NewReader(data[:])), nil)\n\tif got, want := n, 5; got != want {\n\t\tt.Errorf(\"ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?\", data, got, want)\n\t}\n\tif got, want := err, io.ErrUnexpectedEOF; got != want {\n\t\tt.Errorf(\"ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v\", data, got, want)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go",
    "content": "// Copyright 2013 Matt T. Proud\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// Package pbutil provides record length-delimited Protocol Buffer streaming.\npackage pbutil\n"
  },
  {
    "path": "vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go",
    "content": "// Copyright 2013 Matt T. Proud\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage pbutil\n\nimport (\n\t\"encoding/binary\"\n\t\"io\"\n\n\t\"github.com/golang/protobuf/proto\"\n)\n\n// WriteDelimited encodes and dumps a message to the provided writer prefixed\n// with a 32-bit varint indicating the length of the encoded message, producing\n// a length-delimited record stream, which can be used to chain together\n// encoded messages of the same type together in a file.  It returns the total\n// number of bytes written and any applicable error.  This is roughly\n// equivalent to the companion Java API's MessageLite#writeDelimitedTo.\nfunc WriteDelimited(w io.Writer, m proto.Message) (n int, err error) {\n\tbuffer, err := proto.Marshal(m)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tvar buf [binary.MaxVarintLen32]byte\n\tencodedLength := binary.PutUvarint(buf[:], uint64(len(buffer)))\n\n\tsync, err := w.Write(buf[:encodedLength])\n\tif err != nil {\n\t\treturn sync, err\n\t}\n\n\tn, err = w.Write(buffer)\n\treturn n + sync, err\n}\n"
  },
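The length prefix WriteDelimited emits is a plain unsigned varint, so its bytes can be worked out by hand. A minimal sketch for a 300-byte message: 300 is 0b1_0010_1100, so the low seven bits (0x2C) go out first with the continuation bit set (0xAC), followed by the remaining bits (0x02):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Same call WriteDelimited makes to build the length prefix.
	var buf [binary.MaxVarintLen32]byte
	n := binary.PutUvarint(buf[:], 300)
	fmt.Printf("% x\n", buf[:n]) // prints: ac 02
}
```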
  {
    "path": "vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode_test.go",
    "content": "// Copyright 2016 Matt T. Proud\n//\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n//     http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage pbutil\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"testing\"\n\n\t\"github.com/golang/protobuf/proto\"\n)\n\nvar errMarshal = errors.New(\"pbutil: can't marshal\")\n\ntype cantMarshal struct{ proto.Message }\n\nfunc (cantMarshal) Marshal() ([]byte, error) { return nil, errMarshal }\n\nvar _ proto.Message = cantMarshal{}\n\nfunc TestWriteDelimitedMarshalErr(t *testing.T) {\n\tt.Parallel()\n\tvar data cantMarshal\n\tvar buf bytes.Buffer\n\tn, err := WriteDelimited(&buf, data)\n\tif got, want := n, 0; got != want {\n\t\tt.Errorf(\"WriteDelimited(buf, %#v) = %#v, ?; want = %v#, ?\", data, got, want)\n\t}\n\tif got, want := err, errMarshal; got != want {\n\t\tt.Errorf(\"WriteDelimited(buf, %#v) = ?, %#v; want = ?, %#v\", data, got, want)\n\t}\n}\n\ntype canMarshal struct{ proto.Message }\n\nfunc (canMarshal) Marshal() ([]byte, error) { return []byte{0, 1, 2, 3, 4, 5}, nil }\n\nvar errWrite = errors.New(\"pbutil: can't write\")\n\ntype cantWrite struct{}\n\nfunc (cantWrite) Write([]byte) (int, error) { return 0, errWrite }\n\nfunc TestWriteDelimitedWriteErr(t *testing.T) {\n\tt.Parallel()\n\tvar data canMarshal\n\tvar buf cantWrite\n\tn, err := WriteDelimited(buf, data)\n\tif got, want := n, 0; got != want {\n\t\tt.Errorf(\"WriteDelimited(buf, %#v) = %#v, ?; want = %v#, ?\", data, got, want)\n\t}\n\tif got, want := err, errWrite; got != want {\n\t\tt.Errorf(\"WriteDelimited(buf, %#v) = ?, %#v; want = ?, %#v\", data, got, want)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/fixtures_test.go",
    "content": "// Copyright 2010 The Go Authors.  All rights reserved.\n// http://github.com/golang/protobuf/\n//\n// Redistribution and use in source and binary forms, with or without\n// modification, are permitted provided that the following conditions are\n// met:\n//\n//     * Redistributions of source code must retain the above copyright\n// notice, this list of conditions and the following disclaimer.\n//     * Redistributions in binary form must reproduce the above\n// copyright notice, this list of conditions and the following disclaimer\n// in the documentation and/or other materials provided with the\n// distribution.\n//     * Neither the name of Google Inc. nor the names of its\n// contributors may be used to endorse or promote products derived from\n// this software without specific prior written permission.\n//\n// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n// \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\n// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\npackage pbutil\n\nimport (\n\t. \"github.com/golang/protobuf/proto\"\n\t. \"github.com/golang/protobuf/proto/testdata\"\n)\n\n// FROM https://github.com/golang/protobuf/blob/master/proto/all_test.go.\n\nfunc initGoTestField() *GoTestField {\n\tf := new(GoTestField)\n\tf.Label = String(\"label\")\n\tf.Type = String(\"type\")\n\treturn f\n}\n\n// These are all structurally equivalent but the tag numbers differ.\n// (It's remarkable that required, optional, and repeated all have\n// 8 letters.)\nfunc initGoTest_RequiredGroup() *GoTest_RequiredGroup {\n\treturn &GoTest_RequiredGroup{\n\t\tRequiredField: String(\"required\"),\n\t}\n}\n\nfunc initGoTest_OptionalGroup() *GoTest_OptionalGroup {\n\treturn &GoTest_OptionalGroup{\n\t\tRequiredField: String(\"optional\"),\n\t}\n}\n\nfunc initGoTest_RepeatedGroup() *GoTest_RepeatedGroup {\n\treturn &GoTest_RepeatedGroup{\n\t\tRequiredField: String(\"repeated\"),\n\t}\n}\n\nfunc initGoTest(setdefaults bool) *GoTest {\n\tpb := new(GoTest)\n\tif setdefaults {\n\t\tpb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted)\n\t\tpb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted)\n\t\tpb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted)\n\t\tpb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted)\n\t\tpb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted)\n\t\tpb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted)\n\t\tpb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted)\n\t\tpb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted)\n\t\tpb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted)\n\t\tpb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted)\n\t\tpb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted\n\t\tpb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted)\n\t\tpb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted)\n\t}\n\n\tpb.Kind = 
GoTest_TIME.Enum()\n\tpb.RequiredField = initGoTestField()\n\tpb.F_BoolRequired = Bool(true)\n\tpb.F_Int32Required = Int32(3)\n\tpb.F_Int64Required = Int64(6)\n\tpb.F_Fixed32Required = Uint32(32)\n\tpb.F_Fixed64Required = Uint64(64)\n\tpb.F_Uint32Required = Uint32(3232)\n\tpb.F_Uint64Required = Uint64(6464)\n\tpb.F_FloatRequired = Float32(3232)\n\tpb.F_DoubleRequired = Float64(6464)\n\tpb.F_StringRequired = String(\"string\")\n\tpb.F_BytesRequired = []byte(\"bytes\")\n\tpb.F_Sint32Required = Int32(-32)\n\tpb.F_Sint64Required = Int64(-64)\n\tpb.Requiredgroup = initGoTest_RequiredGroup()\n\n\treturn pb\n}\n"
  },
  {
    "path": "vendor/github.com/pkg/errors/.gitignore",
    "content": "# Compiled Object files, Static and Dynamic libs (Shared Objects)\n*.o\n*.a\n*.so\n\n# Folders\n_obj\n_test\n\n# Architecture specific extensions/prefixes\n*.[568vq]\n[568vq].out\n\n*.cgo1.go\n*.cgo2.c\n_cgo_defun.c\n_cgo_gotypes.go\n_cgo_export.*\n\n_testmain.go\n\n*.exe\n*.test\n*.prof\n"
  },
  {
    "path": "vendor/github.com/pkg/errors/.travis.yml",
    "content": "language: go\ngo_import_path: github.com/pkg/errors\ngo:\n  - 1.4.3\n  - 1.5.4\n  - 1.6.2\n  - 1.7.1\n  - tip\n\nscript:\n  - go test -v ./...\n"
  },
  {
    "path": "vendor/github.com/pkg/errors/LICENSE",
    "content": "Copyright (c) 2015, Dave Cheney <dave@cheney.net>\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "vendor/github.com/pkg/errors/README.md",
    "content": "# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors)\n\nPackage errors provides simple error handling primitives.\n\n`go get github.com/pkg/errors`\n\nThe traditional error handling idiom in Go is roughly akin to\n```go\nif err != nil {\n        return err\n}\n```\nwhich applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.\n\n## Adding context to an error\n\nThe errors.Wrap function returns a new error that adds context to the original error. For example\n```go\n_, err := ioutil.ReadAll(r)\nif err != nil {\n        return errors.Wrap(err, \"read failed\")\n}\n```\n## Retrieving the cause of an error\n\nUsing `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.\n```go\ntype causer interface {\n        Cause() error\n}\n```\n`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:\n```go\nswitch err := errors.Cause(err).(type) {\ncase *MyError:\n        // handle specifically\ndefault:\n        // unknown error\n}\n```\n\n[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).\n\n## Contributing\n\nWe welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high.\n\nBefore proposing a change, please discuss your change by raising an issue.\n\n## Licence\n\nBSD-2-Clause\n"
  },
  {
    "path": "vendor/github.com/pkg/errors/appveyor.yml",
    "content": "version: build-{build}.{branch}\n\nclone_folder: C:\\gopath\\src\\github.com\\pkg\\errors\nshallow_clone: true # for startup speed\n\nenvironment:\n  GOPATH: C:\\gopath\n\nplatform:\n  - x64\n\n# http://www.appveyor.com/docs/installed-software\ninstall:\n  # some helpful output for debugging builds\n  - go version\n  - go env\n  # pre-installed MinGW at C:\\MinGW is 32bit only\n  # but MSYS2 at C:\\msys64 has mingw64\n  - set PATH=C:\\msys64\\mingw64\\bin;%PATH%\n  - gcc --version\n  - g++ --version\n\nbuild_script:\n  - go install -v ./...\n\ntest_script:\n  - set PATH=C:\\gopath\\bin;%PATH%\n  - go test -v ./...\n\n#artifacts:\n#  - path: '%GOPATH%\\bin\\*.exe'\ndeploy: off\n"
  },
  {
    "path": "vendor/github.com/pkg/errors/bench_test.go",
    "content": "// +build go1.7\n\npackage errors\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\tstderrors \"errors\"\n)\n\nfunc noErrors(at, depth int) error {\n\tif at >= depth {\n\t\treturn stderrors.New(\"no error\")\n\t}\n\treturn noErrors(at+1, depth)\n}\nfunc yesErrors(at, depth int) error {\n\tif at >= depth {\n\t\treturn New(\"ye error\")\n\t}\n\treturn yesErrors(at+1, depth)\n}\n\nfunc BenchmarkErrors(b *testing.B) {\n\tvar toperr error\n\ttype run struct {\n\t\tstack int\n\t\tstd   bool\n\t}\n\truns := []run{\n\t\t{10, false},\n\t\t{10, true},\n\t\t{100, false},\n\t\t{100, true},\n\t\t{1000, false},\n\t\t{1000, true},\n\t}\n\tfor _, r := range runs {\n\t\tpart := \"pkg/errors\"\n\t\tif r.std {\n\t\t\tpart = \"errors\"\n\t\t}\n\t\tname := fmt.Sprintf(\"%s-stack-%d\", part, r.stack)\n\t\tb.Run(name, func(b *testing.B) {\n\t\t\tvar err error\n\t\t\tf := yesErrors\n\t\t\tif r.std {\n\t\t\t\tf = noErrors\n\t\t\t}\n\t\t\tb.ReportAllocs()\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\terr = f(0, r.stack)\n\t\t\t}\n\t\t\tb.StopTimer()\n\t\t\ttoperr = err\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/pkg/errors/errors.go",
    "content": "// Package errors provides simple error handling primitives.\n//\n// The traditional error handling idiom in Go is roughly akin to\n//\n//     if err != nil {\n//             return err\n//     }\n//\n// which applied recursively up the call stack results in error reports\n// without context or debugging information. The errors package allows\n// programmers to add context to the failure path in their code in a way\n// that does not destroy the original value of the error.\n//\n// Adding context to an error\n//\n// The errors.Wrap function returns a new error that adds context to the\n// original error by recording a stack trace at the point Wrap is called,\n// and the supplied message. For example\n//\n//     _, err := ioutil.ReadAll(r)\n//     if err != nil {\n//             return errors.Wrap(err, \"read failed\")\n//     }\n//\n// If additional control is required the errors.WithStack and errors.WithMessage\n// functions destructure errors.Wrap into its component operations of annotating\n// an error with a stack trace and an a message, respectively.\n//\n// Retrieving the cause of an error\n//\n// Using errors.Wrap constructs a stack of errors, adding context to the\n// preceding error. Depending on the nature of the error it may be necessary\n// to reverse the operation of errors.Wrap to retrieve the original error\n// for inspection. Any error value which implements this interface\n//\n//     type causer interface {\n//             Cause() error\n//     }\n//\n// can be inspected by errors.Cause. errors.Cause will recursively retrieve\n// the topmost error which does not implement causer, which is assumed to be\n// the original cause. For example:\n//\n//     switch err := errors.Cause(err).(type) {\n//     case *MyError:\n//             // handle specifically\n//     default:\n//             // unknown error\n//     }\n//\n// causer interface is not exported by this package, but is considered a part\n// of stable public API.\n//\n// Formatted printing of errors\n//\n// All error values returned from this package implement fmt.Formatter and can\n// be formatted by the fmt package. The following verbs are supported\n//\n//     %s    print the error. If the error has a Cause it will be\n//           printed recursively\n//     %v    see %s\n//     %+v   extended format. Each Frame of the error's StackTrace will\n//           be printed in detail.\n//\n// Retrieving the stack trace of an error or wrapper\n//\n// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are\n// invoked. This information can be retrieved with the following interface.\n//\n//     type stackTracer interface {\n//             StackTrace() errors.StackTrace\n//     }\n//\n// Where errors.StackTrace is defined as\n//\n//     type StackTrace []Frame\n//\n// The Frame type represents a call site in the stack trace. Frame supports\n// the fmt.Formatter interface that can be used for printing information about\n// the stack trace of this error. 
For example:\n//\n//     if err, ok := err.(stackTracer); ok {\n//             for _, f := range err.StackTrace() {\n//                     fmt.Printf(\"%+s:%d\", f)\n//             }\n//     }\n//\n// stackTracer interface is not exported by this package, but is considered a part\n// of stable public API.\n//\n// See the documentation for Frame.Format for more details.\npackage errors\n\nimport (\n\t\"fmt\"\n\t\"io\"\n)\n\n// New returns an error with the supplied message.\n// New also records the stack trace at the point it was called.\nfunc New(message string) error {\n\treturn &fundamental{\n\t\tmsg:   message,\n\t\tstack: callers(),\n\t}\n}\n\n// Errorf formats according to a format specifier and returns the string\n// as a value that satisfies error.\n// Errorf also records the stack trace at the point it was called.\nfunc Errorf(format string, args ...interface{}) error {\n\treturn &fundamental{\n\t\tmsg:   fmt.Sprintf(format, args...),\n\t\tstack: callers(),\n\t}\n}\n\n// fundamental is an error that has a message and a stack, but no caller.\ntype fundamental struct {\n\tmsg string\n\t*stack\n}\n\nfunc (f *fundamental) Error() string { return f.msg }\n\nfunc (f *fundamental) Format(s fmt.State, verb rune) {\n\tswitch verb {\n\tcase 'v':\n\t\tif s.Flag('+') {\n\t\t\tio.WriteString(s, f.msg)\n\t\t\tf.stack.Format(s, verb)\n\t\t\treturn\n\t\t}\n\t\tfallthrough\n\tcase 's':\n\t\tio.WriteString(s, f.msg)\n\tcase 'q':\n\t\tfmt.Fprintf(s, \"%q\", f.msg)\n\t}\n}\n\n// WithStack annotates err with a stack trace at the point WithStack was called.\n// If err is nil, WithStack returns nil.\nfunc WithStack(err error) error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\treturn &withStack{\n\t\terr,\n\t\tcallers(),\n\t}\n}\n\ntype withStack struct {\n\terror\n\t*stack\n}\n\nfunc (w *withStack) Cause() error { return w.error }\n\nfunc (w *withStack) Format(s fmt.State, verb rune) {\n\tswitch verb {\n\tcase 'v':\n\t\tif s.Flag('+') {\n\t\t\tfmt.Fprintf(s, \"%+v\", w.Cause())\n\t\t\tw.stack.Format(s, verb)\n\t\t\treturn\n\t\t}\n\t\tfallthrough\n\tcase 's':\n\t\tio.WriteString(s, w.Error())\n\tcase 'q':\n\t\tfmt.Fprintf(s, \"%q\", w.Error())\n\t}\n}\n\n// Wrap returns an error annotating err with a stack trace\n// at the point Wrap is called, and the supplied message.\n// If err is nil, Wrap returns nil.\nfunc Wrap(err error, message string) error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\terr = &withMessage{\n\t\tcause: err,\n\t\tmsg:   message,\n\t}\n\treturn &withStack{\n\t\terr,\n\t\tcallers(),\n\t}\n}\n\n// Wrapf returns an error annotating err with a stack trace\n// at the point Wrapf is call, and the format specifier.\n// If err is nil, Wrapf returns nil.\nfunc Wrapf(err error, format string, args ...interface{}) error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\terr = &withMessage{\n\t\tcause: err,\n\t\tmsg:   fmt.Sprintf(format, args...),\n\t}\n\treturn &withStack{\n\t\terr,\n\t\tcallers(),\n\t}\n}\n\n// WithMessage annotates err with a new message.\n// If err is nil, WithMessage returns nil.\nfunc WithMessage(err error, message string) error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\treturn &withMessage{\n\t\tcause: err,\n\t\tmsg:   message,\n\t}\n}\n\ntype withMessage struct {\n\tcause error\n\tmsg   string\n}\n\nfunc (w *withMessage) Error() string { return w.msg + \": \" + w.cause.Error() }\nfunc (w *withMessage) Cause() error  { return w.cause }\n\nfunc (w *withMessage) Format(s fmt.State, verb rune) {\n\tswitch verb {\n\tcase 'v':\n\t\tif s.Flag('+') {\n\t\t\tfmt.Fprintf(s, 
\"%+v\\n\", w.Cause())\n\t\t\tio.WriteString(s, w.msg)\n\t\t\treturn\n\t\t}\n\t\tfallthrough\n\tcase 's', 'q':\n\t\tio.WriteString(s, w.Error())\n\t}\n}\n\n// Cause returns the underlying cause of the error, if possible.\n// An error value has a cause if it implements the following\n// interface:\n//\n//     type causer interface {\n//            Cause() error\n//     }\n//\n// If the error does not implement Cause, the original error will\n// be returned. If the error is nil, nil will be returned without further\n// investigation.\nfunc Cause(err error) error {\n\ttype causer interface {\n\t\tCause() error\n\t}\n\n\tfor err != nil {\n\t\tcause, ok := err.(causer)\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\terr = cause.Cause()\n\t}\n\treturn err\n}\n"
  },
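The doc comment in errors.go above describes WithStack and WithMessage as the component operations of Wrap. A minimal sketch of that equivalence; the output comments follow the Error and Cause behavior defined above:

```go
package main

import (
	"fmt"
	"io"

	"github.com/pkg/errors"
)

func main() {
	// Wrap is the composition of its two component operations:
	// annotate with a message, then annotate with a stack trace.
	a := errors.Wrap(io.EOF, "read failed")
	b := errors.WithStack(errors.WithMessage(io.EOF, "read failed"))

	fmt.Println(a)                                  // read failed: EOF
	fmt.Println(b)                                  // read failed: EOF
	fmt.Println(errors.Cause(a) == errors.Cause(b)) // true: both unwrap to io.EOF
}
```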
  {
    "path": "vendor/github.com/pkg/errors/errors_test.go",
    "content": "package errors\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestNew(t *testing.T) {\n\ttests := []struct {\n\t\terr  string\n\t\twant error\n\t}{\n\t\t{\"\", fmt.Errorf(\"\")},\n\t\t{\"foo\", fmt.Errorf(\"foo\")},\n\t\t{\"foo\", New(\"foo\")},\n\t\t{\"string with format specifiers: %v\", errors.New(\"string with format specifiers: %v\")},\n\t}\n\n\tfor _, tt := range tests {\n\t\tgot := New(tt.err)\n\t\tif got.Error() != tt.want.Error() {\n\t\t\tt.Errorf(\"New.Error(): got: %q, want %q\", got, tt.want)\n\t\t}\n\t}\n}\n\nfunc TestWrapNil(t *testing.T) {\n\tgot := Wrap(nil, \"no error\")\n\tif got != nil {\n\t\tt.Errorf(\"Wrap(nil, \\\"no error\\\"): got %#v, expected nil\", got)\n\t}\n}\n\nfunc TestWrap(t *testing.T) {\n\ttests := []struct {\n\t\terr     error\n\t\tmessage string\n\t\twant    string\n\t}{\n\t\t{io.EOF, \"read error\", \"read error: EOF\"},\n\t\t{Wrap(io.EOF, \"read error\"), \"client error\", \"client error: read error: EOF\"},\n\t}\n\n\tfor _, tt := range tests {\n\t\tgot := Wrap(tt.err, tt.message).Error()\n\t\tif got != tt.want {\n\t\t\tt.Errorf(\"Wrap(%v, %q): got: %v, want %v\", tt.err, tt.message, got, tt.want)\n\t\t}\n\t}\n}\n\ntype nilError struct{}\n\nfunc (nilError) Error() string { return \"nil error\" }\n\nfunc TestCause(t *testing.T) {\n\tx := New(\"error\")\n\ttests := []struct {\n\t\terr  error\n\t\twant error\n\t}{{\n\t\t// nil error is nil\n\t\terr:  nil,\n\t\twant: nil,\n\t}, {\n\t\t// explicit nil error is nil\n\t\terr:  (error)(nil),\n\t\twant: nil,\n\t}, {\n\t\t// typed nil is nil\n\t\terr:  (*nilError)(nil),\n\t\twant: (*nilError)(nil),\n\t}, {\n\t\t// uncaused error is unaffected\n\t\terr:  io.EOF,\n\t\twant: io.EOF,\n\t}, {\n\t\t// caused error returns cause\n\t\terr:  Wrap(io.EOF, \"ignored\"),\n\t\twant: io.EOF,\n\t}, {\n\t\terr:  x, // return from errors.New\n\t\twant: x,\n\t}, {\n\t\tWithMessage(nil, \"whoops\"),\n\t\tnil,\n\t}, {\n\t\tWithMessage(io.EOF, \"whoops\"),\n\t\tio.EOF,\n\t}, {\n\t\tWithStack(nil),\n\t\tnil,\n\t}, {\n\t\tWithStack(io.EOF),\n\t\tio.EOF,\n\t}}\n\n\tfor i, tt := range tests {\n\t\tgot := Cause(tt.err)\n\t\tif !reflect.DeepEqual(got, tt.want) {\n\t\t\tt.Errorf(\"test %d: got %#v, want %#v\", i+1, got, tt.want)\n\t\t}\n\t}\n}\n\nfunc TestWrapfNil(t *testing.T) {\n\tgot := Wrapf(nil, \"no error\")\n\tif got != nil {\n\t\tt.Errorf(\"Wrapf(nil, \\\"no error\\\"): got %#v, expected nil\", got)\n\t}\n}\n\nfunc TestWrapf(t *testing.T) {\n\ttests := []struct {\n\t\terr     error\n\t\tmessage string\n\t\twant    string\n\t}{\n\t\t{io.EOF, \"read error\", \"read error: EOF\"},\n\t\t{Wrapf(io.EOF, \"read error without format specifiers\"), \"client error\", \"client error: read error without format specifiers: EOF\"},\n\t\t{Wrapf(io.EOF, \"read error with %d format specifier\", 1), \"client error\", \"client error: read error with 1 format specifier: EOF\"},\n\t}\n\n\tfor _, tt := range tests {\n\t\tgot := Wrapf(tt.err, tt.message).Error()\n\t\tif got != tt.want {\n\t\t\tt.Errorf(\"Wrapf(%v, %q): got: %v, want %v\", tt.err, tt.message, got, tt.want)\n\t\t}\n\t}\n}\n\nfunc TestErrorf(t *testing.T) {\n\ttests := []struct {\n\t\terr  error\n\t\twant string\n\t}{\n\t\t{Errorf(\"read error without format specifiers\"), \"read error without format specifiers\"},\n\t\t{Errorf(\"read error with %d format specifier\", 1), \"read error with 1 format specifier\"},\n\t}\n\n\tfor _, tt := range tests {\n\t\tgot := tt.err.Error()\n\t\tif got != tt.want {\n\t\t\tt.Errorf(\"Errorf(%v): got: 
%q, want %q\", tt.err, got, tt.want)\n\t\t}\n\t}\n}\n\nfunc TestWithStackNil(t *testing.T) {\n\tgot := WithStack(nil)\n\tif got != nil {\n\t\tt.Errorf(\"WithStack(nil): got %#v, expected nil\", got)\n\t}\n}\n\nfunc TestWithStack(t *testing.T) {\n\ttests := []struct {\n\t\terr  error\n\t\twant string\n\t}{\n\t\t{io.EOF, \"EOF\"},\n\t\t{WithStack(io.EOF), \"EOF\"},\n\t}\n\n\tfor _, tt := range tests {\n\t\tgot := WithStack(tt.err).Error()\n\t\tif got != tt.want {\n\t\t\tt.Errorf(\"WithStack(%v): got: %v, want %v\", tt.err, got, tt.want)\n\t\t}\n\t}\n}\n\nfunc TestWithMessageNil(t *testing.T) {\n\tgot := WithMessage(nil, \"no error\")\n\tif got != nil {\n\t\tt.Errorf(\"WithMessage(nil, \\\"no error\\\"): got %#v, expected nil\", got)\n\t}\n}\n\nfunc TestWithMessage(t *testing.T) {\n\ttests := []struct {\n\t\terr     error\n\t\tmessage string\n\t\twant    string\n\t}{\n\t\t{io.EOF, \"read error\", \"read error: EOF\"},\n\t\t{WithMessage(io.EOF, \"read error\"), \"client error\", \"client error: read error: EOF\"},\n\t}\n\n\tfor _, tt := range tests {\n\t\tgot := WithMessage(tt.err, tt.message).Error()\n\t\tif got != tt.want {\n\t\t\tt.Errorf(\"WithMessage(%v, %q): got: %q, want %q\", tt.err, tt.message, got, tt.want)\n\t\t}\n\t}\n\n}\n\n// errors.New, etc values are not expected to be compared by value\n// but the change in errors#27 made them incomparable. Assert that\n// various kinds of errors have a functional equality operator, even\n// if the result of that equality is always false.\nfunc TestErrorEquality(t *testing.T) {\n\tvals := []error{\n\t\tnil,\n\t\tio.EOF,\n\t\terrors.New(\"EOF\"),\n\t\tNew(\"EOF\"),\n\t\tErrorf(\"EOF\"),\n\t\tWrap(io.EOF, \"EOF\"),\n\t\tWrapf(io.EOF, \"EOF%d\", 2),\n\t\tWithMessage(nil, \"whoops\"),\n\t\tWithMessage(io.EOF, \"whoops\"),\n\t\tWithStack(io.EOF),\n\t\tWithStack(nil),\n\t}\n\n\tfor i := range vals {\n\t\tfor j := range vals {\n\t\t\t_ = vals[i] == vals[j] // mustn't panic\n\t\t}\n\t}\n}\n"
  },
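  {
    "path": "vendor/github.com/pkg/errors/errors_wrap_sketch_test.go",
    "content": "package errors\n\n// Illustrative sketch, not part of the upstream package: it contrasts Wrap,\n// which records a stack trace at the call site, with WithMessage, which only\n// annotates the message. Both behaviors are documented by pkg/errors.\n\nimport (\n\t\"io\"\n\t\"testing\"\n)\n\nfunc TestWrapVersusWithMessageSketch(t *testing.T) {\n\ttype stackTracer interface {\n\t\tStackTrace() StackTrace\n\t}\n\n\t// Wrap annotates the error and attaches a new stack trace.\n\tif _, ok := Wrap(io.EOF, \"read failed\").(stackTracer); !ok {\n\t\tt.Error(\"expected Wrap to attach a stack trace\")\n\t}\n\n\t// WithMessage annotates the error without recording a stack trace.\n\tif _, ok := WithMessage(io.EOF, \"read failed\").(stackTracer); ok {\n\t\tt.Error(\"expected WithMessage not to attach a stack trace\")\n\t}\n}\n"
  },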
  {
    "path": "vendor/github.com/pkg/errors/example_test.go",
    "content": "package errors_test\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pkg/errors\"\n)\n\nfunc ExampleNew() {\n\terr := errors.New(\"whoops\")\n\tfmt.Println(err)\n\n\t// Output: whoops\n}\n\nfunc ExampleNew_printf() {\n\terr := errors.New(\"whoops\")\n\tfmt.Printf(\"%+v\", err)\n\n\t// Example output:\n\t// whoops\n\t// github.com/pkg/errors_test.ExampleNew_printf\n\t//         /home/dfc/src/github.com/pkg/errors/example_test.go:17\n\t// testing.runExample\n\t//         /home/dfc/go/src/testing/example.go:114\n\t// testing.RunExamples\n\t//         /home/dfc/go/src/testing/example.go:38\n\t// testing.(*M).Run\n\t//         /home/dfc/go/src/testing/testing.go:744\n\t// main.main\n\t//         /github.com/pkg/errors/_test/_testmain.go:106\n\t// runtime.main\n\t//         /home/dfc/go/src/runtime/proc.go:183\n\t// runtime.goexit\n\t//         /home/dfc/go/src/runtime/asm_amd64.s:2059\n}\n\nfunc ExampleWithMessage() {\n\tcause := errors.New(\"whoops\")\n\terr := errors.WithMessage(cause, \"oh noes\")\n\tfmt.Println(err)\n\n\t// Output: oh noes: whoops\n}\n\nfunc ExampleWithStack() {\n\tcause := errors.New(\"whoops\")\n\terr := errors.WithStack(cause)\n\tfmt.Println(err)\n\n\t// Output: whoops\n}\n\nfunc ExampleWithStack_printf() {\n\tcause := errors.New(\"whoops\")\n\terr := errors.WithStack(cause)\n\tfmt.Printf(\"%+v\", err)\n\n\t// Example Output:\n\t// whoops\n\t// github.com/pkg/errors_test.ExampleWithStack_printf\n\t//         /home/fabstu/go/src/github.com/pkg/errors/example_test.go:55\n\t// testing.runExample\n\t//         /usr/lib/go/src/testing/example.go:114\n\t// testing.RunExamples\n\t//         /usr/lib/go/src/testing/example.go:38\n\t// testing.(*M).Run\n\t//         /usr/lib/go/src/testing/testing.go:744\n\t// main.main\n\t//         github.com/pkg/errors/_test/_testmain.go:106\n\t// runtime.main\n\t//         /usr/lib/go/src/runtime/proc.go:183\n\t// runtime.goexit\n\t//         /usr/lib/go/src/runtime/asm_amd64.s:2086\n\t// github.com/pkg/errors_test.ExampleWithStack_printf\n\t//         /home/fabstu/go/src/github.com/pkg/errors/example_test.go:56\n\t// testing.runExample\n\t//         /usr/lib/go/src/testing/example.go:114\n\t// testing.RunExamples\n\t//         /usr/lib/go/src/testing/example.go:38\n\t// testing.(*M).Run\n\t//         /usr/lib/go/src/testing/testing.go:744\n\t// main.main\n\t//         github.com/pkg/errors/_test/_testmain.go:106\n\t// runtime.main\n\t//         /usr/lib/go/src/runtime/proc.go:183\n\t// runtime.goexit\n\t//         /usr/lib/go/src/runtime/asm_amd64.s:2086\n}\n\nfunc ExampleWrap() {\n\tcause := errors.New(\"whoops\")\n\terr := errors.Wrap(cause, \"oh noes\")\n\tfmt.Println(err)\n\n\t// Output: oh noes: whoops\n}\n\nfunc fn() error {\n\te1 := errors.New(\"error\")\n\te2 := errors.Wrap(e1, \"inner\")\n\te3 := errors.Wrap(e2, \"middle\")\n\treturn errors.Wrap(e3, \"outer\")\n}\n\nfunc ExampleCause() {\n\terr := fn()\n\tfmt.Println(err)\n\tfmt.Println(errors.Cause(err))\n\n\t// Output: outer: middle: inner: error\n\t// error\n}\n\nfunc ExampleWrap_extended() {\n\terr := fn()\n\tfmt.Printf(\"%+v\\n\", err)\n\n\t// Example output:\n\t// error\n\t// github.com/pkg/errors_test.fn\n\t//         /home/dfc/src/github.com/pkg/errors/example_test.go:47\n\t// github.com/pkg/errors_test.ExampleCause_printf\n\t//         /home/dfc/src/github.com/pkg/errors/example_test.go:63\n\t// testing.runExample\n\t//         /home/dfc/go/src/testing/example.go:114\n\t// testing.RunExamples\n\t//         /home/dfc/go/src/testing/example.go:38\n\t// 
testing.(*M).Run\n\t//         /home/dfc/go/src/testing/testing.go:744\n\t// main.main\n\t//         /github.com/pkg/errors/_test/_testmain.go:104\n\t// runtime.main\n\t//         /home/dfc/go/src/runtime/proc.go:183\n\t// runtime.goexit\n\t//         /home/dfc/go/src/runtime/asm_amd64.s:2059\n\t// github.com/pkg/errors_test.fn\n\t// \t  /home/dfc/src/github.com/pkg/errors/example_test.go:48: inner\n\t// github.com/pkg/errors_test.fn\n\t//        /home/dfc/src/github.com/pkg/errors/example_test.go:49: middle\n\t// github.com/pkg/errors_test.fn\n\t//      /home/dfc/src/github.com/pkg/errors/example_test.go:50: outer\n}\n\nfunc ExampleWrapf() {\n\tcause := errors.New(\"whoops\")\n\terr := errors.Wrapf(cause, \"oh noes #%d\", 2)\n\tfmt.Println(err)\n\n\t// Output: oh noes #2: whoops\n}\n\nfunc ExampleErrorf_extended() {\n\terr := errors.Errorf(\"whoops: %s\", \"foo\")\n\tfmt.Printf(\"%+v\", err)\n\n\t// Example output:\n\t// whoops: foo\n\t// github.com/pkg/errors_test.ExampleErrorf\n\t//         /home/dfc/src/github.com/pkg/errors/example_test.go:101\n\t// testing.runExample\n\t//         /home/dfc/go/src/testing/example.go:114\n\t// testing.RunExamples\n\t//         /home/dfc/go/src/testing/example.go:38\n\t// testing.(*M).Run\n\t//         /home/dfc/go/src/testing/testing.go:744\n\t// main.main\n\t//         /github.com/pkg/errors/_test/_testmain.go:102\n\t// runtime.main\n\t//         /home/dfc/go/src/runtime/proc.go:183\n\t// runtime.goexit\n\t//         /home/dfc/go/src/runtime/asm_amd64.s:2059\n}\n\nfunc Example_stackTrace() {\n\ttype stackTracer interface {\n\t\tStackTrace() errors.StackTrace\n\t}\n\n\terr, ok := errors.Cause(fn()).(stackTracer)\n\tif !ok {\n\t\tpanic(\"oops, err does not implement stackTracer\")\n\t}\n\n\tst := err.StackTrace()\n\tfmt.Printf(\"%+v\", st[0:2]) // top two frames\n\n\t// Example output:\n\t// github.com/pkg/errors_test.fn\n\t//\t/home/dfc/src/github.com/pkg/errors/example_test.go:47\n\t// github.com/pkg/errors_test.Example_stackTrace\n\t//\t/home/dfc/src/github.com/pkg/errors/example_test.go:127\n}\n\nfunc ExampleCause_printf() {\n\terr := errors.Wrap(func() error {\n\t\treturn func() error {\n\t\t\treturn errors.Errorf(\"hello %s\", fmt.Sprintf(\"world\"))\n\t\t}()\n\t}(), \"failed\")\n\n\tfmt.Printf(\"%v\", err)\n\n\t// Output: failed: hello world\n}\n"
  },
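  {
    "path": "vendor/github.com/pkg/errors/example_usage_sketch_test.go",
    "content": "package errors_test\n\n// Illustrative sketch, not part of the upstream package: errors.Cause keeps\n// unwrapping any error that implements the causer interface documented by\n// pkg/errors, i.e. a Cause() error method. timeoutError is a hypothetical\n// type defined only for this example.\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/pkg/errors\"\n)\n\ntype timeoutError struct{ cause error }\n\nfunc (e *timeoutError) Error() string { return \"timeout: \" + e.cause.Error() }\n\n// Cause exposes the underlying error so errors.Cause can unwrap it.\nfunc (e *timeoutError) Cause() error { return e.cause }\n\nfunc ExampleCause_causer() {\n\troot := errors.New(\"connection reset\")\n\terr := &timeoutError{cause: root}\n\n\tfmt.Println(err)\n\tfmt.Println(errors.Cause(err) == root)\n\n\t// Output: timeout: connection reset\n\t// true\n}\n"
  },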
  {
    "path": "vendor/github.com/pkg/errors/format_test.go",
    "content": "package errors\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"regexp\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestFormatNew(t *testing.T) {\n\ttests := []struct {\n\t\terror\n\t\tformat string\n\t\twant   string\n\t}{{\n\t\tNew(\"error\"),\n\t\t\"%s\",\n\t\t\"error\",\n\t}, {\n\t\tNew(\"error\"),\n\t\t\"%v\",\n\t\t\"error\",\n\t}, {\n\t\tNew(\"error\"),\n\t\t\"%+v\",\n\t\t\"error\\n\" +\n\t\t\t\"github.com/pkg/errors.TestFormatNew\\n\" +\n\t\t\t\"\\t.+/github.com/pkg/errors/format_test.go:26\",\n\t}, {\n\t\tNew(\"error\"),\n\t\t\"%q\",\n\t\t`\"error\"`,\n\t}}\n\n\tfor i, tt := range tests {\n\t\ttestFormatRegexp(t, i, tt.error, tt.format, tt.want)\n\t}\n}\n\nfunc TestFormatErrorf(t *testing.T) {\n\ttests := []struct {\n\t\terror\n\t\tformat string\n\t\twant   string\n\t}{{\n\t\tErrorf(\"%s\", \"error\"),\n\t\t\"%s\",\n\t\t\"error\",\n\t}, {\n\t\tErrorf(\"%s\", \"error\"),\n\t\t\"%v\",\n\t\t\"error\",\n\t}, {\n\t\tErrorf(\"%s\", \"error\"),\n\t\t\"%+v\",\n\t\t\"error\\n\" +\n\t\t\t\"github.com/pkg/errors.TestFormatErrorf\\n\" +\n\t\t\t\"\\t.+/github.com/pkg/errors/format_test.go:56\",\n\t}}\n\n\tfor i, tt := range tests {\n\t\ttestFormatRegexp(t, i, tt.error, tt.format, tt.want)\n\t}\n}\n\nfunc TestFormatWrap(t *testing.T) {\n\ttests := []struct {\n\t\terror\n\t\tformat string\n\t\twant   string\n\t}{{\n\t\tWrap(New(\"error\"), \"error2\"),\n\t\t\"%s\",\n\t\t\"error2: error\",\n\t}, {\n\t\tWrap(New(\"error\"), \"error2\"),\n\t\t\"%v\",\n\t\t\"error2: error\",\n\t}, {\n\t\tWrap(New(\"error\"), \"error2\"),\n\t\t\"%+v\",\n\t\t\"error\\n\" +\n\t\t\t\"github.com/pkg/errors.TestFormatWrap\\n\" +\n\t\t\t\"\\t.+/github.com/pkg/errors/format_test.go:82\",\n\t}, {\n\t\tWrap(io.EOF, \"error\"),\n\t\t\"%s\",\n\t\t\"error: EOF\",\n\t}, {\n\t\tWrap(io.EOF, \"error\"),\n\t\t\"%v\",\n\t\t\"error: EOF\",\n\t}, {\n\t\tWrap(io.EOF, \"error\"),\n\t\t\"%+v\",\n\t\t\"EOF\\n\" +\n\t\t\t\"error\\n\" +\n\t\t\t\"github.com/pkg/errors.TestFormatWrap\\n\" +\n\t\t\t\"\\t.+/github.com/pkg/errors/format_test.go:96\",\n\t}, {\n\t\tWrap(Wrap(io.EOF, \"error1\"), \"error2\"),\n\t\t\"%+v\",\n\t\t\"EOF\\n\" +\n\t\t\t\"error1\\n\" +\n\t\t\t\"github.com/pkg/errors.TestFormatWrap\\n\" +\n\t\t\t\"\\t.+/github.com/pkg/errors/format_test.go:103\\n\",\n\t}, {\n\t\tWrap(New(\"error with space\"), \"context\"),\n\t\t\"%q\",\n\t\t`\"context: error with space\"`,\n\t}}\n\n\tfor i, tt := range tests {\n\t\ttestFormatRegexp(t, i, tt.error, tt.format, tt.want)\n\t}\n}\n\nfunc TestFormatWrapf(t *testing.T) {\n\ttests := []struct {\n\t\terror\n\t\tformat string\n\t\twant   string\n\t}{{\n\t\tWrapf(io.EOF, \"error%d\", 2),\n\t\t\"%s\",\n\t\t\"error2: EOF\",\n\t}, {\n\t\tWrapf(io.EOF, \"error%d\", 2),\n\t\t\"%v\",\n\t\t\"error2: EOF\",\n\t}, {\n\t\tWrapf(io.EOF, \"error%d\", 2),\n\t\t\"%+v\",\n\t\t\"EOF\\n\" +\n\t\t\t\"error2\\n\" +\n\t\t\t\"github.com/pkg/errors.TestFormatWrapf\\n\" +\n\t\t\t\"\\t.+/github.com/pkg/errors/format_test.go:134\",\n\t}, {\n\t\tWrapf(New(\"error\"), \"error%d\", 2),\n\t\t\"%s\",\n\t\t\"error2: error\",\n\t}, {\n\t\tWrapf(New(\"error\"), \"error%d\", 2),\n\t\t\"%v\",\n\t\t\"error2: error\",\n\t}, {\n\t\tWrapf(New(\"error\"), \"error%d\", 2),\n\t\t\"%+v\",\n\t\t\"error\\n\" +\n\t\t\t\"github.com/pkg/errors.TestFormatWrapf\\n\" +\n\t\t\t\"\\t.+/github.com/pkg/errors/format_test.go:149\",\n\t}}\n\n\tfor i, tt := range tests {\n\t\ttestFormatRegexp(t, i, tt.error, tt.format, tt.want)\n\t}\n}\n\nfunc TestFormatWithStack(t *testing.T) {\n\ttests := []struct {\n\t\terror\n\t\tformat string\n\t\twant 
  []string\n\t}{{\n\t\tWithStack(io.EOF),\n\t\t\"%s\",\n\t\t[]string{\"EOF\"},\n\t}, {\n\t\tWithStack(io.EOF),\n\t\t\"%v\",\n\t\t[]string{\"EOF\"},\n\t}, {\n\t\tWithStack(io.EOF),\n\t\t\"%+v\",\n\t\t[]string{\"EOF\",\n\t\t\t\"github.com/pkg/errors.TestFormatWithStack\\n\" +\n\t\t\t\t\"\\t.+/github.com/pkg/errors/format_test.go:175\"},\n\t}, {\n\t\tWithStack(New(\"error\")),\n\t\t\"%s\",\n\t\t[]string{\"error\"},\n\t}, {\n\t\tWithStack(New(\"error\")),\n\t\t\"%v\",\n\t\t[]string{\"error\"},\n\t}, {\n\t\tWithStack(New(\"error\")),\n\t\t\"%+v\",\n\t\t[]string{\"error\",\n\t\t\t\"github.com/pkg/errors.TestFormatWithStack\\n\" +\n\t\t\t\t\"\\t.+/github.com/pkg/errors/format_test.go:189\",\n\t\t\t\"github.com/pkg/errors.TestFormatWithStack\\n\" +\n\t\t\t\t\"\\t.+/github.com/pkg/errors/format_test.go:189\"},\n\t}, {\n\t\tWithStack(WithStack(io.EOF)),\n\t\t\"%+v\",\n\t\t[]string{\"EOF\",\n\t\t\t\"github.com/pkg/errors.TestFormatWithStack\\n\" +\n\t\t\t\t\"\\t.+/github.com/pkg/errors/format_test.go:197\",\n\t\t\t\"github.com/pkg/errors.TestFormatWithStack\\n\" +\n\t\t\t\t\"\\t.+/github.com/pkg/errors/format_test.go:197\"},\n\t}, {\n\t\tWithStack(WithStack(Wrapf(io.EOF, \"message\"))),\n\t\t\"%+v\",\n\t\t[]string{\"EOF\",\n\t\t\t\"message\",\n\t\t\t\"github.com/pkg/errors.TestFormatWithStack\\n\" +\n\t\t\t\t\"\\t.+/github.com/pkg/errors/format_test.go:205\",\n\t\t\t\"github.com/pkg/errors.TestFormatWithStack\\n\" +\n\t\t\t\t\"\\t.+/github.com/pkg/errors/format_test.go:205\",\n\t\t\t\"github.com/pkg/errors.TestFormatWithStack\\n\" +\n\t\t\t\t\"\\t.+/github.com/pkg/errors/format_test.go:205\"},\n\t}, {\n\t\tWithStack(Errorf(\"error%d\", 1)),\n\t\t\"%+v\",\n\t\t[]string{\"error1\",\n\t\t\t\"github.com/pkg/errors.TestFormatWithStack\\n\" +\n\t\t\t\t\"\\t.+/github.com/pkg/errors/format_test.go:216\",\n\t\t\t\"github.com/pkg/errors.TestFormatWithStack\\n\" +\n\t\t\t\t\"\\t.+/github.com/pkg/errors/format_test.go:216\"},\n\t}}\n\n\tfor i, tt := range tests {\n\t\ttestFormatCompleteCompare(t, i, tt.error, tt.format, tt.want, true)\n\t}\n}\n\nfunc TestFormatWithMessage(t *testing.T) {\n\ttests := []struct {\n\t\terror\n\t\tformat string\n\t\twant   []string\n\t}{{\n\t\tWithMessage(New(\"error\"), \"error2\"),\n\t\t\"%s\",\n\t\t[]string{\"error2: error\"},\n\t}, {\n\t\tWithMessage(New(\"error\"), \"error2\"),\n\t\t\"%v\",\n\t\t[]string{\"error2: error\"},\n\t}, {\n\t\tWithMessage(New(\"error\"), \"error2\"),\n\t\t\"%+v\",\n\t\t[]string{\n\t\t\t\"error\",\n\t\t\t\"github.com/pkg/errors.TestFormatWithMessage\\n\" +\n\t\t\t\t\"\\t.+/github.com/pkg/errors/format_test.go:244\",\n\t\t\t\"error2\"},\n\t}, {\n\t\tWithMessage(io.EOF, \"addition1\"),\n\t\t\"%s\",\n\t\t[]string{\"addition1: EOF\"},\n\t}, {\n\t\tWithMessage(io.EOF, \"addition1\"),\n\t\t\"%v\",\n\t\t[]string{\"addition1: EOF\"},\n\t}, {\n\t\tWithMessage(io.EOF, \"addition1\"),\n\t\t\"%+v\",\n\t\t[]string{\"EOF\", \"addition1\"},\n\t}, {\n\t\tWithMessage(WithMessage(io.EOF, \"addition1\"), \"addition2\"),\n\t\t\"%v\",\n\t\t[]string{\"addition2: addition1: EOF\"},\n\t}, {\n\t\tWithMessage(WithMessage(io.EOF, \"addition1\"), \"addition2\"),\n\t\t\"%+v\",\n\t\t[]string{\"EOF\", \"addition1\", \"addition2\"},\n\t}, {\n\t\tWrap(WithMessage(io.EOF, \"error1\"), \"error2\"),\n\t\t\"%+v\",\n\t\t[]string{\"EOF\", \"error1\", \"error2\",\n\t\t\t\"github.com/pkg/errors.TestFormatWithMessage\\n\" +\n\t\t\t\t\"\\t.+/github.com/pkg/errors/format_test.go:272\"},\n\t}, {\n\t\tWithMessage(Errorf(\"error%d\", 1), 
\"error2\"),\n\t\t\"%+v\",\n\t\t[]string{\"error1\",\n\t\t\t\"github.com/pkg/errors.TestFormatWithMessage\\n\" +\n\t\t\t\t\"\\t.+/github.com/pkg/errors/format_test.go:278\",\n\t\t\t\"error2\"},\n\t}, {\n\t\tWithMessage(WithStack(io.EOF), \"error\"),\n\t\t\"%+v\",\n\t\t[]string{\n\t\t\t\"EOF\",\n\t\t\t\"github.com/pkg/errors.TestFormatWithMessage\\n\" +\n\t\t\t\t\"\\t.+/github.com/pkg/errors/format_test.go:285\",\n\t\t\t\"error\"},\n\t}, {\n\t\tWithMessage(Wrap(WithStack(io.EOF), \"inside-error\"), \"outside-error\"),\n\t\t\"%+v\",\n\t\t[]string{\n\t\t\t\"EOF\",\n\t\t\t\"github.com/pkg/errors.TestFormatWithMessage\\n\" +\n\t\t\t\t\"\\t.+/github.com/pkg/errors/format_test.go:293\",\n\t\t\t\"inside-error\",\n\t\t\t\"github.com/pkg/errors.TestFormatWithMessage\\n\" +\n\t\t\t\t\"\\t.+/github.com/pkg/errors/format_test.go:293\",\n\t\t\t\"outside-error\"},\n\t}}\n\n\tfor i, tt := range tests {\n\t\ttestFormatCompleteCompare(t, i, tt.error, tt.format, tt.want, true)\n\t}\n}\n\nfunc TestFormatGeneric(t *testing.T) {\n\tstarts := []struct {\n\t\terr  error\n\t\twant []string\n\t}{\n\t\t{New(\"new-error\"), []string{\n\t\t\t\"new-error\",\n\t\t\t\"github.com/pkg/errors.TestFormatGeneric\\n\" +\n\t\t\t\t\"\\t.+/github.com/pkg/errors/format_test.go:315\"},\n\t\t}, {Errorf(\"errorf-error\"), []string{\n\t\t\t\"errorf-error\",\n\t\t\t\"github.com/pkg/errors.TestFormatGeneric\\n\" +\n\t\t\t\t\"\\t.+/github.com/pkg/errors/format_test.go:319\"},\n\t\t}, {errors.New(\"errors-new-error\"), []string{\n\t\t\t\"errors-new-error\"},\n\t\t},\n\t}\n\n\twrappers := []wrapper{\n\t\t{\n\t\t\tfunc(err error) error { return WithMessage(err, \"with-message\") },\n\t\t\t[]string{\"with-message\"},\n\t\t}, {\n\t\t\tfunc(err error) error { return WithStack(err) },\n\t\t\t[]string{\n\t\t\t\t\"github.com/pkg/errors.(func·002|TestFormatGeneric.func2)\\n\\t\" +\n\t\t\t\t\t\".+/github.com/pkg/errors/format_test.go:333\",\n\t\t\t},\n\t\t}, {\n\t\t\tfunc(err error) error { return Wrap(err, \"wrap-error\") },\n\t\t\t[]string{\n\t\t\t\t\"wrap-error\",\n\t\t\t\t\"github.com/pkg/errors.(func·003|TestFormatGeneric.func3)\\n\\t\" +\n\t\t\t\t\t\".+/github.com/pkg/errors/format_test.go:339\",\n\t\t\t},\n\t\t}, {\n\t\t\tfunc(err error) error { return Wrapf(err, \"wrapf-error%d\", 1) },\n\t\t\t[]string{\n\t\t\t\t\"wrapf-error1\",\n\t\t\t\t\"github.com/pkg/errors.(func·004|TestFormatGeneric.func4)\\n\\t\" +\n\t\t\t\t\t\".+/github.com/pkg/errors/format_test.go:346\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor s := range starts {\n\t\terr := starts[s].err\n\t\twant := starts[s].want\n\t\ttestFormatCompleteCompare(t, s, err, \"%+v\", want, false)\n\t\ttestGenericRecursive(t, err, want, wrappers, 3)\n\t}\n}\n\nfunc testFormatRegexp(t *testing.T, n int, arg interface{}, format, want string) {\n\tgot := fmt.Sprintf(format, arg)\n\tgotLines := strings.SplitN(got, \"\\n\", -1)\n\twantLines := strings.SplitN(want, \"\\n\", -1)\n\n\tif len(wantLines) > len(gotLines) {\n\t\tt.Errorf(\"test %d: wantLines(%d) > gotLines(%d):\\n got: %q\\nwant: %q\", n+1, len(wantLines), len(gotLines), got, want)\n\t\treturn\n\t}\n\n\tfor i, w := range wantLines {\n\t\tmatch, err := regexp.MatchString(w, gotLines[i])\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif !match {\n\t\t\tt.Errorf(\"test %d: line %d: fmt.Sprintf(%q, err):\\n got: %q\\nwant: %q\", n+1, i+1, format, got, want)\n\t\t}\n\t}\n}\n\nvar stackLineR = regexp.MustCompile(`\\.`)\n\n// parseBlocks parses input into a slice, where:\n//  - in case an entry contains a newline, it is a stack trace\n//  - in case an entry contains no newline, it is a solo line.\n//\n// Detecting stack boundaries only works if the WithStack calls are\n// found on the same line; that is why it is optional here.\n//\n// Example use:\n//\n// for _, e := range blocks {\n//   if strings.ContainsAny(e, \"\\n\") {\n//     // Match as stack\n//   } else {\n//     // Match as line\n//   }\n// }\n//\nfunc parseBlocks(input string, detectStackboundaries bool) ([]string, error) {\n\tvar blocks []string\n\n\tstack := \"\"\n\twasStack := false\n\tlines := map[string]bool{} // already found lines\n\n\tfor _, l := range strings.Split(input, \"\\n\") {\n\t\tisStackLine := stackLineR.MatchString(l)\n\n\t\tswitch {\n\t\tcase !isStackLine && wasStack:\n\t\t\tblocks = append(blocks, stack, l)\n\t\t\tstack = \"\"\n\t\t\tlines = map[string]bool{}\n\t\tcase isStackLine:\n\t\t\tif wasStack {\n\t\t\t\t// Detected two stacks, one after the other; possibly because lines match in\n\t\t\t\t// our tests due to WithStack(WithStack(io.EOF)) on the same line.\n\t\t\t\tif detectStackboundaries {\n\t\t\t\t\tif lines[l] {\n\t\t\t\t\t\tif len(stack) == 0 {\n\t\t\t\t\t\t\treturn nil, errors.New(\"len of block must not be zero here\")\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tblocks = append(blocks, stack)\n\t\t\t\t\t\tstack = l\n\t\t\t\t\t\tlines = map[string]bool{l: true}\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tstack = stack + \"\\n\" + l\n\t\t\t} else {\n\t\t\t\tstack = l\n\t\t\t}\n\t\t\tlines[l] = true\n\t\tcase !isStackLine && !wasStack:\n\t\t\tblocks = append(blocks, l)\n\t\tdefault:\n\t\t\treturn nil, errors.New(\"must not happen\")\n\t\t}\n\n\t\twasStack = isStackLine\n\t}\n\n\t// Use up stack\n\tif stack != \"\" {\n\t\tblocks = append(blocks, stack)\n\t}\n\treturn blocks, nil\n}\n\nfunc testFormatCompleteCompare(t *testing.T, n int, arg interface{}, format string, want []string, detectStackBoundaries bool) {\n\tgotStr := fmt.Sprintf(format, arg)\n\n\tgot, err := parseBlocks(gotStr, detectStackBoundaries)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif len(got) != len(want) {\n\t\tt.Fatalf(\"test %d: fmt.Sprintf(%s, err) -> wrong number of blocks: got(%d) want(%d)\\n got: %s\\nwant: %s\\ngotStr: %q\",\n\t\t\tn+1, format, len(got), len(want), prettyBlocks(got), prettyBlocks(want), gotStr)\n\t}\n\n\tfor i := range got {\n\t\tif strings.ContainsAny(want[i], \"\\n\") {\n\t\t\t// Match as stack\n\t\t\tmatch, err := regexp.MatchString(want[i], got[i])\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif !match {\n\t\t\t\tt.Fatalf(\"test %d: block %d: fmt.Sprintf(%q, err):\\ngot:\\n%q\\nwant:\\n%q\\nall-got:\\n%s\\nall-want:\\n%s\\n\",\n\t\t\t\t\tn+1, i+1, format, got[i], want[i], prettyBlocks(got), prettyBlocks(want))\n\t\t\t}\n\t\t} else {\n\t\t\t// Match as message\n\t\t\tif got[i] != want[i] {\n\t\t\t\tt.Fatalf(\"test %d: fmt.Sprintf(%s, err) at block %d got != want:\\n got: %q\\nwant: %q\", n+1, format, i+1, got[i], want[i])\n\t\t\t}\n\t\t}\n\t}\n}\n\ntype wrapper struct {\n\twrap func(err error) error\n\twant []string\n}\n\nfunc prettyBlocks(blocks []string, prefix ...string) string {\n\tvar out []string\n\n\tfor _, b := range blocks {\n\t\tout = append(out, fmt.Sprintf(\"%v\", b))\n\t}\n\n\treturn \"   \" + strings.Join(out, \"\\n   \")\n}\n\nfunc testGenericRecursive(t *testing.T, beforeErr error, beforeWant []string, list []wrapper, maxDepth int) {\n\tif len(beforeWant) == 0 {\n\t\tpanic(\"beforeWant must not be empty\")\n\t}\n\tfor _, w := range list {\n\t\tif len(w.want) == 0 {\n\t\t\tpanic(\"want must not be empty\")\n\t\t}\n\n\t\terr := w.wrap(beforeErr)\n\n\t\t// Copy required because append(beforeWant, ..) modifies beforeWant subtly.\n\t\tbeforeCopy := make([]string, len(beforeWant))\n\t\tcopy(beforeCopy, beforeWant)\n\n\t\tbeforeWant := beforeCopy\n\t\tlast := len(beforeWant) - 1\n\t\tvar want []string\n\n\t\t// Merge two stacks one after the other.\n\t\tif strings.ContainsAny(beforeWant[last], \"\\n\") && strings.ContainsAny(w.want[0], \"\\n\") {\n\t\t\twant = append(beforeWant[:last], append([]string{beforeWant[last] + \"((?s).*)\" + w.want[0]}, w.want[1:]...)...)\n\t\t} else {\n\t\t\twant = append(beforeWant, w.want...)\n\t\t}\n\n\t\ttestFormatCompleteCompare(t, maxDepth, err, \"%+v\", want, false)\n\t\tif maxDepth > 0 {\n\t\t\ttestGenericRecursive(t, err, want, list, maxDepth-1)\n\t\t}\n\t}\n}\n"
  },
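  {
    "path": "vendor/github.com/pkg/errors/format_verbs_sketch_test.go",
    "content": "package errors\n\n// Illustrative sketch, not part of the upstream package: the format verbs\n// exercised throughout format_test.go, shown on a single wrapped error. %s\n// and %v print the message chain, %q quotes it, and %+v additionally prints\n// each recorded stack trace.\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"testing\"\n)\n\nfunc TestFormatVerbsSketch(t *testing.T) {\n\terr := Wrap(io.EOF, \"read failed\")\n\n\tif got, want := fmt.Sprintf(\"%s\", err), \"read failed: EOF\"; got != want {\n\t\tt.Errorf(\"%%s: got %q, want %q\", got, want)\n\t}\n\tif got, want := fmt.Sprintf(\"%v\", err), \"read failed: EOF\"; got != want {\n\t\tt.Errorf(\"%%v: got %q, want %q\", got, want)\n\t}\n\tif got, want := fmt.Sprintf(\"%q\", err), `\"read failed: EOF\"`; got != want {\n\t\tt.Errorf(\"%%q: got %q, want %q\", got, want)\n\t}\n\n\t// The %+v output is location dependent, so it is only logged here.\n\tt.Logf(\"%%+v:\\n%+v\", err)\n}\n"
  },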
  {
    "path": "vendor/github.com/pkg/errors/stack.go",
    "content": "package errors\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"path\"\n\t\"runtime\"\n\t\"strings\"\n)\n\n// Frame represents a program counter inside a stack frame.\ntype Frame uintptr\n\n// pc returns the program counter for this frame;\n// multiple frames may have the same PC value.\nfunc (f Frame) pc() uintptr { return uintptr(f) - 1 }\n\n// file returns the full path to the file that contains the\n// function for this Frame's pc.\nfunc (f Frame) file() string {\n\tfn := runtime.FuncForPC(f.pc())\n\tif fn == nil {\n\t\treturn \"unknown\"\n\t}\n\tfile, _ := fn.FileLine(f.pc())\n\treturn file\n}\n\n// line returns the line number of source code of the\n// function for this Frame's pc.\nfunc (f Frame) line() int {\n\tfn := runtime.FuncForPC(f.pc())\n\tif fn == nil {\n\t\treturn 0\n\t}\n\t_, line := fn.FileLine(f.pc())\n\treturn line\n}\n\n// Format formats the frame according to the fmt.Formatter interface.\n//\n//    %s    source file\n//    %d    source line\n//    %n    function name\n//    %v    equivalent to %s:%d\n//\n// Format accepts flags that alter the printing of some verbs, as follows:\n//\n//    %+s   path of source file relative to the compile time GOPATH\n//    %+v   equivalent to %+s:%d\nfunc (f Frame) Format(s fmt.State, verb rune) {\n\tswitch verb {\n\tcase 's':\n\t\tswitch {\n\t\tcase s.Flag('+'):\n\t\t\tpc := f.pc()\n\t\t\tfn := runtime.FuncForPC(pc)\n\t\t\tif fn == nil {\n\t\t\t\tio.WriteString(s, \"unknown\")\n\t\t\t} else {\n\t\t\t\tfile, _ := fn.FileLine(pc)\n\t\t\t\tfmt.Fprintf(s, \"%s\\n\\t%s\", fn.Name(), file)\n\t\t\t}\n\t\tdefault:\n\t\t\tio.WriteString(s, path.Base(f.file()))\n\t\t}\n\tcase 'd':\n\t\tfmt.Fprintf(s, \"%d\", f.line())\n\tcase 'n':\n\t\tname := runtime.FuncForPC(f.pc()).Name()\n\t\tio.WriteString(s, funcname(name))\n\tcase 'v':\n\t\tf.Format(s, 's')\n\t\tio.WriteString(s, \":\")\n\t\tf.Format(s, 'd')\n\t}\n}\n\n// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).\ntype StackTrace []Frame\n\nfunc (st StackTrace) Format(s fmt.State, verb rune) {\n\tswitch verb {\n\tcase 'v':\n\t\tswitch {\n\t\tcase s.Flag('+'):\n\t\t\tfor _, f := range st {\n\t\t\t\tfmt.Fprintf(s, \"\\n%+v\", f)\n\t\t\t}\n\t\tcase s.Flag('#'):\n\t\t\tfmt.Fprintf(s, \"%#v\", []Frame(st))\n\t\tdefault:\n\t\t\tfmt.Fprintf(s, \"%v\", []Frame(st))\n\t\t}\n\tcase 's':\n\t\tfmt.Fprintf(s, \"%s\", []Frame(st))\n\t}\n}\n\n// stack represents a stack of program counters.\ntype stack []uintptr\n\nfunc (s *stack) Format(st fmt.State, verb rune) {\n\tswitch verb {\n\tcase 'v':\n\t\tswitch {\n\t\tcase st.Flag('+'):\n\t\t\tfor _, pc := range *s {\n\t\t\t\tf := Frame(pc)\n\t\t\t\tfmt.Fprintf(st, \"\\n%+v\", f)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (s *stack) StackTrace() StackTrace {\n\tf := make([]Frame, len(*s))\n\tfor i := 0; i < len(f); i++ {\n\t\tf[i] = Frame((*s)[i])\n\t}\n\treturn f\n}\n\nfunc callers() *stack {\n\tconst depth = 32\n\tvar pcs [depth]uintptr\n\tn := runtime.Callers(3, pcs[:])\n\tvar st stack = pcs[0:n]\n\treturn &st\n}\n\n// funcname removes the path prefix component of a function's name reported by func.Name().\nfunc funcname(name string) string {\n\ti := strings.LastIndex(name, \"/\")\n\tname = name[i+1:]\n\ti = strings.Index(name, \".\")\n\treturn name[i+1:]\n}\n\nfunc trimGOPATH(name, file string) string {\n\t// Here we want to get the source file path relative to the compile time\n\t// GOPATH. As of Go 1.6.x there is no direct way to know the compiled\n\t// GOPATH at runtime, but we can infer the number of path segments in the\n\t// GOPATH. 
We note that fn.Name() returns the function name qualified by\n\t// the import path, which does not include the GOPATH. Thus we can trim\n\t// segments from the beginning of the file path until the number of path\n\t// separators remaining is one more than the number of path separators in\n\t// the function name. For example, given:\n\t//\n\t//    GOPATH     /home/user\n\t//    file       /home/user/src/pkg/sub/file.go\n\t//    fn.Name()  pkg/sub.Type.Method\n\t//\n\t// We want to produce:\n\t//\n\t//    pkg/sub/file.go\n\t//\n\t// From this we can easily see that fn.Name() has one less path separator\n\t// than our desired output. We count separators from the end of the file\n\t// path until it finds two more than in the function name and then move\n\t// one character forward to preserve the initial path segment without a\n\t// leading separator.\n\tconst sep = \"/\"\n\tgoal := strings.Count(name, sep) + 2\n\ti := len(file)\n\tfor n := 0; n < goal; n++ {\n\t\ti = strings.LastIndex(file[:i], sep)\n\t\tif i == -1 {\n\t\t\t// not enough separators found, set i so that the slice expression\n\t\t\t// below leaves file unmodified\n\t\t\ti = -len(sep)\n\t\t\tbreak\n\t\t}\n\t}\n\t// get back to 0 or trim the leading separator\n\tfile = file[i+len(sep):]\n\treturn file\n}\n"
  },
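  {
    "path": "vendor/github.com/pkg/errors/stack_gopath_sketch_test.go",
    "content": "package errors\n\n// Illustrative sketch, not part of the upstream package: it runs the worked\n// example from the trimGOPATH comment in stack.go, checking that separator\n// counting trims /home/user/src/pkg/sub/file.go down to pkg/sub/file.go for\n// the function name pkg/sub.Type.Method.\n\nimport \"testing\"\n\nfunc TestTrimGOPATHSketch(t *testing.T) {\n\tgot := trimGOPATH(\"pkg/sub.Type.Method\", \"/home/user/src/pkg/sub/file.go\")\n\tif want := \"pkg/sub/file.go\"; got != want {\n\t\tt.Errorf(\"trimGOPATH: got %q, want %q\", got, want)\n\t}\n}\n"
  },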
  {
    "path": "vendor/github.com/pkg/errors/stack_test.go",
    "content": "package errors\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\t\"testing\"\n)\n\nvar initpc, _, _, _ = runtime.Caller(0)\n\nfunc TestFrameLine(t *testing.T) {\n\tvar tests = []struct {\n\t\tFrame\n\t\twant int\n\t}{{\n\t\tFrame(initpc),\n\t\t9,\n\t}, {\n\t\tfunc() Frame {\n\t\t\tvar pc, _, _, _ = runtime.Caller(0)\n\t\t\treturn Frame(pc)\n\t\t}(),\n\t\t20,\n\t}, {\n\t\tfunc() Frame {\n\t\t\tvar pc, _, _, _ = runtime.Caller(1)\n\t\t\treturn Frame(pc)\n\t\t}(),\n\t\t28,\n\t}, {\n\t\tFrame(0), // invalid PC\n\t\t0,\n\t}}\n\n\tfor _, tt := range tests {\n\t\tgot := tt.Frame.line()\n\t\twant := tt.want\n\t\tif want != got {\n\t\t\tt.Errorf(\"Frame(%v): want: %v, got: %v\", uintptr(tt.Frame), want, got)\n\t\t}\n\t}\n}\n\ntype X struct{}\n\nfunc (x X) val() Frame {\n\tvar pc, _, _, _ = runtime.Caller(0)\n\treturn Frame(pc)\n}\n\nfunc (x *X) ptr() Frame {\n\tvar pc, _, _, _ = runtime.Caller(0)\n\treturn Frame(pc)\n}\n\nfunc TestFrameFormat(t *testing.T) {\n\tvar tests = []struct {\n\t\tFrame\n\t\tformat string\n\t\twant   string\n\t}{{\n\t\tFrame(initpc),\n\t\t\"%s\",\n\t\t\"stack_test.go\",\n\t}, {\n\t\tFrame(initpc),\n\t\t\"%+s\",\n\t\t\"github.com/pkg/errors.init\\n\" +\n\t\t\t\"\\t.+/github.com/pkg/errors/stack_test.go\",\n\t}, {\n\t\tFrame(0),\n\t\t\"%s\",\n\t\t\"unknown\",\n\t}, {\n\t\tFrame(0),\n\t\t\"%+s\",\n\t\t\"unknown\",\n\t}, {\n\t\tFrame(initpc),\n\t\t\"%d\",\n\t\t\"9\",\n\t}, {\n\t\tFrame(0),\n\t\t\"%d\",\n\t\t\"0\",\n\t}, {\n\t\tFrame(initpc),\n\t\t\"%n\",\n\t\t\"init\",\n\t}, {\n\t\tfunc() Frame {\n\t\t\tvar x X\n\t\t\treturn x.ptr()\n\t\t}(),\n\t\t\"%n\",\n\t\t`\\(\\*X\\).ptr`,\n\t}, {\n\t\tfunc() Frame {\n\t\t\tvar x X\n\t\t\treturn x.val()\n\t\t}(),\n\t\t\"%n\",\n\t\t\"X.val\",\n\t}, {\n\t\tFrame(0),\n\t\t\"%n\",\n\t\t\"\",\n\t}, {\n\t\tFrame(initpc),\n\t\t\"%v\",\n\t\t\"stack_test.go:9\",\n\t}, {\n\t\tFrame(initpc),\n\t\t\"%+v\",\n\t\t\"github.com/pkg/errors.init\\n\" +\n\t\t\t\"\\t.+/github.com/pkg/errors/stack_test.go:9\",\n\t}, {\n\t\tFrame(0),\n\t\t\"%v\",\n\t\t\"unknown:0\",\n\t}}\n\n\tfor i, tt := range tests {\n\t\ttestFormatRegexp(t, i, tt.Frame, tt.format, tt.want)\n\t}\n}\n\nfunc TestFuncname(t *testing.T) {\n\ttests := []struct {\n\t\tname, want string\n\t}{\n\t\t{\"\", \"\"},\n\t\t{\"runtime.main\", \"main\"},\n\t\t{\"github.com/pkg/errors.funcname\", \"funcname\"},\n\t\t{\"funcname\", \"funcname\"},\n\t\t{\"io.copyBuffer\", \"copyBuffer\"},\n\t\t{\"main.(*R).Write\", \"(*R).Write\"},\n\t}\n\n\tfor _, tt := range tests {\n\t\tgot := funcname(tt.name)\n\t\twant := tt.want\n\t\tif got != want {\n\t\t\tt.Errorf(\"funcname(%q): want: %q, got %q\", tt.name, want, got)\n\t\t}\n\t}\n}\n\nfunc TestTrimGOPATH(t *testing.T) {\n\tvar tests = []struct {\n\t\tFrame\n\t\twant string\n\t}{{\n\t\tFrame(initpc),\n\t\t\"github.com/pkg/errors/stack_test.go\",\n\t}}\n\n\tfor i, tt := range tests {\n\t\tpc := tt.Frame.pc()\n\t\tfn := runtime.FuncForPC(pc)\n\t\tfile, _ := fn.FileLine(pc)\n\t\tgot := trimGOPATH(fn.Name(), file)\n\t\ttestFormatRegexp(t, i, got, \"%s\", tt.want)\n\t}\n}\n\nfunc TestStackTrace(t *testing.T) {\n\ttests := []struct {\n\t\terr  error\n\t\twant []string\n\t}{{\n\t\tNew(\"ooh\"), []string{\n\t\t\t\"github.com/pkg/errors.TestStackTrace\\n\" +\n\t\t\t\t\"\\t.+/github.com/pkg/errors/stack_test.go:172\",\n\t\t},\n\t}, {\n\t\tWrap(New(\"ooh\"), \"ahh\"), []string{\n\t\t\t\"github.com/pkg/errors.TestStackTrace\\n\" +\n\t\t\t\t\"\\t.+/github.com/pkg/errors/stack_test.go:177\", // this is the stack of Wrap, not New\n\t\t},\n\t}, {\n\t\tCause(Wrap(New(\"ooh\"), 
\"ahh\")), []string{\n\t\t\t\"github.com/pkg/errors.TestStackTrace\\n\" +\n\t\t\t\t\"\\t.+/github.com/pkg/errors/stack_test.go:182\", // this is the stack of New\n\t\t},\n\t}, {\n\t\tfunc() error { return New(\"ooh\") }(), []string{\n\t\t\t`github.com/pkg/errors.(func·009|TestStackTrace.func1)` +\n\t\t\t\t\"\\n\\t.+/github.com/pkg/errors/stack_test.go:187\", // this is the stack of New\n\t\t\t\"github.com/pkg/errors.TestStackTrace\\n\" +\n\t\t\t\t\"\\t.+/github.com/pkg/errors/stack_test.go:187\", // this is the stack of New's caller\n\t\t},\n\t}, {\n\t\tCause(func() error {\n\t\t\treturn func() error {\n\t\t\t\treturn Errorf(\"hello %s\", fmt.Sprintf(\"world\"))\n\t\t\t}()\n\t\t}()), []string{\n\t\t\t`github.com/pkg/errors.(func·010|TestStackTrace.func2.1)` +\n\t\t\t\t\"\\n\\t.+/github.com/pkg/errors/stack_test.go:196\", // this is the stack of Errorf\n\t\t\t`github.com/pkg/errors.(func·011|TestStackTrace.func2)` +\n\t\t\t\t\"\\n\\t.+/github.com/pkg/errors/stack_test.go:197\", // this is the stack of Errorf's caller\n\t\t\t\"github.com/pkg/errors.TestStackTrace\\n\" +\n\t\t\t\t\"\\t.+/github.com/pkg/errors/stack_test.go:198\", // this is the stack of Errorf's caller's caller\n\t\t},\n\t}}\n\tfor i, tt := range tests {\n\t\tx, ok := tt.err.(interface {\n\t\t\tStackTrace() StackTrace\n\t\t})\n\t\tif !ok {\n\t\t\tt.Errorf(\"expected %#v to implement StackTrace() StackTrace\", tt.err)\n\t\t\tcontinue\n\t\t}\n\t\tst := x.StackTrace()\n\t\tfor j, want := range tt.want {\n\t\t\ttestFormatRegexp(t, i, st[j], \"%+v\", want)\n\t\t}\n\t}\n}\n\nfunc stackTrace() StackTrace {\n\tconst depth = 8\n\tvar pcs [depth]uintptr\n\tn := runtime.Callers(1, pcs[:])\n\tvar st stack = pcs[0:n]\n\treturn st.StackTrace()\n}\n\nfunc TestStackTraceFormat(t *testing.T) {\n\ttests := []struct {\n\t\tStackTrace\n\t\tformat string\n\t\twant   string\n\t}{{\n\t\tnil,\n\t\t\"%s\",\n\t\t`\\[\\]`,\n\t}, {\n\t\tnil,\n\t\t\"%v\",\n\t\t`\\[\\]`,\n\t}, {\n\t\tnil,\n\t\t\"%+v\",\n\t\t\"\",\n\t}, {\n\t\tnil,\n\t\t\"%#v\",\n\t\t`\\[\\]errors.Frame\\(nil\\)`,\n\t}, {\n\t\tmake(StackTrace, 0),\n\t\t\"%s\",\n\t\t`\\[\\]`,\n\t}, {\n\t\tmake(StackTrace, 0),\n\t\t\"%v\",\n\t\t`\\[\\]`,\n\t}, {\n\t\tmake(StackTrace, 0),\n\t\t\"%+v\",\n\t\t\"\",\n\t}, {\n\t\tmake(StackTrace, 0),\n\t\t\"%#v\",\n\t\t`\\[\\]errors.Frame{}`,\n\t}, {\n\t\tstackTrace()[:2],\n\t\t\"%s\",\n\t\t`\\[stack_test.go stack_test.go\\]`,\n\t}, {\n\t\tstackTrace()[:2],\n\t\t\"%v\",\n\t\t`\\[stack_test.go:225 stack_test.go:272\\]`,\n\t}, {\n\t\tstackTrace()[:2],\n\t\t\"%+v\",\n\t\t\"\\n\" +\n\t\t\t\"github.com/pkg/errors.stackTrace\\n\" +\n\t\t\t\"\\t.+/github.com/pkg/errors/stack_test.go:225\\n\" +\n\t\t\t\"github.com/pkg/errors.TestStackTraceFormat\\n\" +\n\t\t\t\"\\t.+/github.com/pkg/errors/stack_test.go:276\",\n\t}, {\n\t\tstackTrace()[:2],\n\t\t\"%#v\",\n\t\t`\\[\\]errors.Frame{stack_test.go:225, stack_test.go:284}`,\n\t}}\n\n\tfor i, tt := range tests {\n\t\ttestFormatRegexp(t, i, tt.StackTrace, tt.format, tt.want)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/pmezard/go-difflib/.travis.yml",
    "content": "language: go\ngo:\n  - 1.5\n  - tip\n\n"
  },
  {
    "path": "vendor/github.com/pmezard/go-difflib/LICENSE",
    "content": "Copyright (c) 2013, Patrick Mezard\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n    Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n    Redistributions in binary form must reproduce the above copyright\nnotice, this list of conditions and the following disclaimer in the\ndocumentation and/or other materials provided with the distribution.\n    The names of its contributors may not be used to endorse or promote\nproducts derived from this software without specific prior written\npermission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS\nIS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED\nTO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\nPARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nHOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED\nTO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\nPROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\nLIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\nNEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "vendor/github.com/pmezard/go-difflib/README.md",
    "content": "go-difflib\n==========\n\n[![Build Status](https://travis-ci.org/pmezard/go-difflib.png?branch=master)](https://travis-ci.org/pmezard/go-difflib)\n[![GoDoc](https://godoc.org/github.com/pmezard/go-difflib/difflib?status.svg)](https://godoc.org/github.com/pmezard/go-difflib/difflib)\n\nGo-difflib is a partial port of the Python 3 difflib package. Its main goal\nwas to make unified and context diffs available in pure Go, mostly for\ntesting purposes.\n\nThe following class and functions (and related tests) have been ported:\n\n* `SequenceMatcher`\n* `unified_diff()`\n* `context_diff()`\n\n## Installation\n\n```bash\n$ go get github.com/pmezard/go-difflib/difflib\n```\n\n### Quick Start\n\nDiffs are configured with UnifiedDiff (or ContextDiff) structures, and can\nbe output to an io.Writer or returned as a string.\n\n```Go\ndiff := difflib.UnifiedDiff{\n    A:        difflib.SplitLines(\"foo\\nbar\\n\"),\n    B:        difflib.SplitLines(\"foo\\nbaz\\n\"),\n    FromFile: \"Original\",\n    ToFile:   \"Current\",\n    Context:  3,\n}\ntext, _ := difflib.GetUnifiedDiffString(diff)\nfmt.Print(text)\n```\n\nwould output:\n\n```\n--- Original\n+++ Current\n@@ -1,3 +1,3 @@\n foo\n-bar\n+baz\n```\n\n"
  },
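  {
    "path": "vendor/github.com/pmezard/go-difflib/difflib/api_sketch_test.go",
    "content": "package difflib\n\n// Illustrative sketch, not part of the upstream library: the README notes\n// that diffs can be written to an io.Writer or returned as a string. The\n// string form is shown there; this exercises the io.Writer form through\n// WriteUnifiedDiff.\n\nimport (\n\t\"bytes\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestWriteUnifiedDiffSketch(t *testing.T) {\n\tdiff := UnifiedDiff{\n\t\tA:        SplitLines(\"foo\\nbar\\n\"),\n\t\tB:        SplitLines(\"foo\\nbaz\\n\"),\n\t\tFromFile: \"Original\",\n\t\tToFile:   \"Current\",\n\t\tContext:  3,\n\t}\n\n\tvar buf bytes.Buffer\n\tif err := WriteUnifiedDiff(&buf, diff); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tout := buf.String()\n\tif !strings.Contains(out, \"-bar\") || !strings.Contains(out, \"+baz\") {\n\t\tt.Errorf(\"unexpected diff:\\n%s\", out)\n\t}\n}\n"
  },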
  {
    "path": "vendor/github.com/pmezard/go-difflib/difflib/difflib.go",
    "content": "// Package difflib is a partial port of Python difflib module.\n//\n// It provides tools to compare sequences of strings and generate textual diffs.\n//\n// The following class and functions have been ported:\n//\n// - SequenceMatcher\n//\n// - unified_diff\n//\n// - context_diff\n//\n// Getting unified diffs was the main goal of the port. Keep in mind this code\n// is mostly suitable to output text differences in a human friendly way, there\n// are no guarantees generated diffs are consumable by patch(1).\npackage difflib\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n)\n\nfunc min(a, b int) int {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}\n\nfunc max(a, b int) int {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}\n\nfunc calculateRatio(matches, length int) float64 {\n\tif length > 0 {\n\t\treturn 2.0 * float64(matches) / float64(length)\n\t}\n\treturn 1.0\n}\n\ntype Match struct {\n\tA    int\n\tB    int\n\tSize int\n}\n\ntype OpCode struct {\n\tTag byte\n\tI1  int\n\tI2  int\n\tJ1  int\n\tJ2  int\n}\n\n// SequenceMatcher compares sequence of strings. The basic\n// algorithm predates, and is a little fancier than, an algorithm\n// published in the late 1980's by Ratcliff and Obershelp under the\n// hyperbolic name \"gestalt pattern matching\".  The basic idea is to find\n// the longest contiguous matching subsequence that contains no \"junk\"\n// elements (R-O doesn't address junk).  The same idea is then applied\n// recursively to the pieces of the sequences to the left and to the right\n// of the matching subsequence.  This does not yield minimal edit\n// sequences, but does tend to yield matches that \"look right\" to people.\n//\n// SequenceMatcher tries to compute a \"human-friendly diff\" between two\n// sequences.  Unlike e.g. UNIX(tm) diff, the fundamental notion is the\n// longest *contiguous* & junk-free matching subsequence.  That's what\n// catches peoples' eyes.  The Windows(tm) windiff has another interesting\n// notion, pairing up elements that appear uniquely in each sequence.\n// That, and the method here, appear to yield more intuitive difference\n// reports than does diff.  This method appears to be the least vulnerable\n// to synching up on blocks of \"junk lines\", though (like blank lines in\n// ordinary text files, or maybe \"<P>\" lines in HTML files).  That may be\n// because this is the only method of the 3 that has a *concept* of\n// \"junk\" <wink>.\n//\n// Timing:  Basic R-O is cubic time worst case and quadratic time expected\n// case.  
SequenceMatcher is quadratic time for the worst case and has\n// expected-case behavior dependent in a complicated way on how many\n// elements the sequences have in common; best case time is linear.\ntype SequenceMatcher struct {\n\ta              []string\n\tb              []string\n\tb2j            map[string][]int\n\tIsJunk         func(string) bool\n\tautoJunk       bool\n\tbJunk          map[string]struct{}\n\tmatchingBlocks []Match\n\tfullBCount     map[string]int\n\tbPopular       map[string]struct{}\n\topCodes        []OpCode\n}\n\nfunc NewMatcher(a, b []string) *SequenceMatcher {\n\tm := SequenceMatcher{autoJunk: true}\n\tm.SetSeqs(a, b)\n\treturn &m\n}\n\nfunc NewMatcherWithJunk(a, b []string, autoJunk bool,\n\tisJunk func(string) bool) *SequenceMatcher {\n\n\tm := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk}\n\tm.SetSeqs(a, b)\n\treturn &m\n}\n\n// Set two sequences to be compared.\nfunc (m *SequenceMatcher) SetSeqs(a, b []string) {\n\tm.SetSeq1(a)\n\tm.SetSeq2(b)\n}\n\n// Set the first sequence to be compared. The second sequence to be compared is\n// not changed.\n//\n// SequenceMatcher computes and caches detailed information about the second\n// sequence, so if you want to compare one sequence S against many sequences,\n// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other\n// sequences.\n//\n// See also SetSeqs() and SetSeq2().\nfunc (m *SequenceMatcher) SetSeq1(a []string) {\n\tif &a == &m.a {\n\t\treturn\n\t}\n\tm.a = a\n\tm.matchingBlocks = nil\n\tm.opCodes = nil\n}\n\n// Set the second sequence to be compared. The first sequence to be compared is\n// not changed.\nfunc (m *SequenceMatcher) SetSeq2(b []string) {\n\tif &b == &m.b {\n\t\treturn\n\t}\n\tm.b = b\n\tm.matchingBlocks = nil\n\tm.opCodes = nil\n\tm.fullBCount = nil\n\tm.chainB()\n}\n\nfunc (m *SequenceMatcher) chainB() {\n\t// Populate line -> index mapping\n\tb2j := map[string][]int{}\n\tfor i, s := range m.b {\n\t\tindices := b2j[s]\n\t\tindices = append(indices, i)\n\t\tb2j[s] = indices\n\t}\n\n\t// Purge junk elements\n\tm.bJunk = map[string]struct{}{}\n\tif m.IsJunk != nil {\n\t\tjunk := m.bJunk\n\t\tfor s, _ := range b2j {\n\t\t\tif m.IsJunk(s) {\n\t\t\t\tjunk[s] = struct{}{}\n\t\t\t}\n\t\t}\n\t\tfor s, _ := range junk {\n\t\t\tdelete(b2j, s)\n\t\t}\n\t}\n\n\t// Purge remaining popular elements\n\tpopular := map[string]struct{}{}\n\tn := len(m.b)\n\tif m.autoJunk && n >= 200 {\n\t\tntest := n/100 + 1\n\t\tfor s, indices := range b2j {\n\t\t\tif len(indices) > ntest {\n\t\t\t\tpopular[s] = struct{}{}\n\t\t\t}\n\t\t}\n\t\tfor s, _ := range popular {\n\t\t\tdelete(b2j, s)\n\t\t}\n\t}\n\tm.bPopular = popular\n\tm.b2j = b2j\n}\n\nfunc (m *SequenceMatcher) isBJunk(s string) bool {\n\t_, ok := m.bJunk[s]\n\treturn ok\n}\n\n// Find longest matching block in a[alo:ahi] and b[blo:bhi].\n//\n// If IsJunk is not defined:\n//\n// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where\n//     alo <= i <= i+k <= ahi\n//     blo <= j <= j+k <= bhi\n// and for all (i',j',k') meeting those conditions,\n//     k >= k'\n//     i <= i'\n//     and if i == i', j <= j'\n//\n// In other words, of all maximal matching blocks, return one that\n// starts earliest in a, and of all those maximal matching blocks that\n// start earliest in a, return the one that starts earliest in b.\n//\n// If IsJunk is defined, first the longest matching block is\n// determined as above, but with the additional restriction that no\n// junk element appears in the block.  
Then that block is extended as\n// far as possible by matching (only) junk elements on both sides.  So\n// the resulting block never matches on junk except as identical junk\n// happens to be adjacent to an \"interesting\" match.\n//\n// If no blocks match, return (alo, blo, 0).\nfunc (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match {\n\t// CAUTION:  stripping common prefix or suffix would be incorrect.\n\t// E.g.,\n\t//    ab\n\t//    acab\n\t// Longest matching block is \"ab\", but if common prefix is\n\t// stripped, it's \"a\" (tied with \"b\").  UNIX(tm) diff does so\n\t// strip, so ends up claiming that ab is changed to acab by\n\t// inserting \"ca\" in the middle.  That's minimal but unintuitive:\n\t// \"it's obvious\" that someone inserted \"ac\" at the front.\n\t// Windiff ends up at the same place as diff, but by pairing up\n\t// the unique 'b's and then matching the first two 'a's.\n\tbesti, bestj, bestsize := alo, blo, 0\n\n\t// find longest junk-free match\n\t// during an iteration of the loop, j2len[j] = length of longest\n\t// junk-free match ending with a[i-1] and b[j]\n\tj2len := map[int]int{}\n\tfor i := alo; i != ahi; i++ {\n\t\t// look at all instances of a[i] in b; note that because\n\t\t// b2j has no junk keys, the loop is skipped if a[i] is junk\n\t\tnewj2len := map[int]int{}\n\t\tfor _, j := range m.b2j[m.a[i]] {\n\t\t\t// a[i] matches b[j]\n\t\t\tif j < blo {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif j >= bhi {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tk := j2len[j-1] + 1\n\t\t\tnewj2len[j] = k\n\t\t\tif k > bestsize {\n\t\t\t\tbesti, bestj, bestsize = i-k+1, j-k+1, k\n\t\t\t}\n\t\t}\n\t\tj2len = newj2len\n\t}\n\n\t// Extend the best by non-junk elements on each end.  In particular,\n\t// \"popular\" non-junk elements aren't in b2j, which greatly speeds\n\t// the inner loop above, but also means \"the best\" match so far\n\t// doesn't contain any junk *or* popular non-junk elements.\n\tfor besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) &&\n\t\tm.a[besti-1] == m.b[bestj-1] {\n\t\tbesti, bestj, bestsize = besti-1, bestj-1, bestsize+1\n\t}\n\tfor besti+bestsize < ahi && bestj+bestsize < bhi &&\n\t\t!m.isBJunk(m.b[bestj+bestsize]) &&\n\t\tm.a[besti+bestsize] == m.b[bestj+bestsize] {\n\t\tbestsize += 1\n\t}\n\n\t// Now that we have a wholly interesting match (albeit possibly\n\t// empty!), we may as well suck up the matching junk on each\n\t// side of it too.  Can't think of a good reason not to, and it\n\t// saves post-processing the (possibly considerable) expense of\n\t// figuring out what to do with it.  In the case of an empty\n\t// interesting match, this is clearly the right thing to do,\n\t// because no other kind of match is possible in the regions.\n\tfor besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) &&\n\t\tm.a[besti-1] == m.b[bestj-1] {\n\t\tbesti, bestj, bestsize = besti-1, bestj-1, bestsize+1\n\t}\n\tfor besti+bestsize < ahi && bestj+bestsize < bhi &&\n\t\tm.isBJunk(m.b[bestj+bestsize]) &&\n\t\tm.a[besti+bestsize] == m.b[bestj+bestsize] {\n\t\tbestsize += 1\n\t}\n\n\treturn Match{A: besti, B: bestj, Size: bestsize}\n}\n\n// Return list of triples describing matching subsequences.\n//\n// Each triple is of the form (i, j, n), and means that\n// a[i:i+n] == b[j:j+n].  The triples are monotonically increasing in\n// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are\n// adjacent triples in the list, and the second is not the last triple in the\n// list, then i+n != i' or j+n != j'. 
IOW, adjacent triples never describe\n// adjacent equal blocks.\n//\n// The last triple is a dummy, (len(a), len(b), 0), and is the only\n// triple with n==0.\nfunc (m *SequenceMatcher) GetMatchingBlocks() []Match {\n\tif m.matchingBlocks != nil {\n\t\treturn m.matchingBlocks\n\t}\n\n\tvar matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match\n\tmatchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match {\n\t\tmatch := m.findLongestMatch(alo, ahi, blo, bhi)\n\t\ti, j, k := match.A, match.B, match.Size\n\t\tif match.Size > 0 {\n\t\t\tif alo < i && blo < j {\n\t\t\t\tmatched = matchBlocks(alo, i, blo, j, matched)\n\t\t\t}\n\t\t\tmatched = append(matched, match)\n\t\t\tif i+k < ahi && j+k < bhi {\n\t\t\t\tmatched = matchBlocks(i+k, ahi, j+k, bhi, matched)\n\t\t\t}\n\t\t}\n\t\treturn matched\n\t}\n\tmatched := matchBlocks(0, len(m.a), 0, len(m.b), nil)\n\n\t// It's possible that we have adjacent equal blocks in the\n\t// matching_blocks list now.\n\tnonAdjacent := []Match{}\n\ti1, j1, k1 := 0, 0, 0\n\tfor _, b := range matched {\n\t\t// Is this block adjacent to i1, j1, k1?\n\t\ti2, j2, k2 := b.A, b.B, b.Size\n\t\tif i1+k1 == i2 && j1+k1 == j2 {\n\t\t\t// Yes, so collapse them -- this just increases the length of\n\t\t\t// the first block by the length of the second, and the first\n\t\t\t// block so lengthened remains the block to compare against.\n\t\t\tk1 += k2\n\t\t} else {\n\t\t\t// Not adjacent.  Remember the first block (k1==0 means it's\n\t\t\t// the dummy we started with), and make the second block the\n\t\t\t// new block to compare against.\n\t\t\tif k1 > 0 {\n\t\t\t\tnonAdjacent = append(nonAdjacent, Match{i1, j1, k1})\n\t\t\t}\n\t\t\ti1, j1, k1 = i2, j2, k2\n\t\t}\n\t}\n\tif k1 > 0 {\n\t\tnonAdjacent = append(nonAdjacent, Match{i1, j1, k1})\n\t}\n\n\tnonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0})\n\tm.matchingBlocks = nonAdjacent\n\treturn m.matchingBlocks\n}\n\n// Return list of 5-tuples describing how to turn a into b.\n//\n// Each tuple is of the form (tag, i1, i2, j1, j2).  The first tuple\n// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the\n// tuple preceding it, and likewise for j1 == the previous j2.\n//\n// The tags are characters, with these meanings:\n//\n// 'r' (replace):  a[i1:i2] should be replaced by b[j1:j2]\n//\n// 'd' (delete):   a[i1:i2] should be deleted, j1==j2 in this case.\n//\n// 'i' (insert):   b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case.\n//\n// 'e' (equal):    a[i1:i2] == b[j1:j2]\nfunc (m *SequenceMatcher) GetOpCodes() []OpCode {\n\tif m.opCodes != nil {\n\t\treturn m.opCodes\n\t}\n\ti, j := 0, 0\n\tmatching := m.GetMatchingBlocks()\n\topCodes := make([]OpCode, 0, len(matching))\n\tfor _, m := range matching {\n\t\t//  invariant:  we've pumped out correct diffs to change\n\t\t//  a[:i] into b[:j], and the next matching block is\n\t\t//  a[ai:ai+size] == b[bj:bj+size]. 
So we need to pump\n\t\t//  out a diff to change a[i:ai] into b[j:bj], pump out\n\t\t//  the matching block, and move (i,j) beyond the match\n\t\tai, bj, size := m.A, m.B, m.Size\n\t\ttag := byte(0)\n\t\tif i < ai && j < bj {\n\t\t\ttag = 'r'\n\t\t} else if i < ai {\n\t\t\ttag = 'd'\n\t\t} else if j < bj {\n\t\t\ttag = 'i'\n\t\t}\n\t\tif tag > 0 {\n\t\t\topCodes = append(opCodes, OpCode{tag, i, ai, j, bj})\n\t\t}\n\t\ti, j = ai+size, bj+size\n\t\t// the list of matching blocks is terminated by a\n\t\t// sentinel with size 0\n\t\tif size > 0 {\n\t\t\topCodes = append(opCodes, OpCode{'e', ai, i, bj, j})\n\t\t}\n\t}\n\tm.opCodes = opCodes\n\treturn m.opCodes\n}\n\n// Isolate change clusters by eliminating ranges with no changes.\n//\n// Return a slice of groups with up to n lines of context.\n// Each group is in the same format as returned by GetOpCodes().\nfunc (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {\n\tif n < 0 {\n\t\tn = 3\n\t}\n\tcodes := m.GetOpCodes()\n\tif len(codes) == 0 {\n\t\tcodes = []OpCode{OpCode{'e', 0, 1, 0, 1}}\n\t}\n\t// Fixup leading and trailing groups if they show no changes.\n\tif codes[0].Tag == 'e' {\n\t\tc := codes[0]\n\t\ti1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2\n\t\tcodes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}\n\t}\n\tif codes[len(codes)-1].Tag == 'e' {\n\t\tc := codes[len(codes)-1]\n\t\ti1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2\n\t\tcodes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}\n\t}\n\tnn := n + n\n\tgroups := [][]OpCode{}\n\tgroup := []OpCode{}\n\tfor _, c := range codes {\n\t\ti1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2\n\t\t// End the current group and start a new one whenever\n\t\t// there is a large range with no changes.\n\t\tif c.Tag == 'e' && i2-i1 > nn {\n\t\t\tgroup = append(group, OpCode{c.Tag, i1, min(i2, i1+n),\n\t\t\t\tj1, min(j2, j1+n)})\n\t\t\tgroups = append(groups, group)\n\t\t\tgroup = []OpCode{}\n\t\t\ti1, j1 = max(i1, i2-n), max(j1, j2-n)\n\t\t}\n\t\tgroup = append(group, OpCode{c.Tag, i1, i2, j1, j2})\n\t}\n\tif len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {\n\t\tgroups = append(groups, group)\n\t}\n\treturn groups\n}\n\n// Return a measure of the sequences' similarity (float in [0,1]).\n//\n// Where T is the total number of elements in both sequences, and\n// M is the number of matches, this is 2.0*M / T.\n// Note that this is 1 if the sequences are identical, and 0 if\n// they have nothing in common.\n//\n// .Ratio() is expensive to compute if you haven't already computed\n// .GetMatchingBlocks() or .GetOpCodes(), in which case you may\n// want to try .QuickRatio() or .RealQuickRatio() first to get an\n// upper bound.\nfunc (m *SequenceMatcher) Ratio() float64 {\n\tmatches := 0\n\tfor _, m := range m.GetMatchingBlocks() {\n\t\tmatches += m.Size\n\t}\n\treturn calculateRatio(matches, len(m.a)+len(m.b))\n}\n\n// Return an upper bound on ratio() relatively quickly.\n//\n// This isn't defined beyond that it is an upper bound on .Ratio(), and\n// is faster to compute.\nfunc (m *SequenceMatcher) QuickRatio() float64 {\n\t// viewing a and b as multisets, set matches to the cardinality\n\t// of their intersection; this counts the number of matches\n\t// without regard to order, so is clearly an upper bound\n\tif m.fullBCount == nil {\n\t\tm.fullBCount = map[string]int{}\n\t\tfor _, s := range m.b {\n\t\t\tm.fullBCount[s] = m.fullBCount[s] + 1\n\t\t}\n\t}\n\n\t// avail[x] is the number of times x appears in 'b' less the\n\t// number of times we've seen it in 'a' so far ... kinda\n\tavail := map[string]int{}\n\tmatches := 0\n\tfor _, s := range m.a {\n\t\tn, ok := avail[s]\n\t\tif !ok {\n\t\t\tn = m.fullBCount[s]\n\t\t}\n\t\tavail[s] = n - 1\n\t\tif n > 0 {\n\t\t\tmatches += 1\n\t\t}\n\t}\n\treturn calculateRatio(matches, len(m.a)+len(m.b))\n}\n\n// Return an upper bound on ratio() very quickly.\n//\n// This isn't defined beyond that it is an upper bound on .Ratio(), and\n// is faster to compute than either .Ratio() or .QuickRatio().\nfunc (m *SequenceMatcher) RealQuickRatio() float64 {\n\tla, lb := len(m.a), len(m.b)\n\treturn calculateRatio(min(la, lb), la+lb)\n}\n\n// Convert range to the \"ed\" format\nfunc formatRangeUnified(start, stop int) string {\n\t// Per the diff spec at http://www.unix.org/single_unix_specification/\n\tbeginning := start + 1 // lines start numbering with one\n\tlength := stop - start\n\tif length == 1 {\n\t\treturn fmt.Sprintf(\"%d\", beginning)\n\t}\n\tif length == 0 {\n\t\tbeginning -= 1 // empty ranges begin at line just before the range\n\t}\n\treturn fmt.Sprintf(\"%d,%d\", beginning, length)\n}\n\n// Unified diff parameters\ntype UnifiedDiff struct {\n\tA        []string // First sequence lines\n\tFromFile string   // First file name\n\tFromDate string   // First file time\n\tB        []string // Second sequence lines\n\tToFile   string   // Second file name\n\tToDate   string   // Second file time\n\tEol      string   // Headers end of line, defaults to LF\n\tContext  int      // Number of context lines\n}\n\n// Compare two sequences of lines; generate the delta as a unified diff.\n//\n// Unified diffs are a compact way of showing line changes and a few\n// lines of context.  The number of context lines is set by diff.Context,\n// which defaults to three.\n//\n// By default, the diff control lines (those with ---, +++, or @@) are\n// created with a trailing newline.  This is helpful so that inputs\n// created from file.readlines() result in diffs that are suitable for\n// file.writelines() since both the inputs and outputs have trailing\n// newlines.\n//\n// For inputs that do not have trailing newlines, set the diff.Eol\n// field to \"\" so that the output will be uniformly newline free.\n//\n// The unidiff format normally has a header for filenames and modification\n// times.  Any or all of these may be specified using strings for\n// diff.FromFile, diff.ToFile, diff.FromDate, and diff.ToDate.\n// The modification times are normally expressed in the ISO 8601 format.\nfunc WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error {\n\tbuf := bufio.NewWriter(writer)\n\tdefer buf.Flush()\n\twf := func(format string, args ...interface{}) error {\n\t\t_, err := buf.WriteString(fmt.Sprintf(format, args...))\n\t\treturn err\n\t}\n\tws := func(s string) error {\n\t\t_, err := buf.WriteString(s)\n\t\treturn err\n\t}\n\n\tif len(diff.Eol) == 0 {\n\t\tdiff.Eol = \"\\n\"\n\t}\n\n\tstarted := false\n\tm := NewMatcher(diff.A, diff.B)\n\tfor _, g := range m.GetGroupedOpCodes(diff.Context) {\n\t\tif !started {\n\t\t\tstarted = true\n\t\t\tfromDate := \"\"\n\t\t\tif len(diff.FromDate) > 0 {\n\t\t\t\tfromDate = \"\\t\" + diff.FromDate\n\t\t\t}\n\t\t\ttoDate := \"\"\n\t\t\tif len(diff.ToDate) > 0 {\n\t\t\t\ttoDate = \"\\t\" + diff.ToDate\n\t\t\t}\n\t\t\tif diff.FromFile != \"\" || diff.ToFile != \"\" {\n\t\t\t\terr := wf(\"--- %s%s%s\", diff.FromFile, fromDate, diff.Eol)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\terr = wf(\"+++ %s%s%s\", diff.ToFile, toDate, diff.Eol)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfirst, last := g[0], g[len(g)-1]\n\t\trange1 := formatRangeUnified(first.I1, last.I2)\n\t\trange2 := formatRangeUnified(first.J1, last.J2)\n\t\tif err := wf(\"@@ -%s +%s @@%s\", range1, range2, diff.Eol); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, c := range g {\n\t\t\ti1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2\n\t\t\tif c.Tag == 'e' {\n\t\t\t\tfor _, line := range diff.A[i1:i2] {\n\t\t\t\t\tif err := ws(\" \" + line); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif c.Tag == 'r' || c.Tag == 'd' {\n\t\t\t\tfor _, line := range diff.A[i1:i2] {\n\t\t\t\t\tif err := ws(\"-\" + line); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif c.Tag == 'r' || c.Tag == 'i' {\n\t\t\t\tfor _, line := range diff.B[j1:j2] {\n\t\t\t\t\tif err := ws(\"+\" + line); err != nil {\n\t\t\t\t\t\treturn err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n// Like WriteUnifiedDiff but returns the diff as a string.\nfunc GetUnifiedDiffString(diff UnifiedDiff) (string, error) {\n\tw := &bytes.Buffer{}\n\terr := WriteUnifiedDiff(w, diff)\n\treturn string(w.Bytes()), err\n}\n\n// Convert range to the \"ed\" format.\nfunc formatRangeContext(start, stop int) string {\n\t// Per the diff spec at http://www.unix.org/single_unix_specification/\n\tbeginning := start + 1 // lines start numbering with one\n\tlength := stop - start\n\tif length == 0 {\n\t\tbeginning -= 1 // empty ranges begin at line just before the range\n\t}\n\tif length <= 1 {\n\t\treturn fmt.Sprintf(\"%d\", beginning)\n\t}\n\treturn fmt.Sprintf(\"%d,%d\", beginning, beginning+length-1)\n}\n\ntype ContextDiff UnifiedDiff\n\n// Compare two sequences of lines; generate the delta as a context diff.\n//\n// Context diffs are a compact way of showing line changes and a few\n// lines of context.  The number of context lines is set by diff.Context,\n// which defaults to three.\n//\n// By default, the diff control lines (those with *** or ---) are\n// created with a trailing newline.\n//\n// For inputs that do not have trailing newlines, set the diff.Eol\n// field to \"\" so that the output will be uniformly newline free.\n//\n// The context diff format normally has a header for filenames and\n// modification times.  Any or all of these may be specified using\n// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate.\n// The modification times are normally expressed in the ISO 8601 format.\n// If not specified, the strings default to blanks.\nfunc WriteContextDiff(writer io.Writer, diff ContextDiff) error {\n\tbuf := bufio.NewWriter(writer)\n\tdefer buf.Flush()\n\tvar diffErr error\n\twf := func(format string, args ...interface{}) {\n\t\t_, err := buf.WriteString(fmt.Sprintf(format, args...))\n\t\tif diffErr == nil && err != nil {\n\t\t\tdiffErr = err\n\t\t}\n\t}\n\tws := func(s string) {\n\t\t_, err := buf.WriteString(s)\n\t\tif diffErr == nil && err != nil {\n\t\t\tdiffErr = err\n\t\t}\n\t}\n\n\tif len(diff.Eol) == 0 {\n\t\tdiff.Eol = \"\\n\"\n\t}\n\n\tprefix := map[byte]string{\n\t\t'i': \"+ \",\n\t\t'd': \"- \",\n\t\t'r': \"! \",\n\t\t'e': \"  \",\n\t}\n\n\tstarted := false\n\tm := NewMatcher(diff.A, diff.B)\n\tfor _, g := range m.GetGroupedOpCodes(diff.Context) {\n\t\tif !started {\n\t\t\tstarted = true\n\t\t\tfromDate := \"\"\n\t\t\tif len(diff.FromDate) > 0 {\n\t\t\t\tfromDate = \"\\t\" + diff.FromDate\n\t\t\t}\n\t\t\ttoDate := \"\"\n\t\t\tif len(diff.ToDate) > 0 {\n\t\t\t\ttoDate = \"\\t\" + diff.ToDate\n\t\t\t}\n\t\t\tif diff.FromFile != \"\" || diff.ToFile != \"\" {\n\t\t\t\twf(\"*** %s%s%s\", diff.FromFile, fromDate, diff.Eol)\n\t\t\t\twf(\"--- %s%s%s\", diff.ToFile, toDate, diff.Eol)\n\t\t\t}\n\t\t}\n\n\t\tfirst, last := g[0], g[len(g)-1]\n\t\tws(\"***************\" + diff.Eol)\n\n\t\trange1 := formatRangeContext(first.I1, last.I2)\n\t\twf(\"*** %s ****%s\", range1, diff.Eol)\n\t\tfor _, c := range g {\n\t\t\tif c.Tag == 'r' || c.Tag == 'd' {\n\t\t\t\tfor _, cc := range g {\n\t\t\t\t\tif cc.Tag == 'i' {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tfor _, line := range diff.A[cc.I1:cc.I2] {\n\t\t\t\t\t\tws(prefix[cc.Tag] + line)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\trange2 := formatRangeContext(first.J1, last.J2)\n\t\twf(\"--- %s ----%s\", range2, diff.Eol)\n\t\tfor _, c := range g {\n\t\t\tif c.Tag == 'r' || c.Tag == 'i' {\n\t\t\t\tfor _, cc := range g {\n\t\t\t\t\tif cc.Tag == 'd' {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tfor _, line := range diff.B[cc.J1:cc.J2] {\n\t\t\t\t\t\tws(prefix[cc.Tag] + line)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn diffErr\n}\n\n// Like WriteContextDiff but returns the diff as a string.\nfunc GetContextDiffString(diff ContextDiff) (string, error) {\n\tw := &bytes.Buffer{}\n\terr := WriteContextDiff(w, diff)\n\treturn string(w.Bytes()), err\n}\n\n// Split a string on \"\\n\" while preserving the newlines. The output can be used\n// as input for UnifiedDiff and ContextDiff structures.\nfunc SplitLines(s string) []string {\n\tlines := strings.SplitAfter(s, \"\\n\")\n\tlines[len(lines)-1] += \"\\n\"\n\treturn lines\n}\n"
  },
  {
    "path": "vendor/github.com/pmezard/go-difflib/difflib/difflib_test.go",
    "content": "package difflib\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"math\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc assertAlmostEqual(t *testing.T, a, b float64, places int) {\n\tif math.Abs(a-b) > math.Pow10(-places) {\n\t\tt.Errorf(\"%.7f != %.7f\", a, b)\n\t}\n}\n\nfunc assertEqual(t *testing.T, a, b interface{}) {\n\tif !reflect.DeepEqual(a, b) {\n\t\tt.Errorf(\"%v != %v\", a, b)\n\t}\n}\n\nfunc splitChars(s string) []string {\n\tchars := make([]string, 0, len(s))\n\t// Assume ASCII inputs\n\tfor i := 0; i != len(s); i++ {\n\t\tchars = append(chars, string(s[i]))\n\t}\n\treturn chars\n}\n\nfunc TestSequenceMatcherRatio(t *testing.T) {\n\ts := NewMatcher(splitChars(\"abcd\"), splitChars(\"bcde\"))\n\tassertEqual(t, s.Ratio(), 0.75)\n\tassertEqual(t, s.QuickRatio(), 0.75)\n\tassertEqual(t, s.RealQuickRatio(), 1.0)\n}\n\nfunc TestGetOptCodes(t *testing.T) {\n\ta := \"qabxcd\"\n\tb := \"abycdf\"\n\ts := NewMatcher(splitChars(a), splitChars(b))\n\tw := &bytes.Buffer{}\n\tfor _, op := range s.GetOpCodes() {\n\t\tfmt.Fprintf(w, \"%s a[%d:%d], (%s) b[%d:%d] (%s)\\n\", string(op.Tag),\n\t\t\top.I1, op.I2, a[op.I1:op.I2], op.J1, op.J2, b[op.J1:op.J2])\n\t}\n\tresult := string(w.Bytes())\n\texpected := `d a[0:1], (q) b[0:0] ()\ne a[1:3], (ab) b[0:2] (ab)\nr a[3:4], (x) b[2:3] (y)\ne a[4:6], (cd) b[3:5] (cd)\ni a[6:6], () b[5:6] (f)\n`\n\tif expected != result {\n\t\tt.Errorf(\"unexpected op codes: \\n%s\", result)\n\t}\n}\n\nfunc TestGroupedOpCodes(t *testing.T) {\n\ta := []string{}\n\tfor i := 0; i != 39; i++ {\n\t\ta = append(a, fmt.Sprintf(\"%02d\", i))\n\t}\n\tb := []string{}\n\tb = append(b, a[:8]...)\n\tb = append(b, \" i\")\n\tb = append(b, a[8:19]...)\n\tb = append(b, \" x\")\n\tb = append(b, a[20:22]...)\n\tb = append(b, a[27:34]...)\n\tb = append(b, \" y\")\n\tb = append(b, a[35:]...)\n\ts := NewMatcher(a, b)\n\tw := &bytes.Buffer{}\n\tfor _, g := range s.GetGroupedOpCodes(-1) {\n\t\tfmt.Fprintf(w, \"group\\n\")\n\t\tfor _, op := range g {\n\t\t\tfmt.Fprintf(w, \"  %s, %d, %d, %d, %d\\n\", string(op.Tag),\n\t\t\t\top.I1, op.I2, op.J1, op.J2)\n\t\t}\n\t}\n\tresult := string(w.Bytes())\n\texpected := `group\n  e, 5, 8, 5, 8\n  i, 8, 8, 8, 9\n  e, 8, 11, 9, 12\ngroup\n  e, 16, 19, 17, 20\n  r, 19, 20, 20, 21\n  e, 20, 22, 21, 23\n  d, 22, 27, 23, 23\n  e, 27, 30, 23, 26\ngroup\n  e, 31, 34, 27, 30\n  r, 34, 35, 30, 31\n  e, 35, 38, 31, 34\n`\n\tif expected != result {\n\t\tt.Errorf(\"unexpected op codes: \\n%s\", result)\n\t}\n}\n\nfunc ExampleGetUnifiedDiffCode() {\n\ta := `one\ntwo\nthree\nfour\nfmt.Printf(\"%s,%T\",a,b)`\n\tb := `zero\none\nthree\nfour`\n\tdiff := UnifiedDiff{\n\t\tA:        SplitLines(a),\n\t\tB:        SplitLines(b),\n\t\tFromFile: \"Original\",\n\t\tFromDate: \"2005-01-26 23:30:50\",\n\t\tToFile:   \"Current\",\n\t\tToDate:   \"2010-04-02 10:20:52\",\n\t\tContext:  3,\n\t}\n\tresult, _ := GetUnifiedDiffString(diff)\n\tfmt.Println(strings.Replace(result, \"\\t\", \" \", -1))\n\t// Output:\n\t// --- Original 2005-01-26 23:30:50\n\t// +++ Current 2010-04-02 10:20:52\n\t// @@ -1,5 +1,4 @@\n\t// +zero\n\t//  one\n\t// -two\n\t//  three\n\t//  four\n\t// -fmt.Printf(\"%s,%T\",a,b)\n}\n\nfunc ExampleGetContextDiffCode() {\n\ta := `one\ntwo\nthree\nfour\nfmt.Printf(\"%s,%T\",a,b)`\n\tb := `zero\none\ntree\nfour`\n\tdiff := ContextDiff{\n\t\tA:        SplitLines(a),\n\t\tB:        SplitLines(b),\n\t\tFromFile: \"Original\",\n\t\tToFile:   \"Current\",\n\t\tContext:  3,\n\t\tEol:      \"\\n\",\n\t}\n\tresult, _ := 
GetContextDiffString(diff)\n\tfmt.Print(strings.Replace(result, \"\\t\", \" \", -1))\n\t// Output:\n\t// *** Original\n\t// --- Current\n\t// ***************\n\t// *** 1,5 ****\n\t//   one\n\t// ! two\n\t// ! three\n\t//   four\n\t// - fmt.Printf(\"%s,%T\",a,b)\n\t// --- 1,4 ----\n\t// + zero\n\t//   one\n\t// ! tree\n\t//   four\n}\n\nfunc ExampleGetContextDiffString() {\n\ta := `one\ntwo\nthree\nfour`\n\tb := `zero\none\ntree\nfour`\n\tdiff := ContextDiff{\n\t\tA:        SplitLines(a),\n\t\tB:        SplitLines(b),\n\t\tFromFile: \"Original\",\n\t\tToFile:   \"Current\",\n\t\tContext:  3,\n\t\tEol:      \"\\n\",\n\t}\n\tresult, _ := GetContextDiffString(diff)\n\tfmt.Printf(strings.Replace(result, \"\\t\", \" \", -1))\n\t// Output:\n\t// *** Original\n\t// --- Current\n\t// ***************\n\t// *** 1,4 ****\n\t//   one\n\t// ! two\n\t// ! three\n\t//   four\n\t// --- 1,4 ----\n\t// + zero\n\t//   one\n\t// ! tree\n\t//   four\n}\n\nfunc rep(s string, count int) string {\n\treturn strings.Repeat(s, count)\n}\n\nfunc TestWithAsciiOneInsert(t *testing.T) {\n\tsm := NewMatcher(splitChars(rep(\"b\", 100)),\n\t\tsplitChars(\"a\"+rep(\"b\", 100)))\n\tassertAlmostEqual(t, sm.Ratio(), 0.995, 3)\n\tassertEqual(t, sm.GetOpCodes(),\n\t\t[]OpCode{{'i', 0, 0, 0, 1}, {'e', 0, 100, 1, 101}})\n\tassertEqual(t, len(sm.bPopular), 0)\n\n\tsm = NewMatcher(splitChars(rep(\"b\", 100)),\n\t\tsplitChars(rep(\"b\", 50)+\"a\"+rep(\"b\", 50)))\n\tassertAlmostEqual(t, sm.Ratio(), 0.995, 3)\n\tassertEqual(t, sm.GetOpCodes(),\n\t\t[]OpCode{{'e', 0, 50, 0, 50}, {'i', 50, 50, 50, 51}, {'e', 50, 100, 51, 101}})\n\tassertEqual(t, len(sm.bPopular), 0)\n}\n\nfunc TestWithAsciiOnDelete(t *testing.T) {\n\tsm := NewMatcher(splitChars(rep(\"a\", 40)+\"c\"+rep(\"b\", 40)),\n\t\tsplitChars(rep(\"a\", 40)+rep(\"b\", 40)))\n\tassertAlmostEqual(t, sm.Ratio(), 0.994, 3)\n\tassertEqual(t, sm.GetOpCodes(),\n\t\t[]OpCode{{'e', 0, 40, 0, 40}, {'d', 40, 41, 40, 40}, {'e', 41, 81, 40, 80}})\n}\n\nfunc TestWithAsciiBJunk(t *testing.T) {\n\tisJunk := func(s string) bool {\n\t\treturn s == \" \"\n\t}\n\tsm := NewMatcherWithJunk(splitChars(rep(\"a\", 40)+rep(\"b\", 40)),\n\t\tsplitChars(rep(\"a\", 44)+rep(\"b\", 40)), true, isJunk)\n\tassertEqual(t, sm.bJunk, map[string]struct{}{})\n\n\tsm = NewMatcherWithJunk(splitChars(rep(\"a\", 40)+rep(\"b\", 40)),\n\t\tsplitChars(rep(\"a\", 44)+rep(\"b\", 40)+rep(\" \", 20)), false, isJunk)\n\tassertEqual(t, sm.bJunk, map[string]struct{}{\" \": struct{}{}})\n\n\tisJunk = func(s string) bool {\n\t\treturn s == \" \" || s == \"b\"\n\t}\n\tsm = NewMatcherWithJunk(splitChars(rep(\"a\", 40)+rep(\"b\", 40)),\n\t\tsplitChars(rep(\"a\", 44)+rep(\"b\", 40)+rep(\" \", 20)), false, isJunk)\n\tassertEqual(t, sm.bJunk, map[string]struct{}{\" \": struct{}{}, \"b\": struct{}{}})\n}\n\nfunc TestSFBugsRatioForNullSeqn(t *testing.T) {\n\tsm := NewMatcher(nil, nil)\n\tassertEqual(t, sm.Ratio(), 1.0)\n\tassertEqual(t, sm.QuickRatio(), 1.0)\n\tassertEqual(t, sm.RealQuickRatio(), 1.0)\n}\n\nfunc TestSFBugsComparingEmptyLists(t *testing.T) {\n\tgroups := NewMatcher(nil, nil).GetGroupedOpCodes(-1)\n\tassertEqual(t, len(groups), 0)\n\tdiff := UnifiedDiff{\n\t\tFromFile: \"Original\",\n\t\tToFile:   \"Current\",\n\t\tContext:  3,\n\t}\n\tresult, err := GetUnifiedDiffString(diff)\n\tassertEqual(t, err, nil)\n\tassertEqual(t, result, \"\")\n}\n\nfunc TestOutputFormatRangeFormatUnified(t *testing.T) {\n\t// Per the diff spec at http://www.unix.org/single_unix_specification/\n\t//\n\t// Each <range> field shall be of the form:\n\t//   
\"%1d\", <beginning line number>  if the range contains exactly one line,\n\t// and:\n\t//  \"%1d,%1d\", <beginning line number>, <number of lines> otherwise.\n\t// If a range is empty, its beginning line number shall be the number of\n\t// the line just before the range, or 0 if the empty range starts the file.\n\tfm := formatRangeUnified\n\tassertEqual(t, fm(3, 3), \"3,0\")\n\tassertEqual(t, fm(3, 4), \"4\")\n\tassertEqual(t, fm(3, 5), \"4,2\")\n\tassertEqual(t, fm(3, 6), \"4,3\")\n\tassertEqual(t, fm(0, 0), \"0,0\")\n}\n\nfunc TestOutputFormatRangeFormatContext(t *testing.T) {\n\t// Per the diff spec at http://www.unix.org/single_unix_specification/\n\t//\n\t// The range of lines in file1 shall be written in the following format\n\t// if the range contains two or more lines:\n\t//     \"*** %d,%d ****\\n\", <beginning line number>, <ending line number>\n\t// and the following format otherwise:\n\t//     \"*** %d ****\\n\", <ending line number>\n\t// The ending line number of an empty range shall be the number of the preceding line,\n\t// or 0 if the range is at the start of the file.\n\t//\n\t// Next, the range of lines in file2 shall be written in the following format\n\t// if the range contains two or more lines:\n\t//     \"--- %d,%d ----\\n\", <beginning line number>, <ending line number>\n\t// and the following format otherwise:\n\t//     \"--- %d ----\\n\", <ending line number>\n\tfm := formatRangeContext\n\tassertEqual(t, fm(3, 3), \"3\")\n\tassertEqual(t, fm(3, 4), \"4\")\n\tassertEqual(t, fm(3, 5), \"4,5\")\n\tassertEqual(t, fm(3, 6), \"4,6\")\n\tassertEqual(t, fm(0, 0), \"0\")\n}\n\nfunc TestOutputFormatTabDelimiter(t *testing.T) {\n\tdiff := UnifiedDiff{\n\t\tA:        splitChars(\"one\"),\n\t\tB:        splitChars(\"two\"),\n\t\tFromFile: \"Original\",\n\t\tFromDate: \"2005-01-26 23:30:50\",\n\t\tToFile:   \"Current\",\n\t\tToDate:   \"2010-04-12 10:20:52\",\n\t\tEol:      \"\\n\",\n\t}\n\tud, err := GetUnifiedDiffString(diff)\n\tassertEqual(t, err, nil)\n\tassertEqual(t, SplitLines(ud)[:2], []string{\n\t\t\"--- Original\\t2005-01-26 23:30:50\\n\",\n\t\t\"+++ Current\\t2010-04-12 10:20:52\\n\",\n\t})\n\tcd, err := GetContextDiffString(ContextDiff(diff))\n\tassertEqual(t, err, nil)\n\tassertEqual(t, SplitLines(cd)[:2], []string{\n\t\t\"*** Original\\t2005-01-26 23:30:50\\n\",\n\t\t\"--- Current\\t2010-04-12 10:20:52\\n\",\n\t})\n}\n\nfunc TestOutputFormatNoTrailingTabOnEmptyFiledate(t *testing.T) {\n\tdiff := UnifiedDiff{\n\t\tA:        splitChars(\"one\"),\n\t\tB:        splitChars(\"two\"),\n\t\tFromFile: \"Original\",\n\t\tToFile:   \"Current\",\n\t\tEol:      \"\\n\",\n\t}\n\tud, err := GetUnifiedDiffString(diff)\n\tassertEqual(t, err, nil)\n\tassertEqual(t, SplitLines(ud)[:2], []string{\"--- Original\\n\", \"+++ Current\\n\"})\n\n\tcd, err := GetContextDiffString(ContextDiff(diff))\n\tassertEqual(t, err, nil)\n\tassertEqual(t, SplitLines(cd)[:2], []string{\"*** Original\\n\", \"--- Current\\n\"})\n}\n\nfunc TestOmitFilenames(t *testing.T) {\n\tdiff := UnifiedDiff{\n\t\tA:   SplitLines(\"o\\nn\\ne\\n\"),\n\t\tB:   SplitLines(\"t\\nw\\no\\n\"),\n\t\tEol: \"\\n\",\n\t}\n\tud, err := GetUnifiedDiffString(diff)\n\tassertEqual(t, err, nil)\n\tassertEqual(t, SplitLines(ud), []string{\n\t\t\"@@ -0,0 +1,2 @@\\n\",\n\t\t\"+t\\n\",\n\t\t\"+w\\n\",\n\t\t\"@@ -2,2 +3,0 @@\\n\",\n\t\t\"-n\\n\",\n\t\t\"-e\\n\",\n\t\t\"\\n\",\n\t})\n\n\tcd, err := GetContextDiffString(ContextDiff(diff))\n\tassertEqual(t, err, nil)\n\tassertEqual(t, SplitLines(cd), 
[]string{\n\t\t\"***************\\n\",\n\t\t\"*** 0 ****\\n\",\n\t\t\"--- 1,2 ----\\n\",\n\t\t\"+ t\\n\",\n\t\t\"+ w\\n\",\n\t\t\"***************\\n\",\n\t\t\"*** 2,3 ****\\n\",\n\t\t\"- n\\n\",\n\t\t\"- e\\n\",\n\t\t\"--- 3 ----\\n\",\n\t\t\"\\n\",\n\t})\n}\n\nfunc TestSplitLines(t *testing.T) {\n\tallTests := []struct {\n\t\tinput string\n\t\twant  []string\n\t}{\n\t\t{\"foo\", []string{\"foo\\n\"}},\n\t\t{\"foo\\nbar\", []string{\"foo\\n\", \"bar\\n\"}},\n\t\t{\"foo\\nbar\\n\", []string{\"foo\\n\", \"bar\\n\", \"\\n\"}},\n\t}\n\tfor _, test := range allTests {\n\t\tassertEqual(t, SplitLines(test.input), test.want)\n\t}\n}\n\nfunc benchmarkSplitLines(b *testing.B, count int) {\n\tstr := strings.Repeat(\"foo\\n\", count)\n\n\tb.ResetTimer()\n\n\tn := 0\n\tfor i := 0; i < b.N; i++ {\n\t\tn += len(SplitLines(str))\n\t}\n}\n\nfunc BenchmarkSplitLines100(b *testing.B) {\n\tbenchmarkSplitLines(b, 100)\n}\n\nfunc BenchmarkSplitLines10000(b *testing.B) {\n\tbenchmarkSplitLines(b, 10000)\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/.gitignore",
    "content": "# Compiled Object files, Static and Dynamic libs (Shared Objects)\n*.o\n*.a\n*.so\n\n# Folders\n_obj\n_test\n\n# Architecture specific extensions/prefixes\n*.[568vq]\n[568vq].out\n\n*.cgo1.go\n*.cgo2.c\n_cgo_defun.c\n_cgo_gotypes.go\n_cgo_export.*\n\n_testmain.go\n\n*.exe\n\n*~\n*#\n.build\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/.travis.yml",
    "content": "sudo: false\nlanguage: go\n\ngo:\n - 1.6.3\n - 1.7\n - 1.8.1\n\nscript:\n - go test -short ./...\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/CHANGELOG.md",
    "content": "## 0.8.0 / 2016-08-17\n* [CHANGE] Registry is doing more consistency checks. This might break\n  existing setups that used to export inconsistent metrics.\n* [CHANGE] Pushing to Pushgateway moved to package `push` and changed to allow\n  arbitrary grouping.\n* [CHANGE] Removed `SelfCollector`.\n* [CHANGE] Removed `PanicOnCollectError` and `EnableCollectChecks` methods.\n* [CHANGE] Moved packages to the prometheus/common repo: `text`, `model`,\n  `extraction`.\n* [CHANGE] Deprecated a number of functions.\n* [FEATURE] Allow custom registries. Added `Registerer` and `Gatherer`\n  interfaces.\n* [FEATURE] Separated HTTP exposition, allowing custom HTTP handlers (package\n  `promhttp`) and enabling the creation of other exposition mechanisms.\n* [FEATURE] `MustRegister` is variadic now, allowing registration of many\n  collectors in one call.\n* [FEATURE] Added HTTP API v1 package.\n* [ENHANCEMENT] Numerous documentation improvements.\n* [ENHANCEMENT] Improved metric sorting.\n* [ENHANCEMENT] Inlined fnv64a hashing for improved performance.\n* [ENHANCEMENT] Several test improvements.\n* [BUGFIX] Handle collisions in MetricVec.\n\n## 0.7.0 / 2015-07-27\n* [CHANGE] Rename ExporterLabelPrefix to ExportedLabelPrefix.\n* [BUGFIX] Closed gaps in metric consistency check.\n* [BUGFIX] Validate LabelName/LabelSet on JSON unmarshaling.\n* [ENHANCEMENT] Document the possibility to create \"empty\" metrics in\n  a metric vector.\n* [ENHANCEMENT] Fix and clarify various doc comments and the README.md.\n* [ENHANCEMENT] (Kind of) solve \"The Proxy Problem\" of http.InstrumentHandler.\n* [ENHANCEMENT] Change responseWriterDelegator.written to int64.\n\n## 0.6.0 / 2015-06-01\n* [CHANGE] Rename process_goroutines to go_goroutines.\n* [ENHANCEMENT] Validate label names during YAML decoding.\n* [ENHANCEMENT] Add LabelName regular expression.\n* [BUGFIX] Ensure alignment of struct members for 32-bit systems.\n\n## 0.5.0 / 2015-05-06\n* [BUGFIX] Removed a weakness in the fingerprinting aka signature code.\n  This makes fingerprinting slower and more allocation-heavy, but the\n  weakness was too severe to be tolerated.\n* [CHANGE] As a result of the above, Metric.Fingerprint is now returning\n  a different fingerprint. To keep the same fingerprint, the new method\n  Metric.FastFingerprint was introduced, which will be used by the\n  Prometheus server for storage purposes (implying that a collision\n  detection has to be added, too).\n* [ENHANCEMENT] The Metric.Equal and Metric.Before do not depend on\n  fingerprinting anymore, removing the possibility of an undetected\n  fingerprint collision.\n* [FEATURE] The Go collector in the exposition library includes garbage\n  collection stats.\n* [FEATURE] The exposition library allows to create constant \"throw-away\"\n  summaries and histograms.\n* [CHANGE] A number of new reserved labels and prefixes.\n\n## 0.4.0 / 2015-04-08\n* [CHANGE] Return NaN when Summaries have no observations yet.\n* [BUGFIX] Properly handle Summary decay upon Write().\n* [BUGFIX] Fix the documentation link to the consumption library.\n* [FEATURE] Allow the metric family injection hook to merge with existing\n  metric families.\n* [ENHANCEMENT] Removed cgo dependency and conditional compilation of procfs.\n* [MAINTENANCE] Adjusted to changes in matttproud/golang_protobuf_extensions.\n\n## 0.3.2 / 2015-03-11\n* [BUGFIX] Fixed the receiver type of COWMetric.Set(). 
This method is\n  only used by the Prometheus server internally.\n* [CLEANUP] Added licenses of vendored code left out by godep.\n\n## 0.3.1 / 2015-03-04\n* [ENHANCEMENT] Switched fingerprinting functions from own free list to\n  sync.Pool.\n* [CHANGE] Makefile uses Go 1.4.2 now (only relevant for examples and tests).\n\n## 0.3.0 / 2015-03-03\n* [CHANGE] Changed the fingerprinting for metrics. THIS WILL INVALIDATE ALL\n  PERSISTED FINGERPRINTS. IF YOU COMPILE THE PROMETHEUS SERVER WITH THIS\n  VERSION, YOU HAVE TO WIPE THE PREVIOUSLY CREATED STORAGE.\n* [CHANGE] LabelValuesToSignature removed. (Nobody had used it, and it was\n  arguably broken.)\n* [CHANGE] Vendored dependencies. Those are only used by the Makefile. If\n  client_golang is used as a library, the vendoring will stay out of your way.\n* [BUGFIX] Remove a weakness in the fingerprinting for metrics. (This made\n  the fingerprinting change above necessary.)\n* [FEATURE] Added new fingerprinting functions SignatureForLabels and\n  SignatureWithoutLabels to be used by the Prometheus server. These functions\n  require fewer allocations than the ones currently used by the server.\n\n## 0.2.0 / 2015-02-23\n* [FEATURE] Introduce new Histogram metric type.\n* [CHANGE] Ignore process collector errors for now (better error handling\n  pending).\n* [CHANGE] Use clear error interface for process pidFn.\n* [BUGFIX] Fix Go download links for several archs and OSes.\n* [ENHANCEMENT] Massively improve Gauge and Counter performance.\n* [ENHANCEMENT] Catch illegal label names for summaries in histograms.\n* [ENHANCEMENT] Reduce allocations during fingerprinting.\n* [ENHANCEMENT] Remove cgo dependency. procfs package will only be included if\n  both cgo is available and the build is for an OS with procfs.\n* [CLEANUP] Clean up code style issues.\n* [CLEANUP] Mark slow tests as such and exclude them from travis.\n* [CLEANUP] Update protobuf library package name.\n* [CLEANUP] Updated vendoring of beorn7/perks.\n\n## 0.1.0 / 2015-02-02\n* [CLEANUP] Introduced semantic versioning and changelog. From now on,\n  changes will be reported in this file.\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/CONTRIBUTING.md",
    "content": "# Contributing\n\nPrometheus uses GitHub to manage reviews of pull requests.\n\n* If you have a trivial fix or improvement, go ahead and create a pull request,\n  addressing (with `@...`) the maintainer of this repository (see\n  [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.\n\n* If you plan to do something more involved, first discuss your ideas\n  on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).\n  This will avoid unnecessary work and surely give you and us a good deal\n  of inspiration.\n\n* Relevant coding style guidelines are the [Go Code Review\n  Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)\n  and the _Formatting and style_ section of Peter Bourgon's [Go: Best\n  Practices for Production\n  Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. 
For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. 
Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. (Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/MAINTAINERS.md",
    "content": "* Björn Rabenstein <beorn@soundcloud.com>\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/NOTICE",
    "content": "Prometheus instrumentation library for Go applications\nCopyright 2012-2015 The Prometheus Authors\n\nThis product includes software developed at\nSoundCloud Ltd. (http://soundcloud.com/).\n\n\nThe following components are included in this product:\n\nperks - a fork of https://github.com/bmizerany/perks\nhttps://github.com/beorn7/perks\nCopyright 2013-2015 Blake Mizerany, Björn Rabenstein\nSee https://github.com/beorn7/perks/blob/master/README.md for license details.\n\nGo support for Protocol Buffers - Google's data interchange format\nhttp://github.com/golang/protobuf/\nCopyright 2010 The Go Authors\nSee source code for license details.\n\nSupport for streaming Protocol Buffer messages for the Go language (golang).\nhttps://github.com/matttproud/golang_protobuf_extensions\nCopyright 2013 Matt T. Proud\nLicensed under the Apache License, Version 2.0\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/README.md",
    "content": "# Prometheus Go client library\n\n[![Build Status](https://travis-ci.org/prometheus/client_golang.svg?branch=master)](https://travis-ci.org/prometheus/client_golang)\n[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/client_golang)](https://goreportcard.com/report/github.com/prometheus/client_golang)\n\nThis is the [Go](http://golang.org) client library for\n[Prometheus](http://prometheus.io). It has two separate parts, one for\ninstrumenting application code, and one for creating clients that talk to the\nPrometheus HTTP API.\n\n## Instrumenting applications\n\n[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/prometheus)](http://gocover.io/github.com/prometheus/client_golang/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus)\n\nThe\n[`prometheus` directory](https://github.com/prometheus/client_golang/tree/master/prometheus)\ncontains the instrumentation library. See the\n[best practices section](http://prometheus.io/docs/practices/naming/) of the\nPrometheus documentation to learn more about instrumenting applications.\n\nThe\n[`examples` directory](https://github.com/prometheus/client_golang/tree/master/examples)\ncontains simple examples of instrumented code.\n\n## Client for the Prometheus HTTP API\n\n[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/api/prometheus)](http://gocover.io/github.com/prometheus/client_golang/api/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/api/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/api/prometheus)\n\nThe\n[`api/prometheus` directory](https://github.com/prometheus/client_golang/tree/master/api/prometheus)\ncontains the client for the\n[Prometheus HTTP API](http://prometheus.io/docs/querying/api/). It allows you\nto write Go applications that query time series data from a Prometheus\nserver. It is still in alpha stage.\n\n## Where are `model`, `extraction`, and `text`?\n\nThe `model` package has been moved to\n[`prometheus/common/model`](https://github.com/prometheus/common/tree/master/model).\n\nThe `extraction` and `text` packages are now contained in\n[`prometheus/common/expfmt`](https://github.com/prometheus/common/tree/master/expfmt).\n\n## Contributing and community\n\nSee the [contributing guidelines](CONTRIBUTING.md) and the\n[Community section](http://prometheus.io/community/) of the homepage.\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/VERSION",
    "content": "0.8.0\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/.gitignore",
    "content": "command-line-arguments.test\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/README.md",
    "content": "See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/benchmark_test.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus\n\nimport (\n\t\"sync\"\n\t\"testing\"\n)\n\nfunc BenchmarkCounterWithLabelValues(b *testing.B) {\n\tm := NewCounterVec(\n\t\tCounterOpts{\n\t\t\tName: \"benchmark_counter\",\n\t\t\tHelp: \"A counter to benchmark it.\",\n\t\t},\n\t\t[]string{\"one\", \"two\", \"three\"},\n\t)\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tm.WithLabelValues(\"eins\", \"zwei\", \"drei\").Inc()\n\t}\n}\n\nfunc BenchmarkCounterWithLabelValuesConcurrent(b *testing.B) {\n\tm := NewCounterVec(\n\t\tCounterOpts{\n\t\t\tName: \"benchmark_counter\",\n\t\t\tHelp: \"A counter to benchmark it.\",\n\t\t},\n\t\t[]string{\"one\", \"two\", \"three\"},\n\t)\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\twg := sync.WaitGroup{}\n\tfor i := 0; i < 10; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tfor j := 0; j < b.N/10; j++ {\n\t\t\t\tm.WithLabelValues(\"eins\", \"zwei\", \"drei\").Inc()\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t}\n\twg.Wait()\n}\n\nfunc BenchmarkCounterWithMappedLabels(b *testing.B) {\n\tm := NewCounterVec(\n\t\tCounterOpts{\n\t\t\tName: \"benchmark_counter\",\n\t\t\tHelp: \"A counter to benchmark it.\",\n\t\t},\n\t\t[]string{\"one\", \"two\", \"three\"},\n\t)\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tm.With(Labels{\"two\": \"zwei\", \"one\": \"eins\", \"three\": \"drei\"}).Inc()\n\t}\n}\n\nfunc BenchmarkCounterWithPreparedMappedLabels(b *testing.B) {\n\tm := NewCounterVec(\n\t\tCounterOpts{\n\t\t\tName: \"benchmark_counter\",\n\t\t\tHelp: \"A counter to benchmark it.\",\n\t\t},\n\t\t[]string{\"one\", \"two\", \"three\"},\n\t)\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tlabels := Labels{\"two\": \"zwei\", \"one\": \"eins\", \"three\": \"drei\"}\n\tfor i := 0; i < b.N; i++ {\n\t\tm.With(labels).Inc()\n\t}\n}\n\nfunc BenchmarkCounterNoLabels(b *testing.B) {\n\tm := NewCounter(CounterOpts{\n\t\tName: \"benchmark_counter\",\n\t\tHelp: \"A counter to benchmark it.\",\n\t})\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tm.Inc()\n\t}\n}\n\nfunc BenchmarkGaugeWithLabelValues(b *testing.B) {\n\tm := NewGaugeVec(\n\t\tGaugeOpts{\n\t\t\tName: \"benchmark_gauge\",\n\t\t\tHelp: \"A gauge to benchmark it.\",\n\t\t},\n\t\t[]string{\"one\", \"two\", \"three\"},\n\t)\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tm.WithLabelValues(\"eins\", \"zwei\", \"drei\").Set(3.1415)\n\t}\n}\n\nfunc BenchmarkGaugeNoLabels(b *testing.B) {\n\tm := NewGauge(GaugeOpts{\n\t\tName: \"benchmark_gauge\",\n\t\tHelp: \"A gauge to benchmark it.\",\n\t})\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tm.Set(3.1415)\n\t}\n}\n\nfunc BenchmarkSummaryWithLabelValues(b *testing.B) {\n\tm := NewSummaryVec(\n\t\tSummaryOpts{\n\t\t\tName:       \"benchmark_summary\",\n\t\t\tHelp:       \"A summary to benchmark it.\",\n\t\t\tObjectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 
0.001},\n\t\t},\n\t\t[]string{\"one\", \"two\", \"three\"},\n\t)\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tm.WithLabelValues(\"eins\", \"zwei\", \"drei\").Observe(3.1415)\n\t}\n}\n\nfunc BenchmarkSummaryNoLabels(b *testing.B) {\n\tm := NewSummary(SummaryOpts{\n\t\tName:       \"benchmark_summary\",\n\t\tHelp:       \"A summary to benchmark it.\",\n\t\tObjectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},\n\t},\n\t)\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tm.Observe(3.1415)\n\t}\n}\n\nfunc BenchmarkHistogramWithLabelValues(b *testing.B) {\n\tm := NewHistogramVec(\n\t\tHistogramOpts{\n\t\t\tName: \"benchmark_histogram\",\n\t\t\tHelp: \"A histogram to benchmark it.\",\n\t\t},\n\t\t[]string{\"one\", \"two\", \"three\"},\n\t)\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tm.WithLabelValues(\"eins\", \"zwei\", \"drei\").Observe(3.1415)\n\t}\n}\n\nfunc BenchmarkHistogramNoLabels(b *testing.B) {\n\tm := NewHistogram(HistogramOpts{\n\t\tName: \"benchmark_histogram\",\n\t\tHelp: \"A histogram to benchmark it.\",\n\t},\n\t)\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tm.Observe(3.1415)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/collector.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus\n\n// Collector is the interface implemented by anything that can be used by\n// Prometheus to collect metrics. A Collector has to be registered for\n// collection. See Registerer.Register.\n//\n// The stock metrics provided by this package (Gauge, Counter, Summary,\n// Histogram, Untyped) are also Collectors (which only ever collect one metric,\n// namely itself). An implementer of Collector may, however, collect multiple\n// metrics in a coordinated fashion and/or create metrics on the fly. Examples\n// for collectors already implemented in this library are the metric vectors\n// (i.e. collection of multiple instances of the same Metric but with different\n// label values) like GaugeVec or SummaryVec, and the ExpvarCollector.\ntype Collector interface {\n\t// Describe sends the super-set of all possible descriptors of metrics\n\t// collected by this Collector to the provided channel and returns once\n\t// the last descriptor has been sent. The sent descriptors fulfill the\n\t// consistency and uniqueness requirements described in the Desc\n\t// documentation. (It is valid if one and the same Collector sends\n\t// duplicate descriptors. Those duplicates are simply ignored. However,\n\t// two different Collectors must not send duplicate descriptors.) This\n\t// method idempotently sends the same descriptors throughout the\n\t// lifetime of the Collector. If a Collector encounters an error while\n\t// executing this method, it must send an invalid descriptor (created\n\t// with NewInvalidDesc) to signal the error to the registry.\n\tDescribe(chan<- *Desc)\n\t// Collect is called by the Prometheus registry when collecting\n\t// metrics. The implementation sends each collected metric via the\n\t// provided channel and returns once the last metric has been sent. The\n\t// descriptor of each sent metric is one of those returned by\n\t// Describe. Returned metrics that share the same descriptor must differ\n\t// in their variable label values. This method may be called\n\t// concurrently and must therefore be implemented in a concurrency safe\n\t// way. Blocking occurs at the expense of total performance of rendering\n\t// all registered metrics. Ideally, Collector implementations support\n\t// concurrent readers.\n\tCollect(chan<- Metric)\n}\n\n// selfCollector implements Collector for a single Metric so that the Metric\n// collects itself. Add it as an anonymous field to a struct that implements\n// Metric, and call init with the Metric itself as an argument.\ntype selfCollector struct {\n\tself Metric\n}\n\n// init provides the selfCollector with a reference to the metric it is supposed\n// to collect. It is usually called within the factory function to create a\n// metric. 
See example.\nfunc (c *selfCollector) init(self Metric) {\n\tc.self = self\n}\n\n// Describe implements Collector.\nfunc (c *selfCollector) Describe(ch chan<- *Desc) {\n\tch <- c.self.Desc()\n}\n\n// Collect implements Collector.\nfunc (c *selfCollector) Collect(ch chan<- Metric) {\n\tch <- c.self\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/counter.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus\n\nimport (\n\t\"errors\"\n)\n\n// Counter is a Metric that represents a single numerical value that only ever\n// goes up. That implies that it cannot be used to count items whose number can\n// also go down, e.g. the number of currently running goroutines. Those\n// \"counters\" are represented by Gauges.\n//\n// A Counter is typically used to count requests served, tasks completed, errors\n// occurred, etc.\n//\n// To create Counter instances, use NewCounter.\ntype Counter interface {\n\tMetric\n\tCollector\n\n\t// Inc increments the counter by 1. Use Add to increment it by arbitrary\n\t// non-negative values.\n\tInc()\n\t// Add adds the given value to the counter. It panics if the value is <\n\t// 0.\n\tAdd(float64)\n}\n\n// CounterOpts is an alias for Opts. See there for doc comments.\ntype CounterOpts Opts\n\n// NewCounter creates a new Counter based on the provided CounterOpts.\nfunc NewCounter(opts CounterOpts) Counter {\n\tdesc := NewDesc(\n\t\tBuildFQName(opts.Namespace, opts.Subsystem, opts.Name),\n\t\topts.Help,\n\t\tnil,\n\t\topts.ConstLabels,\n\t)\n\tresult := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}}\n\tresult.init(result) // Init self-collection.\n\treturn result\n}\n\ntype counter struct {\n\tvalue\n}\n\nfunc (c *counter) Add(v float64) {\n\tif v < 0 {\n\t\tpanic(errors.New(\"counter cannot decrease in value\"))\n\t}\n\tc.value.Add(v)\n}\n\n// CounterVec is a Collector that bundles a set of Counters that all share the\n// same Desc, but have different values for their variable labels. This is used\n// if you want to count the same thing partitioned by various dimensions\n// (e.g. number of HTTP requests, partitioned by response code and\n// method). Create instances with NewCounterVec.\ntype CounterVec struct {\n\t*metricVec\n}\n\n// NewCounterVec creates a new CounterVec based on the provided CounterOpts and\n// partitioned by the given label names.\nfunc NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {\n\tdesc := NewDesc(\n\t\tBuildFQName(opts.Namespace, opts.Subsystem, opts.Name),\n\t\topts.Help,\n\t\tlabelNames,\n\t\topts.ConstLabels,\n\t)\n\treturn &CounterVec{\n\t\tmetricVec: newMetricVec(desc, func(lvs ...string) Metric {\n\t\t\tresult := &counter{value: value{\n\t\t\t\tdesc:       desc,\n\t\t\t\tvalType:    CounterValue,\n\t\t\t\tlabelPairs: makeLabelPairs(desc, lvs),\n\t\t\t}}\n\t\t\tresult.init(result) // Init self-collection.\n\t\t\treturn result\n\t\t}),\n\t}\n}\n\n// GetMetricWithLabelValues returns the Counter for the given slice of label\n// values (same order as the VariableLabels in Desc). If that combination of\n// label values is accessed for the first time, a new Counter is created.\n//\n// It is possible to call this method without using the returned Counter to only\n// create the new Counter but leave it at its starting value 0. 
See also the\n// SummaryVec example.\n//\n// Keeping the Counter for later use is possible (and should be considered if\n// performance is critical), but keep in mind that Reset, DeleteLabelValues and\n// Delete can be used to delete the Counter from the CounterVec. In that case,\n// the Counter will still exist, but it will not be exported anymore, even if a\n// Counter with the same label values is created later.\n//\n// An error is returned if the number of label values is not the same as the\n// number of VariableLabels in Desc.\n//\n// Note that for more than one label value, this method is prone to mistakes\n// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as\n// an alternative to avoid that type of mistake. For higher label numbers, the\n// latter has a much more readable (albeit more verbose) syntax, but it comes\n// with a performance overhead (for creating and processing the Labels map).\n// See also the GaugeVec example.\nfunc (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {\n\tmetric, err := m.metricVec.getMetricWithLabelValues(lvs...)\n\tif metric != nil {\n\t\treturn metric.(Counter), err\n\t}\n\treturn nil, err\n}\n\n// GetMetricWith returns the Counter for the given Labels map (the label names\n// must match those of the VariableLabels in Desc). If that label map is\n// accessed for the first time, a new Counter is created. Implications of\n// creating a Counter without using it and keeping the Counter for later use are\n// the same as for GetMetricWithLabelValues.\n//\n// An error is returned if the number and names of the Labels are inconsistent\n// with those of the VariableLabels in Desc.\n//\n// This method is used for the same purpose as\n// GetMetricWithLabelValues(...string). See there for pros and cons of the two\n// methods.\nfunc (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {\n\tmetric, err := m.metricVec.getMetricWith(labels)\n\tif metric != nil {\n\t\treturn metric.(Counter), err\n\t}\n\treturn nil, err\n}\n\n// WithLabelValues works as GetMetricWithLabelValues, but panics where\n// GetMetricWithLabelValues would have returned an error. By not returning an\n// error, WithLabelValues allows shortcuts like\n//     myVec.WithLabelValues(\"404\", \"GET\").Add(42)\nfunc (m *CounterVec) WithLabelValues(lvs ...string) Counter {\n\treturn m.metricVec.withLabelValues(lvs...).(Counter)\n}\n\n// With works as GetMetricWith, but panics where GetMetricWith would have\n// returned an error. By not returning an error, With allows shortcuts like\n//     myVec.With(Labels{\"code\": \"404\", \"method\": \"GET\"}).Add(42)\nfunc (m *CounterVec) With(labels Labels) Counter {\n\treturn m.metricVec.with(labels).(Counter)\n}\n\n// CounterFunc is a Counter whose value is determined at collect time by calling a\n// provided function.\n//\n// To create CounterFunc instances, use NewCounterFunc.\ntype CounterFunc interface {\n\tMetric\n\tCollector\n}\n\n// NewCounterFunc creates a new CounterFunc based on the provided\n// CounterOpts. The value reported is determined by calling the given function\n// from within the Write method. Take into account that metric collection may\n// happen concurrently. If that results in concurrent calls to Write, like in\n// the case where a CounterFunc is directly registered with Prometheus, the\n// provided function must be concurrency-safe. The function should also honor\n// the contract for a Counter (values only go up, not down), but compliance will\n// not be checked.\nfunc NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc {\n\treturn newValueFunc(NewDesc(\n\t\tBuildFQName(opts.Namespace, opts.Subsystem, opts.Name),\n\t\topts.Help,\n\t\tnil,\n\t\topts.ConstLabels,\n\t), CounterValue, function)\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/counter_test.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"testing\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n)\n\nfunc TestCounterAdd(t *testing.T) {\n\tcounter := NewCounter(CounterOpts{\n\t\tName:        \"test\",\n\t\tHelp:        \"test help\",\n\t\tConstLabels: Labels{\"a\": \"1\", \"b\": \"2\"},\n\t}).(*counter)\n\tcounter.Inc()\n\tif expected, got := 1., math.Float64frombits(counter.valBits); expected != got {\n\t\tt.Errorf(\"Expected %f, got %f.\", expected, got)\n\t}\n\tcounter.Add(42)\n\tif expected, got := 43., math.Float64frombits(counter.valBits); expected != got {\n\t\tt.Errorf(\"Expected %f, got %f.\", expected, got)\n\t}\n\n\tif expected, got := \"counter cannot decrease in value\", decreaseCounter(counter).Error(); expected != got {\n\t\tt.Errorf(\"Expected error %q, got %q.\", expected, got)\n\t}\n\n\tm := &dto.Metric{}\n\tcounter.Write(m)\n\n\tif expected, got := `label:<name:\"a\" value:\"1\" > label:<name:\"b\" value:\"2\" > counter:<value:43 > `, m.String(); expected != got {\n\t\tt.Errorf(\"expected %q, got %q\", expected, got)\n\t}\n}\n\nfunc decreaseCounter(c *counter) (err error) {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\terr = e.(error)\n\t\t}\n\t}()\n\tc.Add(-1)\n\treturn nil\n}\n\nfunc TestCounterVecGetMetricWithInvalidLabelValues(t *testing.T) {\n\ttestCases := []struct {\n\t\tdesc   string\n\t\tlabels Labels\n\t}{\n\t\t{\n\t\t\tdesc:   \"non utf8 label value\",\n\t\t\tlabels: Labels{\"a\": \"\\xFF\"},\n\t\t},\n\t\t{\n\t\t\tdesc:   \"not enough label values\",\n\t\t\tlabels: Labels{},\n\t\t},\n\t\t{\n\t\t\tdesc:   \"too many label values\",\n\t\t\tlabels: Labels{\"a\": \"1\", \"b\": \"2\"},\n\t\t},\n\t}\n\n\tfor _, test := range testCases {\n\t\tcounterVec := NewCounterVec(CounterOpts{\n\t\t\tName: \"test\",\n\t\t}, []string{\"a\"})\n\n\t\tlabelValues := make([]string, len(test.labels))\n\t\tfor _, val := range test.labels {\n\t\t\tlabelValues = append(labelValues, val)\n\t\t}\n\n\t\texpectPanic(t, func() {\n\t\t\tcounterVec.WithLabelValues(labelValues...)\n\t\t}, fmt.Sprintf(\"WithLabelValues: expected panic because: %s\", test.desc))\n\t\texpectPanic(t, func() {\n\t\t\tcounterVec.With(test.labels)\n\t\t}, fmt.Sprintf(\"WithLabelValues: expected panic because: %s\", test.desc))\n\n\t\tif _, err := counterVec.GetMetricWithLabelValues(labelValues...); err == nil {\n\t\t\tt.Errorf(\"GetMetricWithLabelValues: expected error because: %s\", test.desc)\n\t\t}\n\t\tif _, err := counterVec.GetMetricWith(test.labels); err == nil {\n\t\t\tt.Errorf(\"GetMetricWith: expected error because: %s\", test.desc)\n\t\t}\n\t}\n}\n\nfunc expectPanic(t *testing.T, op func(), errorMsg string) {\n\tdefer func() {\n\t\tif err := recover(); err == nil {\n\t\t\tt.Error(errorMsg)\n\t\t}\n\t}()\n\n\top()\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/desc.go",
    "content": "// Copyright 2016 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"github.com/prometheus/common/model\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n)\n\n// Desc is the descriptor used by every Prometheus Metric. It is essentially\n// the immutable meta-data of a Metric. The normal Metric implementations\n// included in this package manage their Desc under the hood. Users only have to\n// deal with Desc if they use advanced features like the ExpvarCollector or\n// custom Collectors and Metrics.\n//\n// Descriptors registered with the same registry have to fulfill certain\n// consistency and uniqueness criteria if they share the same fully-qualified\n// name: They must have the same help string and the same label names (aka label\n// dimensions) in each, constLabels and variableLabels, but they must differ in\n// the values of the constLabels.\n//\n// Descriptors that share the same fully-qualified names and the same label\n// values of their constLabels are considered equal.\n//\n// Use NewDesc to create new Desc instances.\ntype Desc struct {\n\t// fqName has been built from Namespace, Subsystem, and Name.\n\tfqName string\n\t// help provides some helpful information about this metric.\n\thelp string\n\t// constLabelPairs contains precalculated DTO label pairs based on\n\t// the constant labels.\n\tconstLabelPairs []*dto.LabelPair\n\t// VariableLabels contains names of labels for which the metric\n\t// maintains variable values.\n\tvariableLabels []string\n\t// id is a hash of the values of the ConstLabels and fqName. This\n\t// must be unique among all registered descriptors and can therefore be\n\t// used as an identifier of the descriptor.\n\tid uint64\n\t// dimHash is a hash of the label names (preset and variable) and the\n\t// Help string. Each Desc with the same fqName must have the same\n\t// dimHash.\n\tdimHash uint64\n\t// err is an error that occurred during construction. It is reported on\n\t// registration time.\n\terr error\n}\n\n// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc\n// and will be reported on registration time. variableLabels and constLabels can\n// be nil if no such labels should be set. fqName and help must not be empty.\n//\n// variableLabels only contain the label names. Their label values are variable\n// and therefore not part of the Desc. (They are managed within the Metric.)\n//\n// For constLabels, the label values are constant. Therefore, they are fully\n// specified in the Desc. 
See the Opts documentation for the implications of\n// constant labels.\nfunc NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc {\n\td := &Desc{\n\t\tfqName:         fqName,\n\t\thelp:           help,\n\t\tvariableLabels: variableLabels,\n\t}\n\tif help == \"\" {\n\t\td.err = errors.New(\"empty help string\")\n\t\treturn d\n\t}\n\tif !model.IsValidMetricName(model.LabelValue(fqName)) {\n\t\td.err = fmt.Errorf(\"%q is not a valid metric name\", fqName)\n\t\treturn d\n\t}\n\t// labelValues contains the label values of const labels (in order of\n\t// their sorted label names) plus the fqName (at position 0).\n\tlabelValues := make([]string, 1, len(constLabels)+1)\n\tlabelValues[0] = fqName\n\tlabelNames := make([]string, 0, len(constLabels)+len(variableLabels))\n\tlabelNameSet := map[string]struct{}{}\n\t// First add only the const label names and sort them...\n\tfor labelName := range constLabels {\n\t\tif !checkLabelName(labelName) {\n\t\t\td.err = fmt.Errorf(\"%q is not a valid label name\", labelName)\n\t\t\treturn d\n\t\t}\n\t\tlabelNames = append(labelNames, labelName)\n\t\tlabelNameSet[labelName] = struct{}{}\n\t}\n\tsort.Strings(labelNames)\n\t// ... so that we can now add const label values in the order of their names.\n\tfor _, labelName := range labelNames {\n\t\tlabelValues = append(labelValues, constLabels[labelName])\n\t}\n\t// Validate the const label values. They can't have a wrong cardinality, so\n\t// use len(labelValues) as expectedNumberOfValues.\n\tif err := validateLabelValues(labelValues, len(labelValues)); err != nil {\n\t\td.err = err\n\t\treturn d\n\t}\n\t// Now add the variable label names, but prefix them with something that\n\t// cannot be in a regular label name. That prevents matching the label\n\t// dimension with a different mix between preset and variable labels.\n\tfor _, labelName := range variableLabels {\n\t\tif !checkLabelName(labelName) {\n\t\t\td.err = fmt.Errorf(\"%q is not a valid label name\", labelName)\n\t\t\treturn d\n\t\t}\n\t\tlabelNames = append(labelNames, \"$\"+labelName)\n\t\tlabelNameSet[labelName] = struct{}{}\n\t}\n\tif len(labelNames) != len(labelNameSet) {\n\t\td.err = errors.New(\"duplicate label names\")\n\t\treturn d\n\t}\n\n\tvh := hashNew()\n\tfor _, val := range labelValues {\n\t\tvh = hashAdd(vh, val)\n\t\tvh = hashAddByte(vh, separatorByte)\n\t}\n\td.id = vh\n\t// Sort labelNames so that order doesn't matter for the hash.\n\tsort.Strings(labelNames)\n\t// Now hash together (in this order) the help string and the sorted\n\t// label names.\n\tlh := hashNew()\n\tlh = hashAdd(lh, help)\n\tlh = hashAddByte(lh, separatorByte)\n\tfor _, labelName := range labelNames {\n\t\tlh = hashAdd(lh, labelName)\n\t\tlh = hashAddByte(lh, separatorByte)\n\t}\n\td.dimHash = lh\n\n\td.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels))\n\tfor n, v := range constLabels {\n\t\td.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{\n\t\t\tName:  proto.String(n),\n\t\t\tValue: proto.String(v),\n\t\t})\n\t}\n\tsort.Sort(LabelPairSorter(d.constLabelPairs))\n\treturn d\n}\n\n// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the\n// provided error set. If a collector returning such a descriptor is registered,\n// registration will fail with the provided error. 
NewInvalidDesc can be used by\n// a Collector to signal inability to describe itself.\nfunc NewInvalidDesc(err error) *Desc {\n\treturn &Desc{\n\t\terr: err,\n\t}\n}\n\nfunc (d *Desc) String() string {\n\tlpStrings := make([]string, 0, len(d.constLabelPairs))\n\tfor _, lp := range d.constLabelPairs {\n\t\tlpStrings = append(\n\t\t\tlpStrings,\n\t\t\tfmt.Sprintf(\"%s=%q\", lp.GetName(), lp.GetValue()),\n\t\t)\n\t}\n\treturn fmt.Sprintf(\n\t\t\"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}\",\n\t\td.fqName,\n\t\td.help,\n\t\tstrings.Join(lpStrings, \",\"),\n\t\td.variableLabels,\n\t)\n}\n"
  },
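  {
    "path": "examples/prometheus/constmetric/main.go",
    "content": "// Illustrative sketch, not part of upstream client_golang: it ties NewDesc\n// from desc.go above to MustNewConstMetric, with one variable label and one\n// constant label. All metric and label names are invented for this example.\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n)\n\nfunc main() {\n\tdesc := prometheus.NewDesc(\n\t\t\"example_queue_length\",\n\t\t\"Length of a queue, partitioned by shard.\",\n\t\t[]string{\"shard\"},                   // variable label: value supplied per metric\n\t\tprometheus.Labels{\"region\": \"eu-1\"}, // constant label: fully specified in the Desc\n\t)\n\tfmt.Println(desc) // Desc implements Stringer, which is handy for debugging.\n\n\t// Create a throw-away metric for shard \"7\", as a custom Collector\n\t// would do in its Collect method.\n\tm := prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, 42, \"7\")\n\tout := &dto.Metric{}\n\tm.Write(out)\n\tfmt.Println(out.String())\n}\n"
  },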
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/desc_test.go",
    "content": "package prometheus\n\nimport (\n\t\"testing\"\n)\n\nfunc TestNewDescInvalidLabelValues(t *testing.T) {\n\tdesc := NewDesc(\n\t\t\"sample_label\",\n\t\t\"sample label\",\n\t\tnil,\n\t\tLabels{\"a\": \"\\xFF\"},\n\t)\n\tif desc.err == nil {\n\t\tt.Errorf(\"NewDesc: expected error because: %s\", desc.err)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/doc.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// Package prometheus provides metrics primitives to instrument code for\n// monitoring. It also offers a registry for metrics. Sub-packages allow to\n// expose the registered metrics via HTTP (package promhttp) or push them to a\n// Pushgateway (package push).\n//\n// All exported functions and methods are safe to be used concurrently unless\n// specified otherwise.\n//\n// A Basic Example\n//\n// As a starting point, a very basic usage example:\n//\n//    package main\n//\n//    import (\n//    \t\"log\"\n//    \t\"net/http\"\n//\n//    \t\"github.com/prometheus/client_golang/prometheus\"\n//    \t\"github.com/prometheus/client_golang/prometheus/promhttp\"\n//    )\n//\n//    var (\n//    \tcpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{\n//    \t\tName: \"cpu_temperature_celsius\",\n//    \t\tHelp: \"Current temperature of the CPU.\",\n//    \t})\n//    \thdFailures = prometheus.NewCounterVec(\n//    \t\tprometheus.CounterOpts{\n//    \t\t\tName: \"hd_errors_total\",\n//    \t\t\tHelp: \"Number of hard-disk errors.\",\n//    \t\t},\n//    \t\t[]string{\"device\"},\n//    \t)\n//    )\n//\n//    func init() {\n//    \t// Metrics have to be registered to be exposed:\n//    \tprometheus.MustRegister(cpuTemp)\n//    \tprometheus.MustRegister(hdFailures)\n//    }\n//\n//    func main() {\n//    \tcpuTemp.Set(65.3)\n//    \thdFailures.With(prometheus.Labels{\"device\":\"/dev/sda\"}).Inc()\n//\n//    \t// The Handler function provides a default handler to expose metrics\n//    \t// via an HTTP server. \"/metrics\" is the usual endpoint for that.\n//    \thttp.Handle(\"/metrics\", promhttp.Handler())\n//    \tlog.Fatal(http.ListenAndServe(\":8080\", nil))\n//    }\n//\n//\n// This is a complete program that exports two metrics, a Gauge and a Counter,\n// the latter with a label attached to turn it into a (one-dimensional) vector.\n//\n// Metrics\n//\n// The number of exported identifiers in this package might appear a bit\n// overwhelming. However, in addition to the basic plumbing shown in the example\n// above, you only need to understand the different metric types and their\n// vector versions for basic usage.\n//\n// Above, you have already touched the Counter and the Gauge. There are two more\n// advanced metric types: the Summary and Histogram. A more thorough description\n// of those four metric types can be found in the Prometheus docs:\n// https://prometheus.io/docs/concepts/metric_types/\n//\n// A fifth \"type\" of metric is Untyped. It behaves like a Gauge, but signals the\n// Prometheus server not to assume anything about its type.\n//\n// In addition to the fundamental metric types Gauge, Counter, Summary,\n// Histogram, and Untyped, a very important part of the Prometheus data model is\n// the partitioning of samples along dimensions called labels, which results in\n// metric vectors. 
The fundamental types are GaugeVec, CounterVec, SummaryVec,\n// HistogramVec, and UntypedVec.\n//\n// While only the fundamental metric types implement the Metric interface, both\n// the metrics and their vector versions implement the Collector interface. A\n// Collector manages the collection of a number of Metrics, but for convenience,\n// a Metric can also “collect itself”. Note that Gauge, Counter, Summary,\n// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec,\n// SummaryVec, HistogramVec, and UntypedVec are not.\n//\n// To create instances of Metrics and their vector versions, you need a suitable\n// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, HistogramOpts, or\n// UntypedOpts.\n//\n// Custom Collectors and constant Metrics\n//\n// While you could create your own implementations of Metric, most likely you\n// will only ever implement the Collector interface on your own. At first\n// glance, a custom Collector seems handy to bundle Metrics for common\n// registration (with the prime example of the different metric vectors above,\n// which bundle all the metrics of the same name but with different labels).\n//\n// There is a more involved use case, too: If you already have metrics\n// available, created outside of the Prometheus context, you don't need the\n// interface of the various Metric types. You essentially want to mirror the\n// existing numbers into Prometheus Metrics during collection. Your own\n// implementation of the Collector interface is perfect for that. You can create\n// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and\n// NewConstSummary (and their respective Must… versions). That will happen in\n// the Collect method. The Describe method has to return separate Desc\n// instances, representative of the “throw-away” metrics to be created later.\n// NewDesc comes in handy to create those Desc instances.\n//\n// The Collector example illustrates the use case. You can also look at the\n// source code of the processCollector (mirroring process metrics), the\n// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar\n// metrics) as examples that are used in this package itself.\n//\n// If you just need to call a function to get a single float value to collect as\n// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting\n// shortcuts.\n//\n// Advanced Uses of the Registry\n//\n// While MustRegister is by far the most common way of registering a Collector,\n// sometimes you might want to handle the errors the registration might cause.\n// As suggested by the name, MustRegister panics if an error occurs. With the\n// Register function, the error is returned and can be handled.\n//\n// An error is returned if the registered Collector is incompatible or\n// inconsistent with already registered metrics. The registry aims for\n// consistency of the collected metrics according to the Prometheus data model.\n// Inconsistencies are ideally detected at registration time, not at collect\n// time. The former will usually be detected at start-up time of a program,\n// while the latter will only happen at scrape time, possibly not even on the\n// first scrape if the inconsistency only becomes relevant later. That is the\n// main reason why a Collector and a Metric have to describe themselves to the\n// registry.\n//\n// So far, everything we did operated on the so-called default registry, as it\n// can be found in the global DefaultRegistry variable. 
With NewRegistry, you\n// can create a custom registry, or you can even implement the Registerer or\n// Gatherer interfaces yourself. The methods Register and Unregister work in the\n// same way on a custom registry as the global functions Register and Unregister\n// on the default registry.\n//\n// There are a number of uses for custom registries: You can use registries with\n// special properties, see NewPedanticRegistry. You can avoid global state, as\n// it is imposed by the DefaultRegistry. You can use multiple registries at the\n// same time to expose different metrics in different ways. You can use separate\n// registries for testing purposes.\n//\n// Also note that the DefaultRegistry comes registered with a Collector for Go\n// runtime metrics (via NewGoCollector) and a Collector for process metrics (via\n// NewProcessCollector). With a custom registry, you are in control and decide\n// yourself about the Collectors to register.\n//\n// HTTP Exposition\n//\n// The Registry implements the Gatherer interface. The caller of the Gather\n// method can then expose the gathered metrics in some way. Usually, the metrics\n// are served via HTTP on the /metrics endpoint. That's happening in the example\n// above. The tools to expose metrics via HTTP are in the promhttp sub-package.\n// (The top-level functions in the prometheus package are deprecated.)\n//\n// Pushing to the Pushgateway\n//\n// Functions for pushing to the Pushgateway can be found in the push sub-package.\n//\n// Graphite Bridge\n//\n// Functions and examples to push metrics from a Gatherer to Graphite can be\n// found in the graphite sub-package.\n//\n// Other Means of Exposition\n//\n// More ways of exposing metrics can easily be added by following the approaches\n// of the existing implementations.\npackage prometheus\n"
  },
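  {
    "path": "examples/prometheus/customregistry/main.go",
    "content": "// Illustrative sketch, not part of upstream client_golang: it follows the\n// \"Advanced Uses of the Registry\" section of doc.go above, using a custom\n// Registry gathered by hand and encoded in the text exposition format. The\n// metric name is invented for this example.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/common/expfmt\"\n)\n\nfunc main() {\n\t// A registry fully under our control: no Go runtime or process\n\t// collectors unless we register them ourselves.\n\treg := prometheus.NewRegistry()\n\n\tqueueDepth := prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tName: \"example_queue_depth\",\n\t\tHelp: \"Current depth of the work queue (illustrative).\",\n\t})\n\treg.MustRegister(queueDepth)\n\tqueueDepth.Set(7)\n\n\t// Gather and encode: the same two steps an HTTP handler performs\n\t// when serving /metrics.\n\tmfs, err := reg.Gather()\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\tfor _, mf := range mfs {\n\t\tif _, err := expfmt.MetricFamilyToText(os.Stdout, mf); err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}\n"
  },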
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus_test\n\nimport \"github.com/prometheus/client_golang/prometheus\"\n\n// ClusterManager is an example for a system that might have been built without\n// Prometheus in mind. It models a central manager of jobs running in a\n// cluster. To turn it into something that collects Prometheus metrics, we\n// simply add the two methods required for the Collector interface.\n//\n// An additional challenge is that multiple instances of the ClusterManager are\n// run within the same binary, each in charge of a different zone. We need to\n// make use of ConstLabels to be able to register each ClusterManager instance\n// with Prometheus.\ntype ClusterManager struct {\n\tZone         string\n\tOOMCountDesc *prometheus.Desc\n\tRAMUsageDesc *prometheus.Desc\n\t// ... many more fields\n}\n\n// ReallyExpensiveAssessmentOfTheSystemState is a mock for the data gathering a\n// real cluster manager would have to do. Since it may actually be really\n// expensive, it must only be called once per collection. This implementation,\n// obviously, only returns some made-up data.\nfunc (c *ClusterManager) ReallyExpensiveAssessmentOfTheSystemState() (\n\toomCountByHost map[string]int, ramUsageByHost map[string]float64,\n) {\n\t// Just example fake data.\n\toomCountByHost = map[string]int{\n\t\t\"foo.example.org\": 42,\n\t\t\"bar.example.org\": 2001,\n\t}\n\tramUsageByHost = map[string]float64{\n\t\t\"foo.example.org\": 6.023e23,\n\t\t\"bar.example.org\": 3.14,\n\t}\n\treturn\n}\n\n// Describe simply sends the two Descs in the struct to the channel.\nfunc (c *ClusterManager) Describe(ch chan<- *prometheus.Desc) {\n\tch <- c.OOMCountDesc\n\tch <- c.RAMUsageDesc\n}\n\n// Collect first triggers the ReallyExpensiveAssessmentOfTheSystemState. Then it\n// creates constant metrics for each host on the fly based on the returned data.\n//\n// Note that Collect could be called concurrently, so we depend on\n// ReallyExpensiveAssessmentOfTheSystemState to be concurrency-safe.\nfunc (c *ClusterManager) Collect(ch chan<- prometheus.Metric) {\n\toomCountByHost, ramUsageByHost := c.ReallyExpensiveAssessmentOfTheSystemState()\n\tfor host, oomCount := range oomCountByHost {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.OOMCountDesc,\n\t\t\tprometheus.CounterValue,\n\t\t\tfloat64(oomCount),\n\t\t\thost,\n\t\t)\n\t}\n\tfor host, ramUsage := range ramUsageByHost {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tc.RAMUsageDesc,\n\t\t\tprometheus.GaugeValue,\n\t\t\tramUsage,\n\t\t\thost,\n\t\t)\n\t}\n}\n\n// NewClusterManager creates the two Descs OOMCountDesc and RAMUsageDesc. Note\n// that the zone is set as a ConstLabel. (It's different in each instance of the\n// ClusterManager, but constant over the lifetime of an instance.) Then there is\n// a variable label \"host\", since we want to partition the collected metrics by\n// host. 
Since all Descs created in this way are consistent across instances,\n// with a guaranteed distinction by the \"zone\" label, we can register different\n// ClusterManager instances with the same registry.\nfunc NewClusterManager(zone string) *ClusterManager {\n\treturn &ClusterManager{\n\t\tZone: zone,\n\t\tOOMCountDesc: prometheus.NewDesc(\n\t\t\t\"clustermanager_oom_crashes_total\",\n\t\t\t\"Number of OOM crashes.\",\n\t\t\t[]string{\"host\"},\n\t\t\tprometheus.Labels{\"zone\": zone},\n\t\t),\n\t\tRAMUsageDesc: prometheus.NewDesc(\n\t\t\t\"clustermanager_ram_usage_bytes\",\n\t\t\t\"RAM usage as reported to the cluster manager.\",\n\t\t\t[]string{\"host\"},\n\t\t\tprometheus.Labels{\"zone\": zone},\n\t\t),\n\t}\n}\n\nfunc ExampleCollector() {\n\tworkerDB := NewClusterManager(\"db\")\n\tworkerCA := NewClusterManager(\"ca\")\n\n\t// Since we are dealing with custom Collector implementations, it might\n\t// be a good idea to try it out with a pedantic registry.\n\treg := prometheus.NewPedanticRegistry()\n\treg.MustRegister(workerDB)\n\treg.MustRegister(workerCA)\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/example_timer_complex_test.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus_test\n\nimport (\n\t\"net/http\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nvar (\n\t// apiRequestDuration tracks the duration separate for each HTTP status\n\t// class (1xx, 2xx, ...). This creates a fair amount of time series on\n\t// the Prometheus server. Usually, you would track the duration of\n\t// serving HTTP request without partitioning by outcome. Do something\n\t// like this only if needed. Also note how only status classes are\n\t// tracked, not every single status code. The latter would create an\n\t// even larger amount of time series. Request counters partitioned by\n\t// status code are usually OK as each counter only creates one time\n\t// series. Histograms are way more expensive, so partition with care and\n\t// only where you really need separate latency tracking. Partitioning by\n\t// status class is only an example. In concrete cases, other partitions\n\t// might make more sense.\n\tapiRequestDuration = prometheus.NewHistogramVec(\n\t\tprometheus.HistogramOpts{\n\t\t\tName:    \"api_request_duration_seconds\",\n\t\t\tHelp:    \"Histogram for the request duration of the public API, partitioned by status class.\",\n\t\t\tBuckets: prometheus.ExponentialBuckets(0.1, 1.5, 5),\n\t\t},\n\t\t[]string{\"status_class\"},\n\t)\n)\n\nfunc handler(w http.ResponseWriter, r *http.Request) {\n\tstatus := http.StatusOK\n\t// The ObserverFunc gets called by the deferred ObserveDuration and\n\t// decides which Histogram's Observe method is called.\n\ttimer := prometheus.NewTimer(prometheus.ObserverFunc(func(v float64) {\n\t\tswitch {\n\t\tcase status >= 500: // Server error.\n\t\t\tapiRequestDuration.WithLabelValues(\"5xx\").Observe(v)\n\t\tcase status >= 400: // Client error.\n\t\t\tapiRequestDuration.WithLabelValues(\"4xx\").Observe(v)\n\t\tcase status >= 300: // Redirection.\n\t\t\tapiRequestDuration.WithLabelValues(\"3xx\").Observe(v)\n\t\tcase status >= 200: // Success.\n\t\t\tapiRequestDuration.WithLabelValues(\"2xx\").Observe(v)\n\t\tdefault: // Informational.\n\t\t\tapiRequestDuration.WithLabelValues(\"1xx\").Observe(v)\n\t\t}\n\t}))\n\tdefer timer.ObserveDuration()\n\n\t// Handle the request. Set status accordingly.\n\t// ...\n}\n\nfunc ExampleTimer_complex() {\n\thttp.HandleFunc(\"/api\", handler)\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/example_timer_gauge_test.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus_test\n\nimport (\n\t\"os\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nvar (\n\t// If a function is called rarely (i.e. not more often than scrapes\n\t// happen) or ideally only once (like in a batch job), it can make sense\n\t// to use a Gauge for timing the function call. For timing a batch job\n\t// and pushing the result to a Pushgateway, see also the comprehensive\n\t// example in the push package.\n\tfuncDuration = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tName: \"example_function_duration_seconds\",\n\t\tHelp: \"Duration of the last call of an example function.\",\n\t})\n)\n\nfunc run() error {\n\t// The Set method of the Gauge is used to observe the duration.\n\ttimer := prometheus.NewTimer(prometheus.ObserverFunc(funcDuration.Set))\n\tdefer timer.ObserveDuration()\n\n\t// Do something. Return errors as encountered. The use of 'defer' above\n\t// makes sure the function is still timed properly.\n\treturn nil\n}\n\nfunc ExampleTimer_gauge() {\n\tif err := run(); err != nil {\n\t\tos.Exit(1)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/example_timer_test.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus_test\n\nimport (\n\t\"math/rand\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nvar (\n\trequestDuration = prometheus.NewHistogram(prometheus.HistogramOpts{\n\t\tName:    \"example_request_duration_seconds\",\n\t\tHelp:    \"Histogram for the runtime of a simple example function.\",\n\t\tBuckets: prometheus.LinearBuckets(0.01, 0.01, 10),\n\t})\n)\n\nfunc ExampleTimer() {\n\t// timer times this example function. It uses a Histogram, but a Summary\n\t// would also work, as both implement Observer. Check out\n\t// https://prometheus.io/docs/practices/histograms/ for differences.\n\ttimer := prometheus.NewTimer(requestDuration)\n\tdefer timer.ObserveDuration()\n\n\t// Do something here that takes time.\n\ttime.Sleep(time.Duration(rand.NormFloat64()*10000+50000) * time.Microsecond)\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/examples_test.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"math\"\n\t\"net/http\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strings\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n\t\"github.com/prometheus/common/expfmt\"\n\n\t\"github.com/golang/protobuf/proto\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nfunc ExampleGauge() {\n\topsQueued := prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tNamespace: \"our_company\",\n\t\tSubsystem: \"blob_storage\",\n\t\tName:      \"ops_queued\",\n\t\tHelp:      \"Number of blob storage operations waiting to be processed.\",\n\t})\n\tprometheus.MustRegister(opsQueued)\n\n\t// 10 operations queued by the goroutine managing incoming requests.\n\topsQueued.Add(10)\n\t// A worker goroutine has picked up a waiting operation.\n\topsQueued.Dec()\n\t// And once more...\n\topsQueued.Dec()\n}\n\nfunc ExampleGaugeVec() {\n\topsQueued := prometheus.NewGaugeVec(\n\t\tprometheus.GaugeOpts{\n\t\t\tNamespace: \"our_company\",\n\t\t\tSubsystem: \"blob_storage\",\n\t\t\tName:      \"ops_queued\",\n\t\t\tHelp:      \"Number of blob storage operations waiting to be processed, partitioned by user and type.\",\n\t\t},\n\t\t[]string{\n\t\t\t// Which user has requested the operation?\n\t\t\t\"user\",\n\t\t\t// Of what type is the operation?\n\t\t\t\"type\",\n\t\t},\n\t)\n\tprometheus.MustRegister(opsQueued)\n\n\t// Increase a value using compact (but order-sensitive!) WithLabelValues().\n\topsQueued.WithLabelValues(\"bob\", \"put\").Add(4)\n\t// Increase a value with a map using WithLabels. More verbose, but order\n\t// doesn't matter anymore.\n\topsQueued.With(prometheus.Labels{\"type\": \"delete\", \"user\": \"alice\"}).Inc()\n}\n\nfunc ExampleGaugeFunc() {\n\tif err := prometheus.Register(prometheus.NewGaugeFunc(\n\t\tprometheus.GaugeOpts{\n\t\t\tSubsystem: \"runtime\",\n\t\t\tName:      \"goroutines_count\",\n\t\t\tHelp:      \"Number of goroutines that currently exist.\",\n\t\t},\n\t\tfunc() float64 { return float64(runtime.NumGoroutine()) },\n\t)); err == nil {\n\t\tfmt.Println(\"GaugeFunc 'goroutines_count' registered.\")\n\t}\n\t// Note that the count of goroutines is a gauge (and not a counter) as\n\t// it can go up and down.\n\n\t// Output:\n\t// GaugeFunc 'goroutines_count' registered.\n}\n\nfunc ExampleCounter() {\n\tpushCounter := prometheus.NewCounter(prometheus.CounterOpts{\n\t\tName: \"repository_pushes\", // Note: No help string...\n\t})\n\terr := prometheus.Register(pushCounter) // ... 
so this will return an error.\n\tif err != nil {\n\t\tfmt.Println(\"Push counter couldn't be registered, no counting will happen:\", err)\n\t\treturn\n\t}\n\n\t// Try it once more, this time with a help string.\n\tpushCounter = prometheus.NewCounter(prometheus.CounterOpts{\n\t\tName: \"repository_pushes\",\n\t\tHelp: \"Number of pushes to external repository.\",\n\t})\n\terr = prometheus.Register(pushCounter)\n\tif err != nil {\n\t\tfmt.Println(\"Push counter couldn't be registered AGAIN, no counting will happen:\", err)\n\t\treturn\n\t}\n\n\tpushComplete := make(chan struct{})\n\t// TODO: Start a goroutine that performs repository pushes and reports\n\t// each completion via the channel.\n\tfor range pushComplete {\n\t\tpushCounter.Inc()\n\t}\n\t// Output:\n\t// Push counter couldn't be registered, no counting will happen: descriptor Desc{fqName: \"repository_pushes\", help: \"\", constLabels: {}, variableLabels: []} is invalid: empty help string\n}\n\nfunc ExampleCounterVec() {\n\thttpReqs := prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tName: \"http_requests_total\",\n\t\t\tHelp: \"How many HTTP requests processed, partitioned by status code and HTTP method.\",\n\t\t},\n\t\t[]string{\"code\", \"method\"},\n\t)\n\tprometheus.MustRegister(httpReqs)\n\n\thttpReqs.WithLabelValues(\"404\", \"POST\").Add(42)\n\n\t// If you have to access the same set of labels very frequently, it\n\t// might be good to retrieve the metric only once and keep a handle to\n\t// it. But beware of deletion of that metric, see below!\n\tm := httpReqs.WithLabelValues(\"200\", \"GET\")\n\tfor i := 0; i < 1000000; i++ {\n\t\tm.Inc()\n\t}\n\t// Delete a metric from the vector. If you have previously kept a handle\n\t// to that metric (as above), future updates via that handle will go\n\t// unseen (even if you re-create a metric with the same label set\n\t// later).\n\thttpReqs.DeleteLabelValues(\"200\", \"GET\")\n\t// Same thing with the more verbose Labels syntax.\n\thttpReqs.Delete(prometheus.Labels{\"method\": \"GET\", \"code\": \"200\"})\n}\n\nfunc ExampleInstrumentHandler() {\n\t// Handle the \"/doc\" endpoint with the standard http.FileServer handler.\n\t// By wrapping the handler with InstrumentHandler, request count,\n\t// request and response sizes, and request latency are automatically\n\t// exported to Prometheus, partitioned by HTTP status code and method\n\t// and by the handler name (here \"fileserver\").\n\thttp.Handle(\"/doc\", prometheus.InstrumentHandler(\n\t\t\"fileserver\", http.FileServer(http.Dir(\"/usr/share/doc\")),\n\t))\n\t// The Prometheus handler still has to be registered to handle the\n\t// \"/metrics\" endpoint. The handler returned by prometheus.Handler() is\n\t// already instrumented - with \"prometheus\" as the handler name. 
In this\n\t// example, we want the handler name to be \"metrics\", so we instrument\n\t// the uninstrumented Prometheus handler ourselves.\n\thttp.Handle(\"/metrics\", prometheus.InstrumentHandler(\n\t\t\"metrics\", prometheus.UninstrumentedHandler(),\n\t))\n}\n\nfunc ExampleLabelPairSorter() {\n\tlabelPairs := []*dto.LabelPair{\n\t\t{Name: proto.String(\"status\"), Value: proto.String(\"404\")},\n\t\t{Name: proto.String(\"method\"), Value: proto.String(\"get\")},\n\t}\n\n\tsort.Sort(prometheus.LabelPairSorter(labelPairs))\n\n\tfmt.Println(labelPairs)\n\t// Output:\n\t// [name:\"method\" value:\"get\"  name:\"status\" value:\"404\" ]\n}\n\nfunc ExampleRegister() {\n\t// Imagine you have a worker pool and want to count the tasks completed.\n\ttaskCounter := prometheus.NewCounter(prometheus.CounterOpts{\n\t\tSubsystem: \"worker_pool\",\n\t\tName:      \"completed_tasks_total\",\n\t\tHelp:      \"Total number of tasks completed.\",\n\t})\n\t// This will register fine.\n\tif err := prometheus.Register(taskCounter); err != nil {\n\t\tfmt.Println(err)\n\t} else {\n\t\tfmt.Println(\"taskCounter registered.\")\n\t}\n\t// Don't forget to tell the HTTP server about the Prometheus handler.\n\t// (In a real program, you still need to start the HTTP server...)\n\thttp.Handle(\"/metrics\", prometheus.Handler())\n\n\t// Now you can start workers and give every one of them a pointer to\n\t// taskCounter and let it increment it whenever it completes a task.\n\ttaskCounter.Inc() // This has to happen somewhere in the worker code.\n\n\t// But wait, you want to see how individual workers perform. So you need\n\t// a vector of counters, with one element for each worker.\n\ttaskCounterVec := prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tSubsystem: \"worker_pool\",\n\t\t\tName:      \"completed_tasks_total\",\n\t\t\tHelp:      \"Total number of tasks completed.\",\n\t\t},\n\t\t[]string{\"worker_id\"},\n\t)\n\n\t// Registering will fail because we already have a metric of that name.\n\tif err := prometheus.Register(taskCounterVec); err != nil {\n\t\tfmt.Println(\"taskCounterVec not registered:\", err)\n\t} else {\n\t\tfmt.Println(\"taskCounterVec registered.\")\n\t}\n\n\t// To fix, first unregister the old taskCounter.\n\tif prometheus.Unregister(taskCounter) {\n\t\tfmt.Println(\"taskCounter unregistered.\")\n\t}\n\n\t// Try registering taskCounterVec again.\n\tif err := prometheus.Register(taskCounterVec); err != nil {\n\t\tfmt.Println(\"taskCounterVec not registered:\", err)\n\t} else {\n\t\tfmt.Println(\"taskCounterVec registered.\")\n\t}\n\t// Bummer! Still doesn't work.\n\n\t// Prometheus will not allow you to ever export metrics with\n\t// inconsistent help strings or label names. After unregistering, the\n\t// unregistered metrics will cease to show up in the /metrics HTTP\n\t// response, but the registry still remembers that those metrics had\n\t// been exported before. For this example, we will now choose a\n\t// different name. 
(In a real program, you would obviously not export\n\t// the obsolete metric in the first place.)\n\ttaskCounterVec = prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tSubsystem: \"worker_pool\",\n\t\t\tName:      \"completed_tasks_by_id\",\n\t\t\tHelp:      \"Total number of tasks completed.\",\n\t\t},\n\t\t[]string{\"worker_id\"},\n\t)\n\tif err := prometheus.Register(taskCounterVec); err != nil {\n\t\tfmt.Println(\"taskCounterVec not registered:\", err)\n\t} else {\n\t\tfmt.Println(\"taskCounterVec registered.\")\n\t}\n\t// Finally it worked!\n\n\t// The workers have to tell taskCounterVec their id to increment the\n\t// right element in the metric vector.\n\ttaskCounterVec.WithLabelValues(\"42\").Inc() // Code from worker 42.\n\n\t// Each worker could also keep a reference to their own counter element\n\t// around. Pick the counter at initialization time of the worker.\n\tmyCounter := taskCounterVec.WithLabelValues(\"42\") // From worker 42 initialization code.\n\tmyCounter.Inc()                                   // Somewhere in the code of that worker.\n\n\t// Note that something like WithLabelValues(\"42\", \"spurious arg\") would\n\t// panic (because you have provided too many label values). If you want\n\t// to get an error instead, use GetMetricWithLabelValues(...).\n\tnotMyCounter, err := taskCounterVec.GetMetricWithLabelValues(\"42\", \"spurious arg\")\n\tif err != nil {\n\t\tfmt.Println(\"Worker initialization failed:\", err)\n\t}\n\tif notMyCounter == nil {\n\t\tfmt.Println(\"notMyCounter is nil.\")\n\t}\n\n\t// A different (and somewhat tricky) approach is to use\n\t// ConstLabels. ConstLabels are pairs of label names and label values\n\t// that never change. You might ask what those labels are good for (and\n\t// rightfully so - if they never change, they could as well be part of\n\t// the metric name). There are essentially two use-cases: The first is\n\t// if labels are constant throughout the lifetime of a binary execution,\n\t// but they vary over time or between different instances of a running\n\t// binary. The second is what we have here: Each worker creates and\n\t// registers its own Counter instance where the only difference is in the\n\t// value of the ConstLabels. Those Counters can all be registered\n\t// because the different ConstLabel values guarantee that each worker\n\t// will increment a different Counter metric.\n\tcounterOpts := prometheus.CounterOpts{\n\t\tSubsystem:   \"worker_pool\",\n\t\tName:        \"completed_tasks\",\n\t\tHelp:        \"Total number of tasks completed.\",\n\t\tConstLabels: prometheus.Labels{\"worker_id\": \"42\"},\n\t}\n\ttaskCounterForWorker42 := prometheus.NewCounter(counterOpts)\n\tif err := prometheus.Register(taskCounterForWorker42); err != nil {\n\t\tfmt.Println(\"taskCounterForWorker42 not registered:\", err)\n\t} else {\n\t\tfmt.Println(\"taskCounterForWorker42 registered.\")\n\t}\n\t// Obviously, in real code, taskCounterForWorker42 would be a member\n\t// variable of a worker struct, and the \"42\" would be retrieved with a\n\t// GetId() method or something. The Counter would be created and\n\t// registered in the initialization code of the worker.\n\n\t// For the creation of the next Counter, we can recycle\n\t// counterOpts. 
Just change the ConstLabels.\n\tcounterOpts.ConstLabels = prometheus.Labels{\"worker_id\": \"2001\"}\n\ttaskCounterForWorker2001 := prometheus.NewCounter(counterOpts)\n\tif err := prometheus.Register(taskCounterForWorker2001); err != nil {\n\t\tfmt.Println(\"taskCounterForWorker2001 not registered:\", err)\n\t} else {\n\t\tfmt.Println(\"taskCounterForWorker2001 registered.\")\n\t}\n\n\ttaskCounterForWorker2001.Inc()\n\ttaskCounterForWorker42.Inc()\n\ttaskCounterForWorker2001.Inc()\n\n\t// Yet another approach would be to turn the workers themselves into\n\t// Collectors and register them. See the Collector example for details.\n\n\t// Output:\n\t// taskCounter registered.\n\t// taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: \"worker_pool_completed_tasks_total\", help: \"Total number of tasks completed.\", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string\n\t// taskCounter unregistered.\n\t// taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: \"worker_pool_completed_tasks_total\", help: \"Total number of tasks completed.\", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string\n\t// taskCounterVec registered.\n\t// Worker initialization failed: inconsistent label cardinality\n\t// notMyCounter is nil.\n\t// taskCounterForWorker42 registered.\n\t// taskCounterForWorker2001 registered.\n}\n\nfunc ExampleSummary() {\n\ttemps := prometheus.NewSummary(prometheus.SummaryOpts{\n\t\tName:       \"pond_temperature_celsius\",\n\t\tHelp:       \"The temperature of the frog pond.\",\n\t\tObjectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},\n\t})\n\n\t// Simulate some observations.\n\tfor i := 0; i < 1000; i++ {\n\t\ttemps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10)\n\t}\n\n\t// Just for demonstration, let's check the state of the summary by\n\t// (ab)using its Write method (which is usually only used by Prometheus\n\t// internally).\n\tmetric := &dto.Metric{}\n\ttemps.Write(metric)\n\tfmt.Println(proto.MarshalTextString(metric))\n\n\t// Output:\n\t// summary: <\n\t//   sample_count: 1000\n\t//   sample_sum: 29969.50000000001\n\t//   quantile: <\n\t//     quantile: 0.5\n\t//     value: 31.1\n\t//   >\n\t//   quantile: <\n\t//     quantile: 0.9\n\t//     value: 41.3\n\t//   >\n\t//   quantile: <\n\t//     quantile: 0.99\n\t//     value: 41.9\n\t//   >\n\t// >\n}\n\nfunc ExampleSummaryVec() {\n\ttemps := prometheus.NewSummaryVec(\n\t\tprometheus.SummaryOpts{\n\t\t\tName:       \"pond_temperature_celsius\",\n\t\t\tHelp:       \"The temperature of the frog pond.\",\n\t\t\tObjectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},\n\t\t},\n\t\t[]string{\"species\"},\n\t)\n\n\t// Simulate some observations.\n\tfor i := 0; i < 1000; i++ {\n\t\ttemps.WithLabelValues(\"litoria-caerulea\").Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10)\n\t\ttemps.WithLabelValues(\"lithobates-catesbeianus\").Observe(32 + math.Floor(100*math.Cos(float64(i)*0.11))/10)\n\t}\n\n\t// Create a Summary without any observations.\n\ttemps.WithLabelValues(\"leiopelma-hochstetteri\")\n\n\t// Just for demonstration, let's check the state of the summary vector\n\t// by registering it with a custom registry and then let it collect the\n\t// metrics.\n\treg := prometheus.NewRegistry()\n\treg.MustRegister(temps)\n\n\tmetricFamilies, err := reg.Gather()\n\tif err != nil || 
len(metricFamilies) != 1 {\n\t\tpanic(\"unexpected behavior of custom test registry\")\n\t}\n\tfmt.Println(proto.MarshalTextString(metricFamilies[0]))\n\n\t// Output:\n\t// name: \"pond_temperature_celsius\"\n\t// help: \"The temperature of the frog pond.\"\n\t// type: SUMMARY\n\t// metric: <\n\t//   label: <\n\t//     name: \"species\"\n\t//     value: \"leiopelma-hochstetteri\"\n\t//   >\n\t//   summary: <\n\t//     sample_count: 0\n\t//     sample_sum: 0\n\t//     quantile: <\n\t//       quantile: 0.5\n\t//       value: nan\n\t//     >\n\t//     quantile: <\n\t//       quantile: 0.9\n\t//       value: nan\n\t//     >\n\t//     quantile: <\n\t//       quantile: 0.99\n\t//       value: nan\n\t//     >\n\t//   >\n\t// >\n\t// metric: <\n\t//   label: <\n\t//     name: \"species\"\n\t//     value: \"lithobates-catesbeianus\"\n\t//   >\n\t//   summary: <\n\t//     sample_count: 1000\n\t//     sample_sum: 31956.100000000017\n\t//     quantile: <\n\t//       quantile: 0.5\n\t//       value: 32.4\n\t//     >\n\t//     quantile: <\n\t//       quantile: 0.9\n\t//       value: 41.4\n\t//     >\n\t//     quantile: <\n\t//       quantile: 0.99\n\t//       value: 41.9\n\t//     >\n\t//   >\n\t// >\n\t// metric: <\n\t//   label: <\n\t//     name: \"species\"\n\t//     value: \"litoria-caerulea\"\n\t//   >\n\t//   summary: <\n\t//     sample_count: 1000\n\t//     sample_sum: 29969.50000000001\n\t//     quantile: <\n\t//       quantile: 0.5\n\t//       value: 31.1\n\t//     >\n\t//     quantile: <\n\t//       quantile: 0.9\n\t//       value: 41.3\n\t//     >\n\t//     quantile: <\n\t//       quantile: 0.99\n\t//       value: 41.9\n\t//     >\n\t//   >\n\t// >\n}\n\nfunc ExampleNewConstSummary() {\n\tdesc := prometheus.NewDesc(\n\t\t\"http_request_duration_seconds\",\n\t\t\"A summary of the HTTP request durations.\",\n\t\t[]string{\"code\", \"method\"},\n\t\tprometheus.Labels{\"owner\": \"example\"},\n\t)\n\n\t// Create a constant summary from values we got from a 3rd party telemetry system.\n\ts := prometheus.MustNewConstSummary(\n\t\tdesc,\n\t\t4711, 403.34,\n\t\tmap[float64]float64{0.5: 42.3, 0.9: 323.3},\n\t\t\"200\", \"get\",\n\t)\n\n\t// Just for demonstration, let's check the state of the summary by\n\t// (ab)using its Write method (which is usually only used by Prometheus\n\t// internally).\n\tmetric := &dto.Metric{}\n\ts.Write(metric)\n\tfmt.Println(proto.MarshalTextString(metric))\n\n\t// Output:\n\t// label: <\n\t//   name: \"code\"\n\t//   value: \"200\"\n\t// >\n\t// label: <\n\t//   name: \"method\"\n\t//   value: \"get\"\n\t// >\n\t// label: <\n\t//   name: \"owner\"\n\t//   value: \"example\"\n\t// >\n\t// summary: <\n\t//   sample_count: 4711\n\t//   sample_sum: 403.34\n\t//   quantile: <\n\t//     quantile: 0.5\n\t//     value: 42.3\n\t//   >\n\t//   quantile: <\n\t//     quantile: 0.9\n\t//     value: 323.3\n\t//   >\n\t// >\n}\n\nfunc ExampleHistogram() {\n\ttemps := prometheus.NewHistogram(prometheus.HistogramOpts{\n\t\tName:    \"pond_temperature_celsius\",\n\t\tHelp:    \"The temperature of the frog pond.\", // Sorry, we can't measure how badly it smells.\n\t\tBuckets: prometheus.LinearBuckets(20, 5, 5),  // 5 buckets, each 5 centigrade wide.\n\t})\n\n\t// Simulate some observations.\n\tfor i := 0; i < 1000; i++ {\n\t\ttemps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10)\n\t}\n\n\t// Just for demonstration, let's check the state of the histogram by\n\t// (ab)using its Write method (which is usually only used by Prometheus\n\t// internally).\n\tmetric := 
&dto.Metric{}\n\ttemps.Write(metric)\n\tfmt.Println(proto.MarshalTextString(metric))\n\n\t// Output:\n\t// histogram: <\n\t//   sample_count: 1000\n\t//   sample_sum: 29969.50000000001\n\t//   bucket: <\n\t//     cumulative_count: 192\n\t//     upper_bound: 20\n\t//   >\n\t//   bucket: <\n\t//     cumulative_count: 366\n\t//     upper_bound: 25\n\t//   >\n\t//   bucket: <\n\t//     cumulative_count: 501\n\t//     upper_bound: 30\n\t//   >\n\t//   bucket: <\n\t//     cumulative_count: 638\n\t//     upper_bound: 35\n\t//   >\n\t//   bucket: <\n\t//     cumulative_count: 816\n\t//     upper_bound: 40\n\t//   >\n\t// >\n}\n\nfunc ExampleNewConstHistogram() {\n\tdesc := prometheus.NewDesc(\n\t\t\"http_request_duration_seconds\",\n\t\t\"A histogram of the HTTP request durations.\",\n\t\t[]string{\"code\", \"method\"},\n\t\tprometheus.Labels{\"owner\": \"example\"},\n\t)\n\n\t// Create a constant histogram from values we got from a 3rd party telemetry system.\n\th := prometheus.MustNewConstHistogram(\n\t\tdesc,\n\t\t4711, 403.34,\n\t\tmap[float64]uint64{25: 121, 50: 2403, 100: 3221, 200: 4233},\n\t\t\"200\", \"get\",\n\t)\n\n\t// Just for demonstration, let's check the state of the histogram by\n\t// (ab)using its Write method (which is usually only used by Prometheus\n\t// internally).\n\tmetric := &dto.Metric{}\n\th.Write(metric)\n\tfmt.Println(proto.MarshalTextString(metric))\n\n\t// Output:\n\t// label: <\n\t//   name: \"code\"\n\t//   value: \"200\"\n\t// >\n\t// label: <\n\t//   name: \"method\"\n\t//   value: \"get\"\n\t// >\n\t// label: <\n\t//   name: \"owner\"\n\t//   value: \"example\"\n\t// >\n\t// histogram: <\n\t//   sample_count: 4711\n\t//   sample_sum: 403.34\n\t//   bucket: <\n\t//     cumulative_count: 121\n\t//     upper_bound: 25\n\t//   >\n\t//   bucket: <\n\t//     cumulative_count: 2403\n\t//     upper_bound: 50\n\t//   >\n\t//   bucket: <\n\t//     cumulative_count: 3221\n\t//     upper_bound: 100\n\t//   >\n\t//   bucket: <\n\t//     cumulative_count: 4233\n\t//     upper_bound: 200\n\t//   >\n\t// >\n}\n\nfunc ExampleAlreadyRegisteredError() {\n\treqCounter := prometheus.NewCounter(prometheus.CounterOpts{\n\t\tName: \"requests_total\",\n\t\tHelp: \"The total number of requests served.\",\n\t})\n\tif err := prometheus.Register(reqCounter); err != nil {\n\t\tif are, ok := err.(prometheus.AlreadyRegisteredError); ok {\n\t\t\t// A counter for that metric has been registered before.\n\t\t\t// Use the old counter from now on.\n\t\t\treqCounter = are.ExistingCollector.(prometheus.Counter)\n\t\t} else {\n\t\t\t// Something else went wrong!\n\t\t\tpanic(err)\n\t\t}\n\t}\n\treqCounter.Inc()\n}\n\nfunc ExampleGatherers() {\n\treg := prometheus.NewRegistry()\n\ttemp := prometheus.NewGaugeVec(\n\t\tprometheus.GaugeOpts{\n\t\t\tName: \"temperature_kelvin\",\n\t\t\tHelp: \"Temperature in Kelvin.\",\n\t\t},\n\t\t[]string{\"location\"},\n\t)\n\treg.MustRegister(temp)\n\ttemp.WithLabelValues(\"outside\").Set(273.14)\n\ttemp.WithLabelValues(\"inside\").Set(298.44)\n\n\tvar parser expfmt.TextParser\n\n\ttext := `\n# TYPE humidity_percent gauge\n# HELP humidity_percent Humidity in %.\nhumidity_percent{location=\"outside\"} 45.4\nhumidity_percent{location=\"inside\"} 33.2\n# TYPE temperature_kelvin gauge\n# HELP temperature_kelvin Temperature in Kelvin.\ntemperature_kelvin{location=\"somewhere else\"} 4.5\n`\n\n\tparseText := func() ([]*dto.MetricFamily, error) {\n\t\tparsed, err := parser.TextToMetricFamilies(strings.NewReader(text))\n\t\tif err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t\tvar result []*dto.MetricFamily\n\t\tfor _, mf := range parsed {\n\t\t\tresult = append(result, mf)\n\t\t}\n\t\treturn result, nil\n\t}\n\n\tgatherers := prometheus.Gatherers{\n\t\treg,\n\t\tprometheus.GathererFunc(parseText),\n\t}\n\n\tgathering, err := gatherers.Gather()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tout := &bytes.Buffer{}\n\tfor _, mf := range gathering {\n\t\tif _, err := expfmt.MetricFamilyToText(out, mf); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tfmt.Print(out.String())\n\tfmt.Println(\"----------\")\n\n\t// Note how the temperature_kelvin metric family has been merged from\n\t// different sources. Now try\n\ttext = `\n# TYPE humidity_percent gauge\n# HELP humidity_percent Humidity in %.\nhumidity_percent{location=\"outside\"} 45.4\nhumidity_percent{location=\"inside\"} 33.2\n# TYPE temperature_kelvin gauge\n# HELP temperature_kelvin Temperature in Kelvin.\n# Duplicate metric:\ntemperature_kelvin{location=\"outside\"} 265.3\n # Wrong labels:\ntemperature_kelvin 4.5\n`\n\n\tgathering, err = gatherers.Gather()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\t// Note that still as many metrics as possible are returned:\n\tout.Reset()\n\tfor _, mf := range gathering {\n\t\tif _, err := expfmt.MetricFamilyToText(out, mf); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tfmt.Print(out.String())\n\n\t// Output:\n\t// # HELP humidity_percent Humidity in %.\n\t// # TYPE humidity_percent gauge\n\t// humidity_percent{location=\"inside\"} 33.2\n\t// humidity_percent{location=\"outside\"} 45.4\n\t// # HELP temperature_kelvin Temperature in Kelvin.\n\t// # TYPE temperature_kelvin gauge\n\t// temperature_kelvin{location=\"inside\"} 298.44\n\t// temperature_kelvin{location=\"outside\"} 273.14\n\t// temperature_kelvin{location=\"somewhere else\"} 4.5\n\t// ----------\n\t// 2 error(s) occurred:\n\t// * collected metric temperature_kelvin label:<name:\"location\" value:\"outside\" > gauge:<value:265.3 >  was collected before with the same name and label values\n\t// * collected metric temperature_kelvin gauge:<value:4.5 >  has label dimensions inconsistent with previously collected metrics in the same metric family\n\t// # HELP humidity_percent Humidity in %.\n\t// # TYPE humidity_percent gauge\n\t// humidity_percent{location=\"inside\"} 33.2\n\t// humidity_percent{location=\"outside\"} 45.4\n\t// # HELP temperature_kelvin Temperature in Kelvin.\n\t// # TYPE temperature_kelvin gauge\n\t// temperature_kelvin{location=\"inside\"} 298.44\n\t// temperature_kelvin{location=\"outside\"} 273.14\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus\n\nimport (\n\t\"encoding/json\"\n\t\"expvar\"\n)\n\ntype expvarCollector struct {\n\texports map[string]*Desc\n}\n\n// NewExpvarCollector returns a newly allocated expvar Collector that still has\n// to be registered with a Prometheus registry.\n//\n// An expvar Collector collects metrics from the expvar interface. It provides a\n// quick way to expose numeric values that are already exported via expvar as\n// Prometheus metrics. Note that the data models of expvar and Prometheus are\n// fundamentally different, and that the expvar Collector is inherently slower\n// than native Prometheus metrics. Thus, the expvar Collector is probably great\n// for experiments and prototying, but you should seriously consider a more\n// direct implementation of Prometheus metrics for monitoring production\n// systems.\n//\n// The exports map has the following meaning:\n//\n// The keys in the map correspond to expvar keys, i.e. for every expvar key you\n// want to export as Prometheus metric, you need an entry in the exports\n// map. The descriptor mapped to each key describes how to export the expvar\n// value. It defines the name and the help string of the Prometheus metric\n// proxying the expvar value. The type will always be Untyped.\n//\n// For descriptors without variable labels, the expvar value must be a number or\n// a bool. The number is then directly exported as the Prometheus sample\n// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values\n// that are not numbers or bools are silently ignored.\n//\n// If the descriptor has one variable label, the expvar value must be an expvar\n// map. The keys in the expvar map become the various values of the one\n// Prometheus label. The values in the expvar map must be numbers or bools again\n// as above.\n//\n// For descriptors with more than one variable label, the expvar must be a\n// nested expvar map, i.e. where the values of the topmost map are maps again\n// etc. until a depth is reached that corresponds to the number of labels. 
The\n// leaves of that structure must be numbers or bools as above to serve as the\n// sample values.\n//\n// Anything that does not fit into the scheme above is silently ignored.\nfunc NewExpvarCollector(exports map[string]*Desc) Collector {\n\treturn &expvarCollector{\n\t\texports: exports,\n\t}\n}\n\n// Describe implements Collector.\nfunc (e *expvarCollector) Describe(ch chan<- *Desc) {\n\tfor _, desc := range e.exports {\n\t\tch <- desc\n\t}\n}\n\n// Collect implements Collector.\nfunc (e *expvarCollector) Collect(ch chan<- Metric) {\n\tfor name, desc := range e.exports {\n\t\tvar m Metric\n\t\texpVar := expvar.Get(name)\n\t\tif expVar == nil {\n\t\t\tcontinue\n\t\t}\n\t\tvar v interface{}\n\t\tlabels := make([]string, len(desc.variableLabels))\n\t\tif err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {\n\t\t\tch <- NewInvalidMetric(desc, err)\n\t\t\tcontinue\n\t\t}\n\t\tvar processValue func(v interface{}, i int)\n\t\tprocessValue = func(v interface{}, i int) {\n\t\t\tif i >= len(labels) {\n\t\t\t\tcopiedLabels := append(make([]string, 0, len(labels)), labels...)\n\t\t\t\tswitch v := v.(type) {\n\t\t\t\tcase float64:\n\t\t\t\t\tm = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...)\n\t\t\t\tcase bool:\n\t\t\t\t\tif v {\n\t\t\t\t\t\tm = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tm = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...)\n\t\t\t\t\t}\n\t\t\t\tdefault:\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tch <- m\n\t\t\t\treturn\n\t\t\t}\n\t\t\tvm, ok := v.(map[string]interface{})\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfor lv, val := range vm {\n\t\t\t\tlabels[i] = lv\n\t\t\t\tprocessValue(val, i+1)\n\t\t\t}\n\t\t}\n\t\tprocessValue(v, 0)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus_test\n\nimport (\n\t\"expvar\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nfunc ExampleNewExpvarCollector() {\n\texpvarCollector := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{\n\t\t\"memstats\": prometheus.NewDesc(\n\t\t\t\"expvar_memstats\",\n\t\t\t\"All numeric memstats as one metric family. Not a good role-model, actually... ;-)\",\n\t\t\t[]string{\"type\"}, nil,\n\t\t),\n\t\t\"lone-int\": prometheus.NewDesc(\n\t\t\t\"expvar_lone_int\",\n\t\t\t\"Just an expvar int as an example.\",\n\t\t\tnil, nil,\n\t\t),\n\t\t\"http-request-map\": prometheus.NewDesc(\n\t\t\t\"expvar_http_request_total\",\n\t\t\t\"How many http requests processed, partitioned by status code and http method.\",\n\t\t\t[]string{\"code\", \"method\"}, nil,\n\t\t),\n\t})\n\tprometheus.MustRegister(expvarCollector)\n\n\t// The Prometheus part is done here. But to show that this example is\n\t// doing anything, we have to manually export something via expvar.  In\n\t// real-life use-cases, some library would already have exported via\n\t// expvar what we want to re-export as Prometheus metrics.\n\texpvar.NewInt(\"lone-int\").Set(42)\n\texpvarMap := expvar.NewMap(\"http-request-map\")\n\tvar (\n\t\texpvarMap1, expvarMap2                             expvar.Map\n\t\texpvarInt11, expvarInt12, expvarInt21, expvarInt22 expvar.Int\n\t)\n\texpvarMap1.Init()\n\texpvarMap2.Init()\n\texpvarInt11.Set(3)\n\texpvarInt12.Set(13)\n\texpvarInt21.Set(11)\n\texpvarInt22.Set(212)\n\texpvarMap1.Set(\"POST\", &expvarInt11)\n\texpvarMap1.Set(\"GET\", &expvarInt12)\n\texpvarMap2.Set(\"POST\", &expvarInt21)\n\texpvarMap2.Set(\"GET\", &expvarInt22)\n\texpvarMap.Set(\"404\", &expvarMap1)\n\texpvarMap.Set(\"200\", &expvarMap2)\n\t// Results in the following expvar map:\n\t// \"http-request-count\": {\"200\": {\"POST\": 11, \"GET\": 212}, \"404\": {\"POST\": 3, \"GET\": 13}}\n\n\t// Let's see what the scrape would yield, but exclude the memstats metrics.\n\tmetricStrings := []string{}\n\tmetric := dto.Metric{}\n\tmetricChan := make(chan prometheus.Metric)\n\tgo func() {\n\t\texpvarCollector.Collect(metricChan)\n\t\tclose(metricChan)\n\t}()\n\tfor m := range metricChan {\n\t\tif strings.Index(m.Desc().String(), \"expvar_memstats\") == -1 {\n\t\t\tmetric.Reset()\n\t\t\tm.Write(&metric)\n\t\t\tmetricStrings = append(metricStrings, metric.String())\n\t\t}\n\t}\n\tsort.Strings(metricStrings)\n\tfor _, s := range metricStrings {\n\t\tfmt.Println(strings.TrimRight(s, \" \"))\n\t}\n\t// Output:\n\t// label:<name:\"code\" value:\"200\" > label:<name:\"method\" value:\"GET\" > untyped:<value:212 >\n\t// label:<name:\"code\" value:\"200\" > label:<name:\"method\" value:\"POST\" > untyped:<value:11 >\n\t// label:<name:\"code\" value:\"404\" > label:<name:\"method\" value:\"GET\" > untyped:<value:13 >\n\t// 
label:<name:\"code\" value:\"404\" > label:<name:\"method\" value:\"POST\" > untyped:<value:3 >\n\t// untyped:<value:42 >\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/fnv.go",
    "content": "package prometheus\n\n// Inline and byte-free variant of hash/fnv's fnv64a.\n\nconst (\n\toffset64 = 14695981039346656037\n\tprime64  = 1099511628211\n)\n\n// hashNew initializies a new fnv64a hash value.\nfunc hashNew() uint64 {\n\treturn offset64\n}\n\n// hashAdd adds a string to a fnv64a hash value, returning the updated hash.\nfunc hashAdd(h uint64, s string) uint64 {\n\tfor i := 0; i < len(s); i++ {\n\t\th ^= uint64(s[i])\n\t\th *= prime64\n\t}\n\treturn h\n}\n\n// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.\nfunc hashAddByte(h uint64, b byte) uint64 {\n\th ^= uint64(b)\n\th *= prime64\n\treturn h\n}\n"
  },
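  {
    "path": "examples/fnv_crosscheck_sketch.go",
    "content": "// Package main is a hypothetical, self-contained sketch showing that the\n// inline FNV-1a helpers in the vendored fnv.go agree with the standard\n// library's hash/fnv. The constants and hashAdd are restated here only\n// because the vendored helpers are unexported; the file path is an\n// illustrative assumption and the file is not part of this repository's\n// build or of upstream client_golang.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"hash/fnv\"\n)\n\nconst (\n\toffset64 = 14695981039346656037 // FNV-1a 64-bit offset basis\n\tprime64  = 1099511628211        // FNV-1a 64-bit prime\n)\n\n// hashAdd mirrors the vendored helper: fold a string into an fnv64a value,\n// one byte at a time (XOR, then multiply by the prime).\nfunc hashAdd(h uint64, s string) uint64 {\n\tfor i := 0; i < len(s); i++ {\n\t\th ^= uint64(s[i])\n\t\th *= prime64\n\t}\n\treturn h\n}\n\nfunc main() {\n\t// The inline variant must produce the same digest as hash/fnv.\n\th := hashAdd(offset64, \"label_value\")\n\tstd := fnv.New64a()\n\tstd.Write([]byte(\"label_value\"))\n\tfmt.Println(h == std.Sum64()) // prints: true\n}\n"
  },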
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/gauge.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus\n\n// Gauge is a Metric that represents a single numerical value that can\n// arbitrarily go up and down.\n//\n// A Gauge is typically used for measured values like temperatures or current\n// memory usage, but also \"counts\" that can go up and down, like the number of\n// running goroutines.\n//\n// To create Gauge instances, use NewGauge.\ntype Gauge interface {\n\tMetric\n\tCollector\n\n\t// Set sets the Gauge to an arbitrary value.\n\tSet(float64)\n\t// Inc increments the Gauge by 1. Use Add to increment it by arbitrary\n\t// values.\n\tInc()\n\t// Dec decrements the Gauge by 1. Use Sub to decrement it by arbitrary\n\t// values.\n\tDec()\n\t// Add adds the given value to the Gauge. (The value can be negative,\n\t// resulting in a decrease of the Gauge.)\n\tAdd(float64)\n\t// Sub subtracts the given value from the Gauge. (The value can be\n\t// negative, resulting in an increase of the Gauge.)\n\tSub(float64)\n\n\t// SetToCurrentTime sets the Gauge to the current Unix time in seconds.\n\tSetToCurrentTime()\n}\n\n// GaugeOpts is an alias for Opts. See there for doc comments.\ntype GaugeOpts Opts\n\n// NewGauge creates a new Gauge based on the provided GaugeOpts.\nfunc NewGauge(opts GaugeOpts) Gauge {\n\treturn newValue(NewDesc(\n\t\tBuildFQName(opts.Namespace, opts.Subsystem, opts.Name),\n\t\topts.Help,\n\t\tnil,\n\t\topts.ConstLabels,\n\t), GaugeValue, 0)\n}\n\n// GaugeVec is a Collector that bundles a set of Gauges that all share the same\n// Desc, but have different values for their variable labels. This is used if\n// you want to count the same thing partitioned by various dimensions\n// (e.g. number of operations queued, partitioned by user and operation\n// type). Create instances with NewGaugeVec.\ntype GaugeVec struct {\n\t*metricVec\n}\n\n// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and\n// partitioned by the given label names.\nfunc NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {\n\tdesc := NewDesc(\n\t\tBuildFQName(opts.Namespace, opts.Subsystem, opts.Name),\n\t\topts.Help,\n\t\tlabelNames,\n\t\topts.ConstLabels,\n\t)\n\treturn &GaugeVec{\n\t\tmetricVec: newMetricVec(desc, func(lvs ...string) Metric {\n\t\t\treturn newValue(desc, GaugeValue, 0, lvs...)\n\t\t}),\n\t}\n}\n\n// GetMetricWithLabelValues returns the Gauge for the given slice of label\n// values (same order as the VariableLabels in Desc). If that combination of\n// label values is accessed for the first time, a new Gauge is created.\n//\n// It is possible to call this method without using the returned Gauge to only\n// create the new Gauge but leave it at its starting value 0. See also the\n// SummaryVec example.\n//\n// Keeping the Gauge for later use is possible (and should be considered if\n// performance is critical), but keep in mind that Reset, DeleteLabelValues and\n// Delete can be used to delete the Gauge from the GaugeVec. 
In that case, the\n// Gauge will still exist, but it will not be exported anymore, even if a\n// Gauge with the same label values is created later. See also the CounterVec\n// example.\n//\n// An error is returned if the number of label values is not the same as the\n// number of VariableLabels in Desc.\n//\n// Note that for more than one label value, this method is prone to mistakes\n// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as\n// an alternative to avoid that type of mistake. For higher label numbers, the\n// latter has a much more readable (albeit more verbose) syntax, but it comes\n// with a performance overhead (for creating and processing the Labels map).\nfunc (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {\n\tmetric, err := m.metricVec.getMetricWithLabelValues(lvs...)\n\tif metric != nil {\n\t\treturn metric.(Gauge), err\n\t}\n\treturn nil, err\n}\n\n// GetMetricWith returns the Gauge for the given Labels map (the label names\n// must match those of the VariableLabels in Desc). If that label map is\n// accessed for the first time, a new Gauge is created. Implications of\n// creating a Gauge without using it and keeping the Gauge for later use are\n// the same as for GetMetricWithLabelValues.\n//\n// An error is returned if the number and names of the Labels are inconsistent\n// with those of the VariableLabels in Desc.\n//\n// This method is used for the same purpose as\n// GetMetricWithLabelValues(...string). See there for pros and cons of the two\n// methods.\nfunc (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {\n\tmetric, err := m.metricVec.getMetricWith(labels)\n\tif metric != nil {\n\t\treturn metric.(Gauge), err\n\t}\n\treturn nil, err\n}\n\n// WithLabelValues works as GetMetricWithLabelValues, but panics where\n// GetMetricWithLabelValues would have returned an error. By not returning an\n// error, WithLabelValues allows shortcuts like\n//     myVec.WithLabelValues(\"404\", \"GET\").Add(42)\nfunc (m *GaugeVec) WithLabelValues(lvs ...string) Gauge {\n\treturn m.metricVec.withLabelValues(lvs...).(Gauge)\n}\n\n// With works as GetMetricWith, but panics where GetMetricWith would have\n// returned an error. By not returning an error, With allows shortcuts like\n//     myVec.With(Labels{\"code\": \"404\", \"method\": \"GET\"}).Add(42)\nfunc (m *GaugeVec) With(labels Labels) Gauge {\n\treturn m.metricVec.with(labels).(Gauge)\n}\n\n// GaugeFunc is a Gauge whose value is determined at collect time by calling a\n// provided function.\n//\n// To create GaugeFunc instances, use NewGaugeFunc.\ntype GaugeFunc interface {\n\tMetric\n\tCollector\n}\n\n// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The\n// value reported is determined by calling the given function from within the\n// Write method. Take into account that metric collection may happen\n// concurrently. If that results in concurrent calls to Write, like in the case\n// where a GaugeFunc is directly registered with Prometheus, the provided\n// function must be concurrency-safe.\nfunc NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc {\n\treturn newValueFunc(NewDesc(\n\t\tBuildFQName(opts.Namespace, opts.Subsystem, opts.Name),\n\t\topts.Help,\n\t\tnil,\n\t\topts.ConstLabels,\n\t), GaugeValue, function)\n}\n"
  },
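  {
    "path": "examples/gauge_usage_sketch.go",
    "content": "// Package main is a hypothetical usage sketch for the Gauge, GaugeVec, and\n// GaugeFunc APIs documented in the vendored prometheus package above. The\n// file path and all metric names are illustrative assumptions; this file is\n// not part of upstream client_golang or of this repository's build.\npackage main\n\nimport (\n\t\"runtime\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nfunc main() {\n\t// A plain Gauge can be set to arbitrary values and moved up and down.\n\tqueueDepth := prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tName: \"example_queue_depth\",\n\t\tHelp: \"Current number of items waiting in the queue.\",\n\t})\n\tprometheus.MustRegister(queueDepth)\n\tqueueDepth.Set(17)\n\tqueueDepth.Inc()\n\tqueueDepth.Sub(3)\n\n\t// A GaugeVec partitions the same measurement by label values.\n\ttemps := prometheus.NewGaugeVec(\n\t\tprometheus.GaugeOpts{\n\t\t\tName: \"example_temperature_celsius\",\n\t\t\tHelp: \"Current temperature partitioned by sensor location.\",\n\t\t},\n\t\t[]string{\"location\"},\n\t)\n\tprometheus.MustRegister(temps)\n\ttemps.WithLabelValues(\"intake\").Set(21.5)\n\ttemps.With(prometheus.Labels{\"location\": \"exhaust\"}).Set(42.0)\n\n\t// A GaugeFunc is evaluated at collect time; the callback must be\n\t// concurrency-safe because collection may happen concurrently.\n\tprometheus.MustRegister(prometheus.NewGaugeFunc(\n\t\tprometheus.GaugeOpts{\n\t\t\tName: \"example_goroutines\",\n\t\t\tHelp: \"Number of goroutines, read on every scrape.\",\n\t\t},\n\t\tfunc() float64 { return float64(runtime.NumGoroutine()) },\n\t))\n}\n"
  },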
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/gauge_test.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus\n\nimport (\n\t\"math\"\n\t\"math/rand\"\n\t\"sync\"\n\t\"testing\"\n\t\"testing/quick\"\n\t\"time\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n)\n\nfunc listenGaugeStream(vals, result chan float64, done chan struct{}) {\n\tvar sum float64\nouter:\n\tfor {\n\t\tselect {\n\t\tcase <-done:\n\t\t\tclose(vals)\n\t\t\tfor v := range vals {\n\t\t\t\tsum += v\n\t\t\t}\n\t\t\tbreak outer\n\t\tcase v := <-vals:\n\t\t\tsum += v\n\t\t}\n\t}\n\tresult <- sum\n\tclose(result)\n}\n\nfunc TestGaugeConcurrency(t *testing.T) {\n\tit := func(n uint32) bool {\n\t\tmutations := int(n % 10000)\n\t\tconcLevel := int(n%15 + 1)\n\n\t\tvar start, end sync.WaitGroup\n\t\tstart.Add(1)\n\t\tend.Add(concLevel)\n\n\t\tsStream := make(chan float64, mutations*concLevel)\n\t\tresult := make(chan float64)\n\t\tdone := make(chan struct{})\n\n\t\tgo listenGaugeStream(sStream, result, done)\n\t\tgo func() {\n\t\t\tend.Wait()\n\t\t\tclose(done)\n\t\t}()\n\n\t\tgge := NewGauge(GaugeOpts{\n\t\t\tName: \"test_gauge\",\n\t\t\tHelp: \"no help can be found here\",\n\t\t})\n\t\tfor i := 0; i < concLevel; i++ {\n\t\t\tvals := make([]float64, mutations)\n\t\t\tfor j := 0; j < mutations; j++ {\n\t\t\t\tvals[j] = rand.Float64() - 0.5\n\t\t\t}\n\n\t\t\tgo func(vals []float64) {\n\t\t\t\tstart.Wait()\n\t\t\t\tfor _, v := range vals {\n\t\t\t\t\tsStream <- v\n\t\t\t\t\tgge.Add(v)\n\t\t\t\t}\n\t\t\t\tend.Done()\n\t\t\t}(vals)\n\t\t}\n\t\tstart.Done()\n\n\t\tif expected, got := <-result, math.Float64frombits(gge.(*value).valBits); math.Abs(expected-got) > 0.000001 {\n\t\t\tt.Fatalf(\"expected approx. 
%f, got %f\", expected, got)\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n\n\tif err := quick.Check(it, nil); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestGaugeVecConcurrency(t *testing.T) {\n\tit := func(n uint32) bool {\n\t\tmutations := int(n % 10000)\n\t\tconcLevel := int(n%15 + 1)\n\t\tvecLength := int(n%5 + 1)\n\n\t\tvar start, end sync.WaitGroup\n\t\tstart.Add(1)\n\t\tend.Add(concLevel)\n\n\t\tsStreams := make([]chan float64, vecLength)\n\t\tresults := make([]chan float64, vecLength)\n\t\tdone := make(chan struct{})\n\n\t\tfor i := 0; i < vecLength; i++ {\n\t\t\tsStreams[i] = make(chan float64, mutations*concLevel)\n\t\t\tresults[i] = make(chan float64)\n\t\t\tgo listenGaugeStream(sStreams[i], results[i], done)\n\t\t}\n\n\t\tgo func() {\n\t\t\tend.Wait()\n\t\t\tclose(done)\n\t\t}()\n\n\t\tgge := NewGaugeVec(\n\t\t\tGaugeOpts{\n\t\t\t\tName: \"test_gauge\",\n\t\t\t\tHelp: \"no help can be found here\",\n\t\t\t},\n\t\t\t[]string{\"label\"},\n\t\t)\n\t\tfor i := 0; i < concLevel; i++ {\n\t\t\tvals := make([]float64, mutations)\n\t\t\tpick := make([]int, mutations)\n\t\t\tfor j := 0; j < mutations; j++ {\n\t\t\t\tvals[j] = rand.Float64() - 0.5\n\t\t\t\tpick[j] = rand.Intn(vecLength)\n\t\t\t}\n\n\t\t\tgo func(vals []float64) {\n\t\t\t\tstart.Wait()\n\t\t\t\tfor i, v := range vals {\n\t\t\t\t\tsStreams[pick[i]] <- v\n\t\t\t\t\tgge.WithLabelValues(string('A' + pick[i])).Add(v)\n\t\t\t\t}\n\t\t\t\tend.Done()\n\t\t\t}(vals)\n\t\t}\n\t\tstart.Done()\n\n\t\tfor i := range sStreams {\n\t\t\tif expected, got := <-results[i], math.Float64frombits(gge.WithLabelValues(string('A'+i)).(*value).valBits); math.Abs(expected-got) > 0.000001 {\n\t\t\t\tt.Fatalf(\"expected approx. %f, got %f\", expected, got)\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\tif err := quick.Check(it, nil); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestGaugeFunc(t *testing.T) {\n\tgf := NewGaugeFunc(\n\t\tGaugeOpts{\n\t\t\tName:        \"test_name\",\n\t\t\tHelp:        \"test help\",\n\t\t\tConstLabels: Labels{\"a\": \"1\", \"b\": \"2\"},\n\t\t},\n\t\tfunc() float64 { return 3.1415 },\n\t)\n\n\tif expected, got := `Desc{fqName: \"test_name\", help: \"test help\", constLabels: {a=\"1\",b=\"2\"}, variableLabels: []}`, gf.Desc().String(); expected != got {\n\t\tt.Errorf(\"expected %q, got %q\", expected, got)\n\t}\n\n\tm := &dto.Metric{}\n\tgf.Write(m)\n\n\tif expected, got := `label:<name:\"a\" value:\"1\" > label:<name:\"b\" value:\"2\" > gauge:<value:3.1415 > `, m.String(); expected != got {\n\t\tt.Errorf(\"expected %q, got %q\", expected, got)\n\t}\n}\n\nfunc TestGaugeSetCurrentTime(t *testing.T) {\n\tg := NewGauge(GaugeOpts{\n\t\tName: \"test_name\",\n\t\tHelp: \"test help\",\n\t})\n\tg.SetToCurrentTime()\n\tunixTime := float64(time.Now().Unix())\n\n\tm := &dto.Metric{}\n\tg.Write(m)\n\n\tdelta := unixTime - m.GetGauge().GetValue()\n\t// This is just a smoke test to make sure SetToCurrentTime is not\n\t// totally off. Tests with current time involved are hard...\n\tif math.Abs(delta) > 5 {\n\t\tt.Errorf(\"Gauge set to current time deviates from current time by more than 5s, delta is %f seconds\", delta)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/go_collector.go",
    "content": "package prometheus\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\t\"runtime/debug\"\n\t\"time\"\n)\n\ntype goCollector struct {\n\tgoroutinesDesc *Desc\n\tthreadsDesc    *Desc\n\tgcDesc         *Desc\n\tgoInfoDesc     *Desc\n\n\t// metrics to describe and collect\n\tmetrics memStatsMetrics\n}\n\n// NewGoCollector returns a collector which exports metrics about the current\n// go process.\nfunc NewGoCollector() Collector {\n\treturn &goCollector{\n\t\tgoroutinesDesc: NewDesc(\n\t\t\t\"go_goroutines\",\n\t\t\t\"Number of goroutines that currently exist.\",\n\t\t\tnil, nil),\n\t\tthreadsDesc: NewDesc(\n\t\t\t\"go_threads\",\n\t\t\t\"Number of OS threads created.\",\n\t\t\tnil, nil),\n\t\tgcDesc: NewDesc(\n\t\t\t\"go_gc_duration_seconds\",\n\t\t\t\"A summary of the GC invocation durations.\",\n\t\t\tnil, nil),\n\t\tgoInfoDesc: NewDesc(\n\t\t\t\"go_info\",\n\t\t\t\"Information about the Go environment.\",\n\t\t\tnil, Labels{\"version\": runtime.Version()}),\n\t\tmetrics: memStatsMetrics{\n\t\t\t{\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"alloc_bytes\"),\n\t\t\t\t\t\"Number of bytes allocated and still in use.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval:    func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"alloc_bytes_total\"),\n\t\t\t\t\t\"Total number of bytes allocated, even if freed.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval:    func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) },\n\t\t\t\tvalType: CounterValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"sys_bytes\"),\n\t\t\t\t\t\"Number of bytes obtained from system.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval:    func(ms *runtime.MemStats) float64 { return float64(ms.Sys) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"lookups_total\"),\n\t\t\t\t\t\"Total number of pointer lookups.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval:    func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) },\n\t\t\t\tvalType: CounterValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"mallocs_total\"),\n\t\t\t\t\t\"Total number of mallocs.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval:    func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) },\n\t\t\t\tvalType: CounterValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"frees_total\"),\n\t\t\t\t\t\"Total number of frees.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval:    func(ms *runtime.MemStats) float64 { return float64(ms.Frees) },\n\t\t\t\tvalType: CounterValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"heap_alloc_bytes\"),\n\t\t\t\t\t\"Number of heap bytes allocated and still in use.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"heap_sys_bytes\"),\n\t\t\t\t\t\"Number of heap bytes obtained from system.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"heap_idle_bytes\"),\n\t\t\t\t\t\"Number of heap bytes waiting to be used.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval:    func(ms *runtime.MemStats) float64 { return 
float64(ms.HeapIdle) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"heap_inuse_bytes\"),\n\t\t\t\t\t\"Number of heap bytes that are in use.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"heap_released_bytes\"),\n\t\t\t\t\t\"Number of heap bytes released to OS.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"heap_objects\"),\n\t\t\t\t\t\"Number of allocated objects.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval:    func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"stack_inuse_bytes\"),\n\t\t\t\t\t\"Number of bytes in use by the stack allocator.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval:    func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"stack_sys_bytes\"),\n\t\t\t\t\t\"Number of bytes obtained from system for stack allocator.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval:    func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"mspan_inuse_bytes\"),\n\t\t\t\t\t\"Number of bytes in use by mspan structures.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval:    func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"mspan_sys_bytes\"),\n\t\t\t\t\t\"Number of bytes used for mspan structures obtained from system.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval:    func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"mcache_inuse_bytes\"),\n\t\t\t\t\t\"Number of bytes in use by mcache structures.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval:    func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"mcache_sys_bytes\"),\n\t\t\t\t\t\"Number of bytes used for mcache structures obtained from system.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval:    func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"buck_hash_sys_bytes\"),\n\t\t\t\t\t\"Number of bytes used by the profiling bucket hash table.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval:    func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"gc_sys_bytes\"),\n\t\t\t\t\t\"Number of bytes used for garbage collection system metadata.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval:    func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"other_sys_bytes\"),\n\t\t\t\t\t\"Number of bytes used for other system 
allocations.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval:    func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"next_gc_bytes\"),\n\t\t\t\t\t\"Number of heap bytes when next garbage collection will take place.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval:    func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"last_gc_time_seconds\"),\n\t\t\t\t\t\"Number of seconds since 1970 of last garbage collection.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval:    func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t}, {\n\t\t\t\tdesc: NewDesc(\n\t\t\t\t\tmemstatNamespace(\"gc_cpu_fraction\"),\n\t\t\t\t\t\"The fraction of this program's available CPU time used by the GC since the program started.\",\n\t\t\t\t\tnil, nil,\n\t\t\t\t),\n\t\t\t\teval:    func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },\n\t\t\t\tvalType: GaugeValue,\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc memstatNamespace(s string) string {\n\treturn fmt.Sprintf(\"go_memstats_%s\", s)\n}\n\n// Describe returns all descriptions of the collector.\nfunc (c *goCollector) Describe(ch chan<- *Desc) {\n\tch <- c.goroutinesDesc\n\tch <- c.threadsDesc\n\tch <- c.gcDesc\n\tch <- c.goInfoDesc\n\tfor _, i := range c.metrics {\n\t\tch <- i.desc\n\t}\n}\n\n// Collect returns the current state of all metrics of the collector.\nfunc (c *goCollector) Collect(ch chan<- Metric) {\n\tch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))\n\tn, _ := runtime.ThreadCreateProfile(nil)\n\tch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))\n\n\tvar stats debug.GCStats\n\tstats.PauseQuantiles = make([]time.Duration, 5)\n\tdebug.ReadGCStats(&stats)\n\n\tquantiles := make(map[float64]float64)\n\tfor idx, pq := range stats.PauseQuantiles[1:] {\n\t\tquantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds()\n\t}\n\tquantiles[0.0] = stats.PauseQuantiles[0].Seconds()\n\tch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles)\n\n\tch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)\n\n\tms := &runtime.MemStats{}\n\truntime.ReadMemStats(ms)\n\tfor _, i := range c.metrics {\n\t\tch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))\n\t}\n}\n\n// memStatsMetrics provide description, value, and value type for memstat metrics.\ntype memStatsMetrics []struct {\n\tdesc    *Desc\n\teval    func(*runtime.MemStats) float64\n\tvalType ValueType\n}\n"
  },
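  {
    "path": "examples/go_collector_usage_sketch.go",
    "content": "// Package main is a hypothetical usage sketch for NewGoCollector from the\n// vendored go_collector.go above. The file path and the listen address are\n// illustrative assumptions; this file is not part of upstream client_golang\n// or of this repository's build.\npackage main\n\nimport (\n\t\"log\"\n\t\"net/http\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/client_golang/prometheus/promhttp\"\n)\n\nfunc main() {\n\t// Use a fresh registry so the scrape below contains only the go_*\n\t// metrics exported by the Go collector (goroutines, threads, the GC\n\t// pause summary, go_info, and the go_memstats_* series).\n\treg := prometheus.NewRegistry()\n\treg.MustRegister(prometheus.NewGoCollector())\n\n\thttp.Handle(\"/metrics\", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))\n\tlog.Fatal(http.ListenAndServe(\":8080\", nil))\n}\n"
  },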
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/go_collector_test.go",
    "content": "package prometheus\n\nimport (\n\t\"runtime\"\n\t\"testing\"\n\t\"time\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n)\n\nfunc TestGoCollector(t *testing.T) {\n\tvar (\n\t\tc      = NewGoCollector()\n\t\tch     = make(chan Metric)\n\t\twaitc  = make(chan struct{})\n\t\tclosec = make(chan struct{})\n\t\told    = -1\n\t)\n\tdefer close(closec)\n\n\tgo func() {\n\t\tc.Collect(ch)\n\t\tgo func(c <-chan struct{}) {\n\t\t\t<-c\n\t\t}(closec)\n\t\t<-waitc\n\t\tc.Collect(ch)\n\t}()\n\n\tfor {\n\t\tselect {\n\t\tcase m := <-ch:\n\t\t\t// m can be Gauge or Counter,\n\t\t\t// currently just test the go_goroutines Gauge\n\t\t\t// and ignore others.\n\t\t\tif m.Desc().fqName != \"go_goroutines\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tpb := &dto.Metric{}\n\t\t\tm.Write(pb)\n\t\t\tif pb.GetGauge() == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif old == -1 {\n\t\t\t\told = int(pb.GetGauge().GetValue())\n\t\t\t\tclose(waitc)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif diff := int(pb.GetGauge().GetValue()) - old; diff != 1 {\n\t\t\t\t// TODO: This is flaky in highly concurrent situations.\n\t\t\t\tt.Errorf(\"want 1 new goroutine, got %d\", diff)\n\t\t\t}\n\n\t\t\t// GoCollector performs three sends per call.\n\t\t\t// On line 27 we need to receive three more sends\n\t\t\t// to shut down cleanly.\n\t\t\t<-ch\n\t\t\t<-ch\n\t\t\t<-ch\n\t\t\treturn\n\t\tcase <-time.After(1 * time.Second):\n\t\t\tt.Fatalf(\"expected collect timed out\")\n\t\t}\n\t}\n}\n\nfunc TestGCCollector(t *testing.T) {\n\tvar (\n\t\tc        = NewGoCollector()\n\t\tch       = make(chan Metric)\n\t\twaitc    = make(chan struct{})\n\t\tclosec   = make(chan struct{})\n\t\toldGC    uint64\n\t\toldPause float64\n\t)\n\tdefer close(closec)\n\n\tgo func() {\n\t\tc.Collect(ch)\n\t\t// force GC\n\t\truntime.GC()\n\t\t<-waitc\n\t\tc.Collect(ch)\n\t}()\n\n\tfirst := true\n\tfor {\n\t\tselect {\n\t\tcase metric := <-ch:\n\t\t\tswitch m := metric.(type) {\n\t\t\tcase *constSummary, *value:\n\t\t\t\tpb := &dto.Metric{}\n\t\t\t\tm.Write(pb)\n\t\t\t\tif pb.GetSummary() == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif len(pb.GetSummary().Quantile) != 5 {\n\t\t\t\t\tt.Errorf(\"expected 4 buckets, got %d\", len(pb.GetSummary().Quantile))\n\t\t\t\t}\n\t\t\t\tfor idx, want := range []float64{0.0, 0.25, 0.5, 0.75, 1.0} {\n\t\t\t\t\tif *pb.GetSummary().Quantile[idx].Quantile != want {\n\t\t\t\t\t\tt.Errorf(\"bucket #%d is off, got %f, want %f\", idx, *pb.GetSummary().Quantile[idx].Quantile, want)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif first {\n\t\t\t\t\tfirst = false\n\t\t\t\t\toldGC = *pb.GetSummary().SampleCount\n\t\t\t\t\toldPause = *pb.GetSummary().SampleSum\n\t\t\t\t\tclose(waitc)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif diff := *pb.GetSummary().SampleCount - oldGC; diff != 1 {\n\t\t\t\t\tt.Errorf(\"want 1 new garbage collection run, got %d\", diff)\n\t\t\t\t}\n\t\t\t\tif diff := *pb.GetSummary().SampleSum - oldPause; diff <= 0 {\n\t\t\t\t\tt.Errorf(\"want moar pause, got %f\", diff)\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-time.After(1 * time.Second):\n\t\t\tt.Fatalf(\"expected collect timed out\")\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/histogram.go",
    "content": "// Copyright 2015 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"sort\"\n\t\"sync/atomic\"\n\n\t\"github.com/golang/protobuf/proto\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n)\n\n// A Histogram counts individual observations from an event or sample stream in\n// configurable buckets. Similar to a summary, it also provides a sum of\n// observations and an observation count.\n//\n// On the Prometheus server, quantiles can be calculated from a Histogram using\n// the histogram_quantile function in the query language.\n//\n// Note that Histograms, in contrast to Summaries, can be aggregated with the\n// Prometheus query language (see the documentation for detailed\n// procedures). However, Histograms require the user to pre-define suitable\n// buckets, and they are in general less accurate. The Observe method of a\n// Histogram has a very low performance overhead in comparison with the Observe\n// method of a Summary.\n//\n// To create Histogram instances, use NewHistogram.\ntype Histogram interface {\n\tMetric\n\tCollector\n\n\t// Observe adds a single observation to the histogram.\n\tObserve(float64)\n}\n\n// bucketLabel is used for the label that defines the upper bound of a\n// bucket of a histogram (\"le\" -> \"less or equal\").\nconst bucketLabel = \"le\"\n\n// DefBuckets are the default Histogram buckets. The default buckets are\n// tailored to broadly measure the response time (in seconds) of a network\n// service. Most likely, however, you will be required to define buckets\n// customized to your use case.\nvar (\n\tDefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}\n\n\terrBucketLabelNotAllowed = fmt.Errorf(\n\t\t\"%q is not allowed as label name in histograms\", bucketLabel,\n\t)\n)\n\n// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest\n// bucket has an upper bound of 'start'. The final +Inf bucket is not counted\n// and not included in the returned slice. The returned slice is meant to be\n// used for the Buckets field of HistogramOpts.\n//\n// The function panics if 'count' is zero or negative.\nfunc LinearBuckets(start, width float64, count int) []float64 {\n\tif count < 1 {\n\t\tpanic(\"LinearBuckets needs a positive count\")\n\t}\n\tbuckets := make([]float64, count)\n\tfor i := range buckets {\n\t\tbuckets[i] = start\n\t\tstart += width\n\t}\n\treturn buckets\n}\n\n// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an\n// upper bound of 'start' and each following bucket's upper bound is 'factor'\n// times the previous bucket's upper bound. The final +Inf bucket is not counted\n// and not included in the returned slice. 
The returned slice is meant to be\n// used for the Buckets field of HistogramOpts.\n//\n// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative,\n// or if 'factor' is less than or equal to 1.\nfunc ExponentialBuckets(start, factor float64, count int) []float64 {\n\tif count < 1 {\n\t\tpanic(\"ExponentialBuckets needs a positive count\")\n\t}\n\tif start <= 0 {\n\t\tpanic(\"ExponentialBuckets needs a positive start value\")\n\t}\n\tif factor <= 1 {\n\t\tpanic(\"ExponentialBuckets needs a factor greater than 1\")\n\t}\n\tbuckets := make([]float64, count)\n\tfor i := range buckets {\n\t\tbuckets[i] = start\n\t\tstart *= factor\n\t}\n\treturn buckets\n}\n\n// HistogramOpts bundles the options for creating a Histogram metric. It is\n// mandatory to set Name and Help to a non-empty string. All other fields are\n// optional and can safely be left at their zero value.\ntype HistogramOpts struct {\n\t// Namespace, Subsystem, and Name are components of the fully-qualified\n\t// name of the Histogram (created by joining these components with\n\t// \"_\"). Only Name is mandatory, the others merely help structuring the\n\t// name. Note that the fully-qualified name of the Histogram must be a\n\t// valid Prometheus metric name.\n\tNamespace string\n\tSubsystem string\n\tName      string\n\n\t// Help provides information about this Histogram. Mandatory!\n\t//\n\t// Metrics with the same fully-qualified name must have the same Help\n\t// string.\n\tHelp string\n\n\t// ConstLabels are used to attach fixed labels to this\n\t// Histogram. Histograms with the same fully-qualified name must have the\n\t// same label names in their ConstLabels.\n\t//\n\t// Note that in most cases, labels have a value that varies during the\n\t// lifetime of a process. Those labels are usually managed with a\n\t// HistogramVec. ConstLabels serve only special purposes. One is for the\n\t// special case where the value of a label does not change during the\n\t// lifetime of a process, e.g. if the revision of the running binary is\n\t// put into a label. Another, more advanced purpose is if more than one\n\t// Collector needs to collect Histograms with the same fully-qualified\n\t// name. In that case, those Histograms must differ in the values of\n\t// their ConstLabels. See the Collector examples.\n\t//\n\t// If the value of a label never changes (not even between binaries),\n\t// that label most likely should not be a label at all (but part of the\n\t// metric name).\n\tConstLabels Labels\n\n\t// Buckets defines the buckets into which observations are counted. Each\n\t// element in the slice is the upper inclusive bound of a bucket. The\n\t// values must be sorted in strictly increasing order. There is no need\n\t// to add a highest bucket with +Inf bound, it will be added\n\t// implicitly. The default value is DefBuckets.\n\tBuckets []float64\n}\n\n// NewHistogram creates a new Histogram based on the provided HistogramOpts. 
It\n// panics if the buckets in HistogramOpts are not in strictly increasing order.\nfunc NewHistogram(opts HistogramOpts) Histogram {\n\treturn newHistogram(\n\t\tNewDesc(\n\t\t\tBuildFQName(opts.Namespace, opts.Subsystem, opts.Name),\n\t\t\topts.Help,\n\t\t\tnil,\n\t\t\topts.ConstLabels,\n\t\t),\n\t\topts,\n\t)\n}\n\nfunc newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {\n\tif len(desc.variableLabels) != len(labelValues) {\n\t\tpanic(errInconsistentCardinality)\n\t}\n\n\tfor _, n := range desc.variableLabels {\n\t\tif n == bucketLabel {\n\t\t\tpanic(errBucketLabelNotAllowed)\n\t\t}\n\t}\n\tfor _, lp := range desc.constLabelPairs {\n\t\tif lp.GetName() == bucketLabel {\n\t\t\tpanic(errBucketLabelNotAllowed)\n\t\t}\n\t}\n\n\tif len(opts.Buckets) == 0 {\n\t\topts.Buckets = DefBuckets\n\t}\n\n\th := &histogram{\n\t\tdesc:        desc,\n\t\tupperBounds: opts.Buckets,\n\t\tlabelPairs:  makeLabelPairs(desc, labelValues),\n\t}\n\tfor i, upperBound := range h.upperBounds {\n\t\tif i < len(h.upperBounds)-1 {\n\t\t\tif upperBound >= h.upperBounds[i+1] {\n\t\t\t\tpanic(fmt.Errorf(\n\t\t\t\t\t\"histogram buckets must be in increasing order: %f >= %f\",\n\t\t\t\t\tupperBound, h.upperBounds[i+1],\n\t\t\t\t))\n\t\t\t}\n\t\t} else {\n\t\t\tif math.IsInf(upperBound, +1) {\n\t\t\t\t// The +Inf bucket is implicit. Remove it here.\n\t\t\t\th.upperBounds = h.upperBounds[:i]\n\t\t\t}\n\t\t}\n\t}\n\t// Finally we know the final length of h.upperBounds and can make counts.\n\th.counts = make([]uint64, len(h.upperBounds))\n\n\th.init(h) // Init self-collection.\n\treturn h\n}\n\ntype histogram struct {\n\t// sumBits contains the bits of the float64 representing the sum of all\n\t// observations. sumBits and count have to go first in the struct to\n\t// guarantee alignment for atomic operations.\n\t// http://golang.org/pkg/sync/atomic/#pkg-note-BUG\n\tsumBits uint64\n\tcount   uint64\n\n\tselfCollector\n\t// Note that there is no mutex required.\n\n\tdesc *Desc\n\n\tupperBounds []float64\n\tcounts      []uint64\n\n\tlabelPairs []*dto.LabelPair\n}\n\nfunc (h *histogram) Desc() *Desc {\n\treturn h.desc\n}\n\nfunc (h *histogram) Observe(v float64) {\n\t// TODO(beorn7): For small numbers of buckets (<30), a linear search is\n\t// slightly faster than the binary search. 
If we really care, we could\n\t// switch from one search strategy to the other depending on the number\n\t// of buckets.\n\t//\n\t// Microbenchmarks (BenchmarkHistogramNoLabels):\n\t// 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op\n\t// 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op\n\t// 300 buckets: 154 ns/op linear - binary 61.6 ns/op\n\ti := sort.SearchFloat64s(h.upperBounds, v)\n\tif i < len(h.counts) {\n\t\tatomic.AddUint64(&h.counts[i], 1)\n\t}\n\tatomic.AddUint64(&h.count, 1)\n\tfor {\n\t\toldBits := atomic.LoadUint64(&h.sumBits)\n\t\tnewBits := math.Float64bits(math.Float64frombits(oldBits) + v)\n\t\tif atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) {\n\t\t\tbreak\n\t\t}\n\t}\n}\n\nfunc (h *histogram) Write(out *dto.Metric) error {\n\this := &dto.Histogram{}\n\tbuckets := make([]*dto.Bucket, len(h.upperBounds))\n\n\this.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits)))\n\this.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count))\n\tvar count uint64\n\tfor i, upperBound := range h.upperBounds {\n\t\tcount += atomic.LoadUint64(&h.counts[i])\n\t\tbuckets[i] = &dto.Bucket{\n\t\t\tCumulativeCount: proto.Uint64(count),\n\t\t\tUpperBound:      proto.Float64(upperBound),\n\t\t}\n\t}\n\this.Bucket = buckets\n\tout.Histogram = his\n\tout.Label = h.labelPairs\n\treturn nil\n}\n\n// HistogramVec is a Collector that bundles a set of Histograms that all share the\n// same Desc, but have different values for their variable labels. This is used\n// if you want to count the same thing partitioned by various dimensions\n// (e.g. HTTP request latencies, partitioned by status code and method). Create\n// instances with NewHistogramVec.\ntype HistogramVec struct {\n\t*metricVec\n}\n\n// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and\n// partitioned by the given label names.\nfunc NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {\n\tdesc := NewDesc(\n\t\tBuildFQName(opts.Namespace, opts.Subsystem, opts.Name),\n\t\topts.Help,\n\t\tlabelNames,\n\t\topts.ConstLabels,\n\t)\n\treturn &HistogramVec{\n\t\tmetricVec: newMetricVec(desc, func(lvs ...string) Metric {\n\t\t\treturn newHistogram(desc, opts, lvs...)\n\t\t}),\n\t}\n}\n\n// GetMetricWithLabelValues returns the Histogram for the given slice of label\n// values (same order as the VariableLabels in Desc). If that combination of\n// label values is accessed for the first time, a new Histogram is created.\n//\n// It is possible to call this method without using the returned Histogram to only\n// create the new Histogram but leave it at its starting value, a Histogram without\n// any observations.\n//\n// Keeping the Histogram for later use is possible (and should be considered if\n// performance is critical), but keep in mind that Reset, DeleteLabelValues and\n// Delete can be used to delete the Histogram from the HistogramVec. In that case, the\n// Histogram will still exist, but it will not be exported anymore, even if a\n// Histogram with the same label values is created later. See also the CounterVec\n// example.\n//\n// An error is returned if the number of label values is not the same as the\n// number of VariableLabels in Desc.\n//\n// Note that for more than one label value, this method is prone to mistakes\n// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as\n// an alternative to avoid that type of mistake. 
For higher label numbers, the\n// latter has a much more readable (albeit more verbose) syntax, but it comes\n// with a performance overhead (for creating and processing the Labels map).\n// See also the GaugeVec example.\nfunc (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {\n\tmetric, err := m.metricVec.getMetricWithLabelValues(lvs...)\n\tif metric != nil {\n\t\treturn metric.(Observer), err\n\t}\n\treturn nil, err\n}\n\n// GetMetricWith returns the Histogram for the given Labels map (the label names\n// must match those of the VariableLabels in Desc). If that label map is\n// accessed for the first time, a new Histogram is created. Implications of\n// creating a Histogram without using it and keeping the Histogram for later use\n// are the same as for GetMetricWithLabelValues.\n//\n// An error is returned if the number and names of the Labels are inconsistent\n// with those of the VariableLabels in Desc.\n//\n// This method is used for the same purpose as\n// GetMetricWithLabelValues(...string). See there for pros and cons of the two\n// methods.\nfunc (m *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {\n\tmetric, err := m.metricVec.getMetricWith(labels)\n\tif metric != nil {\n\t\treturn metric.(Observer), err\n\t}\n\treturn nil, err\n}\n\n// WithLabelValues works as GetMetricWithLabelValues, but panics where\n// GetMetricWithLabelValues would have returned an error. By not returning an\n// error, WithLabelValues allows shortcuts like\n//     myVec.WithLabelValues(\"404\", \"GET\").Observe(42.21)\nfunc (m *HistogramVec) WithLabelValues(lvs ...string) Observer {\n\treturn m.metricVec.withLabelValues(lvs...).(Observer)\n}\n\n// With works as GetMetricWith, but panics where GetMetricWith would have\n// returned an error. By not returning an error, With allows shortcuts like\n//     myVec.With(Labels{\"code\": \"404\", \"method\": \"GET\"}).Observe(42.21)\nfunc (m *HistogramVec) With(labels Labels) Observer {\n\treturn m.metricVec.with(labels).(Observer)\n}\n\ntype constHistogram struct {\n\tdesc       *Desc\n\tcount      uint64\n\tsum        float64\n\tbuckets    map[float64]uint64\n\tlabelPairs []*dto.LabelPair\n}\n\nfunc (h *constHistogram) Desc() *Desc {\n\treturn h.desc\n}\n\nfunc (h *constHistogram) Write(out *dto.Metric) error {\n\this := &dto.Histogram{}\n\tbuckets := make([]*dto.Bucket, 0, len(h.buckets))\n\n\this.SampleCount = proto.Uint64(h.count)\n\this.SampleSum = proto.Float64(h.sum)\n\n\tfor upperBound, count := range h.buckets {\n\t\tbuckets = append(buckets, &dto.Bucket{\n\t\t\tCumulativeCount: proto.Uint64(count),\n\t\t\tUpperBound:      proto.Float64(upperBound),\n\t\t})\n\t}\n\n\tif len(buckets) > 0 {\n\t\tsort.Sort(buckSort(buckets))\n\t}\n\this.Bucket = buckets\n\n\tout.Histogram = his\n\tout.Label = h.labelPairs\n\n\treturn nil\n}\n\n// NewConstHistogram returns a metric representing a Prometheus histogram with\n// fixed values for the count, sum, and bucket counts. As those parameters\n// cannot be changed, the returned value does not implement the Histogram\n// interface (but only the Metric interface). Users of this package will not\n// have much use for it in regular operations. 
However, when implementing custom\n// Collectors, it is useful as a throw-away metric that is generated on the fly\n// to send it to Prometheus in the Collect method.\n//\n// buckets is a map of upper bounds to cumulative counts, excluding the +Inf\n// bucket.\n//\n// NewConstHistogram returns an error if the length of labelValues is not\n// consistent with the variable labels in Desc.\nfunc NewConstHistogram(\n\tdesc *Desc,\n\tcount uint64,\n\tsum float64,\n\tbuckets map[float64]uint64,\n\tlabelValues ...string,\n) (Metric, error) {\n\tif err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &constHistogram{\n\t\tdesc:       desc,\n\t\tcount:      count,\n\t\tsum:        sum,\n\t\tbuckets:    buckets,\n\t\tlabelPairs: makeLabelPairs(desc, labelValues),\n\t}, nil\n}\n\n// MustNewConstHistogram is a version of NewConstHistogram that panics where\n// NewConstHistogram would have returned an error.\nfunc MustNewConstHistogram(\n\tdesc *Desc,\n\tcount uint64,\n\tsum float64,\n\tbuckets map[float64]uint64,\n\tlabelValues ...string,\n) Metric {\n\tm, err := NewConstHistogram(desc, count, sum, buckets, labelValues...)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn m\n}\n\ntype buckSort []*dto.Bucket\n\nfunc (s buckSort) Len() int {\n\treturn len(s)\n}\n\nfunc (s buckSort) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n}\n\nfunc (s buckSort) Less(i, j int) bool {\n\treturn s[i].GetUpperBound() < s[j].GetUpperBound()\n}\n"
  },
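  {
    "path": "examples/histogram_usage_sketch.go",
    "content": "// Package main is a hypothetical usage sketch for the Histogram APIs and\n// bucket helpers documented in the vendored histogram.go above. The file\n// path and all metric names are illustrative assumptions; this file is not\n// part of upstream client_golang or of this repository's build.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nfunc main() {\n\t// Bucket helpers: five linear buckets 50ms wide starting at 50ms, and\n\t// ten exponential buckets doubling from 1ms. The +Inf bucket is always\n\t// added implicitly and never appears in these slices.\n\tlinear := prometheus.LinearBuckets(0.05, 0.05, 5)   // 0.05 ... 0.25\n\texpo := prometheus.ExponentialBuckets(0.001, 2, 10) // 0.001 ... 0.512\n\tfmt.Println(linear, expo)\n\n\t// A HistogramVec counts observations per label combination.\n\treqDur := prometheus.NewHistogramVec(\n\t\tprometheus.HistogramOpts{\n\t\t\tName:    \"example_request_duration_seconds\",\n\t\t\tHelp:    \"Request latency distribution.\",\n\t\t\tBuckets: expo,\n\t\t},\n\t\t[]string{\"method\"},\n\t)\n\tprometheus.MustRegister(reqDur)\n\n\tstart := time.Now()\n\t// ... handle a request here ...\n\treqDur.WithLabelValues(\"GET\").Observe(time.Since(start).Seconds())\n}\n"
  },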
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/histogram_test.go",
    "content": "// Copyright 2015 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus\n\nimport (\n\t\"math\"\n\t\"math/rand\"\n\t\"reflect\"\n\t\"sort\"\n\t\"sync\"\n\t\"testing\"\n\t\"testing/quick\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n)\n\nfunc benchmarkHistogramObserve(w int, b *testing.B) {\n\tb.StopTimer()\n\n\twg := new(sync.WaitGroup)\n\twg.Add(w)\n\n\tg := new(sync.WaitGroup)\n\tg.Add(1)\n\n\ts := NewHistogram(HistogramOpts{})\n\n\tfor i := 0; i < w; i++ {\n\t\tgo func() {\n\t\t\tg.Wait()\n\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\ts.Observe(float64(i))\n\t\t\t}\n\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\tb.StartTimer()\n\tg.Done()\n\twg.Wait()\n}\n\nfunc BenchmarkHistogramObserve1(b *testing.B) {\n\tbenchmarkHistogramObserve(1, b)\n}\n\nfunc BenchmarkHistogramObserve2(b *testing.B) {\n\tbenchmarkHistogramObserve(2, b)\n}\n\nfunc BenchmarkHistogramObserve4(b *testing.B) {\n\tbenchmarkHistogramObserve(4, b)\n}\n\nfunc BenchmarkHistogramObserve8(b *testing.B) {\n\tbenchmarkHistogramObserve(8, b)\n}\n\nfunc benchmarkHistogramWrite(w int, b *testing.B) {\n\tb.StopTimer()\n\n\twg := new(sync.WaitGroup)\n\twg.Add(w)\n\n\tg := new(sync.WaitGroup)\n\tg.Add(1)\n\n\ts := NewHistogram(HistogramOpts{})\n\n\tfor i := 0; i < 1000000; i++ {\n\t\ts.Observe(float64(i))\n\t}\n\n\tfor j := 0; j < w; j++ {\n\t\touts := make([]dto.Metric, b.N)\n\n\t\tgo func(o []dto.Metric) {\n\t\t\tg.Wait()\n\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\ts.Write(&o[i])\n\t\t\t}\n\n\t\t\twg.Done()\n\t\t}(outs)\n\t}\n\n\tb.StartTimer()\n\tg.Done()\n\twg.Wait()\n}\n\nfunc BenchmarkHistogramWrite1(b *testing.B) {\n\tbenchmarkHistogramWrite(1, b)\n}\n\nfunc BenchmarkHistogramWrite2(b *testing.B) {\n\tbenchmarkHistogramWrite(2, b)\n}\n\nfunc BenchmarkHistogramWrite4(b *testing.B) {\n\tbenchmarkHistogramWrite(4, b)\n}\n\nfunc BenchmarkHistogramWrite8(b *testing.B) {\n\tbenchmarkHistogramWrite(8, b)\n}\n\nfunc TestHistogramNonMonotonicBuckets(t *testing.T) {\n\ttestCases := map[string][]float64{\n\t\t\"not strictly monotonic\":  {1, 2, 2, 3},\n\t\t\"not monotonic at all\":    {1, 2, 4, 3, 5},\n\t\t\"have +Inf in the middle\": {1, 2, math.Inf(+1), 3},\n\t}\n\tfor name, buckets := range testCases {\n\t\tfunc() {\n\t\t\tdefer func() {\n\t\t\t\tif r := recover(); r == nil {\n\t\t\t\t\tt.Errorf(\"Buckets %v are %s but NewHistogram did not panic.\", buckets, name)\n\t\t\t\t}\n\t\t\t}()\n\t\t\t_ = NewHistogram(HistogramOpts{\n\t\t\t\tName:    \"test_histogram\",\n\t\t\t\tHelp:    \"helpless\",\n\t\t\t\tBuckets: buckets,\n\t\t\t})\n\t\t}()\n\t}\n}\n\n// Intentionally adding +Inf here to test if that case is handled correctly.\n// Also, getCumulativeCounts depends on it.\nvar testBuckets = []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)}\n\nfunc TestHistogramConcurrency(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping test in short mode.\")\n\t}\n\n\trand.Seed(42)\n\n\tit := func(n uint32) bool {\n\t\tmutations := int(n%1e4 + 1e4)\n\t\tconcLevel := int(n%5 + 
1)\n\t\ttotal := mutations * concLevel\n\n\t\tvar start, end sync.WaitGroup\n\t\tstart.Add(1)\n\t\tend.Add(concLevel)\n\n\t\tsum := NewHistogram(HistogramOpts{\n\t\t\tName:    \"test_histogram\",\n\t\t\tHelp:    \"helpless\",\n\t\t\tBuckets: testBuckets,\n\t\t})\n\n\t\tallVars := make([]float64, total)\n\t\tvar sampleSum float64\n\t\tfor i := 0; i < concLevel; i++ {\n\t\t\tvals := make([]float64, mutations)\n\t\t\tfor j := 0; j < mutations; j++ {\n\t\t\t\tv := rand.NormFloat64()\n\t\t\t\tvals[j] = v\n\t\t\t\tallVars[i*mutations+j] = v\n\t\t\t\tsampleSum += v\n\t\t\t}\n\n\t\t\tgo func(vals []float64) {\n\t\t\t\tstart.Wait()\n\t\t\t\tfor _, v := range vals {\n\t\t\t\t\tsum.Observe(v)\n\t\t\t\t}\n\t\t\t\tend.Done()\n\t\t\t}(vals)\n\t\t}\n\t\tsort.Float64s(allVars)\n\t\tstart.Done()\n\t\tend.Wait()\n\n\t\tm := &dto.Metric{}\n\t\tsum.Write(m)\n\t\tif got, want := int(*m.Histogram.SampleCount), total; got != want {\n\t\t\tt.Errorf(\"got sample count %d, want %d\", got, want)\n\t\t}\n\t\tif got, want := *m.Histogram.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 {\n\t\t\tt.Errorf(\"got sample sum %f, want %f\", got, want)\n\t\t}\n\n\t\twantCounts := getCumulativeCounts(allVars)\n\n\t\tif got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want {\n\t\t\tt.Errorf(\"got %d buckets in protobuf, want %d\", got, want)\n\t\t}\n\t\tfor i, wantBound := range testBuckets {\n\t\t\tif i == len(testBuckets)-1 {\n\t\t\t\tbreak // No +Inf bucket in protobuf.\n\t\t\t}\n\t\t\tif gotBound := *m.Histogram.Bucket[i].UpperBound; gotBound != wantBound {\n\t\t\t\tt.Errorf(\"got bound %f, want %f\", gotBound, wantBound)\n\t\t\t}\n\t\t\tif gotCount, wantCount := *m.Histogram.Bucket[i].CumulativeCount, wantCounts[i]; gotCount != wantCount {\n\t\t\t\tt.Errorf(\"got count %d, want %d\", gotCount, wantCount)\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\tif err := quick.Check(it, nil); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestHistogramVecConcurrency(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping test in short mode.\")\n\t}\n\n\trand.Seed(42)\n\n\tobjectives := make([]float64, 0, len(DefObjectives))\n\tfor qu := range DefObjectives {\n\n\t\tobjectives = append(objectives, qu)\n\t}\n\tsort.Float64s(objectives)\n\n\tit := func(n uint32) bool {\n\t\tmutations := int(n%1e4 + 1e4)\n\t\tconcLevel := int(n%7 + 1)\n\t\tvecLength := int(n%3 + 1)\n\n\t\tvar start, end sync.WaitGroup\n\t\tstart.Add(1)\n\t\tend.Add(concLevel)\n\n\t\this := NewHistogramVec(\n\t\t\tHistogramOpts{\n\t\t\t\tName:    \"test_histogram\",\n\t\t\t\tHelp:    \"helpless\",\n\t\t\t\tBuckets: []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)},\n\t\t\t},\n\t\t\t[]string{\"label\"},\n\t\t)\n\n\t\tallVars := make([][]float64, vecLength)\n\t\tsampleSums := make([]float64, vecLength)\n\t\tfor i := 0; i < concLevel; i++ {\n\t\t\tvals := make([]float64, mutations)\n\t\t\tpicks := make([]int, mutations)\n\t\t\tfor j := 0; j < mutations; j++ {\n\t\t\t\tv := rand.NormFloat64()\n\t\t\t\tvals[j] = v\n\t\t\t\tpick := rand.Intn(vecLength)\n\t\t\t\tpicks[j] = pick\n\t\t\t\tallVars[pick] = append(allVars[pick], v)\n\t\t\t\tsampleSums[pick] += v\n\t\t\t}\n\n\t\t\tgo func(vals []float64) {\n\t\t\t\tstart.Wait()\n\t\t\t\tfor i, v := range vals {\n\t\t\t\t\this.WithLabelValues(string('A' + picks[i])).Observe(v)\n\t\t\t\t}\n\t\t\t\tend.Done()\n\t\t\t}(vals)\n\t\t}\n\t\tfor _, vars := range allVars {\n\t\t\tsort.Float64s(vars)\n\t\t}\n\t\tstart.Done()\n\t\tend.Wait()\n\n\t\tfor i := 0; i < vecLength; i++ {\n\t\t\tm := &dto.Metric{}\n\t\t\ts := 
his.WithLabelValues(string('A' + i))\n\t\t\ts.(Histogram).Write(m)\n\n\t\t\tif got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want {\n\t\t\t\tt.Errorf(\"got %d buckets in protobuf, want %d\", got, want)\n\t\t\t}\n\t\t\tif got, want := int(*m.Histogram.SampleCount), len(allVars[i]); got != want {\n\t\t\t\tt.Errorf(\"got sample count %d, want %d\", got, want)\n\t\t\t}\n\t\t\tif got, want := *m.Histogram.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 {\n\t\t\t\tt.Errorf(\"got sample sum %f, want %f\", got, want)\n\t\t\t}\n\n\t\t\twantCounts := getCumulativeCounts(allVars[i])\n\n\t\t\tfor j, wantBound := range testBuckets {\n\t\t\t\tif j == len(testBuckets)-1 {\n\t\t\t\t\tbreak // No +Inf bucket in protobuf.\n\t\t\t\t}\n\t\t\t\tif gotBound := *m.Histogram.Bucket[j].UpperBound; gotBound != wantBound {\n\t\t\t\t\tt.Errorf(\"got bound %f, want %f\", gotBound, wantBound)\n\t\t\t\t}\n\t\t\t\tif gotCount, wantCount := *m.Histogram.Bucket[j].CumulativeCount, wantCounts[j]; gotCount != wantCount {\n\t\t\t\t\tt.Errorf(\"got count %d, want %d\", gotCount, wantCount)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\tif err := quick.Check(it, nil); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc getCumulativeCounts(vars []float64) []uint64 {\n\tcounts := make([]uint64, len(testBuckets))\n\tfor _, v := range vars {\n\t\tfor i := len(testBuckets) - 1; i >= 0; i-- {\n\t\t\tif v > testBuckets[i] {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tcounts[i]++\n\t\t}\n\t}\n\treturn counts\n}\n\nfunc TestBuckets(t *testing.T) {\n\tgot := LinearBuckets(-15, 5, 6)\n\twant := []float64{-15, -10, -5, 0, 5, 10}\n\tif !reflect.DeepEqual(got, want) {\n\t\tt.Errorf(\"linear buckets: got %v, want %v\", got, want)\n\t}\n\n\tgot = ExponentialBuckets(100, 1.2, 3)\n\twant = []float64{100, 120, 144}\n\tif !reflect.DeepEqual(got, want) {\n\t\tt.Errorf(\"exponential buckets: got %v, want %v\", got, want)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/http.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"compress/gzip\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"net/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/prometheus/common/expfmt\"\n)\n\n// TODO(beorn7): Remove this whole file. It is a partial mirror of\n// promhttp/http.go (to avoid circular import chains) where everything HTTP\n// related should live. The functions here are just for avoiding\n// breakage. Everything is deprecated.\n\nconst (\n\tcontentTypeHeader     = \"Content-Type\"\n\tcontentLengthHeader   = \"Content-Length\"\n\tcontentEncodingHeader = \"Content-Encoding\"\n\tacceptEncodingHeader  = \"Accept-Encoding\"\n)\n\nvar bufPool sync.Pool\n\nfunc getBuf() *bytes.Buffer {\n\tbuf := bufPool.Get()\n\tif buf == nil {\n\t\treturn &bytes.Buffer{}\n\t}\n\treturn buf.(*bytes.Buffer)\n}\n\nfunc giveBuf(buf *bytes.Buffer) {\n\tbuf.Reset()\n\tbufPool.Put(buf)\n}\n\n// Handler returns an HTTP handler for the DefaultGatherer. It is\n// already instrumented with InstrumentHandler (using \"prometheus\" as handler\n// name).\n//\n// Deprecated: Please note the issues described in the doc comment of\n// InstrumentHandler. You might want to consider using promhttp.Handler instead\n// (which is not instrumented, but can be instrumented with the tooling provided\n// in package promhttp).\nfunc Handler() http.Handler {\n\treturn InstrumentHandler(\"prometheus\", UninstrumentedHandler())\n}\n\n// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer.\n//\n// Deprecated: Use promhttp.Handler instead. 
See there for further documentation.\nfunc UninstrumentedHandler() http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tmfs, err := DefaultGatherer.Gather()\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"An error has occurred during metrics collection:\\n\\n\"+err.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\n\t\tcontentType := expfmt.Negotiate(req.Header)\n\t\tbuf := getBuf()\n\t\tdefer giveBuf(buf)\n\t\twriter, encoding := decorateWriter(req, buf)\n\t\tenc := expfmt.NewEncoder(writer, contentType)\n\t\tvar lastErr error\n\t\tfor _, mf := range mfs {\n\t\t\tif err := enc.Encode(mf); err != nil {\n\t\t\t\tlastErr = err\n\t\t\t\thttp.Error(w, \"An error has occurred during metrics encoding:\\n\\n\"+err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif closer, ok := writer.(io.Closer); ok {\n\t\t\tcloser.Close()\n\t\t}\n\t\tif lastErr != nil && buf.Len() == 0 {\n\t\t\thttp.Error(w, \"No metrics encoded, last error:\\n\\n\"+lastErr.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\theader := w.Header()\n\t\theader.Set(contentTypeHeader, string(contentType))\n\t\theader.Set(contentLengthHeader, fmt.Sprint(buf.Len()))\n\t\tif encoding != \"\" {\n\t\t\theader.Set(contentEncodingHeader, encoding)\n\t\t}\n\t\tw.Write(buf.Bytes())\n\t})\n}\n\n// decorateWriter wraps a writer to handle gzip compression if requested.  It\n// returns the decorated writer and the appropriate \"Content-Encoding\" header\n// (which is empty if no compression is enabled).\nfunc decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) {\n\theader := request.Header.Get(acceptEncodingHeader)\n\tparts := strings.Split(header, \",\")\n\tfor _, part := range parts {\n\t\tpart := strings.TrimSpace(part)\n\t\tif part == \"gzip\" || strings.HasPrefix(part, \"gzip;\") {\n\t\t\treturn gzip.NewWriter(writer), \"gzip\"\n\t\t}\n\t}\n\treturn writer, \"\"\n}\n\nvar instLabels = []string{\"method\", \"code\"}\n\ntype nower interface {\n\tNow() time.Time\n}\n\ntype nowFunc func() time.Time\n\nfunc (n nowFunc) Now() time.Time {\n\treturn n()\n}\n\nvar now nower = nowFunc(func() time.Time {\n\treturn time.Now()\n})\n\nfunc nowSeries(t ...time.Time) nower {\n\treturn nowFunc(func() time.Time {\n\t\tdefer func() {\n\t\t\tt = t[1:]\n\t\t}()\n\n\t\treturn t[0]\n\t})\n}\n\n// InstrumentHandler wraps the given HTTP handler for instrumentation. It\n// registers four metric collectors (if not already done) and reports HTTP\n// metrics to the (newly or already) registered collectors: http_requests_total\n// (CounterVec), http_request_duration_microseconds (Summary),\n// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each\n// has a constant label named \"handler\" with the provided handlerName as\n// value. http_requests_total is a metric vector partitioned by HTTP method\n// (label name \"method\") and HTTP status code (label name \"code\").\n//\n// Deprecated: InstrumentHandler has several issues. Use the tooling provided in\n// package promhttp instead. The issues are the following:\n//\n// - It uses Summaries rather than Histograms. Summaries are not useful if\n// aggregation across multiple instances is required.\n//\n// - It uses microseconds as unit, which is deprecated and should be replaced by\n// seconds.\n//\n// - The size of the request is calculated in a separate goroutine. 
Since this\n// calculator requires access to the request header, it creates a race with\n// any writes to the header performed during request handling.\n// httputil.ReverseProxy is a prominent example for a handler\n// performing such writes.\n//\n// - It has additional issues with HTTP/2, cf.\n// https://github.com/prometheus/client_golang/issues/272.\nfunc InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {\n\treturn InstrumentHandlerFunc(handlerName, handler.ServeHTTP)\n}\n\n// InstrumentHandlerFunc wraps the given function for instrumentation. It\n// otherwise works in the same way as InstrumentHandler (and shares the same\n// issues).\n//\n// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as\n// InstrumentHandler is. Use the tooling provided in package promhttp instead.\nfunc InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {\n\treturn InstrumentHandlerFuncWithOpts(\n\t\tSummaryOpts{\n\t\t\tSubsystem:   \"http\",\n\t\t\tConstLabels: Labels{\"handler\": handlerName},\n\t\t\tObjectives:  map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},\n\t\t},\n\t\thandlerFunc,\n\t)\n}\n\n// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same\n// issues) but provides more flexibility (at the cost of a more complex call\n// syntax). As InstrumentHandler, this function registers four metric\n// collectors, but it uses the provided SummaryOpts to create them. However, the\n// fields \"Name\" and \"Help\" in the SummaryOpts are ignored. \"Name\" is replaced\n// by \"requests_total\", \"request_duration_microseconds\", \"request_size_bytes\",\n// and \"response_size_bytes\", respectively. \"Help\" is replaced by an appropriate\n// help string. The names of the variable labels of the http_requests_total\n// CounterVec are \"method\" (get, post, etc.), and \"code\" (HTTP status code).\n//\n// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the\n// behavior of InstrumentHandler:\n//\n//     prometheus.InstrumentHandlerWithOpts(\n//         prometheus.SummaryOpts{\n//              Subsystem:   \"http\",\n//              ConstLabels: prometheus.Labels{\"handler\": handlerName},\n//         },\n//         handler,\n//     )\n//\n// Technical detail: \"requests_total\" is a CounterVec, not a SummaryVec, so it\n// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally,\n// and all its fields are set to the equally named fields in the provided\n// SummaryOpts.\n//\n// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as\n// InstrumentHandler is. Use the tooling provided in package promhttp instead.\nfunc InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {\n\treturn InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)\n}\n\n// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares\n// the same issues) but provides more flexibility (at the cost of a more complex\n// call syntax). See InstrumentHandlerWithOpts for details how the provided\n// SummaryOpts are used.\n//\n// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons\n// as InstrumentHandler is. 
Use the tooling provided in package promhttp instead.\nfunc InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {\n\treqCnt := NewCounterVec(\n\t\tCounterOpts{\n\t\t\tNamespace:   opts.Namespace,\n\t\t\tSubsystem:   opts.Subsystem,\n\t\t\tName:        \"requests_total\",\n\t\t\tHelp:        \"Total number of HTTP requests made.\",\n\t\t\tConstLabels: opts.ConstLabels,\n\t\t},\n\t\tinstLabels,\n\t)\n\tif err := Register(reqCnt); err != nil {\n\t\tif are, ok := err.(AlreadyRegisteredError); ok {\n\t\t\treqCnt = are.ExistingCollector.(*CounterVec)\n\t\t} else {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\topts.Name = \"request_duration_microseconds\"\n\topts.Help = \"The HTTP request latencies in microseconds.\"\n\treqDur := NewSummary(opts)\n\tif err := Register(reqDur); err != nil {\n\t\tif are, ok := err.(AlreadyRegisteredError); ok {\n\t\t\treqDur = are.ExistingCollector.(Summary)\n\t\t} else {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\topts.Name = \"request_size_bytes\"\n\topts.Help = \"The HTTP request sizes in bytes.\"\n\treqSz := NewSummary(opts)\n\tif err := Register(reqSz); err != nil {\n\t\tif are, ok := err.(AlreadyRegisteredError); ok {\n\t\t\treqSz = are.ExistingCollector.(Summary)\n\t\t} else {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\topts.Name = \"response_size_bytes\"\n\topts.Help = \"The HTTP response sizes in bytes.\"\n\tresSz := NewSummary(opts)\n\tif err := Register(resSz); err != nil {\n\t\tif are, ok := err.(AlreadyRegisteredError); ok {\n\t\t\tresSz = are.ExistingCollector.(Summary)\n\t\t} else {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tnow := time.Now()\n\n\t\tdelegate := &responseWriterDelegator{ResponseWriter: w}\n\t\tout := computeApproximateRequestSize(r)\n\n\t\t_, cn := w.(http.CloseNotifier)\n\t\t_, fl := w.(http.Flusher)\n\t\t_, hj := w.(http.Hijacker)\n\t\t_, rf := w.(io.ReaderFrom)\n\t\tvar rw http.ResponseWriter\n\t\tif cn && fl && hj && rf {\n\t\t\trw = &fancyResponseWriterDelegator{delegate}\n\t\t} else {\n\t\t\trw = delegate\n\t\t}\n\t\thandlerFunc(rw, r)\n\n\t\telapsed := float64(time.Since(now)) / float64(time.Microsecond)\n\n\t\tmethod := sanitizeMethod(r.Method)\n\t\tcode := sanitizeCode(delegate.status)\n\t\treqCnt.WithLabelValues(method, code).Inc()\n\t\treqDur.Observe(elapsed)\n\t\tresSz.Observe(float64(delegate.written))\n\t\treqSz.Observe(float64(<-out))\n\t})\n}\n\nfunc computeApproximateRequestSize(r *http.Request) <-chan int {\n\t// Get URL length in current go routine for avoiding a race condition.\n\t// HandlerFunc that runs in parallel may modify the URL.\n\ts := 0\n\tif r.URL != nil {\n\t\ts += len(r.URL.String())\n\t}\n\n\tout := make(chan int, 1)\n\n\tgo func() {\n\t\ts += len(r.Method)\n\t\ts += len(r.Proto)\n\t\tfor name, values := range r.Header {\n\t\t\ts += len(name)\n\t\t\tfor _, value := range values {\n\t\t\t\ts += len(value)\n\t\t\t}\n\t\t}\n\t\ts += len(r.Host)\n\n\t\t// N.B. 
r.Form and r.MultipartForm are assumed to be included in r.URL.\n\n\t\tif r.ContentLength != -1 {\n\t\t\ts += int(r.ContentLength)\n\t\t}\n\t\tout <- s\n\t\tclose(out)\n\t}()\n\n\treturn out\n}\n\ntype responseWriterDelegator struct {\n\thttp.ResponseWriter\n\n\thandler, method string\n\tstatus          int\n\twritten         int64\n\twroteHeader     bool\n}\n\nfunc (r *responseWriterDelegator) WriteHeader(code int) {\n\tr.status = code\n\tr.wroteHeader = true\n\tr.ResponseWriter.WriteHeader(code)\n}\n\nfunc (r *responseWriterDelegator) Write(b []byte) (int, error) {\n\tif !r.wroteHeader {\n\t\tr.WriteHeader(http.StatusOK)\n\t}\n\tn, err := r.ResponseWriter.Write(b)\n\tr.written += int64(n)\n\treturn n, err\n}\n\ntype fancyResponseWriterDelegator struct {\n\t*responseWriterDelegator\n}\n\nfunc (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool {\n\treturn f.ResponseWriter.(http.CloseNotifier).CloseNotify()\n}\n\nfunc (f *fancyResponseWriterDelegator) Flush() {\n\tf.ResponseWriter.(http.Flusher).Flush()\n}\n\nfunc (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\treturn f.ResponseWriter.(http.Hijacker).Hijack()\n}\n\nfunc (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) {\n\tif !f.wroteHeader {\n\t\tf.WriteHeader(http.StatusOK)\n\t}\n\tn, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r)\n\tf.written += n\n\treturn n, err\n}\n\nfunc sanitizeMethod(m string) string {\n\tswitch m {\n\tcase \"GET\", \"get\":\n\t\treturn \"get\"\n\tcase \"PUT\", \"put\":\n\t\treturn \"put\"\n\tcase \"HEAD\", \"head\":\n\t\treturn \"head\"\n\tcase \"POST\", \"post\":\n\t\treturn \"post\"\n\tcase \"DELETE\", \"delete\":\n\t\treturn \"delete\"\n\tcase \"CONNECT\", \"connect\":\n\t\treturn \"connect\"\n\tcase \"OPTIONS\", \"options\":\n\t\treturn \"options\"\n\tcase \"NOTIFY\", \"notify\":\n\t\treturn \"notify\"\n\tdefault:\n\t\treturn strings.ToLower(m)\n\t}\n}\n\nfunc sanitizeCode(s int) string {\n\tswitch s {\n\tcase 100:\n\t\treturn \"100\"\n\tcase 101:\n\t\treturn \"101\"\n\n\tcase 200:\n\t\treturn \"200\"\n\tcase 201:\n\t\treturn \"201\"\n\tcase 202:\n\t\treturn \"202\"\n\tcase 203:\n\t\treturn \"203\"\n\tcase 204:\n\t\treturn \"204\"\n\tcase 205:\n\t\treturn \"205\"\n\tcase 206:\n\t\treturn \"206\"\n\n\tcase 300:\n\t\treturn \"300\"\n\tcase 301:\n\t\treturn \"301\"\n\tcase 302:\n\t\treturn \"302\"\n\tcase 304:\n\t\treturn \"304\"\n\tcase 305:\n\t\treturn \"305\"\n\tcase 307:\n\t\treturn \"307\"\n\n\tcase 400:\n\t\treturn \"400\"\n\tcase 401:\n\t\treturn \"401\"\n\tcase 402:\n\t\treturn \"402\"\n\tcase 403:\n\t\treturn \"403\"\n\tcase 404:\n\t\treturn \"404\"\n\tcase 405:\n\t\treturn \"405\"\n\tcase 406:\n\t\treturn \"406\"\n\tcase 407:\n\t\treturn \"407\"\n\tcase 408:\n\t\treturn \"408\"\n\tcase 409:\n\t\treturn \"409\"\n\tcase 410:\n\t\treturn \"410\"\n\tcase 411:\n\t\treturn \"411\"\n\tcase 412:\n\t\treturn \"412\"\n\tcase 413:\n\t\treturn \"413\"\n\tcase 414:\n\t\treturn \"414\"\n\tcase 415:\n\t\treturn \"415\"\n\tcase 416:\n\t\treturn \"416\"\n\tcase 417:\n\t\treturn \"417\"\n\tcase 418:\n\t\treturn \"418\"\n\n\tcase 500:\n\t\treturn \"500\"\n\tcase 501:\n\t\treturn \"501\"\n\tcase 502:\n\t\treturn \"502\"\n\tcase 503:\n\t\treturn \"503\"\n\tcase 504:\n\t\treturn \"504\"\n\tcase 505:\n\t\treturn \"505\"\n\n\tcase 428:\n\t\treturn \"428\"\n\tcase 429:\n\t\treturn \"429\"\n\tcase 431:\n\t\treturn \"431\"\n\tcase 511:\n\t\treturn \"511\"\n\n\tdefault:\n\t\treturn strconv.Itoa(s)\n\t}\n}\n"
  },
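  {
    "path": "examples/gzip_metrics/main.go",
    "content": "// Hypothetical example file, not part of the vendored sources: a sketch of\n// the gzip negotiation performed by decorateWriter in http.go above. A client\n// sending Accept-Encoding: gzip gets a compressed body and a matching\n// Content-Encoding header. UninstrumentedHandler is deprecated upstream and\n// is used here only because it is the handler that file defines.\npackage main\n\nimport (\n\t\"compress/gzip\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nfunc main() {\n\tsrv := httptest.NewServer(prometheus.UninstrumentedHandler())\n\tdefer srv.Close()\n\n\treq, err := http.NewRequest(\"GET\", srv.URL, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treq.Header.Set(\"Accept-Encoding\", \"gzip\")\n\n\t// Use a bare Transport so the standard library does not transparently\n\t// decompress the response for us.\n\tresp, err := (&http.Transport{DisableCompression: true}).RoundTrip(req)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer resp.Body.Close()\n\n\tfmt.Println(\"Content-Encoding:\", resp.Header.Get(\"Content-Encoding\")) // gzip\n\n\tzr, err := gzip.NewReader(resp.Body)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tbody, err := ioutil.ReadAll(zr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(\"decompressed metrics bytes:\", len(body))\n}\n"
  },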
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/http_test.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus\n\nimport (\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"testing\"\n\t\"time\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n)\n\ntype respBody string\n\nfunc (b respBody) ServeHTTP(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusTeapot)\n\tw.Write([]byte(b))\n}\n\nfunc TestInstrumentHandler(t *testing.T) {\n\tdefer func(n nower) {\n\t\tnow = n.(nower)\n\t}(now)\n\n\tinstant := time.Now()\n\tend := instant.Add(30 * time.Second)\n\tnow = nowSeries(instant, end)\n\trespBody := respBody(\"Howdy there!\")\n\n\thndlr := InstrumentHandler(\"test-handler\", respBody)\n\n\topts := SummaryOpts{\n\t\tSubsystem:   \"http\",\n\t\tConstLabels: Labels{\"handler\": \"test-handler\"},\n\t\tObjectives:  map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},\n\t}\n\n\treqCnt := NewCounterVec(\n\t\tCounterOpts{\n\t\t\tNamespace:   opts.Namespace,\n\t\t\tSubsystem:   opts.Subsystem,\n\t\t\tName:        \"requests_total\",\n\t\t\tHelp:        \"Total number of HTTP requests made.\",\n\t\t\tConstLabels: opts.ConstLabels,\n\t\t},\n\t\tinstLabels,\n\t)\n\terr := Register(reqCnt)\n\tif err == nil {\n\t\tt.Fatal(\"expected reqCnt to be registered already\")\n\t}\n\tif are, ok := err.(AlreadyRegisteredError); ok {\n\t\treqCnt = are.ExistingCollector.(*CounterVec)\n\t} else {\n\t\tt.Fatal(\"unexpected registration error:\", err)\n\t}\n\n\topts.Name = \"request_duration_microseconds\"\n\topts.Help = \"The HTTP request latencies in microseconds.\"\n\treqDur := NewSummary(opts)\n\terr = Register(reqDur)\n\tif err == nil {\n\t\tt.Fatal(\"expected reqDur to be registered already\")\n\t}\n\tif are, ok := err.(AlreadyRegisteredError); ok {\n\t\treqDur = are.ExistingCollector.(Summary)\n\t} else {\n\t\tt.Fatal(\"unexpected registration error:\", err)\n\t}\n\n\topts.Name = \"request_size_bytes\"\n\topts.Help = \"The HTTP request sizes in bytes.\"\n\treqSz := NewSummary(opts)\n\terr = Register(reqSz)\n\tif err == nil {\n\t\tt.Fatal(\"expected reqSz to be registered already\")\n\t}\n\tif _, ok := err.(AlreadyRegisteredError); !ok {\n\t\tt.Fatal(\"unexpected registration error:\", err)\n\t}\n\n\topts.Name = \"response_size_bytes\"\n\topts.Help = \"The HTTP response sizes in bytes.\"\n\tresSz := NewSummary(opts)\n\terr = Register(resSz)\n\tif err == nil {\n\t\tt.Fatal(\"expected resSz to be registered already\")\n\t}\n\tif _, ok := err.(AlreadyRegisteredError); !ok {\n\t\tt.Fatal(\"unexpected registration error:\", err)\n\t}\n\n\treqCnt.Reset()\n\n\tresp := httptest.NewRecorder()\n\treq := &http.Request{\n\t\tMethod: \"GET\",\n\t}\n\n\thndlr.ServeHTTP(resp, req)\n\n\tif resp.Code != http.StatusTeapot {\n\t\tt.Fatalf(\"expected status %d, got %d\", http.StatusTeapot, resp.Code)\n\t}\n\tif string(resp.Body.Bytes()) != \"Howdy there!\" {\n\t\tt.Fatalf(\"expected body %s, got %s\", \"Howdy there!\", string(resp.Body.Bytes()))\n\t}\n\n\tout := 
&dto.Metric{}\n\treqDur.Write(out)\n\tif want, got := \"test-handler\", out.Label[0].GetValue(); want != got {\n\t\tt.Errorf(\"want label value %q in reqDur, got %q\", want, got)\n\t}\n\tif want, got := uint64(1), out.Summary.GetSampleCount(); want != got {\n\t\tt.Errorf(\"want sample count %d in reqDur, got %d\", want, got)\n\t}\n\n\tout.Reset()\n\tif want, got := 1, len(reqCnt.children); want != got {\n\t\tt.Errorf(\"want %d children in reqCnt, got %d\", want, got)\n\t}\n\tcnt, err := reqCnt.GetMetricWithLabelValues(\"get\", \"418\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcnt.Write(out)\n\tif want, got := \"418\", out.Label[0].GetValue(); want != got {\n\t\tt.Errorf(\"want label value %q in reqCnt, got %q\", want, got)\n\t}\n\tif want, got := \"test-handler\", out.Label[1].GetValue(); want != got {\n\t\tt.Errorf(\"want label value %q in reqCnt, got %q\", want, got)\n\t}\n\tif want, got := \"get\", out.Label[2].GetValue(); want != got {\n\t\tt.Errorf(\"want label value %q in reqCnt, got %q\", want, got)\n\t}\n\tif out.Counter == nil {\n\t\tt.Fatal(\"expected non-nil counter in reqCnt\")\n\t}\n\tif want, got := 1., out.Counter.GetValue(); want != got {\n\t\tt.Errorf(\"want reqCnt of %f, got %f\", want, got)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/labels.go",
    "content": "package prometheus\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"unicode/utf8\"\n\n\t\"github.com/prometheus/common/model\"\n)\n\n// Labels represents a collection of label name -> value mappings. This type is\n// commonly used with the With(Labels) and GetMetricWith(Labels) methods of\n// metric vector Collectors, e.g.:\n//     myVec.With(Labels{\"code\": \"404\", \"method\": \"GET\"}).Add(42)\n//\n// The other use-case is the specification of constant label pairs in Opts or to\n// create a Desc.\ntype Labels map[string]string\n\n// reservedLabelPrefix is a prefix which is not legal in user-supplied\n// label names.\nconst reservedLabelPrefix = \"__\"\n\nvar errInconsistentCardinality = errors.New(\"inconsistent label cardinality\")\n\nfunc validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {\n\tif len(labels) != expectedNumberOfValues {\n\t\treturn errInconsistentCardinality\n\t}\n\n\tfor name, val := range labels {\n\t\tif !utf8.ValidString(val) {\n\t\t\treturn fmt.Errorf(\"label %s: value %q is not valid UTF-8\", name, val)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc validateLabelValues(vals []string, expectedNumberOfValues int) error {\n\tif len(vals) != expectedNumberOfValues {\n\t\treturn errInconsistentCardinality\n\t}\n\n\tfor _, val := range vals {\n\t\tif !utf8.ValidString(val) {\n\t\t\treturn fmt.Errorf(\"label value %q is not valid UTF-8\", val)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc checkLabelName(l string) bool {\n\treturn model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix)\n}\n"
  },
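  {
    "path": "examples/labels/main.go",
    "content": "// Hypothetical example file, not part of the vendored sources: the Labels\n// map from labels.go above in use with a metric vector, mirroring the example\n// in its doc comment. Metric and label names are invented for illustration.\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nfunc main() {\n\treqs := prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tName: \"demo_requests_total\",\n\t\t\tHelp: \"Requests partitioned by status code and method.\",\n\t\t},\n\t\t[]string{\"code\", \"method\"},\n\t)\n\n\t// With(Labels) panics on a bad label set; GetMetricWith returns the\n\t// error instead, e.g. the inconsistent-cardinality error produced by\n\t// the validation helpers in labels.go.\n\treqs.With(prometheus.Labels{\"code\": \"404\", \"method\": \"GET\"}).Add(42)\n\n\tif _, err := reqs.GetMetricWith(prometheus.Labels{\"code\": \"404\"}); err != nil {\n\t\tfmt.Println(\"expected error:\", err)\n\t}\n}\n"
  },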
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/metric.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus\n\nimport (\n\t\"strings\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n)\n\nconst separatorByte byte = 255\n\n// A Metric models a single sample value with its meta data being exported to\n// Prometheus. Implementations of Metric in this package are Gauge, Counter,\n// Histogram, Summary, and Untyped.\ntype Metric interface {\n\t// Desc returns the descriptor for the Metric. This method idempotently\n\t// returns the same descriptor throughout the lifetime of the\n\t// Metric. The returned descriptor is immutable by contract. A Metric\n\t// unable to describe itself must return an invalid descriptor (created\n\t// with NewInvalidDesc).\n\tDesc() *Desc\n\t// Write encodes the Metric into a \"Metric\" Protocol Buffer data\n\t// transmission object.\n\t//\n\t// Metric implementations must observe concurrency safety as reads of\n\t// this metric may occur at any time, and any blocking occurs at the\n\t// expense of total performance of rendering all registered\n\t// metrics. Ideally, Metric implementations should support concurrent\n\t// readers.\n\t//\n\t// While populating dto.Metric, it is the responsibility of the\n\t// implementation to ensure validity of the Metric protobuf (like valid\n\t// UTF-8 strings or syntactically valid metric and label names). It is\n\t// recommended to sort labels lexicographically. (Implementers may find\n\t// LabelPairSorter useful for that.) Callers of Write should still make\n\t// sure of sorting if they depend on it.\n\tWrite(*dto.Metric) error\n\t// TODO(beorn7): The original rationale of passing in a pre-allocated\n\t// dto.Metric protobuf to save allocations has disappeared. The\n\t// signature of this method should be changed to \"Write() (*dto.Metric,\n\t// error)\".\n}\n\n// Opts bundles the options for creating most Metric types. Each metric\n// implementation XXX has its own XXXOpts type, but in most cases, it is just be\n// an alias of this type (which might change when the requirement arises.)\n//\n// It is mandatory to set Name and Help to a non-empty string. All other fields\n// are optional and can safely be left at their zero value.\ntype Opts struct {\n\t// Namespace, Subsystem, and Name are components of the fully-qualified\n\t// name of the Metric (created by joining these components with\n\t// \"_\"). Only Name is mandatory, the others merely help structuring the\n\t// name. Note that the fully-qualified name of the metric must be a\n\t// valid Prometheus metric name.\n\tNamespace string\n\tSubsystem string\n\tName      string\n\n\t// Help provides information about this metric. Mandatory!\n\t//\n\t// Metrics with the same fully-qualified name must have the same Help\n\t// string.\n\tHelp string\n\n\t// ConstLabels are used to attach fixed labels to this metric. 
Metrics\n\t// with the same fully-qualified name must have the same label names in\n\t// their ConstLabels.\n\t//\n\t// Note that in most cases, labels have a value that varies during the\n\t// lifetime of a process. Those labels are usually managed with a metric\n\t// vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels\n\t// serve only special purposes. One is for the special case where the\n\t// value of a label does not change during the lifetime of a process,\n\t// e.g. if the revision of the running binary is put into a\n\t// label. Another, more advanced purpose is if more than one Collector\n\t// needs to collect Metrics with the same fully-qualified name. In that\n\t// case, those Metrics must differ in the values of their\n\t// ConstLabels. See the Collector examples.\n\t//\n\t// If the value of a label never changes (not even between binaries),\n\t// that label most likely should not be a label at all (but part of the\n\t// metric name).\n\tConstLabels Labels\n}\n\n// BuildFQName joins the given three name components by \"_\". Empty name\n// components are ignored. If the name parameter itself is empty, an empty\n// string is returned, no matter what. Metric implementations included in this\n// library use this function internally to generate the fully-qualified metric\n// name from the name component in their Opts. Users of the library will only\n// need this function if they implement their own Metric or instantiate a Desc\n// (with NewDesc) directly.\nfunc BuildFQName(namespace, subsystem, name string) string {\n\tif name == \"\" {\n\t\treturn \"\"\n\t}\n\tswitch {\n\tcase namespace != \"\" && subsystem != \"\":\n\t\treturn strings.Join([]string{namespace, subsystem, name}, \"_\")\n\tcase namespace != \"\":\n\t\treturn strings.Join([]string{namespace, name}, \"_\")\n\tcase subsystem != \"\":\n\t\treturn strings.Join([]string{subsystem, name}, \"_\")\n\t}\n\treturn name\n}\n\n// LabelPairSorter implements sort.Interface. It is used to sort a slice of\n// dto.LabelPair pointers. This is useful for implementing the Write method of\n// custom metrics.\ntype LabelPairSorter []*dto.LabelPair\n\nfunc (s LabelPairSorter) Len() int {\n\treturn len(s)\n}\n\nfunc (s LabelPairSorter) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n}\n\nfunc (s LabelPairSorter) Less(i, j int) bool {\n\treturn s[i].GetName() < s[j].GetName()\n}\n\ntype hashSorter []uint64\n\nfunc (s hashSorter) Len() int {\n\treturn len(s)\n}\n\nfunc (s hashSorter) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n}\n\nfunc (s hashSorter) Less(i, j int) bool {\n\treturn s[i] < s[j]\n}\n\ntype invalidMetric struct {\n\tdesc *Desc\n\terr  error\n}\n\n// NewInvalidMetric returns a metric whose Write method always returns the\n// provided error. It is useful if a Collector finds itself unable to collect\n// a metric and wishes to report an error to the registry.\nfunc NewInvalidMetric(desc *Desc, err error) Metric {\n\treturn &invalidMetric{desc, err}\n}\n\nfunc (m *invalidMetric) Desc() *Desc { return m.desc }\n\nfunc (m *invalidMetric) Write(*dto.Metric) error { return m.err }\n"
  },
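  {
    "path": "examples/fqname/main.go",
    "content": "// Hypothetical example file, not part of the vendored sources: BuildFQName\n// from metric.go above joins non-empty name components with \"_\", and an\n// empty name always yields an empty string.\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nfunc main() {\n\tfmt.Println(prometheus.BuildFQName(\"morgoth\", \"http\", \"requests_total\")) // morgoth_http_requests_total\n\tfmt.Println(prometheus.BuildFQName(\"\", \"http\", \"requests_total\")) // http_requests_total\n\tfmt.Println(prometheus.BuildFQName(\"morgoth\", \"\", \"requests_total\")) // morgoth_requests_total\n\tfmt.Println(prometheus.BuildFQName(\"morgoth\", \"http\", \"\")) // (empty)\n}\n"
  },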
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/metric_test.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus\n\nimport \"testing\"\n\nfunc TestBuildFQName(t *testing.T) {\n\tscenarios := []struct{ namespace, subsystem, name, result string }{\n\t\t{\"a\", \"b\", \"c\", \"a_b_c\"},\n\t\t{\"\", \"b\", \"c\", \"b_c\"},\n\t\t{\"a\", \"\", \"c\", \"a_c\"},\n\t\t{\"\", \"\", \"c\", \"c\"},\n\t\t{\"a\", \"b\", \"\", \"\"},\n\t\t{\"a\", \"\", \"\", \"\"},\n\t\t{\"\", \"b\", \"\", \"\"},\n\t\t{\" \", \"\", \"\", \"\"},\n\t}\n\n\tfor i, s := range scenarios {\n\t\tif want, got := s.result, BuildFQName(s.namespace, s.subsystem, s.name); want != got {\n\t\t\tt.Errorf(\"%d. want %s, got %s\", i, want, got)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/observer.go",
    "content": "// Copyright 2017 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus\n\n// Observer is the interface that wraps the Observe method, which is used by\n// Histogram and Summary to add observations.\ntype Observer interface {\n\tObserve(float64)\n}\n\n// The ObserverFunc type is an adapter to allow the use of ordinary\n// functions as Observers. If f is a function with the appropriate\n// signature, ObserverFunc(f) is an Observer that calls f.\n//\n// This adapter is usually used in connection with the Timer type, and there are\n// two general use cases:\n//\n// The most common one is to use a Gauge as the Observer for a Timer.\n// See the \"Gauge\" Timer example.\n//\n// The more advanced use case is to create a function that dynamically decides\n// which Observer to use for observing the duration. See the \"Complex\" Timer\n// example.\ntype ObserverFunc func(float64)\n\n// Observe calls f(value). It implements Observer.\nfunc (f ObserverFunc) Observe(value float64) {\n\tf(value)\n}\n\n// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`.\ntype ObserverVec interface {\n\tGetMetricWith(Labels) (Observer, error)\n\tGetMetricWithLabelValues(lvs ...string) (Observer, error)\n\tWith(Labels) Observer\n\tWithLabelValues(...string) Observer\n\n\tCollector\n}\n"
  },
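  {
    "path": "examples/timer_gauge/main.go",
    "content": "// Hypothetical example file, not part of the vendored sources: the \"Gauge\"\n// Timer use case mentioned in the observer.go doc comment above, with\n// ObserverFunc adapting Gauge.Set into an Observer. It assumes the Timer\n// helper (NewTimer/ObserveDuration) that ships alongside observer.go in\n// client_golang.\npackage main\n\nimport (\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nfunc main() {\n\tlastRun := prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tName: \"demo_last_run_duration_seconds\",\n\t\tHelp: \"Duration of the last run.\",\n\t})\n\n\t// ObserverFunc(lastRun.Set) satisfies Observer, so the timer's single\n\t// observation lands in the gauge instead of a Histogram or Summary.\n\ttimer := prometheus.NewTimer(prometheus.ObserverFunc(lastRun.Set))\n\ttime.Sleep(10 * time.Millisecond) // the work being timed\n\ttimer.ObserveDuration()\n}\n"
  },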
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/process_collector.go",
    "content": "// Copyright 2015 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus\n\nimport \"github.com/prometheus/procfs\"\n\ntype processCollector struct {\n\tpid             int\n\tcollectFn       func(chan<- Metric)\n\tpidFn           func() (int, error)\n\tcpuTotal        *Desc\n\topenFDs, maxFDs *Desc\n\tvsize, rss      *Desc\n\tstartTime       *Desc\n}\n\n// NewProcessCollector returns a collector which exports the current state of\n// process metrics including cpu, memory and file descriptor usage as well as\n// the process start time for the given process id under the given namespace.\nfunc NewProcessCollector(pid int, namespace string) Collector {\n\treturn NewProcessCollectorPIDFn(\n\t\tfunc() (int, error) { return pid, nil },\n\t\tnamespace,\n\t)\n}\n\n// NewProcessCollectorPIDFn returns a collector which exports the current state\n// of process metrics including cpu, memory and file descriptor usage as well\n// as the process start time under the given namespace. The given pidFn is\n// called on each collect and is used to determine the process to export\n// metrics for.\nfunc NewProcessCollectorPIDFn(\n\tpidFn func() (int, error),\n\tnamespace string,\n) Collector {\n\tns := \"\"\n\tif len(namespace) > 0 {\n\t\tns = namespace + \"_\"\n\t}\n\n\tc := processCollector{\n\t\tpidFn:     pidFn,\n\t\tcollectFn: func(chan<- Metric) {},\n\n\t\tcpuTotal: NewDesc(\n\t\t\tns+\"process_cpu_seconds_total\",\n\t\t\t\"Total user and system CPU time spent in seconds.\",\n\t\t\tnil, nil,\n\t\t),\n\t\topenFDs: NewDesc(\n\t\t\tns+\"process_open_fds\",\n\t\t\t\"Number of open file descriptors.\",\n\t\t\tnil, nil,\n\t\t),\n\t\tmaxFDs: NewDesc(\n\t\t\tns+\"process_max_fds\",\n\t\t\t\"Maximum number of open file descriptors.\",\n\t\t\tnil, nil,\n\t\t),\n\t\tvsize: NewDesc(\n\t\t\tns+\"process_virtual_memory_bytes\",\n\t\t\t\"Virtual memory size in bytes.\",\n\t\t\tnil, nil,\n\t\t),\n\t\trss: NewDesc(\n\t\t\tns+\"process_resident_memory_bytes\",\n\t\t\t\"Resident memory size in bytes.\",\n\t\t\tnil, nil,\n\t\t),\n\t\tstartTime: NewDesc(\n\t\t\tns+\"process_start_time_seconds\",\n\t\t\t\"Start time of the process since unix epoch in seconds.\",\n\t\t\tnil, nil,\n\t\t),\n\t}\n\n\t// Set up process metric collection if supported by the runtime.\n\tif _, err := procfs.NewStat(); err == nil {\n\t\tc.collectFn = c.processCollect\n\t}\n\n\treturn &c\n}\n\n// Describe returns all descriptions of the collector.\nfunc (c *processCollector) Describe(ch chan<- *Desc) {\n\tch <- c.cpuTotal\n\tch <- c.openFDs\n\tch <- c.maxFDs\n\tch <- c.vsize\n\tch <- c.rss\n\tch <- c.startTime\n}\n\n// Collect returns the current state of all metrics of the collector.\nfunc (c *processCollector) Collect(ch chan<- Metric) {\n\tc.collectFn(ch)\n}\n\n// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the\n// client allows users to configure the error behavior.\nfunc (c *processCollector) processCollect(ch chan<- Metric) {\n\tpid, err := c.pidFn()\n\tif err 
!= nil {\n\t\treturn\n\t}\n\n\tp, err := procfs.NewProc(pid)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif stat, err := p.NewStat(); err == nil {\n\t\tch <- MustNewConstMetric(c.cpuTotal, CounterValue, stat.CPUTime())\n\t\tch <- MustNewConstMetric(c.vsize, GaugeValue, float64(stat.VirtualMemory()))\n\t\tch <- MustNewConstMetric(c.rss, GaugeValue, float64(stat.ResidentMemory()))\n\t\tif startTime, err := stat.StartTime(); err == nil {\n\t\t\tch <- MustNewConstMetric(c.startTime, GaugeValue, startTime)\n\t\t}\n\t}\n\n\tif fds, err := p.FileDescriptorsLen(); err == nil {\n\t\tch <- MustNewConstMetric(c.openFDs, GaugeValue, float64(fds))\n\t}\n\n\tif limits, err := p.NewLimits(); err == nil {\n\t\tch <- MustNewConstMetric(c.maxFDs, GaugeValue, float64(limits.OpenFiles))\n\t}\n}\n"
  },
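  {
    "path": "examples/process_metrics/main.go",
    "content": "// Hypothetical example file, not part of the vendored sources: registering\n// the process collector from process_collector.go above on a dedicated\n// registry. On platforms without procfs the collector silently exports\n// nothing, per the collectFn fallback in that file.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nfunc main() {\n\treg := prometheus.NewRegistry()\n\tif err := reg.Register(prometheus.NewProcessCollector(os.Getpid(), \"morgoth\")); err != nil {\n\t\tpanic(err)\n\t}\n\n\tmfs, err := reg.Gather()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfor _, mf := range mfs {\n\t\tfmt.Println(mf.GetName()) // e.g. morgoth_process_cpu_seconds_total\n\t}\n}\n"
  },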
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/process_collector_test.go",
    "content": "package prometheus\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\t\"regexp\"\n\t\"testing\"\n\n\t\"github.com/prometheus/common/expfmt\"\n\t\"github.com/prometheus/procfs\"\n)\n\nfunc TestProcessCollector(t *testing.T) {\n\tif _, err := procfs.Self(); err != nil {\n\t\tt.Skipf(\"skipping TestProcessCollector, procfs not available: %s\", err)\n\t}\n\n\tregistry := NewRegistry()\n\tif err := registry.Register(NewProcessCollector(os.Getpid(), \"\")); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := registry.Register(NewProcessCollectorPIDFn(\n\t\tfunc() (int, error) { return os.Getpid(), nil }, \"foobar\"),\n\t); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tmfs, err := registry.Gather()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tvar buf bytes.Buffer\n\tfor _, mf := range mfs {\n\t\tif _, err := expfmt.MetricFamilyToText(&buf, mf); err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t}\n\n\tfor _, re := range []*regexp.Regexp{\n\t\tregexp.MustCompile(\"\\nprocess_cpu_seconds_total [0-9]\"),\n\t\tregexp.MustCompile(\"\\nprocess_max_fds [1-9]\"),\n\t\tregexp.MustCompile(\"\\nprocess_open_fds [1-9]\"),\n\t\tregexp.MustCompile(\"\\nprocess_virtual_memory_bytes [1-9]\"),\n\t\tregexp.MustCompile(\"\\nprocess_resident_memory_bytes [1-9]\"),\n\t\tregexp.MustCompile(\"\\nprocess_start_time_seconds [0-9.]{10,}\"),\n\t\tregexp.MustCompile(\"\\nfoobar_process_cpu_seconds_total [0-9]\"),\n\t\tregexp.MustCompile(\"\\nfoobar_process_max_fds [1-9]\"),\n\t\tregexp.MustCompile(\"\\nfoobar_process_open_fds [1-9]\"),\n\t\tregexp.MustCompile(\"\\nfoobar_process_virtual_memory_bytes [1-9]\"),\n\t\tregexp.MustCompile(\"\\nfoobar_process_resident_memory_bytes [1-9]\"),\n\t\tregexp.MustCompile(\"\\nfoobar_process_start_time_seconds [0-9.]{10,}\"),\n\t} {\n\t\tif !re.Match(buf.Bytes()) {\n\t\t\tt.Errorf(\"want body to match %s\\n%s\", re, buf.String())\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go",
    "content": "// Copyright 2017 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage promhttp\n\nimport (\n\t\"bufio\"\n\t\"io\"\n\t\"net\"\n\t\"net/http\"\n)\n\nconst (\n\tcloseNotifier = 1 << iota\n\tflusher\n\thijacker\n\treaderFrom\n\tpusher\n)\n\ntype delegator interface {\n\thttp.ResponseWriter\n\n\tStatus() int\n\tWritten() int64\n}\n\ntype responseWriterDelegator struct {\n\thttp.ResponseWriter\n\n\thandler, method    string\n\tstatus             int\n\twritten            int64\n\twroteHeader        bool\n\tobserveWriteHeader func(int)\n}\n\nfunc (r *responseWriterDelegator) Status() int {\n\treturn r.status\n}\n\nfunc (r *responseWriterDelegator) Written() int64 {\n\treturn r.written\n}\n\nfunc (r *responseWriterDelegator) WriteHeader(code int) {\n\tr.status = code\n\tr.wroteHeader = true\n\tr.ResponseWriter.WriteHeader(code)\n\tif r.observeWriteHeader != nil {\n\t\tr.observeWriteHeader(code)\n\t}\n}\n\nfunc (r *responseWriterDelegator) Write(b []byte) (int, error) {\n\tif !r.wroteHeader {\n\t\tr.WriteHeader(http.StatusOK)\n\t}\n\tn, err := r.ResponseWriter.Write(b)\n\tr.written += int64(n)\n\treturn n, err\n}\n\ntype closeNotifierDelegator struct{ *responseWriterDelegator }\ntype flusherDelegator struct{ *responseWriterDelegator }\ntype hijackerDelegator struct{ *responseWriterDelegator }\ntype readerFromDelegator struct{ *responseWriterDelegator }\n\nfunc (d *closeNotifierDelegator) CloseNotify() <-chan bool {\n\treturn d.ResponseWriter.(http.CloseNotifier).CloseNotify()\n}\nfunc (d *flusherDelegator) Flush() {\n\td.ResponseWriter.(http.Flusher).Flush()\n}\nfunc (d *hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {\n\treturn d.ResponseWriter.(http.Hijacker).Hijack()\n}\nfunc (d *readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {\n\tif !d.wroteHeader {\n\t\td.WriteHeader(http.StatusOK)\n\t}\n\tn, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re)\n\td.written += n\n\treturn n, err\n}\n\nvar pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)\n\nfunc init() {\n\t// TODO(beorn7): Code generation would help here.\n\tpickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0\n\t\treturn d\n\t}\n\tpickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1\n\t\treturn closeNotifierDelegator{d}\n\t}\n\tpickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2\n\t\treturn flusherDelegator{d}\n\t}\n\tpickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3\n\t\treturn struct {\n\t\t\t*responseWriterDelegator\n\t\t\thttp.Flusher\n\t\t\thttp.CloseNotifier\n\t\t}{d, &flusherDelegator{d}, &closeNotifierDelegator{d}}\n\t}\n\tpickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4\n\t\treturn hijackerDelegator{d}\n\t}\n\tpickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5\n\t\treturn struct 
{\n\t\t\t*responseWriterDelegator\n\t\t\thttp.Hijacker\n\t\t\thttp.CloseNotifier\n\t\t}{d, &hijackerDelegator{d}, &closeNotifierDelegator{d}}\n\t}\n\tpickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6\n\t\treturn struct {\n\t\t\t*responseWriterDelegator\n\t\t\thttp.Hijacker\n\t\t\thttp.Flusher\n\t\t}{d, &hijackerDelegator{d}, &flusherDelegator{d}}\n\t}\n\tpickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7\n\t\treturn struct {\n\t\t\t*responseWriterDelegator\n\t\t\thttp.Hijacker\n\t\t\thttp.Flusher\n\t\t\thttp.CloseNotifier\n\t\t}{d, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}\n\t}\n\tpickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8\n\t\treturn readerFromDelegator{d}\n\t}\n\tpickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9\n\t\treturn struct {\n\t\t\t*responseWriterDelegator\n\t\t\tio.ReaderFrom\n\t\t\thttp.CloseNotifier\n\t\t}{d, &readerFromDelegator{d}, &closeNotifierDelegator{d}}\n\t}\n\tpickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10\n\t\treturn struct {\n\t\t\t*responseWriterDelegator\n\t\t\tio.ReaderFrom\n\t\t\thttp.Flusher\n\t\t}{d, &readerFromDelegator{d}, &flusherDelegator{d}}\n\t}\n\tpickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11\n\t\treturn struct {\n\t\t\t*responseWriterDelegator\n\t\t\tio.ReaderFrom\n\t\t\thttp.Flusher\n\t\t\thttp.CloseNotifier\n\t\t}{d, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}\n\t}\n\tpickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12\n\t\treturn struct {\n\t\t\t*responseWriterDelegator\n\t\t\tio.ReaderFrom\n\t\t\thttp.Hijacker\n\t\t}{d, &readerFromDelegator{d}, &hijackerDelegator{d}}\n\t}\n\tpickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13\n\t\treturn struct {\n\t\t\t*responseWriterDelegator\n\t\t\tio.ReaderFrom\n\t\t\thttp.Hijacker\n\t\t\thttp.CloseNotifier\n\t\t}{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}}\n\t}\n\tpickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14\n\t\treturn struct {\n\t\t\t*responseWriterDelegator\n\t\t\tio.ReaderFrom\n\t\t\thttp.Hijacker\n\t\t\thttp.Flusher\n\t\t}{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}}\n\t}\n\tpickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15\n\t\treturn struct {\n\t\t\t*responseWriterDelegator\n\t\t\tio.ReaderFrom\n\t\t\thttp.Hijacker\n\t\t\thttp.Flusher\n\t\t\thttp.CloseNotifier\n\t\t}{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}\n\t}\n}\n"
  },
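  {
    "path": "examples/capability_bitmask/main.go",
    "content": "// Hypothetical example file, not part of the vendored sources: the\n// capability-bitmask technique used by delegator.go above, reduced to two\n// optional interfaces. Each capability contributes one bit, and the combined\n// id indexes a table of constructors, so the wrapper exposes exactly the\n// interfaces the wrapped value supports. All names here are invented.\npackage main\n\nimport \"fmt\"\n\ntype Flusher interface{ Flush() }\ntype Hijacker interface{ Hijack() }\n\nconst (\n\tflusher = 1 << iota // 1\n\thijacker            // 2\n)\n\ntype base struct{ inner interface{} }\n\n// One constructor per capability combination, indexed by the bitmask.\nvar pick = [4]func(base) interface{}{\n\t0: func(b base) interface{} { return b },\n\tflusher: func(b base) interface{} {\n\t\treturn struct {\n\t\t\tbase\n\t\t\tFlusher\n\t\t}{b, b.inner.(Flusher)}\n\t},\n\thijacker: func(b base) interface{} {\n\t\treturn struct {\n\t\t\tbase\n\t\t\tHijacker\n\t\t}{b, b.inner.(Hijacker)}\n\t},\n\tflusher + hijacker: func(b base) interface{} {\n\t\treturn struct {\n\t\t\tbase\n\t\t\tFlusher\n\t\t\tHijacker\n\t\t}{b, b.inner.(Flusher), b.inner.(Hijacker)}\n\t},\n}\n\nfunc wrap(v interface{}) interface{} {\n\tid := 0\n\tif _, ok := v.(Flusher); ok {\n\t\tid += flusher\n\t}\n\tif _, ok := v.(Hijacker); ok {\n\t\tid += hijacker\n\t}\n\treturn pick[id](base{inner: v})\n}\n\ntype fancy struct{}\n\nfunc (fancy) Flush()  {}\nfunc (fancy) Hijack() {}\n\nfunc main() {\n\tw := wrap(fancy{})\n\t_, canFlush := w.(Flusher)\n\t_, canHijack := w.(Hijacker)\n\tfmt.Println(canFlush, canHijack) // true true\n}\n"
  },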
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go",
    "content": "// Copyright 2017 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// +build go1.8\n\npackage promhttp\n\nimport (\n\t\"io\"\n\t\"net/http\"\n)\n\ntype pusherDelegator struct{ *responseWriterDelegator }\n\nfunc (d *pusherDelegator) Push(target string, opts *http.PushOptions) error {\n\treturn d.ResponseWriter.(http.Pusher).Push(target, opts)\n}\n\nfunc init() {\n\tpickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16\n\t\treturn pusherDelegator{d}\n\t}\n\tpickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17\n\t\treturn struct {\n\t\t\t*responseWriterDelegator\n\t\t\thttp.Pusher\n\t\t\thttp.CloseNotifier\n\t\t}{d, &pusherDelegator{d}, &closeNotifierDelegator{d}}\n\t}\n\tpickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18\n\t\treturn struct {\n\t\t\t*responseWriterDelegator\n\t\t\thttp.Pusher\n\t\t\thttp.Flusher\n\t\t}{d, &pusherDelegator{d}, &flusherDelegator{d}}\n\t}\n\tpickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19\n\t\treturn struct {\n\t\t\t*responseWriterDelegator\n\t\t\thttp.Pusher\n\t\t\thttp.Flusher\n\t\t\thttp.CloseNotifier\n\t\t}{d, &pusherDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}\n\t}\n\tpickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20\n\t\treturn struct {\n\t\t\t*responseWriterDelegator\n\t\t\thttp.Pusher\n\t\t\thttp.Hijacker\n\t\t}{d, &pusherDelegator{d}, &hijackerDelegator{d}}\n\t}\n\tpickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21\n\t\treturn struct {\n\t\t\t*responseWriterDelegator\n\t\t\thttp.Pusher\n\t\t\thttp.Hijacker\n\t\t\thttp.CloseNotifier\n\t\t}{d, &pusherDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}}\n\t}\n\tpickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22\n\t\treturn struct {\n\t\t\t*responseWriterDelegator\n\t\t\thttp.Pusher\n\t\t\thttp.Hijacker\n\t\t\thttp.Flusher\n\t\t}{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}}\n\t}\n\tpickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { //23\n\t\treturn struct {\n\t\t\t*responseWriterDelegator\n\t\t\thttp.Pusher\n\t\t\thttp.Hijacker\n\t\t\thttp.Flusher\n\t\t\thttp.CloseNotifier\n\t\t}{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}\n\t}\n\tpickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24\n\t\treturn struct {\n\t\t\t*responseWriterDelegator\n\t\t\thttp.Pusher\n\t\t\tio.ReaderFrom\n\t\t}{d, &pusherDelegator{d}, &readerFromDelegator{d}}\n\t}\n\tpickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25\n\t\treturn struct {\n\t\t\t*responseWriterDelegator\n\t\t\thttp.Pusher\n\t\t\tio.ReaderFrom\n\t\t\thttp.CloseNotifier\n\t\t}{d, &pusherDelegator{d}, &readerFromDelegator{d}, 
&closeNotifierDelegator{d}}\n\t}\n\tpickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26\n\t\treturn struct {\n\t\t\t*responseWriterDelegator\n\t\t\thttp.Pusher\n\t\t\tio.ReaderFrom\n\t\t\thttp.Flusher\n\t\t}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}}\n\t}\n\tpickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27\n\t\treturn struct {\n\t\t\t*responseWriterDelegator\n\t\t\thttp.Pusher\n\t\t\tio.ReaderFrom\n\t\t\thttp.Flusher\n\t\t\thttp.CloseNotifier\n\t\t}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}\n\t}\n\tpickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28\n\t\treturn struct {\n\t\t\t*responseWriterDelegator\n\t\t\thttp.Pusher\n\t\t\tio.ReaderFrom\n\t\t\thttp.Hijacker\n\t\t}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}}\n\t}\n\tpickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29\n\t\treturn struct {\n\t\t\t*responseWriterDelegator\n\t\t\thttp.Pusher\n\t\t\tio.ReaderFrom\n\t\t\thttp.Hijacker\n\t\t\thttp.CloseNotifier\n\t\t}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}}\n\t}\n\tpickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30\n\t\treturn struct {\n\t\t\t*responseWriterDelegator\n\t\t\thttp.Pusher\n\t\t\tio.ReaderFrom\n\t\t\thttp.Hijacker\n\t\t\thttp.Flusher\n\t\t}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}}\n\t}\n\tpickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31\n\t\treturn struct {\n\t\t\t*responseWriterDelegator\n\t\t\thttp.Pusher\n\t\t\tio.ReaderFrom\n\t\t\thttp.Hijacker\n\t\t\thttp.Flusher\n\t\t\thttp.CloseNotifier\n\t\t}{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}\n\t}\n}\n\nfunc newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {\n\td := &responseWriterDelegator{\n\t\tResponseWriter:     w,\n\t\tobserveWriteHeader: observeWriteHeaderFunc,\n\t}\n\n\tid := 0\n\tif _, ok := w.(http.CloseNotifier); ok {\n\t\tid += closeNotifier\n\t}\n\tif _, ok := w.(http.Flusher); ok {\n\t\tid += flusher\n\t}\n\tif _, ok := w.(http.Hijacker); ok {\n\t\tid += hijacker\n\t}\n\tif _, ok := w.(io.ReaderFrom); ok {\n\t\tid += readerFrom\n\t}\n\tif _, ok := w.(http.Pusher); ok {\n\t\tid += pusher\n\t}\n\n\treturn pickDelegator[id](d)\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go",
    "content": "// Copyright 2017 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// +build !go1.8\n\npackage promhttp\n\nimport (\n\t\"io\"\n\t\"net/http\"\n)\n\nfunc newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {\n\td := &responseWriterDelegator{\n\t\tResponseWriter:     w,\n\t\tobserveWriteHeader: observeWriteHeaderFunc,\n\t}\n\n\tid := 0\n\tif _, ok := w.(http.CloseNotifier); ok {\n\t\tid += closeNotifier\n\t}\n\tif _, ok := w.(http.Flusher); ok {\n\t\tid += flusher\n\t}\n\tif _, ok := w.(http.Hijacker); ok {\n\t\tid += hijacker\n\t}\n\tif _, ok := w.(io.ReaderFrom); ok {\n\t\tid += readerFrom\n\t}\n\n\treturn pickDelegator[id](d)\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go",
    "content": "// Copyright 2016 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// Package promhttp provides tooling around HTTP servers and clients.\n//\n// First, the package allows the creation of http.Handler instances to expose\n// Prometheus metrics via HTTP. promhttp.Handler acts on the\n// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a\n// custom registry or anything that implements the Gatherer interface. It also\n// allows the creation of handlers that act differently on errors or allow to\n// log errors.\n//\n// Second, the package provides tooling to instrument instances of http.Handler\n// via middleware. Middleware wrappers follow the naming scheme\n// InstrumentHandlerX, where X describes the intended use of the middleware.\n// See each function's doc comment for specific details.\n//\n// Finally, the package allows for an http.RoundTripper to be instrumented via\n// middleware. Middleware wrappers follow the naming scheme\n// InstrumentRoundTripperX, where X describes the intended use of the\n// middleware. See each function's doc comment for specific details.\npackage promhttp\n\nimport (\n\t\"bytes\"\n\t\"compress/gzip\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com/prometheus/common/expfmt\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nconst (\n\tcontentTypeHeader     = \"Content-Type\"\n\tcontentLengthHeader   = \"Content-Length\"\n\tcontentEncodingHeader = \"Content-Encoding\"\n\tacceptEncodingHeader  = \"Accept-Encoding\"\n)\n\nvar bufPool sync.Pool\n\nfunc getBuf() *bytes.Buffer {\n\tbuf := bufPool.Get()\n\tif buf == nil {\n\t\treturn &bytes.Buffer{}\n\t}\n\treturn buf.(*bytes.Buffer)\n}\n\nfunc giveBuf(buf *bytes.Buffer) {\n\tbuf.Reset()\n\tbufPool.Put(buf)\n}\n\n// Handler returns an HTTP handler for the prometheus.DefaultGatherer. The\n// Handler uses the default HandlerOpts, i.e. report the first error as an HTTP\n// error, no error logging, and compression if requested by the client.\n//\n// If you want to create a Handler for the DefaultGatherer with different\n// HandlerOpts, create it with HandlerFor with prometheus.DefaultGatherer and\n// your desired HandlerOpts.\nfunc Handler() http.Handler {\n\treturn HandlerFor(prometheus.DefaultGatherer, HandlerOpts{})\n}\n\n// HandlerFor returns an http.Handler for the provided Gatherer. 
The behavior\n// of the Handler is defined by the provided HandlerOpts.\nfunc HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tmfs, err := reg.Gather()\n\t\tif err != nil {\n\t\t\tif opts.ErrorLog != nil {\n\t\t\t\topts.ErrorLog.Println(\"error gathering metrics:\", err)\n\t\t\t}\n\t\t\tswitch opts.ErrorHandling {\n\t\t\tcase PanicOnError:\n\t\t\t\tpanic(err)\n\t\t\tcase ContinueOnError:\n\t\t\t\tif len(mfs) == 0 {\n\t\t\t\t\thttp.Error(w, \"No metrics gathered, last error:\\n\\n\"+err.Error(), http.StatusInternalServerError)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\tcase HTTPErrorOnError:\n\t\t\t\thttp.Error(w, \"An error has occurred during metrics gathering:\\n\\n\"+err.Error(), http.StatusInternalServerError)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tcontentType := expfmt.Negotiate(req.Header)\n\t\tbuf := getBuf()\n\t\tdefer giveBuf(buf)\n\t\twriter, encoding := decorateWriter(req, buf, opts.DisableCompression)\n\t\tenc := expfmt.NewEncoder(writer, contentType)\n\t\tvar lastErr error\n\t\tfor _, mf := range mfs {\n\t\t\tif err := enc.Encode(mf); err != nil {\n\t\t\t\tlastErr = err\n\t\t\t\tif opts.ErrorLog != nil {\n\t\t\t\t\topts.ErrorLog.Println(\"error encoding metric family:\", err)\n\t\t\t\t}\n\t\t\t\tswitch opts.ErrorHandling {\n\t\t\t\tcase PanicOnError:\n\t\t\t\t\tpanic(err)\n\t\t\t\tcase ContinueOnError:\n\t\t\t\t\t// Handled later.\n\t\t\t\tcase HTTPErrorOnError:\n\t\t\t\t\thttp.Error(w, \"An error has occurred during metrics encoding:\\n\\n\"+err.Error(), http.StatusInternalServerError)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif closer, ok := writer.(io.Closer); ok {\n\t\t\tcloser.Close()\n\t\t}\n\t\tif lastErr != nil && buf.Len() == 0 {\n\t\t\thttp.Error(w, \"No metrics encoded, last error:\\n\\n\"+lastErr.Error(), http.StatusInternalServerError)\n\t\t\treturn\n\t\t}\n\t\theader := w.Header()\n\t\theader.Set(contentTypeHeader, string(contentType))\n\t\theader.Set(contentLengthHeader, fmt.Sprint(buf.Len()))\n\t\tif encoding != \"\" {\n\t\t\theader.Set(contentEncodingHeader, encoding)\n\t\t}\n\t\tw.Write(buf.Bytes())\n\t\t// TODO(beorn7): Consider streaming serving of metrics.\n\t})\n}\n\n// HandlerErrorHandling defines how a Handler serving metrics will handle\n// errors.\ntype HandlerErrorHandling int\n\n// These constants cause handlers serving metrics to behave as described if\n// errors are encountered.\nconst (\n\t// Serve an HTTP status code 500 upon the first error\n\t// encountered. Report the error message in the body.\n\tHTTPErrorOnError HandlerErrorHandling = iota\n\t// Ignore errors and try to serve as many metrics as possible.  However,\n\t// if no metrics can be served, serve an HTTP status code 500 and the\n\t// last error message in the body. Only use this in deliberate \"best\n\t// effort\" metrics collection scenarios. It is recommended to at least\n\t// log errors (by providing an ErrorLog in HandlerOpts) to not mask\n\t// errors completely.\n\tContinueOnError\n\t// Panic upon the first error encountered (useful for \"crash only\" apps).\n\tPanicOnError\n)\n\n// Logger is the minimal interface HandlerOpts needs for logging. Note that\n// log.Logger from the standard library implements this interface, and it is\n// easy to implement by custom loggers, if they don't do so already anyway.\ntype Logger interface {\n\tPrintln(v ...interface{})\n}\n\n// HandlerOpts specifies options for how to serve metrics via an http.Handler. 
The\n// zero value of HandlerOpts is a reasonable default.\ntype HandlerOpts struct {\n\t// ErrorLog specifies an optional logger for errors collecting and\n\t// serving metrics. If nil, errors are not logged at all.\n\tErrorLog Logger\n\t// ErrorHandling defines how errors are handled. Note that errors are\n\t// logged regardless of the configured ErrorHandling provided ErrorLog\n\t// is not nil.\n\tErrorHandling HandlerErrorHandling\n\t// If DisableCompression is true, the handler will never compress the\n\t// response, even if requested by the client.\n\tDisableCompression bool\n}\n\n// decorateWriter wraps a writer to handle gzip compression if requested.  It\n// returns the decorated writer and the appropriate \"Content-Encoding\" header\n// (which is empty if no compression is enabled).\nfunc decorateWriter(request *http.Request, writer io.Writer, compressionDisabled bool) (io.Writer, string) {\n\tif compressionDisabled {\n\t\treturn writer, \"\"\n\t}\n\theader := request.Header.Get(acceptEncodingHeader)\n\tparts := strings.Split(header, \",\")\n\tfor _, part := range parts {\n\t\tpart := strings.TrimSpace(part)\n\t\tif part == \"gzip\" || strings.HasPrefix(part, \"gzip;\") {\n\t\t\treturn gzip.NewWriter(writer), \"gzip\"\n\t\t}\n\t}\n\treturn writer, \"\"\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/promhttp/http_test.go",
    "content": "// Copyright 2016 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage promhttp\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"log\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"testing\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\ntype errorCollector struct{}\n\nfunc (e errorCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- prometheus.NewDesc(\"invalid_metric\", \"not helpful\", nil, nil)\n}\n\nfunc (e errorCollector) Collect(ch chan<- prometheus.Metric) {\n\tch <- prometheus.NewInvalidMetric(\n\t\tprometheus.NewDesc(\"invalid_metric\", \"not helpful\", nil, nil),\n\t\terrors.New(\"collect error\"),\n\t)\n}\n\nfunc TestHandlerErrorHandling(t *testing.T) {\n\n\t// Create a registry that collects a MetricFamily with two elements,\n\t// another with one, and reports an error.\n\treg := prometheus.NewRegistry()\n\n\tcnt := prometheus.NewCounter(prometheus.CounterOpts{\n\t\tName: \"the_count\",\n\t\tHelp: \"Ah-ah-ah! Thunder and lightning!\",\n\t})\n\treg.MustRegister(cnt)\n\n\tcntVec := prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tName:        \"name\",\n\t\t\tHelp:        \"docstring\",\n\t\t\tConstLabels: prometheus.Labels{\"constname\": \"constvalue\"},\n\t\t},\n\t\t[]string{\"labelname\"},\n\t)\n\tcntVec.WithLabelValues(\"val1\").Inc()\n\tcntVec.WithLabelValues(\"val2\").Inc()\n\treg.MustRegister(cntVec)\n\n\treg.MustRegister(errorCollector{})\n\n\tlogBuf := &bytes.Buffer{}\n\tlogger := log.New(logBuf, \"\", 0)\n\n\twriter := httptest.NewRecorder()\n\trequest, _ := http.NewRequest(\"GET\", \"/\", nil)\n\trequest.Header.Add(\"Accept\", \"test/plain\")\n\n\terrorHandler := HandlerFor(reg, HandlerOpts{\n\t\tErrorLog:      logger,\n\t\tErrorHandling: HTTPErrorOnError,\n\t})\n\tcontinueHandler := HandlerFor(reg, HandlerOpts{\n\t\tErrorLog:      logger,\n\t\tErrorHandling: ContinueOnError,\n\t})\n\tpanicHandler := HandlerFor(reg, HandlerOpts{\n\t\tErrorLog:      logger,\n\t\tErrorHandling: PanicOnError,\n\t})\n\twantMsg := `error gathering metrics: error collecting metric Desc{fqName: \"invalid_metric\", help: \"not helpful\", constLabels: {}, variableLabels: []}: collect error\n`\n\twantErrorBody := `An error has occurred during metrics gathering:\n\nerror collecting metric Desc{fqName: \"invalid_metric\", help: \"not helpful\", constLabels: {}, variableLabels: []}: collect error\n`\n\twantOKBody := `# HELP name docstring\n# TYPE name counter\nname{constname=\"constvalue\",labelname=\"val1\"} 1\nname{constname=\"constvalue\",labelname=\"val2\"} 1\n# HELP the_count Ah-ah-ah! 
Thunder and lightning!\n# TYPE the_count counter\nthe_count 0\n`\n\n\terrorHandler.ServeHTTP(writer, request)\n\tif got, want := writer.Code, http.StatusInternalServerError; got != want {\n\t\tt.Errorf(\"got HTTP status code %d, want %d\", got, want)\n\t}\n\tif got := logBuf.String(); got != wantMsg {\n\t\tt.Errorf(\"got log message:\\n%s\\nwant log message:\\n%s\\n\", got, wantMsg)\n\t}\n\tif got := writer.Body.String(); got != wantErrorBody {\n\t\tt.Errorf(\"got body:\\n%s\\nwant body:\\n%s\\n\", got, wantErrorBody)\n\t}\n\tlogBuf.Reset()\n\twriter.Body.Reset()\n\twriter.Code = http.StatusOK\n\n\tcontinueHandler.ServeHTTP(writer, request)\n\tif got, want := writer.Code, http.StatusOK; got != want {\n\t\tt.Errorf(\"got HTTP status code %d, want %d\", got, want)\n\t}\n\tif got := logBuf.String(); got != wantMsg {\n\t\tt.Errorf(\"got log message %q, want %q\", got, wantMsg)\n\t}\n\tif got := writer.Body.String(); got != wantOKBody {\n\t\tt.Errorf(\"got body %q, want %q\", got, wantOKBody)\n\t}\n\n\tdefer func() {\n\t\tif err := recover(); err == nil {\n\t\t\tt.Error(\"expected panic from panicHandler\")\n\t\t}\n\t}()\n\tpanicHandler.ServeHTTP(writer, request)\n}\n"
  },
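  {
    "path": "examples/promhttp_handleropts/main.go",
    "content": "// Illustrative sketch (not part of the vendored library or upstream\n// client_golang): serving metrics from a custom registry with\n// promhttp.HandlerFor and HandlerOpts, as documented in\n// vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go.\n// The file path and package layout are assumptions made for illustration.\npackage main\n\nimport (\n\t\"log\"\n\t\"net/http\"\n\t\"os\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/client_golang/prometheus/promhttp\"\n)\n\nfunc main() {\n\t// A dedicated registry avoids the global DefaultRegisterer state.\n\treg := prometheus.NewRegistry()\n\treg.MustRegister(prometheus.NewGoCollector())\n\n\t// log.Logger satisfies the promhttp.Logger interface, so gathering and\n\t// encoding errors are logged instead of being silently dropped.\n\t// ContinueOnError still serves whatever metrics could be gathered.\n\thttp.Handle(\"/metrics\", promhttp.HandlerFor(reg, promhttp.HandlerOpts{\n\t\tErrorLog:      log.New(os.Stderr, \"promhttp: \", log.LstdFlags),\n\t\tErrorHandling: promhttp.ContinueOnError,\n\t}))\n\tlog.Fatal(http.ListenAndServe(\":8080\", nil))\n}\n"
  },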
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go",
    "content": "// Copyright 2017 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage promhttp\n\nimport (\n\t\"net/http\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\n// The RoundTripperFunc type is an adapter to allow the use of ordinary\n// functions as RoundTrippers. If f is a function with the appropriate\n// signature, RountTripperFunc(f) is a RoundTripper that calls f.\ntype RoundTripperFunc func(req *http.Request) (*http.Response, error)\n\n// RoundTrip implements the RoundTripper interface.\nfunc (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {\n\treturn rt(r)\n}\n\n// InstrumentRoundTripperInFlight is a middleware that wraps the provided\n// http.RoundTripper. It sets the provided prometheus.Gauge to the number of\n// requests currently handled by the wrapped http.RoundTripper.\n//\n// See the example for ExampleInstrumentRoundTripperDuration for example usage.\nfunc InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {\n\treturn RoundTripperFunc(func(r *http.Request) (*http.Response, error) {\n\t\tgauge.Inc()\n\t\tdefer gauge.Dec()\n\t\treturn next.RoundTrip(r)\n\t})\n}\n\n// InstrumentRoundTripperCounter is a middleware that wraps the provided\n// http.RoundTripper to observe the request result with the provided CounterVec.\n// The CounterVec must have zero, one, or two labels. The only allowed label\n// names are \"code\" and \"method\". The function panics if any other instance\n// labels are provided. Partitioning of the CounterVec happens by HTTP status\n// code and/or HTTP method if the respective instance label names are present\n// in the CounterVec. For unpartitioned counting, use a CounterVec with\n// zero labels.\n//\n// If the wrapped RoundTripper panics or returns a non-nil error, the Counter\n// is not incremented.\n//\n// See the example for ExampleInstrumentRoundTripperDuration for example usage.\nfunc InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc {\n\tcode, method := checkLabels(counter)\n\n\treturn RoundTripperFunc(func(r *http.Request) (*http.Response, error) {\n\t\tresp, err := next.RoundTrip(r)\n\t\tif err == nil {\n\t\t\tcounter.With(labels(code, method, r.Method, resp.StatusCode)).Inc()\n\t\t}\n\t\treturn resp, err\n\t})\n}\n\n// InstrumentRoundTripperDuration is a middleware that wraps the provided\n// http.RoundTripper to observe the request duration with the provided ObserverVec.\n// The ObserverVec must have zero, one, or two labels. The only allowed label\n// names are \"code\" and \"method\". The function panics if any other instance\n// labels are provided. The Observe method of the Observer in the ObserverVec\n// is called with the request duration in seconds. Partitioning happens by HTTP\n// status code and/or HTTP method if the respective instance label names are\n// present in the ObserverVec. 
For unpartitioned observations, use an\n// ObserverVec with zero labels. Note that partitioning of Histograms is\n// expensive and should be used judiciously.\n//\n// If the wrapped RoundTripper panics or returns a non-nil error, no values are\n// reported.\n//\n// Note that this method is only guaranteed to never observe negative durations\n// if used with Go1.9+.\nfunc InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {\n\tcode, method := checkLabels(obs)\n\n\treturn RoundTripperFunc(func(r *http.Request) (*http.Response, error) {\n\t\tstart := time.Now()\n\t\tresp, err := next.RoundTrip(r)\n\t\tif err == nil {\n\t\t\tobs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds())\n\t\t}\n\t\treturn resp, err\n\t})\n}\n"
  },
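  {
    "path": "examples/promhttp_roundtripperfunc/main.go",
    "content": "// Illustrative sketch (not part of the vendored library or upstream\n// client_golang): using the promhttp.RoundTripperFunc adapter to turn an\n// ordinary function into an http.RoundTripper, the same mechanism the\n// provided client middlewares are built on. The file path and the logged\n// message are assumptions made for illustration only.\npackage main\n\nimport (\n\t\"log\"\n\t\"net/http\"\n\n\t\"github.com/prometheus/client_golang/prometheus/promhttp\"\n)\n\nfunc main() {\n\t// A function with the RoundTripper signature becomes a RoundTripper\n\t// once converted, so it can wrap http.DefaultTransport just like\n\t// InstrumentRoundTripperInFlight and friends do.\n\tvar rt http.RoundTripper = promhttp.RoundTripperFunc(func(r *http.Request) (*http.Response, error) {\n\t\tlog.Printf(\"outbound %s %s\", r.Method, r.URL)\n\t\treturn http.DefaultTransport.RoundTrip(r)\n\t})\n\n\tclient := &http.Client{Transport: rt}\n\tresp, err := client.Get(\"http://example.com\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer resp.Body.Close()\n\tlog.Println(resp.Status)\n}\n"
  },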
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go",
    "content": "// Copyright 2017 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// +build go1.8\n\npackage promhttp\n\nimport (\n\t\"context\"\n\t\"crypto/tls\"\n\t\"net/http\"\n\t\"net/http/httptrace\"\n\t\"time\"\n)\n\n// InstrumentTrace is used to offer flexibility in instrumenting the available\n// httptrace.ClientTrace hook functions. Each function is passed a float64\n// representing the time in seconds since the start of the http request. A user\n// may choose to use separately buckets Histograms, or implement custom\n// instance labels on a per function basis.\ntype InstrumentTrace struct {\n\tGotConn              func(float64)\n\tPutIdleConn          func(float64)\n\tGotFirstResponseByte func(float64)\n\tGot100Continue       func(float64)\n\tDNSStart             func(float64)\n\tDNSDone              func(float64)\n\tConnectStart         func(float64)\n\tConnectDone          func(float64)\n\tTLSHandshakeStart    func(float64)\n\tTLSHandshakeDone     func(float64)\n\tWroteHeaders         func(float64)\n\tWait100Continue      func(float64)\n\tWroteRequest         func(float64)\n}\n\n// InstrumentRoundTripperTrace is a middleware that wraps the provided\n// RoundTripper and reports times to hook functions provided in the\n// InstrumentTrace struct. Hook functions that are not present in the provided\n// InstrumentTrace struct are ignored. Times reported to the hook functions are\n// time since the start of the request. Only with Go1.9+, those times are\n// guaranteed to never be negative. (Earlier Go versions are not using a\n// monotonic clock.) 
Note that partitioning of Histograms is expensive and\n// should be used judiciously.\n//\n// For hook functions that receive an error as an argument, no observations are\n// made in the event of a non-nil error value.\n//\n// See the example for ExampleInstrumentRoundTripperDuration for example usage.\nfunc InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {\n\treturn RoundTripperFunc(func(r *http.Request) (*http.Response, error) {\n\t\tstart := time.Now()\n\n\t\ttrace := &httptrace.ClientTrace{\n\t\t\tGotConn: func(_ httptrace.GotConnInfo) {\n\t\t\t\tif it.GotConn != nil {\n\t\t\t\t\tit.GotConn(time.Since(start).Seconds())\n\t\t\t\t}\n\t\t\t},\n\t\t\tPutIdleConn: func(err error) {\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif it.PutIdleConn != nil {\n\t\t\t\t\tit.PutIdleConn(time.Since(start).Seconds())\n\t\t\t\t}\n\t\t\t},\n\t\t\tDNSStart: func(_ httptrace.DNSStartInfo) {\n\t\t\t\tif it.DNSStart != nil {\n\t\t\t\t\tit.DNSStart(time.Since(start).Seconds())\n\t\t\t\t}\n\t\t\t},\n\t\t\tDNSDone: func(_ httptrace.DNSDoneInfo) {\n\t\t\t\tif it.DNSStart != nil {\n\t\t\t\t\tit.DNSStart(time.Since(start).Seconds())\n\t\t\t\t}\n\t\t\t},\n\t\t\tConnectStart: func(_, _ string) {\n\t\t\t\tif it.ConnectStart != nil {\n\t\t\t\t\tit.ConnectStart(time.Since(start).Seconds())\n\t\t\t\t}\n\t\t\t},\n\t\t\tConnectDone: func(_, _ string, err error) {\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif it.ConnectDone != nil {\n\t\t\t\t\tit.ConnectDone(time.Since(start).Seconds())\n\t\t\t\t}\n\t\t\t},\n\t\t\tGotFirstResponseByte: func() {\n\t\t\t\tif it.GotFirstResponseByte != nil {\n\t\t\t\t\tit.GotFirstResponseByte(time.Since(start).Seconds())\n\t\t\t\t}\n\t\t\t},\n\t\t\tGot100Continue: func() {\n\t\t\t\tif it.Got100Continue != nil {\n\t\t\t\t\tit.Got100Continue(time.Since(start).Seconds())\n\t\t\t\t}\n\t\t\t},\n\t\t\tTLSHandshakeStart: func() {\n\t\t\t\tif it.TLSHandshakeStart != nil {\n\t\t\t\t\tit.TLSHandshakeStart(time.Since(start).Seconds())\n\t\t\t\t}\n\t\t\t},\n\t\t\tTLSHandshakeDone: func(_ tls.ConnectionState, err error) {\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif it.TLSHandshakeDone != nil {\n\t\t\t\t\tit.TLSHandshakeDone(time.Since(start).Seconds())\n\t\t\t\t}\n\t\t\t},\n\t\t\tWroteHeaders: func() {\n\t\t\t\tif it.WroteHeaders != nil {\n\t\t\t\t\tit.WroteHeaders(time.Since(start).Seconds())\n\t\t\t\t}\n\t\t\t},\n\t\t\tWait100Continue: func() {\n\t\t\t\tif it.Wait100Continue != nil {\n\t\t\t\t\tit.Wait100Continue(time.Since(start).Seconds())\n\t\t\t\t}\n\t\t\t},\n\t\t\tWroteRequest: func(_ httptrace.WroteRequestInfo) {\n\t\t\t\tif it.WroteRequest != nil {\n\t\t\t\t\tit.WroteRequest(time.Since(start).Seconds())\n\t\t\t\t}\n\t\t\t},\n\t\t}\n\t\tr = r.WithContext(httptrace.WithClientTrace(context.Background(), trace))\n\n\t\treturn next.RoundTrip(r)\n\t})\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8_test.go",
    "content": "// Copyright 2017 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// +build go1.8\n\npackage promhttp\n\nimport (\n\t\"log\"\n\t\"net/http\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nfunc TestClientMiddlewareAPI(t *testing.T) {\n\tclient := http.DefaultClient\n\tclient.Timeout = 1 * time.Second\n\n\treg := prometheus.NewRegistry()\n\n\tinFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tName: \"client_in_flight_requests\",\n\t\tHelp: \"A gauge of in-flight requests for the wrapped client.\",\n\t})\n\n\tcounter := prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tName: \"client_api_requests_total\",\n\t\t\tHelp: \"A counter for requests from the wrapped client.\",\n\t\t},\n\t\t[]string{\"code\", \"method\"},\n\t)\n\n\tdnsLatencyVec := prometheus.NewHistogramVec(\n\t\tprometheus.HistogramOpts{\n\t\t\tName:    \"dns_duration_seconds\",\n\t\t\tHelp:    \"Trace dns latency histogram.\",\n\t\t\tBuckets: []float64{.005, .01, .025, .05},\n\t\t},\n\t\t[]string{\"event\"},\n\t)\n\n\ttlsLatencyVec := prometheus.NewHistogramVec(\n\t\tprometheus.HistogramOpts{\n\t\t\tName:    \"tls_duration_seconds\",\n\t\t\tHelp:    \"Trace tls latency histogram.\",\n\t\t\tBuckets: []float64{.05, .1, .25, .5},\n\t\t},\n\t\t[]string{\"event\"},\n\t)\n\n\thistVec := prometheus.NewHistogramVec(\n\t\tprometheus.HistogramOpts{\n\t\t\tName:    \"request_duration_seconds\",\n\t\t\tHelp:    \"A histogram of request latencies.\",\n\t\t\tBuckets: prometheus.DefBuckets,\n\t\t},\n\t\t[]string{\"method\"},\n\t)\n\n\treg.MustRegister(counter, tlsLatencyVec, dnsLatencyVec, histVec, inFlightGauge)\n\n\ttrace := &InstrumentTrace{\n\t\tDNSStart: func(t float64) {\n\t\t\tdnsLatencyVec.WithLabelValues(\"dns_start\")\n\t\t},\n\t\tDNSDone: func(t float64) {\n\t\t\tdnsLatencyVec.WithLabelValues(\"dns_done\")\n\t\t},\n\t\tTLSHandshakeStart: func(t float64) {\n\t\t\ttlsLatencyVec.WithLabelValues(\"tls_handshake_start\")\n\t\t},\n\t\tTLSHandshakeDone: func(t float64) {\n\t\t\ttlsLatencyVec.WithLabelValues(\"tls_handshake_done\")\n\t\t},\n\t}\n\n\tclient.Transport = InstrumentRoundTripperInFlight(inFlightGauge,\n\t\tInstrumentRoundTripperCounter(counter,\n\t\t\tInstrumentRoundTripperTrace(trace,\n\t\t\t\tInstrumentRoundTripperDuration(histVec, http.DefaultTransport),\n\t\t\t),\n\t\t),\n\t)\n\n\tresp, err := client.Get(\"http://google.com\")\n\tif err != nil {\n\t\tt.Fatalf(\"%v\", err)\n\t}\n\tdefer resp.Body.Close()\n}\n\nfunc ExampleInstrumentRoundTripperDuration() {\n\tclient := http.DefaultClient\n\tclient.Timeout = 1 * time.Second\n\n\tinFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tName: \"client_in_flight_requests\",\n\t\tHelp: \"A gauge of in-flight requests for the wrapped client.\",\n\t})\n\n\tcounter := prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tName: \"client_api_requests_total\",\n\t\t\tHelp: \"A counter for requests from the wrapped client.\",\n\t\t},\n\t\t[]string{\"code\", 
\"method\"},\n\t)\n\n\t// dnsLatencyVec uses custom buckets based on expected dns durations.\n\t// It has an instance label \"event\", which is set in the\n\t// DNSStart and DNSDonehook functions defined in the\n\t// InstrumentTrace struct below.\n\tdnsLatencyVec := prometheus.NewHistogramVec(\n\t\tprometheus.HistogramOpts{\n\t\t\tName:    \"dns_duration_seconds\",\n\t\t\tHelp:    \"Trace dns latency histogram.\",\n\t\t\tBuckets: []float64{.005, .01, .025, .05},\n\t\t},\n\t\t[]string{\"event\"},\n\t)\n\n\t// tlsLatencyVec uses custom buckets based on expected tls durations.\n\t// It has an instance label \"event\", which is set in the\n\t// TLSHandshakeStart and TLSHandshakeDone hook functions defined in the\n\t// InstrumentTrace struct below.\n\ttlsLatencyVec := prometheus.NewHistogramVec(\n\t\tprometheus.HistogramOpts{\n\t\t\tName:    \"tls_duration_seconds\",\n\t\t\tHelp:    \"Trace tls latency histogram.\",\n\t\t\tBuckets: []float64{.05, .1, .25, .5},\n\t\t},\n\t\t[]string{\"event\"},\n\t)\n\n\t// histVec has no labels, making it a zero-dimensional ObserverVec.\n\thistVec := prometheus.NewHistogramVec(\n\t\tprometheus.HistogramOpts{\n\t\t\tName:    \"request_duration_seconds\",\n\t\t\tHelp:    \"A histogram of request latencies.\",\n\t\t\tBuckets: prometheus.DefBuckets,\n\t\t},\n\t\t[]string{},\n\t)\n\n\t// Register all of the metrics in the standard registry.\n\tprometheus.MustRegister(counter, tlsLatencyVec, dnsLatencyVec, histVec, inFlightGauge)\n\n\t// Define functions for the available httptrace.ClientTrace hook\n\t// functions that we want to instrument.\n\ttrace := &InstrumentTrace{\n\t\tDNSStart: func(t float64) {\n\t\t\tdnsLatencyVec.WithLabelValues(\"dns_start\")\n\t\t},\n\t\tDNSDone: func(t float64) {\n\t\t\tdnsLatencyVec.WithLabelValues(\"dns_done\")\n\t\t},\n\t\tTLSHandshakeStart: func(t float64) {\n\t\t\ttlsLatencyVec.WithLabelValues(\"tls_handshake_start\")\n\t\t},\n\t\tTLSHandshakeDone: func(t float64) {\n\t\t\ttlsLatencyVec.WithLabelValues(\"tls_handshake_done\")\n\t\t},\n\t}\n\n\t// Wrap the default RoundTripper with middleware.\n\troundTripper := InstrumentRoundTripperInFlight(inFlightGauge,\n\t\tInstrumentRoundTripperCounter(counter,\n\t\t\tInstrumentRoundTripperTrace(trace,\n\t\t\t\tInstrumentRoundTripperDuration(histVec, http.DefaultTransport),\n\t\t\t),\n\t\t),\n\t)\n\n\t// Set the RoundTripper on our client.\n\tclient.Transport = roundTripper\n\n\tresp, err := client.Get(\"http://google.com\")\n\tif err != nil {\n\t\tlog.Printf(\"error: %v\", err)\n\t}\n\tdefer resp.Body.Close()\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go",
    "content": "// Copyright 2017 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage promhttp\n\nimport (\n\t\"net/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\n// magicString is used for the hacky label test in checkLabels. Remove once fixed.\nconst magicString = \"zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa\"\n\n// InstrumentHandlerInFlight is a middleware that wraps the provided\n// http.Handler. It sets the provided prometheus.Gauge to the number of\n// requests currently handled by the wrapped http.Handler.\n//\n// See the example for InstrumentHandlerDuration for example usage.\nfunc InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tg.Inc()\n\t\tdefer g.Dec()\n\t\tnext.ServeHTTP(w, r)\n\t})\n}\n\n// InstrumentHandlerDuration is a middleware that wraps the provided\n// http.Handler to observe the request duration with the provided ObserverVec.\n// The ObserverVec must have zero, one, or two labels. The only allowed label\n// names are \"code\" and \"method\". The function panics if any other instance\n// labels are provided. The Observe method of the Observer in the ObserverVec\n// is called with the request duration in seconds. Partitioning happens by HTTP\n// status code and/or HTTP method if the respective instance label names are\n// present in the ObserverVec. For unpartitioned observations, use an\n// ObserverVec with zero labels. Note that partitioning of Histograms is\n// expensive and should be used judiciously.\n//\n// If the wrapped Handler does not set a status code, a status code of 200 is assumed.\n//\n// If the wrapped Handler panics, no values are reported.\n//\n// Note that this method is only guaranteed to never observe negative durations\n// if used with Go1.9+.\nfunc InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {\n\tcode, method := checkLabels(obs)\n\n\tif code {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tnow := time.Now()\n\t\t\td := newDelegator(w, nil)\n\t\t\tnext.ServeHTTP(d, r)\n\n\t\t\tobs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds())\n\t\t})\n\t}\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tnow := time.Now()\n\t\tnext.ServeHTTP(w, r)\n\t\tobs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds())\n\t})\n}\n\n// InstrumentHandlerCounter is a middleware that wraps the provided\n// http.Handler to observe the request result with the provided CounterVec.\n// The CounterVec must have zero, one, or two labels. The only allowed label\n// names are \"code\" and \"method\". The function panics if any other instance\n// labels are provided. 
Partitioning of the CounterVec happens by HTTP status\n// code and/or HTTP method if the respective instance label names are present\n// in the CounterVec. For unpartitioned counting, use a CounterVec with\n// zero labels.\n//\n// If the wrapped Handler does not set a status code, a status code of 200 is assumed.\n//\n// If the wrapped Handler panics, the Counter is not incremented.\n//\n// See the example for InstrumentHandlerDuration for example usage.\nfunc InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc {\n\tcode, method := checkLabels(counter)\n\n\tif code {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\td := newDelegator(w, nil)\n\t\t\tnext.ServeHTTP(d, r)\n\t\t\tcounter.With(labels(code, method, r.Method, d.Status())).Inc()\n\t\t})\n\t}\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tnext.ServeHTTP(w, r)\n\t\tcounter.With(labels(code, method, r.Method, 0)).Inc()\n\t})\n}\n\n// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided\n// http.Handler to observe with the provided ObserverVec the request duration\n// until the response headers are written. The ObserverVec must have zero, one,\n// or two labels. The only allowed label names are \"code\" and \"method\". The\n// function panics if any other instance labels are provided. The Observe\n// method of the Observer in the ObserverVec is called with the request\n// duration in seconds. Partitioning happens by HTTP status code and/or HTTP\n// method if the respective instance label names are present in the\n// ObserverVec. For unpartitioned observations, use an ObserverVec with zero\n// labels. Note that partitioning of Histograms is expensive and should be used\n// judiciously.\n//\n// If the wrapped Handler panics before calling WriteHeader, no value is\n// reported.\n//\n// Note that this method is only guaranteed to never observe negative durations\n// if used with Go1.9+.\n//\n// See the example for InstrumentHandlerDuration for example usage.\nfunc InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {\n\tcode, method := checkLabels(obs)\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tnow := time.Now()\n\t\td := newDelegator(w, func(status int) {\n\t\t\tobs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds())\n\t\t})\n\t\tnext.ServeHTTP(d, r)\n\t})\n}\n\n// InstrumentHandlerRequestSize is a middleware that wraps the provided\n// http.Handler to observe the request size with the provided ObserverVec.\n// The ObserverVec must have zero, one, or two labels. The only allowed label\n// names are \"code\" and \"method\". The function panics if any other instance\n// labels are provided. The Observe method of the Observer in the ObserverVec\n// is called with the request size in bytes. Partitioning happens by HTTP\n// status code and/or HTTP method if the respective instance label names are\n// present in the ObserverVec. For unpartitioned observations, use an\n// ObserverVec with zero labels. 
Note that partitioning of Histograms is\n// expensive and should be used judiciously.\n//\n// If the wrapped Handler does not set a status code, a status code of 200 is assumed.\n//\n// If the wrapped Handler panics, no values are reported.\n//\n// See the example for InstrumentHandlerDuration for example usage.\nfunc InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {\n\tcode, method := checkLabels(obs)\n\n\tif code {\n\t\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\td := newDelegator(w, nil)\n\t\t\tnext.ServeHTTP(d, r)\n\t\t\tsize := computeApproximateRequestSize(r)\n\t\t\tobs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size))\n\t\t})\n\t}\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tnext.ServeHTTP(w, r)\n\t\tsize := computeApproximateRequestSize(r)\n\t\tobs.With(labels(code, method, r.Method, 0)).Observe(float64(size))\n\t})\n}\n\n// InstrumentHandlerResponseSize is a middleware that wraps the provided\n// http.Handler to observe the response size with the provided ObserverVec.\n// The ObserverVec must have zero, one, or two labels. The only allowed label\n// names are \"code\" and \"method\". The function panics if any other instance\n// labels are provided. The Observe method of the Observer in the ObserverVec\n// is called with the response size in bytes. Partitioning happens by HTTP\n// status code and/or HTTP method if the respective instance label names are\n// present in the ObserverVec. For unpartitioned observations, use an\n// ObserverVec with zero labels. Note that partitioning of Histograms is\n// expensive and should be used judiciously.\n//\n// If the wrapped Handler does not set a status code, a status code of 200 is assumed.\n//\n// If the wrapped Handler panics, no values are reported.\n//\n// See the example for InstrumentHandlerDuration for example usage.\nfunc InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler {\n\tcode, method := checkLabels(obs)\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\td := newDelegator(w, nil)\n\t\tnext.ServeHTTP(d, r)\n\t\tobs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written()))\n\t})\n}\n\nfunc checkLabels(c prometheus.Collector) (code bool, method bool) {\n\t// TODO(beorn7): Remove this hacky way to check for instance labels\n\t// once Descriptors can have their dimensionality queried.\n\tvar (\n\t\tdesc *prometheus.Desc\n\t\tpm   dto.Metric\n\t)\n\n\tdescc := make(chan *prometheus.Desc, 1)\n\tc.Describe(descc)\n\n\tselect {\n\tcase desc = <-descc:\n\tdefault:\n\t\tpanic(\"no description provided by collector\")\n\t}\n\tselect {\n\tcase <-descc:\n\t\tpanic(\"more than one description provided by collector\")\n\tdefault:\n\t}\n\n\tclose(descc)\n\n\tif _, err := prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0); err == nil {\n\t\treturn\n\t}\n\tif m, err := prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, magicString); err == nil {\n\t\tif err := m.Write(&pm); err != nil {\n\t\t\tpanic(\"error checking metric for labels\")\n\t\t}\n\t\tfor _, label := range pm.Label {\n\t\t\tname, value := label.GetName(), label.GetValue()\n\t\t\tif value != magicString {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tswitch name {\n\t\t\tcase \"code\":\n\t\t\t\tcode = true\n\t\t\tcase \"method\":\n\t\t\t\tmethod = true\n\t\t\tdefault:\n\t\t\t\tpanic(\"metric partitioned with non-supported 
labels\")\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tpanic(\"previously set label not found – this must never happen\")\n\t}\n\tif m, err := prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, magicString, magicString); err == nil {\n\t\tif err := m.Write(&pm); err != nil {\n\t\t\tpanic(\"error checking metric for labels\")\n\t\t}\n\t\tfor _, label := range pm.Label {\n\t\t\tname, value := label.GetName(), label.GetValue()\n\t\t\tif value != magicString {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif name == \"code\" || name == \"method\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tpanic(\"metric partitioned with non-supported labels\")\n\t\t}\n\t\tcode = true\n\t\tmethod = true\n\t\treturn\n\t}\n\tpanic(\"metric partitioned with non-supported labels\")\n}\n\n// emptyLabels is a one-time allocation for non-partitioned metrics to avoid\n// unnecessary allocations on each request.\nvar emptyLabels = prometheus.Labels{}\n\nfunc labels(code, method bool, reqMethod string, status int) prometheus.Labels {\n\tif !(code || method) {\n\t\treturn emptyLabels\n\t}\n\tlabels := prometheus.Labels{}\n\n\tif code {\n\t\tlabels[\"code\"] = sanitizeCode(status)\n\t}\n\tif method {\n\t\tlabels[\"method\"] = sanitizeMethod(reqMethod)\n\t}\n\n\treturn labels\n}\n\nfunc computeApproximateRequestSize(r *http.Request) int {\n\ts := 0\n\tif r.URL != nil {\n\t\ts += len(r.URL.String())\n\t}\n\n\ts += len(r.Method)\n\ts += len(r.Proto)\n\tfor name, values := range r.Header {\n\t\ts += len(name)\n\t\tfor _, value := range values {\n\t\t\ts += len(value)\n\t\t}\n\t}\n\ts += len(r.Host)\n\n\t// N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.\n\n\tif r.ContentLength != -1 {\n\t\ts += int(r.ContentLength)\n\t}\n\treturn s\n}\n\nfunc sanitizeMethod(m string) string {\n\tswitch m {\n\tcase \"GET\", \"get\":\n\t\treturn \"get\"\n\tcase \"PUT\", \"put\":\n\t\treturn \"put\"\n\tcase \"HEAD\", \"head\":\n\t\treturn \"head\"\n\tcase \"POST\", \"post\":\n\t\treturn \"post\"\n\tcase \"DELETE\", \"delete\":\n\t\treturn \"delete\"\n\tcase \"CONNECT\", \"connect\":\n\t\treturn \"connect\"\n\tcase \"OPTIONS\", \"options\":\n\t\treturn \"options\"\n\tcase \"NOTIFY\", \"notify\":\n\t\treturn \"notify\"\n\tdefault:\n\t\treturn strings.ToLower(m)\n\t}\n}\n\n// If the wrapped http.Handler has not set a status code, i.e. 
the value is\n// currently 0, sanitizeCode will return 200, for consistency with behavior in\n// the stdlib.\nfunc sanitizeCode(s int) string {\n\tswitch s {\n\tcase 100:\n\t\treturn \"100\"\n\tcase 101:\n\t\treturn \"101\"\n\n\tcase 200, 0:\n\t\treturn \"200\"\n\tcase 201:\n\t\treturn \"201\"\n\tcase 202:\n\t\treturn \"202\"\n\tcase 203:\n\t\treturn \"203\"\n\tcase 204:\n\t\treturn \"204\"\n\tcase 205:\n\t\treturn \"205\"\n\tcase 206:\n\t\treturn \"206\"\n\n\tcase 300:\n\t\treturn \"300\"\n\tcase 301:\n\t\treturn \"301\"\n\tcase 302:\n\t\treturn \"302\"\n\tcase 304:\n\t\treturn \"304\"\n\tcase 305:\n\t\treturn \"305\"\n\tcase 307:\n\t\treturn \"307\"\n\n\tcase 400:\n\t\treturn \"400\"\n\tcase 401:\n\t\treturn \"401\"\n\tcase 402:\n\t\treturn \"402\"\n\tcase 403:\n\t\treturn \"403\"\n\tcase 404:\n\t\treturn \"404\"\n\tcase 405:\n\t\treturn \"405\"\n\tcase 406:\n\t\treturn \"406\"\n\tcase 407:\n\t\treturn \"407\"\n\tcase 408:\n\t\treturn \"408\"\n\tcase 409:\n\t\treturn \"409\"\n\tcase 410:\n\t\treturn \"410\"\n\tcase 411:\n\t\treturn \"411\"\n\tcase 412:\n\t\treturn \"412\"\n\tcase 413:\n\t\treturn \"413\"\n\tcase 414:\n\t\treturn \"414\"\n\tcase 415:\n\t\treturn \"415\"\n\tcase 416:\n\t\treturn \"416\"\n\tcase 417:\n\t\treturn \"417\"\n\tcase 418:\n\t\treturn \"418\"\n\n\tcase 500:\n\t\treturn \"500\"\n\tcase 501:\n\t\treturn \"501\"\n\tcase 502:\n\t\treturn \"502\"\n\tcase 503:\n\t\treturn \"503\"\n\tcase 504:\n\t\treturn \"504\"\n\tcase 505:\n\t\treturn \"505\"\n\n\tcase 428:\n\t\treturn \"428\"\n\tcase 429:\n\t\treturn \"429\"\n\tcase 431:\n\t\treturn \"431\"\n\tcase 511:\n\t\treturn \"511\"\n\n\tdefault:\n\t\treturn strconv.Itoa(s)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server_test.go",
    "content": "// Copyright 2017 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage promhttp\n\nimport (\n\t\"io\"\n\t\"log\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"testing\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nfunc TestMiddlewareAPI(t *testing.T) {\n\treg := prometheus.NewRegistry()\n\n\tinFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tName: \"in_flight_requests\",\n\t\tHelp: \"A gauge of requests currently being served by the wrapped handler.\",\n\t})\n\n\tcounter := prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tName: \"api_requests_total\",\n\t\t\tHelp: \"A counter for requests to the wrapped handler.\",\n\t\t},\n\t\t[]string{\"code\", \"method\"},\n\t)\n\n\thistVec := prometheus.NewHistogramVec(\n\t\tprometheus.HistogramOpts{\n\t\t\tName:        \"response_duration_seconds\",\n\t\t\tHelp:        \"A histogram of request latencies.\",\n\t\t\tBuckets:     prometheus.DefBuckets,\n\t\t\tConstLabels: prometheus.Labels{\"handler\": \"api\"},\n\t\t},\n\t\t[]string{\"method\"},\n\t)\n\n\twriteHeaderVec := prometheus.NewHistogramVec(\n\t\tprometheus.HistogramOpts{\n\t\t\tName:        \"write_header_duration_seconds\",\n\t\t\tHelp:        \"A histogram of time to first write latencies.\",\n\t\t\tBuckets:     prometheus.DefBuckets,\n\t\t\tConstLabels: prometheus.Labels{\"handler\": \"api\"},\n\t\t},\n\t\t[]string{},\n\t)\n\n\tresponseSize := prometheus.NewHistogramVec(\n\t\tprometheus.HistogramOpts{\n\t\t\tName:    \"push_request_size_bytes\",\n\t\t\tHelp:    \"A histogram of request sizes for requests.\",\n\t\t\tBuckets: []float64{200, 500, 900, 1500},\n\t\t},\n\t\t[]string{},\n\t)\n\n\thandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Write([]byte(\"OK\"))\n\t})\n\n\treg.MustRegister(inFlightGauge, counter, histVec, responseSize, writeHeaderVec)\n\n\tchain := InstrumentHandlerInFlight(inFlightGauge,\n\t\tInstrumentHandlerCounter(counter,\n\t\t\tInstrumentHandlerDuration(histVec,\n\t\t\t\tInstrumentHandlerTimeToWriteHeader(writeHeaderVec,\n\t\t\t\t\tInstrumentHandlerResponseSize(responseSize, handler),\n\t\t\t\t),\n\t\t\t),\n\t\t),\n\t)\n\n\tr, _ := http.NewRequest(\"GET\", \"www.example.com\", nil)\n\tw := httptest.NewRecorder()\n\tchain.ServeHTTP(w, r)\n}\n\nfunc TestInstrumentTimeToFirstWrite(t *testing.T) {\n\tvar i int\n\tdobs := &responseWriterDelegator{\n\t\tResponseWriter: httptest.NewRecorder(),\n\t\tobserveWriteHeader: func(status int) {\n\t\t\ti = status\n\t\t},\n\t}\n\td := newDelegator(dobs, nil)\n\n\td.WriteHeader(http.StatusOK)\n\n\tif i != http.StatusOK {\n\t\tt.Fatalf(\"failed to execute observeWriteHeader\")\n\t}\n}\n\n// testResponseWriter is an http.ResponseWriter that also implements\n// http.CloseNotifier, http.Flusher, and io.ReaderFrom.\ntype testResponseWriter struct {\n\tcloseNotifyCalled, flushCalled, readFromCalled bool\n}\n\nfunc (t *testResponseWriter) Header() http.Header       { return nil }\nfunc (t *testResponseWriter) 
Write([]byte) (int, error) { return 0, nil }\nfunc (t *testResponseWriter) WriteHeader(int)           {}\nfunc (t *testResponseWriter) CloseNotify() <-chan bool {\n\tt.closeNotifyCalled = true\n\treturn nil\n}\nfunc (t *testResponseWriter) Flush() { t.flushCalled = true }\nfunc (t *testResponseWriter) ReadFrom(io.Reader) (int64, error) {\n\tt.readFromCalled = true\n\treturn 0, nil\n}\n\nfunc TestInterfaceUpgrade(t *testing.T) {\n\tw := &testResponseWriter{}\n\td := newDelegator(w, nil)\n\td.(http.CloseNotifier).CloseNotify()\n\tif !w.closeNotifyCalled {\n\t\tt.Error(\"CloseNotify not called\")\n\t}\n\td.(http.Flusher).Flush()\n\tif !w.flushCalled {\n\t\tt.Error(\"Flush not called\")\n\t}\n\td.(io.ReaderFrom).ReadFrom(nil)\n\tif !w.readFromCalled {\n\t\tt.Error(\"ReadFrom not called\")\n\t}\n\tif _, ok := d.(http.Hijacker); ok {\n\t\tt.Error(\"delegator unexpectedly implements http.Hijacker\")\n\t}\n}\n\nfunc ExampleInstrumentHandlerDuration() {\n\tinFlightGauge := prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tName: \"in_flight_requests\",\n\t\tHelp: \"A gauge of requests currently being served by the wrapped handler.\",\n\t})\n\n\tcounter := prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tName: \"api_requests_total\",\n\t\t\tHelp: \"A counter for requests to the wrapped handler.\",\n\t\t},\n\t\t[]string{\"code\", \"method\"},\n\t)\n\n\t// pushVec and pullVec are partitioned by the HTTP method and use custom\n\t// buckets based on the expected request duration. ConstLabels are used\n\t// to set a handler label to mark pushVec as tracking the durations for\n\t// pushes and pullVec as tracking the durations for pulls. Note that\n\t// Name, Help, and Buckets need to be the same for consistency, so we\n\t// use the same HistogramOpts after just modifying the ConstLabels.\n\thistogramOpts := prometheus.HistogramOpts{\n\t\tName:        \"request_duration_seconds\",\n\t\tHelp:        \"A histogram of latencies for requests.\",\n\t\tBuckets:     []float64{.25, .5, 1, 2.5, 5, 10},\n\t\tConstLabels: prometheus.Labels{\"handler\": \"push\"},\n\t}\n\tpushVec := prometheus.NewHistogramVec(\n\t\thistogramOpts,\n\t\t[]string{\"method\"},\n\t)\n\thistogramOpts.ConstLabels = prometheus.Labels{\"handler\": \"pull\"}\n\tpullVec := prometheus.NewHistogramVec(\n\t\thistogramOpts,\n\t\t[]string{\"method\"},\n\t)\n\n\t// responseSize has no labels, making it a zero-dimensional\n\t// ObserverVec.\n\tresponseSize := prometheus.NewHistogramVec(\n\t\tprometheus.HistogramOpts{\n\t\t\tName:    \"response_size_bytes\",\n\t\t\tHelp:    \"A histogram of response sizes for requests.\",\n\t\t\tBuckets: []float64{200, 500, 900, 1500},\n\t\t},\n\t\t[]string{},\n\t)\n\n\t// Create the handlers that will be wrapped by the middleware.\n\tpushHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Write([]byte(\"Push\"))\n\t})\n\tpullHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Write([]byte(\"Pull\"))\n\t})\n\n\t// Register all of the metrics in the standard registry.\n\tprometheus.MustRegister(inFlightGauge, counter, pullVec, pushVec, responseSize)\n\n\t// Wrap the pushHandler with our shared middleware, but use the\n\t// endpoint-specific pushVec with InstrumentHandlerDuration.\n\tpushChain := InstrumentHandlerInFlight(inFlightGauge,\n\t\tInstrumentHandlerCounter(counter,\n\t\t\tInstrumentHandlerDuration(pushVec,\n\t\t\t\tInstrumentHandlerResponseSize(responseSize, pushHandler),\n\t\t\t),\n\t\t),\n\t)\n\n\t// Wrap the pullHandler with the shared middleware, but use the\n\t// endpoint-specific pullVec with InstrumentHandlerDuration.\n\tpullChain := InstrumentHandlerInFlight(inFlightGauge,\n\t\tInstrumentHandlerCounter(counter,\n\t\t\tInstrumentHandlerDuration(pullVec,\n\t\t\t\tInstrumentHandlerResponseSize(responseSize, pullHandler),\n\t\t\t),\n\t\t),\n\t)\n\n\thttp.Handle(\"/metrics\", Handler())\n\thttp.Handle(\"/push\", pushChain)\n\thttp.Handle(\"/pull\", pullChain)\n\n\tif err := http.ListenAndServe(\":3000\", nil); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n"
  },
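  {
    "path": "examples/prometheus_registerorreuse/main.go",
    "content": "// Illustrative sketch (not part of the vendored library or upstream\n// client_golang): detecting prometheus.AlreadyRegisteredError and reusing\n// the previously registered collector, the pattern the\n// AlreadyRegisteredError doc comment in registry.go refers to. The file\n// path and the helper name are assumptions made for illustration only.\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\n// registerOrReuse registers c with the default registry. If an equal\n// collector is already registered, it returns that existing collector\n// instead of failing.\nfunc registerOrReuse(c prometheus.Collector) prometheus.Collector {\n\tif err := prometheus.Register(c); err != nil {\n\t\tif are, ok := err.(prometheus.AlreadyRegisteredError); ok {\n\t\t\t// A collector with the same descriptors is already\n\t\t\t// registered; hand that one back.\n\t\t\treturn are.ExistingCollector\n\t\t}\n\t\tpanic(err) // Any other registration error is a programming bug here.\n\t}\n\treturn c\n}\n\nfunc main() {\n\tnewCounter := func() prometheus.Counter {\n\t\treturn prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tName: \"jobs_processed_total\",\n\t\t\tHelp: \"Total number of processed jobs.\",\n\t\t})\n\t}\n\t// The second call hits AlreadyRegisteredError and returns the counter\n\t// registered by the first call.\n\tfirst := registerOrReuse(newCounter()).(prometheus.Counter)\n\tsecond := registerOrReuse(newCounter()).(prometheus.Counter)\n\tfirst.Inc()\n\tsecond.Inc()\n\tfmt.Println(first == second) // true: both refer to the same collector\n}\n"
  },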
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/registry.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"sort\"\n\t\"sync\"\n\t\"unicode/utf8\"\n\n\t\"github.com/golang/protobuf/proto\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n)\n\nconst (\n\t// Capacity for the channel to collect metrics and descriptors.\n\tcapMetricChan = 1000\n\tcapDescChan   = 10\n)\n\n// DefaultRegisterer and DefaultGatherer are the implementations of the\n// Registerer and Gatherer interface a number of convenience functions in this\n// package act on. Initially, both variables point to the same Registry, which\n// has a process collector (see NewProcessCollector) and a Go collector (see\n// NewGoCollector) already registered. This approach to keep default instances\n// as global state mirrors the approach of other packages in the Go standard\n// library. Note that there are caveats. Change the variables with caution and\n// only if you understand the consequences. Users who want to avoid global state\n// altogether should not use the convenience function and act on custom\n// instances instead.\nvar (\n\tdefaultRegistry              = NewRegistry()\n\tDefaultRegisterer Registerer = defaultRegistry\n\tDefaultGatherer   Gatherer   = defaultRegistry\n)\n\nfunc init() {\n\tMustRegister(NewProcessCollector(os.Getpid(), \"\"))\n\tMustRegister(NewGoCollector())\n}\n\n// NewRegistry creates a new vanilla Registry without any Collectors\n// pre-registered.\nfunc NewRegistry() *Registry {\n\treturn &Registry{\n\t\tcollectorsByID:  map[uint64]Collector{},\n\t\tdescIDs:         map[uint64]struct{}{},\n\t\tdimHashesByName: map[string]uint64{},\n\t}\n}\n\n// NewPedanticRegistry returns a registry that checks during collection if each\n// collected Metric is consistent with its reported Desc, and if the Desc has\n// actually been registered with the registry.\n//\n// Usually, a Registry will be happy as long as the union of all collected\n// Metrics is consistent and valid even if some metrics are not consistent with\n// their own Desc or a Desc provided by their registered Collector. Well-behaved\n// Collectors and Metrics will only provide consistent Descs. This Registry is\n// useful to test the implementation of Collectors and Metrics.\nfunc NewPedanticRegistry() *Registry {\n\tr := NewRegistry()\n\tr.pedanticChecksEnabled = true\n\treturn r\n}\n\n// Registerer is the interface for the part of a registry in charge of\n// registering and unregistering. Users of custom registries should use\n// Registerer as type for registration purposes (rather than the Registry type\n// directly). In that way, they are free to use custom Registerer implementation\n// (e.g. for testing purposes).\ntype Registerer interface {\n\t// Register registers a new Collector to be included in metrics\n\t// collection. 
It returns an error if the descriptors provided by the\n\t// Collector are invalid or if they — in combination with descriptors of\n\t// already registered Collectors — do not fulfill the consistency and\n\t// uniqueness criteria described in the documentation of metric.Desc.\n\t//\n\t// If the provided Collector is equal to a Collector already registered\n\t// (which includes the case of re-registering the same Collector), the\n\t// returned error is an instance of AlreadyRegisteredError, which\n\t// contains the previously registered Collector.\n\t//\n\t// It is in general not safe to register the same Collector multiple\n\t// times concurrently.\n\tRegister(Collector) error\n\t// MustRegister works like Register but registers any number of\n\t// Collectors and panics upon the first registration that causes an\n\t// error.\n\tMustRegister(...Collector)\n\t// Unregister unregisters the Collector that equals the Collector passed\n\t// in as an argument.  (Two Collectors are considered equal if their\n\t// Describe method yields the same set of descriptors.) The function\n\t// returns whether a Collector was unregistered.\n\t//\n\t// Note that even after unregistering, it will not be possible to\n\t// register a new Collector that is inconsistent with the unregistered\n\t// Collector, e.g. a Collector collecting metrics with the same name but\n\t// a different help string. The rationale here is that the same registry\n\t// instance must only collect consistent metrics throughout its\n\t// lifetime.\n\tUnregister(Collector) bool\n}\n\n// Gatherer is the interface for the part of a registry in charge of gathering\n// the collected metrics into a number of MetricFamilies. The Gatherer interface\n// comes with the same general implication as described for the Registerer\n// interface.\ntype Gatherer interface {\n\t// Gather calls the Collect method of the registered Collectors and then\n\t// gathers the collected metrics into a lexicographically sorted slice\n\t// of MetricFamily protobufs. Even if an error occurs, Gather attempts\n\t// to gather as many metrics as possible. Hence, if a non-nil error is\n\t// returned, the returned MetricFamily slice could be nil (in case of a\n\t// fatal error that prevented any meaningful metric collection) or\n\t// contain a number of MetricFamily protobufs, some of which might be\n\t// incomplete, and some might be missing altogether. The returned error\n\t// (which might be a MultiError) explains the details. In scenarios\n\t// where complete collection is critical, the returned MetricFamily\n\t// protobufs should be disregarded if the returned error is non-nil.\n\tGather() ([]*dto.MetricFamily, error)\n}\n\n// Register registers the provided Collector with the DefaultRegisterer.\n//\n// Register is a shortcut for DefaultRegisterer.Register(c). See there for more\n// details.\nfunc Register(c Collector) error {\n\treturn DefaultRegisterer.Register(c)\n}\n\n// MustRegister registers the provided Collectors with the DefaultRegisterer and\n// panics if any error occurs.\n//\n// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See\n// there for more details.\nfunc MustRegister(cs ...Collector) {\n\tDefaultRegisterer.MustRegister(cs...)\n}\n\n// Unregister removes the registration of the provided Collector from the\n// DefaultRegisterer.\n//\n// Unregister is a shortcut for DefaultRegisterer.Unregister(c). 
See there for\n// more details.\nfunc Unregister(c Collector) bool {\n\treturn DefaultRegisterer.Unregister(c)\n}\n\n// GathererFunc turns a function into a Gatherer.\ntype GathererFunc func() ([]*dto.MetricFamily, error)\n\n// Gather implements Gatherer.\nfunc (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {\n\treturn gf()\n}\n\n// AlreadyRegisteredError is returned by the Register method if the Collector to\n// be registered has already been registered before, or a different Collector\n// that collects the same metrics has been registered before. Registration fails\n// in that case, but you can detect from the kind of error what has\n// happened. The error contains fields for the existing Collector and the\n// (rejected) new Collector that equals the existing one. This can be used to\n// find out if an equal Collector has been registered before and switch over to\n// using the old one, as demonstrated in the example.\ntype AlreadyRegisteredError struct {\n\tExistingCollector, NewCollector Collector\n}\n\nfunc (err AlreadyRegisteredError) Error() string {\n\treturn \"duplicate metrics collector registration attempted\"\n}\n\n// MultiError is a slice of errors implementing the error interface. It is used\n// by a Gatherer to report multiple errors during MetricFamily gathering.\ntype MultiError []error\n\nfunc (errs MultiError) Error() string {\n\tif len(errs) == 0 {\n\t\treturn \"\"\n\t}\n\tbuf := &bytes.Buffer{}\n\tfmt.Fprintf(buf, \"%d error(s) occurred:\", len(errs))\n\tfor _, err := range errs {\n\t\tfmt.Fprintf(buf, \"\\n* %s\", err)\n\t}\n\treturn buf.String()\n}\n\n// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only\n// contained error as error if len(errs) is 1. In all other cases, it returns\n// the MultiError directly. This is helpful for returning a MultiError in a way\n// that only uses the MultiError if needed.\nfunc (errs MultiError) MaybeUnwrap() error {\n\tswitch len(errs) {\n\tcase 0:\n\t\treturn nil\n\tcase 1:\n\t\treturn errs[0]\n\tdefault:\n\t\treturn errs\n\t}\n}\n\n// Registry registers Prometheus collectors, collects their metrics, and gathers\n// them into MetricFamilies for exposition. It implements both Registerer and\n// Gatherer. The zero value is not usable. 
Create instances with NewRegistry or\n// NewPedanticRegistry.\ntype Registry struct {\n\tmtx                   sync.RWMutex\n\tcollectorsByID        map[uint64]Collector // ID is a hash of the descIDs.\n\tdescIDs               map[uint64]struct{}\n\tdimHashesByName       map[string]uint64\n\tpedanticChecksEnabled bool\n}\n\n// Register implements Registerer.\nfunc (r *Registry) Register(c Collector) error {\n\tvar (\n\t\tdescChan           = make(chan *Desc, capDescChan)\n\t\tnewDescIDs         = map[uint64]struct{}{}\n\t\tnewDimHashesByName = map[string]uint64{}\n\t\tcollectorID        uint64 // Just a sum of all desc IDs.\n\t\tduplicateDescErr   error\n\t)\n\tgo func() {\n\t\tc.Describe(descChan)\n\t\tclose(descChan)\n\t}()\n\tr.mtx.Lock()\n\tdefer r.mtx.Unlock()\n\t// Conduct various tests...\n\tfor desc := range descChan {\n\n\t\t// Is the descriptor valid at all?\n\t\tif desc.err != nil {\n\t\t\treturn fmt.Errorf(\"descriptor %s is invalid: %s\", desc, desc.err)\n\t\t}\n\n\t\t// Is the descID unique?\n\t\t// (In other words: Is the fqName + constLabel combination unique?)\n\t\tif _, exists := r.descIDs[desc.id]; exists {\n\t\t\tduplicateDescErr = fmt.Errorf(\"descriptor %s already exists with the same fully-qualified name and const label values\", desc)\n\t\t}\n\t\t// If it is not a duplicate desc in this collector, add it to\n\t\t// the collectorID.  (We allow duplicate descs within the same\n\t\t// collector, but their existence must be a no-op.)\n\t\tif _, exists := newDescIDs[desc.id]; !exists {\n\t\t\tnewDescIDs[desc.id] = struct{}{}\n\t\t\tcollectorID += desc.id\n\t\t}\n\n\t\t// Are all the label names and the help string consistent with\n\t\t// previous descriptors of the same name?\n\t\t// First check existing descriptors...\n\t\tif dimHash, exists := r.dimHashesByName[desc.fqName]; exists {\n\t\t\tif dimHash != desc.dimHash {\n\t\t\t\treturn fmt.Errorf(\"a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string\", desc)\n\t\t\t}\n\t\t} else {\n\t\t\t// ...then check the new descriptors already seen.\n\t\t\tif dimHash, exists := newDimHashesByName[desc.fqName]; exists {\n\t\t\t\tif dimHash != desc.dimHash {\n\t\t\t\t\treturn fmt.Errorf(\"descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s\", desc)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tnewDimHashesByName[desc.fqName] = desc.dimHash\n\t\t\t}\n\t\t}\n\t}\n\t// Did anything happen at all?\n\tif len(newDescIDs) == 0 {\n\t\treturn errors.New(\"collector has no descriptors\")\n\t}\n\tif existing, exists := r.collectorsByID[collectorID]; exists {\n\t\treturn AlreadyRegisteredError{\n\t\t\tExistingCollector: existing,\n\t\t\tNewCollector:      c,\n\t\t}\n\t}\n\t// If the collectorID is new, but at least one of the descs existed\n\t// before, we are in trouble.\n\tif duplicateDescErr != nil {\n\t\treturn duplicateDescErr\n\t}\n\n\t// Only after all tests have passed, actually register.\n\tr.collectorsByID[collectorID] = c\n\tfor hash := range newDescIDs {\n\t\tr.descIDs[hash] = struct{}{}\n\t}\n\tfor name, dimHash := range newDimHashesByName {\n\t\tr.dimHashesByName[name] = dimHash\n\t}\n\treturn nil\n}\n\n// Unregister implements Registerer.\nfunc (r *Registry) Unregister(c Collector) bool {\n\tvar (\n\t\tdescChan    = make(chan *Desc, capDescChan)\n\t\tdescIDs     = map[uint64]struct{}{}\n\t\tcollectorID uint64 // Just a sum of the desc IDs.\n\t)\n\tgo func() 
{\n\t\tc.Describe(descChan)\n\t\tclose(descChan)\n\t}()\n\tfor desc := range descChan {\n\t\tif _, exists := descIDs[desc.id]; !exists {\n\t\t\tcollectorID += desc.id\n\t\t\tdescIDs[desc.id] = struct{}{}\n\t\t}\n\t}\n\n\tr.mtx.RLock()\n\tif _, exists := r.collectorsByID[collectorID]; !exists {\n\t\tr.mtx.RUnlock()\n\t\treturn false\n\t}\n\tr.mtx.RUnlock()\n\n\tr.mtx.Lock()\n\tdefer r.mtx.Unlock()\n\n\tdelete(r.collectorsByID, collectorID)\n\tfor id := range descIDs {\n\t\tdelete(r.descIDs, id)\n\t}\n\t// dimHashesByName is left untouched as those must be consistent\n\t// throughout the lifetime of a program.\n\treturn true\n}\n\n// MustRegister implements Registerer.\nfunc (r *Registry) MustRegister(cs ...Collector) {\n\tfor _, c := range cs {\n\t\tif err := r.Register(c); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}\n\n// Gather implements Gatherer.\nfunc (r *Registry) Gather() ([]*dto.MetricFamily, error) {\n\tvar (\n\t\tmetricChan        = make(chan Metric, capMetricChan)\n\t\tmetricHashes      = map[uint64]struct{}{}\n\t\tdimHashes         = map[string]uint64{}\n\t\twg                sync.WaitGroup\n\t\terrs              MultiError          // The collected errors to return in the end.\n\t\tregisteredDescIDs map[uint64]struct{} // Only used for pedantic checks\n\t)\n\n\tr.mtx.RLock()\n\tmetricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName))\n\n\t// Scatter.\n\t// (Collectors could be complex and slow, so we call them all at once.)\n\twg.Add(len(r.collectorsByID))\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(metricChan)\n\t}()\n\tfor _, collector := range r.collectorsByID {\n\t\tgo func(collector Collector) {\n\t\t\tdefer wg.Done()\n\t\t\tcollector.Collect(metricChan)\n\t\t}(collector)\n\t}\n\n\t// In case pedantic checks are enabled, we have to copy the map before\n\t// giving up the RLock.\n\tif r.pedanticChecksEnabled {\n\t\tregisteredDescIDs = make(map[uint64]struct{}, len(r.descIDs))\n\t\tfor id := range r.descIDs {\n\t\t\tregisteredDescIDs[id] = struct{}{}\n\t\t}\n\t}\n\n\tr.mtx.RUnlock()\n\n\t// Drain metricChan in case of premature return.\n\tdefer func() {\n\t\tfor range metricChan {\n\t\t}\n\t}()\n\n\t// Gather.\n\tfor metric := range metricChan {\n\t\t// This could be done concurrently, too, but it required locking\n\t\t// of metricFamiliesByName (and of metricHashes if checks are\n\t\t// enabled). 
Most likely not worth it.\n\t\tdesc := metric.Desc()\n\t\tdtoMetric := &dto.Metric{}\n\t\tif err := metric.Write(dtoMetric); err != nil {\n\t\t\terrs = append(errs, fmt.Errorf(\n\t\t\t\t\"error collecting metric %v: %s\", desc, err,\n\t\t\t))\n\t\t\tcontinue\n\t\t}\n\t\tmetricFamily, ok := metricFamiliesByName[desc.fqName]\n\t\tif ok {\n\t\t\tif metricFamily.GetHelp() != desc.help {\n\t\t\t\terrs = append(errs, fmt.Errorf(\n\t\t\t\t\t\"collected metric %s %s has help %q but should have %q\",\n\t\t\t\t\tdesc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(),\n\t\t\t\t))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// TODO(beorn7): Simplify switch once Desc has type.\n\t\t\tswitch metricFamily.GetType() {\n\t\t\tcase dto.MetricType_COUNTER:\n\t\t\t\tif dtoMetric.Counter == nil {\n\t\t\t\t\terrs = append(errs, fmt.Errorf(\n\t\t\t\t\t\t\"collected metric %s %s should be a Counter\",\n\t\t\t\t\t\tdesc.fqName, dtoMetric,\n\t\t\t\t\t))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase dto.MetricType_GAUGE:\n\t\t\t\tif dtoMetric.Gauge == nil {\n\t\t\t\t\terrs = append(errs, fmt.Errorf(\n\t\t\t\t\t\t\"collected metric %s %s should be a Gauge\",\n\t\t\t\t\t\tdesc.fqName, dtoMetric,\n\t\t\t\t\t))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase dto.MetricType_SUMMARY:\n\t\t\t\tif dtoMetric.Summary == nil {\n\t\t\t\t\terrs = append(errs, fmt.Errorf(\n\t\t\t\t\t\t\"collected metric %s %s should be a Summary\",\n\t\t\t\t\t\tdesc.fqName, dtoMetric,\n\t\t\t\t\t))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase dto.MetricType_UNTYPED:\n\t\t\t\tif dtoMetric.Untyped == nil {\n\t\t\t\t\terrs = append(errs, fmt.Errorf(\n\t\t\t\t\t\t\"collected metric %s %s should be Untyped\",\n\t\t\t\t\t\tdesc.fqName, dtoMetric,\n\t\t\t\t\t))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tcase dto.MetricType_HISTOGRAM:\n\t\t\t\tif dtoMetric.Histogram == nil {\n\t\t\t\t\terrs = append(errs, fmt.Errorf(\n\t\t\t\t\t\t\"collected metric %s %s should be a Histogram\",\n\t\t\t\t\t\tdesc.fqName, dtoMetric,\n\t\t\t\t\t))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tpanic(\"encountered MetricFamily with invalid type\")\n\t\t\t}\n\t\t} else {\n\t\t\tmetricFamily = &dto.MetricFamily{}\n\t\t\tmetricFamily.Name = proto.String(desc.fqName)\n\t\t\tmetricFamily.Help = proto.String(desc.help)\n\t\t\t// TODO(beorn7): Simplify switch once Desc has type.\n\t\t\tswitch {\n\t\t\tcase dtoMetric.Gauge != nil:\n\t\t\t\tmetricFamily.Type = dto.MetricType_GAUGE.Enum()\n\t\t\tcase dtoMetric.Counter != nil:\n\t\t\t\tmetricFamily.Type = dto.MetricType_COUNTER.Enum()\n\t\t\tcase dtoMetric.Summary != nil:\n\t\t\t\tmetricFamily.Type = dto.MetricType_SUMMARY.Enum()\n\t\t\tcase dtoMetric.Untyped != nil:\n\t\t\t\tmetricFamily.Type = dto.MetricType_UNTYPED.Enum()\n\t\t\tcase dtoMetric.Histogram != nil:\n\t\t\t\tmetricFamily.Type = dto.MetricType_HISTOGRAM.Enum()\n\t\t\tdefault:\n\t\t\t\terrs = append(errs, fmt.Errorf(\n\t\t\t\t\t\"empty metric collected: %s\", dtoMetric,\n\t\t\t\t))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tmetricFamiliesByName[desc.fqName] = metricFamily\n\t\t}\n\t\tif err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t\tcontinue\n\t\t}\n\t\tif r.pedanticChecksEnabled {\n\t\t\t// Is the desc registered at all?\n\t\t\tif _, exist := registeredDescIDs[desc.id]; !exist {\n\t\t\t\terrs = append(errs, fmt.Errorf(\n\t\t\t\t\t\"collected metric %s %s with unregistered descriptor %s\",\n\t\t\t\t\tmetricFamily.GetName(), dtoMetric, desc,\n\t\t\t\t))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := 
checkDescConsistency(metricFamily, dtoMetric, desc); err != nil {\n\t\t\t\terrs = append(errs, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tmetricFamily.Metric = append(metricFamily.Metric, dtoMetric)\n\t}\n\treturn normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()\n}\n\n// Gatherers is a slice of Gatherer instances that implements the Gatherer\n// interface itself. Its Gather method calls Gather on all Gatherers in the\n// slice in order and returns the merged results. Errors returned from the\n// Gather calls are all returned in a flattened MultiError. Duplicate and\n// inconsistent Metrics are skipped (first occurrence in slice order wins) and\n// reported in the returned error.\n//\n// Gatherers can be used to merge the Gather results from multiple\n// Registries. It also provides a way to directly inject existing MetricFamily\n// protobufs into the gathering by creating a custom Gatherer with a Gather\n// method that simply returns the existing MetricFamily protobufs. Note that no\n// registration is involved (in contrast to Collector registration), so\n// obviously registration-time checks cannot happen. Any inconsistencies between\n// the gathered MetricFamilies are reported as errors by the Gather method, and\n// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies\n// (e.g. syntactically invalid metric or label names) will go undetected.\ntype Gatherers []Gatherer\n\n// Gather implements Gatherer.\nfunc (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {\n\tvar (\n\t\tmetricFamiliesByName = map[string]*dto.MetricFamily{}\n\t\tmetricHashes         = map[uint64]struct{}{}\n\t\tdimHashes            = map[string]uint64{}\n\t\terrs                 MultiError // The collected errors to return in the end.\n\t)\n\n\tfor i, g := range gs {\n\t\tmfs, err := g.Gather()\n\t\tif err != nil {\n\t\t\tif multiErr, ok := err.(MultiError); ok {\n\t\t\t\tfor _, err := range multiErr {\n\t\t\t\t\terrs = append(errs, fmt.Errorf(\"[from Gatherer #%d] %s\", i+1, err))\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\terrs = append(errs, fmt.Errorf(\"[from Gatherer #%d] %s\", i+1, err))\n\t\t\t}\n\t\t}\n\t\tfor _, mf := range mfs {\n\t\t\texistingMF, exists := metricFamiliesByName[mf.GetName()]\n\t\t\tif exists {\n\t\t\t\tif existingMF.GetHelp() != mf.GetHelp() {\n\t\t\t\t\terrs = append(errs, fmt.Errorf(\n\t\t\t\t\t\t\"gathered metric family %s has help %q but should have %q\",\n\t\t\t\t\t\tmf.GetName(), mf.GetHelp(), existingMF.GetHelp(),\n\t\t\t\t\t))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif existingMF.GetType() != mf.GetType() {\n\t\t\t\t\terrs = append(errs, fmt.Errorf(\n\t\t\t\t\t\t\"gathered metric family %s has type %s but should have %s\",\n\t\t\t\t\t\tmf.GetName(), mf.GetType(), existingMF.GetType(),\n\t\t\t\t\t))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\texistingMF = &dto.MetricFamily{}\n\t\t\t\texistingMF.Name = mf.Name\n\t\t\t\texistingMF.Help = mf.Help\n\t\t\t\texistingMF.Type = mf.Type\n\t\t\t\tmetricFamiliesByName[mf.GetName()] = existingMF\n\t\t\t}\n\t\t\tfor _, m := range mf.Metric {\n\t\t\t\tif err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil {\n\t\t\t\t\terrs = append(errs, err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\texistingMF.Metric = append(existingMF.Metric, m)\n\t\t\t}\n\t\t}\n\t}\n\treturn normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()\n}\n\n
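// A minimal sketch of the injection use case described above (the\n// cachedMetricFamilies variable is hypothetical, standing in for MetricFamily\n// protobufs obtained elsewhere):\n//\n//     reg := NewRegistry() // Collectors would be registered with reg.\n//     g := Gatherers{\n//         reg,\n//         GathererFunc(func() ([]*dto.MetricFamily, error) {\n//             return cachedMetricFamilies, nil\n//         }),\n//     }\n//     mfs, err := g.Gather()\n\n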
// metricSorter is a sortable slice of *dto.Metric.\ntype metricSorter []*dto.Metric\n\nfunc (s metricSorter) Len() int {\n\treturn len(s)\n}\n\nfunc (s metricSorter) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n}\n\nfunc (s metricSorter) Less(i, j int) bool {\n\tif len(s[i].Label) != len(s[j].Label) {\n\t\t// This should not happen. The metrics are\n\t\t// inconsistent. However, we have to deal with it, as\n\t\t// people might use custom collectors or metric family injection\n\t\t// to create inconsistent metrics. So let's simply compare the\n\t\t// number of labels in this case. That will still yield\n\t\t// reproducible sorting.\n\t\treturn len(s[i].Label) < len(s[j].Label)\n\t}\n\tfor n, lp := range s[i].Label {\n\t\tvi := lp.GetValue()\n\t\tvj := s[j].Label[n].GetValue()\n\t\tif vi != vj {\n\t\t\treturn vi < vj\n\t\t}\n\t}\n\n\t// We should never arrive here. Multiple metrics with the same\n\t// label set in the same scrape will lead to undefined ingestion\n\t// behavior. However, as above, we have to provide stable sorting\n\t// here, even for inconsistent metrics. So sort equal metrics\n\t// by their timestamp, with missing timestamps (implying \"now\")\n\t// coming last.\n\tif s[i].TimestampMs == nil {\n\t\treturn false\n\t}\n\tif s[j].TimestampMs == nil {\n\t\treturn true\n\t}\n\treturn s[i].GetTimestampMs() < s[j].GetTimestampMs()\n}\n\n// normalizeMetricFamilies returns a MetricFamily slice with empty\n// MetricFamilies pruned and the remaining MetricFamilies sorted by name within\n// the slice, with the contained Metrics sorted within each MetricFamily.\nfunc normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily {\n\tfor _, mf := range metricFamiliesByName {\n\t\tsort.Sort(metricSorter(mf.Metric))\n\t}\n\tnames := make([]string, 0, len(metricFamiliesByName))\n\tfor name, mf := range metricFamiliesByName {\n\t\tif len(mf.Metric) > 0 {\n\t\t\tnames = append(names, name)\n\t\t}\n\t}\n\tsort.Strings(names)\n\tresult := make([]*dto.MetricFamily, 0, len(names))\n\tfor _, name := range names {\n\t\tresult = append(result, metricFamiliesByName[name])\n\t}\n\treturn result\n}\n\n// checkMetricConsistency checks if the provided Metric is consistent with the\n// provided MetricFamily. It also hashes the Metric labels and the MetricFamily\n// name. If the resulting hash is already in the provided metricHashes, an error\n// is returned. If not, it is added to metricHashes. The provided dimHashes maps\n// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes\n// doesn't yet contain a hash for the provided MetricFamily, it is\n// added. Otherwise, an error is returned if the existing dimHash is not equal\n// to the calculated dimHash.
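\n//\n// For example, for a hypothetical metric\n//\n//     http_requests_total{code=\"200\",method=\"get\"}\n//\n// the metric hash covers the name \"http_requests_total\" plus the label values\n// \"200\" and \"get\", while the dimHash covers only the sorted label names\n// \"code\" and \"method\".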
\nfunc checkMetricConsistency(\n\tmetricFamily *dto.MetricFamily,\n\tdtoMetric *dto.Metric,\n\tmetricHashes map[uint64]struct{},\n\tdimHashes map[string]uint64,\n) error {\n\t// Type consistency with metric family.\n\tif metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil ||\n\t\tmetricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil ||\n\t\tmetricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil ||\n\t\tmetricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil ||\n\t\tmetricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil {\n\t\treturn fmt.Errorf(\n\t\t\t\"collected metric %s %s is not a %s\",\n\t\t\tmetricFamily.GetName(), dtoMetric, metricFamily.GetType(),\n\t\t)\n\t}\n\n\tfor _, labelPair := range dtoMetric.GetLabel() {\n\t\tif !utf8.ValidString(*labelPair.Value) {\n\t\t\treturn fmt.Errorf(\"collected metric's label %s is not utf8: %#v\", *labelPair.Name, *labelPair.Value)\n\t\t}\n\t}\n\n\t// Is the metric unique (i.e. no other metric with the same name and the same label values)?\n\th := hashNew()\n\th = hashAdd(h, metricFamily.GetName())\n\th = hashAddByte(h, separatorByte)\n\tdh := hashNew()\n\t// Make sure label pairs are sorted. We depend on it for the consistency\n\t// check.\n\tsort.Sort(LabelPairSorter(dtoMetric.Label))\n\tfor _, lp := range dtoMetric.Label {\n\t\th = hashAdd(h, lp.GetValue())\n\t\th = hashAddByte(h, separatorByte)\n\t\tdh = hashAdd(dh, lp.GetName())\n\t\tdh = hashAddByte(dh, separatorByte)\n\t}\n\tif _, exists := metricHashes[h]; exists {\n\t\treturn fmt.Errorf(\n\t\t\t\"collected metric %s %s was collected before with the same name and label values\",\n\t\t\tmetricFamily.GetName(), dtoMetric,\n\t\t)\n\t}\n\tif dimHash, ok := dimHashes[metricFamily.GetName()]; ok {\n\t\tif dimHash != dh {\n\t\t\treturn fmt.Errorf(\n\t\t\t\t\"collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family\",\n\t\t\t\tmetricFamily.GetName(), dtoMetric,\n\t\t\t)\n\t\t}\n\t} else {\n\t\tdimHashes[metricFamily.GetName()] = dh\n\t}\n\tmetricHashes[h] = struct{}{}\n\treturn nil\n}\n\nfunc checkDescConsistency(\n\tmetricFamily *dto.MetricFamily,\n\tdtoMetric *dto.Metric,\n\tdesc *Desc,\n) error {\n\t// Desc help consistency with metric family help.\n\tif metricFamily.GetHelp() != desc.help {\n\t\treturn fmt.Errorf(\n\t\t\t\"collected metric %s %s has help %q but should have %q\",\n\t\t\tmetricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help,\n\t\t)\n\t}\n\n\t// Is the desc consistent with the content of the metric?\n\tlpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label))\n\tlpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...)\n\tfor _, l := range desc.variableLabels {\n\t\tlpsFromDesc = append(lpsFromDesc, &dto.LabelPair{\n\t\t\tName: proto.String(l),\n\t\t})\n\t}\n\tif len(lpsFromDesc) != len(dtoMetric.Label) {\n\t\treturn fmt.Errorf(\n\t\t\t\"labels in collected metric %s %s are inconsistent with descriptor %s\",\n\t\t\tmetricFamily.GetName(), dtoMetric, desc,\n\t\t)\n\t}\n\tsort.Sort(LabelPairSorter(lpsFromDesc))\n\tfor i, lpFromDesc := range lpsFromDesc {\n\t\tlpFromMetric := dtoMetric.Label[i]\n\t\tif lpFromDesc.GetName() != lpFromMetric.GetName() ||\n\t\t\tlpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() {\n\t\t\treturn fmt.Errorf(\n\t\t\t\t\"labels in collected metric 
%s %s are inconsistent with descriptor %s\",\n\t\t\t\tmetricFamily.GetName(), dtoMetric, desc,\n\t\t\t)\n\t\t}\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/registry_test.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// Copyright (c) 2013, The Prometheus Authors\n// All rights reserved.\n//\n// Use of this source code is governed by a BSD-style license that can be found\n// in the LICENSE file.\n\npackage prometheus_test\n\nimport (\n\t\"bytes\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"testing\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"github.com/prometheus/common/expfmt\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/client_golang/prometheus/promhttp\"\n)\n\nfunc testHandler(t testing.TB) {\n\n\tmetricVec := prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tName:        \"name\",\n\t\t\tHelp:        \"docstring\",\n\t\t\tConstLabels: prometheus.Labels{\"constname\": \"constvalue\"},\n\t\t},\n\t\t[]string{\"labelname\"},\n\t)\n\n\tmetricVec.WithLabelValues(\"val1\").Inc()\n\tmetricVec.WithLabelValues(\"val2\").Inc()\n\n\texternalMetricFamily := &dto.MetricFamily{\n\t\tName: proto.String(\"externalname\"),\n\t\tHelp: proto.String(\"externaldocstring\"),\n\t\tType: dto.MetricType_COUNTER.Enum(),\n\t\tMetric: []*dto.Metric{\n\t\t\t{\n\t\t\t\tLabel: []*dto.LabelPair{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:  proto.String(\"externalconstname\"),\n\t\t\t\t\t\tValue: proto.String(\"externalconstvalue\"),\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:  proto.String(\"externallabelname\"),\n\t\t\t\t\t\tValue: proto.String(\"externalval1\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCounter: &dto.Counter{\n\t\t\t\t\tValue: proto.Float64(1),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\texternalBuf := &bytes.Buffer{}\n\tenc := expfmt.NewEncoder(externalBuf, expfmt.FmtProtoDelim)\n\tif err := enc.Encode(externalMetricFamily); err != nil {\n\t\tt.Fatal(err)\n\t}\n\texternalMetricFamilyAsBytes := externalBuf.Bytes()\n\texternalMetricFamilyAsText := []byte(`# HELP externalname externaldocstring\n# TYPE externalname counter\nexternalname{externalconstname=\"externalconstvalue\",externallabelname=\"externalval1\"} 1\n`)\n\texternalMetricFamilyAsProtoText := []byte(`name: \"externalname\"\nhelp: \"externaldocstring\"\ntype: COUNTER\nmetric: <\n  label: <\n    name: \"externalconstname\"\n    value: \"externalconstvalue\"\n  >\n  label: <\n    name: \"externallabelname\"\n    value: \"externalval1\"\n  >\n  counter: <\n    value: 1\n  >\n>\n\n`)\n\texternalMetricFamilyAsProtoCompactText := []byte(`name:\"externalname\" help:\"externaldocstring\" type:COUNTER metric:<label:<name:\"externalconstname\" value:\"externalconstvalue\" > label:<name:\"externallabelname\" value:\"externalval1\" > counter:<value:1 > > \n`)\n\n\texpectedMetricFamily := &dto.MetricFamily{\n\t\tName: proto.String(\"name\"),\n\t\tHelp: proto.String(\"docstring\"),\n\t\tType: dto.MetricType_COUNTER.Enum(),\n\t\tMetric: []*dto.Metric{\n\t\t\t{\n\t\t\t\tLabel: []*dto.LabelPair{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:  
proto.String(\"constname\"),\n\t\t\t\t\t\tValue: proto.String(\"constvalue\"),\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:  proto.String(\"labelname\"),\n\t\t\t\t\t\tValue: proto.String(\"val1\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCounter: &dto.Counter{\n\t\t\t\t\tValue: proto.Float64(1),\n\t\t\t\t},\n\t\t\t},\n\t\t\t{\n\t\t\t\tLabel: []*dto.LabelPair{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:  proto.String(\"constname\"),\n\t\t\t\t\t\tValue: proto.String(\"constvalue\"),\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:  proto.String(\"labelname\"),\n\t\t\t\t\t\tValue: proto.String(\"val2\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCounter: &dto.Counter{\n\t\t\t\t\tValue: proto.Float64(1),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tbuf := &bytes.Buffer{}\n\tenc = expfmt.NewEncoder(buf, expfmt.FmtProtoDelim)\n\tif err := enc.Encode(expectedMetricFamily); err != nil {\n\t\tt.Fatal(err)\n\t}\n\texpectedMetricFamilyAsBytes := buf.Bytes()\n\texpectedMetricFamilyAsText := []byte(`# HELP name docstring\n# TYPE name counter\nname{constname=\"constvalue\",labelname=\"val1\"} 1\nname{constname=\"constvalue\",labelname=\"val2\"} 1\n`)\n\texpectedMetricFamilyAsProtoText := []byte(`name: \"name\"\nhelp: \"docstring\"\ntype: COUNTER\nmetric: <\n  label: <\n    name: \"constname\"\n    value: \"constvalue\"\n  >\n  label: <\n    name: \"labelname\"\n    value: \"val1\"\n  >\n  counter: <\n    value: 1\n  >\n>\nmetric: <\n  label: <\n    name: \"constname\"\n    value: \"constvalue\"\n  >\n  label: <\n    name: \"labelname\"\n    value: \"val2\"\n  >\n  counter: <\n    value: 1\n  >\n>\n\n`)\n\texpectedMetricFamilyAsProtoCompactText := []byte(`name:\"name\" help:\"docstring\" type:COUNTER metric:<label:<name:\"constname\" value:\"constvalue\" > label:<name:\"labelname\" value:\"val1\" > counter:<value:1 > > metric:<label:<name:\"constname\" value:\"constvalue\" > label:<name:\"labelname\" value:\"val2\" > counter:<value:1 > > \n`)\n\n\texternalMetricFamilyWithSameName := &dto.MetricFamily{\n\t\tName: proto.String(\"name\"),\n\t\tHelp: proto.String(\"docstring\"),\n\t\tType: dto.MetricType_COUNTER.Enum(),\n\t\tMetric: []*dto.Metric{\n\t\t\t{\n\t\t\t\tLabel: []*dto.LabelPair{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:  proto.String(\"constname\"),\n\t\t\t\t\t\tValue: proto.String(\"constvalue\"),\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:  proto.String(\"labelname\"),\n\t\t\t\t\t\tValue: proto.String(\"different_val\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCounter: &dto.Counter{\n\t\t\t\t\tValue: proto.Float64(42),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\texpectedMetricFamilyMergedWithExternalAsProtoCompactText := []byte(`name:\"name\" help:\"docstring\" type:COUNTER metric:<label:<name:\"constname\" value:\"constvalue\" > label:<name:\"labelname\" value:\"different_val\" > counter:<value:42 > > metric:<label:<name:\"constname\" value:\"constvalue\" > label:<name:\"labelname\" value:\"val1\" > counter:<value:1 > > metric:<label:<name:\"constname\" value:\"constvalue\" > label:<name:\"labelname\" value:\"val2\" > counter:<value:1 > > \n`)\n\n\texternalMetricFamilyWithInvalidLabelValue := &dto.MetricFamily{\n\t\tName: proto.String(\"name\"),\n\t\tHelp: proto.String(\"docstring\"),\n\t\tType: dto.MetricType_COUNTER.Enum(),\n\t\tMetric: []*dto.Metric{\n\t\t\t{\n\t\t\t\tLabel: []*dto.LabelPair{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:  proto.String(\"constname\"),\n\t\t\t\t\t\tValue: proto.String(\"\\xFF\"),\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:  proto.String(\"labelname\"),\n\t\t\t\t\t\tValue: 
proto.String(\"different_val\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCounter: &dto.Counter{\n\t\t\t\t\tValue: proto.Float64(42),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\texpectedMetricFamilyInvalidLabelValueAsText := []byte(`An error has occurred during metrics gathering:\n\ncollected metric's label constname is not utf8: \"\\xff\"\n`)\n\n\ttype output struct {\n\t\theaders map[string]string\n\t\tbody    []byte\n\t}\n\n\tvar scenarios = []struct {\n\t\theaders    map[string]string\n\t\tout        output\n\t\tcollector  prometheus.Collector\n\t\texternalMF []*dto.MetricFamily\n\t}{\n\t\t{ // 0\n\t\t\theaders: map[string]string{\n\t\t\t\t\"Accept\": \"foo/bar;q=0.2, dings/bums;q=0.8\",\n\t\t\t},\n\t\t\tout: output{\n\t\t\t\theaders: map[string]string{\n\t\t\t\t\t\"Content-Type\": `text/plain; version=0.0.4`,\n\t\t\t\t},\n\t\t\t\tbody: []byte{},\n\t\t\t},\n\t\t},\n\t\t{ // 1\n\t\t\theaders: map[string]string{\n\t\t\t\t\"Accept\": \"foo/bar;q=0.2, application/quark;q=0.8\",\n\t\t\t},\n\t\t\tout: output{\n\t\t\t\theaders: map[string]string{\n\t\t\t\t\t\"Content-Type\": `text/plain; version=0.0.4`,\n\t\t\t\t},\n\t\t\t\tbody: []byte{},\n\t\t\t},\n\t\t},\n\t\t{ // 2\n\t\t\theaders: map[string]string{\n\t\t\t\t\"Accept\": \"foo/bar;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.8\",\n\t\t\t},\n\t\t\tout: output{\n\t\t\t\theaders: map[string]string{\n\t\t\t\t\t\"Content-Type\": `text/plain; version=0.0.4`,\n\t\t\t\t},\n\t\t\t\tbody: []byte{},\n\t\t\t},\n\t\t},\n\t\t{ // 3\n\t\t\theaders: map[string]string{\n\t\t\t\t\"Accept\": \"text/plain;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.8\",\n\t\t\t},\n\t\t\tout: output{\n\t\t\t\theaders: map[string]string{\n\t\t\t\t\t\"Content-Type\": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,\n\t\t\t\t},\n\t\t\t\tbody: []byte{},\n\t\t\t},\n\t\t},\n\t\t{ // 4\n\t\t\theaders: map[string]string{\n\t\t\t\t\"Accept\": \"application/json\",\n\t\t\t},\n\t\t\tout: output{\n\t\t\t\theaders: map[string]string{\n\t\t\t\t\t\"Content-Type\": `text/plain; version=0.0.4`,\n\t\t\t\t},\n\t\t\t\tbody: expectedMetricFamilyAsText,\n\t\t\t},\n\t\t\tcollector: metricVec,\n\t\t},\n\t\t{ // 5\n\t\t\theaders: map[string]string{\n\t\t\t\t\"Accept\": \"application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited\",\n\t\t\t},\n\t\t\tout: output{\n\t\t\t\theaders: map[string]string{\n\t\t\t\t\t\"Content-Type\": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,\n\t\t\t\t},\n\t\t\t\tbody: expectedMetricFamilyAsBytes,\n\t\t\t},\n\t\t\tcollector: metricVec,\n\t\t},\n\t\t{ // 6\n\t\t\theaders: map[string]string{\n\t\t\t\t\"Accept\": \"application/json\",\n\t\t\t},\n\t\t\tout: output{\n\t\t\t\theaders: map[string]string{\n\t\t\t\t\t\"Content-Type\": `text/plain; version=0.0.4`,\n\t\t\t\t},\n\t\t\t\tbody: externalMetricFamilyAsText,\n\t\t\t},\n\t\t\texternalMF: []*dto.MetricFamily{externalMetricFamily},\n\t\t},\n\t\t{ // 7\n\t\t\theaders: map[string]string{\n\t\t\t\t\"Accept\": \"application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited\",\n\t\t\t},\n\t\t\tout: output{\n\t\t\t\theaders: map[string]string{\n\t\t\t\t\t\"Content-Type\": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,\n\t\t\t\t},\n\t\t\t\tbody: externalMetricFamilyAsBytes,\n\t\t\t},\n\t\t\texternalMF: 
[]*dto.MetricFamily{externalMetricFamily},\n\t\t},\n\t\t{ // 8\n\t\t\theaders: map[string]string{\n\t\t\t\t\"Accept\": \"application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited\",\n\t\t\t},\n\t\t\tout: output{\n\t\t\t\theaders: map[string]string{\n\t\t\t\t\t\"Content-Type\": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,\n\t\t\t\t},\n\t\t\t\tbody: bytes.Join(\n\t\t\t\t\t[][]byte{\n\t\t\t\t\t\texternalMetricFamilyAsBytes,\n\t\t\t\t\t\texpectedMetricFamilyAsBytes,\n\t\t\t\t\t},\n\t\t\t\t\t[]byte{},\n\t\t\t\t),\n\t\t\t},\n\t\t\tcollector:  metricVec,\n\t\t\texternalMF: []*dto.MetricFamily{externalMetricFamily},\n\t\t},\n\t\t{ // 9\n\t\t\theaders: map[string]string{\n\t\t\t\t\"Accept\": \"text/plain\",\n\t\t\t},\n\t\t\tout: output{\n\t\t\t\theaders: map[string]string{\n\t\t\t\t\t\"Content-Type\": `text/plain; version=0.0.4`,\n\t\t\t\t},\n\t\t\t\tbody: []byte{},\n\t\t\t},\n\t\t},\n\t\t{ // 10\n\t\t\theaders: map[string]string{\n\t\t\t\t\"Accept\": \"application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5\",\n\t\t\t},\n\t\t\tout: output{\n\t\t\t\theaders: map[string]string{\n\t\t\t\t\t\"Content-Type\": `text/plain; version=0.0.4`,\n\t\t\t\t},\n\t\t\t\tbody: expectedMetricFamilyAsText,\n\t\t\t},\n\t\t\tcollector: metricVec,\n\t\t},\n\t\t{ // 11\n\t\t\theaders: map[string]string{\n\t\t\t\t\"Accept\": \"application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5;version=0.0.4\",\n\t\t\t},\n\t\t\tout: output{\n\t\t\t\theaders: map[string]string{\n\t\t\t\t\t\"Content-Type\": `text/plain; version=0.0.4`,\n\t\t\t\t},\n\t\t\t\tbody: bytes.Join(\n\t\t\t\t\t[][]byte{\n\t\t\t\t\t\texternalMetricFamilyAsText,\n\t\t\t\t\t\texpectedMetricFamilyAsText,\n\t\t\t\t\t},\n\t\t\t\t\t[]byte{},\n\t\t\t\t),\n\t\t\t},\n\t\t\tcollector:  metricVec,\n\t\t\texternalMF: []*dto.MetricFamily{externalMetricFamily},\n\t\t},\n\t\t{ // 12\n\t\t\theaders: map[string]string{\n\t\t\t\t\"Accept\": \"application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.2, text/plain;q=0.5;version=0.0.2\",\n\t\t\t},\n\t\t\tout: output{\n\t\t\t\theaders: map[string]string{\n\t\t\t\t\t\"Content-Type\": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`,\n\t\t\t\t},\n\t\t\t\tbody: bytes.Join(\n\t\t\t\t\t[][]byte{\n\t\t\t\t\t\texternalMetricFamilyAsBytes,\n\t\t\t\t\t\texpectedMetricFamilyAsBytes,\n\t\t\t\t\t},\n\t\t\t\t\t[]byte{},\n\t\t\t\t),\n\t\t\t},\n\t\t\tcollector:  metricVec,\n\t\t\texternalMF: []*dto.MetricFamily{externalMetricFamily},\n\t\t},\n\t\t{ // 13\n\t\t\theaders: map[string]string{\n\t\t\t\t\"Accept\": \"application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=text;q=0.5, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.4\",\n\t\t\t},\n\t\t\tout: output{\n\t\t\t\theaders: map[string]string{\n\t\t\t\t\t\"Content-Type\": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text`,\n\t\t\t\t},\n\t\t\t\tbody: bytes.Join(\n\t\t\t\t\t[][]byte{\n\t\t\t\t\t\texternalMetricFamilyAsProtoText,\n\t\t\t\t\t\texpectedMetricFamilyAsProtoText,\n\t\t\t\t\t},\n\t\t\t\t\t[]byte{},\n\t\t\t\t),\n\t\t\t},\n\t\t\tcollector:  metricVec,\n\t\t\texternalMF: []*dto.MetricFamily{externalMetricFamily},\n\t\t},\n\t\t{ // 14\n\t\t\theaders: map[string]string{\n\t\t\t\t\"Accept\": 
\"application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text\",\n\t\t\t},\n\t\t\tout: output{\n\t\t\t\theaders: map[string]string{\n\t\t\t\t\t\"Content-Type\": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`,\n\t\t\t\t},\n\t\t\t\tbody: bytes.Join(\n\t\t\t\t\t[][]byte{\n\t\t\t\t\t\texternalMetricFamilyAsProtoCompactText,\n\t\t\t\t\t\texpectedMetricFamilyAsProtoCompactText,\n\t\t\t\t\t},\n\t\t\t\t\t[]byte{},\n\t\t\t\t),\n\t\t\t},\n\t\t\tcollector:  metricVec,\n\t\t\texternalMF: []*dto.MetricFamily{externalMetricFamily},\n\t\t},\n\t\t{ // 15\n\t\t\theaders: map[string]string{\n\t\t\t\t\"Accept\": \"application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text\",\n\t\t\t},\n\t\t\tout: output{\n\t\t\t\theaders: map[string]string{\n\t\t\t\t\t\"Content-Type\": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`,\n\t\t\t\t},\n\t\t\t\tbody: bytes.Join(\n\t\t\t\t\t[][]byte{\n\t\t\t\t\t\texternalMetricFamilyAsProtoCompactText,\n\t\t\t\t\t\texpectedMetricFamilyMergedWithExternalAsProtoCompactText,\n\t\t\t\t\t},\n\t\t\t\t\t[]byte{},\n\t\t\t\t),\n\t\t\t},\n\t\t\tcollector: metricVec,\n\t\t\texternalMF: []*dto.MetricFamily{\n\t\t\t\texternalMetricFamily,\n\t\t\t\texternalMetricFamilyWithSameName,\n\t\t\t},\n\t\t},\n\t\t{ // 16\n\t\t\theaders: map[string]string{\n\t\t\t\t\"Accept\": \"application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text\",\n\t\t\t},\n\t\t\tout: output{\n\t\t\t\theaders: map[string]string{\n\t\t\t\t\t\"Content-Type\": `text/plain; charset=utf-8`,\n\t\t\t\t},\n\t\t\t\tbody: expectedMetricFamilyInvalidLabelValueAsText,\n\t\t\t},\n\t\t\tcollector: metricVec,\n\t\t\texternalMF: []*dto.MetricFamily{\n\t\t\t\texternalMetricFamily,\n\t\t\t\texternalMetricFamilyWithInvalidLabelValue,\n\t\t\t},\n\t\t},\n\t}\n\tfor i, scenario := range scenarios {\n\t\tregistry := prometheus.NewPedanticRegistry()\n\t\tgatherer := prometheus.Gatherer(registry)\n\t\tif scenario.externalMF != nil {\n\t\t\tgatherer = prometheus.Gatherers{\n\t\t\t\tregistry,\n\t\t\t\tprometheus.GathererFunc(func() ([]*dto.MetricFamily, error) {\n\t\t\t\t\treturn scenario.externalMF, nil\n\t\t\t\t}),\n\t\t\t}\n\t\t}\n\n\t\tif scenario.collector != nil {\n\t\t\tregistry.Register(scenario.collector)\n\t\t}\n\t\twriter := httptest.NewRecorder()\n\t\thandler := prometheus.InstrumentHandler(\"prometheus\", promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{}))\n\t\trequest, _ := http.NewRequest(\"GET\", \"/\", nil)\n\t\tfor key, value := range scenario.headers {\n\t\t\trequest.Header.Add(key, value)\n\t\t}\n\t\thandler(writer, request)\n\n\t\tfor key, value := range scenario.out.headers {\n\t\t\tif writer.HeaderMap.Get(key) != value {\n\t\t\t\tt.Errorf(\n\t\t\t\t\t\"%d. expected %q for header %q, got %q\",\n\t\t\t\t\ti, value, key, writer.Header().Get(key),\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\n\t\tif !bytes.Equal(scenario.out.body, writer.Body.Bytes()) {\n\t\t\tt.Errorf(\n\t\t\t\t\"%d. expected body:\\n%s\\ngot body:\\n%s\\n\",\n\t\t\t\ti, scenario.out.body, writer.Body.Bytes(),\n\t\t\t)\n\t\t}\n\t}\n}\n\nfunc TestHandler(t *testing.T) {\n\ttestHandler(t)\n}\n\nfunc BenchmarkHandler(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\ttestHandler(b)\n\t}\n}\n\nfunc TestRegisterWithOrGet(t *testing.T) {\n\t// Replace the default registerer just to be sure. 
This is bad, but this\n\t// whole test will go away once RegisterOrGet is removed.\n\toldRegisterer := prometheus.DefaultRegisterer\n\tdefer func() {\n\t\tprometheus.DefaultRegisterer = oldRegisterer\n\t}()\n\tprometheus.DefaultRegisterer = prometheus.NewRegistry()\n\toriginal := prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tName: \"test\",\n\t\t\tHelp: \"help\",\n\t\t},\n\t\t[]string{\"foo\", \"bar\"},\n\t)\n\tequalButNotSame := prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tName: \"test\",\n\t\t\tHelp: \"help\",\n\t\t},\n\t\t[]string{\"foo\", \"bar\"},\n\t)\n\tvar err error\n\tif err = prometheus.Register(original); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err = prometheus.Register(equalButNotSame); err == nil {\n\t\tt.Fatal(\"expected error when registering an equal collector\")\n\t}\n\tif are, ok := err.(prometheus.AlreadyRegisteredError); ok {\n\t\tif are.ExistingCollector != original {\n\t\t\tt.Error(\"expected original collector but got something else\")\n\t\t}\n\t\tif are.ExistingCollector == equalButNotSame {\n\t\t\tt.Error(\"expected original collector but got new one\")\n\t\t}\n\t} else {\n\t\tt.Error(\"unexpected error:\", err)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/summary.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/beorn7/perks/quantile\"\n\t\"github.com/golang/protobuf/proto\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n)\n\n// quantileLabel is used for the label that defines the quantile in a\n// summary.\nconst quantileLabel = \"quantile\"\n\n// A Summary captures individual observations from an event or sample stream and\n// summarizes them in a manner similar to traditional summary statistics: 1. sum\n// of observations, 2. observation count, 3. rank estimations.\n//\n// A typical use-case is the observation of request latencies. By default, a\n// Summary provides the median, the 90th and the 99th percentile of the latency\n// as rank estimations.\n//\n// Note that the rank estimations cannot be aggregated in a meaningful way with\n// the Prometheus query language (i.e. you cannot average or add them). If you\n// need aggregatable quantiles (e.g. you want the 99th percentile latency of all\n// queries served across all instances of a service), consider the Histogram\n// metric type. See the Prometheus documentation for more details.\n//\n// To create Summary instances, use NewSummary.\ntype Summary interface {\n\tMetric\n\tCollector\n\n\t// Observe adds a single observation to the summary.\n\tObserve(float64)\n}\n\n// DefObjectives are the default Summary quantile values.\n//\n// Deprecated: DefObjectives will not be used as the default objectives in\n// v0.10 of the library. The default Summary will have no quantiles then.\nvar (\n\tDefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}\n\n\terrQuantileLabelNotAllowed = fmt.Errorf(\n\t\t\"%q is not allowed as label name in summaries\", quantileLabel,\n\t)\n)\n\n// Default values for SummaryOpts.\nconst (\n\t// DefMaxAge is the default duration for which observations stay\n\t// relevant.\n\tDefMaxAge time.Duration = 10 * time.Minute\n\t// DefAgeBuckets is the default number of buckets used to calculate the\n\t// age of observations.\n\tDefAgeBuckets = 5\n\t// DefBufCap is the standard buffer size for collecting Summary observations.\n\tDefBufCap = 500\n)\n\n// SummaryOpts bundles the options for creating a Summary metric. It is\n// mandatory to set Name and Help to a non-empty string. All other fields are\n// optional and can safely be left at their zero value.\ntype SummaryOpts struct {\n\t// Namespace, Subsystem, and Name are components of the fully-qualified\n\t// name of the Summary (created by joining these components with\n\t// \"_\"). Only Name is mandatory, the others merely help structuring the\n\t// name. Note that the fully-qualified name of the Summary must be a\n\t// valid Prometheus metric name.\n\tNamespace string\n\tSubsystem string\n\tName      string\n\n\t// Help provides information about this Summary. 
Mandatory!\n\t//\n\t// Metrics with the same fully-qualified name must have the same Help\n\t// string.\n\tHelp string\n\n\t// ConstLabels are used to attach fixed labels to this\n\t// Summary. Summaries with the same fully-qualified name must have the\n\t// same label names in their ConstLabels.\n\t//\n\t// Note that in most cases, labels have a value that varies during the\n\t// lifetime of a process. Those labels are usually managed with a\n\t// SummaryVec. ConstLabels serve only special purposes. One is for the\n\t// special case where the value of a label does not change during the\n\t// lifetime of a process, e.g. if the revision of the running binary is\n\t// put into a label. Another, more advanced purpose is if more than one\n\t// Collector needs to collect Summaries with the same fully-qualified\n\t// name. In that case, those Summaries must differ in the values of\n\t// their ConstLabels. See the Collector examples.\n\t//\n\t// If the value of a label never changes (not even between binaries),\n\t// that label most likely should not be a label at all (but part of the\n\t// metric name).\n\tConstLabels Labels\n\n\t// Objectives defines the quantile rank estimates with their respective\n\t// absolute error. If Objectives[q] = e, then the value reported for q\n\t// will be the φ-quantile value for some φ between q-e and q+e.  The\n\t// default value is DefObjectives. It is used if Objectives is left at\n\t// its zero value (i.e. nil). To create a Summary without Objectives,\n\t// set it to an empty map (i.e. map[float64]float64{}).\n\t//\n\t// Deprecated: Note that the current value of DefObjectives is\n\t// deprecated. It will be replaced by an empty map in v0.10 of the\n\t// library. Please explicitly set Objectives to the desired value.\n\tObjectives map[float64]float64\n\n\t// MaxAge defines the duration for which an observation stays relevant\n\t// for the summary. Must be positive. The default value is DefMaxAge.\n\tMaxAge time.Duration\n\n\t// AgeBuckets is the number of buckets used to exclude observations that\n\t// are older than MaxAge from the summary. A higher number has a\n\t// resource penalty, so only increase it if the higher resolution is\n\t// really required. For very high observation rates, you might want to\n\t// reduce the number of age buckets. With only one age bucket, you will\n\t// effectively see a complete reset of the summary each time MaxAge has\n\t// passed. The default value is DefAgeBuckets.\n\tAgeBuckets uint32\n\n\t// BufCap defines the default sample stream buffer size.  The default\n\t// value of DefBufCap should suffice for most uses. If there is a need\n\t// to increase the value, a multiple of 500 is recommended (because that\n\t// is the internal buffer size of the underlying package\n\t// \"github.com/beorn7/perks/quantile\").\n\tBufCap uint32\n}\n\n// Great fuck-up with the sliding-window decay algorithm... The Merge method of\n// perks/quantile is actually not working as advertised - and it might be\n// unfixable, as the underlying algorithm is apparently not capable of merging\n// summaries in the first place. To avoid using Merge, we are currently adding\n// observations to _each_ age bucket, i.e. the effort to add a sample is\n// essentially multiplied by the number of age buckets. When rotating age\n// buckets, we empty the previous head stream. On scrape time, we simply take\n// the quantiles from the head stream (no merging required). 
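For example,\n// with the defaults (DefMaxAge of 10 minutes and DefAgeBuckets of 5), each age\n// bucket spans 2 minutes, and every observation is inserted into all 5\n// streams. 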
Result: More effort\n// on observation time, less effort on scrape time, which is exactly the\n// opposite of what we try to accomplish, but at least the results are correct.\n//\n// The quite elegant previous contraption to merge the age buckets efficiently\n// on scrape time (see code up to commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0)\n// can't be used anymore.\n\n// NewSummary creates a new Summary based on the provided SummaryOpts.\nfunc NewSummary(opts SummaryOpts) Summary {\n\treturn newSummary(\n\t\tNewDesc(\n\t\t\tBuildFQName(opts.Namespace, opts.Subsystem, opts.Name),\n\t\t\topts.Help,\n\t\t\tnil,\n\t\t\topts.ConstLabels,\n\t\t),\n\t\topts,\n\t)\n}\n\nfunc newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {\n\tif len(desc.variableLabels) != len(labelValues) {\n\t\tpanic(errInconsistentCardinality)\n\t}\n\n\tfor _, n := range desc.variableLabels {\n\t\tif n == quantileLabel {\n\t\t\tpanic(errQuantileLabelNotAllowed)\n\t\t}\n\t}\n\tfor _, lp := range desc.constLabelPairs {\n\t\tif lp.GetName() == quantileLabel {\n\t\t\tpanic(errQuantileLabelNotAllowed)\n\t\t}\n\t}\n\n\tif opts.Objectives == nil {\n\t\topts.Objectives = DefObjectives\n\t}\n\n\tif opts.MaxAge < 0 {\n\t\tpanic(fmt.Errorf(\"illegal max age MaxAge=%v\", opts.MaxAge))\n\t}\n\tif opts.MaxAge == 0 {\n\t\topts.MaxAge = DefMaxAge\n\t}\n\n\tif opts.AgeBuckets == 0 {\n\t\topts.AgeBuckets = DefAgeBuckets\n\t}\n\n\tif opts.BufCap == 0 {\n\t\topts.BufCap = DefBufCap\n\t}\n\n\ts := &summary{\n\t\tdesc: desc,\n\n\t\tobjectives:       opts.Objectives,\n\t\tsortedObjectives: make([]float64, 0, len(opts.Objectives)),\n\n\t\tlabelPairs: makeLabelPairs(desc, labelValues),\n\n\t\thotBuf:         make([]float64, 0, opts.BufCap),\n\t\tcoldBuf:        make([]float64, 0, opts.BufCap),\n\t\tstreamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),\n\t}\n\ts.headStreamExpTime = time.Now().Add(s.streamDuration)\n\ts.hotBufExpTime = s.headStreamExpTime\n\n\tfor i := uint32(0); i < opts.AgeBuckets; i++ {\n\t\ts.streams = append(s.streams, s.newStream())\n\t}\n\ts.headStream = s.streams[0]\n\n\tfor qu := range s.objectives {\n\t\ts.sortedObjectives = append(s.sortedObjectives, qu)\n\t}\n\tsort.Float64s(s.sortedObjectives)\n\n\ts.init(s) // Init self-collection.\n\treturn s\n}\n\ntype summary struct {\n\tselfCollector\n\n\tbufMtx sync.Mutex // Protects hotBuf and hotBufExpTime.\n\tmtx    sync.Mutex // Protects every other moving part.\n\t// Lock bufMtx before mtx if both are needed.\n\n\tdesc *Desc\n\n\tobjectives       map[float64]float64\n\tsortedObjectives []float64\n\n\tlabelPairs []*dto.LabelPair\n\n\tsum float64\n\tcnt uint64\n\n\thotBuf, coldBuf []float64\n\n\tstreams                          []*quantile.Stream\n\tstreamDuration                   time.Duration\n\theadStream                       *quantile.Stream\n\theadStreamIdx                    int\n\theadStreamExpTime, hotBufExpTime time.Time\n}\n\nfunc (s *summary) Desc() *Desc {\n\treturn s.desc\n}\n\nfunc (s *summary) Observe(v float64) {\n\ts.bufMtx.Lock()\n\tdefer s.bufMtx.Unlock()\n\n\tnow := time.Now()\n\tif now.After(s.hotBufExpTime) {\n\t\ts.asyncFlush(now)\n\t}\n\ts.hotBuf = append(s.hotBuf, v)\n\tif len(s.hotBuf) == cap(s.hotBuf) {\n\t\ts.asyncFlush(now)\n\t}\n}\n\nfunc (s *summary) Write(out *dto.Metric) error {\n\tsum := &dto.Summary{}\n\tqs := make([]*dto.Quantile, 0, len(s.objectives))\n\n\ts.bufMtx.Lock()\n\ts.mtx.Lock()\n\t// Swap bufs even if hotBuf is empty to set new 
hotBufExpTime.\n\ts.swapBufs(time.Now())\n\ts.bufMtx.Unlock()\n\n\ts.flushColdBuf()\n\tsum.SampleCount = proto.Uint64(s.cnt)\n\tsum.SampleSum = proto.Float64(s.sum)\n\n\tfor _, rank := range s.sortedObjectives {\n\t\tvar q float64\n\t\tif s.headStream.Count() == 0 {\n\t\t\tq = math.NaN()\n\t\t} else {\n\t\t\tq = s.headStream.Query(rank)\n\t\t}\n\t\tqs = append(qs, &dto.Quantile{\n\t\t\tQuantile: proto.Float64(rank),\n\t\t\tValue:    proto.Float64(q),\n\t\t})\n\t}\n\n\ts.mtx.Unlock()\n\n\tif len(qs) > 0 {\n\t\tsort.Sort(quantSort(qs))\n\t}\n\tsum.Quantile = qs\n\n\tout.Summary = sum\n\tout.Label = s.labelPairs\n\treturn nil\n}\n\nfunc (s *summary) newStream() *quantile.Stream {\n\treturn quantile.NewTargeted(s.objectives)\n}\n\n// asyncFlush needs bufMtx locked.\nfunc (s *summary) asyncFlush(now time.Time) {\n\ts.mtx.Lock()\n\ts.swapBufs(now)\n\n\t// Unblock the original goroutine that was responsible for the mutation\n\t// that triggered the compaction.  But hold onto the global non-buffer\n\t// state mutex until the operation finishes.\n\tgo func() {\n\t\ts.flushColdBuf()\n\t\ts.mtx.Unlock()\n\t}()\n}\n\n// maybeRotateStreams needs mtx AND bufMtx locked.\nfunc (s *summary) maybeRotateStreams() {\n\tfor !s.hotBufExpTime.Equal(s.headStreamExpTime) {\n\t\ts.headStream.Reset()\n\t\ts.headStreamIdx++\n\t\tif s.headStreamIdx >= len(s.streams) {\n\t\t\ts.headStreamIdx = 0\n\t\t}\n\t\ts.headStream = s.streams[s.headStreamIdx]\n\t\ts.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration)\n\t}\n}\n\n// flushColdBuf needs mtx locked.\nfunc (s *summary) flushColdBuf() {\n\tfor _, v := range s.coldBuf {\n\t\tfor _, stream := range s.streams {\n\t\t\tstream.Insert(v)\n\t\t}\n\t\ts.cnt++\n\t\ts.sum += v\n\t}\n\ts.coldBuf = s.coldBuf[0:0]\n\ts.maybeRotateStreams()\n}\n\n// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty.\nfunc (s *summary) swapBufs(now time.Time) {\n\tif len(s.coldBuf) != 0 {\n\t\tpanic(\"coldBuf is not empty\")\n\t}\n\ts.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf\n\t// hotBuf is now empty and gets new expiration set.\n\tfor now.After(s.hotBufExpTime) {\n\t\ts.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration)\n\t}\n}\n\ntype quantSort []*dto.Quantile\n\nfunc (s quantSort) Len() int {\n\treturn len(s)\n}\n\nfunc (s quantSort) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n}\n\nfunc (s quantSort) Less(i, j int) bool {\n\treturn s[i].GetQuantile() < s[j].GetQuantile()\n}\n\n// SummaryVec is a Collector that bundles a set of Summaries that all share the\n// same Desc, but have different values for their variable labels. This is used\n// if you want to count the same thing partitioned by various dimensions\n// (e.g. HTTP request latencies, partitioned by status code and method). Create\n// instances with NewSummaryVec.\ntype SummaryVec struct {\n\t*metricVec\n}\n\n// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and\n// partitioned by the given label names.\nfunc NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {\n\tdesc := NewDesc(\n\t\tBuildFQName(opts.Namespace, opts.Subsystem, opts.Name),\n\t\topts.Help,\n\t\tlabelNames,\n\t\topts.ConstLabels,\n\t)\n\treturn &SummaryVec{\n\t\tmetricVec: newMetricVec(desc, func(lvs ...string) Metric {\n\t\t\treturn newSummary(desc, opts, lvs...)\n\t\t}),\n\t}\n}\n\n// GetMetricWithLabelValues returns the Summary for the given slice of label\n// values (same order as the VariableLabels in Desc). 
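A sketch (assuming a\n// SummaryVec named vec with variable labels \"method\" and \"code\"):\n//\n//     s, err := vec.GetMetricWithLabelValues(\"GET\", \"200\")\n//     if err == nil {\n//         s.Observe(0.42)\n//     }\n//\n// 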
If that combination of\n// label values is accessed for the first time, a new Summary is created.\n//\n// It is possible to call this method without using the returned Summary to only\n// create the new Summary but leave it at its starting value, a Summary without\n// any observations.\n//\n// Keeping the Summary for later use is possible (and should be considered if\n// performance is critical), but keep in mind that Reset, DeleteLabelValues and\n// Delete can be used to delete the Summary from the SummaryVec. In that case, the\n// Summary will still exist, but it will not be exported anymore, even if a\n// Summary with the same label values is created later. See also the CounterVec\n// example.\n//\n// An error is returned if the number of label values is not the same as the\n// number of VariableLabels in Desc.\n//\n// Note that for more than one label value, this method is prone to mistakes\n// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as\n// an alternative to avoid that type of mistake. For higher label numbers, the\n// latter has a much more readable (albeit more verbose) syntax, but it comes\n// with a performance overhead (for creating and processing the Labels map).\n// See also the GaugeVec example.\nfunc (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {\n\tmetric, err := m.metricVec.getMetricWithLabelValues(lvs...)\n\tif metric != nil {\n\t\treturn metric.(Observer), err\n\t}\n\treturn nil, err\n}\n\n// GetMetricWith returns the Summary for the given Labels map (the label names\n// must match those of the VariableLabels in Desc). If that label map is\n// accessed for the first time, a new Summary is created. Implications of\n// creating a Summary without using it and keeping the Summary for later use are\n// the same as for GetMetricWithLabelValues.\n//\n// An error is returned if the number and names of the Labels are inconsistent\n// with those of the VariableLabels in Desc.\n//\n// This method is used for the same purpose as\n// GetMetricWithLabelValues(...string). See there for pros and cons of the two\n// methods.\nfunc (m *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {\n\tmetric, err := m.metricVec.getMetricWith(labels)\n\tif metric != nil {\n\t\treturn metric.(Observer), err\n\t}\n\treturn nil, err\n}\n\n// WithLabelValues works as GetMetricWithLabelValues, but panics where\n// GetMetricWithLabelValues would have returned an error. By not returning an\n// error, WithLabelValues allows shortcuts like\n//     myVec.WithLabelValues(\"404\", \"GET\").Observe(42.21)\nfunc (m *SummaryVec) WithLabelValues(lvs ...string) Observer {\n\treturn m.metricVec.withLabelValues(lvs...).(Observer)\n}\n\n// With works as GetMetricWith, but panics where GetMetricWith would have\n// returned an error. 
By not returning an error, With allows shortcuts like\n//     myVec.With(Labels{\"code\": \"404\", \"method\": \"GET\"}).Observe(42.21)\nfunc (m *SummaryVec) With(labels Labels) Observer {\n\treturn m.metricVec.with(labels).(Observer)\n}\n\ntype constSummary struct {\n\tdesc       *Desc\n\tcount      uint64\n\tsum        float64\n\tquantiles  map[float64]float64\n\tlabelPairs []*dto.LabelPair\n}\n\nfunc (s *constSummary) Desc() *Desc {\n\treturn s.desc\n}\n\nfunc (s *constSummary) Write(out *dto.Metric) error {\n\tsum := &dto.Summary{}\n\tqs := make([]*dto.Quantile, 0, len(s.quantiles))\n\n\tsum.SampleCount = proto.Uint64(s.count)\n\tsum.SampleSum = proto.Float64(s.sum)\n\n\tfor rank, q := range s.quantiles {\n\t\tqs = append(qs, &dto.Quantile{\n\t\t\tQuantile: proto.Float64(rank),\n\t\t\tValue:    proto.Float64(q),\n\t\t})\n\t}\n\n\tif len(qs) > 0 {\n\t\tsort.Sort(quantSort(qs))\n\t}\n\tsum.Quantile = qs\n\n\tout.Summary = sum\n\tout.Label = s.labelPairs\n\n\treturn nil\n}\n\n// NewConstSummary returns a metric representing a Prometheus summary with fixed\n// values for the count, sum, and quantiles. As those parameters cannot be\n// changed, the returned value does not implement the Summary interface (but\n// only the Metric interface). Users of this package will not have much use for\n// it in regular operations. However, when implementing custom Collectors, it is\n// useful as a throw-away metric that is generated on the fly to send it to\n// Prometheus in the Collect method.\n//\n// quantiles maps ranks to quantile values. For example, a median latency of\n// 0.23s and a 99th percentile latency of 0.56s would be expressed as:\n//     map[float64]float64{0.5: 0.23, 0.99: 0.56}\n//\n// NewConstSummary returns an error if the length of labelValues is not\n// consistent with the variable labels in Desc.\nfunc NewConstSummary(\n\tdesc *Desc,\n\tcount uint64,\n\tsum float64,\n\tquantiles map[float64]float64,\n\tlabelValues ...string,\n) (Metric, error) {\n\tif err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &constSummary{\n\t\tdesc:       desc,\n\t\tcount:      count,\n\t\tsum:        sum,\n\t\tquantiles:  quantiles,\n\t\tlabelPairs: makeLabelPairs(desc, labelValues),\n\t}, nil\n}\n\n// MustNewConstSummary is a version of NewConstSummary that panics where\n// NewConstSummary would have returned an error.\nfunc MustNewConstSummary(\n\tdesc *Desc,\n\tcount uint64,\n\tsum float64,\n\tquantiles map[float64]float64,\n\tlabelValues ...string,\n) Metric {\n\tm, err := NewConstSummary(desc, count, sum, quantiles, labelValues...)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn m\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/summary_test.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus\n\nimport (\n\t\"math\"\n\t\"math/rand\"\n\t\"sort\"\n\t\"sync\"\n\t\"testing\"\n\t\"testing/quick\"\n\t\"time\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n)\n\nfunc TestSummaryWithDefaultObjectives(t *testing.T) {\n\treg := NewRegistry()\n\tsummaryWithDefaultObjectives := NewSummary(SummaryOpts{\n\t\tName: \"default_objectives\",\n\t\tHelp: \"Test help.\",\n\t})\n\tif err := reg.Register(summaryWithDefaultObjectives); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tm := &dto.Metric{}\n\tif err := summaryWithDefaultObjectives.Write(m); err != nil {\n\t\tt.Error(err)\n\t}\n\tif len(m.GetSummary().Quantile) != len(DefObjectives) {\n\t\tt.Error(\"expected default objectives in summary\")\n\t}\n}\n\nfunc TestSummaryWithoutObjectives(t *testing.T) {\n\treg := NewRegistry()\n\tsummaryWithEmptyObjectives := NewSummary(SummaryOpts{\n\t\tName:       \"empty_objectives\",\n\t\tHelp:       \"Test help.\",\n\t\tObjectives: map[float64]float64{},\n\t})\n\tif err := reg.Register(summaryWithEmptyObjectives); err != nil {\n\t\tt.Error(err)\n\t}\n\n\tm := &dto.Metric{}\n\tif err := summaryWithEmptyObjectives.Write(m); err != nil {\n\t\tt.Error(err)\n\t}\n\tif len(m.GetSummary().Quantile) != 0 {\n\t\tt.Error(\"expected no objectives in summary\")\n\t}\n}\n\nfunc benchmarkSummaryObserve(w int, b *testing.B) {\n\tb.StopTimer()\n\n\twg := new(sync.WaitGroup)\n\twg.Add(w)\n\n\tg := new(sync.WaitGroup)\n\tg.Add(1)\n\n\ts := NewSummary(SummaryOpts{})\n\n\tfor i := 0; i < w; i++ {\n\t\tgo func() {\n\t\t\tg.Wait()\n\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\ts.Observe(float64(i))\n\t\t\t}\n\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\tb.StartTimer()\n\tg.Done()\n\twg.Wait()\n}\n\nfunc BenchmarkSummaryObserve1(b *testing.B) {\n\tbenchmarkSummaryObserve(1, b)\n}\n\nfunc BenchmarkSummaryObserve2(b *testing.B) {\n\tbenchmarkSummaryObserve(2, b)\n}\n\nfunc BenchmarkSummaryObserve4(b *testing.B) {\n\tbenchmarkSummaryObserve(4, b)\n}\n\nfunc BenchmarkSummaryObserve8(b *testing.B) {\n\tbenchmarkSummaryObserve(8, b)\n}\n\nfunc benchmarkSummaryWrite(w int, b *testing.B) {\n\tb.StopTimer()\n\n\twg := new(sync.WaitGroup)\n\twg.Add(w)\n\n\tg := new(sync.WaitGroup)\n\tg.Add(1)\n\n\ts := NewSummary(SummaryOpts{})\n\n\tfor i := 0; i < 1000000; i++ {\n\t\ts.Observe(float64(i))\n\t}\n\n\tfor j := 0; j < w; j++ {\n\t\touts := make([]dto.Metric, b.N)\n\n\t\tgo func(o []dto.Metric) {\n\t\t\tg.Wait()\n\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\ts.Write(&o[i])\n\t\t\t}\n\n\t\t\twg.Done()\n\t\t}(outs)\n\t}\n\n\tb.StartTimer()\n\tg.Done()\n\twg.Wait()\n}\n\nfunc BenchmarkSummaryWrite1(b *testing.B) {\n\tbenchmarkSummaryWrite(1, b)\n}\n\nfunc BenchmarkSummaryWrite2(b *testing.B) {\n\tbenchmarkSummaryWrite(2, b)\n}\n\nfunc BenchmarkSummaryWrite4(b *testing.B) {\n\tbenchmarkSummaryWrite(4, b)\n}\n\nfunc BenchmarkSummaryWrite8(b *testing.B) {\n\tbenchmarkSummaryWrite(8, b)\n}\n\nfunc TestSummaryConcurrency(t 
*testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping test in short mode.\")\n\t}\n\n\trand.Seed(42)\n\n\tit := func(n uint32) bool {\n\t\tmutations := int(n%1e4 + 1e4)\n\t\tconcLevel := int(n%5 + 1)\n\t\ttotal := mutations * concLevel\n\n\t\tvar start, end sync.WaitGroup\n\t\tstart.Add(1)\n\t\tend.Add(concLevel)\n\n\t\tsum := NewSummary(SummaryOpts{\n\t\t\tName:       \"test_summary\",\n\t\t\tHelp:       \"helpless\",\n\t\t\tObjectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},\n\t\t})\n\n\t\tallVars := make([]float64, total)\n\t\tvar sampleSum float64\n\t\tfor i := 0; i < concLevel; i++ {\n\t\t\tvals := make([]float64, mutations)\n\t\t\tfor j := 0; j < mutations; j++ {\n\t\t\t\tv := rand.NormFloat64()\n\t\t\t\tvals[j] = v\n\t\t\t\tallVars[i*mutations+j] = v\n\t\t\t\tsampleSum += v\n\t\t\t}\n\n\t\t\tgo func(vals []float64) {\n\t\t\t\tstart.Wait()\n\t\t\t\tfor _, v := range vals {\n\t\t\t\t\tsum.Observe(v)\n\t\t\t\t}\n\t\t\t\tend.Done()\n\t\t\t}(vals)\n\t\t}\n\t\tsort.Float64s(allVars)\n\t\tstart.Done()\n\t\tend.Wait()\n\n\t\tm := &dto.Metric{}\n\t\tsum.Write(m)\n\t\tif got, want := int(*m.Summary.SampleCount), total; got != want {\n\t\t\tt.Errorf(\"got sample count %d, want %d\", got, want)\n\t\t}\n\t\tif got, want := *m.Summary.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 {\n\t\t\tt.Errorf(\"got sample sum %f, want %f\", got, want)\n\t\t}\n\n\t\tobjectives := make([]float64, 0, len(DefObjectives))\n\t\tfor qu := range DefObjectives {\n\t\t\tobjectives = append(objectives, qu)\n\t\t}\n\t\tsort.Float64s(objectives)\n\n\t\tfor i, wantQ := range objectives {\n\t\t\tε := DefObjectives[wantQ]\n\t\t\tgotQ := *m.Summary.Quantile[i].Quantile\n\t\t\tgotV := *m.Summary.Quantile[i].Value\n\t\t\tmin, max := getBounds(allVars, wantQ, ε)\n\t\t\tif gotQ != wantQ {\n\t\t\t\tt.Errorf(\"got quantile %f, want %f\", gotQ, wantQ)\n\t\t\t}\n\t\t\tif gotV < min || gotV > max {\n\t\t\t\tt.Errorf(\"got %f for quantile %f, want [%f,%f]\", gotV, gotQ, min, max)\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\tif err := quick.Check(it, nil); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestSummaryVecConcurrency(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping test in short mode.\")\n\t}\n\n\trand.Seed(42)\n\n\tobjectives := make([]float64, 0, len(DefObjectives))\n\tfor qu := range DefObjectives {\n\n\t\tobjectives = append(objectives, qu)\n\t}\n\tsort.Float64s(objectives)\n\n\tit := func(n uint32) bool {\n\t\tmutations := int(n%1e4 + 1e4)\n\t\tconcLevel := int(n%7 + 1)\n\t\tvecLength := int(n%3 + 1)\n\n\t\tvar start, end sync.WaitGroup\n\t\tstart.Add(1)\n\t\tend.Add(concLevel)\n\n\t\tsum := NewSummaryVec(\n\t\t\tSummaryOpts{\n\t\t\t\tName:       \"test_summary\",\n\t\t\t\tHelp:       \"helpless\",\n\t\t\t\tObjectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},\n\t\t\t},\n\t\t\t[]string{\"label\"},\n\t\t)\n\n\t\tallVars := make([][]float64, vecLength)\n\t\tsampleSums := make([]float64, vecLength)\n\t\tfor i := 0; i < concLevel; i++ {\n\t\t\tvals := make([]float64, mutations)\n\t\t\tpicks := make([]int, mutations)\n\t\t\tfor j := 0; j < mutations; j++ {\n\t\t\t\tv := rand.NormFloat64()\n\t\t\t\tvals[j] = v\n\t\t\t\tpick := rand.Intn(vecLength)\n\t\t\t\tpicks[j] = pick\n\t\t\t\tallVars[pick] = append(allVars[pick], v)\n\t\t\t\tsampleSums[pick] += v\n\t\t\t}\n\n\t\t\tgo func(vals []float64) {\n\t\t\t\tstart.Wait()\n\t\t\t\tfor i, v := range vals {\n\t\t\t\t\tsum.WithLabelValues(string('A' + 
picks[i])).Observe(v)\n\t\t\t\t}\n\t\t\t\tend.Done()\n\t\t\t}(vals)\n\t\t}\n\t\tfor _, vars := range allVars {\n\t\t\tsort.Float64s(vars)\n\t\t}\n\t\tstart.Done()\n\t\tend.Wait()\n\n\t\tfor i := 0; i < vecLength; i++ {\n\t\t\tm := &dto.Metric{}\n\t\t\ts := sum.WithLabelValues(string('A' + i))\n\t\t\ts.(Summary).Write(m)\n\t\t\tif got, want := int(*m.Summary.SampleCount), len(allVars[i]); got != want {\n\t\t\t\tt.Errorf(\"got sample count %d for label %c, want %d\", got, 'A'+i, want)\n\t\t\t}\n\t\t\tif got, want := *m.Summary.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 {\n\t\t\t\tt.Errorf(\"got sample sum %f for label %c, want %f\", got, 'A'+i, want)\n\t\t\t}\n\t\t\tfor j, wantQ := range objectives {\n\t\t\t\tε := DefObjectives[wantQ]\n\t\t\t\tgotQ := *m.Summary.Quantile[j].Quantile\n\t\t\t\tgotV := *m.Summary.Quantile[j].Value\n\t\t\t\tmin, max := getBounds(allVars[i], wantQ, ε)\n\t\t\t\tif gotQ != wantQ {\n\t\t\t\t\tt.Errorf(\"got quantile %f for label %c, want %f\", gotQ, 'A'+i, wantQ)\n\t\t\t\t}\n\t\t\t\tif gotV < min || gotV > max {\n\t\t\t\t\tt.Errorf(\"got %f for quantile %f for label %c, want [%f,%f]\", gotV, gotQ, 'A'+i, min, max)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn true\n\t}\n\n\tif err := quick.Check(it, nil); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestSummaryDecay(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping test in short mode.\")\n\t\t// More because it depends on timing than because it is particularly long...\n\t}\n\n\tsum := NewSummary(SummaryOpts{\n\t\tName:       \"test_summary\",\n\t\tHelp:       \"helpless\",\n\t\tMaxAge:     100 * time.Millisecond,\n\t\tObjectives: map[float64]float64{0.1: 0.001},\n\t\tAgeBuckets: 10,\n\t})\n\n\tm := &dto.Metric{}\n\ti := 0\n\ttick := time.NewTicker(time.Millisecond)\n\tfor range tick.C {\n\t\ti++\n\t\tsum.Observe(float64(i))\n\t\tif i%10 == 0 {\n\t\t\tsum.Write(m)\n\t\t\tif got, want := *m.Summary.Quantile[0].Value, math.Max(float64(i)/10, float64(i-90)); math.Abs(got-want) > 20 {\n\t\t\t\tt.Errorf(\"%d. got %f, want %f\", i, got, want)\n\t\t\t}\n\t\t\tm.Reset()\n\t\t}\n\t\tif i >= 1000 {\n\t\t\tbreak\n\t\t}\n\t}\n\ttick.Stop()\n\t// Wait for MaxAge without observations and make sure quantiles are NaN.\n\ttime.Sleep(100 * time.Millisecond)\n\tsum.Write(m)\n\tif got := *m.Summary.Quantile[0].Value; !math.IsNaN(got) {\n\t\tt.Errorf(\"got %f, want NaN after expiration\", got)\n\t}\n}\n\nfunc getBounds(vars []float64, q, ε float64) (min, max float64) {\n\t// TODO(beorn7): This currently tolerates an error of up to 2*ε. The\n\t// error must be at most ε, but for some reason, it's sometimes slightly\n\t// higher. That's a bug.\n\tn := float64(len(vars))\n\tlower := int((q - 2*ε) * n)\n\tupper := int(math.Ceil((q + 2*ε) * n))\n\tmin = vars[0]\n\tif lower > 1 {\n\t\tmin = vars[lower-1]\n\t}\n\tmax = vars[len(vars)-1]\n\tif upper < len(vars) {\n\t\tmax = vars[upper-1]\n\t}\n\treturn\n}\n"
  },
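  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/summary_usage_example_test.go",
    "content": "// This file is an editor-added illustrative sketch, not part of the upstream\n// client_golang library. It shows how the Objectives map on SummaryOpts\n// determines which quantiles a Summary reports, using only the exported API\n// vendored in this package.\n\npackage prometheus\n\nimport (\n\t\"fmt\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n)\n\nfunc ExampleSummary_objectives() {\n\tsum := NewSummary(SummaryOpts{\n\t\tName: \"request_duration_seconds\",\n\t\tHelp: \"Request duration distribution.\",\n\t\t// Each key is a quantile to track; each value is the allowed\n\t\t// absolute error for that quantile estimate.\n\t\tObjectives: map[float64]float64{0.5: 0.05, 0.99: 0.001},\n\t})\n\n\tfor i := 0; i < 1000; i++ {\n\t\tsum.Observe(float64(i) / 1000)\n\t}\n\n\tm := &dto.Metric{}\n\tif err := sum.Write(m); err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\t// Exactly the two configured quantiles are reported.\n\tfmt.Println(len(m.GetSummary().GetQuantile()))\n\t// Output: 2\n}\n"
  },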
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/timer.go",
    "content": "// Copyright 2016 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus\n\nimport \"time\"\n\n// Timer is a helper type to time functions. Use NewTimer to create new\n// instances.\ntype Timer struct {\n\tbegin    time.Time\n\tobserver Observer\n}\n\n// NewTimer creates a new Timer. The provided Observer is used to observe a\n// duration in seconds. Timer is usually used to time a function call in the\n// following way:\n//    func TimeMe() {\n//        timer := NewTimer(myHistogram)\n//        defer timer.ObserveDuration()\n//        // Do actual work.\n//    }\nfunc NewTimer(o Observer) *Timer {\n\treturn &Timer{\n\t\tbegin:    time.Now(),\n\t\tobserver: o,\n\t}\n}\n\n// ObserveDuration records the duration passed since the Timer was created with\n// NewTimer. It calls the Observe method of the Observer provided during\n// construction with the duration in seconds as an argument. ObserveDuration is\n// usually called with a defer statement.\n//\n// Note that this method is only guaranteed to never observe negative durations\n// if used with Go1.9+.\nfunc (t *Timer) ObserveDuration() {\n\tif t.observer != nil {\n\t\tt.observer.Observe(time.Since(t.begin).Seconds())\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/timer_test.go",
    "content": "// Copyright 2016 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus\n\nimport (\n\t\"testing\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n)\n\nfunc TestTimerObserve(t *testing.T) {\n\tvar (\n\t\this   = NewHistogram(HistogramOpts{Name: \"test_histogram\"})\n\t\tsum   = NewSummary(SummaryOpts{Name: \"test_summary\"})\n\t\tgauge = NewGauge(GaugeOpts{Name: \"test_gauge\"})\n\t)\n\n\tfunc() {\n\t\thisTimer := NewTimer(his)\n\t\tsumTimer := NewTimer(sum)\n\t\tgaugeTimer := NewTimer(ObserverFunc(gauge.Set))\n\t\tdefer hisTimer.ObserveDuration()\n\t\tdefer sumTimer.ObserveDuration()\n\t\tdefer gaugeTimer.ObserveDuration()\n\t}()\n\n\tm := &dto.Metric{}\n\this.Write(m)\n\tif want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {\n\t\tt.Errorf(\"want %d observations for histogram, got %d\", want, got)\n\t}\n\tm.Reset()\n\tsum.Write(m)\n\tif want, got := uint64(1), m.GetSummary().GetSampleCount(); want != got {\n\t\tt.Errorf(\"want %d observations for summary, got %d\", want, got)\n\t}\n\tm.Reset()\n\tgauge.Write(m)\n\tif got := m.GetGauge().GetValue(); got <= 0 {\n\t\tt.Errorf(\"want value > 0 for gauge, got %f\", got)\n\t}\n}\n\nfunc TestTimerEmpty(t *testing.T) {\n\temptyTimer := NewTimer(nil)\n\temptyTimer.ObserveDuration()\n\t// Do nothing, just demonstrate it works without panic.\n}\n\nfunc TestTimerConditionalTiming(t *testing.T) {\n\tvar (\n\t\this = NewHistogram(HistogramOpts{\n\t\t\tName: \"test_histogram\",\n\t\t})\n\t\ttimeMe = true\n\t\tm      = &dto.Metric{}\n\t)\n\n\ttimedFunc := func() {\n\t\ttimer := NewTimer(ObserverFunc(func(v float64) {\n\t\t\tif timeMe {\n\t\t\t\this.Observe(v)\n\t\t\t}\n\t\t}))\n\t\tdefer timer.ObserveDuration()\n\t}\n\n\ttimedFunc() // This will time.\n\this.Write(m)\n\tif want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {\n\t\tt.Errorf(\"want %d observations for histogram, got %d\", want, got)\n\t}\n\n\ttimeMe = false\n\ttimedFunc() // This will not time again.\n\tm.Reset()\n\this.Write(m)\n\tif want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {\n\t\tt.Errorf(\"want %d observations for histogram, got %d\", want, got)\n\t}\n}\n\nfunc TestTimerByOutcome(t *testing.T) {\n\tvar (\n\t\this = NewHistogramVec(\n\t\t\tHistogramOpts{Name: \"test_histogram\"},\n\t\t\t[]string{\"outcome\"},\n\t\t)\n\t\toutcome = \"foo\"\n\t\tm       = &dto.Metric{}\n\t)\n\n\ttimedFunc := func() {\n\t\ttimer := NewTimer(ObserverFunc(func(v float64) {\n\t\t\this.WithLabelValues(outcome).Observe(v)\n\t\t}))\n\t\tdefer timer.ObserveDuration()\n\n\t\tif outcome == \"foo\" {\n\t\t\toutcome = \"bar\"\n\t\t\treturn\n\t\t}\n\t\toutcome = \"foo\"\n\t}\n\n\ttimedFunc()\n\this.WithLabelValues(\"foo\").(Histogram).Write(m)\n\tif want, got := uint64(0), m.GetHistogram().GetSampleCount(); want != got {\n\t\tt.Errorf(\"want %d observations for 'foo' histogram, got %d\", want, got)\n\t}\n\tm.Reset()\n\this.WithLabelValues(\"bar\").(Histogram).Write(m)\n\tif want, got 
:= uint64(1), m.GetHistogram().GetSampleCount(); want != got {\n\t\tt.Errorf(\"want %d observations for 'bar' histogram, got %d\", want, got)\n\t}\n\n\ttimedFunc()\n\tm.Reset()\n\this.WithLabelValues(\"foo\").(Histogram).Write(m)\n\tif want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {\n\t\tt.Errorf(\"want %d observations for 'foo' histogram, got %d\", want, got)\n\t}\n\tm.Reset()\n\this.WithLabelValues(\"bar\").(Histogram).Write(m)\n\tif want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {\n\t\tt.Errorf(\"want %d observations for 'bar' histogram, got %d\", want, got)\n\t}\n\n\ttimedFunc()\n\tm.Reset()\n\this.WithLabelValues(\"foo\").(Histogram).Write(m)\n\tif want, got := uint64(1), m.GetHistogram().GetSampleCount(); want != got {\n\t\tt.Errorf(\"want %d observations for 'foo' histogram, got %d\", want, got)\n\t}\n\tm.Reset()\n\this.WithLabelValues(\"bar\").(Histogram).Write(m)\n\tif want, got := uint64(2), m.GetHistogram().GetSampleCount(); want != got {\n\t\tt.Errorf(\"want %d observations for 'bar' histogram, got %d\", want, got)\n\t}\n\n}\n"
  },
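  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/timer_usage_example_test.go",
    "content": "// This file is an editor-added illustrative sketch, not part of the upstream\n// client_golang library. It demonstrates the Observer indirection described\n// in timer.go: because NewTimer accepts any Observer, an ObserverFunc can\n// route the measured duration into a Gauge just as easily as into a\n// Histogram or Summary. Only the exported API vendored here is used.\n\npackage prometheus\n\nfunc ExampleNewTimer_gauge() {\n\tlastRunDuration := NewGauge(GaugeOpts{\n\t\tName: \"job_last_run_duration_seconds\",\n\t\tHelp: \"Duration of the most recent job run.\",\n\t})\n\n\tjob := func() {\n\t\t// ObserveDuration fires when the surrounding function returns and\n\t\t// feeds the elapsed seconds into the gauge via ObserverFunc.\n\t\ttimer := NewTimer(ObserverFunc(lastRunDuration.Set))\n\t\tdefer timer.ObserveDuration()\n\t\t// ... actual work would happen here ...\n\t}\n\tjob()\n}\n"
  },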
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/untyped.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus\n\n// UntypedOpts is an alias for Opts. See there for doc comments.\ntype UntypedOpts Opts\n\n// UntypedFunc works like GaugeFunc but the collected metric is of type\n// \"Untyped\". UntypedFunc is useful to mirror an external metric of unknown\n// type.\n//\n// To create UntypedFunc instances, use NewUntypedFunc.\ntype UntypedFunc interface {\n\tMetric\n\tCollector\n}\n\n// NewUntypedFunc creates a new UntypedFunc based on the provided\n// UntypedOpts. The value reported is determined by calling the given function\n// from within the Write method. Take into account that metric collection may\n// happen concurrently. If that results in concurrent calls to Write, like in\n// the case where an UntypedFunc is directly registered with Prometheus, the\n// provided function must be concurrency-safe.\nfunc NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc {\n\treturn newValueFunc(NewDesc(\n\t\tBuildFQName(opts.Namespace, opts.Subsystem, opts.Name),\n\t\topts.Help,\n\t\tnil,\n\t\topts.ConstLabels,\n\t), UntypedValue, function)\n}\n"
  },
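  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/untyped_usage_example_test.go",
    "content": "// This file is an editor-added illustrative sketch, not part of the upstream\n// client_golang library. It shows NewUntypedFunc mirroring an external value\n// of unknown metric type, as the NewUntypedFunc doc comment suggests. The\n// readExternalValue closure is a hypothetical stand-in for querying that\n// external system; in real use it must be concurrency-safe because it may be\n// called from concurrent scrapes.\n\npackage prometheus\n\nfunc ExampleNewUntypedFunc_external() {\n\t// Hypothetical source of a value whose type (counter? gauge?) the\n\t// external system does not declare.\n\treadExternalValue := func() float64 {\n\t\treturn 42\n\t}\n\n\tuf := NewUntypedFunc(UntypedOpts{\n\t\tName: \"external_mystery_value\",\n\t\tHelp: \"Value mirrored from an external system of unknown type.\",\n\t}, readExternalValue)\n\n\treg := NewRegistry()\n\tif err := reg.Register(uf); err != nil {\n\t\tpanic(err)\n\t}\n}\n"
  },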
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/value.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"sort\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n\n\t\"github.com/golang/protobuf/proto\"\n)\n\n// ValueType is an enumeration of metric types that represent a simple value.\ntype ValueType int\n\n// Possible values for the ValueType enum.\nconst (\n\t_ ValueType = iota\n\tCounterValue\n\tGaugeValue\n\tUntypedValue\n)\n\n// value is a generic metric for simple values. It implements Metric, Collector,\n// Counter, Gauge, and Untyped. Its effective type is determined by\n// ValueType. This is a low-level building block used by the library to back the\n// implementations of Counter, Gauge, and Untyped.\ntype value struct {\n\t// valBits contains the bits of the represented float64 value. It has\n\t// to go first in the struct to guarantee alignment for atomic\n\t// operations.  http://golang.org/pkg/sync/atomic/#pkg-note-BUG\n\tvalBits uint64\n\n\tselfCollector\n\n\tdesc       *Desc\n\tvalType    ValueType\n\tlabelPairs []*dto.LabelPair\n}\n\n// newValue returns a newly allocated value with the given Desc, ValueType,\n// sample value and label values. It panics if the number of label\n// values is different from the number of variable labels in Desc.\nfunc newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value {\n\tif len(labelValues) != len(desc.variableLabels) {\n\t\tpanic(errInconsistentCardinality)\n\t}\n\tresult := &value{\n\t\tdesc:       desc,\n\t\tvalType:    valueType,\n\t\tvalBits:    math.Float64bits(val),\n\t\tlabelPairs: makeLabelPairs(desc, labelValues),\n\t}\n\tresult.init(result)\n\treturn result\n}\n\nfunc (v *value) Desc() *Desc {\n\treturn v.desc\n}\n\nfunc (v *value) Set(val float64) {\n\tatomic.StoreUint64(&v.valBits, math.Float64bits(val))\n}\n\nfunc (v *value) SetToCurrentTime() {\n\tv.Set(float64(time.Now().UnixNano()) / 1e9)\n}\n\nfunc (v *value) Inc() {\n\tv.Add(1)\n}\n\nfunc (v *value) Dec() {\n\tv.Add(-1)\n}\n\nfunc (v *value) Add(val float64) {\n\tfor {\n\t\toldBits := atomic.LoadUint64(&v.valBits)\n\t\tnewBits := math.Float64bits(math.Float64frombits(oldBits) + val)\n\t\tif atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (v *value) Sub(val float64) {\n\tv.Add(val * -1)\n}\n\nfunc (v *value) Write(out *dto.Metric) error {\n\tval := math.Float64frombits(atomic.LoadUint64(&v.valBits))\n\treturn populateMetric(v.valType, val, v.labelPairs, out)\n}\n\n// valueFunc is a generic metric for simple values retrieved on collect time\n// from a function. It implements Metric and Collector. Its effective type is\n// determined by ValueType. 
This is a low-level building block used by the\n// library to back the implementations of CounterFunc, GaugeFunc, and\n// UntypedFunc.\ntype valueFunc struct {\n\tselfCollector\n\n\tdesc       *Desc\n\tvalType    ValueType\n\tfunction   func() float64\n\tlabelPairs []*dto.LabelPair\n}\n\n// newValueFunc returns a newly allocated valueFunc with the given Desc and\n// ValueType. The value reported is determined by calling the given function\n// from within the Write method. Take into account that metric collection may\n// happen concurrently. If that results in concurrent calls to Write, like in\n// the case where a valueFunc is directly registered with Prometheus, the\n// provided function must be concurrency-safe.\nfunc newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc {\n\tresult := &valueFunc{\n\t\tdesc:       desc,\n\t\tvalType:    valueType,\n\t\tfunction:   function,\n\t\tlabelPairs: makeLabelPairs(desc, nil),\n\t}\n\tresult.init(result)\n\treturn result\n}\n\nfunc (v *valueFunc) Desc() *Desc {\n\treturn v.desc\n}\n\nfunc (v *valueFunc) Write(out *dto.Metric) error {\n\treturn populateMetric(v.valType, v.function(), v.labelPairs, out)\n}\n\n// NewConstMetric returns a metric with one fixed value that cannot be\n// changed. Users of this package will not have much use for it in regular\n// operations. However, when implementing custom Collectors, it is useful as a\n// throw-away metric that is generated on the fly to send it to Prometheus in\n// the Collect method. NewConstMetric returns an error if the length of\n// labelValues is not consistent with the variable labels in Desc.\nfunc NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {\n\tif err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &constMetric{\n\t\tdesc:       desc,\n\t\tvalType:    valueType,\n\t\tval:        value,\n\t\tlabelPairs: makeLabelPairs(desc, labelValues),\n\t}, nil\n}\n\n// MustNewConstMetric is a version of NewConstMetric that panics where\n// NewConstMetric would have returned an error.\nfunc MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric {\n\tm, err := NewConstMetric(desc, valueType, value, labelValues...)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn m\n}\n\ntype constMetric struct {\n\tdesc       *Desc\n\tvalType    ValueType\n\tval        float64\n\tlabelPairs []*dto.LabelPair\n}\n\nfunc (m *constMetric) Desc() *Desc {\n\treturn m.desc\n}\n\nfunc (m *constMetric) Write(out *dto.Metric) error {\n\treturn populateMetric(m.valType, m.val, m.labelPairs, out)\n}\n\nfunc populateMetric(\n\tt ValueType,\n\tv float64,\n\tlabelPairs []*dto.LabelPair,\n\tm *dto.Metric,\n) error {\n\tm.Label = labelPairs\n\tswitch t {\n\tcase CounterValue:\n\t\tm.Counter = &dto.Counter{Value: proto.Float64(v)}\n\tcase GaugeValue:\n\t\tm.Gauge = &dto.Gauge{Value: proto.Float64(v)}\n\tcase UntypedValue:\n\t\tm.Untyped = &dto.Untyped{Value: proto.Float64(v)}\n\tdefault:\n\t\treturn fmt.Errorf(\"encountered unknown type %v\", t)\n\t}\n\treturn nil\n}\n\nfunc makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {\n\ttotalLen := len(desc.variableLabels) + len(desc.constLabelPairs)\n\tif totalLen == 0 {\n\t\t// Super fast path.\n\t\treturn nil\n\t}\n\tif len(desc.variableLabels) == 0 {\n\t\t// Moderately fast path.\n\t\treturn desc.constLabelPairs\n\t}\n\tlabelPairs := make([]*dto.LabelPair, 0, totalLen)\n\tfor i, n := range 
desc.variableLabels {\n\t\tlabelPairs = append(labelPairs, &dto.LabelPair{\n\t\t\tName:  proto.String(n),\n\t\t\tValue: proto.String(labelValues[i]),\n\t\t})\n\t}\n\tfor _, lp := range desc.constLabelPairs {\n\t\tlabelPairs = append(labelPairs, lp)\n\t}\n\tsort.Sort(LabelPairSorter(labelPairs))\n\treturn labelPairs\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/value_test.go",
    "content": "package prometheus\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n)\n\nfunc TestNewConstMetricInvalidLabelValues(t *testing.T) {\n\ttestCases := []struct {\n\t\tdesc   string\n\t\tlabels Labels\n\t}{\n\t\t{\n\t\t\tdesc:   \"non utf8 label value\",\n\t\t\tlabels: Labels{\"a\": \"\\xFF\"},\n\t\t},\n\t\t{\n\t\t\tdesc:   \"not enough label values\",\n\t\t\tlabels: Labels{},\n\t\t},\n\t\t{\n\t\t\tdesc:   \"too many label values\",\n\t\t\tlabels: Labels{\"a\": \"1\", \"b\": \"2\"},\n\t\t},\n\t}\n\n\tfor _, test := range testCases {\n\t\tmetricDesc := NewDesc(\n\t\t\t\"sample_value\",\n\t\t\t\"sample value\",\n\t\t\t[]string{\"a\"},\n\t\t\tLabels{},\n\t\t)\n\n\t\texpectPanic(t, func() {\n\t\t\tMustNewConstMetric(metricDesc, CounterValue, 0.3, \"\\xFF\")\n\t\t}, fmt.Sprintf(\"WithLabelValues: expected panic because: %s\", test.desc))\n\n\t\tif _, err := NewConstMetric(metricDesc, CounterValue, 0.3, \"\\xFF\"); err == nil {\n\t\t\tt.Errorf(\"NewConstMetric: expected error because: %s\", test.desc)\n\t\t}\n\t}\n}\n"
  },
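  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/value_usage_example_test.go",
    "content": "// This file is an editor-added illustrative sketch, not part of the upstream\n// client_golang library. It spells out the pattern named in the\n// NewConstMetric doc comment: a custom Collector that builds a throw-away\n// metric with MustNewConstMetric on every scrape. The metric name, the\n// \"zone\" label, and the hard-coded value are invented for illustration.\n\npackage prometheus\n\ntype exampleConstCollector struct {\n\tdesc *Desc\n}\n\nfunc (c *exampleConstCollector) Describe(ch chan<- *Desc) {\n\tch <- c.desc\n}\n\nfunc (c *exampleConstCollector) Collect(ch chan<- Metric) {\n\t// A real collector would read this value from the instrumented system\n\t// at scrape time instead of hard-coding it.\n\tch <- MustNewConstMetric(c.desc, GaugeValue, 0.85, \"zone-a\")\n}\n\nfunc ExampleMustNewConstMetric_collector() {\n\tc := &exampleConstCollector{\n\t\tdesc: NewDesc(\n\t\t\t\"cluster_cpu_usage_ratio\",\n\t\t\t\"Current CPU usage per zone.\",\n\t\t\t[]string{\"zone\"},\n\t\t\tnil,\n\t\t),\n\t}\n\treg := NewRegistry()\n\tif err := reg.Register(c); err != nil {\n\t\tpanic(err)\n\t}\n}\n"
  },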
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/vec.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\n\t\"github.com/prometheus/common/model\"\n)\n\n// metricVec is a Collector to bundle metrics of the same name that differ in\n// their label values. metricVec is not used directly (and therefore\n// unexported). It is used as a building block for implementations of vectors of\n// a given metric type, like GaugeVec, CounterVec, SummaryVec, HistogramVec, and\n// UntypedVec.\ntype metricVec struct {\n\tmtx      sync.RWMutex // Protects the children.\n\tchildren map[uint64][]metricWithLabelValues\n\tdesc     *Desc\n\n\tnewMetric   func(labelValues ...string) Metric\n\thashAdd     func(h uint64, s string) uint64 // replace hash function for testing collision handling\n\thashAddByte func(h uint64, b byte) uint64\n}\n\n// newMetricVec returns an initialized metricVec.\nfunc newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec {\n\treturn &metricVec{\n\t\tchildren:    map[uint64][]metricWithLabelValues{},\n\t\tdesc:        desc,\n\t\tnewMetric:   newMetric,\n\t\thashAdd:     hashAdd,\n\t\thashAddByte: hashAddByte,\n\t}\n}\n\n// metricWithLabelValues provides the metric and its label values for\n// disambiguation on hash collision.\ntype metricWithLabelValues struct {\n\tvalues []string\n\tmetric Metric\n}\n\n// Describe implements Collector. The length of the returned slice\n// is always one.\nfunc (m *metricVec) Describe(ch chan<- *Desc) {\n\tch <- m.desc\n}\n\n// Collect implements Collector.\nfunc (m *metricVec) Collect(ch chan<- Metric) {\n\tm.mtx.RLock()\n\tdefer m.mtx.RUnlock()\n\n\tfor _, metrics := range m.children {\n\t\tfor _, metric := range metrics {\n\t\t\tch <- metric.metric\n\t\t}\n\t}\n}\n\nfunc (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) {\n\th, err := m.hashLabelValues(lvs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn m.getOrCreateMetricWithLabelValues(h, lvs), nil\n}\n\nfunc (m *metricVec) getMetricWith(labels Labels) (Metric, error) {\n\th, err := m.hashLabels(labels)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn m.getOrCreateMetricWithLabels(h, labels), nil\n}\n\nfunc (m *metricVec) withLabelValues(lvs ...string) Metric {\n\tmetric, err := m.getMetricWithLabelValues(lvs...)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn metric\n}\n\nfunc (m *metricVec) with(labels Labels) Metric {\n\tmetric, err := m.getMetricWith(labels)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn metric\n}\n\n// DeleteLabelValues removes the metric where the variable labels are the same\n// as those passed in as labels (same order as the VariableLabels in Desc). It\n// returns true if a metric was deleted.\n//\n// It is not an error if the number of label values is not the same as the\n// number of VariableLabels in Desc. 
However, such inconsistent label count can\n// never match an actual metric, so the method will always return false in that\n// case.\n//\n// Note that for more than one label value, this method is prone to mistakes\n// caused by an incorrect order of arguments. Consider Delete(Labels) as an\n// alternative to avoid that type of mistake. For higher label numbers, the\n// latter has a much more readable (albeit more verbose) syntax, but it comes\n// with a performance overhead (for creating and processing the Labels map).\n// See also the CounterVec example.\nfunc (m *metricVec) DeleteLabelValues(lvs ...string) bool {\n\tm.mtx.Lock()\n\tdefer m.mtx.Unlock()\n\n\th, err := m.hashLabelValues(lvs)\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn m.deleteByHashWithLabelValues(h, lvs)\n}\n\n// Delete deletes the metric where the variable labels are the same as those\n// passed in as labels. It returns true if a metric was deleted.\n//\n// It is not an error if the number and names of the Labels are inconsistent\n// with those of the VariableLabels in Desc. However, such inconsistent Labels\n// can never match an actual metric, so the method will always return false in\n// that case.\n//\n// This method is used for the same purpose as DeleteLabelValues(...string). See\n// there for pros and cons of the two methods.\nfunc (m *metricVec) Delete(labels Labels) bool {\n\tm.mtx.Lock()\n\tdefer m.mtx.Unlock()\n\n\th, err := m.hashLabels(labels)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn m.deleteByHashWithLabels(h, labels)\n}\n\n// deleteByHashWithLabelValues removes the metric from the hash bucket h. If\n// there are multiple matches in the bucket, use lvs to select a metric and\n// remove only that metric.\nfunc (m *metricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {\n\tmetrics, ok := m.children[h]\n\tif !ok {\n\t\treturn false\n\t}\n\n\ti := m.findMetricWithLabelValues(metrics, lvs)\n\tif i >= len(metrics) {\n\t\treturn false\n\t}\n\n\tif len(metrics) > 1 {\n\t\tm.children[h] = append(metrics[:i], metrics[i+1:]...)\n\t} else {\n\t\tdelete(m.children, h)\n\t}\n\treturn true\n}\n\n// deleteByHashWithLabels removes the metric from the hash bucket h. 
If there\n// are multiple matches in the bucket, use labels to select a metric and remove\n// only that metric.\nfunc (m *metricVec) deleteByHashWithLabels(h uint64, labels Labels) bool {\n\tmetrics, ok := m.children[h]\n\tif !ok {\n\t\treturn false\n\t}\n\ti := m.findMetricWithLabels(metrics, labels)\n\tif i >= len(metrics) {\n\t\treturn false\n\t}\n\n\tif len(metrics) > 1 {\n\t\tm.children[h] = append(metrics[:i], metrics[i+1:]...)\n\t} else {\n\t\tdelete(m.children, h)\n\t}\n\treturn true\n}\n\n// Reset deletes all metrics in this vector.\nfunc (m *metricVec) Reset() {\n\tm.mtx.Lock()\n\tdefer m.mtx.Unlock()\n\n\tfor h := range m.children {\n\t\tdelete(m.children, h)\n\t}\n}\n\nfunc (m *metricVec) hashLabelValues(vals []string) (uint64, error) {\n\tif err := validateLabelValues(vals, len(m.desc.variableLabels)); err != nil {\n\t\treturn 0, err\n\t}\n\n\th := hashNew()\n\tfor _, val := range vals {\n\t\th = m.hashAdd(h, val)\n\t\th = m.hashAddByte(h, model.SeparatorByte)\n\t}\n\treturn h, nil\n}\n\nfunc (m *metricVec) hashLabels(labels Labels) (uint64, error) {\n\tif err := validateValuesInLabels(labels, len(m.desc.variableLabels)); err != nil {\n\t\treturn 0, err\n\t}\n\n\th := hashNew()\n\tfor _, label := range m.desc.variableLabels {\n\t\tval, ok := labels[label]\n\t\tif !ok {\n\t\t\treturn 0, fmt.Errorf(\"label name %q missing in label map\", label)\n\t\t}\n\t\th = m.hashAdd(h, val)\n\t\th = m.hashAddByte(h, model.SeparatorByte)\n\t}\n\treturn h, nil\n}\n\n// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value\n// or creates it and returns the new one.\n//\n// This function holds the mutex.\nfunc (m *metricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {\n\tm.mtx.RLock()\n\tmetric, ok := m.getMetricWithHashAndLabelValues(hash, lvs)\n\tm.mtx.RUnlock()\n\tif ok {\n\t\treturn metric\n\t}\n\n\tm.mtx.Lock()\n\tdefer m.mtx.Unlock()\n\tmetric, ok = m.getMetricWithHashAndLabelValues(hash, lvs)\n\tif !ok {\n\t\t// Copy to avoid allocation in case we don't go down this code path.\n\t\tcopiedLVs := make([]string, len(lvs))\n\t\tcopy(copiedLVs, lvs)\n\t\tmetric = m.newMetric(copiedLVs...)\n\t\tm.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric})\n\t}\n\treturn metric\n}\n\n// getOrCreateMetricWithLabels retrieves the metric by hash and labels or\n// creates it and returns the new one.\n//\n// This function holds the mutex.\nfunc (m *metricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {\n\tm.mtx.RLock()\n\tmetric, ok := m.getMetricWithHashAndLabels(hash, labels)\n\tm.mtx.RUnlock()\n\tif ok {\n\t\treturn metric\n\t}\n\n\tm.mtx.Lock()\n\tdefer m.mtx.Unlock()\n\tmetric, ok = m.getMetricWithHashAndLabels(hash, labels)\n\tif !ok {\n\t\tlvs := m.extractLabelValues(labels)\n\t\tmetric = m.newMetric(lvs...)\n\t\tm.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric})\n\t}\n\treturn metric\n}\n\n// getMetricWithHashAndLabelValues gets a metric while handling possible\n// collisions in the hash space. Must be called while holding the read mutex.\nfunc (m *metricVec) getMetricWithHashAndLabelValues(h uint64, lvs []string) (Metric, bool) {\n\tmetrics, ok := m.children[h]\n\tif ok {\n\t\tif i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) {\n\t\t\treturn metrics[i].metric, true\n\t\t}\n\t}\n\treturn nil, false\n}\n\n// getMetricWithHashAndLabels gets a metric while handling possible collisions in\n// the hash space. 
Must be called while holding the read mutex.\nfunc (m *metricVec) getMetricWithHashAndLabels(h uint64, labels Labels) (Metric, bool) {\n\tmetrics, ok := m.children[h]\n\tif ok {\n\t\tif i := m.findMetricWithLabels(metrics, labels); i < len(metrics) {\n\t\t\treturn metrics[i].metric, true\n\t\t}\n\t}\n\treturn nil, false\n}\n\n// findMetricWithLabelValues returns the index of the matching metric or\n// len(metrics) if not found.\nfunc (m *metricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int {\n\tfor i, metric := range metrics {\n\t\tif m.matchLabelValues(metric.values, lvs) {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn len(metrics)\n}\n\n// findMetricWithLabels returns the index of the matching metric or len(metrics)\n// if not found.\nfunc (m *metricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int {\n\tfor i, metric := range metrics {\n\t\tif m.matchLabels(metric.values, labels) {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn len(metrics)\n}\n\nfunc (m *metricVec) matchLabelValues(values []string, lvs []string) bool {\n\tif len(values) != len(lvs) {\n\t\treturn false\n\t}\n\tfor i, v := range values {\n\t\tif v != lvs[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (m *metricVec) matchLabels(values []string, labels Labels) bool {\n\tif len(labels) != len(values) {\n\t\treturn false\n\t}\n\tfor i, k := range m.desc.variableLabels {\n\t\tif values[i] != labels[k] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc (m *metricVec) extractLabelValues(labels Labels) []string {\n\tlabelValues := make([]string, len(labels))\n\tfor i, k := range m.desc.variableLabels {\n\t\tlabelValues[i] = labels[k]\n\t}\n\treturn labelValues\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/vec_test.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage prometheus\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n)\n\nfunc TestDelete(t *testing.T) {\n\tvec := NewGaugeVec(\n\t\tGaugeOpts{\n\t\t\tName: \"test\",\n\t\t\tHelp: \"helpless\",\n\t\t},\n\t\t[]string{\"l1\", \"l2\"},\n\t)\n\ttestDelete(t, vec)\n}\n\nfunc TestDeleteWithCollisions(t *testing.T) {\n\tvec := NewGaugeVec(\n\t\tGaugeOpts{\n\t\t\tName: \"test\",\n\t\t\tHelp: \"helpless\",\n\t\t},\n\t\t[]string{\"l1\", \"l2\"},\n\t)\n\tvec.hashAdd = func(h uint64, s string) uint64 { return 1 }\n\tvec.hashAddByte = func(h uint64, b byte) uint64 { return 1 }\n\ttestDelete(t, vec)\n}\n\nfunc testDelete(t *testing.T, vec *GaugeVec) {\n\tif got, want := vec.Delete(Labels{\"l1\": \"v1\", \"l2\": \"v2\"}), false; got != want {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n\n\tvec.With(Labels{\"l1\": \"v1\", \"l2\": \"v2\"}).(Gauge).Set(42)\n\tif got, want := vec.Delete(Labels{\"l1\": \"v1\", \"l2\": \"v2\"}), true; got != want {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n\tif got, want := vec.Delete(Labels{\"l1\": \"v1\", \"l2\": \"v2\"}), false; got != want {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n\n\tvec.With(Labels{\"l1\": \"v1\", \"l2\": \"v2\"}).(Gauge).Set(42)\n\tif got, want := vec.Delete(Labels{\"l2\": \"v2\", \"l1\": \"v1\"}), true; got != want {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n\tif got, want := vec.Delete(Labels{\"l2\": \"v2\", \"l1\": \"v1\"}), false; got != want {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n\n\tvec.With(Labels{\"l1\": \"v1\", \"l2\": \"v2\"}).(Gauge).Set(42)\n\tif got, want := vec.Delete(Labels{\"l2\": \"v1\", \"l1\": \"v2\"}), false; got != want {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n\tif got, want := vec.Delete(Labels{\"l1\": \"v1\"}), false; got != want {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n}\n\nfunc TestDeleteLabelValues(t *testing.T) {\n\tvec := NewGaugeVec(\n\t\tGaugeOpts{\n\t\t\tName: \"test\",\n\t\t\tHelp: \"helpless\",\n\t\t},\n\t\t[]string{\"l1\", \"l2\"},\n\t)\n\ttestDeleteLabelValues(t, vec)\n}\n\nfunc TestDeleteLabelValuesWithCollisions(t *testing.T) {\n\tvec := NewGaugeVec(\n\t\tGaugeOpts{\n\t\t\tName: \"test\",\n\t\t\tHelp: \"helpless\",\n\t\t},\n\t\t[]string{\"l1\", \"l2\"},\n\t)\n\tvec.hashAdd = func(h uint64, s string) uint64 { return 1 }\n\tvec.hashAddByte = func(h uint64, b byte) uint64 { return 1 }\n\ttestDeleteLabelValues(t, vec)\n}\n\nfunc testDeleteLabelValues(t *testing.T, vec *GaugeVec) {\n\tif got, want := vec.DeleteLabelValues(\"v1\", \"v2\"), false; got != want {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n\n\tvec.With(Labels{\"l1\": \"v1\", \"l2\": \"v2\"}).(Gauge).Set(42)\n\tvec.With(Labels{\"l1\": \"v1\", \"l2\": \"v3\"}).(Gauge).Set(42) // Add junk data for collision.\n\tif got, want := vec.DeleteLabelValues(\"v1\", \"v2\"), true; got != want {\n\t\tt.Errorf(\"got %v, want %v\", 
got, want)\n\t}\n\tif got, want := vec.DeleteLabelValues(\"v1\", \"v2\"), false; got != want {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n\tif got, want := vec.DeleteLabelValues(\"v1\", \"v3\"), true; got != want {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n\n\tvec.With(Labels{\"l1\": \"v1\", \"l2\": \"v2\"}).(Gauge).Set(42)\n\t// Delete out of order.\n\tif got, want := vec.DeleteLabelValues(\"v2\", \"v1\"), false; got != want {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n\tif got, want := vec.DeleteLabelValues(\"v1\"), false; got != want {\n\t\tt.Errorf(\"got %v, want %v\", got, want)\n\t}\n}\n\nfunc TestMetricVec(t *testing.T) {\n\tvec := NewGaugeVec(\n\t\tGaugeOpts{\n\t\t\tName: \"test\",\n\t\t\tHelp: \"helpless\",\n\t\t},\n\t\t[]string{\"l1\", \"l2\"},\n\t)\n\ttestMetricVec(t, vec)\n}\n\nfunc TestMetricVecWithCollisions(t *testing.T) {\n\tvec := NewGaugeVec(\n\t\tGaugeOpts{\n\t\t\tName: \"test\",\n\t\t\tHelp: \"helpless\",\n\t\t},\n\t\t[]string{\"l1\", \"l2\"},\n\t)\n\tvec.hashAdd = func(h uint64, s string) uint64 { return 1 }\n\tvec.hashAddByte = func(h uint64, b byte) uint64 { return 1 }\n\ttestMetricVec(t, vec)\n}\n\nfunc testMetricVec(t *testing.T, vec *GaugeVec) {\n\tvec.Reset() // Actually test Reset now!\n\n\tvar pair [2]string\n\t// Keep track of metrics.\n\texpected := map[[2]string]int{}\n\n\tfor i := 0; i < 1000; i++ {\n\t\tpair[0], pair[1] = fmt.Sprint(i%4), fmt.Sprint(i%5) // Varying combinations of label values.\n\t\texpected[pair]++\n\t\tvec.WithLabelValues(pair[0], pair[1]).Inc()\n\n\t\texpected[[2]string{\"v1\", \"v2\"}]++\n\t\tvec.WithLabelValues(\"v1\", \"v2\").(Gauge).Inc()\n\t}\n\n\tvar total int\n\tfor _, metrics := range vec.children {\n\t\tfor _, metric := range metrics {\n\t\t\ttotal++\n\t\t\tcopy(pair[:], metric.values)\n\n\t\t\tvar metricOut dto.Metric\n\t\t\tif err := metric.metric.Write(&metricOut); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tactual := *metricOut.Gauge.Value\n\n\t\t\tvar actualPair [2]string\n\t\t\tfor i, label := range metricOut.Label {\n\t\t\t\tactualPair[i] = *label.Value\n\t\t\t}\n\n\t\t\t// Test output pair against metric.values to ensure we've selected\n\t\t\t// the right one. 
We check this to ensure the below check means\n\t\t\t// anything at all.\n\t\t\tif actualPair != pair {\n\t\t\t\tt.Fatalf(\"unexpected pair association in metric map: %v != %v\", actualPair, pair)\n\t\t\t}\n\n\t\t\tif actual != float64(expected[pair]) {\n\t\t\t\tt.Fatalf(\"incorrect counter value for %v: %v != %v\", pair, actual, expected[pair])\n\t\t\t}\n\t\t}\n\t}\n\n\tif total != len(expected) {\n\t\tt.Fatalf(\"unexpected number of metrics: %v != %v\", total, len(expected))\n\t}\n\n\tvec.Reset()\n\n\tif len(vec.children) > 0 {\n\t\tt.Fatalf(\"reset failed\")\n\t}\n}\n\nfunc TestCounterVecEndToEndWithCollision(t *testing.T) {\n\tvec := NewCounterVec(\n\t\tCounterOpts{\n\t\t\tName: \"test\",\n\t\t\tHelp: \"helpless\",\n\t\t},\n\t\t[]string{\"labelname\"},\n\t)\n\tvec.WithLabelValues(\"77kepQFQ8Kl\").Inc()\n\tvec.WithLabelValues(\"!0IC=VloaY\").Add(2)\n\n\tm := &dto.Metric{}\n\tif err := vec.WithLabelValues(\"77kepQFQ8Kl\").Write(m); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, want := m.GetLabel()[0].GetValue(), \"77kepQFQ8Kl\"; got != want {\n\t\tt.Errorf(\"got label value %q, want %q\", got, want)\n\t}\n\tif got, want := m.GetCounter().GetValue(), 1.; got != want {\n\t\tt.Errorf(\"got value %f, want %f\", got, want)\n\t}\n\tm.Reset()\n\tif err := vec.WithLabelValues(\"!0IC=VloaY\").Write(m); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif got, want := m.GetLabel()[0].GetValue(), \"!0IC=VloaY\"; got != want {\n\t\tt.Errorf(\"got label value %q, want %q\", got, want)\n\t}\n\tif got, want := m.GetCounter().GetValue(), 2.; got != want {\n\t\tt.Errorf(\"got value %f, want %f\", got, want)\n\t}\n}\n\nfunc BenchmarkMetricVecWithLabelValuesBasic(b *testing.B) {\n\tbenchmarkMetricVecWithLabelValues(b, map[string][]string{\n\t\t\"l1\": {\"onevalue\"},\n\t\t\"l2\": {\"twovalue\"},\n\t})\n}\n\nfunc BenchmarkMetricVecWithLabelValues2Keys10ValueCardinality(b *testing.B) {\n\tbenchmarkMetricVecWithLabelValuesCardinality(b, 2, 10)\n}\n\nfunc BenchmarkMetricVecWithLabelValues4Keys10ValueCardinality(b *testing.B) {\n\tbenchmarkMetricVecWithLabelValuesCardinality(b, 4, 10)\n}\n\nfunc BenchmarkMetricVecWithLabelValues2Keys100ValueCardinality(b *testing.B) {\n\tbenchmarkMetricVecWithLabelValuesCardinality(b, 2, 100)\n}\n\nfunc BenchmarkMetricVecWithLabelValues10Keys100ValueCardinality(b *testing.B) {\n\tbenchmarkMetricVecWithLabelValuesCardinality(b, 10, 100)\n}\n\nfunc BenchmarkMetricVecWithLabelValues10Keys1000ValueCardinality(b *testing.B) {\n\tbenchmarkMetricVecWithLabelValuesCardinality(b, 10, 1000)\n}\n\nfunc benchmarkMetricVecWithLabelValuesCardinality(b *testing.B, nkeys, nvalues int) {\n\tlabels := map[string][]string{}\n\n\tfor i := 0; i < nkeys; i++ {\n\t\tvar (\n\t\t\tk  = fmt.Sprintf(\"key-%v\", i)\n\t\t\tvs = make([]string, 0, nvalues)\n\t\t)\n\t\tfor j := 0; j < nvalues; j++ {\n\t\t\tvs = append(vs, fmt.Sprintf(\"value-%v\", j))\n\t\t}\n\t\tlabels[k] = vs\n\t}\n\n\tbenchmarkMetricVecWithLabelValues(b, labels)\n}\n\nfunc benchmarkMetricVecWithLabelValues(b *testing.B, labels map[string][]string) {\n\tvar keys []string\n\tfor k := range labels { // Map order dependent, who cares though.\n\t\tkeys = append(keys, k)\n\t}\n\n\tvalues := make([]string, len(labels)) // Value cache for permutations.\n\tvec := NewGaugeVec(\n\t\tGaugeOpts{\n\t\t\tName: \"test\",\n\t\t\tHelp: \"helpless\",\n\t\t},\n\t\tkeys,\n\t)\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\t// Varies input across provided map entries based on key size.\n\t\tfor j, k := range keys {\n\t\t\tcandidates := 
labels[k]\n\t\t\tvalues[j] = candidates[i%len(candidates)]\n\t\t}\n\n\t\tvec.WithLabelValues(values...)\n\t}\n}\n"
  },
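  {
    "path": "vendor/github.com/prometheus/client_golang/prometheus/vec_usage_example_test.go",
    "content": "// This file is an editor-added illustrative sketch, not part of the upstream\n// client_golang library. It contrasts the two deletion methods documented in\n// vec.go: DeleteLabelValues is positional and therefore order-sensitive,\n// while Delete takes a Labels map and is order-independent at the cost of\n// building the map. Metric and label names are invented for illustration.\n\npackage prometheus\n\nimport \"fmt\"\n\nfunc ExampleGaugeVec_delete() {\n\tvec := NewGaugeVec(\n\t\tGaugeOpts{Name: \"tasks_in_flight\", Help: \"Tasks per queue and shard.\"},\n\t\t[]string{\"queue\", \"shard\"},\n\t)\n\n\tvec.WithLabelValues(\"mail\", \"0\").Set(3)\n\t// Positional form: values must follow the label order from NewGaugeVec.\n\tfmt.Println(vec.DeleteLabelValues(\"mail\", \"0\"))\n\t// Swapped values never match an existing metric, so nothing is deleted.\n\tfmt.Println(vec.DeleteLabelValues(\"0\", \"mail\"))\n\n\tvec.With(Labels{\"queue\": \"mail\", \"shard\": \"0\"}).Set(3)\n\t// Map form: the order of the keys is irrelevant.\n\tfmt.Println(vec.Delete(Labels{\"shard\": \"0\", \"queue\": \"mail\"}))\n\n\t// Output:\n\t// true\n\t// false\n\t// true\n}\n"
  },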
  {
    "path": "vendor/github.com/prometheus/client_model/.gitignore",
    "content": "target/\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_model/CONTRIBUTING.md",
    "content": "# Contributing\n\nPrometheus uses GitHub to manage reviews of pull requests.\n\n* If you have a trivial fix or improvement, go ahead and create a pull request,\n  addressing (with `@...`) the maintainer of this repository (see\n  [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.\n\n* If you plan to do something more involved, first discuss your ideas\n  on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).\n  This will avoid unnecessary work and surely give you and us a good deal\n  of inspiration.\n\n* Relevant coding style guidelines are the [Go Code Review\n  Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)\n  and the _Formatting and style_ section of Peter Bourgon's [Go: Best\n  Practices for Production\n  Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_model/LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. 
For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. 
Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. (Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_model/MAINTAINERS.md",
    "content": "* Björn Rabenstein <beorn@soundcloud.com>\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_model/Makefile",
    "content": "# Copyright 2013 Prometheus Team\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nKEY_ID ?= _DEFINE_ME_\n\nall: cpp go java python ruby\n\nSUFFIXES:\n\ncpp: cpp/metrics.pb.cc cpp/metrics.pb.h\n\ncpp/metrics.pb.cc: metrics.proto\n\tprotoc $< --cpp_out=cpp/\n\ncpp/metrics.pb.h: metrics.proto\n\tprotoc $< --cpp_out=cpp/\n\ngo: go/metrics.pb.go\n\ngo/metrics.pb.go: metrics.proto\n\tprotoc $< --go_out=go/\n\njava: src/main/java/io/prometheus/client/Metrics.java pom.xml\n\tmvn clean compile package\n\nsrc/main/java/io/prometheus/client/Metrics.java: metrics.proto\n\tprotoc $< --java_out=src/main/java\n\npython: python/prometheus/client/model/metrics_pb2.py\n\npython/prometheus/client/model/metrics_pb2.py: metrics.proto\n\tprotoc $< --python_out=python/prometheus/client/model\n\nruby:\n\t$(MAKE) -C ruby build\n\nclean:\n\t-rm -rf cpp/*\n\t-rm -rf go/*\n\t-rm -rf java/*\n\t-rm -rf python/*\n\t-$(MAKE) -C ruby clean\n\t-mvn clean\n\nmaven-deploy-snapshot: java\n\tmvn clean deploy -Dgpg.keyname=$(KEY_ID) -DperformRelease=true\n\nmaven-deploy-release: java\n\tmvn clean release:clean release:prepare release:perform -Dgpg.keyname=$(KEY_ID) -DperformRelease=true\n\n.PHONY: all clean cpp go java maven-deploy-snapshot maven-deploy-release python ruby\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_model/NOTICE",
    "content": "Data model artifacts for Prometheus.\nCopyright 2012-2015 The Prometheus Authors\n\nThis product includes software developed at\nSoundCloud Ltd. (http://soundcloud.com/).\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_model/README.md",
    "content": "# Background\nUnder most circumstances, manually downloading this repository should never\nbe required.\n\n# Prerequisites\n# Base\n* [Google Protocol Buffers](https://developers.google.com/protocol-buffers)\n\n## Java\n* [Apache Maven](http://maven.apache.org)\n* [Prometheus Maven Repository](https://github.com/prometheus/io.prometheus-maven-repository) checked out into ../io.prometheus-maven-repository\n\n## Go\n*  [Go](http://golang.org)\n*  [goprotobuf](https://code.google.com/p/goprotobuf)\n\n## Ruby\n*  [Ruby](https://www.ruby-lang.org)\n*  [bundler](https://rubygems.org/gems/bundler)\n\n# Building\n    $ make\n\n# Getting Started\n  * The Go source code is periodically indexed: [Go Protocol Buffer Model](http://godoc.org/github.com/prometheus/client_model/go).\n  * All of the core developers are accessible via the [Prometheus Developers Mailinglist](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_model/go/metrics.pb.go",
    "content": "// Code generated by protoc-gen-go.\n// source: metrics.proto\n// DO NOT EDIT!\n\n/*\nPackage io_prometheus_client is a generated protocol buffer package.\n\nIt is generated from these files:\n\tmetrics.proto\n\nIt has these top-level messages:\n\tLabelPair\n\tGauge\n\tCounter\n\tQuantile\n\tSummary\n\tUntyped\n\tHistogram\n\tBucket\n\tMetric\n\tMetricFamily\n*/\npackage io_prometheus_client\n\nimport proto \"github.com/golang/protobuf/proto\"\nimport math \"math\"\n\n// Reference imports to suppress errors if they are not otherwise used.\nvar _ = proto.Marshal\nvar _ = math.Inf\n\ntype MetricType int32\n\nconst (\n\tMetricType_COUNTER   MetricType = 0\n\tMetricType_GAUGE     MetricType = 1\n\tMetricType_SUMMARY   MetricType = 2\n\tMetricType_UNTYPED   MetricType = 3\n\tMetricType_HISTOGRAM MetricType = 4\n)\n\nvar MetricType_name = map[int32]string{\n\t0: \"COUNTER\",\n\t1: \"GAUGE\",\n\t2: \"SUMMARY\",\n\t3: \"UNTYPED\",\n\t4: \"HISTOGRAM\",\n}\nvar MetricType_value = map[string]int32{\n\t\"COUNTER\":   0,\n\t\"GAUGE\":     1,\n\t\"SUMMARY\":   2,\n\t\"UNTYPED\":   3,\n\t\"HISTOGRAM\": 4,\n}\n\nfunc (x MetricType) Enum() *MetricType {\n\tp := new(MetricType)\n\t*p = x\n\treturn p\n}\nfunc (x MetricType) String() string {\n\treturn proto.EnumName(MetricType_name, int32(x))\n}\nfunc (x *MetricType) UnmarshalJSON(data []byte) error {\n\tvalue, err := proto.UnmarshalJSONEnum(MetricType_value, data, \"MetricType\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = MetricType(value)\n\treturn nil\n}\n\ntype LabelPair struct {\n\tName             *string `protobuf:\"bytes,1,opt,name=name\" json:\"name,omitempty\"`\n\tValue            *string `protobuf:\"bytes,2,opt,name=value\" json:\"value,omitempty\"`\n\tXXX_unrecognized []byte  `json:\"-\"`\n}\n\nfunc (m *LabelPair) Reset()         { *m = LabelPair{} }\nfunc (m *LabelPair) String() string { return proto.CompactTextString(m) }\nfunc (*LabelPair) ProtoMessage()    {}\n\nfunc (m *LabelPair) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *LabelPair) GetValue() string {\n\tif m != nil && m.Value != nil {\n\t\treturn *m.Value\n\t}\n\treturn \"\"\n}\n\ntype Gauge struct {\n\tValue            *float64 `protobuf:\"fixed64,1,opt,name=value\" json:\"value,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *Gauge) Reset()         { *m = Gauge{} }\nfunc (m *Gauge) String() string { return proto.CompactTextString(m) }\nfunc (*Gauge) ProtoMessage()    {}\n\nfunc (m *Gauge) GetValue() float64 {\n\tif m != nil && m.Value != nil {\n\t\treturn *m.Value\n\t}\n\treturn 0\n}\n\ntype Counter struct {\n\tValue            *float64 `protobuf:\"fixed64,1,opt,name=value\" json:\"value,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *Counter) Reset()         { *m = Counter{} }\nfunc (m *Counter) String() string { return proto.CompactTextString(m) }\nfunc (*Counter) ProtoMessage()    {}\n\nfunc (m *Counter) GetValue() float64 {\n\tif m != nil && m.Value != nil {\n\t\treturn *m.Value\n\t}\n\treturn 0\n}\n\ntype Quantile struct {\n\tQuantile         *float64 `protobuf:\"fixed64,1,opt,name=quantile\" json:\"quantile,omitempty\"`\n\tValue            *float64 `protobuf:\"fixed64,2,opt,name=value\" json:\"value,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *Quantile) Reset()         { *m = Quantile{} }\nfunc (m *Quantile) String() string { return proto.CompactTextString(m) }\nfunc (*Quantile) ProtoMessage()    {}\n\nfunc (m *Quantile) 
GetQuantile() float64 {\n\tif m != nil && m.Quantile != nil {\n\t\treturn *m.Quantile\n\t}\n\treturn 0\n}\n\nfunc (m *Quantile) GetValue() float64 {\n\tif m != nil && m.Value != nil {\n\t\treturn *m.Value\n\t}\n\treturn 0\n}\n\ntype Summary struct {\n\tSampleCount      *uint64     `protobuf:\"varint,1,opt,name=sample_count\" json:\"sample_count,omitempty\"`\n\tSampleSum        *float64    `protobuf:\"fixed64,2,opt,name=sample_sum\" json:\"sample_sum,omitempty\"`\n\tQuantile         []*Quantile `protobuf:\"bytes,3,rep,name=quantile\" json:\"quantile,omitempty\"`\n\tXXX_unrecognized []byte      `json:\"-\"`\n}\n\nfunc (m *Summary) Reset()         { *m = Summary{} }\nfunc (m *Summary) String() string { return proto.CompactTextString(m) }\nfunc (*Summary) ProtoMessage()    {}\n\nfunc (m *Summary) GetSampleCount() uint64 {\n\tif m != nil && m.SampleCount != nil {\n\t\treturn *m.SampleCount\n\t}\n\treturn 0\n}\n\nfunc (m *Summary) GetSampleSum() float64 {\n\tif m != nil && m.SampleSum != nil {\n\t\treturn *m.SampleSum\n\t}\n\treturn 0\n}\n\nfunc (m *Summary) GetQuantile() []*Quantile {\n\tif m != nil {\n\t\treturn m.Quantile\n\t}\n\treturn nil\n}\n\ntype Untyped struct {\n\tValue            *float64 `protobuf:\"fixed64,1,opt,name=value\" json:\"value,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *Untyped) Reset()         { *m = Untyped{} }\nfunc (m *Untyped) String() string { return proto.CompactTextString(m) }\nfunc (*Untyped) ProtoMessage()    {}\n\nfunc (m *Untyped) GetValue() float64 {\n\tif m != nil && m.Value != nil {\n\t\treturn *m.Value\n\t}\n\treturn 0\n}\n\ntype Histogram struct {\n\tSampleCount      *uint64   `protobuf:\"varint,1,opt,name=sample_count\" json:\"sample_count,omitempty\"`\n\tSampleSum        *float64  `protobuf:\"fixed64,2,opt,name=sample_sum\" json:\"sample_sum,omitempty\"`\n\tBucket           []*Bucket `protobuf:\"bytes,3,rep,name=bucket\" json:\"bucket,omitempty\"`\n\tXXX_unrecognized []byte    `json:\"-\"`\n}\n\nfunc (m *Histogram) Reset()         { *m = Histogram{} }\nfunc (m *Histogram) String() string { return proto.CompactTextString(m) }\nfunc (*Histogram) ProtoMessage()    {}\n\nfunc (m *Histogram) GetSampleCount() uint64 {\n\tif m != nil && m.SampleCount != nil {\n\t\treturn *m.SampleCount\n\t}\n\treturn 0\n}\n\nfunc (m *Histogram) GetSampleSum() float64 {\n\tif m != nil && m.SampleSum != nil {\n\t\treturn *m.SampleSum\n\t}\n\treturn 0\n}\n\nfunc (m *Histogram) GetBucket() []*Bucket {\n\tif m != nil {\n\t\treturn m.Bucket\n\t}\n\treturn nil\n}\n\ntype Bucket struct {\n\tCumulativeCount  *uint64  `protobuf:\"varint,1,opt,name=cumulative_count\" json:\"cumulative_count,omitempty\"`\n\tUpperBound       *float64 `protobuf:\"fixed64,2,opt,name=upper_bound\" json:\"upper_bound,omitempty\"`\n\tXXX_unrecognized []byte   `json:\"-\"`\n}\n\nfunc (m *Bucket) Reset()         { *m = Bucket{} }\nfunc (m *Bucket) String() string { return proto.CompactTextString(m) }\nfunc (*Bucket) ProtoMessage()    {}\n\nfunc (m *Bucket) GetCumulativeCount() uint64 {\n\tif m != nil && m.CumulativeCount != nil {\n\t\treturn *m.CumulativeCount\n\t}\n\treturn 0\n}\n\nfunc (m *Bucket) GetUpperBound() float64 {\n\tif m != nil && m.UpperBound != nil {\n\t\treturn *m.UpperBound\n\t}\n\treturn 0\n}\n\ntype Metric struct {\n\tLabel            []*LabelPair `protobuf:\"bytes,1,rep,name=label\" json:\"label,omitempty\"`\n\tGauge            *Gauge       `protobuf:\"bytes,2,opt,name=gauge\" json:\"gauge,omitempty\"`\n\tCounter          *Counter     
`protobuf:\"bytes,3,opt,name=counter\" json:\"counter,omitempty\"`\n\tSummary          *Summary     `protobuf:\"bytes,4,opt,name=summary\" json:\"summary,omitempty\"`\n\tUntyped          *Untyped     `protobuf:\"bytes,5,opt,name=untyped\" json:\"untyped,omitempty\"`\n\tHistogram        *Histogram   `protobuf:\"bytes,7,opt,name=histogram\" json:\"histogram,omitempty\"`\n\tTimestampMs      *int64       `protobuf:\"varint,6,opt,name=timestamp_ms\" json:\"timestamp_ms,omitempty\"`\n\tXXX_unrecognized []byte       `json:\"-\"`\n}\n\nfunc (m *Metric) Reset()         { *m = Metric{} }\nfunc (m *Metric) String() string { return proto.CompactTextString(m) }\nfunc (*Metric) ProtoMessage()    {}\n\nfunc (m *Metric) GetLabel() []*LabelPair {\n\tif m != nil {\n\t\treturn m.Label\n\t}\n\treturn nil\n}\n\nfunc (m *Metric) GetGauge() *Gauge {\n\tif m != nil {\n\t\treturn m.Gauge\n\t}\n\treturn nil\n}\n\nfunc (m *Metric) GetCounter() *Counter {\n\tif m != nil {\n\t\treturn m.Counter\n\t}\n\treturn nil\n}\n\nfunc (m *Metric) GetSummary() *Summary {\n\tif m != nil {\n\t\treturn m.Summary\n\t}\n\treturn nil\n}\n\nfunc (m *Metric) GetUntyped() *Untyped {\n\tif m != nil {\n\t\treturn m.Untyped\n\t}\n\treturn nil\n}\n\nfunc (m *Metric) GetHistogram() *Histogram {\n\tif m != nil {\n\t\treturn m.Histogram\n\t}\n\treturn nil\n}\n\nfunc (m *Metric) GetTimestampMs() int64 {\n\tif m != nil && m.TimestampMs != nil {\n\t\treturn *m.TimestampMs\n\t}\n\treturn 0\n}\n\ntype MetricFamily struct {\n\tName             *string     `protobuf:\"bytes,1,opt,name=name\" json:\"name,omitempty\"`\n\tHelp             *string     `protobuf:\"bytes,2,opt,name=help\" json:\"help,omitempty\"`\n\tType             *MetricType `protobuf:\"varint,3,opt,name=type,enum=io.prometheus.client.MetricType\" json:\"type,omitempty\"`\n\tMetric           []*Metric   `protobuf:\"bytes,4,rep,name=metric\" json:\"metric,omitempty\"`\n\tXXX_unrecognized []byte      `json:\"-\"`\n}\n\nfunc (m *MetricFamily) Reset()         { *m = MetricFamily{} }\nfunc (m *MetricFamily) String() string { return proto.CompactTextString(m) }\nfunc (*MetricFamily) ProtoMessage()    {}\n\nfunc (m *MetricFamily) GetName() string {\n\tif m != nil && m.Name != nil {\n\t\treturn *m.Name\n\t}\n\treturn \"\"\n}\n\nfunc (m *MetricFamily) GetHelp() string {\n\tif m != nil && m.Help != nil {\n\t\treturn *m.Help\n\t}\n\treturn \"\"\n}\n\nfunc (m *MetricFamily) GetType() MetricType {\n\tif m != nil && m.Type != nil {\n\t\treturn *m.Type\n\t}\n\treturn MetricType_COUNTER\n}\n\nfunc (m *MetricFamily) GetMetric() []*Metric {\n\tif m != nil {\n\t\treturn m.Metric\n\t}\n\treturn nil\n}\n\nfunc init() {\n\tproto.RegisterEnum(\"io.prometheus.client.MetricType\", MetricType_name, MetricType_value)\n}\n"
  },
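  {
    "path": "examples/client_model_dto_sketch.go",
    "content": "// Illustrative sketch, not part of the vendored upstream: it shows how the\n// generated client_model types above are typically populated and read. The\n// file path and metric names are made up for the example.\n//\n// All proto2 fields are pointers, so the proto.String/proto.Float64 helpers\n// build the values, and the generated Get* accessors are nil-safe.\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/golang/protobuf/proto\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n)\n\nfunc main() {\n\tmf := &dto.MetricFamily{\n\t\tName: proto.String(\"http_requests_total\"),\n\t\tHelp: proto.String(\"Total number of HTTP requests.\"),\n\t\tType: dto.MetricType_COUNTER.Enum(),\n\t\tMetric: []*dto.Metric{\n\t\t\t{\n\t\t\t\tLabel: []*dto.LabelPair{\n\t\t\t\t\t{Name: proto.String(\"code\"), Value: proto.String(\"200\")},\n\t\t\t\t},\n\t\t\t\tCounter: &dto.Counter{Value: proto.Float64(42)},\n\t\t\t},\n\t\t},\n\t}\n\n\t// Get* returns the zero value on nil receivers and unset fields, so no\n\t// explicit nil checks are needed when reading the DTO back.\n\tfmt.Println(mf.GetName(), mf.GetType(), mf.GetMetric()[0].GetCounter().GetValue())\n}\n"
  },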
  {
    "path": "vendor/github.com/prometheus/client_model/metrics.proto",
    "content": "// Copyright 2013 Prometheus Team\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\nsyntax = \"proto2\";\n\npackage io.prometheus.client;\noption java_package = \"io.prometheus.client\";\n\nmessage LabelPair {\n  optional string name  = 1;\n  optional string value = 2;\n}\n\nenum MetricType {\n  COUNTER    = 0;\n  GAUGE      = 1;\n  SUMMARY    = 2;\n  UNTYPED    = 3;\n  HISTOGRAM  = 4;\n}\n\nmessage Gauge {\n  optional double value = 1;\n}\n\nmessage Counter {\n  optional double value = 1;\n}\n\nmessage Quantile {\n  optional double quantile = 1;\n  optional double value    = 2;\n}\n\nmessage Summary {\n  optional uint64   sample_count = 1;\n  optional double   sample_sum   = 2;\n  repeated Quantile quantile     = 3;\n}\n\nmessage Untyped {\n  optional double value = 1;\n}\n\nmessage Histogram {\n  optional uint64 sample_count = 1;\n  optional double sample_sum   = 2;\n  repeated Bucket bucket       = 3; // Ordered in increasing order of upper_bound, +Inf bucket is optional.\n}\n\nmessage Bucket {\n  optional uint64 cumulative_count = 1; // Cumulative in increasing order.\n  optional double upper_bound = 2;      // Inclusive.\n}\n\nmessage Metric {\n  repeated LabelPair label        = 1;\n  optional Gauge     gauge        = 2;\n  optional Counter   counter      = 3;\n  optional Summary   summary      = 4;\n  optional Untyped   untyped      = 5;\n  optional Histogram histogram    = 7;\n  optional int64     timestamp_ms = 6;\n}\n\nmessage MetricFamily {\n  optional string     name   = 1;\n  optional string     help   = 2;\n  optional MetricType type   = 3;\n  repeated Metric     metric = 4;\n}\n"
  },
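  {
    "path": "examples/histogram_buckets_sketch.go",
    "content": "// Illustrative sketch, not part of the vendored upstream: it encodes the\n// Histogram semantics documented in metrics.proto. cumulative_count is\n// cumulative, upper_bound is inclusive, buckets are ordered by increasing\n// upper_bound, and the +Inf bucket (whose count equals sample_count) may be\n// omitted. The values here are made up.\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/golang/protobuf/proto\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n)\n\nfunc main() {\n\th := &dto.Histogram{\n\t\tSampleCount: proto.Uint64(6),\n\t\tSampleSum:   proto.Float64(4.2),\n\t\tBucket: []*dto.Bucket{\n\t\t\t// 3 observations <= 0.1 and 5 observations <= 0.5 (cumulative,\n\t\t\t// not per-bucket). The optional +Inf bucket is omitted here;\n\t\t\t// consumers fall back to sample_count for it.\n\t\t\t{UpperBound: proto.Float64(0.1), CumulativeCount: proto.Uint64(3)},\n\t\t\t{UpperBound: proto.Float64(0.5), CumulativeCount: proto.Uint64(5)},\n\t\t},\n\t}\n\tfmt.Println(h.GetSampleCount(), h.GetBucket()[1].GetCumulativeCount())\n}\n"
  },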
  {
    "path": "vendor/github.com/prometheus/client_model/pom.xml",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<project xmlns=\"http://maven.apache.org/POM/4.0.0\" xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd\">\n    <modelVersion>4.0.0</modelVersion>\n\n    <groupId>io.prometheus.client</groupId>\n    <artifactId>model</artifactId>\n    <version>0.0.3-SNAPSHOT</version>\n\n    <parent>\n        <groupId>org.sonatype.oss</groupId>\n        <artifactId>oss-parent</artifactId>\n        <version>7</version>\n    </parent>\n\n    <name>Prometheus Client Data Model</name>\n    <url>http://github.com/prometheus/client_model</url>\n    <description>\n      Prometheus Client Data Model: Generated Protocol Buffer Assets\n    </description>\n\n    <licenses>\n        <license>\n            <name>The Apache Software License, Version 2.0</name>\n            <url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>\n            <distribution>repo</distribution>\n        </license>\n    </licenses>\n\n    <scm>\n        <connection>scm:git:git@github.com:prometheus/client_model.git</connection>\n        <developerConnection>scm:git:git@github.com:prometheus/client_model.git</developerConnection>\n        <url>git@github.com:prometheus/client_model.git</url>\n    </scm>\n\n    <developers>\n        <developer>\n            <id>mtp</id>\n            <name>Matt T. Proud</name>\n            <email>matt.proud@gmail.com</email>\n        </developer>\n    </developers>\n\n    <dependencies>\n      <dependency>\n        <groupId>com.google.protobuf</groupId>\n        <artifactId>protobuf-java</artifactId>\n        <version>2.5.0</version>\n      </dependency>\n    </dependencies>\n\n    <build>\n        <plugins>\n            <plugin>\n                <groupId>org.apache.maven.plugins</groupId>\n                <artifactId>maven-javadoc-plugin</artifactId>\n                <version>2.8</version>\n                <configuration>\n                    <encoding>UTF-8</encoding>\n                    <docencoding>UTF-8</docencoding>\n                    <linksource>true</linksource>\n                </configuration>\n                <executions>\n                    <execution>\n                        <id>generate-javadoc-site-report</id>\n                        <phase>site</phase>\n                        <goals>\n                            <goal>javadoc</goal>\n                        </goals>\n                    </execution>\n                    <execution>\n                      <id>attach-javadocs</id>\n                      <goals>\n                        <goal>jar</goal>\n                      </goals>\n                    </execution>\n                </executions>\n            </plugin>\n            <plugin>\n                <artifactId>maven-compiler-plugin</artifactId>\n                <configuration>\n                    <source>1.6</source>\n                    <target>1.6</target>\n                </configuration>\n                <version>3.1</version>\n            </plugin>\n            <plugin>\n              <groupId>org.apache.maven.plugins</groupId>\n              <artifactId>maven-source-plugin</artifactId>\n              <version>2.2.1</version>\n              <executions>\n                <execution>\n                  <id>attach-sources</id>\n                  <goals>\n                    <goal>jar</goal>\n                  </goals>\n                </execution>\n              </executions>\n            </plugin>\n        
</plugins>\n    </build>\n    <profiles>\n        <profile>\n            <id>release-sign-artifacts</id>\n            <activation>\n                <property>\n                    <name>performRelease</name>\n                    <value>true</value>\n                </property>\n            </activation>\n            <build>\n                <plugins>\n                    <plugin>\n                        <groupId>org.apache.maven.plugins</groupId>\n                        <artifactId>maven-gpg-plugin</artifactId>\n                        <version>1.4</version>\n                        <executions>\n                            <execution>\n                                <id>sign-artifacts</id>\n                                <phase>verify</phase>\n                                <goals>\n                                    <goal>sign</goal>\n                                </goals>\n                            </execution>\n                        </executions>\n                    </plugin>\n                </plugins>\n            </build>\n        </profile>\n    </profiles>\n</project>\n"
  },
  {
    "path": "vendor/github.com/prometheus/client_model/setup.py",
    "content": "#!/usr/bin/python\n\nfrom setuptools import setup\n\nsetup(\n    name = 'prometheus_client_model',\n    version = '0.0.1',\n    author = 'Matt T. Proud',\n    author_email = 'matt.proud@gmail.com',\n    description = 'Data model artifacts for the Prometheus client.',\n    license = 'Apache License 2.0',\n    url = 'http://github.com/prometheus/client_model',\n    packages = ['prometheus', 'prometheus/client', 'prometheus/client/model'],\n    package_dir = {'': 'python'},\n    requires = ['protobuf(==2.4.1)'],\n    platforms = 'Platform Independent',\n    classifiers = ['Development Status :: 3 - Alpha',\n                   'Intended Audience :: Developers',\n                   'Intended Audience :: System Administrators',\n                   'License :: OSI Approved :: Apache Software License',\n                   'Operating System :: OS Independent',\n                   'Topic :: Software Development :: Testing',\n                   'Topic :: System :: Monitoring'])\n"
  },
  {
    "path": "vendor/github.com/prometheus/common/.travis.yml",
    "content": "sudo: false\n\nlanguage: go\ngo:\n  - 1.7.5\n  - tip\n"
  },
  {
    "path": "vendor/github.com/prometheus/common/CONTRIBUTING.md",
    "content": "# Contributing\n\nPrometheus uses GitHub to manage reviews of pull requests.\n\n* If you have a trivial fix or improvement, go ahead and create a pull request,\n  addressing (with `@...`) the maintainer of this repository (see\n  [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.\n\n* If you plan to do something more involved, first discuss your ideas\n  on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).\n  This will avoid unnecessary work and surely give you and us a good deal\n  of inspiration.\n\n* Relevant coding style guidelines are the [Go Code Review\n  Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)\n  and the _Formatting and style_ section of Peter Bourgon's [Go: Best\n  Practices for Production\n  Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).\n"
  },
  {
    "path": "vendor/github.com/prometheus/common/LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. 
For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. 
Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. (Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "vendor/github.com/prometheus/common/MAINTAINERS.md",
    "content": "* Fabian Reinartz <fabian.reinartz@coreos.com>\n"
  },
  {
    "path": "vendor/github.com/prometheus/common/NOTICE",
    "content": "Common libraries shared by Prometheus Go components.\nCopyright 2015 The Prometheus Authors\n\nThis product includes software developed at\nSoundCloud Ltd. (http://soundcloud.com/).\n"
  },
  {
    "path": "vendor/github.com/prometheus/common/README.md",
    "content": "# Common\n[![Build Status](https://travis-ci.org/prometheus/common.svg)](https://travis-ci.org/prometheus/common)\n\nThis repository contains Go libraries that are shared across Prometheus\ncomponents and libraries.\n\n* **config**: Common configuration structures\n* **expfmt**: Decoding and encoding for the exposition format\n* **log**: A logging wrapper around [logrus](https://github.com/sirupsen/logrus)\n* **model**: Shared data structures\n* **route**: A routing wrapper around [httprouter](https://github.com/julienschmidt/httprouter) using `context.Context`\n* **version**: Version informations and metric\n"
  },
  {
    "path": "vendor/github.com/prometheus/common/expfmt/bench_test.go",
    "content": "// Copyright 2015 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage expfmt\n\nimport (\n\t\"bytes\"\n\t\"compress/gzip\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"testing\"\n\n\t\"github.com/matttproud/golang_protobuf_extensions/pbutil\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n)\n\nvar parser TextParser\n\n// Benchmarks to show how much penalty text format parsing actually inflicts.\n//\n// Example results on Linux 3.13.0, Intel(R) Core(TM) i7-4700MQ CPU @ 2.40GHz, go1.4.\n//\n// BenchmarkParseText          1000           1188535 ns/op          205085 B/op       6135 allocs/op\n// BenchmarkParseTextGzip      1000           1376567 ns/op          246224 B/op       6151 allocs/op\n// BenchmarkParseProto        10000            172790 ns/op           52258 B/op       1160 allocs/op\n// BenchmarkParseProtoGzip     5000            324021 ns/op           94931 B/op       1211 allocs/op\n// BenchmarkParseProtoMap     10000            187946 ns/op           58714 B/op       1203 allocs/op\n//\n// CONCLUSION: The overhead for the map is negligible. Text format needs ~5x more allocations.\n// Without compression, it needs ~7x longer, but with compression (the more relevant scenario),\n// the difference becomes less relevant, only ~4x.\n//\n// The test data contains 248 samples.\n\n// BenchmarkParseText benchmarks the parsing of a text-format scrape into metric\n// family DTOs.\nfunc BenchmarkParseText(b *testing.B) {\n\tb.StopTimer()\n\tdata, err := ioutil.ReadFile(\"testdata/text\")\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\tb.StartTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\tif _, err := parser.TextToMetricFamilies(bytes.NewReader(data)); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t}\n}\n\n// BenchmarkParseTextGzip benchmarks the parsing of a gzipped text-format scrape\n// into metric family DTOs.\nfunc BenchmarkParseTextGzip(b *testing.B) {\n\tb.StopTimer()\n\tdata, err := ioutil.ReadFile(\"testdata/text.gz\")\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\tb.StartTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\tin, err := gzip.NewReader(bytes.NewReader(data))\n\t\tif err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\tif _, err := parser.TextToMetricFamilies(in); err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t}\n}\n\n// BenchmarkParseProto benchmarks the parsing of a protobuf-format scrape into\n// metric family DTOs. Note that this does not build a map of metric families\n// (as the text version does), because it is not required for Prometheus\n// ingestion either. 
(However, it is required for the text-format parsing, as\n// the metric family might be sprinkled all over the text, while the\n// protobuf-format guarantees bundling at one place.)\nfunc BenchmarkParseProto(b *testing.B) {\n\tb.StopTimer()\n\tdata, err := ioutil.ReadFile(\"testdata/protobuf\")\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\tb.StartTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\tfamily := &dto.MetricFamily{}\n\t\tin := bytes.NewReader(data)\n\t\tfor {\n\t\t\tfamily.Reset()\n\t\t\tif _, err := pbutil.ReadDelimited(in, family); err != nil {\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// BenchmarkParseProtoGzip is like BenchmarkParseProto above, but parses gzipped\n// protobuf format.\nfunc BenchmarkParseProtoGzip(b *testing.B) {\n\tb.StopTimer()\n\tdata, err := ioutil.ReadFile(\"testdata/protobuf.gz\")\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\tb.StartTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\tfamily := &dto.MetricFamily{}\n\t\tin, err := gzip.NewReader(bytes.NewReader(data))\n\t\tif err != nil {\n\t\t\tb.Fatal(err)\n\t\t}\n\t\tfor {\n\t\t\tfamily.Reset()\n\t\t\tif _, err := pbutil.ReadDelimited(in, family); err != nil {\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t}\n\t}\n}\n\n// BenchmarkParseProtoMap is like BenchmarkParseProto but DOES put the parsed\n// metric family DTOs into a map. This is not happening during Prometheus\n// ingestion. It is just here to measure the overhead of that map creation and\n// separate it from the overhead of the text format parsing.\nfunc BenchmarkParseProtoMap(b *testing.B) {\n\tb.StopTimer()\n\tdata, err := ioutil.ReadFile(\"testdata/protobuf\")\n\tif err != nil {\n\t\tb.Fatal(err)\n\t}\n\tb.StartTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\tfamilies := map[string]*dto.MetricFamily{}\n\t\tin := bytes.NewReader(data)\n\t\tfor {\n\t\t\tfamily := &dto.MetricFamily{}\n\t\t\tif _, err := pbutil.ReadDelimited(in, family); err != nil {\n\t\t\t\tif err == io.EOF {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tb.Fatal(err)\n\t\t\t}\n\t\t\tfamilies[family.GetName()] = family\n\t\t}\n\t}\n}\n"
  },
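  {
    "path": "examples/delimited_protobuf_sketch.go",
    "content": "// Illustrative sketch, not part of the vendored upstream: it demonstrates\n// the length-delimited protobuf framing exercised by the benchmarks above.\n// Each MetricFamily is written with a varint length prefix and read back in\n// a loop until io.EOF, mirroring how a protobuf-format scrape is consumed.\n// The metric names are made up.\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"github.com/matttproud/golang_protobuf_extensions/pbutil\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n)\n\nfunc main() {\n\tvar buf bytes.Buffer\n\tfor _, name := range []string{\"foo\", \"bar\"} {\n\t\tmf := &dto.MetricFamily{\n\t\t\tName: proto.String(name),\n\t\t\tType: dto.MetricType_UNTYPED.Enum(),\n\t\t\tMetric: []*dto.Metric{\n\t\t\t\t{Untyped: &dto.Untyped{Value: proto.Float64(1)}},\n\t\t\t},\n\t\t}\n\t\tif _, err := pbutil.WriteDelimited(&buf, mf); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\t// Reuse a single DTO and Reset it per message, as the benchmarks do.\n\tfamily := &dto.MetricFamily{}\n\tfor {\n\t\tfamily.Reset()\n\t\tif _, err := pbutil.ReadDelimited(&buf, family); err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tfmt.Println(family.GetName())\n\t}\n}\n"
  },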
  {
    "path": "vendor/github.com/prometheus/common/expfmt/decode.go",
    "content": "// Copyright 2015 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage expfmt\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"mime\"\n\t\"net/http\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n\n\t\"github.com/matttproud/golang_protobuf_extensions/pbutil\"\n\t\"github.com/prometheus/common/model\"\n)\n\n// Decoder types decode an input stream into metric families.\ntype Decoder interface {\n\tDecode(*dto.MetricFamily) error\n}\n\n// DecodeOptions contains options used by the Decoder and in sample extraction.\ntype DecodeOptions struct {\n\t// Timestamp is added to each value from the stream that has no explicit timestamp set.\n\tTimestamp model.Time\n}\n\n// ResponseFormat extracts the correct format from a HTTP response header.\n// If no matching format can be found FormatUnknown is returned.\nfunc ResponseFormat(h http.Header) Format {\n\tct := h.Get(hdrContentType)\n\n\tmediatype, params, err := mime.ParseMediaType(ct)\n\tif err != nil {\n\t\treturn FmtUnknown\n\t}\n\n\tconst textType = \"text/plain\"\n\n\tswitch mediatype {\n\tcase ProtoType:\n\t\tif p, ok := params[\"proto\"]; ok && p != ProtoProtocol {\n\t\t\treturn FmtUnknown\n\t\t}\n\t\tif e, ok := params[\"encoding\"]; ok && e != \"delimited\" {\n\t\t\treturn FmtUnknown\n\t\t}\n\t\treturn FmtProtoDelim\n\n\tcase textType:\n\t\tif v, ok := params[\"version\"]; ok && v != TextVersion {\n\t\t\treturn FmtUnknown\n\t\t}\n\t\treturn FmtText\n\t}\n\n\treturn FmtUnknown\n}\n\n// NewDecoder returns a new decoder based on the given input format.\n// If the input format does not imply otherwise, a text format decoder is returned.\nfunc NewDecoder(r io.Reader, format Format) Decoder {\n\tswitch format {\n\tcase FmtProtoDelim:\n\t\treturn &protoDecoder{r: r}\n\t}\n\treturn &textDecoder{r: r}\n}\n\n// protoDecoder implements the Decoder interface for protocol buffers.\ntype protoDecoder struct {\n\tr io.Reader\n}\n\n// Decode implements the Decoder interface.\nfunc (d *protoDecoder) Decode(v *dto.MetricFamily) error {\n\t_, err := pbutil.ReadDelimited(d.r, v)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !model.IsValidMetricName(model.LabelValue(v.GetName())) {\n\t\treturn fmt.Errorf(\"invalid metric name %q\", v.GetName())\n\t}\n\tfor _, m := range v.GetMetric() {\n\t\tif m == nil {\n\t\t\tcontinue\n\t\t}\n\t\tfor _, l := range m.GetLabel() {\n\t\t\tif l == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !model.LabelValue(l.GetValue()).IsValid() {\n\t\t\t\treturn fmt.Errorf(\"invalid label value %q\", l.GetValue())\n\t\t\t}\n\t\t\tif !model.LabelName(l.GetName()).IsValid() {\n\t\t\t\treturn fmt.Errorf(\"invalid label name %q\", l.GetName())\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}\n\n// textDecoder implements the Decoder interface for the text protocol.\ntype textDecoder struct {\n\tr    io.Reader\n\tp    TextParser\n\tfams []*dto.MetricFamily\n}\n\n// Decode implements the Decoder interface.\nfunc (d *textDecoder) Decode(v *dto.MetricFamily) error {\n\t// TODO(fabxc): Wrap this as a line 
reader to make streaming safer.\n\tif len(d.fams) == 0 {\n\t\t// No cached metric families, read everything and parse metrics.\n\t\tfams, err := d.p.TextToMetricFamilies(d.r)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(fams) == 0 {\n\t\t\treturn io.EOF\n\t\t}\n\t\td.fams = make([]*dto.MetricFamily, 0, len(fams))\n\t\tfor _, f := range fams {\n\t\t\td.fams = append(d.fams, f)\n\t\t}\n\t}\n\n\t*v = *d.fams[0]\n\td.fams = d.fams[1:]\n\n\treturn nil\n}\n\n// SampleDecoder wraps a Decoder to extract samples from the metric families\n// decoded by the wrapped Decoder.\ntype SampleDecoder struct {\n\tDec  Decoder\n\tOpts *DecodeOptions\n\n\tf dto.MetricFamily\n}\n\n// Decode calls the Decode method of the wrapped Decoder and then extracts the\n// samples from the decoded MetricFamily into the provided model.Vector.\nfunc (sd *SampleDecoder) Decode(s *model.Vector) error {\n\terr := sd.Dec.Decode(&sd.f)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*s, err = extractSamples(&sd.f, sd.Opts)\n\treturn err\n}\n\n// ExtractSamples builds a slice of samples from the provided metric\n// families. If an error occurs during sample extraction, it continues to\n// extract from the remaining metric families. The returned error is the last\n// error that has occurred.\nfunc ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {\n\tvar (\n\t\tall     model.Vector\n\t\tlastErr error\n\t)\n\tfor _, f := range fams {\n\t\tsome, err := extractSamples(f, o)\n\t\tif err != nil {\n\t\t\tlastErr = err\n\t\t\tcontinue\n\t\t}\n\t\tall = append(all, some...)\n\t}\n\treturn all, lastErr\n}\n\nfunc extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) {\n\tswitch f.GetType() {\n\tcase dto.MetricType_COUNTER:\n\t\treturn extractCounter(o, f), nil\n\tcase dto.MetricType_GAUGE:\n\t\treturn extractGauge(o, f), nil\n\tcase dto.MetricType_SUMMARY:\n\t\treturn extractSummary(o, f), nil\n\tcase dto.MetricType_UNTYPED:\n\t\treturn extractUntyped(o, f), nil\n\tcase dto.MetricType_HISTOGRAM:\n\t\treturn extractHistogram(o, f), nil\n\t}\n\treturn nil, fmt.Errorf(\"expfmt.extractSamples: unknown metric family type %v\", f.GetType())\n}\n\nfunc extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {\n\tsamples := make(model.Vector, 0, len(f.Metric))\n\n\tfor _, m := range f.Metric {\n\t\tif m.Counter == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tlset := make(model.LabelSet, len(m.Label)+1)\n\t\tfor _, p := range m.Label {\n\t\t\tlset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())\n\t\t}\n\t\tlset[model.MetricNameLabel] = model.LabelValue(f.GetName())\n\n\t\tsmpl := &model.Sample{\n\t\t\tMetric: model.Metric(lset),\n\t\t\tValue:  model.SampleValue(m.Counter.GetValue()),\n\t\t}\n\n\t\tif m.TimestampMs != nil {\n\t\t\tsmpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)\n\t\t} else {\n\t\t\tsmpl.Timestamp = o.Timestamp\n\t\t}\n\n\t\tsamples = append(samples, smpl)\n\t}\n\n\treturn samples\n}\n\nfunc extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector {\n\tsamples := make(model.Vector, 0, len(f.Metric))\n\n\tfor _, m := range f.Metric {\n\t\tif m.Gauge == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tlset := make(model.LabelSet, len(m.Label)+1)\n\t\tfor _, p := range m.Label {\n\t\t\tlset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())\n\t\t}\n\t\tlset[model.MetricNameLabel] = model.LabelValue(f.GetName())\n\n\t\tsmpl := &model.Sample{\n\t\t\tMetric: model.Metric(lset),\n\t\t\tValue:  
model.SampleValue(m.Gauge.GetValue()),\n\t\t}\n\n\t\tif m.TimestampMs != nil {\n\t\t\tsmpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)\n\t\t} else {\n\t\t\tsmpl.Timestamp = o.Timestamp\n\t\t}\n\n\t\tsamples = append(samples, smpl)\n\t}\n\n\treturn samples\n}\n\nfunc extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector {\n\tsamples := make(model.Vector, 0, len(f.Metric))\n\n\tfor _, m := range f.Metric {\n\t\tif m.Untyped == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tlset := make(model.LabelSet, len(m.Label)+1)\n\t\tfor _, p := range m.Label {\n\t\t\tlset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())\n\t\t}\n\t\tlset[model.MetricNameLabel] = model.LabelValue(f.GetName())\n\n\t\tsmpl := &model.Sample{\n\t\t\tMetric: model.Metric(lset),\n\t\t\tValue:  model.SampleValue(m.Untyped.GetValue()),\n\t\t}\n\n\t\tif m.TimestampMs != nil {\n\t\t\tsmpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)\n\t\t} else {\n\t\t\tsmpl.Timestamp = o.Timestamp\n\t\t}\n\n\t\tsamples = append(samples, smpl)\n\t}\n\n\treturn samples\n}\n\nfunc extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector {\n\tsamples := make(model.Vector, 0, len(f.Metric))\n\n\tfor _, m := range f.Metric {\n\t\tif m.Summary == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\ttimestamp := o.Timestamp\n\t\tif m.TimestampMs != nil {\n\t\t\ttimestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)\n\t\t}\n\n\t\tfor _, q := range m.Summary.Quantile {\n\t\t\tlset := make(model.LabelSet, len(m.Label)+2)\n\t\t\tfor _, p := range m.Label {\n\t\t\t\tlset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())\n\t\t\t}\n\t\t\t// BUG(matt): Update other names to \"quantile\".\n\t\t\tlset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile()))\n\t\t\tlset[model.MetricNameLabel] = model.LabelValue(f.GetName())\n\n\t\t\tsamples = append(samples, &model.Sample{\n\t\t\t\tMetric:    model.Metric(lset),\n\t\t\t\tValue:     model.SampleValue(q.GetValue()),\n\t\t\t\tTimestamp: timestamp,\n\t\t\t})\n\t\t}\n\n\t\tlset := make(model.LabelSet, len(m.Label)+1)\n\t\tfor _, p := range m.Label {\n\t\t\tlset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())\n\t\t}\n\t\tlset[model.MetricNameLabel] = model.LabelValue(f.GetName() + \"_sum\")\n\n\t\tsamples = append(samples, &model.Sample{\n\t\t\tMetric:    model.Metric(lset),\n\t\t\tValue:     model.SampleValue(m.Summary.GetSampleSum()),\n\t\t\tTimestamp: timestamp,\n\t\t})\n\n\t\tlset = make(model.LabelSet, len(m.Label)+1)\n\t\tfor _, p := range m.Label {\n\t\t\tlset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())\n\t\t}\n\t\tlset[model.MetricNameLabel] = model.LabelValue(f.GetName() + \"_count\")\n\n\t\tsamples = append(samples, &model.Sample{\n\t\t\tMetric:    model.Metric(lset),\n\t\t\tValue:     model.SampleValue(m.Summary.GetSampleCount()),\n\t\t\tTimestamp: timestamp,\n\t\t})\n\t}\n\n\treturn samples\n}\n\nfunc extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector {\n\tsamples := make(model.Vector, 0, len(f.Metric))\n\n\tfor _, m := range f.Metric {\n\t\tif m.Histogram == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\ttimestamp := o.Timestamp\n\t\tif m.TimestampMs != nil {\n\t\t\ttimestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000)\n\t\t}\n\n\t\tinfSeen := false\n\n\t\tfor _, q := range m.Histogram.Bucket {\n\t\t\tlset := make(model.LabelSet, len(m.Label)+2)\n\t\t\tfor _, p := range m.Label {\n\t\t\t\tlset[model.LabelName(p.GetName())] = 
model.LabelValue(p.GetValue())\n\t\t\t}\n\t\t\tlset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound()))\n\t\t\tlset[model.MetricNameLabel] = model.LabelValue(f.GetName() + \"_bucket\")\n\n\t\t\tif math.IsInf(q.GetUpperBound(), +1) {\n\t\t\t\tinfSeen = true\n\t\t\t}\n\n\t\t\tsamples = append(samples, &model.Sample{\n\t\t\t\tMetric:    model.Metric(lset),\n\t\t\t\tValue:     model.SampleValue(q.GetCumulativeCount()),\n\t\t\t\tTimestamp: timestamp,\n\t\t\t})\n\t\t}\n\n\t\tlset := make(model.LabelSet, len(m.Label)+1)\n\t\tfor _, p := range m.Label {\n\t\t\tlset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())\n\t\t}\n\t\tlset[model.MetricNameLabel] = model.LabelValue(f.GetName() + \"_sum\")\n\n\t\tsamples = append(samples, &model.Sample{\n\t\t\tMetric:    model.Metric(lset),\n\t\t\tValue:     model.SampleValue(m.Histogram.GetSampleSum()),\n\t\t\tTimestamp: timestamp,\n\t\t})\n\n\t\tlset = make(model.LabelSet, len(m.Label)+1)\n\t\tfor _, p := range m.Label {\n\t\t\tlset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())\n\t\t}\n\t\tlset[model.MetricNameLabel] = model.LabelValue(f.GetName() + \"_count\")\n\n\t\tcount := &model.Sample{\n\t\t\tMetric:    model.Metric(lset),\n\t\t\tValue:     model.SampleValue(m.Histogram.GetSampleCount()),\n\t\t\tTimestamp: timestamp,\n\t\t}\n\t\tsamples = append(samples, count)\n\n\t\tif !infSeen {\n\t\t\t// Append an infinity bucket sample.\n\t\t\tlset := make(model.LabelSet, len(m.Label)+2)\n\t\t\tfor _, p := range m.Label {\n\t\t\t\tlset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue())\n\t\t\t}\n\t\t\tlset[model.LabelName(model.BucketLabel)] = model.LabelValue(\"+Inf\")\n\t\t\tlset[model.MetricNameLabel] = model.LabelValue(f.GetName() + \"_bucket\")\n\n\t\t\tsamples = append(samples, &model.Sample{\n\t\t\t\tMetric:    model.Metric(lset),\n\t\t\t\tValue:     count.Value,\n\t\t\t\tTimestamp: timestamp,\n\t\t\t})\n\t\t}\n\t}\n\n\treturn samples\n}\n"
  },
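  {
    "path": "examples/expfmt_decode_sketch.go",
    "content": "// Illustrative sketch, not part of the vendored upstream: it wires up\n// NewDecoder and SampleDecoder from decode.go to turn a text-format scrape\n// into model.Samples. The input string is made up; samples without an\n// explicit timestamp receive DecodeOptions.Timestamp.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"strings\"\n\n\t\"github.com/prometheus/common/expfmt\"\n\t\"github.com/prometheus/common/model\"\n)\n\nfunc main() {\n\tin := \"# TYPE requests_total counter\\nrequests_total{code=\\\"200\\\"} 7\\n\"\n\n\tdec := &expfmt.SampleDecoder{\n\t\tDec:  expfmt.NewDecoder(strings.NewReader(in), expfmt.FmtText),\n\t\tOpts: &expfmt.DecodeOptions{Timestamp: model.Now()},\n\t}\n\n\t// Decode until the underlying decoder reports io.EOF, as the tests do.\n\tfor {\n\t\tvar samples model.Vector\n\t\tif err := dec.Decode(&samples); err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tfor _, s := range samples {\n\t\t\tfmt.Println(s.Metric, s.Value, s.Timestamp)\n\t\t}\n\t}\n}\n"
  },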
  {
    "path": "vendor/github.com/prometheus/common/expfmt/decode_test.go",
    "content": "// Copyright 2015 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage expfmt\n\nimport (\n\t\"io\"\n\t\"net/http\"\n\t\"reflect\"\n\t\"sort\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/golang/protobuf/proto\"\n\tdto \"github.com/prometheus/client_model/go\"\n\n\t\"github.com/prometheus/common/model\"\n)\n\nfunc TestTextDecoder(t *testing.T) {\n\tvar (\n\t\tts = model.Now()\n\t\tin = `\n# Only a quite simple scenario with two metric families.\n# More complicated tests of the parser itself can be found in the text package.\n# TYPE mf2 counter\nmf2 3\nmf1{label=\"value1\"} -3.14 123456\nmf1{label=\"value2\"} 42\nmf2 4\n`\n\t\tout = model.Vector{\n\t\t\t&model.Sample{\n\t\t\t\tMetric: model.Metric{\n\t\t\t\t\tmodel.MetricNameLabel: \"mf1\",\n\t\t\t\t\t\"label\":               \"value1\",\n\t\t\t\t},\n\t\t\t\tValue:     -3.14,\n\t\t\t\tTimestamp: 123456,\n\t\t\t},\n\t\t\t&model.Sample{\n\t\t\t\tMetric: model.Metric{\n\t\t\t\t\tmodel.MetricNameLabel: \"mf1\",\n\t\t\t\t\t\"label\":               \"value2\",\n\t\t\t\t},\n\t\t\t\tValue:     42,\n\t\t\t\tTimestamp: ts,\n\t\t\t},\n\t\t\t&model.Sample{\n\t\t\t\tMetric: model.Metric{\n\t\t\t\t\tmodel.MetricNameLabel: \"mf2\",\n\t\t\t\t},\n\t\t\t\tValue:     3,\n\t\t\t\tTimestamp: ts,\n\t\t\t},\n\t\t\t&model.Sample{\n\t\t\t\tMetric: model.Metric{\n\t\t\t\t\tmodel.MetricNameLabel: \"mf2\",\n\t\t\t\t},\n\t\t\t\tValue:     4,\n\t\t\t\tTimestamp: ts,\n\t\t\t},\n\t\t}\n\t)\n\n\tdec := &SampleDecoder{\n\t\tDec: &textDecoder{r: strings.NewReader(in)},\n\t\tOpts: &DecodeOptions{\n\t\t\tTimestamp: ts,\n\t\t},\n\t}\n\tvar all model.Vector\n\tfor {\n\t\tvar smpls model.Vector\n\t\terr := dec.Decode(&smpls)\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tall = append(all, smpls...)\n\t}\n\tsort.Sort(all)\n\tsort.Sort(out)\n\tif !reflect.DeepEqual(all, out) {\n\t\tt.Fatalf(\"output does not match\")\n\t}\n}\n\nfunc TestProtoDecoder(t *testing.T) {\n\n\tvar testTime = model.Now()\n\n\tscenarios := []struct {\n\t\tin       string\n\t\texpected model.Vector\n\t\tfail     bool\n\t}{\n\t\t{\n\t\t\tin: \"\",\n\t\t},\n\t\t{\n\t\t\tin:   \"\\x8f\\x01\\n\\rrequest_count\\x12\\x12Number of requests\\x18\\x00\\\"0\\n#\\n\\x0fsome_!abel_name\\x12\\x10some_label_value\\x1a\\t\\t\\x00\\x00\\x00\\x00\\x00\\x00E\\xc0\\\"6\\n)\\n\\x12another_label_name\\x12\\x13another_label_value\\x1a\\t\\t\\x00\\x00\\x00\\x00\\x00\\x00U@\",\n\t\t\tfail: true,\n\t\t},\n\t\t{\n\t\t\tin: \"\\x8f\\x01\\n\\rrequest_count\\x12\\x12Number of requests\\x18\\x00\\\"0\\n#\\n\\x0fsome_label_name\\x12\\x10some_label_value\\x1a\\t\\t\\x00\\x00\\x00\\x00\\x00\\x00E\\xc0\\\"6\\n)\\n\\x12another_label_name\\x12\\x13another_label_value\\x1a\\t\\t\\x00\\x00\\x00\\x00\\x00\\x00U@\",\n\t\t\texpected: model.Vector{\n\t\t\t\t&model.Sample{\n\t\t\t\t\tMetric: model.Metric{\n\t\t\t\t\t\tmodel.MetricNameLabel: \"request_count\",\n\t\t\t\t\t\t\"some_label_name\":     
\"some_label_value\",\n\t\t\t\t\t},\n\t\t\t\t\tValue:     -42,\n\t\t\t\t\tTimestamp: testTime,\n\t\t\t\t},\n\t\t\t\t&model.Sample{\n\t\t\t\t\tMetric: model.Metric{\n\t\t\t\t\t\tmodel.MetricNameLabel: \"request_count\",\n\t\t\t\t\t\t\"another_label_name\":  \"another_label_value\",\n\t\t\t\t\t},\n\t\t\t\t\tValue:     84,\n\t\t\t\t\tTimestamp: testTime,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tin: \"\\xb9\\x01\\n\\rrequest_count\\x12\\x12Number of requests\\x18\\x02\\\"O\\n#\\n\\x0fsome_label_name\\x12\\x10some_label_value\\\"(\\x1a\\x12\\t\\xaeG\\xe1z\\x14\\xae\\xef?\\x11\\x00\\x00\\x00\\x00\\x00\\x00E\\xc0\\x1a\\x12\\t+\\x87\\x16\\xd9\\xce\\xf7\\xef?\\x11\\x00\\x00\\x00\\x00\\x00\\x00U\\xc0\\\"A\\n)\\n\\x12another_label_name\\x12\\x13another_label_value\\\"\\x14\\x1a\\x12\\t\\x00\\x00\\x00\\x00\\x00\\x00\\xe0?\\x11\\x00\\x00\\x00\\x00\\x00\\x00$@\",\n\t\t\texpected: model.Vector{\n\t\t\t\t&model.Sample{\n\t\t\t\t\tMetric: model.Metric{\n\t\t\t\t\t\tmodel.MetricNameLabel: \"request_count_count\",\n\t\t\t\t\t\t\"some_label_name\":     \"some_label_value\",\n\t\t\t\t\t},\n\t\t\t\t\tValue:     0,\n\t\t\t\t\tTimestamp: testTime,\n\t\t\t\t},\n\t\t\t\t&model.Sample{\n\t\t\t\t\tMetric: model.Metric{\n\t\t\t\t\t\tmodel.MetricNameLabel: \"request_count_sum\",\n\t\t\t\t\t\t\"some_label_name\":     \"some_label_value\",\n\t\t\t\t\t},\n\t\t\t\t\tValue:     0,\n\t\t\t\t\tTimestamp: testTime,\n\t\t\t\t},\n\t\t\t\t&model.Sample{\n\t\t\t\t\tMetric: model.Metric{\n\t\t\t\t\t\tmodel.MetricNameLabel: \"request_count\",\n\t\t\t\t\t\t\"some_label_name\":     \"some_label_value\",\n\t\t\t\t\t\t\"quantile\":            \"0.99\",\n\t\t\t\t\t},\n\t\t\t\t\tValue:     -42,\n\t\t\t\t\tTimestamp: testTime,\n\t\t\t\t},\n\t\t\t\t&model.Sample{\n\t\t\t\t\tMetric: model.Metric{\n\t\t\t\t\t\tmodel.MetricNameLabel: \"request_count\",\n\t\t\t\t\t\t\"some_label_name\":     \"some_label_value\",\n\t\t\t\t\t\t\"quantile\":            \"0.999\",\n\t\t\t\t\t},\n\t\t\t\t\tValue:     -84,\n\t\t\t\t\tTimestamp: testTime,\n\t\t\t\t},\n\t\t\t\t&model.Sample{\n\t\t\t\t\tMetric: model.Metric{\n\t\t\t\t\t\tmodel.MetricNameLabel: \"request_count_count\",\n\t\t\t\t\t\t\"another_label_name\":  \"another_label_value\",\n\t\t\t\t\t},\n\t\t\t\t\tValue:     0,\n\t\t\t\t\tTimestamp: testTime,\n\t\t\t\t},\n\t\t\t\t&model.Sample{\n\t\t\t\t\tMetric: model.Metric{\n\t\t\t\t\t\tmodel.MetricNameLabel: \"request_count_sum\",\n\t\t\t\t\t\t\"another_label_name\":  \"another_label_value\",\n\t\t\t\t\t},\n\t\t\t\t\tValue:     0,\n\t\t\t\t\tTimestamp: testTime,\n\t\t\t\t},\n\t\t\t\t&model.Sample{\n\t\t\t\t\tMetric: model.Metric{\n\t\t\t\t\t\tmodel.MetricNameLabel: \"request_count\",\n\t\t\t\t\t\t\"another_label_name\":  \"another_label_value\",\n\t\t\t\t\t\t\"quantile\":            \"0.5\",\n\t\t\t\t\t},\n\t\t\t\t\tValue:     10,\n\t\t\t\t\tTimestamp: testTime,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tin: \"\\x8d\\x01\\n\\x1drequest_duration_microseconds\\x12\\x15The response latency.\\x18\\x04\\\"S:Q\\b\\x85\\x15\\x11\\xcd\\xcc\\xccL\\x8f\\xcb:A\\x1a\\v\\b{\\x11\\x00\\x00\\x00\\x00\\x00\\x00Y@\\x1a\\f\\b\\x9c\\x03\\x11\\x00\\x00\\x00\\x00\\x00\\x00^@\\x1a\\f\\b\\xd0\\x04\\x11\\x00\\x00\\x00\\x00\\x00\\x00b@\\x1a\\f\\b\\xf4\\v\\x11\\x9a\\x99\\x99\\x99\\x99\\x99e@\\x1a\\f\\b\\x85\\x15\\x11\\x00\\x00\\x00\\x00\\x00\\x00\\xf0\\u007f\",\n\t\t\texpected: model.Vector{\n\t\t\t\t&model.Sample{\n\t\t\t\t\tMetric: model.Metric{\n\t\t\t\t\t\tmodel.MetricNameLabel: \"request_duration_microseconds_bucket\",\n\t\t\t\t\t\t\"le\": 
\"100\",\n\t\t\t\t\t},\n\t\t\t\t\tValue:     123,\n\t\t\t\t\tTimestamp: testTime,\n\t\t\t\t},\n\t\t\t\t&model.Sample{\n\t\t\t\t\tMetric: model.Metric{\n\t\t\t\t\t\tmodel.MetricNameLabel: \"request_duration_microseconds_bucket\",\n\t\t\t\t\t\t\"le\": \"120\",\n\t\t\t\t\t},\n\t\t\t\t\tValue:     412,\n\t\t\t\t\tTimestamp: testTime,\n\t\t\t\t},\n\t\t\t\t&model.Sample{\n\t\t\t\t\tMetric: model.Metric{\n\t\t\t\t\t\tmodel.MetricNameLabel: \"request_duration_microseconds_bucket\",\n\t\t\t\t\t\t\"le\": \"144\",\n\t\t\t\t\t},\n\t\t\t\t\tValue:     592,\n\t\t\t\t\tTimestamp: testTime,\n\t\t\t\t},\n\t\t\t\t&model.Sample{\n\t\t\t\t\tMetric: model.Metric{\n\t\t\t\t\t\tmodel.MetricNameLabel: \"request_duration_microseconds_bucket\",\n\t\t\t\t\t\t\"le\": \"172.8\",\n\t\t\t\t\t},\n\t\t\t\t\tValue:     1524,\n\t\t\t\t\tTimestamp: testTime,\n\t\t\t\t},\n\t\t\t\t&model.Sample{\n\t\t\t\t\tMetric: model.Metric{\n\t\t\t\t\t\tmodel.MetricNameLabel: \"request_duration_microseconds_bucket\",\n\t\t\t\t\t\t\"le\": \"+Inf\",\n\t\t\t\t\t},\n\t\t\t\t\tValue:     2693,\n\t\t\t\t\tTimestamp: testTime,\n\t\t\t\t},\n\t\t\t\t&model.Sample{\n\t\t\t\t\tMetric: model.Metric{\n\t\t\t\t\t\tmodel.MetricNameLabel: \"request_duration_microseconds_sum\",\n\t\t\t\t\t},\n\t\t\t\t\tValue:     1756047.3,\n\t\t\t\t\tTimestamp: testTime,\n\t\t\t\t},\n\t\t\t\t&model.Sample{\n\t\t\t\t\tMetric: model.Metric{\n\t\t\t\t\t\tmodel.MetricNameLabel: \"request_duration_microseconds_count\",\n\t\t\t\t\t},\n\t\t\t\t\tValue:     2693,\n\t\t\t\t\tTimestamp: testTime,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\t// The metric type is unset in this protobuf, which needs to be handled\n\t\t\t// correctly by the decoder.\n\t\t\tin: \"\\x1c\\n\\rrequest_count\\\"\\v\\x1a\\t\\t\\x00\\x00\\x00\\x00\\x00\\x00\\xf0?\",\n\t\t\texpected: model.Vector{\n\t\t\t\t&model.Sample{\n\t\t\t\t\tMetric: model.Metric{\n\t\t\t\t\t\tmodel.MetricNameLabel: \"request_count\",\n\t\t\t\t\t},\n\t\t\t\t\tValue:     1,\n\t\t\t\t\tTimestamp: testTime,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor i, scenario := range scenarios {\n\t\tdec := &SampleDecoder{\n\t\t\tDec: &protoDecoder{r: strings.NewReader(scenario.in)},\n\t\t\tOpts: &DecodeOptions{\n\t\t\t\tTimestamp: testTime,\n\t\t\t},\n\t\t}\n\n\t\tvar all model.Vector\n\t\tfor {\n\t\t\tvar smpls model.Vector\n\t\t\terr := dec.Decode(&smpls)\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif scenario.fail {\n\t\t\t\tif err == nil {\n\t\t\t\t\tt.Fatal(\"Expected error but got none\")\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tall = append(all, smpls...)\n\t\t}\n\t\tsort.Sort(all)\n\t\tsort.Sort(scenario.expected)\n\t\tif !reflect.DeepEqual(all, scenario.expected) {\n\t\t\tt.Fatalf(\"%d. 
output does not match, want: %#v, got %#v\", i, scenario.expected, all)\n\t\t}\n\t}\n}\n\nfunc testDiscriminatorHTTPHeader(t testing.TB) {\n\tvar scenarios = []struct {\n\t\tinput  map[string]string\n\t\toutput Format\n\t\terr    error\n\t}{\n\t\t{\n\t\t\tinput:  map[string]string{\"Content-Type\": `application/vnd.google.protobuf; proto=\"io.prometheus.client.MetricFamily\"; encoding=\"delimited\"`},\n\t\t\toutput: FmtProtoDelim,\n\t\t},\n\t\t{\n\t\t\tinput:  map[string]string{\"Content-Type\": `application/vnd.google.protobuf; proto=\"illegal\"; encoding=\"delimited\"`},\n\t\t\toutput: FmtUnknown,\n\t\t},\n\t\t{\n\t\t\tinput:  map[string]string{\"Content-Type\": `application/vnd.google.protobuf; proto=\"io.prometheus.client.MetricFamily\"; encoding=\"illegal\"`},\n\t\t\toutput: FmtUnknown,\n\t\t},\n\t\t{\n\t\t\tinput:  map[string]string{\"Content-Type\": `text/plain; version=0.0.4`},\n\t\t\toutput: FmtText,\n\t\t},\n\t\t{\n\t\t\tinput:  map[string]string{\"Content-Type\": `text/plain`},\n\t\t\toutput: FmtText,\n\t\t},\n\t\t{\n\t\t\tinput:  map[string]string{\"Content-Type\": `text/plain; version=0.0.3`},\n\t\t\toutput: FmtUnknown,\n\t\t},\n\t}\n\n\tfor i, scenario := range scenarios {\n\t\tvar header http.Header\n\n\t\tif len(scenario.input) > 0 {\n\t\t\theader = http.Header{}\n\t\t}\n\n\t\tfor key, value := range scenario.input {\n\t\t\theader.Add(key, value)\n\t\t}\n\n\t\tactual := ResponseFormat(header)\n\n\t\tif scenario.output != actual {\n\t\t\tt.Errorf(\"%d. expected %s, got %s\", i, scenario.output, actual)\n\t\t}\n\t}\n}\n\nfunc TestDiscriminatorHTTPHeader(t *testing.T) {\n\ttestDiscriminatorHTTPHeader(t)\n}\n\nfunc BenchmarkDiscriminatorHTTPHeader(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\ttestDiscriminatorHTTPHeader(b)\n\t}\n}\n\nfunc TestExtractSamples(t *testing.T) {\n\tvar (\n\t\tgoodMetricFamily1 = &dto.MetricFamily{\n\t\t\tName: proto.String(\"foo\"),\n\t\t\tHelp: proto.String(\"Help for foo.\"),\n\t\t\tType: dto.MetricType_COUNTER.Enum(),\n\t\t\tMetric: []*dto.Metric{\n\t\t\t\t&dto.Metric{\n\t\t\t\t\tCounter: &dto.Counter{\n\t\t\t\t\t\tValue: proto.Float64(4711),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tgoodMetricFamily2 = &dto.MetricFamily{\n\t\t\tName: proto.String(\"bar\"),\n\t\t\tHelp: proto.String(\"Help for bar.\"),\n\t\t\tType: dto.MetricType_GAUGE.Enum(),\n\t\t\tMetric: []*dto.Metric{\n\t\t\t\t&dto.Metric{\n\t\t\t\t\tGauge: &dto.Gauge{\n\t\t\t\t\t\tValue: proto.Float64(3.14),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tbadMetricFamily = &dto.MetricFamily{\n\t\t\tName: proto.String(\"bad\"),\n\t\t\tHelp: proto.String(\"Help for bad.\"),\n\t\t\tType: dto.MetricType(42).Enum(),\n\t\t\tMetric: []*dto.Metric{\n\t\t\t\t&dto.Metric{\n\t\t\t\t\tGauge: &dto.Gauge{\n\t\t\t\t\t\tValue: proto.Float64(2.7),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\topts = &DecodeOptions{\n\t\t\tTimestamp: 42,\n\t\t}\n\t)\n\n\tgot, err := ExtractSamples(opts, goodMetricFamily1, goodMetricFamily2)\n\tif err != nil {\n\t\tt.Error(\"Unexpected error from ExtractSamples:\", err)\n\t}\n\twant := model.Vector{\n\t\t&model.Sample{Metric: model.Metric{model.MetricNameLabel: \"foo\"}, Value: 4711, Timestamp: 42},\n\t\t&model.Sample{Metric: model.Metric{model.MetricNameLabel: \"bar\"}, Value: 3.14, Timestamp: 42},\n\t}\n\tif !reflect.DeepEqual(got, want) {\n\t\tt.Errorf(\"unexpected samples extracted, got: %v, want: %v\", got, want)\n\t}\n\n\tgot, err = ExtractSamples(opts, goodMetricFamily1, badMetricFamily, goodMetricFamily2)\n\tif err == nil {\n\t\tt.Error(\"Expected error 
from ExtractSamples\")\n\t}\n\tif !reflect.DeepEqual(got, want) {\n\t\tt.Errorf(\"unexpected samples extracted, got: %v, want: %v\", got, want)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/common/expfmt/encode.go",
    "content": "// Copyright 2015 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage expfmt\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"github.com/matttproud/golang_protobuf_extensions/pbutil\"\n\t\"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n)\n\n// Encoder types encode metric families into an underlying wire protocol.\ntype Encoder interface {\n\tEncode(*dto.MetricFamily) error\n}\n\ntype encoder func(*dto.MetricFamily) error\n\nfunc (e encoder) Encode(v *dto.MetricFamily) error {\n\treturn e(v)\n}\n\n// Negotiate returns the Content-Type based on the given Accept header.\n// If no appropriate accepted type is found, FmtText is returned.\nfunc Negotiate(h http.Header) Format {\n\tfor _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) {\n\t\t// Check for protocol buffer\n\t\tif ac.Type+\"/\"+ac.SubType == ProtoType && ac.Params[\"proto\"] == ProtoProtocol {\n\t\t\tswitch ac.Params[\"encoding\"] {\n\t\t\tcase \"delimited\":\n\t\t\t\treturn FmtProtoDelim\n\t\t\tcase \"text\":\n\t\t\t\treturn FmtProtoText\n\t\t\tcase \"compact-text\":\n\t\t\t\treturn FmtProtoCompact\n\t\t\t}\n\t\t}\n\t\t// Check for text format.\n\t\tver := ac.Params[\"version\"]\n\t\tif ac.Type == \"text\" && ac.SubType == \"plain\" && (ver == TextVersion || ver == \"\") {\n\t\t\treturn FmtText\n\t\t}\n\t}\n\treturn FmtText\n}\n\n// NewEncoder returns a new encoder based on content type negotiation.\nfunc NewEncoder(w io.Writer, format Format) Encoder {\n\tswitch format {\n\tcase FmtProtoDelim:\n\t\treturn encoder(func(v *dto.MetricFamily) error {\n\t\t\t_, err := pbutil.WriteDelimited(w, v)\n\t\t\treturn err\n\t\t})\n\tcase FmtProtoCompact:\n\t\treturn encoder(func(v *dto.MetricFamily) error {\n\t\t\t_, err := fmt.Fprintln(w, v.String())\n\t\t\treturn err\n\t\t})\n\tcase FmtProtoText:\n\t\treturn encoder(func(v *dto.MetricFamily) error {\n\t\t\t_, err := fmt.Fprintln(w, proto.MarshalTextString(v))\n\t\t\treturn err\n\t\t})\n\tcase FmtText:\n\t\treturn encoder(func(v *dto.MetricFamily) error {\n\t\t\t_, err := MetricFamilyToText(w, v)\n\t\t\treturn err\n\t\t})\n\t}\n\tpanic(\"expfmt.NewEncoder: unknown format\")\n}\n"
  },
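  {
    "path": "vendor/github.com/prometheus/common/expfmt/encode_example_test.go",
    "content": "// NOTE: This file is an illustrative sketch added next to the vendored\n// sources; it is not part of upstream prometheus/common. It shows how the\n// encoding side of this package is typically wired into an HTTP handler:\n// Negotiate inspects the Accept header and NewEncoder writes each\n// MetricFamily in the negotiated format. The handler and fixture family\n// below are hypothetical names, not upstream API.\n\npackage expfmt\n\nimport (\n\t\"net/http\"\n\n\t\"github.com/golang/protobuf/proto\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n)\n\n// exampleFamily builds a minimal one-sample counter to encode.\nfunc exampleFamily() *dto.MetricFamily {\n\treturn &dto.MetricFamily{\n\t\tName: proto.String(\"example_requests_total\"),\n\t\tHelp: proto.String(\"Total requests handled.\"),\n\t\tType: dto.MetricType_COUNTER.Enum(),\n\t\tMetric: []*dto.Metric{\n\t\t\t&dto.Metric{\n\t\t\t\tCounter: &dto.Counter{Value: proto.Float64(42)},\n\t\t\t},\n\t\t},\n\t}\n}\n\n// exampleMetricsHandler serves a scrape in whatever format the client asked\n// for; Negotiate falls back to FmtText when nothing better is accepted.\nfunc exampleMetricsHandler(w http.ResponseWriter, r *http.Request) {\n\tformat := Negotiate(r.Header)\n\tw.Header().Set(\"Content-Type\", string(format))\n\tenc := NewEncoder(w, format)\n\tif err := enc.Encode(exampleFamily()); err != nil {\n\t\t// The response may already be partially written, so there is\n\t\t// nothing sensible left to report to the client.\n\t\treturn\n\t}\n}\n"
  },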
  {
    "path": "vendor/github.com/prometheus/common/expfmt/expfmt.go",
    "content": "// Copyright 2015 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// Package expfmt contains tools for reading and writing Prometheus metrics.\npackage expfmt\n\n// Format specifies the HTTP content type of the different wire protocols.\ntype Format string\n\n// Constants to assemble the Content-Type values for the different wire protocols.\nconst (\n\tTextVersion   = \"0.0.4\"\n\tProtoType     = `application/vnd.google.protobuf`\n\tProtoProtocol = `io.prometheus.client.MetricFamily`\n\tProtoFmt      = ProtoType + \"; proto=\" + ProtoProtocol + \";\"\n\n\t// The Content-Type values for the different wire protocols.\n\tFmtUnknown      Format = `<unknown>`\n\tFmtText         Format = `text/plain; version=` + TextVersion\n\tFmtProtoDelim   Format = ProtoFmt + ` encoding=delimited`\n\tFmtProtoText    Format = ProtoFmt + ` encoding=text`\n\tFmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`\n)\n\nconst (\n\thdrContentType = \"Content-Type\"\n\thdrAccept      = \"Accept\"\n)\n"
  },
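  {
    "path": "vendor/github.com/prometheus/common/expfmt/expfmt_example_test.go",
    "content": "// NOTE: Illustrative sketch, not part of upstream prometheus/common. It\n// prints two of the assembled Format constants so the Content-Type\n// composition above (text/plain plus version, ProtoFmt plus encoding) is\n// visible at a glance.\n\npackage expfmt\n\nimport \"fmt\"\n\nfunc ExampleFormat() {\n\tfmt.Println(FmtText)\n\tfmt.Println(FmtProtoDelim)\n\t// Output:\n\t// text/plain; version=0.0.4\n\t// application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited\n}\n"
  },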
  {
    "path": "vendor/github.com/prometheus/common/expfmt/fuzz.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// Build only when actually fuzzing\n// +build gofuzz\n\npackage expfmt\n\nimport \"bytes\"\n\n// Fuzz text metric parser with with github.com/dvyukov/go-fuzz:\n//\n//     go-fuzz-build github.com/prometheus/common/expfmt\n//     go-fuzz -bin expfmt-fuzz.zip -workdir fuzz\n//\n// Further input samples should go in the folder fuzz/corpus.\nfunc Fuzz(in []byte) int {\n\tparser := TextParser{}\n\t_, err := parser.TextToMetricFamilies(bytes.NewReader(in))\n\n\tif err != nil {\n\t\treturn 0\n\t}\n\n\treturn 1\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/common/expfmt/text_create.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage expfmt\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"strings\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n\t\"github.com/prometheus/common/model\"\n)\n\n// MetricFamilyToText converts a MetricFamily proto message into text format and\n// writes the resulting lines to 'out'. It returns the number of bytes written\n// and any error encountered. The output will have the same order as the input,\n// no further sorting is performed. Furthermore, this function assumes the input\n// is already sanitized and does not perform any sanity checks. If the input\n// contains duplicate metrics or invalid metric or label names, the conversion\n// will result in invalid text format output.\n//\n// This method fulfills the type 'prometheus.encoder'.\nfunc MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) {\n\tvar written int\n\n\t// Fail-fast checks.\n\tif len(in.Metric) == 0 {\n\t\treturn written, fmt.Errorf(\"MetricFamily has no metrics: %s\", in)\n\t}\n\tname := in.GetName()\n\tif name == \"\" {\n\t\treturn written, fmt.Errorf(\"MetricFamily has no name: %s\", in)\n\t}\n\n\t// Comments, first HELP, then TYPE.\n\tif in.Help != nil {\n\t\tn, err := fmt.Fprintf(\n\t\t\tout, \"# HELP %s %s\\n\",\n\t\t\tname, escapeString(*in.Help, false),\n\t\t)\n\t\twritten += n\n\t\tif err != nil {\n\t\t\treturn written, err\n\t\t}\n\t}\n\tmetricType := in.GetType()\n\tn, err := fmt.Fprintf(\n\t\tout, \"# TYPE %s %s\\n\",\n\t\tname, strings.ToLower(metricType.String()),\n\t)\n\twritten += n\n\tif err != nil {\n\t\treturn written, err\n\t}\n\n\t// Finally the samples, one line for each.\n\tfor _, metric := range in.Metric {\n\t\tswitch metricType {\n\t\tcase dto.MetricType_COUNTER:\n\t\t\tif metric.Counter == nil {\n\t\t\t\treturn written, fmt.Errorf(\n\t\t\t\t\t\"expected counter in metric %s %s\", name, metric,\n\t\t\t\t)\n\t\t\t}\n\t\t\tn, err = writeSample(\n\t\t\t\tname, metric, \"\", \"\",\n\t\t\t\tmetric.Counter.GetValue(),\n\t\t\t\tout,\n\t\t\t)\n\t\tcase dto.MetricType_GAUGE:\n\t\t\tif metric.Gauge == nil {\n\t\t\t\treturn written, fmt.Errorf(\n\t\t\t\t\t\"expected gauge in metric %s %s\", name, metric,\n\t\t\t\t)\n\t\t\t}\n\t\t\tn, err = writeSample(\n\t\t\t\tname, metric, \"\", \"\",\n\t\t\t\tmetric.Gauge.GetValue(),\n\t\t\t\tout,\n\t\t\t)\n\t\tcase dto.MetricType_UNTYPED:\n\t\t\tif metric.Untyped == nil {\n\t\t\t\treturn written, fmt.Errorf(\n\t\t\t\t\t\"expected untyped in metric %s %s\", name, metric,\n\t\t\t\t)\n\t\t\t}\n\t\t\tn, err = writeSample(\n\t\t\t\tname, metric, \"\", \"\",\n\t\t\t\tmetric.Untyped.GetValue(),\n\t\t\t\tout,\n\t\t\t)\n\t\tcase dto.MetricType_SUMMARY:\n\t\t\tif metric.Summary == nil {\n\t\t\t\treturn written, fmt.Errorf(\n\t\t\t\t\t\"expected summary in metric %s %s\", name, metric,\n\t\t\t\t)\n\t\t\t}\n\t\t\tfor _, q := range metric.Summary.Quantile {\n\t\t\t\tn, err = writeSample(\n\t\t\t\t\tname, 
metric,\n\t\t\t\t\tmodel.QuantileLabel, fmt.Sprint(q.GetQuantile()),\n\t\t\t\t\tq.GetValue(),\n\t\t\t\t\tout,\n\t\t\t\t)\n\t\t\t\twritten += n\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn written, err\n\t\t\t\t}\n\t\t\t}\n\t\t\tn, err = writeSample(\n\t\t\t\tname+\"_sum\", metric, \"\", \"\",\n\t\t\t\tmetric.Summary.GetSampleSum(),\n\t\t\t\tout,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn written, err\n\t\t\t}\n\t\t\twritten += n\n\t\t\tn, err = writeSample(\n\t\t\t\tname+\"_count\", metric, \"\", \"\",\n\t\t\t\tfloat64(metric.Summary.GetSampleCount()),\n\t\t\t\tout,\n\t\t\t)\n\t\tcase dto.MetricType_HISTOGRAM:\n\t\t\tif metric.Histogram == nil {\n\t\t\t\treturn written, fmt.Errorf(\n\t\t\t\t\t\"expected histogram in metric %s %s\", name, metric,\n\t\t\t\t)\n\t\t\t}\n\t\t\tinfSeen := false\n\t\t\tfor _, q := range metric.Histogram.Bucket {\n\t\t\t\tn, err = writeSample(\n\t\t\t\t\tname+\"_bucket\", metric,\n\t\t\t\t\tmodel.BucketLabel, fmt.Sprint(q.GetUpperBound()),\n\t\t\t\t\tfloat64(q.GetCumulativeCount()),\n\t\t\t\t\tout,\n\t\t\t\t)\n\t\t\t\twritten += n\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn written, err\n\t\t\t\t}\n\t\t\t\tif math.IsInf(q.GetUpperBound(), +1) {\n\t\t\t\t\tinfSeen = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !infSeen {\n\t\t\t\tn, err = writeSample(\n\t\t\t\t\tname+\"_bucket\", metric,\n\t\t\t\t\tmodel.BucketLabel, \"+Inf\",\n\t\t\t\t\tfloat64(metric.Histogram.GetSampleCount()),\n\t\t\t\t\tout,\n\t\t\t\t)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn written, err\n\t\t\t\t}\n\t\t\t\twritten += n\n\t\t\t}\n\t\t\tn, err = writeSample(\n\t\t\t\tname+\"_sum\", metric, \"\", \"\",\n\t\t\t\tmetric.Histogram.GetSampleSum(),\n\t\t\t\tout,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn written, err\n\t\t\t}\n\t\t\twritten += n\n\t\t\tn, err = writeSample(\n\t\t\t\tname+\"_count\", metric, \"\", \"\",\n\t\t\t\tfloat64(metric.Histogram.GetSampleCount()),\n\t\t\t\tout,\n\t\t\t)\n\t\tdefault:\n\t\t\treturn written, fmt.Errorf(\n\t\t\t\t\"unexpected type in metric %s %s\", name, metric,\n\t\t\t)\n\t\t}\n\t\twritten += n\n\t\tif err != nil {\n\t\t\treturn written, err\n\t\t}\n\t}\n\treturn written, nil\n}\n\n// writeSample writes a single sample in text format to out, given the metric\n// name, the metric proto message itself, optionally an additional label name\n// and value (use empty strings if not required), and the value. The function\n// returns the number of bytes written and any error encountered.\nfunc writeSample(\n\tname string,\n\tmetric *dto.Metric,\n\tadditionalLabelName, additionalLabelValue string,\n\tvalue float64,\n\tout io.Writer,\n) (int, error) {\n\tvar written int\n\tn, err := fmt.Fprint(out, name)\n\twritten += n\n\tif err != nil {\n\t\treturn written, err\n\t}\n\tn, err = labelPairsToText(\n\t\tmetric.Label,\n\t\tadditionalLabelName, additionalLabelValue,\n\t\tout,\n\t)\n\twritten += n\n\tif err != nil {\n\t\treturn written, err\n\t}\n\tn, err = fmt.Fprintf(out, \" %v\", value)\n\twritten += n\n\tif err != nil {\n\t\treturn written, err\n\t}\n\tif metric.TimestampMs != nil {\n\t\tn, err = fmt.Fprintf(out, \" %v\", *metric.TimestampMs)\n\t\twritten += n\n\t\tif err != nil {\n\t\t\treturn written, err\n\t\t}\n\t}\n\tn, err = out.Write([]byte{'\\n'})\n\twritten += n\n\tif err != nil {\n\t\treturn written, err\n\t}\n\treturn written, nil\n}\n\n// labelPairsToText converts a slice of LabelPair proto messages plus the\n// explicitly given additional label pair into text formatted as required by the\n// text format and writes it to 'out'. 
An empty slice in combination with an\n// empty string 'additionalLabelName' results in nothing being\n// written. Otherwise, the label pairs are written, escaped as required by the\n// text format, and enclosed in '{...}'. The function returns the number of\n// bytes written and any error encountered.\nfunc labelPairsToText(\n\tin []*dto.LabelPair,\n\tadditionalLabelName, additionalLabelValue string,\n\tout io.Writer,\n) (int, error) {\n\tif len(in) == 0 && additionalLabelName == \"\" {\n\t\treturn 0, nil\n\t}\n\tvar written int\n\tseparator := '{'\n\tfor _, lp := range in {\n\t\tn, err := fmt.Fprintf(\n\t\t\tout, `%c%s=\"%s\"`,\n\t\t\tseparator, lp.GetName(), escapeString(lp.GetValue(), true),\n\t\t)\n\t\twritten += n\n\t\tif err != nil {\n\t\t\treturn written, err\n\t\t}\n\t\tseparator = ','\n\t}\n\tif additionalLabelName != \"\" {\n\t\tn, err := fmt.Fprintf(\n\t\t\tout, `%c%s=\"%s\"`,\n\t\t\tseparator, additionalLabelName,\n\t\t\tescapeString(additionalLabelValue, true),\n\t\t)\n\t\twritten += n\n\t\tif err != nil {\n\t\t\treturn written, err\n\t\t}\n\t}\n\tn, err := out.Write([]byte{'}'})\n\twritten += n\n\tif err != nil {\n\t\treturn written, err\n\t}\n\treturn written, nil\n}\n\nvar (\n\tescape                = strings.NewReplacer(\"\\\\\", `\\\\`, \"\\n\", `\\n`)\n\tescapeWithDoubleQuote = strings.NewReplacer(\"\\\\\", `\\\\`, \"\\n\", `\\n`, \"\\\"\", `\\\"`)\n)\n\n// escapeString replaces '\\' by '\\\\', new line character by '\\n', and - if\n// includeDoubleQuote is true - '\"' by '\\\"'.\nfunc escapeString(v string, includeDoubleQuote bool) string {\n\tif includeDoubleQuote {\n\t\treturn escapeWithDoubleQuote.Replace(v)\n\t}\n\n\treturn escape.Replace(v)\n}\n"
  },
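  {
    "path": "vendor/github.com/prometheus/common/expfmt/text_create_example_test.go",
    "content": "// NOTE: Illustrative sketch, not part of upstream prometheus/common. It\n// feeds a small gauge family through MetricFamilyToText to show the text\n// exposition lines the function emits: HELP and TYPE comments followed by\n// one sample per metric.\n\npackage expfmt\n\nimport (\n\t\"os\"\n\n\t\"github.com/golang/protobuf/proto\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n)\n\nfunc ExampleMetricFamilyToText() {\n\tmf := &dto.MetricFamily{\n\t\tName: proto.String(\"temperature_celsius\"),\n\t\tHelp: proto.String(\"Current temperature.\"),\n\t\tType: dto.MetricType_GAUGE.Enum(),\n\t\tMetric: []*dto.Metric{\n\t\t\t&dto.Metric{\n\t\t\t\tLabel: []*dto.LabelPair{\n\t\t\t\t\t&dto.LabelPair{\n\t\t\t\t\t\tName:  proto.String(\"room\"),\n\t\t\t\t\t\tValue: proto.String(\"lab\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tGauge: &dto.Gauge{Value: proto.Float64(21.5)},\n\t\t\t},\n\t\t},\n\t}\n\t// The byte count and error returns are ignored here for brevity.\n\tMetricFamilyToText(os.Stdout, mf)\n\t// Output:\n\t// # HELP temperature_celsius Current temperature.\n\t// # TYPE temperature_celsius gauge\n\t// temperature_celsius{room=\"lab\"} 21.5\n}\n"
  },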
  {
    "path": "vendor/github.com/prometheus/common/expfmt/text_create_test.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage expfmt\n\nimport (\n\t\"bytes\"\n\t\"math\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/golang/protobuf/proto\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n)\n\nfunc testCreate(t testing.TB) {\n\tvar scenarios = []struct {\n\t\tin  *dto.MetricFamily\n\t\tout string\n\t}{\n\t\t// 0: Counter, NaN as value, timestamp given.\n\t\t{\n\t\t\tin: &dto.MetricFamily{\n\t\t\t\tName: proto.String(\"name\"),\n\t\t\t\tHelp: proto.String(\"two-line\\n doc  str\\\\ing\"),\n\t\t\t\tType: dto.MetricType_COUNTER.Enum(),\n\t\t\t\tMetric: []*dto.Metric{\n\t\t\t\t\t&dto.Metric{\n\t\t\t\t\t\tLabel: []*dto.LabelPair{\n\t\t\t\t\t\t\t&dto.LabelPair{\n\t\t\t\t\t\t\t\tName:  proto.String(\"labelname\"),\n\t\t\t\t\t\t\t\tValue: proto.String(\"val1\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t&dto.LabelPair{\n\t\t\t\t\t\t\t\tName:  proto.String(\"basename\"),\n\t\t\t\t\t\t\t\tValue: proto.String(\"basevalue\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tCounter: &dto.Counter{\n\t\t\t\t\t\t\tValue: proto.Float64(math.NaN()),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t&dto.Metric{\n\t\t\t\t\t\tLabel: []*dto.LabelPair{\n\t\t\t\t\t\t\t&dto.LabelPair{\n\t\t\t\t\t\t\t\tName:  proto.String(\"labelname\"),\n\t\t\t\t\t\t\t\tValue: proto.String(\"val2\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t&dto.LabelPair{\n\t\t\t\t\t\t\t\tName:  proto.String(\"basename\"),\n\t\t\t\t\t\t\t\tValue: proto.String(\"basevalue\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tCounter: &dto.Counter{\n\t\t\t\t\t\t\tValue: proto.Float64(.23),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTimestampMs: proto.Int64(1234567890),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tout: `# HELP name two-line\\n doc  str\\\\ing\n# TYPE name counter\nname{labelname=\"val1\",basename=\"basevalue\"} NaN\nname{labelname=\"val2\",basename=\"basevalue\"} 0.23 1234567890\n`,\n\t\t},\n\t\t// 1: Gauge, some escaping required, +Inf as value, multi-byte characters in label values.\n\t\t{\n\t\t\tin: &dto.MetricFamily{\n\t\t\t\tName: proto.String(\"gauge_name\"),\n\t\t\t\tHelp: proto.String(\"gauge\\ndoc\\nstr\\\"ing\"),\n\t\t\t\tType: dto.MetricType_GAUGE.Enum(),\n\t\t\t\tMetric: []*dto.Metric{\n\t\t\t\t\t&dto.Metric{\n\t\t\t\t\t\tLabel: []*dto.LabelPair{\n\t\t\t\t\t\t\t&dto.LabelPair{\n\t\t\t\t\t\t\t\tName:  proto.String(\"name_1\"),\n\t\t\t\t\t\t\t\tValue: proto.String(\"val with\\nnew line\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t&dto.LabelPair{\n\t\t\t\t\t\t\t\tName:  proto.String(\"name_2\"),\n\t\t\t\t\t\t\t\tValue: proto.String(\"val with \\\\backslash and \\\"quotes\\\"\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tGauge: &dto.Gauge{\n\t\t\t\t\t\t\tValue: proto.Float64(math.Inf(+1)),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t&dto.Metric{\n\t\t\t\t\t\tLabel: []*dto.LabelPair{\n\t\t\t\t\t\t\t&dto.LabelPair{\n\t\t\t\t\t\t\t\tName:  proto.String(\"name_1\"),\n\t\t\t\t\t\t\t\tValue: 
proto.String(\"Björn\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t&dto.LabelPair{\n\t\t\t\t\t\t\t\tName:  proto.String(\"name_2\"),\n\t\t\t\t\t\t\t\tValue: proto.String(\"佖佥\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tGauge: &dto.Gauge{\n\t\t\t\t\t\t\tValue: proto.Float64(3.14E42),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tout: `# HELP gauge_name gauge\\ndoc\\nstr\"ing\n# TYPE gauge_name gauge\ngauge_name{name_1=\"val with\\nnew line\",name_2=\"val with \\\\backslash and \\\"quotes\\\"\"} +Inf\ngauge_name{name_1=\"Björn\",name_2=\"佖佥\"} 3.14e+42\n`,\n\t\t},\n\t\t// 2: Untyped, no help, one sample with no labels and -Inf as value, another sample with one label.\n\t\t{\n\t\t\tin: &dto.MetricFamily{\n\t\t\t\tName: proto.String(\"untyped_name\"),\n\t\t\t\tType: dto.MetricType_UNTYPED.Enum(),\n\t\t\t\tMetric: []*dto.Metric{\n\t\t\t\t\t&dto.Metric{\n\t\t\t\t\t\tUntyped: &dto.Untyped{\n\t\t\t\t\t\t\tValue: proto.Float64(math.Inf(-1)),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t&dto.Metric{\n\t\t\t\t\t\tLabel: []*dto.LabelPair{\n\t\t\t\t\t\t\t&dto.LabelPair{\n\t\t\t\t\t\t\t\tName:  proto.String(\"name_1\"),\n\t\t\t\t\t\t\t\tValue: proto.String(\"value 1\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tUntyped: &dto.Untyped{\n\t\t\t\t\t\t\tValue: proto.Float64(-1.23e-45),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tout: `# TYPE untyped_name untyped\nuntyped_name -Inf\nuntyped_name{name_1=\"value 1\"} -1.23e-45\n`,\n\t\t},\n\t\t// 3: Summary.\n\t\t{\n\t\t\tin: &dto.MetricFamily{\n\t\t\t\tName: proto.String(\"summary_name\"),\n\t\t\t\tHelp: proto.String(\"summary docstring\"),\n\t\t\t\tType: dto.MetricType_SUMMARY.Enum(),\n\t\t\t\tMetric: []*dto.Metric{\n\t\t\t\t\t&dto.Metric{\n\t\t\t\t\t\tSummary: &dto.Summary{\n\t\t\t\t\t\t\tSampleCount: proto.Uint64(42),\n\t\t\t\t\t\t\tSampleSum:   proto.Float64(-3.4567),\n\t\t\t\t\t\t\tQuantile: []*dto.Quantile{\n\t\t\t\t\t\t\t\t&dto.Quantile{\n\t\t\t\t\t\t\t\t\tQuantile: proto.Float64(0.5),\n\t\t\t\t\t\t\t\t\tValue:    proto.Float64(-1.23),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t&dto.Quantile{\n\t\t\t\t\t\t\t\t\tQuantile: proto.Float64(0.9),\n\t\t\t\t\t\t\t\t\tValue:    proto.Float64(.2342354),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t&dto.Quantile{\n\t\t\t\t\t\t\t\t\tQuantile: proto.Float64(0.99),\n\t\t\t\t\t\t\t\t\tValue:    proto.Float64(0),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t&dto.Metric{\n\t\t\t\t\t\tLabel: []*dto.LabelPair{\n\t\t\t\t\t\t\t&dto.LabelPair{\n\t\t\t\t\t\t\t\tName:  proto.String(\"name_1\"),\n\t\t\t\t\t\t\t\tValue: proto.String(\"value 1\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t&dto.LabelPair{\n\t\t\t\t\t\t\t\tName:  proto.String(\"name_2\"),\n\t\t\t\t\t\t\t\tValue: proto.String(\"value 2\"),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tSummary: &dto.Summary{\n\t\t\t\t\t\t\tSampleCount: proto.Uint64(4711),\n\t\t\t\t\t\t\tSampleSum:   proto.Float64(2010.1971),\n\t\t\t\t\t\t\tQuantile: []*dto.Quantile{\n\t\t\t\t\t\t\t\t&dto.Quantile{\n\t\t\t\t\t\t\t\t\tQuantile: proto.Float64(0.5),\n\t\t\t\t\t\t\t\t\tValue:    proto.Float64(1),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t&dto.Quantile{\n\t\t\t\t\t\t\t\t\tQuantile: proto.Float64(0.9),\n\t\t\t\t\t\t\t\t\tValue:    proto.Float64(2),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t&dto.Quantile{\n\t\t\t\t\t\t\t\t\tQuantile: proto.Float64(0.99),\n\t\t\t\t\t\t\t\t\tValue:    proto.Float64(3),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tout: `# HELP summary_name summary docstring\n# TYPE summary_name 
summary\nsummary_name{quantile=\"0.5\"} -1.23\nsummary_name{quantile=\"0.9\"} 0.2342354\nsummary_name{quantile=\"0.99\"} 0\nsummary_name_sum -3.4567\nsummary_name_count 42\nsummary_name{name_1=\"value 1\",name_2=\"value 2\",quantile=\"0.5\"} 1\nsummary_name{name_1=\"value 1\",name_2=\"value 2\",quantile=\"0.9\"} 2\nsummary_name{name_1=\"value 1\",name_2=\"value 2\",quantile=\"0.99\"} 3\nsummary_name_sum{name_1=\"value 1\",name_2=\"value 2\"} 2010.1971\nsummary_name_count{name_1=\"value 1\",name_2=\"value 2\"} 4711\n`,\n\t\t},\n\t\t// 4: Histogram\n\t\t{\n\t\t\tin: &dto.MetricFamily{\n\t\t\t\tName: proto.String(\"request_duration_microseconds\"),\n\t\t\t\tHelp: proto.String(\"The response latency.\"),\n\t\t\t\tType: dto.MetricType_HISTOGRAM.Enum(),\n\t\t\t\tMetric: []*dto.Metric{\n\t\t\t\t\t&dto.Metric{\n\t\t\t\t\t\tHistogram: &dto.Histogram{\n\t\t\t\t\t\t\tSampleCount: proto.Uint64(2693),\n\t\t\t\t\t\t\tSampleSum:   proto.Float64(1756047.3),\n\t\t\t\t\t\t\tBucket: []*dto.Bucket{\n\t\t\t\t\t\t\t\t&dto.Bucket{\n\t\t\t\t\t\t\t\t\tUpperBound:      proto.Float64(100),\n\t\t\t\t\t\t\t\t\tCumulativeCount: proto.Uint64(123),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t&dto.Bucket{\n\t\t\t\t\t\t\t\t\tUpperBound:      proto.Float64(120),\n\t\t\t\t\t\t\t\t\tCumulativeCount: proto.Uint64(412),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t&dto.Bucket{\n\t\t\t\t\t\t\t\t\tUpperBound:      proto.Float64(144),\n\t\t\t\t\t\t\t\t\tCumulativeCount: proto.Uint64(592),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t&dto.Bucket{\n\t\t\t\t\t\t\t\t\tUpperBound:      proto.Float64(172.8),\n\t\t\t\t\t\t\t\t\tCumulativeCount: proto.Uint64(1524),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t&dto.Bucket{\n\t\t\t\t\t\t\t\t\tUpperBound:      proto.Float64(math.Inf(+1)),\n\t\t\t\t\t\t\t\t\tCumulativeCount: proto.Uint64(2693),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tout: `# HELP request_duration_microseconds The response latency.\n# TYPE request_duration_microseconds histogram\nrequest_duration_microseconds_bucket{le=\"100\"} 123\nrequest_duration_microseconds_bucket{le=\"120\"} 412\nrequest_duration_microseconds_bucket{le=\"144\"} 592\nrequest_duration_microseconds_bucket{le=\"172.8\"} 1524\nrequest_duration_microseconds_bucket{le=\"+Inf\"} 2693\nrequest_duration_microseconds_sum 1.7560473e+06\nrequest_duration_microseconds_count 2693\n`,\n\t\t},\n\t\t// 5: Histogram with missing +Inf bucket.\n\t\t{\n\t\t\tin: &dto.MetricFamily{\n\t\t\t\tName: proto.String(\"request_duration_microseconds\"),\n\t\t\t\tHelp: proto.String(\"The response latency.\"),\n\t\t\t\tType: dto.MetricType_HISTOGRAM.Enum(),\n\t\t\t\tMetric: []*dto.Metric{\n\t\t\t\t\t&dto.Metric{\n\t\t\t\t\t\tHistogram: &dto.Histogram{\n\t\t\t\t\t\t\tSampleCount: proto.Uint64(2693),\n\t\t\t\t\t\t\tSampleSum:   proto.Float64(1756047.3),\n\t\t\t\t\t\t\tBucket: []*dto.Bucket{\n\t\t\t\t\t\t\t\t&dto.Bucket{\n\t\t\t\t\t\t\t\t\tUpperBound:      proto.Float64(100),\n\t\t\t\t\t\t\t\t\tCumulativeCount: proto.Uint64(123),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t&dto.Bucket{\n\t\t\t\t\t\t\t\t\tUpperBound:      proto.Float64(120),\n\t\t\t\t\t\t\t\t\tCumulativeCount: proto.Uint64(412),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t&dto.Bucket{\n\t\t\t\t\t\t\t\t\tUpperBound:      proto.Float64(144),\n\t\t\t\t\t\t\t\t\tCumulativeCount: proto.Uint64(592),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t&dto.Bucket{\n\t\t\t\t\t\t\t\t\tUpperBound:      proto.Float64(172.8),\n\t\t\t\t\t\t\t\t\tCumulativeCount: 
proto.Uint64(1524),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tout: `# HELP request_duration_microseconds The response latency.\n# TYPE request_duration_microseconds histogram\nrequest_duration_microseconds_bucket{le=\"100\"} 123\nrequest_duration_microseconds_bucket{le=\"120\"} 412\nrequest_duration_microseconds_bucket{le=\"144\"} 592\nrequest_duration_microseconds_bucket{le=\"172.8\"} 1524\nrequest_duration_microseconds_bucket{le=\"+Inf\"} 2693\nrequest_duration_microseconds_sum 1.7560473e+06\nrequest_duration_microseconds_count 2693\n`,\n\t\t},\n\t\t// 6: No metric type, should result in default type Counter.\n\t\t{\n\t\t\tin: &dto.MetricFamily{\n\t\t\t\tName: proto.String(\"name\"),\n\t\t\t\tHelp: proto.String(\"doc string\"),\n\t\t\t\tMetric: []*dto.Metric{\n\t\t\t\t\t&dto.Metric{\n\t\t\t\t\t\tCounter: &dto.Counter{\n\t\t\t\t\t\t\tValue: proto.Float64(math.Inf(-1)),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tout: `# HELP name doc string\n# TYPE name counter\nname -Inf\n`,\n\t\t},\n\t}\n\n\tfor i, scenario := range scenarios {\n\t\tout := bytes.NewBuffer(make([]byte, 0, len(scenario.out)))\n\t\tn, err := MetricFamilyToText(out, scenario.in)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%d. error: %s\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\tif expected, got := len(scenario.out), n; expected != got {\n\t\t\tt.Errorf(\n\t\t\t\t\"%d. expected %d bytes written, got %d\",\n\t\t\t\ti, expected, got,\n\t\t\t)\n\t\t}\n\t\tif expected, got := scenario.out, out.String(); expected != got {\n\t\t\tt.Errorf(\n\t\t\t\t\"%d. expected out=%q, got %q\",\n\t\t\t\ti, expected, got,\n\t\t\t)\n\t\t}\n\t}\n\n}\n\nfunc TestCreate(t *testing.T) {\n\ttestCreate(t)\n}\n\nfunc BenchmarkCreate(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\ttestCreate(b)\n\t}\n}\n\nfunc testCreateError(t testing.TB) {\n\tvar scenarios = []struct {\n\t\tin  *dto.MetricFamily\n\t\terr string\n\t}{\n\t\t// 0: No metric.\n\t\t{\n\t\t\tin: &dto.MetricFamily{\n\t\t\t\tName:   proto.String(\"name\"),\n\t\t\t\tHelp:   proto.String(\"doc string\"),\n\t\t\t\tType:   dto.MetricType_COUNTER.Enum(),\n\t\t\t\tMetric: []*dto.Metric{},\n\t\t\t},\n\t\t\terr: \"MetricFamily has no metrics\",\n\t\t},\n\t\t// 1: No metric name.\n\t\t{\n\t\t\tin: &dto.MetricFamily{\n\t\t\t\tHelp: proto.String(\"doc string\"),\n\t\t\t\tType: dto.MetricType_UNTYPED.Enum(),\n\t\t\t\tMetric: []*dto.Metric{\n\t\t\t\t\t&dto.Metric{\n\t\t\t\t\t\tUntyped: &dto.Untyped{\n\t\t\t\t\t\t\tValue: proto.Float64(math.Inf(-1)),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\terr: \"MetricFamily has no name\",\n\t\t},\n\t\t// 2: Wrong type.\n\t\t{\n\t\t\tin: &dto.MetricFamily{\n\t\t\t\tName: proto.String(\"name\"),\n\t\t\t\tHelp: proto.String(\"doc string\"),\n\t\t\t\tType: dto.MetricType_COUNTER.Enum(),\n\t\t\t\tMetric: []*dto.Metric{\n\t\t\t\t\t&dto.Metric{\n\t\t\t\t\t\tUntyped: &dto.Untyped{\n\t\t\t\t\t\t\tValue: proto.Float64(math.Inf(-1)),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\terr: \"expected counter in metric\",\n\t\t},\n\t}\n\n\tfor i, scenario := range scenarios {\n\t\tvar out bytes.Buffer\n\t\t_, err := MetricFamilyToText(&out, scenario.in)\n\t\tif err == nil {\n\t\t\tt.Errorf(\"%d. expected error, got nil\", i)\n\t\t\tcontinue\n\t\t}\n\t\tif expected, got := scenario.err, err.Error(); strings.Index(got, expected) != 0 {\n\t\t\tt.Errorf(\n\t\t\t\t\"%d. 
expected error starting with %q, got %q\",\n\t\t\t\ti, expected, got,\n\t\t\t)\n\t\t}\n\t}\n\n}\n\nfunc TestCreateError(t *testing.T) {\n\ttestCreateError(t)\n}\n\nfunc BenchmarkCreateError(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\ttestCreateError(b)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/common/expfmt/text_parse.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage expfmt\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"strconv\"\n\t\"strings\"\n\n\tdto \"github.com/prometheus/client_model/go\"\n\n\t\"github.com/golang/protobuf/proto\"\n\t\"github.com/prometheus/common/model\"\n)\n\n// A stateFn is a function that represents a state in a state machine. By\n// executing it, the state is progressed to the next state. The stateFn returns\n// another stateFn, which represents the new state. The end state is represented\n// by nil.\ntype stateFn func() stateFn\n\n// ParseError signals errors while parsing the simple and flat text-based\n// exchange format.\ntype ParseError struct {\n\tLine int\n\tMsg  string\n}\n\n// Error implements the error interface.\nfunc (e ParseError) Error() string {\n\treturn fmt.Sprintf(\"text format parsing error in line %d: %s\", e.Line, e.Msg)\n}\n\n// TextParser is used to parse the simple and flat text-based exchange format. Its\n// zero value is ready to use.\ntype TextParser struct {\n\tmetricFamiliesByName map[string]*dto.MetricFamily\n\tbuf                  *bufio.Reader // Where the parsed input is read through.\n\terr                  error         // Most recent error.\n\tlineCount            int           // Tracks the line count for error messages.\n\tcurrentByte          byte          // The most recent byte read.\n\tcurrentToken         bytes.Buffer  // Re-used each time a token has to be gathered from multiple bytes.\n\tcurrentMF            *dto.MetricFamily\n\tcurrentMetric        *dto.Metric\n\tcurrentLabelPair     *dto.LabelPair\n\n\t// The remaining member variables are only used for summaries/histograms.\n\tcurrentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le'\n\t// Summary specific.\n\tsummaries       map[uint64]*dto.Metric // Key is created with LabelsToSignature.\n\tcurrentQuantile float64\n\t// Histogram specific.\n\thistograms    map[uint64]*dto.Metric // Key is created with LabelsToSignature.\n\tcurrentBucket float64\n\t// These tell us if the currently processed line ends on '_count' or\n\t// '_sum' respectively and belong to a summary/histogram, representing the sample\n\t// count and sum of that summary/histogram.\n\tcurrentIsSummaryCount, currentIsSummarySum     bool\n\tcurrentIsHistogramCount, currentIsHistogramSum bool\n}\n\n// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange\n// format and creates MetricFamily proto messages. It returns the MetricFamily\n// proto messages in a map where the metric names are the keys, along with any\n// error encountered.\n//\n// If the input contains duplicate metrics (i.e. lines with the same metric name\n// and exactly the same label set), the resulting MetricFamily will contain\n// duplicate Metric proto messages. Similar is true for duplicate label\n// names. 
Checks for duplicates have to be performed separately, if required.\n// Also note that neither the metrics within each MetricFamily are sorted nor\n// the label pairs within each Metric. Sorting is not required for the most\n// frequent use of this method, which is sample ingestion in the Prometheus\n// server. However, for presentation purposes, you might want to sort the\n// metrics, and in some cases, you must sort the labels, e.g. for consumption by\n// the metric family injection hook of the Prometheus registry.\n//\n// Summaries and histograms are rather special beasts. You would probably not\n// use them in the simple text format anyway. This method can deal with\n// summaries and histograms if they are presented in exactly the way the\n// text.Create function creates them.\n//\n// This method must not be called concurrently. If you want to parse different\n// input concurrently, instantiate a separate Parser for each goroutine.\nfunc (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) {\n\tp.reset(in)\n\tfor nextState := p.startOfLine; nextState != nil; nextState = nextState() {\n\t\t// Magic happens here...\n\t}\n\t// Get rid of empty metric families.\n\tfor k, mf := range p.metricFamiliesByName {\n\t\tif len(mf.GetMetric()) == 0 {\n\t\t\tdelete(p.metricFamiliesByName, k)\n\t\t}\n\t}\n\t// If p.err is io.EOF now, we have run into a premature end of the input\n\t// stream. Turn this error into something nicer and more\n\t// meaningful. (io.EOF is often used as a signal for the legitimate end\n\t// of an input stream.)\n\tif p.err == io.EOF {\n\t\tp.parseError(\"unexpected end of input stream\")\n\t}\n\treturn p.metricFamiliesByName, p.err\n}\n\nfunc (p *TextParser) reset(in io.Reader) {\n\tp.metricFamiliesByName = map[string]*dto.MetricFamily{}\n\tif p.buf == nil {\n\t\tp.buf = bufio.NewReader(in)\n\t} else {\n\t\tp.buf.Reset(in)\n\t}\n\tp.err = nil\n\tp.lineCount = 0\n\tif p.summaries == nil || len(p.summaries) > 0 {\n\t\tp.summaries = map[uint64]*dto.Metric{}\n\t}\n\tif p.histograms == nil || len(p.histograms) > 0 {\n\t\tp.histograms = map[uint64]*dto.Metric{}\n\t}\n\tp.currentQuantile = math.NaN()\n\tp.currentBucket = math.NaN()\n}\n\n// startOfLine represents the state where the next byte read from p.buf is the\n// start of a line (or whitespace leading up to it).\nfunc (p *TextParser) startOfLine() stateFn {\n\tp.lineCount++\n\tif p.skipBlankTab(); p.err != nil {\n\t\t// End of input reached. This is the only case where\n\t\t// that is not an error but a signal that we are done.\n\t\tp.err = nil\n\t\treturn nil\n\t}\n\tswitch p.currentByte {\n\tcase '#':\n\t\treturn p.startComment\n\tcase '\\n':\n\t\treturn p.startOfLine // Empty line, start the next one.\n\t}\n\treturn p.readingMetricName\n}\n\n// startComment represents the state where the next byte read from p.buf is the\n// start of a comment (or whitespace leading up to it).\nfunc (p *TextParser) startComment() stateFn {\n\tif p.skipBlankTab(); p.err != nil {\n\t\treturn nil // Unexpected end of input.\n\t}\n\tif p.currentByte == '\\n' {\n\t\treturn p.startOfLine\n\t}\n\tif p.readTokenUntilWhitespace(); p.err != nil {\n\t\treturn nil // Unexpected end of input.\n\t}\n\t// If we have hit the end of line already, there is nothing left\n\t// to do. 
This is not considered a syntax error.\n\tif p.currentByte == '\\n' {\n\t\treturn p.startOfLine\n\t}\n\tkeyword := p.currentToken.String()\n\tif keyword != \"HELP\" && keyword != \"TYPE\" {\n\t\t// Generic comment, ignore by fast forwarding to end of line.\n\t\tfor p.currentByte != '\\n' {\n\t\t\tif p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {\n\t\t\t\treturn nil // Unexpected end of input.\n\t\t\t}\n\t\t}\n\t\treturn p.startOfLine\n\t}\n\t// There is something. Next has to be a metric name.\n\tif p.skipBlankTab(); p.err != nil {\n\t\treturn nil // Unexpected end of input.\n\t}\n\tif p.readTokenAsMetricName(); p.err != nil {\n\t\treturn nil // Unexpected end of input.\n\t}\n\tif p.currentByte == '\\n' {\n\t\t// At the end of the line already.\n\t\t// Again, this is not considered a syntax error.\n\t\treturn p.startOfLine\n\t}\n\tif !isBlankOrTab(p.currentByte) {\n\t\tp.parseError(\"invalid metric name in comment\")\n\t\treturn nil\n\t}\n\tp.setOrCreateCurrentMF()\n\tif p.skipBlankTab(); p.err != nil {\n\t\treturn nil // Unexpected end of input.\n\t}\n\tif p.currentByte == '\\n' {\n\t\t// At the end of the line already.\n\t\t// Again, this is not considered a syntax error.\n\t\treturn p.startOfLine\n\t}\n\tswitch keyword {\n\tcase \"HELP\":\n\t\treturn p.readingHelp\n\tcase \"TYPE\":\n\t\treturn p.readingType\n\t}\n\tpanic(fmt.Sprintf(\"code error: unexpected keyword %q\", keyword))\n}\n\n// readingMetricName represents the state where the last byte read (now in\n// p.currentByte) is the first byte of a metric name.\nfunc (p *TextParser) readingMetricName() stateFn {\n\tif p.readTokenAsMetricName(); p.err != nil {\n\t\treturn nil\n\t}\n\tif p.currentToken.Len() == 0 {\n\t\tp.parseError(\"invalid metric name\")\n\t\treturn nil\n\t}\n\tp.setOrCreateCurrentMF()\n\t// Now is the time to fix the type if it hasn't happened yet.\n\tif p.currentMF.Type == nil {\n\t\tp.currentMF.Type = dto.MetricType_UNTYPED.Enum()\n\t}\n\tp.currentMetric = &dto.Metric{}\n\t// Do not append the newly created currentMetric to\n\t// currentMF.Metric right now. First wait if this is a summary,\n\t// and the metric exists already, which we can only know after\n\t// having read all the labels.\n\tif p.skipBlankTabIfCurrentBlankTab(); p.err != nil {\n\t\treturn nil // Unexpected end of input.\n\t}\n\treturn p.readingLabels\n}\n\n// readingLabels represents the state where the last byte read (now in\n// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the\n// first byte of the value (otherwise).\nfunc (p *TextParser) readingLabels() stateFn {\n\t// Summaries/histograms are special. 
We have to reset the\n\t// currentLabels map, currentQuantile and currentBucket before starting to\n\t// read labels.\n\tif p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM {\n\t\tp.currentLabels = map[string]string{}\n\t\tp.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName()\n\t\tp.currentQuantile = math.NaN()\n\t\tp.currentBucket = math.NaN()\n\t}\n\tif p.currentByte != '{' {\n\t\treturn p.readingValue\n\t}\n\treturn p.startLabelName\n}\n\n// startLabelName represents the state where the next byte read from p.buf is\n// the start of a label name (or whitespace leading up to it).\nfunc (p *TextParser) startLabelName() stateFn {\n\tif p.skipBlankTab(); p.err != nil {\n\t\treturn nil // Unexpected end of input.\n\t}\n\tif p.currentByte == '}' {\n\t\tif p.skipBlankTab(); p.err != nil {\n\t\t\treturn nil // Unexpected end of input.\n\t\t}\n\t\treturn p.readingValue\n\t}\n\tif p.readTokenAsLabelName(); p.err != nil {\n\t\treturn nil // Unexpected end of input.\n\t}\n\tif p.currentToken.Len() == 0 {\n\t\tp.parseError(fmt.Sprintf(\"invalid label name for metric %q\", p.currentMF.GetName()))\n\t\treturn nil\n\t}\n\tp.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())}\n\tif p.currentLabelPair.GetName() == string(model.MetricNameLabel) {\n\t\tp.parseError(fmt.Sprintf(\"label name %q is reserved\", model.MetricNameLabel))\n\t\treturn nil\n\t}\n\t// Special summary/histogram treatment. Don't add 'quantile' and 'le'\n\t// labels to 'real' labels.\n\tif !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) &&\n\t\t!(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) {\n\t\tp.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair)\n\t}\n\tif p.skipBlankTabIfCurrentBlankTab(); p.err != nil {\n\t\treturn nil // Unexpected end of input.\n\t}\n\tif p.currentByte != '=' {\n\t\tp.parseError(fmt.Sprintf(\"expected '=' after label name, found %q\", p.currentByte))\n\t\treturn nil\n\t}\n\treturn p.startLabelValue\n}\n\n// startLabelValue represents the state where the next byte read from p.buf is\n// the start of a (quoted) label value (or whitespace leading up to it).\nfunc (p *TextParser) startLabelValue() stateFn {\n\tif p.skipBlankTab(); p.err != nil {\n\t\treturn nil // Unexpected end of input.\n\t}\n\tif p.currentByte != '\"' {\n\t\tp.parseError(fmt.Sprintf(\"expected '\\\"' at start of label value, found %q\", p.currentByte))\n\t\treturn nil\n\t}\n\tif p.readTokenAsLabelValue(); p.err != nil {\n\t\treturn nil\n\t}\n\tif !model.LabelValue(p.currentToken.String()).IsValid() {\n\t\tp.parseError(fmt.Sprintf(\"invalid label value %q\", p.currentToken.String()))\n\t\treturn nil\n\t}\n\tp.currentLabelPair.Value = proto.String(p.currentToken.String())\n\t// Special treatment of summaries:\n\t// - Quantile labels are special, will result in dto.Quantile later.\n\t// - Other labels have to be added to currentLabels for signature calculation.\n\tif p.currentMF.GetType() == dto.MetricType_SUMMARY {\n\t\tif p.currentLabelPair.GetName() == model.QuantileLabel {\n\t\t\tif p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {\n\t\t\t\t// Create a more helpful error message.\n\t\t\t\tp.parseError(fmt.Sprintf(\"expected float as value for 'quantile' label, got %q\", p.currentLabelPair.GetValue()))\n\t\t\t\treturn nil\n\t\t\t}\n\t\t} else 
{\n\t\t\tp.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()\n\t\t}\n\t}\n\t// Similar special treatment of histograms.\n\tif p.currentMF.GetType() == dto.MetricType_HISTOGRAM {\n\t\tif p.currentLabelPair.GetName() == model.BucketLabel {\n\t\t\tif p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil {\n\t\t\t\t// Create a more helpful error message.\n\t\t\t\tp.parseError(fmt.Sprintf(\"expected float as value for 'le' label, got %q\", p.currentLabelPair.GetValue()))\n\t\t\t\treturn nil\n\t\t\t}\n\t\t} else {\n\t\t\tp.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue()\n\t\t}\n\t}\n\tif p.skipBlankTab(); p.err != nil {\n\t\treturn nil // Unexpected end of input.\n\t}\n\tswitch p.currentByte {\n\tcase ',':\n\t\treturn p.startLabelName\n\n\tcase '}':\n\t\tif p.skipBlankTab(); p.err != nil {\n\t\t\treturn nil // Unexpected end of input.\n\t\t}\n\t\treturn p.readingValue\n\tdefault:\n\t\tp.parseError(fmt.Sprintf(\"unexpected end of label value %q\", p.currentLabelPair.GetValue()))\n\t\treturn nil\n\t}\n}\n
\n// readingValue represents the state where the last byte read (now in\n// p.currentByte) is the first byte of the sample value (i.e. a float).\nfunc (p *TextParser) readingValue() stateFn {\n\t// When we are here, we have read all the labels, so for the\n\t// special case of a summary/histogram, we can finally find out\n\t// if the metric already exists.\n\tif p.currentMF.GetType() == dto.MetricType_SUMMARY {\n\t\tsignature := model.LabelsToSignature(p.currentLabels)\n\t\tif summary := p.summaries[signature]; summary != nil {\n\t\t\tp.currentMetric = summary\n\t\t} else {\n\t\t\tp.summaries[signature] = p.currentMetric\n\t\t\tp.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)\n\t\t}\n\t} else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {\n\t\tsignature := model.LabelsToSignature(p.currentLabels)\n\t\tif histogram := p.histograms[signature]; histogram != nil {\n\t\t\tp.currentMetric = histogram\n\t\t} else {\n\t\t\tp.histograms[signature] = p.currentMetric\n\t\t\tp.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)\n\t\t}\n\t} else {\n\t\tp.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric)\n\t}\n\tif p.readTokenUntilWhitespace(); p.err != nil {\n\t\treturn nil // Unexpected end of input.\n\t}\n\tvalue, err := strconv.ParseFloat(p.currentToken.String(), 64)\n\tif err != nil {\n\t\t// Create a more helpful error message.\n\t\tp.parseError(fmt.Sprintf(\"expected float as value, got %q\", p.currentToken.String()))\n\t\treturn nil\n\t}\n\tswitch p.currentMF.GetType() {\n\tcase dto.MetricType_COUNTER:\n\t\tp.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)}\n\tcase dto.MetricType_GAUGE:\n\t\tp.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)}\n\tcase dto.MetricType_UNTYPED:\n\t\tp.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)}\n\tcase dto.MetricType_SUMMARY:\n\t\t// *sigh*\n\t\tif p.currentMetric.Summary == nil {\n\t\t\tp.currentMetric.Summary = &dto.Summary{}\n\t\t}\n\t\tswitch {\n\t\tcase p.currentIsSummaryCount:\n\t\t\tp.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value))\n\t\tcase p.currentIsSummarySum:\n\t\t\tp.currentMetric.Summary.SampleSum = proto.Float64(value)\n\t\tcase !math.IsNaN(p.currentQuantile):\n\t\t\tp.currentMetric.Summary.Quantile = append(\n\t\t\t\tp.currentMetric.Summary.Quantile,\n\t\t\t\t&dto.Quantile{\n\t\t\t\t\tQuantile: proto.Float64(p.currentQuantile),\n\t\t\t\t\tValue:    proto.Float64(value),\n\t\t\t\t},\n\t\t\t)\n\t\t}\n
\tcase dto.MetricType_HISTOGRAM:\n\t\t// *sigh*\n\t\tif p.currentMetric.Histogram == nil {\n\t\t\tp.currentMetric.Histogram = &dto.Histogram{}\n\t\t}\n\t\tswitch {\n\t\tcase p.currentIsHistogramCount:\n\t\t\tp.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value))\n\t\tcase p.currentIsHistogramSum:\n\t\t\tp.currentMetric.Histogram.SampleSum = proto.Float64(value)\n\t\tcase !math.IsNaN(p.currentBucket):\n\t\t\tp.currentMetric.Histogram.Bucket = append(\n\t\t\t\tp.currentMetric.Histogram.Bucket,\n\t\t\t\t&dto.Bucket{\n\t\t\t\t\tUpperBound:      proto.Float64(p.currentBucket),\n\t\t\t\t\tCumulativeCount: proto.Uint64(uint64(value)),\n\t\t\t\t},\n\t\t\t)\n\t\t}\n\tdefault:\n\t\tp.err = fmt.Errorf(\"unexpected type for metric name %q\", p.currentMF.GetName())\n\t}\n\tif p.currentByte == '\\n' {\n\t\treturn p.startOfLine\n\t}\n\treturn p.startTimestamp\n}\n
\n// startTimestamp represents the state where the next byte read from p.buf is\n// the start of the timestamp (or whitespace leading up to it).\nfunc (p *TextParser) startTimestamp() stateFn {\n\tif p.skipBlankTab(); p.err != nil {\n\t\treturn nil // Unexpected end of input.\n\t}\n\tif p.readTokenUntilWhitespace(); p.err != nil {\n\t\treturn nil // Unexpected end of input.\n\t}\n\ttimestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64)\n\tif err != nil {\n\t\t// Create a more helpful error message.\n\t\tp.parseError(fmt.Sprintf(\"expected integer as timestamp, got %q\", p.currentToken.String()))\n\t\treturn nil\n\t}\n\tp.currentMetric.TimestampMs = proto.Int64(timestamp)\n\tif p.readTokenUntilNewline(false); p.err != nil {\n\t\treturn nil // Unexpected end of input.\n\t}\n\tif p.currentToken.Len() > 0 {\n\t\tp.parseError(fmt.Sprintf(\"spurious string after timestamp: %q\", p.currentToken.String()))\n\t\treturn nil\n\t}\n\treturn p.startOfLine\n}\n\n// readingHelp represents the state where the last byte read (now in\n// p.currentByte) is the first byte of the docstring after 'HELP'.\nfunc (p *TextParser) readingHelp() stateFn {\n\tif p.currentMF.Help != nil {\n\t\tp.parseError(fmt.Sprintf(\"second HELP line for metric name %q\", p.currentMF.GetName()))\n\t\treturn nil\n\t}\n\t// Rest of line is the docstring.\n\tif p.readTokenUntilNewline(true); p.err != nil {\n\t\treturn nil // Unexpected end of input.\n\t}\n\tp.currentMF.Help = proto.String(p.currentToken.String())\n\treturn p.startOfLine\n}\n
\n// readingType represents the state where the last byte read (now in\n// p.currentByte) is the first byte of the type hint after 'TYPE'.\nfunc (p *TextParser) readingType() stateFn {\n\tif p.currentMF.Type != nil {\n\t\tp.parseError(fmt.Sprintf(\"second TYPE line for metric name %q, or TYPE reported after samples\", p.currentMF.GetName()))\n\t\treturn nil\n\t}\n\t// Rest of line is the type.\n\tif p.readTokenUntilNewline(false); p.err != nil {\n\t\treturn nil // Unexpected end of input.\n\t}\n\tmetricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]\n\tif !ok {\n\t\tp.parseError(fmt.Sprintf(\"unknown metric type %q\", p.currentToken.String()))\n\t\treturn nil\n\t}\n\tp.currentMF.Type = dto.MetricType(metricType).Enum()\n\treturn p.startOfLine\n}\n\n// parseError sets p.err to a ParseError at the current line with the given\n// message.\nfunc (p *TextParser) parseError(msg string) {\n\tp.err = ParseError{\n\t\tLine: p.lineCount,\n\t\tMsg:  msg,\n\t}\n}\n
\n// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte\n// that is neither ' ' nor '\\t'. That byte is left in p.currentByte.\nfunc (p *TextParser) skipBlankTab() {\n\tfor {\n\t\tif p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do\n// anything if p.currentByte is neither ' ' nor '\\t'.\nfunc (p *TextParser) skipBlankTabIfCurrentBlankTab() {\n\tif isBlankOrTab(p.currentByte) {\n\t\tp.skipBlankTab()\n\t}\n}\n\n// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken.  The\n// first byte considered is the byte already read (now in p.currentByte).  The\n// first whitespace byte encountered is still copied into p.currentByte, but not\n// into p.currentToken.\nfunc (p *TextParser) readTokenUntilWhitespace() {\n\tp.currentToken.Reset()\n\tfor p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\\n' {\n\t\tp.currentToken.WriteByte(p.currentByte)\n\t\tp.currentByte, p.err = p.buf.ReadByte()\n\t}\n}\n
\n// readTokenUntilNewline copies bytes from p.buf into p.currentToken.  The first\n// byte considered is the byte already read (now in p.currentByte).  The first\n// newline byte encountered is still copied into p.currentByte, but not into\n// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are\n// recognized: '\\\\' translates into '\\', and '\\n' into a line-feed character. All\n// other escape sequences are invalid and cause an error.\nfunc (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {\n\tp.currentToken.Reset()\n\tescaped := false\n\tfor p.err == nil {\n\t\tif recognizeEscapeSequence && escaped {\n\t\t\tswitch p.currentByte {\n\t\t\tcase '\\\\':\n\t\t\t\tp.currentToken.WriteByte(p.currentByte)\n\t\t\tcase 'n':\n\t\t\t\tp.currentToken.WriteByte('\\n')\n\t\t\tdefault:\n\t\t\t\tp.parseError(fmt.Sprintf(\"invalid escape sequence '\\\\%c'\", p.currentByte))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tescaped = false\n\t\t} else {\n\t\t\tswitch p.currentByte {\n\t\t\tcase '\\n':\n\t\t\t\treturn\n\t\t\tcase '\\\\':\n\t\t\t\tescaped = true\n\t\t\tdefault:\n\t\t\t\tp.currentToken.WriteByte(p.currentByte)\n\t\t\t}\n\t\t}\n\t\tp.currentByte, p.err = p.buf.ReadByte()\n\t}\n}\n
\n// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.\n// The first byte considered is the byte already read (now in p.currentByte).\n// The first byte not part of a metric name is still copied into p.currentByte,\n// but not into p.currentToken.\nfunc (p *TextParser) readTokenAsMetricName() {\n\tp.currentToken.Reset()\n\tif !isValidMetricNameStart(p.currentByte) {\n\t\treturn\n\t}\n\tfor {\n\t\tp.currentToken.WriteByte(p.currentByte)\n\t\tp.currentByte, p.err = p.buf.ReadByte()\n\t\tif p.err != nil || !isValidMetricNameContinuation(p.currentByte) {\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// readTokenAsLabelName copies a label name from p.buf into p.currentToken.\n// The first byte considered is the byte already read (now in p.currentByte).\n// The first byte not part of a label name is still copied into p.currentByte,\n// but not into p.currentToken.\nfunc (p *TextParser) readTokenAsLabelName() {\n\tp.currentToken.Reset()\n\tif !isValidLabelNameStart(p.currentByte) {\n\t\treturn\n\t}\n\tfor {\n\t\tp.currentToken.WriteByte(p.currentByte)\n\t\tp.currentByte, p.err = p.buf.ReadByte()\n\t\tif p.err != nil || !isValidLabelNameContinuation(p.currentByte) {\n\t\t\treturn\n\t\t}\n\t}\n}\n
\n// readTokenAsLabelValue copies a label value from p.buf into p.currentToken.\n// In contrast to the other 'readTokenAs...' functions, which start with the\n// last read byte in p.currentByte, this method ignores p.currentByte and starts\n// with reading a new byte from p.buf. The first byte not part of a label value\n// is still copied into p.currentByte, but not into p.currentToken.\nfunc (p *TextParser) readTokenAsLabelValue() {\n\tp.currentToken.Reset()\n\tescaped := false\n\tfor {\n\t\tif p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {\n\t\t\treturn\n\t\t}\n\t\tif escaped {\n\t\t\tswitch p.currentByte {\n\t\t\tcase '\"', '\\\\':\n\t\t\t\tp.currentToken.WriteByte(p.currentByte)\n\t\t\tcase 'n':\n\t\t\t\tp.currentToken.WriteByte('\\n')\n\t\t\tdefault:\n\t\t\t\tp.parseError(fmt.Sprintf(\"invalid escape sequence '\\\\%c'\", p.currentByte))\n\t\t\t\treturn\n\t\t\t}\n\t\t\tescaped = false\n\t\t\tcontinue\n\t\t}\n\t\tswitch p.currentByte {\n\t\tcase '\"':\n\t\t\treturn\n\t\tcase '\\n':\n\t\t\tp.parseError(fmt.Sprintf(\"label value %q contains unescaped new-line\", p.currentToken.String()))\n\t\t\treturn\n\t\tcase '\\\\':\n\t\t\tescaped = true\n\t\tdefault:\n\t\t\tp.currentToken.WriteByte(p.currentByte)\n\t\t}\n\t}\n}\n
\nfunc (p *TextParser) setOrCreateCurrentMF() {\n\tp.currentIsSummaryCount = false\n\tp.currentIsSummarySum = false\n\tp.currentIsHistogramCount = false\n\tp.currentIsHistogramSum = false\n\tname := p.currentToken.String()\n\tif p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {\n\t\treturn\n\t}\n\t// Check whether this is a _sum or _count for a summary/histogram.\n\tsummaryName := summaryMetricName(name)\n\tif p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {\n\t\tif p.currentMF.GetType() == dto.MetricType_SUMMARY {\n\t\t\tif isCount(name) {\n\t\t\t\tp.currentIsSummaryCount = true\n\t\t\t}\n\t\t\tif isSum(name) {\n\t\t\t\tp.currentIsSummarySum = true\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n\thistogramName := histogramMetricName(name)\n\tif p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {\n\t\tif p.currentMF.GetType() == dto.MetricType_HISTOGRAM {\n\t\t\tif isCount(name) {\n\t\t\t\tp.currentIsHistogramCount = true\n\t\t\t}\n\t\t\tif isSum(name) {\n\t\t\t\tp.currentIsHistogramSum = true\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n\tp.currentMF = &dto.MetricFamily{Name: proto.String(name)}\n\tp.metricFamiliesByName[name] = p.currentMF\n}\n\nfunc isValidLabelNameStart(b byte) bool {\n\treturn (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'\n}\n\nfunc isValidLabelNameContinuation(b byte) bool {\n\treturn isValidLabelNameStart(b) || (b >= '0' && b <= '9')\n}\n\nfunc isValidMetricNameStart(b byte) bool {\n\treturn isValidLabelNameStart(b) || b == ':'\n}\n\nfunc isValidMetricNameContinuation(b byte) bool {\n\treturn isValidLabelNameContinuation(b) || b == ':'\n}\n\nfunc isBlankOrTab(b byte) bool {\n\treturn b == ' ' || b == '\\t'\n}\n\nfunc isCount(name string) bool {\n\treturn len(name) > 6 && name[len(name)-6:] == \"_count\"\n}\n\nfunc isSum(name string) bool {\n\treturn len(name) > 4 && name[len(name)-4:] == \"_sum\"\n}\n\nfunc isBucket(name string) bool {\n\treturn len(name) > 7 && name[len(name)-7:] == \"_bucket\"\n}\n\nfunc summaryMetricName(name string) string {\n\tswitch {\n\tcase isCount(name):\n\t\treturn name[:len(name)-6]\n\tcase isSum(name):\n\t\treturn name[:len(name)-4]\n\tdefault:\n\t\treturn name\n\t}\n}\n\nfunc histogramMetricName(name string) string {\n\tswitch {\n\tcase isCount(name):\n\t\treturn name[:len(name)-6]\n\tcase isSum(name):\n\t\treturn name[:len(name)-4]\n\tcase isBucket(name):\n\t\treturn name[:len(name)-7]\n\tdefault:\n\t\treturn name\n\t}\n}\n"
  },
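  {
    "path": "vendor/github.com/prometheus/common/expfmt/text_parse_example_test.go",
    "content": "// NOTE: Illustrative sketch, not part of upstream prometheus/common. It runs\n// the state-machine parser above over two counter samples and shows that the\n// result is a map keyed by metric name, with both samples collected into a\n// single MetricFamily.\n\npackage expfmt\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\nfunc ExampleTextParser_TextToMetricFamilies() {\n\tin := `# HELP http_requests_total Total HTTP requests served.\n# TYPE http_requests_total counter\nhttp_requests_total{code=\"200\"} 1027\nhttp_requests_total{code=\"404\"} 3\n`\n\tvar parser TextParser // The zero value is ready to use.\n\tfamilies, err := parser.TextToMetricFamilies(strings.NewReader(in))\n\tif err != nil {\n\t\tfmt.Println(\"parse error:\", err)\n\t\treturn\n\t}\n\tmf := families[\"http_requests_total\"]\n\tfmt.Println(len(families), mf.GetType(), len(mf.GetMetric()))\n\t// Output:\n\t// 1 COUNTER 2\n}\n"
  },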
  {
    "path": "vendor/github.com/prometheus/common/expfmt/text_parse_test.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage expfmt\n\nimport (\n\t\"math\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/golang/protobuf/proto\"\n\tdto \"github.com/prometheus/client_model/go\"\n)\n\nfunc testTextParse(t testing.TB) {\n\tvar scenarios = []struct {\n\t\tin  string\n\t\tout []*dto.MetricFamily\n\t}{\n\t\t// 0: Empty lines as input.\n\t\t{\n\t\t\tin: `\n\n`,\n\t\t\tout: []*dto.MetricFamily{},\n\t\t},\n\t\t// 1: Minimal case.\n\t\t{\n\t\t\tin: `\nminimal_metric 1.234\nanother_metric -3e3 103948\n# Even that:\nno_labels{} 3\n# HELP line for non-existing metric will be ignored.\n`,\n\t\t\tout: []*dto.MetricFamily{\n\t\t\t\t&dto.MetricFamily{\n\t\t\t\t\tName: proto.String(\"minimal_metric\"),\n\t\t\t\t\tType: dto.MetricType_UNTYPED.Enum(),\n\t\t\t\t\tMetric: []*dto.Metric{\n\t\t\t\t\t\t&dto.Metric{\n\t\t\t\t\t\t\tUntyped: &dto.Untyped{\n\t\t\t\t\t\t\t\tValue: proto.Float64(1.234),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t&dto.MetricFamily{\n\t\t\t\t\tName: proto.String(\"another_metric\"),\n\t\t\t\t\tType: dto.MetricType_UNTYPED.Enum(),\n\t\t\t\t\tMetric: []*dto.Metric{\n\t\t\t\t\t\t&dto.Metric{\n\t\t\t\t\t\t\tUntyped: &dto.Untyped{\n\t\t\t\t\t\t\t\tValue: proto.Float64(-3e3),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tTimestampMs: proto.Int64(103948),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t&dto.MetricFamily{\n\t\t\t\t\tName: proto.String(\"no_labels\"),\n\t\t\t\t\tType: dto.MetricType_UNTYPED.Enum(),\n\t\t\t\t\tMetric: []*dto.Metric{\n\t\t\t\t\t\t&dto.Metric{\n\t\t\t\t\t\t\tUntyped: &dto.Untyped{\n\t\t\t\t\t\t\t\tValue: proto.Float64(3),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// 2: Counters & gauges, docstrings, various whitespace, escape sequences.\n\t\t{\n\t\t\tin: `\n# A normal comment.\n#\n# TYPE name counter\nname{labelname=\"val1\",basename=\"basevalue\"} NaN\nname {labelname=\"val2\",basename=\"base\\\"v\\\\al\\nue\"} 0.23 1234567890\n# HELP name two-line\\n doc  str\\\\ing\n\n # HELP  name2  \tdoc str\"ing 2\n  #    TYPE    name2 gauge\nname2{labelname=\"val2\"\t,basename   =   \"basevalue2\"\t\t} +Inf 54321\nname2{ labelname = \"val1\" , }-Inf\n`,\n\t\t\tout: []*dto.MetricFamily{\n\t\t\t\t&dto.MetricFamily{\n\t\t\t\t\tName: proto.String(\"name\"),\n\t\t\t\t\tHelp: proto.String(\"two-line\\n doc  str\\\\ing\"),\n\t\t\t\t\tType: dto.MetricType_COUNTER.Enum(),\n\t\t\t\t\tMetric: []*dto.Metric{\n\t\t\t\t\t\t&dto.Metric{\n\t\t\t\t\t\t\tLabel: []*dto.LabelPair{\n\t\t\t\t\t\t\t\t&dto.LabelPair{\n\t\t\t\t\t\t\t\t\tName:  proto.String(\"labelname\"),\n\t\t\t\t\t\t\t\t\tValue: proto.String(\"val1\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t&dto.LabelPair{\n\t\t\t\t\t\t\t\t\tName:  proto.String(\"basename\"),\n\t\t\t\t\t\t\t\t\tValue: proto.String(\"basevalue\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tCounter: &dto.Counter{\n\t\t\t\t\t\t\t\tValue: 
proto.Float64(math.NaN()),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&dto.Metric{\n\t\t\t\t\t\t\tLabel: []*dto.LabelPair{\n\t\t\t\t\t\t\t\t&dto.LabelPair{\n\t\t\t\t\t\t\t\t\tName:  proto.String(\"labelname\"),\n\t\t\t\t\t\t\t\t\tValue: proto.String(\"val2\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t&dto.LabelPair{\n\t\t\t\t\t\t\t\t\tName:  proto.String(\"basename\"),\n\t\t\t\t\t\t\t\t\tValue: proto.String(\"base\\\"v\\\\al\\nue\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tCounter: &dto.Counter{\n\t\t\t\t\t\t\t\tValue: proto.Float64(.23),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tTimestampMs: proto.Int64(1234567890),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t&dto.MetricFamily{\n\t\t\t\t\tName: proto.String(\"name2\"),\n\t\t\t\t\tHelp: proto.String(\"doc str\\\"ing 2\"),\n\t\t\t\t\tType: dto.MetricType_GAUGE.Enum(),\n\t\t\t\t\tMetric: []*dto.Metric{\n\t\t\t\t\t\t&dto.Metric{\n\t\t\t\t\t\t\tLabel: []*dto.LabelPair{\n\t\t\t\t\t\t\t\t&dto.LabelPair{\n\t\t\t\t\t\t\t\t\tName:  proto.String(\"labelname\"),\n\t\t\t\t\t\t\t\t\tValue: proto.String(\"val2\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t&dto.LabelPair{\n\t\t\t\t\t\t\t\t\tName:  proto.String(\"basename\"),\n\t\t\t\t\t\t\t\t\tValue: proto.String(\"basevalue2\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tGauge: &dto.Gauge{\n\t\t\t\t\t\t\t\tValue: proto.Float64(math.Inf(+1)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tTimestampMs: proto.Int64(54321),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&dto.Metric{\n\t\t\t\t\t\t\tLabel: []*dto.LabelPair{\n\t\t\t\t\t\t\t\t&dto.LabelPair{\n\t\t\t\t\t\t\t\t\tName:  proto.String(\"labelname\"),\n\t\t\t\t\t\t\t\t\tValue: proto.String(\"val1\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tGauge: &dto.Gauge{\n\t\t\t\t\t\t\t\tValue: proto.Float64(math.Inf(-1)),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// 3: The evil summary, mixed with other types and funny comments.\n\t\t{\n\t\t\tin: `\n# TYPE my_summary summary\nmy_summary{n1=\"val1\",quantile=\"0.5\"} 110\ndecoy -1 -2\nmy_summary{n1=\"val1\",quantile=\"0.9\"} 140 1\nmy_summary_count{n1=\"val1\"} 42\n# Latest timestamp wins in case of a summary.\nmy_summary_sum{n1=\"val1\"} 4711 2\nfake_sum{n1=\"val1\"} 2001\n# TYPE another_summary summary\nanother_summary_count{n2=\"val2\",n1=\"val1\"} 20\nmy_summary_count{n2=\"val2\",n1=\"val1\"} 5 5\nanother_summary{n1=\"val1\",n2=\"val2\",quantile=\".3\"} -1.2\nmy_summary_sum{n1=\"val2\"} 08 15\nmy_summary{n1=\"val3\", quantile=\"0.2\"} 4711\n  my_summary{n1=\"val1\",n2=\"val2\",quantile=\"-12.34\",} NaN\n# some\n# funny comments\n# HELP \n# HELP\n# HELP my_summary\n# HELP my_summary \n`,\n\t\t\tout: []*dto.MetricFamily{\n\t\t\t\t&dto.MetricFamily{\n\t\t\t\t\tName: proto.String(\"fake_sum\"),\n\t\t\t\t\tType: dto.MetricType_UNTYPED.Enum(),\n\t\t\t\t\tMetric: []*dto.Metric{\n\t\t\t\t\t\t&dto.Metric{\n\t\t\t\t\t\t\tLabel: []*dto.LabelPair{\n\t\t\t\t\t\t\t\t&dto.LabelPair{\n\t\t\t\t\t\t\t\t\tName:  proto.String(\"n1\"),\n\t\t\t\t\t\t\t\t\tValue: proto.String(\"val1\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tUntyped: &dto.Untyped{\n\t\t\t\t\t\t\t\tValue: proto.Float64(2001),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t&dto.MetricFamily{\n\t\t\t\t\tName: proto.String(\"decoy\"),\n\t\t\t\t\tType: dto.MetricType_UNTYPED.Enum(),\n\t\t\t\t\tMetric: []*dto.Metric{\n\t\t\t\t\t\t&dto.Metric{\n\t\t\t\t\t\t\tUntyped: &dto.Untyped{\n\t\t\t\t\t\t\t\tValue: proto.Float64(-1),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tTimestampMs: 
proto.Int64(-2),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t&dto.MetricFamily{\n\t\t\t\t\tName: proto.String(\"my_summary\"),\n\t\t\t\t\tType: dto.MetricType_SUMMARY.Enum(),\n\t\t\t\t\tMetric: []*dto.Metric{\n\t\t\t\t\t\t&dto.Metric{\n\t\t\t\t\t\t\tLabel: []*dto.LabelPair{\n\t\t\t\t\t\t\t\t&dto.LabelPair{\n\t\t\t\t\t\t\t\t\tName:  proto.String(\"n1\"),\n\t\t\t\t\t\t\t\t\tValue: proto.String(\"val1\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSummary: &dto.Summary{\n\t\t\t\t\t\t\t\tSampleCount: proto.Uint64(42),\n\t\t\t\t\t\t\t\tSampleSum:   proto.Float64(4711),\n\t\t\t\t\t\t\t\tQuantile: []*dto.Quantile{\n\t\t\t\t\t\t\t\t\t&dto.Quantile{\n\t\t\t\t\t\t\t\t\t\tQuantile: proto.Float64(0.5),\n\t\t\t\t\t\t\t\t\t\tValue:    proto.Float64(110),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t&dto.Quantile{\n\t\t\t\t\t\t\t\t\t\tQuantile: proto.Float64(0.9),\n\t\t\t\t\t\t\t\t\t\tValue:    proto.Float64(140),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tTimestampMs: proto.Int64(2),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&dto.Metric{\n\t\t\t\t\t\t\tLabel: []*dto.LabelPair{\n\t\t\t\t\t\t\t\t&dto.LabelPair{\n\t\t\t\t\t\t\t\t\tName:  proto.String(\"n2\"),\n\t\t\t\t\t\t\t\t\tValue: proto.String(\"val2\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t&dto.LabelPair{\n\t\t\t\t\t\t\t\t\tName:  proto.String(\"n1\"),\n\t\t\t\t\t\t\t\t\tValue: proto.String(\"val1\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSummary: &dto.Summary{\n\t\t\t\t\t\t\t\tSampleCount: proto.Uint64(5),\n\t\t\t\t\t\t\t\tQuantile: []*dto.Quantile{\n\t\t\t\t\t\t\t\t\t&dto.Quantile{\n\t\t\t\t\t\t\t\t\t\tQuantile: proto.Float64(-12.34),\n\t\t\t\t\t\t\t\t\t\tValue:    proto.Float64(math.NaN()),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tTimestampMs: proto.Int64(5),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&dto.Metric{\n\t\t\t\t\t\t\tLabel: []*dto.LabelPair{\n\t\t\t\t\t\t\t\t&dto.LabelPair{\n\t\t\t\t\t\t\t\t\tName:  proto.String(\"n1\"),\n\t\t\t\t\t\t\t\t\tValue: proto.String(\"val2\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSummary: &dto.Summary{\n\t\t\t\t\t\t\t\tSampleSum: proto.Float64(8),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tTimestampMs: proto.Int64(15),\n\t\t\t\t\t\t},\n\t\t\t\t\t\t&dto.Metric{\n\t\t\t\t\t\t\tLabel: []*dto.LabelPair{\n\t\t\t\t\t\t\t\t&dto.LabelPair{\n\t\t\t\t\t\t\t\t\tName:  proto.String(\"n1\"),\n\t\t\t\t\t\t\t\t\tValue: proto.String(\"val3\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSummary: &dto.Summary{\n\t\t\t\t\t\t\t\tQuantile: []*dto.Quantile{\n\t\t\t\t\t\t\t\t\t&dto.Quantile{\n\t\t\t\t\t\t\t\t\t\tQuantile: proto.Float64(0.2),\n\t\t\t\t\t\t\t\t\t\tValue:    proto.Float64(4711),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t&dto.MetricFamily{\n\t\t\t\t\tName: proto.String(\"another_summary\"),\n\t\t\t\t\tType: dto.MetricType_SUMMARY.Enum(),\n\t\t\t\t\tMetric: []*dto.Metric{\n\t\t\t\t\t\t&dto.Metric{\n\t\t\t\t\t\t\tLabel: []*dto.LabelPair{\n\t\t\t\t\t\t\t\t&dto.LabelPair{\n\t\t\t\t\t\t\t\t\tName:  proto.String(\"n2\"),\n\t\t\t\t\t\t\t\t\tValue: proto.String(\"val2\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t&dto.LabelPair{\n\t\t\t\t\t\t\t\t\tName:  proto.String(\"n1\"),\n\t\t\t\t\t\t\t\t\tValue: proto.String(\"val1\"),\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSummary: &dto.Summary{\n\t\t\t\t\t\t\t\tSampleCount: proto.Uint64(20),\n\t\t\t\t\t\t\t\tQuantile: []*dto.Quantile{\n\t\t\t\t\t\t\t\t\t&dto.Quantile{\n\t\t\t\t\t\t\t\t\t\tQuantile: 
proto.Float64(0.3),\n\t\t\t\t\t\t\t\t\t\tValue:    proto.Float64(-1.2),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t// 4: The histogram.\n\t\t{\n\t\t\tin: `\n# HELP request_duration_microseconds The response latency.\n# TYPE request_duration_microseconds histogram\nrequest_duration_microseconds_bucket{le=\"100\"} 123\nrequest_duration_microseconds_bucket{le=\"120\"} 412\nrequest_duration_microseconds_bucket{le=\"144\"} 592\nrequest_duration_microseconds_bucket{le=\"172.8\"} 1524\nrequest_duration_microseconds_bucket{le=\"+Inf\"} 2693\nrequest_duration_microseconds_sum 1.7560473e+06\nrequest_duration_microseconds_count 2693\n`,\n\t\t\tout: []*dto.MetricFamily{\n\t\t\t\t{\n\t\t\t\t\tName: proto.String(\"request_duration_microseconds\"),\n\t\t\t\t\tHelp: proto.String(\"The response latency.\"),\n\t\t\t\t\tType: dto.MetricType_HISTOGRAM.Enum(),\n\t\t\t\t\tMetric: []*dto.Metric{\n\t\t\t\t\t\t&dto.Metric{\n\t\t\t\t\t\t\tHistogram: &dto.Histogram{\n\t\t\t\t\t\t\t\tSampleCount: proto.Uint64(2693),\n\t\t\t\t\t\t\t\tSampleSum:   proto.Float64(1756047.3),\n\t\t\t\t\t\t\t\tBucket: []*dto.Bucket{\n\t\t\t\t\t\t\t\t\t&dto.Bucket{\n\t\t\t\t\t\t\t\t\t\tUpperBound:      proto.Float64(100),\n\t\t\t\t\t\t\t\t\t\tCumulativeCount: proto.Uint64(123),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t&dto.Bucket{\n\t\t\t\t\t\t\t\t\t\tUpperBound:      proto.Float64(120),\n\t\t\t\t\t\t\t\t\t\tCumulativeCount: proto.Uint64(412),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t&dto.Bucket{\n\t\t\t\t\t\t\t\t\t\tUpperBound:      proto.Float64(144),\n\t\t\t\t\t\t\t\t\t\tCumulativeCount: proto.Uint64(592),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t&dto.Bucket{\n\t\t\t\t\t\t\t\t\t\tUpperBound:      proto.Float64(172.8),\n\t\t\t\t\t\t\t\t\t\tCumulativeCount: proto.Uint64(1524),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t&dto.Bucket{\n\t\t\t\t\t\t\t\t\t\tUpperBound:      proto.Float64(math.Inf(+1)),\n\t\t\t\t\t\t\t\t\t\tCumulativeCount: proto.Uint64(2693),\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor i, scenario := range scenarios {\n\t\tout, err := parser.TextToMetricFamilies(strings.NewReader(scenario.in))\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%d. error: %s\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\tif expected, got := len(scenario.out), len(out); expected != got {\n\t\t\tt.Errorf(\n\t\t\t\t\"%d. expected %d MetricFamilies, got %d\",\n\t\t\t\ti, expected, got,\n\t\t\t)\n\t\t}\n\t\tfor _, expected := range scenario.out {\n\t\t\tgot, ok := out[expected.GetName()]\n\t\t\tif !ok {\n\t\t\t\tt.Errorf(\n\t\t\t\t\t\"%d. expected MetricFamily %q, found none\",\n\t\t\t\t\ti, expected.GetName(),\n\t\t\t\t)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif expected.String() != got.String() {\n\t\t\t\tt.Errorf(\n\t\t\t\t\t\"%d. 
expected MetricFamily %s, got %s\",\n\t\t\t\t\ti, expected, got,\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestTextParse(t *testing.T) {\n\ttestTextParse(t)\n}\n\nfunc BenchmarkTextParse(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\ttestTextParse(b)\n\t}\n}\n\nfunc testTextParseError(t testing.TB) {\n\tvar scenarios = []struct {\n\t\tin  string\n\t\terr string\n\t}{\n\t\t// 0: No new-line at end of input.\n\t\t{\n\t\t\tin: `\nbla 3.14\nblubber 42`,\n\t\t\terr: \"text format parsing error in line 3: unexpected end of input stream\",\n\t\t},\n\t\t// 1: Invalid escape sequence in label value.\n\t\t{\n\t\t\tin:  `metric{label=\"\\t\"} 3.14`,\n\t\t\terr: \"text format parsing error in line 1: invalid escape sequence\",\n\t\t},\n\t\t// 2: Newline in label value.\n\t\t{\n\t\t\tin: `\nmetric{label=\"new\nline\"} 3.14\n`,\n\t\t\terr: `text format parsing error in line 2: label value \"new\" contains unescaped new-line`,\n\t\t},\n\t\t// 3:\n\t\t{\n\t\t\tin:  `metric{@=\"bla\"} 3.14`,\n\t\t\terr: \"text format parsing error in line 1: invalid label name for metric\",\n\t\t},\n\t\t// 4:\n\t\t{\n\t\t\tin:  `metric{__name__=\"bla\"} 3.14`,\n\t\t\terr: `text format parsing error in line 1: label name \"__name__\" is reserved`,\n\t\t},\n\t\t// 5:\n\t\t{\n\t\t\tin:  `metric{label+=\"bla\"} 3.14`,\n\t\t\terr: \"text format parsing error in line 1: expected '=' after label name\",\n\t\t},\n\t\t// 6:\n\t\t{\n\t\t\tin:  `metric{label=bla} 3.14`,\n\t\t\terr: \"text format parsing error in line 1: expected '\\\"' at start of label value\",\n\t\t},\n\t\t// 7:\n\t\t{\n\t\t\tin: `\n# TYPE metric summary\nmetric{quantile=\"bla\"} 3.14\n`,\n\t\t\terr: \"text format parsing error in line 3: expected float as value for 'quantile' label\",\n\t\t},\n\t\t// 8:\n\t\t{\n\t\t\tin:  `metric{label=\"bla\"+} 3.14`,\n\t\t\terr: \"text format parsing error in line 1: unexpected end of label value\",\n\t\t},\n\t\t// 9:\n\t\t{\n\t\t\tin: `metric{label=\"bla\"} 3.14 2.72\n`,\n\t\t\terr: \"text format parsing error in line 1: expected integer as timestamp\",\n\t\t},\n\t\t// 10:\n\t\t{\n\t\t\tin: `metric{label=\"bla\"} 3.14 2 3\n`,\n\t\t\terr: \"text format parsing error in line 1: spurious string after timestamp\",\n\t\t},\n\t\t// 11:\n\t\t{\n\t\t\tin: `metric{label=\"bla\"} blubb\n`,\n\t\t\terr: \"text format parsing error in line 1: expected float as value\",\n\t\t},\n\t\t// 12:\n\t\t{\n\t\t\tin: `\n# HELP metric one\n# HELP metric two\n`,\n\t\t\terr: \"text format parsing error in line 3: second HELP line for metric name\",\n\t\t},\n\t\t// 13:\n\t\t{\n\t\t\tin: `\n# TYPE metric counter\n# TYPE metric untyped\n`,\n\t\t\terr: `text format parsing error in line 3: second TYPE line for metric name \"metric\", or TYPE reported after samples`,\n\t\t},\n\t\t// 14:\n\t\t{\n\t\t\tin: `\nmetric 4.12\n# TYPE metric counter\n`,\n\t\t\terr: `text format parsing error in line 3: second TYPE line for metric name \"metric\", or TYPE reported after samples`,\n\t\t},\n\t\t// 15:\n\t\t{\n\t\t\tin: `\n# TYPE metric bla\n`,\n\t\t\terr: \"text format parsing error in line 2: unknown metric type\",\n\t\t},\n\t\t// 16:\n\t\t{\n\t\t\tin: `\n# TYPE met-ric\n`,\n\t\t\terr: \"text format parsing error in line 2: invalid metric name in comment\",\n\t\t},\n\t\t// 17:\n\t\t{\n\t\t\tin:  `@invalidmetric{label=\"bla\"} 3.14 2`,\n\t\t\terr: \"text format parsing error in line 1: invalid metric name\",\n\t\t},\n\t\t// 18:\n\t\t{\n\t\t\tin:  `{label=\"bla\"} 3.14 2`,\n\t\t\terr: \"text format parsing error in line 1: invalid metric name\",\n\t\t},\n\t\t// 19:\n\t\t{\n\t\t\tin: `\n# TYPE metric histogram\nmetric_bucket{le=\"bla\"} 3.14\n`,\n\t\t\terr: \"text format parsing error in line 3: expected float as value for 'le' label\",\n\t\t},\n\t\t// 20: Invalid UTF-8 in label value.\n\t\t{\n\t\t\tin:  \"metric{l=\\\"\\xbd\\\"} 3.14\\n\",\n\t\t\terr: \"text format parsing error in line 1: invalid label value \\\"\\\\xbd\\\"\",\n\t\t},\n\t}\n\n\tfor i, scenario := range scenarios {\n\t\t_, err := parser.TextToMetricFamilies(strings.NewReader(scenario.in))\n\t\tif err == nil {\n\t\t\tt.Errorf(\"%d. expected error, got nil\", i)\n\t\t\tcontinue\n\t\t}\n\t\tif expected, got := scenario.err, err.Error(); strings.Index(got, expected) != 0 {\n\t\t\tt.Errorf(\n\t\t\t\t\"%d. expected error starting with %q, got %q\",\n\t\t\t\ti, expected, got,\n\t\t\t)\n\t\t}\n\t}\n\n}\n\nfunc TestTextParseError(t *testing.T) {\n\ttestTextParseError(t)\n}\n\nfunc BenchmarkParseError(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\ttestTextParseError(b)\n\t}\n}\n"
  },
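  {
    "path": "vendor/github.com/prometheus/common/expfmt/text_parse_example_test.go",
    "content": "// NOTE: Illustrative sketch added alongside the vendored sources for\n// documentation; it is not part of upstream prometheus/common, and the file\n// name is an assumption. It shows the entry point exercised by the tests\n// above: TextParser.TextToMetricFamilies parses the Prometheus text\n// exposition format into MetricFamily protobufs keyed by metric name.\n\npackage expfmt\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\nfunc ExampleTextParser() {\n\tvar p TextParser\n\tin := \"# TYPE requests_total counter\\nrequests_total{path=\\\"/\\\"} 42\\n\"\n\tfamilies, err := p.TextToMetricFamilies(strings.NewReader(in))\n\tif err != nil {\n\t\tfmt.Println(\"parse error:\", err)\n\t\treturn\n\t}\n\t// The result holds one MetricFamily per metric name.\n\tmf := families[\"requests_total\"]\n\tfmt.Println(mf.GetName(), mf.GetType(), mf.Metric[0].Counter.GetValue())\n\t// Output: requests_total COUNTER 42\n}\n"
  },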
  {
    "path": "vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt",
    "content": "PACKAGE\n\npackage goautoneg\nimport \"bitbucket.org/ww/goautoneg\"\n\nHTTP Content-Type Autonegotiation.\n\nThe functions in this package implement the behaviour specified in\nhttp://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html\n\nCopyright (c) 2011, Open Knowledge Foundation Ltd.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n    Redistributions of source code must retain the above copyright\n    notice, this list of conditions and the following disclaimer.\n\n    Redistributions in binary form must reproduce the above copyright\n    notice, this list of conditions and the following disclaimer in\n    the documentation and/or other materials provided with the\n    distribution.\n\n    Neither the name of the Open Knowledge Foundation Ltd. nor the\n    names of its contributors may be used to endorse or promote\n    products derived from this software without specific prior written\n    permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nHOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\nFUNCTIONS\n\nfunc Negotiate(header string, alternatives []string) (content_type string)\nNegotiate the most appropriate content_type given the accept header\nand a list of alternatives.\n\nfunc ParseAccept(header string) (accept []Accept)\nParse an Accept Header string returning a sorted list\nof clauses\n\n\nTYPES\n\ntype Accept struct {\n    Type, SubType string\n    Q             float32\n    Params        map[string]string\n}\nStructure to represent a clause in an HTTP Accept Header\n\n\nSUBDIRECTORIES\n\n\t.hg\n"
  },
  {
    "path": "vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go",
    "content": "/*\nHTTP Content-Type Autonegotiation.\n\nThe functions in this package implement the behaviour specified in\nhttp://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html\n\nCopyright (c) 2011, Open Knowledge Foundation Ltd.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n    Redistributions of source code must retain the above copyright\n    notice, this list of conditions and the following disclaimer.\n\n    Redistributions in binary form must reproduce the above copyright\n    notice, this list of conditions and the following disclaimer in\n    the documentation and/or other materials provided with the\n    distribution.\n\n    Neither the name of the Open Knowledge Foundation Ltd. nor the\n    names of its contributors may be used to endorse or promote\n    products derived from this software without specific prior written\n    permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nHOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n\n*/\npackage goautoneg\n\nimport (\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n// Structure to represent a clause in an HTTP Accept Header\ntype Accept struct {\n\tType, SubType string\n\tQ             float64\n\tParams        map[string]string\n}\n\n// For internal use, so that we can use the sort interface\ntype accept_slice []Accept\n\nfunc (accept accept_slice) Len() int {\n\tslice := []Accept(accept)\n\treturn len(slice)\n}\n\nfunc (accept accept_slice) Less(i, j int) bool {\n\tslice := []Accept(accept)\n\tai, aj := slice[i], slice[j]\n\tif ai.Q > aj.Q {\n\t\treturn true\n\t}\n\tif ai.Type != \"*\" && aj.Type == \"*\" {\n\t\treturn true\n\t}\n\tif ai.SubType != \"*\" && aj.SubType == \"*\" {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (accept accept_slice) Swap(i, j int) {\n\tslice := []Accept(accept)\n\tslice[i], slice[j] = slice[j], slice[i]\n}\n\n// Parse an Accept Header string returning a sorted list\n// of clauses\nfunc ParseAccept(header string) (accept []Accept) {\n\tparts := strings.Split(header, \",\")\n\taccept = make([]Accept, 0, len(parts))\n\tfor _, part := range parts {\n\t\tpart := strings.Trim(part, \" \")\n\n\t\ta := Accept{}\n\t\ta.Params = make(map[string]string)\n\t\ta.Q = 1.0\n\n\t\tmrp := strings.Split(part, \";\")\n\n\t\tmedia_range := mrp[0]\n\t\tsp := strings.Split(media_range, \"/\")\n\t\ta.Type = strings.Trim(sp[0], \" \")\n\n\t\tswitch {\n\t\tcase len(sp) == 1 && a.Type == \"*\":\n\t\t\ta.SubType = \"*\"\n\t\tcase len(sp) == 2:\n\t\t\ta.SubType = strings.Trim(sp[1], \" \")\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\n\t\tif len(mrp) == 1 {\n\t\t\taccept = append(accept, a)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, param := range mrp[1:] {\n\t\t\tsp := strings.SplitN(param, \"=\", 2)\n\t\t\tif len(sp) != 2 
{\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttoken := strings.Trim(sp[0], \" \")\n\t\t\tif token == \"q\" {\n\t\t\t\ta.Q, _ = strconv.ParseFloat(sp[1], 32)\n\t\t\t} else {\n\t\t\t\ta.Params[token] = strings.Trim(sp[1], \" \")\n\t\t\t}\n\t\t}\n\n\t\taccept = append(accept, a)\n\t}\n\n\tslice := accept_slice(accept)\n\tsort.Sort(slice)\n\n\treturn\n}\n\n// Negotiate the most appropriate content_type given the accept header\n// and a list of alternatives.\nfunc Negotiate(header string, alternatives []string) (content_type string) {\n\tasp := make([][]string, 0, len(alternatives))\n\tfor _, ctype := range alternatives {\n\t\tasp = append(asp, strings.SplitN(ctype, \"/\", 2))\n\t}\n\tfor _, clause := range ParseAccept(header) {\n\t\tfor i, ctsp := range asp {\n\t\t\tif clause.Type == ctsp[0] && clause.SubType == ctsp[1] {\n\t\t\t\tcontent_type = alternatives[i]\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif clause.Type == ctsp[0] && clause.SubType == \"*\" {\n\t\t\t\tcontent_type = alternatives[i]\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif clause.Type == \"*\" && clause.SubType == \"*\" {\n\t\t\t\tcontent_type = alternatives[i]\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go",
    "content": "package goautoneg\n\nimport (\n\t\"testing\"\n)\n\nvar chrome = \"application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5\"\n\nfunc TestParseAccept(t *testing.T) {\n\talternatives := []string{\"text/html\", \"image/png\"}\n\tcontent_type := Negotiate(chrome, alternatives)\n\tif content_type != \"image/png\" {\n\t\tt.Errorf(\"got %s expected image/png\", content_type)\n\t}\n\n\talternatives = []string{\"text/html\", \"text/plain\", \"text/n3\"}\n\tcontent_type = Negotiate(chrome, alternatives)\n\tif content_type != \"text/html\" {\n\t\tt.Errorf(\"got %s expected text/html\", content_type)\n\t}\n\n\talternatives = []string{\"text/n3\", \"text/plain\"}\n\tcontent_type = Negotiate(chrome, alternatives)\n\tif content_type != \"text/plain\" {\n\t\tt.Errorf(\"got %s expected text/plain\", content_type)\n\t}\n\n\talternatives = []string{\"text/n3\", \"application/rdf+xml\"}\n\tcontent_type = Negotiate(chrome, alternatives)\n\tif content_type != \"text/n3\" {\n\t\tt.Errorf(\"got %s expected text/n3\", content_type)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/common/model/alert.go",
    "content": "// Copyright 2013 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage model\n\nimport (\n\t\"fmt\"\n\t\"time\"\n)\n\ntype AlertStatus string\n\nconst (\n\tAlertFiring   AlertStatus = \"firing\"\n\tAlertResolved AlertStatus = \"resolved\"\n)\n\n// Alert is a generic representation of an alert in the Prometheus eco-system.\ntype Alert struct {\n\t// Label value pairs for purpose of aggregation, matching, and disposition\n\t// dispatching. This must minimally include an \"alertname\" label.\n\tLabels LabelSet `json:\"labels\"`\n\n\t// Extra key/value information which does not define alert identity.\n\tAnnotations LabelSet `json:\"annotations\"`\n\n\t// The known time range for this alert. Both ends are optional.\n\tStartsAt     time.Time `json:\"startsAt,omitempty\"`\n\tEndsAt       time.Time `json:\"endsAt,omitempty\"`\n\tGeneratorURL string    `json:\"generatorURL\"`\n}\n\n// Name returns the name of the alert. It is equivalent to the \"alertname\" label.\nfunc (a *Alert) Name() string {\n\treturn string(a.Labels[AlertNameLabel])\n}\n\n// Fingerprint returns a unique hash for the alert. It is equivalent to\n// the fingerprint of the alert's label set.\nfunc (a *Alert) Fingerprint() Fingerprint {\n\treturn a.Labels.Fingerprint()\n}\n\nfunc (a *Alert) String() string {\n\ts := fmt.Sprintf(\"%s[%s]\", a.Name(), a.Fingerprint().String()[:7])\n\tif a.Resolved() {\n\t\treturn s + \"[resolved]\"\n\t}\n\treturn s + \"[active]\"\n}\n\n// Resolved returns true iff the activity interval ended in the past.\nfunc (a *Alert) Resolved() bool {\n\treturn a.ResolvedAt(time.Now())\n}\n\n// ResolvedAt returns true off the activity interval ended before\n// the given timestamp.\nfunc (a *Alert) ResolvedAt(ts time.Time) bool {\n\tif a.EndsAt.IsZero() {\n\t\treturn false\n\t}\n\treturn !a.EndsAt.After(ts)\n}\n\n// Status returns the status of the alert.\nfunc (a *Alert) Status() AlertStatus {\n\tif a.Resolved() {\n\t\treturn AlertResolved\n\t}\n\treturn AlertFiring\n}\n\n// Validate checks whether the alert data is inconsistent.\nfunc (a *Alert) Validate() error {\n\tif a.StartsAt.IsZero() {\n\t\treturn fmt.Errorf(\"start time missing\")\n\t}\n\tif !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) {\n\t\treturn fmt.Errorf(\"start time must be before end time\")\n\t}\n\tif err := a.Labels.Validate(); err != nil {\n\t\treturn fmt.Errorf(\"invalid label set: %s\", err)\n\t}\n\tif len(a.Labels) == 0 {\n\t\treturn fmt.Errorf(\"at least one label pair required\")\n\t}\n\tif err := a.Annotations.Validate(); err != nil {\n\t\treturn fmt.Errorf(\"invalid annotations: %s\", err)\n\t}\n\treturn nil\n}\n\n// Alert is a list of alerts that can be sorted in chronological order.\ntype Alerts []*Alert\n\nfunc (as Alerts) Len() int      { return len(as) }\nfunc (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] }\n\nfunc (as Alerts) Less(i, j int) bool {\n\tif as[i].StartsAt.Before(as[j].StartsAt) {\n\t\treturn true\n\t}\n\tif as[i].EndsAt.Before(as[j].EndsAt) 
{\n\t\treturn true\n\t}\n\treturn as[i].Fingerprint() < as[j].Fingerprint()\n}\n\n// HasFiring returns true iff one of the alerts is not resolved.\nfunc (as Alerts) HasFiring() bool {\n\tfor _, a := range as {\n\t\tif !a.Resolved() {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// Status returns StatusFiring iff at least one of the alerts is firing.\nfunc (as Alerts) Status() AlertStatus {\n\tif as.HasFiring() {\n\t\treturn AlertFiring\n\t}\n\treturn AlertResolved\n}\n"
  },
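  {
    "path": "vendor/github.com/prometheus/common/model/alert_example_test.go",
    "content": "// NOTE: Illustrative sketch added alongside the vendored sources for\n// documentation; it is not part of upstream prometheus/common, and the file\n// name is an assumption. It demonstrates the lifecycle helpers from alert.go\n// with a fixed clock so the output is deterministic: ResolvedAt reports\n// whether the activity interval ended at or before the given timestamp.\n\npackage model\n\nimport (\n\t\"fmt\"\n\t\"time\"\n)\n\nfunc ExampleAlert_ResolvedAt() {\n\tstart := time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC)\n\ta := &Alert{\n\t\tLabels:   LabelSet{AlertNameLabel: \"HighErrorRate\"},\n\t\tStartsAt: start,\n\t\tEndsAt:   start.Add(5 * time.Minute),\n\t}\n\t// Still active one minute in, resolved once EndsAt is reached.\n\tfmt.Println(a.ResolvedAt(start.Add(1 * time.Minute)))\n\tfmt.Println(a.ResolvedAt(start.Add(5 * time.Minute)))\n\t// Output:\n\t// false\n\t// true\n}\n"
  },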
  {
    "path": "vendor/github.com/prometheus/common/model/alert_test.go",
    "content": "// Copyright 2013 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage model\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestAlertValidate(t *testing.T) {\n\tts := time.Now()\n\n\tvar cases = []struct {\n\t\talert *Alert\n\t\terr   string\n\t}{\n\t\t{\n\t\t\talert: &Alert{\n\t\t\t\tLabels:   LabelSet{\"a\": \"b\"},\n\t\t\t\tStartsAt: ts,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\talert: &Alert{\n\t\t\t\tLabels: LabelSet{\"a\": \"b\"},\n\t\t\t},\n\t\t\terr: \"start time missing\",\n\t\t},\n\t\t{\n\t\t\talert: &Alert{\n\t\t\t\tLabels:   LabelSet{\"a\": \"b\"},\n\t\t\t\tStartsAt: ts,\n\t\t\t\tEndsAt:   ts,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\talert: &Alert{\n\t\t\t\tLabels:   LabelSet{\"a\": \"b\"},\n\t\t\t\tStartsAt: ts,\n\t\t\t\tEndsAt:   ts.Add(1 * time.Minute),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\talert: &Alert{\n\t\t\t\tLabels:   LabelSet{\"a\": \"b\"},\n\t\t\t\tStartsAt: ts,\n\t\t\t\tEndsAt:   ts.Add(-1 * time.Minute),\n\t\t\t},\n\t\t\terr: \"start time must be before end time\",\n\t\t},\n\t\t{\n\t\t\talert: &Alert{\n\t\t\t\tStartsAt: ts,\n\t\t\t},\n\t\t\terr: \"at least one label pair required\",\n\t\t},\n\t\t{\n\t\t\talert: &Alert{\n\t\t\t\tLabels:   LabelSet{\"a\": \"b\", \"!bad\": \"label\"},\n\t\t\t\tStartsAt: ts,\n\t\t\t},\n\t\t\terr: \"invalid label set: invalid name\",\n\t\t},\n\t\t{\n\t\t\talert: &Alert{\n\t\t\t\tLabels:   LabelSet{\"a\": \"b\", \"bad\": \"\\xfflabel\"},\n\t\t\t\tStartsAt: ts,\n\t\t\t},\n\t\t\terr: \"invalid label set: invalid value\",\n\t\t},\n\t\t{\n\t\t\talert: &Alert{\n\t\t\t\tLabels:      LabelSet{\"a\": \"b\"},\n\t\t\t\tAnnotations: LabelSet{\"!bad\": \"label\"},\n\t\t\t\tStartsAt:    ts,\n\t\t\t},\n\t\t\terr: \"invalid annotations: invalid name\",\n\t\t},\n\t\t{\n\t\t\talert: &Alert{\n\t\t\t\tLabels:      LabelSet{\"a\": \"b\"},\n\t\t\t\tAnnotations: LabelSet{\"bad\": \"\\xfflabel\"},\n\t\t\t\tStartsAt:    ts,\n\t\t\t},\n\t\t\terr: \"invalid annotations: invalid value\",\n\t\t},\n\t}\n\n\tfor i, c := range cases {\n\t\terr := c.alert.Validate()\n\t\tif err == nil {\n\t\t\tif c.err == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tt.Errorf(\"%d. Expected error %q but got none\", i, c.err)\n\t\t\tcontinue\n\t\t}\n\t\tif c.err == \"\" && err != nil {\n\t\t\tt.Errorf(\"%d. Expected no error but got %q\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !strings.Contains(err.Error(), c.err) {\n\t\t\tt.Errorf(\"%d. Expected error to contain %q but got %q\", i, c.err, err)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/common/model/fingerprinting.go",
    "content": "// Copyright 2013 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage model\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n)\n\n// Fingerprint provides a hash-capable representation of a Metric.\n// For our purposes, FNV-1A 64-bit is used.\ntype Fingerprint uint64\n\n// FingerprintFromString transforms a string representation into a Fingerprint.\nfunc FingerprintFromString(s string) (Fingerprint, error) {\n\tnum, err := strconv.ParseUint(s, 16, 64)\n\treturn Fingerprint(num), err\n}\n\n// ParseFingerprint parses the input string into a fingerprint.\nfunc ParseFingerprint(s string) (Fingerprint, error) {\n\tnum, err := strconv.ParseUint(s, 16, 64)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn Fingerprint(num), nil\n}\n\nfunc (f Fingerprint) String() string {\n\treturn fmt.Sprintf(\"%016x\", uint64(f))\n}\n\n// Fingerprints represents a collection of Fingerprint subject to a given\n// natural sorting scheme. It implements sort.Interface.\ntype Fingerprints []Fingerprint\n\n// Len implements sort.Interface.\nfunc (f Fingerprints) Len() int {\n\treturn len(f)\n}\n\n// Less implements sort.Interface.\nfunc (f Fingerprints) Less(i, j int) bool {\n\treturn f[i] < f[j]\n}\n\n// Swap implements sort.Interface.\nfunc (f Fingerprints) Swap(i, j int) {\n\tf[i], f[j] = f[j], f[i]\n}\n\n// FingerprintSet is a set of Fingerprints.\ntype FingerprintSet map[Fingerprint]struct{}\n\n// Equal returns true if both sets contain the same elements (and not more).\nfunc (s FingerprintSet) Equal(o FingerprintSet) bool {\n\tif len(s) != len(o) {\n\t\treturn false\n\t}\n\n\tfor k := range s {\n\t\tif _, ok := o[k]; !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\n// Intersection returns the elements contained in both sets.\nfunc (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet {\n\tmyLength, otherLength := len(s), len(o)\n\tif myLength == 0 || otherLength == 0 {\n\t\treturn FingerprintSet{}\n\t}\n\n\tsubSet := s\n\tsuperSet := o\n\n\tif otherLength < myLength {\n\t\tsubSet = o\n\t\tsuperSet = s\n\t}\n\n\tout := FingerprintSet{}\n\n\tfor k := range subSet {\n\t\tif _, ok := superSet[k]; ok {\n\t\t\tout[k] = struct{}{}\n\t\t}\n\t}\n\n\treturn out\n}\n"
  },
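  {
    "path": "vendor/github.com/prometheus/common/model/fingerprinting_example_test.go",
    "content": "// NOTE: Illustrative sketch added alongside the vendored sources for\n// documentation; it is not part of upstream prometheus/common, and the file\n// name is an assumption. A Fingerprint renders as a 16-digit, zero-padded\n// hexadecimal string, and ParseFingerprint inverts that representation.\n\npackage model\n\nimport \"fmt\"\n\nfunc ExampleFingerprint_String() {\n\tfp := Fingerprint(255)\n\ts := fp.String()\n\tback, err := ParseFingerprint(s)\n\tfmt.Println(s, back == fp, err)\n\t// Output: 00000000000000ff true <nil>\n}\n"
  },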
  {
    "path": "vendor/github.com/prometheus/common/model/fnv.go",
    "content": "// Copyright 2015 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage model\n\n// Inline and byte-free variant of hash/fnv's fnv64a.\n\nconst (\n\toffset64 = 14695981039346656037\n\tprime64  = 1099511628211\n)\n\n// hashNew initializies a new fnv64a hash value.\nfunc hashNew() uint64 {\n\treturn offset64\n}\n\n// hashAdd adds a string to a fnv64a hash value, returning the updated hash.\nfunc hashAdd(h uint64, s string) uint64 {\n\tfor i := 0; i < len(s); i++ {\n\t\th ^= uint64(s[i])\n\t\th *= prime64\n\t}\n\treturn h\n}\n\n// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash.\nfunc hashAddByte(h uint64, b byte) uint64 {\n\th ^= uint64(b)\n\th *= prime64\n\treturn h\n}\n"
  },
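  {
    "path": "vendor/github.com/prometheus/common/model/fnv_equivalence_test.go",
    "content": "// NOTE: Illustrative sketch added alongside the vendored sources for\n// documentation; it is not part of upstream prometheus/common, and the file\n// name is an assumption. It checks that the inlined, allocation-free FNV-1a\n// helpers in fnv.go agree with the standard library's hash/fnv 64-bit\n// variant for string input.\n\npackage model\n\nimport (\n\t\"hash/fnv\"\n\t\"testing\"\n)\n\nfunc TestHashAddMatchesStdlibFNV64a(t *testing.T) {\n\tfor _, s := range []string{\"\", \"a\", \"alertname\", \"__name__\"} {\n\t\twant := fnv.New64a()\n\t\twant.Write([]byte(s))\n\t\tif got := hashAdd(hashNew(), s); got != want.Sum64() {\n\t\t\tt.Errorf(\"hashAdd(hashNew(), %q) = %d, want %d\", s, got, want.Sum64())\n\t\t}\n\t}\n}\n"
  },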
  {
    "path": "vendor/github.com/prometheus/common/model/labels.go",
    "content": "// Copyright 2013 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage model\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"strings\"\n\t\"unicode/utf8\"\n)\n\nconst (\n\t// AlertNameLabel is the name of the label containing the an alert's name.\n\tAlertNameLabel = \"alertname\"\n\n\t// ExportedLabelPrefix is the prefix to prepend to the label names present in\n\t// exported metrics if a label of the same name is added by the server.\n\tExportedLabelPrefix = \"exported_\"\n\n\t// MetricNameLabel is the label name indicating the metric name of a\n\t// timeseries.\n\tMetricNameLabel = \"__name__\"\n\n\t// SchemeLabel is the name of the label that holds the scheme on which to\n\t// scrape a target.\n\tSchemeLabel = \"__scheme__\"\n\n\t// AddressLabel is the name of the label that holds the address of\n\t// a scrape target.\n\tAddressLabel = \"__address__\"\n\n\t// MetricsPathLabel is the name of the label that holds the path on which to\n\t// scrape a target.\n\tMetricsPathLabel = \"__metrics_path__\"\n\n\t// ReservedLabelPrefix is a prefix which is not legal in user-supplied\n\t// label names.\n\tReservedLabelPrefix = \"__\"\n\n\t// MetaLabelPrefix is a prefix for labels that provide meta information.\n\t// Labels with this prefix are used for intermediate label processing and\n\t// will not be attached to time series.\n\tMetaLabelPrefix = \"__meta_\"\n\n\t// TmpLabelPrefix is a prefix for temporary labels as part of relabelling.\n\t// Labels with this prefix are used for intermediate label processing and\n\t// will not be attached to time series. This is reserved for use in\n\t// Prometheus configuration files by users.\n\tTmpLabelPrefix = \"__tmp_\"\n\n\t// ParamLabelPrefix is a prefix for labels that provide URL parameters\n\t// used to scrape a target.\n\tParamLabelPrefix = \"__param_\"\n\n\t// JobLabel is the label name indicating the job from which a timeseries\n\t// was scraped.\n\tJobLabel = \"job\"\n\n\t// InstanceLabel is the label name used for the instance label.\n\tInstanceLabel = \"instance\"\n\n\t// BucketLabel is used for the label that defines the upper bound of a\n\t// bucket of a histogram (\"le\" -> \"less or equal\").\n\tBucketLabel = \"le\"\n\n\t// QuantileLabel is used for the label that defines the quantile in a\n\t// summary.\n\tQuantileLabel = \"quantile\"\n)\n\n// LabelNameRE is a regular expression matching valid label names. Note that the\n// IsValid method of LabelName performs the same check but faster than a match\n// with this regular expression.\nvar LabelNameRE = regexp.MustCompile(\"^[a-zA-Z_][a-zA-Z0-9_]*$\")\n\n// A LabelName is a key for a LabelSet or Metric.  It has a value associated\n// therewith.\ntype LabelName string\n\n// IsValid is true iff the label name matches the pattern of LabelNameRE. 
This\n// method, however, does not use LabelNameRE for the check but a much faster\n// hardcoded implementation.\nfunc (ln LabelName) IsValid() bool {\n\tif len(ln) == 0 {\n\t\treturn false\n\t}\n\tfor i, b := range ln {\n\t\tif !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// UnmarshalYAML implements the yaml.Unmarshaler interface.\nfunc (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error {\n\tvar s string\n\tif err := unmarshal(&s); err != nil {\n\t\treturn err\n\t}\n\tif !LabelName(s).IsValid() {\n\t\treturn fmt.Errorf(\"%q is not a valid label name\", s)\n\t}\n\t*ln = LabelName(s)\n\treturn nil\n}\n\n// UnmarshalJSON implements the json.Unmarshaler interface.\nfunc (ln *LabelName) UnmarshalJSON(b []byte) error {\n\tvar s string\n\tif err := json.Unmarshal(b, &s); err != nil {\n\t\treturn err\n\t}\n\tif !LabelName(s).IsValid() {\n\t\treturn fmt.Errorf(\"%q is not a valid label name\", s)\n\t}\n\t*ln = LabelName(s)\n\treturn nil\n}\n\n// LabelNames is a sortable LabelName slice. In implements sort.Interface.\ntype LabelNames []LabelName\n\nfunc (l LabelNames) Len() int {\n\treturn len(l)\n}\n\nfunc (l LabelNames) Less(i, j int) bool {\n\treturn l[i] < l[j]\n}\n\nfunc (l LabelNames) Swap(i, j int) {\n\tl[i], l[j] = l[j], l[i]\n}\n\nfunc (l LabelNames) String() string {\n\tlabelStrings := make([]string, 0, len(l))\n\tfor _, label := range l {\n\t\tlabelStrings = append(labelStrings, string(label))\n\t}\n\treturn strings.Join(labelStrings, \", \")\n}\n\n// A LabelValue is an associated value for a LabelName.\ntype LabelValue string\n\n// IsValid returns true iff the string is a valid UTF8.\nfunc (lv LabelValue) IsValid() bool {\n\treturn utf8.ValidString(string(lv))\n}\n\n// LabelValues is a sortable LabelValue slice. It implements sort.Interface.\ntype LabelValues []LabelValue\n\nfunc (l LabelValues) Len() int {\n\treturn len(l)\n}\n\nfunc (l LabelValues) Less(i, j int) bool {\n\treturn string(l[i]) < string(l[j])\n}\n\nfunc (l LabelValues) Swap(i, j int) {\n\tl[i], l[j] = l[j], l[i]\n}\n\n// LabelPair pairs a name with a value.\ntype LabelPair struct {\n\tName  LabelName\n\tValue LabelValue\n}\n\n// LabelPairs is a sortable slice of LabelPair pointers. It implements\n// sort.Interface.\ntype LabelPairs []*LabelPair\n\nfunc (l LabelPairs) Len() int {\n\treturn len(l)\n}\n\nfunc (l LabelPairs) Less(i, j int) bool {\n\tswitch {\n\tcase l[i].Name > l[j].Name:\n\t\treturn false\n\tcase l[i].Name < l[j].Name:\n\t\treturn true\n\tcase l[i].Value > l[j].Value:\n\t\treturn false\n\tcase l[i].Value < l[j].Value:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\nfunc (l LabelPairs) Swap(i, j int) {\n\tl[i], l[j] = l[j], l[i]\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/common/model/labels_test.go",
    "content": "// Copyright 2013 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage model\n\nimport (\n\t\"sort\"\n\t\"testing\"\n)\n\nfunc testLabelNames(t testing.TB) {\n\tvar scenarios = []struct {\n\t\tin  LabelNames\n\t\tout LabelNames\n\t}{\n\t\t{\n\t\t\tin:  LabelNames{\"ZZZ\", \"zzz\"},\n\t\t\tout: LabelNames{\"ZZZ\", \"zzz\"},\n\t\t},\n\t\t{\n\t\t\tin:  LabelNames{\"aaa\", \"AAA\"},\n\t\t\tout: LabelNames{\"AAA\", \"aaa\"},\n\t\t},\n\t}\n\n\tfor i, scenario := range scenarios {\n\t\tsort.Sort(scenario.in)\n\n\t\tfor j, expected := range scenario.out {\n\t\t\tif expected != scenario.in[j] {\n\t\t\t\tt.Errorf(\"%d.%d expected %s, got %s\", i, j, expected, scenario.in[j])\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestLabelNames(t *testing.T) {\n\ttestLabelNames(t)\n}\n\nfunc BenchmarkLabelNames(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\ttestLabelNames(b)\n\t}\n}\n\nfunc testLabelValues(t testing.TB) {\n\tvar scenarios = []struct {\n\t\tin  LabelValues\n\t\tout LabelValues\n\t}{\n\t\t{\n\t\t\tin:  LabelValues{\"ZZZ\", \"zzz\"},\n\t\t\tout: LabelValues{\"ZZZ\", \"zzz\"},\n\t\t},\n\t\t{\n\t\t\tin:  LabelValues{\"aaa\", \"AAA\"},\n\t\t\tout: LabelValues{\"AAA\", \"aaa\"},\n\t\t},\n\t}\n\n\tfor i, scenario := range scenarios {\n\t\tsort.Sort(scenario.in)\n\n\t\tfor j, expected := range scenario.out {\n\t\t\tif expected != scenario.in[j] {\n\t\t\t\tt.Errorf(\"%d.%d expected %s, got %s\", i, j, expected, scenario.in[j])\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestLabelValues(t *testing.T) {\n\ttestLabelValues(t)\n}\n\nfunc BenchmarkLabelValues(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\ttestLabelValues(b)\n\t}\n}\n\nfunc TestLabelNameIsValid(t *testing.T) {\n\tvar scenarios = []struct {\n\t\tln    LabelName\n\t\tvalid bool\n\t}{\n\t\t{\n\t\t\tln:    \"Avalid_23name\",\n\t\t\tvalid: true,\n\t\t},\n\t\t{\n\t\t\tln:    \"_Avalid_23name\",\n\t\t\tvalid: true,\n\t\t},\n\t\t{\n\t\t\tln:    \"1valid_23name\",\n\t\t\tvalid: false,\n\t\t},\n\t\t{\n\t\t\tln:    \"avalid_23name\",\n\t\t\tvalid: true,\n\t\t},\n\t\t{\n\t\t\tln:    \"Ava:lid_23name\",\n\t\t\tvalid: false,\n\t\t},\n\t\t{\n\t\t\tln:    \"a lid_23name\",\n\t\t\tvalid: false,\n\t\t},\n\t\t{\n\t\t\tln:    \":leading_colon\",\n\t\t\tvalid: false,\n\t\t},\n\t\t{\n\t\t\tln:    \"colon:in:the:middle\",\n\t\t\tvalid: false,\n\t\t},\n\t}\n\n\tfor _, s := range scenarios {\n\t\tif s.ln.IsValid() != s.valid {\n\t\t\tt.Errorf(\"Expected %v for %q using IsValid method\", s.valid, s.ln)\n\t\t}\n\t\tif LabelNameRE.MatchString(string(s.ln)) != s.valid {\n\t\t\tt.Errorf(\"Expected %v for %q using regexp match\", s.valid, s.ln)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/common/model/labelset.go",
    "content": "// Copyright 2013 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage model\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n)\n\n// A LabelSet is a collection of LabelName and LabelValue pairs.  The LabelSet\n// may be fully-qualified down to the point where it may resolve to a single\n// Metric in the data store or not.  All operations that occur within the realm\n// of a LabelSet can emit a vector of Metric entities to which the LabelSet may\n// match.\ntype LabelSet map[LabelName]LabelValue\n\n// Validate checks whether all names and values in the label set\n// are valid.\nfunc (ls LabelSet) Validate() error {\n\tfor ln, lv := range ls {\n\t\tif !ln.IsValid() {\n\t\t\treturn fmt.Errorf(\"invalid name %q\", ln)\n\t\t}\n\t\tif !lv.IsValid() {\n\t\t\treturn fmt.Errorf(\"invalid value %q\", lv)\n\t\t}\n\t}\n\treturn nil\n}\n\n// Equal returns true iff both label sets have exactly the same key/value pairs.\nfunc (ls LabelSet) Equal(o LabelSet) bool {\n\tif len(ls) != len(o) {\n\t\treturn false\n\t}\n\tfor ln, lv := range ls {\n\t\tolv, ok := o[ln]\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\t\tif olv != lv {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// Before compares the metrics, using the following criteria:\n//\n// If m has fewer labels than o, it is before o. If it has more, it is not.\n//\n// If the number of labels is the same, the superset of all label names is\n// sorted alphanumerically. The first differing label pair found in that order\n// determines the outcome: If the label does not exist at all in m, then m is\n// before o, and vice versa. 
Otherwise the label value is compared\n// alphanumerically.\n//\n// If m and o are equal, the method returns false.\nfunc (ls LabelSet) Before(o LabelSet) bool {\n\tif len(ls) < len(o) {\n\t\treturn true\n\t}\n\tif len(ls) > len(o) {\n\t\treturn false\n\t}\n\n\tlns := make(LabelNames, 0, len(ls)+len(o))\n\tfor ln := range ls {\n\t\tlns = append(lns, ln)\n\t}\n\tfor ln := range o {\n\t\tlns = append(lns, ln)\n\t}\n\t// It's probably not worth it to de-dup lns.\n\tsort.Sort(lns)\n\tfor _, ln := range lns {\n\t\tmlv, ok := ls[ln]\n\t\tif !ok {\n\t\t\treturn true\n\t\t}\n\t\tolv, ok := o[ln]\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\t\tif mlv < olv {\n\t\t\treturn true\n\t\t}\n\t\tif mlv > olv {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn false\n}\n\n// Clone returns a copy of the label set.\nfunc (ls LabelSet) Clone() LabelSet {\n\tlsn := make(LabelSet, len(ls))\n\tfor ln, lv := range ls {\n\t\tlsn[ln] = lv\n\t}\n\treturn lsn\n}\n\n// Merge is a helper function to non-destructively merge two label sets.\nfunc (l LabelSet) Merge(other LabelSet) LabelSet {\n\tresult := make(LabelSet, len(l))\n\n\tfor k, v := range l {\n\t\tresult[k] = v\n\t}\n\n\tfor k, v := range other {\n\t\tresult[k] = v\n\t}\n\n\treturn result\n}\n\nfunc (l LabelSet) String() string {\n\tlstrs := make([]string, 0, len(l))\n\tfor l, v := range l {\n\t\tlstrs = append(lstrs, fmt.Sprintf(\"%s=%q\", l, v))\n\t}\n\n\tsort.Strings(lstrs)\n\treturn fmt.Sprintf(\"{%s}\", strings.Join(lstrs, \", \"))\n}\n\n// Fingerprint returns the LabelSet's fingerprint.\nfunc (ls LabelSet) Fingerprint() Fingerprint {\n\treturn labelSetToFingerprint(ls)\n}\n\n// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing\n// algorithm, which is, however, more susceptible to hash collisions.\nfunc (ls LabelSet) FastFingerprint() Fingerprint {\n\treturn labelSetToFastFingerprint(ls)\n}\n\n// UnmarshalJSON implements the json.Unmarshaler interface.\nfunc (l *LabelSet) UnmarshalJSON(b []byte) error {\n\tvar m map[LabelName]LabelValue\n\tif err := json.Unmarshal(b, &m); err != nil {\n\t\treturn err\n\t}\n\t// encoding/json only unmarshals maps of the form map[string]T. It treats\n\t// LabelName as a string and does not call its UnmarshalJSON method.\n\t// Thus, we have to replicate the behavior here.\n\tfor ln := range m {\n\t\tif !ln.IsValid() {\n\t\t\treturn fmt.Errorf(\"%q is not a valid label name\", ln)\n\t\t}\n\t}\n\t*l = LabelSet(m)\n\treturn nil\n}\n"
  },
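  {
    "path": "vendor/github.com/prometheus/common/model/labelset_example_test.go",
    "content": "// NOTE: Illustrative sketch added alongside the vendored sources for\n// documentation; it is not part of upstream prometheus/common, and the file\n// name is an assumption. Merge copies the receiver and then applies the\n// argument, so on conflicting label names the argument's value wins; the\n// String form sorts label pairs alphabetically.\n\npackage model\n\nimport \"fmt\"\n\nfunc ExampleLabelSet_Merge() {\n\tbase := LabelSet{\"job\": \"api\", \"instance\": \"a\"}\n\toverride := LabelSet{\"instance\": \"b\"}\n\tfmt.Println(base.Merge(override))\n\t// Output: {instance=\"b\", job=\"api\"}\n}\n"
  },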
  {
    "path": "vendor/github.com/prometheus/common/model/metric.go",
    "content": "// Copyright 2013 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage model\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n)\n\nvar (\n\tseparator = []byte{0}\n\t// MetricNameRE is a regular expression matching valid metric\n\t// names. Note that the IsValidMetricName function performs the same\n\t// check but faster than a match with this regular expression.\n\tMetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`)\n)\n\n// A Metric is similar to a LabelSet, but the key difference is that a Metric is\n// a singleton and refers to one and only one stream of samples.\ntype Metric LabelSet\n\n// Equal compares the metrics.\nfunc (m Metric) Equal(o Metric) bool {\n\treturn LabelSet(m).Equal(LabelSet(o))\n}\n\n// Before compares the metrics' underlying label sets.\nfunc (m Metric) Before(o Metric) bool {\n\treturn LabelSet(m).Before(LabelSet(o))\n}\n\n// Clone returns a copy of the Metric.\nfunc (m Metric) Clone() Metric {\n\tclone := make(Metric, len(m))\n\tfor k, v := range m {\n\t\tclone[k] = v\n\t}\n\treturn clone\n}\n\nfunc (m Metric) String() string {\n\tmetricName, hasName := m[MetricNameLabel]\n\tnumLabels := len(m) - 1\n\tif !hasName {\n\t\tnumLabels = len(m)\n\t}\n\tlabelStrings := make([]string, 0, numLabels)\n\tfor label, value := range m {\n\t\tif label != MetricNameLabel {\n\t\t\tlabelStrings = append(labelStrings, fmt.Sprintf(\"%s=%q\", label, value))\n\t\t}\n\t}\n\n\tswitch numLabels {\n\tcase 0:\n\t\tif hasName {\n\t\t\treturn string(metricName)\n\t\t}\n\t\treturn \"{}\"\n\tdefault:\n\t\tsort.Strings(labelStrings)\n\t\treturn fmt.Sprintf(\"%s{%s}\", metricName, strings.Join(labelStrings, \", \"))\n\t}\n}\n\n// Fingerprint returns a Metric's Fingerprint.\nfunc (m Metric) Fingerprint() Fingerprint {\n\treturn LabelSet(m).Fingerprint()\n}\n\n// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing\n// algorithm, which is, however, more susceptible to hash collisions.\nfunc (m Metric) FastFingerprint() Fingerprint {\n\treturn LabelSet(m).FastFingerprint()\n}\n\n// IsValidMetricName returns true iff name matches the pattern of MetricNameRE.\n// This function, however, does not use MetricNameRE for the check but a much\n// faster hardcoded implementation.\nfunc IsValidMetricName(n LabelValue) bool {\n\tif len(n) == 0 {\n\t\treturn false\n\t}\n\tfor i, b := range n {\n\t\tif !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n"
  },
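  {
    "path": "vendor/github.com/prometheus/common/model/metric_example_test.go",
    "content": "// NOTE: Illustrative sketch added alongside the vendored sources for\n// documentation; it is not part of upstream prometheus/common, and the file\n// name is an assumption. Metric.String prints the __name__ label (when\n// present) followed by the remaining labels sorted alphabetically.\n\npackage model\n\nimport \"fmt\"\n\nfunc ExampleMetric_String() {\n\tm := Metric{\n\t\tMetricNameLabel: \"http_requests_total\",\n\t\t\"method\":        \"GET\",\n\t\t\"code\":          \"200\",\n\t}\n\tfmt.Println(m)\n\t// Output: http_requests_total{code=\"200\", method=\"GET\"}\n}\n"
  },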
  {
    "path": "vendor/github.com/prometheus/common/model/metric_test.go",
    "content": "// Copyright 2013 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage model\n\nimport \"testing\"\n\nfunc testMetric(t testing.TB) {\n\tvar scenarios = []struct {\n\t\tinput           LabelSet\n\t\tfingerprint     Fingerprint\n\t\tfastFingerprint Fingerprint\n\t}{\n\t\t{\n\t\t\tinput:           LabelSet{},\n\t\t\tfingerprint:     14695981039346656037,\n\t\t\tfastFingerprint: 14695981039346656037,\n\t\t},\n\t\t{\n\t\t\tinput: LabelSet{\n\t\t\t\t\"first_name\":   \"electro\",\n\t\t\t\t\"occupation\":   \"robot\",\n\t\t\t\t\"manufacturer\": \"westinghouse\",\n\t\t\t},\n\t\t\tfingerprint:     5911716720268894962,\n\t\t\tfastFingerprint: 11310079640881077873,\n\t\t},\n\t\t{\n\t\t\tinput: LabelSet{\n\t\t\t\t\"x\": \"y\",\n\t\t\t},\n\t\t\tfingerprint:     8241431561484471700,\n\t\t\tfastFingerprint: 13948396922932177635,\n\t\t},\n\t\t{\n\t\t\tinput: LabelSet{\n\t\t\t\t\"a\": \"bb\",\n\t\t\t\t\"b\": \"c\",\n\t\t\t},\n\t\t\tfingerprint:     3016285359649981711,\n\t\t\tfastFingerprint: 3198632812309449502,\n\t\t},\n\t\t{\n\t\t\tinput: LabelSet{\n\t\t\t\t\"a\":  \"b\",\n\t\t\t\t\"bb\": \"c\",\n\t\t\t},\n\t\t\tfingerprint:     7122421792099404749,\n\t\t\tfastFingerprint: 5774953389407657638,\n\t\t},\n\t}\n\n\tfor i, scenario := range scenarios {\n\t\tinput := Metric(scenario.input)\n\n\t\tif scenario.fingerprint != input.Fingerprint() {\n\t\t\tt.Errorf(\"%d. expected %d, got %d\", i, scenario.fingerprint, input.Fingerprint())\n\t\t}\n\t\tif scenario.fastFingerprint != input.FastFingerprint() {\n\t\t\tt.Errorf(\"%d. expected %d, got %d\", i, scenario.fastFingerprint, input.FastFingerprint())\n\t\t}\n\t}\n}\n\nfunc TestMetric(t *testing.T) {\n\ttestMetric(t)\n}\n\nfunc BenchmarkMetric(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\ttestMetric(b)\n\t}\n}\n\nfunc TestMetricNameIsValid(t *testing.T) {\n\tvar scenarios = []struct {\n\t\tmn    LabelValue\n\t\tvalid bool\n\t}{\n\t\t{\n\t\t\tmn:    \"Avalid_23name\",\n\t\t\tvalid: true,\n\t\t},\n\t\t{\n\t\t\tmn:    \"_Avalid_23name\",\n\t\t\tvalid: true,\n\t\t},\n\t\t{\n\t\t\tmn:    \"1valid_23name\",\n\t\t\tvalid: false,\n\t\t},\n\t\t{\n\t\t\tmn:    \"avalid_23name\",\n\t\t\tvalid: true,\n\t\t},\n\t\t{\n\t\t\tmn:    \"Ava:lid_23name\",\n\t\t\tvalid: true,\n\t\t},\n\t\t{\n\t\t\tmn:    \"a lid_23name\",\n\t\t\tvalid: false,\n\t\t},\n\t\t{\n\t\t\tmn:    \":leading_colon\",\n\t\t\tvalid: true,\n\t\t},\n\t\t{\n\t\t\tmn:    \"colon:in:the:middle\",\n\t\t\tvalid: true,\n\t\t},\n\t}\n\n\tfor _, s := range scenarios {\n\t\tif IsValidMetricName(s.mn) != s.valid {\n\t\t\tt.Errorf(\"Expected %v for %q using IsValidMetricName function\", s.valid, s.mn)\n\t\t}\n\t\tif MetricNameRE.MatchString(string(s.mn)) != s.valid {\n\t\t\tt.Errorf(\"Expected %v for %q using regexp matching\", s.valid, s.mn)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/common/model/model.go",
    "content": "// Copyright 2013 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// Package model contains common data structures that are shared across\n// Prometheus components and libraries.\npackage model\n"
  },
  {
    "path": "vendor/github.com/prometheus/common/model/signature.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage model\n\nimport (\n\t\"sort\"\n)\n\n// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is\n// used to separate label names, label values, and other strings from each other\n// when calculating their combined hash value (aka signature aka fingerprint).\nconst SeparatorByte byte = 255\n\nvar (\n\t// cache the signature of an empty label set.\n\temptyLabelSignature = hashNew()\n)\n\n// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a\n// given label set. (Collisions are possible but unlikely if the number of label\n// sets the function is applied to is small.)\nfunc LabelsToSignature(labels map[string]string) uint64 {\n\tif len(labels) == 0 {\n\t\treturn emptyLabelSignature\n\t}\n\n\tlabelNames := make([]string, 0, len(labels))\n\tfor labelName := range labels {\n\t\tlabelNames = append(labelNames, labelName)\n\t}\n\tsort.Strings(labelNames)\n\n\tsum := hashNew()\n\tfor _, labelName := range labelNames {\n\t\tsum = hashAdd(sum, labelName)\n\t\tsum = hashAddByte(sum, SeparatorByte)\n\t\tsum = hashAdd(sum, labels[labelName])\n\t\tsum = hashAddByte(sum, SeparatorByte)\n\t}\n\treturn sum\n}\n\n// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as\n// parameter (rather than a label map) and returns a Fingerprint.\nfunc labelSetToFingerprint(ls LabelSet) Fingerprint {\n\tif len(ls) == 0 {\n\t\treturn Fingerprint(emptyLabelSignature)\n\t}\n\n\tlabelNames := make(LabelNames, 0, len(ls))\n\tfor labelName := range ls {\n\t\tlabelNames = append(labelNames, labelName)\n\t}\n\tsort.Sort(labelNames)\n\n\tsum := hashNew()\n\tfor _, labelName := range labelNames {\n\t\tsum = hashAdd(sum, string(labelName))\n\t\tsum = hashAddByte(sum, SeparatorByte)\n\t\tsum = hashAdd(sum, string(ls[labelName]))\n\t\tsum = hashAddByte(sum, SeparatorByte)\n\t}\n\treturn Fingerprint(sum)\n}\n\n// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a\n// faster and less allocation-heavy hash function, which is more susceptible to\n// create hash collisions. Therefore, collision detection should be applied.\nfunc labelSetToFastFingerprint(ls LabelSet) Fingerprint {\n\tif len(ls) == 0 {\n\t\treturn Fingerprint(emptyLabelSignature)\n\t}\n\n\tvar result uint64\n\tfor labelName, labelValue := range ls {\n\t\tsum := hashNew()\n\t\tsum = hashAdd(sum, string(labelName))\n\t\tsum = hashAddByte(sum, SeparatorByte)\n\t\tsum = hashAdd(sum, string(labelValue))\n\t\tresult ^= sum\n\t}\n\treturn Fingerprint(result)\n}\n\n// SignatureForLabels works like LabelsToSignature but takes a Metric as\n// parameter (rather than a label map) and only includes the labels with the\n// specified LabelNames into the signature calculation. 
The labels passed in\n// will be sorted by this function.\nfunc SignatureForLabels(m Metric, labels ...LabelName) uint64 {\n\tif len(labels) == 0 {\n\t\treturn emptyLabelSignature\n\t}\n\n\tsort.Sort(LabelNames(labels))\n\n\tsum := hashNew()\n\tfor _, label := range labels {\n\t\tsum = hashAdd(sum, string(label))\n\t\tsum = hashAddByte(sum, SeparatorByte)\n\t\tsum = hashAdd(sum, string(m[label]))\n\t\tsum = hashAddByte(sum, SeparatorByte)\n\t}\n\treturn sum\n}\n\n// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as\n// parameter (rather than a label map) and excludes the labels with any of the\n// specified LabelNames from the signature calculation.\nfunc SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 {\n\tif len(m) == 0 {\n\t\treturn emptyLabelSignature\n\t}\n\n\tlabelNames := make(LabelNames, 0, len(m))\n\tfor labelName := range m {\n\t\tif _, exclude := labels[labelName]; !exclude {\n\t\t\tlabelNames = append(labelNames, labelName)\n\t\t}\n\t}\n\tif len(labelNames) == 0 {\n\t\treturn emptyLabelSignature\n\t}\n\tsort.Sort(labelNames)\n\n\tsum := hashNew()\n\tfor _, labelName := range labelNames {\n\t\tsum = hashAdd(sum, string(labelName))\n\t\tsum = hashAddByte(sum, SeparatorByte)\n\t\tsum = hashAdd(sum, string(m[labelName]))\n\t\tsum = hashAddByte(sum, SeparatorByte)\n\t}\n\treturn sum\n}\n"
  },
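  {
    "path": "examples/signature/main.go",
    "content": "// Hypothetical usage sketch (this file is not part of the upstream\n// prometheus/common package; the path and label values are illustrative\n// only): it demonstrates that LabelsToSignature sorts label names before\n// hashing, so the signature is independent of map iteration order, and that\n// SignatureForLabels folds only the listed label names into the hash.\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/prometheus/common/model\"\n)\n\nfunc main() {\n\t// Identical label pairs written in different orders hash identically.\n\ta := model.LabelsToSignature(map[string]string{\"job\": \"api\", \"instance\": \"a1\"})\n\tb := model.LabelsToSignature(map[string]string{\"instance\": \"a1\", \"job\": \"api\"})\n\tfmt.Println(a == b) // true\n\n\t// The extra \"env\" label does not change the result because it is not\n\t// among the listed label names.\n\tm := model.Metric{\"job\": \"api\", \"instance\": \"a1\", \"env\": \"prod\"}\n\ttrimmed := model.Metric{\"job\": \"api\", \"instance\": \"a1\"}\n\tfmt.Println(model.SignatureForLabels(m, \"job\", \"instance\") == model.SignatureForLabels(trimmed, \"job\", \"instance\")) // true\n}\n"
  },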
  {
    "path": "vendor/github.com/prometheus/common/model/signature_test.go",
    "content": "// Copyright 2014 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage model\n\nimport (\n\t\"runtime\"\n\t\"sync\"\n\t\"testing\"\n)\n\nfunc TestLabelsToSignature(t *testing.T) {\n\tvar scenarios = []struct {\n\t\tin  map[string]string\n\t\tout uint64\n\t}{\n\t\t{\n\t\t\tin:  map[string]string{},\n\t\t\tout: 14695981039346656037,\n\t\t},\n\t\t{\n\t\t\tin:  map[string]string{\"name\": \"garland, briggs\", \"fear\": \"love is not enough\"},\n\t\t\tout: 5799056148416392346,\n\t\t},\n\t}\n\n\tfor i, scenario := range scenarios {\n\t\tactual := LabelsToSignature(scenario.in)\n\n\t\tif actual != scenario.out {\n\t\t\tt.Errorf(\"%d. expected %d, got %d\", i, scenario.out, actual)\n\t\t}\n\t}\n}\n\nfunc TestMetricToFingerprint(t *testing.T) {\n\tvar scenarios = []struct {\n\t\tin  LabelSet\n\t\tout Fingerprint\n\t}{\n\t\t{\n\t\t\tin:  LabelSet{},\n\t\t\tout: 14695981039346656037,\n\t\t},\n\t\t{\n\t\t\tin:  LabelSet{\"name\": \"garland, briggs\", \"fear\": \"love is not enough\"},\n\t\t\tout: 5799056148416392346,\n\t\t},\n\t}\n\n\tfor i, scenario := range scenarios {\n\t\tactual := labelSetToFingerprint(scenario.in)\n\n\t\tif actual != scenario.out {\n\t\t\tt.Errorf(\"%d. expected %d, got %d\", i, scenario.out, actual)\n\t\t}\n\t}\n}\n\nfunc TestMetricToFastFingerprint(t *testing.T) {\n\tvar scenarios = []struct {\n\t\tin  LabelSet\n\t\tout Fingerprint\n\t}{\n\t\t{\n\t\t\tin:  LabelSet{},\n\t\t\tout: 14695981039346656037,\n\t\t},\n\t\t{\n\t\t\tin:  LabelSet{\"name\": \"garland, briggs\", \"fear\": \"love is not enough\"},\n\t\t\tout: 12952432476264840823,\n\t\t},\n\t}\n\n\tfor i, scenario := range scenarios {\n\t\tactual := labelSetToFastFingerprint(scenario.in)\n\n\t\tif actual != scenario.out {\n\t\t\tt.Errorf(\"%d. 
expected %d, got %d\", i, scenario.out, actual)\n\t\t}\n\t}\n}\n\nfunc TestSignatureForLabels(t *testing.T) {\n\tvar scenarios = []struct {\n\t\tin     Metric\n\t\tlabels LabelNames\n\t\tout    uint64\n\t}{\n\t\t{\n\t\t\tin:     Metric{},\n\t\t\tlabels: nil,\n\t\t\tout:    14695981039346656037,\n\t\t},\n\t\t{\n\t\t\tin:     Metric{},\n\t\t\tlabels: LabelNames{\"empty\"},\n\t\t\tout:    7187873163539638612,\n\t\t},\n\t\t{\n\t\t\tin:     Metric{\"name\": \"garland, briggs\", \"fear\": \"love is not enough\"},\n\t\t\tlabels: LabelNames{\"empty\"},\n\t\t\tout:    7187873163539638612,\n\t\t},\n\t\t{\n\t\t\tin:     Metric{\"name\": \"garland, briggs\", \"fear\": \"love is not enough\"},\n\t\t\tlabels: LabelNames{\"fear\", \"name\"},\n\t\t\tout:    5799056148416392346,\n\t\t},\n\t\t{\n\t\t\tin:     Metric{\"name\": \"garland, briggs\", \"fear\": \"love is not enough\", \"foo\": \"bar\"},\n\t\t\tlabels: LabelNames{\"fear\", \"name\"},\n\t\t\tout:    5799056148416392346,\n\t\t},\n\t\t{\n\t\t\tin:     Metric{\"name\": \"garland, briggs\", \"fear\": \"love is not enough\"},\n\t\t\tlabels: LabelNames{},\n\t\t\tout:    14695981039346656037,\n\t\t},\n\t\t{\n\t\t\tin:     Metric{\"name\": \"garland, briggs\", \"fear\": \"love is not enough\"},\n\t\t\tlabels: nil,\n\t\t\tout:    14695981039346656037,\n\t\t},\n\t}\n\n\tfor i, scenario := range scenarios {\n\t\tactual := SignatureForLabels(scenario.in, scenario.labels...)\n\n\t\tif actual != scenario.out {\n\t\t\tt.Errorf(\"%d. expected %d, got %d\", i, scenario.out, actual)\n\t\t}\n\t}\n}\n\nfunc TestSignatureWithoutLabels(t *testing.T) {\n\tvar scenarios = []struct {\n\t\tin     Metric\n\t\tlabels map[LabelName]struct{}\n\t\tout    uint64\n\t}{\n\t\t{\n\t\t\tin:     Metric{},\n\t\t\tlabels: nil,\n\t\t\tout:    14695981039346656037,\n\t\t},\n\t\t{\n\t\t\tin:     Metric{\"name\": \"garland, briggs\", \"fear\": \"love is not enough\"},\n\t\t\tlabels: map[LabelName]struct{}{\"fear\": struct{}{}, \"name\": struct{}{}},\n\t\t\tout:    14695981039346656037,\n\t\t},\n\t\t{\n\t\t\tin:     Metric{\"name\": \"garland, briggs\", \"fear\": \"love is not enough\", \"foo\": \"bar\"},\n\t\t\tlabels: map[LabelName]struct{}{\"foo\": struct{}{}},\n\t\t\tout:    5799056148416392346,\n\t\t},\n\t\t{\n\t\t\tin:     Metric{\"name\": \"garland, briggs\", \"fear\": \"love is not enough\"},\n\t\t\tlabels: map[LabelName]struct{}{},\n\t\t\tout:    5799056148416392346,\n\t\t},\n\t\t{\n\t\t\tin:     Metric{\"name\": \"garland, briggs\", \"fear\": \"love is not enough\"},\n\t\t\tlabels: nil,\n\t\t\tout:    5799056148416392346,\n\t\t},\n\t}\n\n\tfor i, scenario := range scenarios {\n\t\tactual := SignatureWithoutLabels(scenario.in, scenario.labels)\n\n\t\tif actual != scenario.out {\n\t\t\tt.Errorf(\"%d. 
expected %d, got %d\", i, scenario.out, actual)\n\t\t}\n\t}\n}\n\nfunc benchmarkLabelToSignature(b *testing.B, l map[string]string, e uint64) {\n\tfor i := 0; i < b.N; i++ {\n\t\tif a := LabelsToSignature(l); a != e {\n\t\t\tb.Fatalf(\"expected signature of %d for %s, got %d\", e, l, a)\n\t\t}\n\t}\n}\n\nfunc BenchmarkLabelToSignatureScalar(b *testing.B) {\n\tbenchmarkLabelToSignature(b, nil, 14695981039346656037)\n}\n\nfunc BenchmarkLabelToSignatureSingle(b *testing.B) {\n\tbenchmarkLabelToSignature(b, map[string]string{\"first-label\": \"first-label-value\"}, 5146282821936882169)\n}\n\nfunc BenchmarkLabelToSignatureDouble(b *testing.B) {\n\tbenchmarkLabelToSignature(b, map[string]string{\"first-label\": \"first-label-value\", \"second-label\": \"second-label-value\"}, 3195800080984914717)\n}\n\nfunc BenchmarkLabelToSignatureTriple(b *testing.B) {\n\tbenchmarkLabelToSignature(b, map[string]string{\"first-label\": \"first-label-value\", \"second-label\": \"second-label-value\", \"third-label\": \"third-label-value\"}, 13843036195897128121)\n}\n\nfunc benchmarkMetricToFingerprint(b *testing.B, ls LabelSet, e Fingerprint) {\n\tfor i := 0; i < b.N; i++ {\n\t\tif a := labelSetToFingerprint(ls); a != e {\n\t\t\tb.Fatalf(\"expected signature of %d for %s, got %d\", e, ls, a)\n\t\t}\n\t}\n}\n\nfunc BenchmarkMetricToFingerprintScalar(b *testing.B) {\n\tbenchmarkMetricToFingerprint(b, nil, 14695981039346656037)\n}\n\nfunc BenchmarkMetricToFingerprintSingle(b *testing.B) {\n\tbenchmarkMetricToFingerprint(b, LabelSet{\"first-label\": \"first-label-value\"}, 5146282821936882169)\n}\n\nfunc BenchmarkMetricToFingerprintDouble(b *testing.B) {\n\tbenchmarkMetricToFingerprint(b, LabelSet{\"first-label\": \"first-label-value\", \"second-label\": \"second-label-value\"}, 3195800080984914717)\n}\n\nfunc BenchmarkMetricToFingerprintTriple(b *testing.B) {\n\tbenchmarkMetricToFingerprint(b, LabelSet{\"first-label\": \"first-label-value\", \"second-label\": \"second-label-value\", \"third-label\": \"third-label-value\"}, 13843036195897128121)\n}\n\nfunc benchmarkMetricToFastFingerprint(b *testing.B, ls LabelSet, e Fingerprint) {\n\tfor i := 0; i < b.N; i++ {\n\t\tif a := labelSetToFastFingerprint(ls); a != e {\n\t\t\tb.Fatalf(\"expected signature of %d for %s, got %d\", e, ls, a)\n\t\t}\n\t}\n}\n\nfunc BenchmarkMetricToFastFingerprintScalar(b *testing.B) {\n\tbenchmarkMetricToFastFingerprint(b, nil, 14695981039346656037)\n}\n\nfunc BenchmarkMetricToFastFingerprintSingle(b *testing.B) {\n\tbenchmarkMetricToFastFingerprint(b, LabelSet{\"first-label\": \"first-label-value\"}, 5147259542624943964)\n}\n\nfunc BenchmarkMetricToFastFingerprintDouble(b *testing.B) {\n\tbenchmarkMetricToFastFingerprint(b, LabelSet{\"first-label\": \"first-label-value\", \"second-label\": \"second-label-value\"}, 18269973311206963528)\n}\n\nfunc BenchmarkMetricToFastFingerprintTriple(b *testing.B) {\n\tbenchmarkMetricToFastFingerprint(b, LabelSet{\"first-label\": \"first-label-value\", \"second-label\": \"second-label-value\", \"third-label\": \"third-label-value\"}, 15738406913934009676)\n}\n\nfunc BenchmarkEmptyLabelSignature(b *testing.B) {\n\tinput := []map[string]string{nil, {}}\n\n\tvar ms runtime.MemStats\n\truntime.ReadMemStats(&ms)\n\n\talloc := ms.Alloc\n\n\tfor _, labels := range input {\n\t\tLabelsToSignature(labels)\n\t}\n\n\truntime.ReadMemStats(&ms)\n\n\tif got := ms.Alloc; alloc != got {\n\t\tb.Fatal(\"expected LabelsToSignature with empty labels not to perform allocations\")\n\t}\n}\n\nfunc 
benchmarkMetricToFastFingerprintConc(b *testing.B, ls LabelSet, e Fingerprint, concLevel int) {\n\tvar start, end sync.WaitGroup\n\tstart.Add(1)\n\tend.Add(concLevel)\n\n\tfor i := 0; i < concLevel; i++ {\n\t\tgo func() {\n\t\t\tstart.Wait()\n\t\t\tfor j := b.N / concLevel; j >= 0; j-- {\n\t\t\t\tif a := labelSetToFastFingerprint(ls); a != e {\n\t\t\t\t\tb.Fatalf(\"expected signature of %d for %s, got %d\", e, ls, a)\n\t\t\t\t}\n\t\t\t}\n\t\t\tend.Done()\n\t\t}()\n\t}\n\tb.ResetTimer()\n\tstart.Done()\n\tend.Wait()\n}\n\nfunc BenchmarkMetricToFastFingerprintTripleConc1(b *testing.B) {\n\tbenchmarkMetricToFastFingerprintConc(b, LabelSet{\"first-label\": \"first-label-value\", \"second-label\": \"second-label-value\", \"third-label\": \"third-label-value\"}, 15738406913934009676, 1)\n}\n\nfunc BenchmarkMetricToFastFingerprintTripleConc2(b *testing.B) {\n\tbenchmarkMetricToFastFingerprintConc(b, LabelSet{\"first-label\": \"first-label-value\", \"second-label\": \"second-label-value\", \"third-label\": \"third-label-value\"}, 15738406913934009676, 2)\n}\n\nfunc BenchmarkMetricToFastFingerprintTripleConc4(b *testing.B) {\n\tbenchmarkMetricToFastFingerprintConc(b, LabelSet{\"first-label\": \"first-label-value\", \"second-label\": \"second-label-value\", \"third-label\": \"third-label-value\"}, 15738406913934009676, 4)\n}\n\nfunc BenchmarkMetricToFastFingerprintTripleConc8(b *testing.B) {\n\tbenchmarkMetricToFastFingerprintConc(b, LabelSet{\"first-label\": \"first-label-value\", \"second-label\": \"second-label-value\", \"third-label\": \"third-label-value\"}, 15738406913934009676, 8)\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/common/model/silence.go",
    "content": "// Copyright 2015 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage model\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"regexp\"\n\t\"time\"\n)\n\n// Matcher describes a matches the value of a given label.\ntype Matcher struct {\n\tName    LabelName `json:\"name\"`\n\tValue   string    `json:\"value\"`\n\tIsRegex bool      `json:\"isRegex\"`\n}\n\nfunc (m *Matcher) UnmarshalJSON(b []byte) error {\n\ttype plain Matcher\n\tif err := json.Unmarshal(b, (*plain)(m)); err != nil {\n\t\treturn err\n\t}\n\n\tif len(m.Name) == 0 {\n\t\treturn fmt.Errorf(\"label name in matcher must not be empty\")\n\t}\n\tif m.IsRegex {\n\t\tif _, err := regexp.Compile(m.Value); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// Validate returns true iff all fields of the matcher have valid values.\nfunc (m *Matcher) Validate() error {\n\tif !m.Name.IsValid() {\n\t\treturn fmt.Errorf(\"invalid name %q\", m.Name)\n\t}\n\tif m.IsRegex {\n\t\tif _, err := regexp.Compile(m.Value); err != nil {\n\t\t\treturn fmt.Errorf(\"invalid regular expression %q\", m.Value)\n\t\t}\n\t} else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 {\n\t\treturn fmt.Errorf(\"invalid value %q\", m.Value)\n\t}\n\treturn nil\n}\n\n// Silence defines the representation of a silence definiton\n// in the Prometheus eco-system.\ntype Silence struct {\n\tID uint64 `json:\"id,omitempty\"`\n\n\tMatchers []*Matcher `json:\"matchers\"`\n\n\tStartsAt time.Time `json:\"startsAt\"`\n\tEndsAt   time.Time `json:\"endsAt\"`\n\n\tCreatedAt time.Time `json:\"createdAt,omitempty\"`\n\tCreatedBy string    `json:\"createdBy\"`\n\tComment   string    `json:\"comment,omitempty\"`\n}\n\n// Validate returns true iff all fields of the silence have valid values.\nfunc (s *Silence) Validate() error {\n\tif len(s.Matchers) == 0 {\n\t\treturn fmt.Errorf(\"at least one matcher required\")\n\t}\n\tfor _, m := range s.Matchers {\n\t\tif err := m.Validate(); err != nil {\n\t\t\treturn fmt.Errorf(\"invalid matcher: %s\", err)\n\t\t}\n\t}\n\tif s.StartsAt.IsZero() {\n\t\treturn fmt.Errorf(\"start time missing\")\n\t}\n\tif s.EndsAt.IsZero() {\n\t\treturn fmt.Errorf(\"end time missing\")\n\t}\n\tif s.EndsAt.Before(s.StartsAt) {\n\t\treturn fmt.Errorf(\"start time must be before end time\")\n\t}\n\tif s.CreatedBy == \"\" {\n\t\treturn fmt.Errorf(\"creator information missing\")\n\t}\n\tif s.Comment == \"\" {\n\t\treturn fmt.Errorf(\"comment missing\")\n\t}\n\tif s.CreatedAt.IsZero() {\n\t\treturn fmt.Errorf(\"creation timestamp missing\")\n\t}\n\treturn nil\n}\n"
  },
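  {
    "path": "examples/silence/main.go",
    "content": "// Hypothetical usage sketch (this file is not part of the upstream\n// prometheus/common package; names and values are illustrative only): it\n// builds a model.Silence and runs Validate, which requires at least one\n// valid matcher, start/end/creation timestamps, a creator, and a comment.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/prometheus/common/model\"\n)\n\nfunc main() {\n\tnow := time.Now()\n\ts := model.Silence{\n\t\tMatchers: []*model.Matcher{\n\t\t\t{Name: \"job\", Value: \"api\"},\n\t\t\t// Regex matchers are compiled during validation.\n\t\t\t{Name: \"instance\", Value: \"a.*\", IsRegex: true},\n\t\t},\n\t\tStartsAt:  now,\n\t\tEndsAt:    now.Add(2 * time.Hour),\n\t\tCreatedAt: now,\n\t\tCreatedBy: \"ops@example.com\",\n\t\tComment:   \"planned maintenance\",\n\t}\n\tif err := s.Validate(); err != nil {\n\t\tfmt.Println(\"invalid silence:\", err)\n\t\treturn\n\t}\n\tfmt.Println(\"silence ok\")\n}\n"
  },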
  {
    "path": "vendor/github.com/prometheus/common/model/silence_test.go",
    "content": "// Copyright 2015 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage model\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestMatcherValidate(t *testing.T) {\n\tvar cases = []struct {\n\t\tmatcher *Matcher\n\t\terr     string\n\t}{\n\t\t{\n\t\t\tmatcher: &Matcher{\n\t\t\t\tName:  \"name\",\n\t\t\t\tValue: \"value\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tmatcher: &Matcher{\n\t\t\t\tName:    \"name\",\n\t\t\t\tValue:   \"value\",\n\t\t\t\tIsRegex: true,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tmatcher: &Matcher{\n\t\t\t\tName:  \"name!\",\n\t\t\t\tValue: \"value\",\n\t\t\t},\n\t\t\terr: \"invalid name\",\n\t\t},\n\t\t{\n\t\t\tmatcher: &Matcher{\n\t\t\t\tName:  \"\",\n\t\t\t\tValue: \"value\",\n\t\t\t},\n\t\t\terr: \"invalid name\",\n\t\t},\n\t\t{\n\t\t\tmatcher: &Matcher{\n\t\t\t\tName:  \"name\",\n\t\t\t\tValue: \"value\\xff\",\n\t\t\t},\n\t\t\terr: \"invalid value\",\n\t\t},\n\t\t{\n\t\t\tmatcher: &Matcher{\n\t\t\t\tName:  \"name\",\n\t\t\t\tValue: \"\",\n\t\t\t},\n\t\t\terr: \"invalid value\",\n\t\t},\n\t}\n\n\tfor i, c := range cases {\n\t\terr := c.matcher.Validate()\n\t\tif err == nil {\n\t\t\tif c.err == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tt.Errorf(\"%d. Expected error %q but got none\", i, c.err)\n\t\t\tcontinue\n\t\t}\n\t\tif c.err == \"\" && err != nil {\n\t\t\tt.Errorf(\"%d. Expected no error but got %q\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !strings.Contains(err.Error(), c.err) {\n\t\t\tt.Errorf(\"%d. 
Expected error to contain %q but got %q\", i, c.err, err)\n\t\t}\n\t}\n}\n\nfunc TestSilenceValidate(t *testing.T) {\n\tts := time.Now()\n\n\tvar cases = []struct {\n\t\tsil *Silence\n\t\terr string\n\t}{\n\t\t{\n\t\t\tsil: &Silence{\n\t\t\t\tMatchers: []*Matcher{\n\t\t\t\t\t{Name: \"name\", Value: \"value\"},\n\t\t\t\t},\n\t\t\t\tStartsAt:  ts,\n\t\t\t\tEndsAt:    ts,\n\t\t\t\tCreatedAt: ts,\n\t\t\t\tCreatedBy: \"name\",\n\t\t\t\tComment:   \"comment\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tsil: &Silence{\n\t\t\t\tMatchers: []*Matcher{\n\t\t\t\t\t{Name: \"name\", Value: \"value\"},\n\t\t\t\t\t{Name: \"name\", Value: \"value\"},\n\t\t\t\t\t{Name: \"name\", Value: \"value\"},\n\t\t\t\t\t{Name: \"name\", Value: \"value\", IsRegex: true},\n\t\t\t\t},\n\t\t\t\tStartsAt:  ts,\n\t\t\t\tEndsAt:    ts,\n\t\t\t\tCreatedAt: ts,\n\t\t\t\tCreatedBy: \"name\",\n\t\t\t\tComment:   \"comment\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tsil: &Silence{\n\t\t\t\tMatchers: []*Matcher{\n\t\t\t\t\t{Name: \"name\", Value: \"value\"},\n\t\t\t\t},\n\t\t\t\tStartsAt:  ts,\n\t\t\t\tEndsAt:    ts.Add(-1 * time.Minute),\n\t\t\t\tCreatedAt: ts,\n\t\t\t\tCreatedBy: \"name\",\n\t\t\t\tComment:   \"comment\",\n\t\t\t},\n\t\t\terr: \"start time must be before end time\",\n\t\t},\n\t\t{\n\t\t\tsil: &Silence{\n\t\t\t\tMatchers: []*Matcher{\n\t\t\t\t\t{Name: \"name\", Value: \"value\"},\n\t\t\t\t},\n\t\t\t\tStartsAt:  ts,\n\t\t\t\tCreatedAt: ts,\n\t\t\t\tCreatedBy: \"name\",\n\t\t\t\tComment:   \"comment\",\n\t\t\t},\n\t\t\terr: \"end time missing\",\n\t\t},\n\t\t{\n\t\t\tsil: &Silence{\n\t\t\t\tMatchers: []*Matcher{\n\t\t\t\t\t{Name: \"name\", Value: \"value\"},\n\t\t\t\t},\n\t\t\t\tEndsAt:    ts,\n\t\t\t\tCreatedAt: ts,\n\t\t\t\tCreatedBy: \"name\",\n\t\t\t\tComment:   \"comment\",\n\t\t\t},\n\t\t\terr: \"start time missing\",\n\t\t},\n\t\t{\n\t\t\tsil: &Silence{\n\t\t\t\tMatchers: []*Matcher{\n\t\t\t\t\t{Name: \"!name\", Value: \"value\"},\n\t\t\t\t},\n\t\t\t\tStartsAt:  ts,\n\t\t\t\tEndsAt:    ts,\n\t\t\t\tCreatedAt: ts,\n\t\t\t\tCreatedBy: \"name\",\n\t\t\t\tComment:   \"comment\",\n\t\t\t},\n\t\t\terr: \"invalid matcher\",\n\t\t},\n\t\t{\n\t\t\tsil: &Silence{\n\t\t\t\tMatchers: []*Matcher{\n\t\t\t\t\t{Name: \"name\", Value: \"value\"},\n\t\t\t\t},\n\t\t\t\tStartsAt:  ts,\n\t\t\t\tEndsAt:    ts,\n\t\t\t\tCreatedAt: ts,\n\t\t\t\tCreatedBy: \"name\",\n\t\t\t},\n\t\t\terr: \"comment missing\",\n\t\t},\n\t\t{\n\t\t\tsil: &Silence{\n\t\t\t\tMatchers: []*Matcher{\n\t\t\t\t\t{Name: \"name\", Value: \"value\"},\n\t\t\t\t},\n\t\t\t\tStartsAt:  ts,\n\t\t\t\tEndsAt:    ts,\n\t\t\t\tCreatedBy: \"name\",\n\t\t\t\tComment:   \"comment\",\n\t\t\t},\n\t\t\terr: \"creation timestamp missing\",\n\t\t},\n\t\t{\n\t\t\tsil: &Silence{\n\t\t\t\tMatchers: []*Matcher{\n\t\t\t\t\t{Name: \"name\", Value: \"value\"},\n\t\t\t\t},\n\t\t\t\tStartsAt:  ts,\n\t\t\t\tEndsAt:    ts,\n\t\t\t\tCreatedAt: ts,\n\t\t\t\tComment:   \"comment\",\n\t\t\t},\n\t\t\terr: \"creator information missing\",\n\t\t},\n\t}\n\n\tfor i, c := range cases {\n\t\terr := c.sil.Validate()\n\t\tif err == nil {\n\t\t\tif c.err == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tt.Errorf(\"%d. Expected error %q but got none\", i, c.err)\n\t\t\tcontinue\n\t\t}\n\t\tif c.err == \"\" && err != nil {\n\t\t\tt.Errorf(\"%d. Expected no error but got %q\", i, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !strings.Contains(err.Error(), c.err) {\n\t\t\tt.Errorf(\"%d. Expected error to contain %q but got %q\", i, c.err, err)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/common/model/time.go",
    "content": "// Copyright 2013 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage model\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nconst (\n\t// MinimumTick is the minimum supported time resolution. This has to be\n\t// at least time.Second in order for the code below to work.\n\tminimumTick = time.Millisecond\n\t// second is the Time duration equivalent to one second.\n\tsecond = int64(time.Second / minimumTick)\n\t// The number of nanoseconds per minimum tick.\n\tnanosPerTick = int64(minimumTick / time.Nanosecond)\n\n\t// Earliest is the earliest Time representable. Handy for\n\t// initializing a high watermark.\n\tEarliest = Time(math.MinInt64)\n\t// Latest is the latest Time representable. Handy for initializing\n\t// a low watermark.\n\tLatest = Time(math.MaxInt64)\n)\n\n// Time is the number of milliseconds since the epoch\n// (1970-01-01 00:00 UTC) excluding leap seconds.\ntype Time int64\n\n// Interval describes and interval between two timestamps.\ntype Interval struct {\n\tStart, End Time\n}\n\n// Now returns the current time as a Time.\nfunc Now() Time {\n\treturn TimeFromUnixNano(time.Now().UnixNano())\n}\n\n// TimeFromUnix returns the Time equivalent to the Unix Time t\n// provided in seconds.\nfunc TimeFromUnix(t int64) Time {\n\treturn Time(t * second)\n}\n\n// TimeFromUnixNano returns the Time equivalent to the Unix Time\n// t provided in nanoseconds.\nfunc TimeFromUnixNano(t int64) Time {\n\treturn Time(t / nanosPerTick)\n}\n\n// Equal reports whether two Times represent the same instant.\nfunc (t Time) Equal(o Time) bool {\n\treturn t == o\n}\n\n// Before reports whether the Time t is before o.\nfunc (t Time) Before(o Time) bool {\n\treturn t < o\n}\n\n// After reports whether the Time t is after o.\nfunc (t Time) After(o Time) bool {\n\treturn t > o\n}\n\n// Add returns the Time t + d.\nfunc (t Time) Add(d time.Duration) Time {\n\treturn t + Time(d/minimumTick)\n}\n\n// Sub returns the Duration t - o.\nfunc (t Time) Sub(o Time) time.Duration {\n\treturn time.Duration(t-o) * minimumTick\n}\n\n// Time returns the time.Time representation of t.\nfunc (t Time) Time() time.Time {\n\treturn time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick)\n}\n\n// Unix returns t as a Unix time, the number of seconds elapsed\n// since January 1, 1970 UTC.\nfunc (t Time) Unix() int64 {\n\treturn int64(t) / second\n}\n\n// UnixNano returns t as a Unix time, the number of nanoseconds elapsed\n// since January 1, 1970 UTC.\nfunc (t Time) UnixNano() int64 {\n\treturn int64(t) * nanosPerTick\n}\n\n// The number of digits after the dot.\nvar dotPrecision = int(math.Log10(float64(second)))\n\n// String returns a string representation of the Time.\nfunc (t Time) String() string {\n\treturn strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64)\n}\n\n// MarshalJSON implements the json.Marshaler interface.\nfunc (t Time) MarshalJSON() ([]byte, error) {\n\treturn []byte(t.String()), nil\n}\n\n// 
UnmarshalJSON implements the json.Unmarshaler interface.\nfunc (t *Time) UnmarshalJSON(b []byte) error {\n\tp := strings.Split(string(b), \".\")\n\tswitch len(p) {\n\tcase 1:\n\t\tv, err := strconv.ParseInt(string(p[0]), 10, 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t*t = Time(v * second)\n\n\tcase 2:\n\t\tv, err := strconv.ParseInt(string(p[0]), 10, 64)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tv *= second\n\n\t\tprec := dotPrecision - len(p[1])\n\t\tif prec < 0 {\n\t\t\tp[1] = p[1][:dotPrecision]\n\t\t} else if prec > 0 {\n\t\t\tp[1] = p[1] + strings.Repeat(\"0\", prec)\n\t\t}\n\n\t\tva, err := strconv.ParseInt(p[1], 10, 32)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t*t = Time(v + va)\n\n\tdefault:\n\t\treturn fmt.Errorf(\"invalid time %q\", string(b))\n\t}\n\treturn nil\n}\n\n// Duration wraps time.Duration. It is used to parse the custom duration format\n// from YAML.\n// This type should not propagate beyond the scope of input/output processing.\ntype Duration time.Duration\n\n// Set implements pflag/flag.Value\nfunc (d *Duration) Set(s string) error {\n\tvar err error\n\t*d, err = ParseDuration(s)\n\treturn err\n}\n\n// Type implements pflag.Value\nfunc (d *Duration) Type() string {\n\treturn \"duration\"\n}\n\nvar durationRE = regexp.MustCompile(\"^([0-9]+)(y|w|d|h|m|s|ms)$\")\n\n// ParseDuration parses a string into a time.Duration, assuming that a year\n// always has 365d, a week always has 7d, and a day always has 24h.\nfunc ParseDuration(durationStr string) (Duration, error) {\n\tmatches := durationRE.FindStringSubmatch(durationStr)\n\tif len(matches) != 3 {\n\t\treturn 0, fmt.Errorf(\"not a valid duration string: %q\", durationStr)\n\t}\n\tvar (\n\t\tn, _ = strconv.Atoi(matches[1])\n\t\tdur  = time.Duration(n) * time.Millisecond\n\t)\n\tswitch unit := matches[2]; unit {\n\tcase \"y\":\n\t\tdur *= 1000 * 60 * 60 * 24 * 365\n\tcase \"w\":\n\t\tdur *= 1000 * 60 * 60 * 24 * 7\n\tcase \"d\":\n\t\tdur *= 1000 * 60 * 60 * 24\n\tcase \"h\":\n\t\tdur *= 1000 * 60 * 60\n\tcase \"m\":\n\t\tdur *= 1000 * 60\n\tcase \"s\":\n\t\tdur *= 1000\n\tcase \"ms\":\n\t\t// Value already correct\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"invalid time unit in duration string: %q\", unit)\n\t}\n\treturn Duration(dur), nil\n}\n\nfunc (d Duration) String() string {\n\tvar (\n\t\tms   = int64(time.Duration(d) / time.Millisecond)\n\t\tunit = \"ms\"\n\t)\n\tfactors := map[string]int64{\n\t\t\"y\":  1000 * 60 * 60 * 24 * 365,\n\t\t\"w\":  1000 * 60 * 60 * 24 * 7,\n\t\t\"d\":  1000 * 60 * 60 * 24,\n\t\t\"h\":  1000 * 60 * 60,\n\t\t\"m\":  1000 * 60,\n\t\t\"s\":  1000,\n\t\t\"ms\": 1,\n\t}\n\n\tswitch int64(0) {\n\tcase ms % factors[\"y\"]:\n\t\tunit = \"y\"\n\tcase ms % factors[\"w\"]:\n\t\tunit = \"w\"\n\tcase ms % factors[\"d\"]:\n\t\tunit = \"d\"\n\tcase ms % factors[\"h\"]:\n\t\tunit = \"h\"\n\tcase ms % factors[\"m\"]:\n\t\tunit = \"m\"\n\tcase ms % factors[\"s\"]:\n\t\tunit = \"s\"\n\t}\n\treturn fmt.Sprintf(\"%v%v\", ms/factors[unit], unit)\n}\n\n// MarshalYAML implements the yaml.Marshaler interface.\nfunc (d Duration) MarshalYAML() (interface{}, error) {\n\treturn d.String(), nil\n}\n\n// UnmarshalYAML implements the yaml.Unmarshaler interface.\nfunc (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {\n\tvar s string\n\tif err := unmarshal(&s); err != nil {\n\t\treturn err\n\t}\n\tdur, err := ParseDuration(s)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*d = dur\n\treturn nil\n}\n"
  },
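  {
    "path": "examples/modeltime/main.go",
    "content": "// Hypothetical usage sketch (this file is not part of the upstream\n// prometheus/common package): model.Time stores milliseconds since the Unix\n// epoch, so sub-millisecond precision is truncated, and model.ParseDuration\n// accepts only a single integer followed by one unit (y, w, d, h, m, s, ms).\npackage main\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/prometheus/common/model\"\n)\n\nfunc main() {\n\t// Both instants collapse to the same millisecond-resolution Time.\n\tts1 := model.TimeFromUnixNano(time.Unix(1136239445, 123000000).UnixNano())\n\tts2 := model.TimeFromUnixNano(time.Unix(1136239445, 123456789).UnixNano())\n\tfmt.Println(ts1.Equal(ts2), ts1) // true 1136239445.123\n\n\t// \"90m\" parses; compound forms such as \"1h30m\" do not match the\n\t// single-number-plus-unit grammar and return an error.\n\td, err := model.ParseDuration(\"90m\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(time.Duration(d)) // 1h30m0s\n\tif _, err := model.ParseDuration(\"1h30m\"); err != nil {\n\t\tfmt.Println(err)\n\t}\n}\n"
  },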
  {
    "path": "vendor/github.com/prometheus/common/model/time_test.go",
    "content": "// Copyright 2013 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage model\n\nimport (\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestComparators(t *testing.T) {\n\tt1a := TimeFromUnix(0)\n\tt1b := TimeFromUnix(0)\n\tt2 := TimeFromUnix(2*second - 1)\n\n\tif !t1a.Equal(t1b) {\n\t\tt.Fatalf(\"Expected %s to be equal to %s\", t1a, t1b)\n\t}\n\tif t1a.Equal(t2) {\n\t\tt.Fatalf(\"Expected %s to not be equal to %s\", t1a, t2)\n\t}\n\n\tif !t1a.Before(t2) {\n\t\tt.Fatalf(\"Expected %s to be before %s\", t1a, t2)\n\t}\n\tif t1a.Before(t1b) {\n\t\tt.Fatalf(\"Expected %s to not be before %s\", t1a, t1b)\n\t}\n\n\tif !t2.After(t1a) {\n\t\tt.Fatalf(\"Expected %s to be after %s\", t2, t1a)\n\t}\n\tif t1b.After(t1a) {\n\t\tt.Fatalf(\"Expected %s to not be after %s\", t1b, t1a)\n\t}\n}\n\nfunc TestTimeConversions(t *testing.T) {\n\tunixSecs := int64(1136239445)\n\tunixNsecs := int64(123456789)\n\tunixNano := unixSecs*1e9 + unixNsecs\n\n\tt1 := time.Unix(unixSecs, unixNsecs-unixNsecs%nanosPerTick)\n\tt2 := time.Unix(unixSecs, unixNsecs)\n\n\tts := TimeFromUnixNano(unixNano)\n\tif !ts.Time().Equal(t1) {\n\t\tt.Fatalf(\"Expected %s, got %s\", t1, ts.Time())\n\t}\n\n\t// Test available precision.\n\tts = TimeFromUnixNano(t2.UnixNano())\n\tif !ts.Time().Equal(t1) {\n\t\tt.Fatalf(\"Expected %s, got %s\", t1, ts.Time())\n\t}\n\n\tif ts.UnixNano() != unixNano-unixNano%nanosPerTick {\n\t\tt.Fatalf(\"Expected %d, got %d\", unixNano, ts.UnixNano())\n\t}\n}\n\nfunc TestDuration(t *testing.T) {\n\tduration := time.Second + time.Minute + time.Hour\n\tgoTime := time.Unix(1136239445, 0)\n\n\tts := TimeFromUnix(goTime.Unix())\n\tif !goTime.Add(duration).Equal(ts.Add(duration).Time()) {\n\t\tt.Fatalf(\"Expected %s to be equal to %s\", goTime.Add(duration), ts.Add(duration))\n\t}\n\n\tearlier := ts.Add(-duration)\n\tdelta := ts.Sub(earlier)\n\tif delta != duration {\n\t\tt.Fatalf(\"Expected %s to be equal to %s\", delta, duration)\n\t}\n}\n\nfunc TestParseDuration(t *testing.T) {\n\tvar cases = []struct {\n\t\tin  string\n\t\tout time.Duration\n\t}{\n\t\t{\n\t\t\tin:  \"324ms\",\n\t\t\tout: 324 * time.Millisecond,\n\t\t}, {\n\t\t\tin:  \"3s\",\n\t\t\tout: 3 * time.Second,\n\t\t}, {\n\t\t\tin:  \"5m\",\n\t\t\tout: 5 * time.Minute,\n\t\t}, {\n\t\t\tin:  \"1h\",\n\t\t\tout: time.Hour,\n\t\t}, {\n\t\t\tin:  \"4d\",\n\t\t\tout: 4 * 24 * time.Hour,\n\t\t}, {\n\t\t\tin:  \"3w\",\n\t\t\tout: 3 * 7 * 24 * time.Hour,\n\t\t}, {\n\t\t\tin:  \"10y\",\n\t\t\tout: 10 * 365 * 24 * time.Hour,\n\t\t},\n\t}\n\n\tfor _, c := range cases {\n\t\td, err := ParseDuration(c.in)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"Unexpected error on input %q\", c.in)\n\t\t}\n\t\tif time.Duration(d) != c.out {\n\t\t\tt.Errorf(\"Expected %v but got %v\", c.out, d)\n\t\t}\n\t\tif d.String() != c.in {\n\t\t\tt.Errorf(\"Expected duration string %q but got %q\", c.in, d.String())\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/common/model/value.go",
    "content": "// Copyright 2013 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage model\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"math\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar (\n\t// ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a\n\t// non-existing sample pair. It is a SamplePair with timestamp Earliest and\n\t// value 0.0. Note that the natural zero value of SamplePair has a timestamp\n\t// of 0, which is possible to appear in a real SamplePair and thus not\n\t// suitable to signal a non-existing SamplePair.\n\tZeroSamplePair = SamplePair{Timestamp: Earliest}\n\n\t// ZeroSample is the pseudo zero-value of Sample used to signal a\n\t// non-existing sample. It is a Sample with timestamp Earliest, value 0.0,\n\t// and metric nil. Note that the natural zero value of Sample has a timestamp\n\t// of 0, which is possible to appear in a real Sample and thus not suitable\n\t// to signal a non-existing Sample.\n\tZeroSample = Sample{Timestamp: Earliest}\n)\n\n// A SampleValue is a representation of a value for a given sample at a given\n// time.\ntype SampleValue float64\n\n// MarshalJSON implements json.Marshaler.\nfunc (v SampleValue) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(v.String())\n}\n\n// UnmarshalJSON implements json.Unmarshaler.\nfunc (v *SampleValue) UnmarshalJSON(b []byte) error {\n\tif len(b) < 2 || b[0] != '\"' || b[len(b)-1] != '\"' {\n\t\treturn fmt.Errorf(\"sample value must be a quoted string\")\n\t}\n\tf, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*v = SampleValue(f)\n\treturn nil\n}\n\n// Equal returns true if the value of v and o is equal or if both are NaN. Note\n// that v==o is false if both are NaN. If you want the conventional float\n// behavior, use == to compare two SampleValues.\nfunc (v SampleValue) Equal(o SampleValue) bool {\n\tif v == o {\n\t\treturn true\n\t}\n\treturn math.IsNaN(float64(v)) && math.IsNaN(float64(o))\n}\n\nfunc (v SampleValue) String() string {\n\treturn strconv.FormatFloat(float64(v), 'f', -1, 64)\n}\n\n// SamplePair pairs a SampleValue with a Timestamp.\ntype SamplePair struct {\n\tTimestamp Time\n\tValue     SampleValue\n}\n\n// MarshalJSON implements json.Marshaler.\nfunc (s SamplePair) MarshalJSON() ([]byte, error) {\n\tt, err := json.Marshal(s.Timestamp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tv, err := json.Marshal(s.Value)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn []byte(fmt.Sprintf(\"[%s,%s]\", t, v)), nil\n}\n\n// UnmarshalJSON implements json.Unmarshaler.\nfunc (s *SamplePair) UnmarshalJSON(b []byte) error {\n\tv := [...]json.Unmarshaler{&s.Timestamp, &s.Value}\n\treturn json.Unmarshal(b, &v)\n}\n\n// Equal returns true if this SamplePair and o have equal Values and equal\n// Timestamps. 
The semantics of Value equality are defined by SampleValue.Equal.\nfunc (s *SamplePair) Equal(o *SamplePair) bool {\n\treturn s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp))\n}\n\nfunc (s SamplePair) String() string {\n\treturn fmt.Sprintf(\"%s @[%s]\", s.Value, s.Timestamp)\n}\n\n// Sample is a sample pair associated with a metric.\ntype Sample struct {\n\tMetric    Metric      `json:\"metric\"`\n\tValue     SampleValue `json:\"value\"`\n\tTimestamp Time        `json:\"timestamp\"`\n}\n\n// Equal compares first the metrics, then the timestamp, then the value. The\n// semantics of value equality are defined by SampleValue.Equal.\nfunc (s *Sample) Equal(o *Sample) bool {\n\tif s == o {\n\t\treturn true\n\t}\n\n\tif !s.Metric.Equal(o.Metric) {\n\t\treturn false\n\t}\n\tif !s.Timestamp.Equal(o.Timestamp) {\n\t\treturn false\n\t}\n\n\treturn s.Value.Equal(o.Value)\n}\n\nfunc (s Sample) String() string {\n\treturn fmt.Sprintf(\"%s => %s\", s.Metric, SamplePair{\n\t\tTimestamp: s.Timestamp,\n\t\tValue:     s.Value,\n\t})\n}\n\n// MarshalJSON implements json.Marshaler.\nfunc (s Sample) MarshalJSON() ([]byte, error) {\n\tv := struct {\n\t\tMetric Metric     `json:\"metric\"`\n\t\tValue  SamplePair `json:\"value\"`\n\t}{\n\t\tMetric: s.Metric,\n\t\tValue: SamplePair{\n\t\t\tTimestamp: s.Timestamp,\n\t\t\tValue:     s.Value,\n\t\t},\n\t}\n\n\treturn json.Marshal(&v)\n}\n\n// UnmarshalJSON implements json.Unmarshaler.\nfunc (s *Sample) UnmarshalJSON(b []byte) error {\n\tv := struct {\n\t\tMetric Metric     `json:\"metric\"`\n\t\tValue  SamplePair `json:\"value\"`\n\t}{\n\t\tMetric: s.Metric,\n\t\tValue: SamplePair{\n\t\t\tTimestamp: s.Timestamp,\n\t\t\tValue:     s.Value,\n\t\t},\n\t}\n\n\tif err := json.Unmarshal(b, &v); err != nil {\n\t\treturn err\n\t}\n\n\ts.Metric = v.Metric\n\ts.Timestamp = v.Value.Timestamp\n\ts.Value = v.Value.Value\n\n\treturn nil\n}\n\n// Samples is a sortable Sample slice. 
It implements sort.Interface.\ntype Samples []*Sample\n\nfunc (s Samples) Len() int {\n\treturn len(s)\n}\n\n// Less compares first the metrics, then the timestamp.\nfunc (s Samples) Less(i, j int) bool {\n\tswitch {\n\tcase s[i].Metric.Before(s[j].Metric):\n\t\treturn true\n\tcase s[j].Metric.Before(s[i].Metric):\n\t\treturn false\n\tcase s[i].Timestamp.Before(s[j].Timestamp):\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\nfunc (s Samples) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n}\n\n// Equal compares two sets of samples and returns true if they are equal.\nfunc (s Samples) Equal(o Samples) bool {\n\tif len(s) != len(o) {\n\t\treturn false\n\t}\n\n\tfor i, sample := range s {\n\t\tif !sample.Equal(o[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// SampleStream is a stream of Values belonging to an attached Metric.\ntype SampleStream struct {\n\tMetric Metric       `json:\"metric\"`\n\tValues []SamplePair `json:\"values\"`\n}\n\nfunc (ss SampleStream) String() string {\n\tvals := make([]string, len(ss.Values))\n\tfor i, v := range ss.Values {\n\t\tvals[i] = v.String()\n\t}\n\treturn fmt.Sprintf(\"%s =>\\n%s\", ss.Metric, strings.Join(vals, \"\\n\"))\n}\n\n// Value is a generic interface for values resulting from a query evaluation.\ntype Value interface {\n\tType() ValueType\n\tString() string\n}\n\nfunc (Matrix) Type() ValueType  { return ValMatrix }\nfunc (Vector) Type() ValueType  { return ValVector }\nfunc (*Scalar) Type() ValueType { return ValScalar }\nfunc (*String) Type() ValueType { return ValString }\n\ntype ValueType int\n\nconst (\n\tValNone ValueType = iota\n\tValScalar\n\tValVector\n\tValMatrix\n\tValString\n)\n\n// MarshalJSON implements json.Marshaler.\nfunc (et ValueType) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(et.String())\n}\n\nfunc (et *ValueType) UnmarshalJSON(b []byte) error {\n\tvar s string\n\tif err := json.Unmarshal(b, &s); err != nil {\n\t\treturn err\n\t}\n\tswitch s {\n\tcase \"<ValNone>\":\n\t\t*et = ValNone\n\tcase \"scalar\":\n\t\t*et = ValScalar\n\tcase \"vector\":\n\t\t*et = ValVector\n\tcase \"matrix\":\n\t\t*et = ValMatrix\n\tcase \"string\":\n\t\t*et = ValString\n\tdefault:\n\t\treturn fmt.Errorf(\"unknown value type %q\", s)\n\t}\n\treturn nil\n}\n\nfunc (e ValueType) String() string {\n\tswitch e {\n\tcase ValNone:\n\t\treturn \"<ValNone>\"\n\tcase ValScalar:\n\t\treturn \"scalar\"\n\tcase ValVector:\n\t\treturn \"vector\"\n\tcase ValMatrix:\n\t\treturn \"matrix\"\n\tcase ValString:\n\t\treturn \"string\"\n\t}\n\tpanic(\"ValueType.String: unhandled value type\")\n}\n\n// Scalar is a scalar value evaluated at the set timestamp.\ntype Scalar struct {\n\tValue     SampleValue `json:\"value\"`\n\tTimestamp Time        `json:\"timestamp\"`\n}\n\nfunc (s Scalar) String() string {\n\treturn fmt.Sprintf(\"scalar: %v @[%v]\", s.Value, s.Timestamp)\n}\n\n// MarshalJSON implements json.Marshaler.\nfunc (s Scalar) MarshalJSON() ([]byte, error) {\n\tv := strconv.FormatFloat(float64(s.Value), 'f', -1, 64)\n\treturn json.Marshal([...]interface{}{s.Timestamp, string(v)})\n}\n\n// UnmarshalJSON implements json.Unmarshaler.\nfunc (s *Scalar) UnmarshalJSON(b []byte) error {\n\tvar f string\n\tv := [...]interface{}{&s.Timestamp, &f}\n\n\tif err := json.Unmarshal(b, &v); err != nil {\n\t\treturn err\n\t}\n\n\tvalue, err := strconv.ParseFloat(f, 64)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error parsing sample value: %s\", err)\n\t}\n\ts.Value = SampleValue(value)\n\treturn nil\n}\n\n// String is a string value evaluated at 
the set timestamp.\ntype String struct {\n\tValue     string `json:\"value\"`\n\tTimestamp Time   `json:\"timestamp\"`\n}\n\nfunc (s *String) String() string {\n\treturn s.Value\n}\n\n// MarshalJSON implements json.Marshaler.\nfunc (s String) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal([]interface{}{s.Timestamp, s.Value})\n}\n\n// UnmarshalJSON implements json.Unmarshaler.\nfunc (s *String) UnmarshalJSON(b []byte) error {\n\tv := [...]interface{}{&s.Timestamp, &s.Value}\n\treturn json.Unmarshal(b, &v)\n}\n\n// Vector is basically only an alias for Samples, but the\n// contract is that in a Vector, all Samples have the same timestamp.\ntype Vector []*Sample\n\nfunc (vec Vector) String() string {\n\tentries := make([]string, len(vec))\n\tfor i, s := range vec {\n\t\tentries[i] = s.String()\n\t}\n\treturn strings.Join(entries, \"\\n\")\n}\n\nfunc (vec Vector) Len() int      { return len(vec) }\nfunc (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] }\n\n// Less compares first the metrics, then the timestamp.\nfunc (vec Vector) Less(i, j int) bool {\n\tswitch {\n\tcase vec[i].Metric.Before(vec[j].Metric):\n\t\treturn true\n\tcase vec[j].Metric.Before(vec[i].Metric):\n\t\treturn false\n\tcase vec[i].Timestamp.Before(vec[j].Timestamp):\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\n// Equal compares two sets of samples and returns true if they are equal.\nfunc (vec Vector) Equal(o Vector) bool {\n\tif len(vec) != len(o) {\n\t\treturn false\n\t}\n\n\tfor i, sample := range vec {\n\t\tif !sample.Equal(o[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// Matrix is a list of time series.\ntype Matrix []*SampleStream\n\nfunc (m Matrix) Len() int           { return len(m) }\nfunc (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) }\nfunc (m Matrix) Swap(i, j int)      { m[i], m[j] = m[j], m[i] }\n\nfunc (mat Matrix) String() string {\n\tmatCp := make(Matrix, len(mat))\n\tcopy(matCp, mat)\n\tsort.Sort(matCp)\n\n\tstrs := make([]string, len(matCp))\n\n\tfor i, ss := range matCp {\n\t\tstrs[i] = ss.String()\n\t}\n\n\treturn strings.Join(strs, \"\\n\")\n}\n"
  },
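  {
    "path": "examples/samplejson/main.go",
    "content": "// Hypothetical usage sketch (this file is not part of the upstream\n// prometheus/common package): it shows the JSON wire format implemented in\n// value.go, where a sample encodes as [timestamp, \"value\"] with the float\n// quoted so that +Inf, -Inf, and NaN survive the round trip.\npackage main\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\n\t\"github.com/prometheus/common/model\"\n)\n\nfunc main() {\n\ts := model.Sample{\n\t\tMetric:    model.Metric{model.MetricNameLabel: \"test_metric\"},\n\t\tValue:     123.1,\n\t\tTimestamp: 1234567, // milliseconds since the epoch\n\t}\n\n\tb, err := json.Marshal(s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(string(b))\n\t// {\"metric\":{\"__name__\":\"test_metric\"},\"value\":[1234.567,\"123.1\"]}\n\n\tvar back model.Sample\n\tif err := json.Unmarshal(b, &back); err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Println(back.Equal(&s)) // true\n}\n"
  },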
  {
    "path": "vendor/github.com/prometheus/common/model/value_test.go",
    "content": "// Copyright 2013 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage model\n\nimport (\n\t\"encoding/json\"\n\t\"math\"\n\t\"reflect\"\n\t\"sort\"\n\t\"testing\"\n)\n\nfunc TestEqualValues(t *testing.T) {\n\ttests := map[string]struct {\n\t\tin1, in2 SampleValue\n\t\twant     bool\n\t}{\n\t\t\"equal floats\": {\n\t\t\tin1:  3.14,\n\t\t\tin2:  3.14,\n\t\t\twant: true,\n\t\t},\n\t\t\"unequal floats\": {\n\t\t\tin1:  3.14,\n\t\t\tin2:  3.1415,\n\t\t\twant: false,\n\t\t},\n\t\t\"positive inifinities\": {\n\t\t\tin1:  SampleValue(math.Inf(+1)),\n\t\t\tin2:  SampleValue(math.Inf(+1)),\n\t\t\twant: true,\n\t\t},\n\t\t\"negative inifinities\": {\n\t\t\tin1:  SampleValue(math.Inf(-1)),\n\t\t\tin2:  SampleValue(math.Inf(-1)),\n\t\t\twant: true,\n\t\t},\n\t\t\"different inifinities\": {\n\t\t\tin1:  SampleValue(math.Inf(+1)),\n\t\t\tin2:  SampleValue(math.Inf(-1)),\n\t\t\twant: false,\n\t\t},\n\t\t\"number and infinity\": {\n\t\t\tin1:  42,\n\t\t\tin2:  SampleValue(math.Inf(+1)),\n\t\t\twant: false,\n\t\t},\n\t\t\"number and NaN\": {\n\t\t\tin1:  42,\n\t\t\tin2:  SampleValue(math.NaN()),\n\t\t\twant: false,\n\t\t},\n\t\t\"NaNs\": {\n\t\t\tin1:  SampleValue(math.NaN()),\n\t\t\tin2:  SampleValue(math.NaN()),\n\t\t\twant: true, // !!!\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tgot := test.in1.Equal(test.in2)\n\t\tif got != test.want {\n\t\t\tt.Errorf(\"Comparing %s, %f and %f: got %t, want %t\", name, test.in1, test.in2, got, test.want)\n\t\t}\n\t}\n}\n\nfunc TestEqualSamples(t *testing.T) {\n\ttestSample := &Sample{}\n\n\ttests := map[string]struct {\n\t\tin1, in2 *Sample\n\t\twant     bool\n\t}{\n\t\t\"equal pointers\": {\n\t\t\tin1:  testSample,\n\t\t\tin2:  testSample,\n\t\t\twant: true,\n\t\t},\n\t\t\"different metrics\": {\n\t\t\tin1:  &Sample{Metric: Metric{\"foo\": \"bar\"}},\n\t\t\tin2:  &Sample{Metric: Metric{\"foo\": \"biz\"}},\n\t\t\twant: false,\n\t\t},\n\t\t\"different timestamp\": {\n\t\t\tin1:  &Sample{Timestamp: 0},\n\t\t\tin2:  &Sample{Timestamp: 1},\n\t\t\twant: false,\n\t\t},\n\t\t\"different value\": {\n\t\t\tin1:  &Sample{Value: 0},\n\t\t\tin2:  &Sample{Value: 1},\n\t\t\twant: false,\n\t\t},\n\t\t\"equal samples\": {\n\t\t\tin1: &Sample{\n\t\t\t\tMetric:    Metric{\"foo\": \"bar\"},\n\t\t\t\tTimestamp: 0,\n\t\t\t\tValue:     1,\n\t\t\t},\n\t\t\tin2: &Sample{\n\t\t\t\tMetric:    Metric{\"foo\": \"bar\"},\n\t\t\t\tTimestamp: 0,\n\t\t\t\tValue:     1,\n\t\t\t},\n\t\t\twant: true,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tgot := test.in1.Equal(test.in2)\n\t\tif got != test.want {\n\t\t\tt.Errorf(\"Comparing %s, %v and %v: got %t, want %t\", name, test.in1, test.in2, got, test.want)\n\t\t}\n\t}\n\n}\n\nfunc TestSamplePairJSON(t *testing.T) {\n\tinput := []struct {\n\t\tplain string\n\t\tvalue SamplePair\n\t}{\n\t\t{\n\t\t\tplain: `[1234.567,\"123.1\"]`,\n\t\t\tvalue: SamplePair{\n\t\t\t\tValue:     123.1,\n\t\t\t\tTimestamp: 1234567,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range input {\n\t\tb, err := 
json.Marshal(test.value)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif string(b) != test.plain {\n\t\t\tt.Errorf(\"encoding error: expected %q, got %q\", test.plain, b)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar sp SamplePair\n\t\terr = json.Unmarshal(b, &sp)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif sp != test.value {\n\t\t\tt.Errorf(\"decoding error: expected %v, got %v\", test.value, sp)\n\t\t}\n\t}\n}\n\nfunc TestSampleJSON(t *testing.T) {\n\tinput := []struct {\n\t\tplain string\n\t\tvalue Sample\n\t}{\n\t\t{\n\t\t\tplain: `{\"metric\":{\"__name__\":\"test_metric\"},\"value\":[1234.567,\"123.1\"]}`,\n\t\t\tvalue: Sample{\n\t\t\t\tMetric: Metric{\n\t\t\t\t\tMetricNameLabel: \"test_metric\",\n\t\t\t\t},\n\t\t\t\tValue:     123.1,\n\t\t\t\tTimestamp: 1234567,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range input {\n\t\tb, err := json.Marshal(test.value)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif string(b) != test.plain {\n\t\t\tt.Errorf(\"encoding error: expected %q, got %q\", test.plain, b)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar sv Sample\n\t\terr = json.Unmarshal(b, &sv)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif !reflect.DeepEqual(sv, test.value) {\n\t\t\tt.Errorf(\"decoding error: expected %v, got %v\", test.value, sv)\n\t\t}\n\t}\n}\n\nfunc TestVectorJSON(t *testing.T) {\n\tinput := []struct {\n\t\tplain string\n\t\tvalue Vector\n\t}{\n\t\t{\n\t\t\tplain: `[]`,\n\t\t\tvalue: Vector{},\n\t\t},\n\t\t{\n\t\t\tplain: `[{\"metric\":{\"__name__\":\"test_metric\"},\"value\":[1234.567,\"123.1\"]}]`,\n\t\t\tvalue: Vector{&Sample{\n\t\t\t\tMetric: Metric{\n\t\t\t\t\tMetricNameLabel: \"test_metric\",\n\t\t\t\t},\n\t\t\t\tValue:     123.1,\n\t\t\t\tTimestamp: 1234567,\n\t\t\t}},\n\t\t},\n\t\t{\n\t\t\tplain: `[{\"metric\":{\"__name__\":\"test_metric\"},\"value\":[1234.567,\"123.1\"]},{\"metric\":{\"foo\":\"bar\"},\"value\":[1.234,\"+Inf\"]}]`,\n\t\t\tvalue: Vector{\n\t\t\t\t&Sample{\n\t\t\t\t\tMetric: Metric{\n\t\t\t\t\t\tMetricNameLabel: \"test_metric\",\n\t\t\t\t\t},\n\t\t\t\t\tValue:     123.1,\n\t\t\t\t\tTimestamp: 1234567,\n\t\t\t\t},\n\t\t\t\t&Sample{\n\t\t\t\t\tMetric: Metric{\n\t\t\t\t\t\t\"foo\": \"bar\",\n\t\t\t\t\t},\n\t\t\t\t\tValue:     SampleValue(math.Inf(1)),\n\t\t\t\t\tTimestamp: 1234,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range input {\n\t\tb, err := json.Marshal(test.value)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif string(b) != test.plain {\n\t\t\tt.Errorf(\"encoding error: expected %q, got %q\", test.plain, b)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar vec Vector\n\t\terr = json.Unmarshal(b, &vec)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif !reflect.DeepEqual(vec, test.value) {\n\t\t\tt.Errorf(\"decoding error: expected %v, got %v\", test.value, vec)\n\t\t}\n\t}\n}\n\nfunc TestScalarJSON(t *testing.T) {\n\tinput := []struct {\n\t\tplain string\n\t\tvalue Scalar\n\t}{\n\t\t{\n\t\t\tplain: `[123.456,\"456\"]`,\n\t\t\tvalue: Scalar{\n\t\t\t\tTimestamp: 123456,\n\t\t\t\tValue:     456,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tplain: `[123123.456,\"+Inf\"]`,\n\t\t\tvalue: Scalar{\n\t\t\t\tTimestamp: 123123456,\n\t\t\t\tValue:     SampleValue(math.Inf(1)),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tplain: `[123123.456,\"-Inf\"]`,\n\t\t\tvalue: Scalar{\n\t\t\t\tTimestamp: 123123456,\n\t\t\t\tValue:     SampleValue(math.Inf(-1)),\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range input {\n\t\tb, err := 
json.Marshal(test.value)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif string(b) != test.plain {\n\t\t\tt.Errorf(\"encoding error: expected %q, got %q\", test.plain, b)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar sv Scalar\n\t\terr = json.Unmarshal(b, &sv)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif sv != test.value {\n\t\t\tt.Errorf(\"decoding error: expected %v, got %v\", test.value, sv)\n\t\t}\n\t}\n}\n\nfunc TestStringJSON(t *testing.T) {\n\tinput := []struct {\n\t\tplain string\n\t\tvalue String\n\t}{\n\t\t{\n\t\t\tplain: `[123.456,\"test\"]`,\n\t\t\tvalue: String{\n\t\t\t\tTimestamp: 123456,\n\t\t\t\tValue:     \"test\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tplain: `[123123.456,\"台北\"]`,\n\t\t\tvalue: String{\n\t\t\t\tTimestamp: 123123456,\n\t\t\t\tValue:     \"台北\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range input {\n\t\tb, err := json.Marshal(test.value)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif string(b) != test.plain {\n\t\t\tt.Errorf(\"encoding error: expected %q, got %q\", test.plain, b)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar sv String\n\t\terr = json.Unmarshal(b, &sv)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif sv != test.value {\n\t\t\tt.Errorf(\"decoding error: expected %v, got %v\", test.value, sv)\n\t\t}\n\t}\n}\n\nfunc TestVectorSort(t *testing.T) {\n\tinput := Vector{\n\t\t&Sample{\n\t\t\tMetric: Metric{\n\t\t\t\tMetricNameLabel: \"A\",\n\t\t\t},\n\t\t\tTimestamp: 1,\n\t\t},\n\t\t&Sample{\n\t\t\tMetric: Metric{\n\t\t\t\tMetricNameLabel: \"A\",\n\t\t\t},\n\t\t\tTimestamp: 2,\n\t\t},\n\t\t&Sample{\n\t\t\tMetric: Metric{\n\t\t\t\tMetricNameLabel: \"C\",\n\t\t\t},\n\t\t\tTimestamp: 1,\n\t\t},\n\t\t&Sample{\n\t\t\tMetric: Metric{\n\t\t\t\tMetricNameLabel: \"C\",\n\t\t\t},\n\t\t\tTimestamp: 2,\n\t\t},\n\t\t&Sample{\n\t\t\tMetric: Metric{\n\t\t\t\tMetricNameLabel: \"B\",\n\t\t\t},\n\t\t\tTimestamp: 1,\n\t\t},\n\t\t&Sample{\n\t\t\tMetric: Metric{\n\t\t\t\tMetricNameLabel: \"B\",\n\t\t\t},\n\t\t\tTimestamp: 2,\n\t\t},\n\t}\n\n\texpected := Vector{\n\t\t&Sample{\n\t\t\tMetric: Metric{\n\t\t\t\tMetricNameLabel: \"A\",\n\t\t\t},\n\t\t\tTimestamp: 1,\n\t\t},\n\t\t&Sample{\n\t\t\tMetric: Metric{\n\t\t\t\tMetricNameLabel: \"A\",\n\t\t\t},\n\t\t\tTimestamp: 2,\n\t\t},\n\t\t&Sample{\n\t\t\tMetric: Metric{\n\t\t\t\tMetricNameLabel: \"B\",\n\t\t\t},\n\t\t\tTimestamp: 1,\n\t\t},\n\t\t&Sample{\n\t\t\tMetric: Metric{\n\t\t\t\tMetricNameLabel: \"B\",\n\t\t\t},\n\t\t\tTimestamp: 2,\n\t\t},\n\t\t&Sample{\n\t\t\tMetric: Metric{\n\t\t\t\tMetricNameLabel: \"C\",\n\t\t\t},\n\t\t\tTimestamp: 1,\n\t\t},\n\t\t&Sample{\n\t\t\tMetric: Metric{\n\t\t\t\tMetricNameLabel: \"C\",\n\t\t\t},\n\t\t\tTimestamp: 2,\n\t\t},\n\t}\n\n\tsort.Sort(input)\n\n\tfor i, actual := range input {\n\t\tactualFp := actual.Metric.Fingerprint()\n\t\texpectedFp := expected[i].Metric.Fingerprint()\n\n\t\tif actualFp != expectedFp {\n\t\t\tt.Fatalf(\"%d. Incorrect fingerprint. Got %s; want %s\", i, actualFp.String(), expectedFp.String())\n\t\t}\n\n\t\tif actual.Timestamp != expected[i].Timestamp {\n\t\t\tt.Fatalf(\"%d. Incorrect timestamp. Got %s; want %s\", i, actual.Timestamp, expected[i].Timestamp)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/procfs/.travis.yml",
    "content": "sudo: false\nlanguage: go\ngo:\n    - 1.7.6\n    - 1.8.3\n"
  },
  {
    "path": "vendor/github.com/prometheus/procfs/CONTRIBUTING.md",
    "content": "# Contributing\n\nPrometheus uses GitHub to manage reviews of pull requests.\n\n* If you have a trivial fix or improvement, go ahead and create a pull request,\n  addressing (with `@...`) the maintainer of this repository (see\n  [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.\n\n* If you plan to do something more involved, first discuss your ideas\n  on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).\n  This will avoid unnecessary work and surely give you and us a good deal\n  of inspiration.\n\n* Relevant coding style guidelines are the [Go Code Review\n  Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments)\n  and the _Formatting and style_ section of Peter Bourgon's [Go: Best\n  Practices for Production\n  Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style).\n"
  },
  {
    "path": "vendor/github.com/prometheus/procfs/LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. 
For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. 
Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. (Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "vendor/github.com/prometheus/procfs/MAINTAINERS.md",
    "content": "* Tobias Schmidt <tobidt@gmail.com>\n"
  },
  {
    "path": "vendor/github.com/prometheus/procfs/Makefile",
    "content": "ci: fmt lint test\n\nfmt:\n\t! gofmt -l *.go | read nothing\n\tgo vet\n\nlint:\n\tgo get github.com/golang/lint/golint\n\tgolint *.go\n\ntest: sysfs/fixtures/.unpacked\n\tgo test -v ./...\n\nsysfs/fixtures/.unpacked: sysfs/fixtures.ttar\n\t./ttar -C sysfs -x -f sysfs/fixtures.ttar\n\ttouch $@\n\n.PHONY: fmt lint test ci\n"
  },
  {
    "path": "vendor/github.com/prometheus/procfs/NOTICE",
    "content": "procfs provides functions to retrieve system, kernel and process\nmetrics from the pseudo-filesystem proc.\n\nCopyright 2014-2015 The Prometheus Authors\n\nThis product includes software developed at\nSoundCloud Ltd. (http://soundcloud.com/).\n"
  },
  {
    "path": "vendor/github.com/prometheus/procfs/README.md",
    "content": "# procfs\n\nThis procfs package provides functions to retrieve system, kernel and process\nmetrics from the pseudo-filesystem proc.\n\n*WARNING*: This package is a work in progress. Its API may still break in\nbackwards-incompatible ways without warnings. Use it at your own risk.\n\n[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)\n[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)\n[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs)\n"
  },
  {
    "path": "vendor/github.com/prometheus/procfs/buddyinfo.go",
    "content": "// Copyright 2017 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage procfs\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n// A BuddyInfo is the details parsed from /proc/buddyinfo.\n// The data is comprised of an array of free fragments of each size.\n// The sizes are 2^n*PAGE_SIZE, where n is the array index.\ntype BuddyInfo struct {\n\tNode  string\n\tZone  string\n\tSizes []float64\n}\n\n// NewBuddyInfo reads the buddyinfo statistics.\nfunc NewBuddyInfo() ([]BuddyInfo, error) {\n\tfs, err := NewFS(DefaultMountPoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn fs.NewBuddyInfo()\n}\n\n// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem.\nfunc (fs FS) NewBuddyInfo() ([]BuddyInfo, error) {\n\tfile, err := os.Open(fs.Path(\"buddyinfo\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\n\treturn parseBuddyInfo(file)\n}\n\nfunc parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {\n\tvar (\n\t\tbuddyInfo   = []BuddyInfo{}\n\t\tscanner     = bufio.NewScanner(r)\n\t\tbucketCount = -1\n\t)\n\n\tfor scanner.Scan() {\n\t\tvar err error\n\t\tline := scanner.Text()\n\t\tparts := strings.Fields(string(line))\n\n\t\tif len(parts) < 4 {\n\t\t\treturn nil, fmt.Errorf(\"invalid number of fields when parsing buddyinfo\")\n\t\t}\n\n\t\tnode := strings.TrimRight(parts[1], \",\")\n\t\tzone := strings.TrimRight(parts[3], \",\")\n\t\tarraySize := len(parts[4:])\n\n\t\tif bucketCount == -1 {\n\t\t\tbucketCount = arraySize\n\t\t} else {\n\t\t\tif bucketCount != arraySize {\n\t\t\t\treturn nil, fmt.Errorf(\"mismatch in number of buddyinfo buckets, previous count %d, new count %d\", bucketCount, arraySize)\n\t\t\t}\n\t\t}\n\n\t\tsizes := make([]float64, arraySize)\n\t\tfor i := 0; i < arraySize; i++ {\n\t\t\tsizes[i], err = strconv.ParseFloat(parts[i+4], 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid value in buddyinfo: %s\", err)\n\t\t\t}\n\t\t}\n\n\t\tbuddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes})\n\t}\n\n\treturn buddyInfo, scanner.Err()\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/procfs/buddyinfo_test.go",
    "content": "// Copyright 2017 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage procfs\n\nimport (\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestBuddyInfo(t *testing.T) {\n\tbuddyInfo, err := FS(\"fixtures/buddyinfo/valid\").NewBuddyInfo()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif want, got := \"DMA\", buddyInfo[0].Zone; want != got {\n\t\tt.Errorf(\"want Node 0, Zone %s, got %s\", want, got)\n\t}\n\n\tif want, got := \"Normal\", buddyInfo[2].Zone; want != got {\n\t\tt.Errorf(\"want Node 0, Zone %s, got %s\", want, got)\n\t}\n\n\tif want, got := 4381.0, buddyInfo[2].Sizes[0]; want != got {\n\t\tt.Errorf(\"want Node 0, Zone Normal %f, got %f\", want, got)\n\t}\n\n\tif want, got := 572.0, buddyInfo[1].Sizes[1]; want != got {\n\t\tt.Errorf(\"want Node 0, Zone DMA32 %f, got %f\", want, got)\n\t}\n}\n\nfunc TestBuddyInfoShort(t *testing.T) {\n\t_, err := FS(\"fixtures/buddyinfo/short\").NewBuddyInfo()\n\tif err == nil {\n\t\tt.Errorf(\"expected error, but none occurred\")\n\t}\n\n\tif want, got := \"invalid number of fields when parsing buddyinfo\", err.Error(); want != got {\n\t\tt.Errorf(\"wrong error returned, wanted %q, got %q\", want, got)\n\t}\n}\n\nfunc TestBuddyInfoSizeMismatch(t *testing.T) {\n\t_, err := FS(\"fixtures/buddyinfo/sizemismatch\").NewBuddyInfo()\n\tif err == nil {\n\t\tt.Errorf(\"expected error, but none occurred\")\n\t}\n\n\tif want, got := \"mismatch in number of buddyinfo buckets\", err.Error(); !strings.HasPrefix(got, want) {\n\t\tt.Errorf(\"wrong error returned, wanted prefix %q, got %q\", want, got)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/procfs/doc.go",
    "content": "// Copyright 2014 Prometheus Team\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// Package procfs provides functions to retrieve system, kernel and process\n// metrics from the pseudo-filesystem proc.\n//\n// Example:\n//\n//    package main\n//\n//    import (\n//    \t\"fmt\"\n//    \t\"log\"\n//\n//    \t\"github.com/prometheus/procfs\"\n//    )\n//\n//    func main() {\n//    \tp, err := procfs.Self()\n//    \tif err != nil {\n//    \t\tlog.Fatalf(\"could not get process: %s\", err)\n//    \t}\n//\n//    \tstat, err := p.NewStat()\n//    \tif err != nil {\n//    \t\tlog.Fatalf(\"could not get process stat: %s\", err)\n//    \t}\n//\n//    \tfmt.Printf(\"command:  %s\\n\", stat.Comm)\n//    \tfmt.Printf(\"cpu time: %fs\\n\", stat.CPUTime())\n//    \tfmt.Printf(\"vsize:    %dB\\n\", stat.VirtualMemory())\n//    \tfmt.Printf(\"rss:      %dB\\n\", stat.ResidentMemory())\n//    }\n//\npackage procfs\n"
  },
  {
    "path": "vendor/github.com/prometheus/procfs/fs.go",
    "content": "package procfs\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\n\t\"github.com/prometheus/procfs/xfs\"\n)\n\n// FS represents the pseudo-filesystem proc, which provides an interface to\n// kernel data structures.\ntype FS string\n\n// DefaultMountPoint is the common mount point of the proc filesystem.\nconst DefaultMountPoint = \"/proc\"\n\n// NewFS returns a new FS mounted under the given mountPoint. It will error\n// if the mount point can't be read.\nfunc NewFS(mountPoint string) (FS, error) {\n\tinfo, err := os.Stat(mountPoint)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"could not read %s: %s\", mountPoint, err)\n\t}\n\tif !info.IsDir() {\n\t\treturn \"\", fmt.Errorf(\"mount point %s is not a directory\", mountPoint)\n\t}\n\n\treturn FS(mountPoint), nil\n}\n\n// Path returns the path of the given subsystem relative to the procfs root.\nfunc (fs FS) Path(p ...string) string {\n\treturn path.Join(append([]string{string(fs)}, p...)...)\n}\n\n// XFSStats retrieves XFS filesystem runtime statistics.\nfunc (fs FS) XFSStats() (*xfs.Stats, error) {\n\tf, err := os.Open(fs.Path(\"fs/xfs/stat\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\treturn xfs.ParseStats(f)\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/procfs/fs_test.go",
    "content": "package procfs\n\nimport \"testing\"\n\nfunc TestNewFS(t *testing.T) {\n\tif _, err := NewFS(\"foobar\"); err == nil {\n\t\tt.Error(\"want NewFS to fail for non-existing mount point\")\n\t}\n\n\tif _, err := NewFS(\"procfs.go\"); err == nil {\n\t\tt.Error(\"want NewFS to fail if mount point is not a directory\")\n\t}\n}\n\nfunc TestFSXFSStats(t *testing.T) {\n\tstats, err := FS(\"fixtures\").XFSStats()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to parse XFS stats: %v\", err)\n\t}\n\n\t// Very lightweight test just to sanity check the path used\n\t// to open XFS stats. Heavier tests in package xfs.\n\tif want, got := uint32(92447), stats.ExtentAllocation.ExtentsAllocated; want != got {\n\t\tt.Errorf(\"unexpected extents allocated:\\nwant: %d\\nhave: %d\", want, got)\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/procfs/ipvs.go",
    "content": "package procfs\n\nimport (\n\t\"bufio\"\n\t\"encoding/hex\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`.\ntype IPVSStats struct {\n\t// Total count of connections.\n\tConnections uint64\n\t// Total incoming packages processed.\n\tIncomingPackets uint64\n\t// Total outgoing packages processed.\n\tOutgoingPackets uint64\n\t// Total incoming traffic.\n\tIncomingBytes uint64\n\t// Total outgoing traffic.\n\tOutgoingBytes uint64\n}\n\n// IPVSBackendStatus holds current metrics of one virtual / real address pair.\ntype IPVSBackendStatus struct {\n\t// The local (virtual) IP address.\n\tLocalAddress net.IP\n\t// The local (virtual) port.\n\tLocalPort uint16\n\t// The local firewall mark\n\tLocalMark string\n\t// The transport protocol (TCP, UDP).\n\tProto string\n\t// The remote (real) IP address.\n\tRemoteAddress net.IP\n\t// The remote (real) port.\n\tRemotePort uint16\n\t// The current number of active connections for this virtual/real address pair.\n\tActiveConn uint64\n\t// The current number of inactive connections for this virtual/real address pair.\n\tInactConn uint64\n\t// The current weight of this virtual/real address pair.\n\tWeight uint64\n}\n\n// NewIPVSStats reads the IPVS statistics.\nfunc NewIPVSStats() (IPVSStats, error) {\n\tfs, err := NewFS(DefaultMountPoint)\n\tif err != nil {\n\t\treturn IPVSStats{}, err\n\t}\n\n\treturn fs.NewIPVSStats()\n}\n\n// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem.\nfunc (fs FS) NewIPVSStats() (IPVSStats, error) {\n\tfile, err := os.Open(fs.Path(\"net/ip_vs_stats\"))\n\tif err != nil {\n\t\treturn IPVSStats{}, err\n\t}\n\tdefer file.Close()\n\n\treturn parseIPVSStats(file)\n}\n\n// parseIPVSStats performs the actual parsing of `ip_vs_stats`.\nfunc parseIPVSStats(file io.Reader) (IPVSStats, error) {\n\tvar (\n\t\tstatContent []byte\n\t\tstatLines   []string\n\t\tstatFields  []string\n\t\tstats       IPVSStats\n\t)\n\n\tstatContent, err := ioutil.ReadAll(file)\n\tif err != nil {\n\t\treturn IPVSStats{}, err\n\t}\n\n\tstatLines = strings.SplitN(string(statContent), \"\\n\", 4)\n\tif len(statLines) != 4 {\n\t\treturn IPVSStats{}, errors.New(\"ip_vs_stats corrupt: too short\")\n\t}\n\n\tstatFields = strings.Fields(statLines[2])\n\tif len(statFields) != 5 {\n\t\treturn IPVSStats{}, errors.New(\"ip_vs_stats corrupt: unexpected number of fields\")\n\t}\n\n\tstats.Connections, err = strconv.ParseUint(statFields[0], 16, 64)\n\tif err != nil {\n\t\treturn IPVSStats{}, err\n\t}\n\tstats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64)\n\tif err != nil {\n\t\treturn IPVSStats{}, err\n\t}\n\tstats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64)\n\tif err != nil {\n\t\treturn IPVSStats{}, err\n\t}\n\tstats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64)\n\tif err != nil {\n\t\treturn IPVSStats{}, err\n\t}\n\tstats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64)\n\tif err != nil {\n\t\treturn IPVSStats{}, err\n\t}\n\n\treturn stats, nil\n}\n\n// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs.\nfunc NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {\n\tfs, err := NewFS(DefaultMountPoint)\n\tif err != nil {\n\t\treturn []IPVSBackendStatus{}, err\n\t}\n\n\treturn fs.NewIPVSBackendStatus()\n}\n\n// NewIPVSBackendStatus reads and returns the status of all (virtual,real) 
server pairs from the specified `proc` filesystem.\nfunc (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) {\n\tfile, err := os.Open(fs.Path(\"net/ip_vs\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\n\treturn parseIPVSBackendStatus(file)\n}\n\nfunc parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {\n\tvar (\n\t\tstatus       []IPVSBackendStatus\n\t\tscanner      = bufio.NewScanner(file)\n\t\tproto        string\n\t\tlocalMark    string\n\t\tlocalAddress net.IP\n\t\tlocalPort    uint16\n\t\terr          error\n\t)\n\n\tfor scanner.Scan() {\n\t\tfields := strings.Fields(string(scanner.Text()))\n\t\tif len(fields) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tswitch {\n\t\tcase fields[0] == \"IP\" || fields[0] == \"Prot\" || fields[1] == \"RemoteAddress:Port\":\n\t\t\tcontinue\n\t\tcase fields[0] == \"TCP\" || fields[0] == \"UDP\":\n\t\t\tif len(fields) < 2 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tproto = fields[0]\n\t\t\tlocalMark = \"\"\n\t\t\tlocalAddress, localPort, err = parseIPPort(fields[1])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\tcase fields[0] == \"FWM\":\n\t\t\tif len(fields) < 2 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tproto = fields[0]\n\t\t\tlocalMark = fields[1]\n\t\t\tlocalAddress = nil\n\t\t\tlocalPort = 0\n\t\tcase fields[0] == \"->\":\n\t\t\tif len(fields) < 6 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tremoteAddress, remotePort, err := parseIPPort(fields[1])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tweight, err := strconv.ParseUint(fields[3], 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tactiveConn, err := strconv.ParseUint(fields[4], 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tinactConn, err := strconv.ParseUint(fields[5], 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tstatus = append(status, IPVSBackendStatus{\n\t\t\t\tLocalAddress:  localAddress,\n\t\t\t\tLocalPort:     localPort,\n\t\t\t\tLocalMark:     localMark,\n\t\t\t\tRemoteAddress: remoteAddress,\n\t\t\t\tRemotePort:    remotePort,\n\t\t\t\tProto:         proto,\n\t\t\t\tWeight:        weight,\n\t\t\t\tActiveConn:    activeConn,\n\t\t\t\tInactConn:     inactConn,\n\t\t\t})\n\t\t}\n\t}\n\treturn status, nil\n}\n\nfunc parseIPPort(s string) (net.IP, uint16, error) {\n\tvar (\n\t\tip  net.IP\n\t\terr error\n\t)\n\n\tswitch len(s) {\n\tcase 13:\n\t\tip, err = hex.DecodeString(s[0:8])\n\t\tif err != nil {\n\t\t\treturn nil, 0, err\n\t\t}\n\tcase 46:\n\t\tip = net.ParseIP(s[1:40])\n\t\tif ip == nil {\n\t\t\treturn nil, 0, fmt.Errorf(\"invalid IPv6 address: %s\", s[1:40])\n\t\t}\n\tdefault:\n\t\treturn nil, 0, fmt.Errorf(\"unexpected IP:Port: %s\", s)\n\t}\n\n\tportString := s[len(s)-4:]\n\tif len(portString) != 4 {\n\t\treturn nil, 0, fmt.Errorf(\"unexpected port string format: %s\", portString)\n\t}\n\tport, err := strconv.ParseUint(portString, 16, 16)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\treturn ip, uint16(port), nil\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/procfs/ipvs_test.go",
    "content": "package procfs\n\nimport (\n\t\"net\"\n\t\"testing\"\n)\n\nvar (\n\texpectedIPVSStats = IPVSStats{\n\t\tConnections:     23765872,\n\t\tIncomingPackets: 3811989221,\n\t\tOutgoingPackets: 0,\n\t\tIncomingBytes:   89991519156915,\n\t\tOutgoingBytes:   0,\n\t}\n\texpectedIPVSBackendStatuses = []IPVSBackendStatus{\n\t\t{\n\t\t\tLocalAddress:  net.ParseIP(\"192.168.0.22\"),\n\t\t\tLocalPort:     3306,\n\t\t\tRemoteAddress: net.ParseIP(\"192.168.82.22\"),\n\t\t\tRemotePort:    3306,\n\t\t\tProto:         \"TCP\",\n\t\t\tWeight:        100,\n\t\t\tActiveConn:    248,\n\t\t\tInactConn:     2,\n\t\t},\n\t\t{\n\t\t\tLocalAddress:  net.ParseIP(\"192.168.0.22\"),\n\t\t\tLocalPort:     3306,\n\t\t\tRemoteAddress: net.ParseIP(\"192.168.83.24\"),\n\t\t\tRemotePort:    3306,\n\t\t\tProto:         \"TCP\",\n\t\t\tWeight:        100,\n\t\t\tActiveConn:    248,\n\t\t\tInactConn:     2,\n\t\t},\n\t\t{\n\t\t\tLocalAddress:  net.ParseIP(\"192.168.0.22\"),\n\t\t\tLocalPort:     3306,\n\t\t\tRemoteAddress: net.ParseIP(\"192.168.83.21\"),\n\t\t\tRemotePort:    3306,\n\t\t\tProto:         \"TCP\",\n\t\t\tWeight:        100,\n\t\t\tActiveConn:    248,\n\t\t\tInactConn:     1,\n\t\t},\n\t\t{\n\t\t\tLocalAddress:  net.ParseIP(\"192.168.0.57\"),\n\t\t\tLocalPort:     3306,\n\t\t\tRemoteAddress: net.ParseIP(\"192.168.84.22\"),\n\t\t\tRemotePort:    3306,\n\t\t\tProto:         \"TCP\",\n\t\t\tWeight:        0,\n\t\t\tActiveConn:    0,\n\t\t\tInactConn:     0,\n\t\t},\n\t\t{\n\t\t\tLocalAddress:  net.ParseIP(\"192.168.0.57\"),\n\t\t\tLocalPort:     3306,\n\t\t\tRemoteAddress: net.ParseIP(\"192.168.82.21\"),\n\t\t\tRemotePort:    3306,\n\t\t\tProto:         \"TCP\",\n\t\t\tWeight:        100,\n\t\t\tActiveConn:    1499,\n\t\t\tInactConn:     0,\n\t\t},\n\t\t{\n\t\t\tLocalAddress:  net.ParseIP(\"192.168.0.57\"),\n\t\t\tLocalPort:     3306,\n\t\t\tRemoteAddress: net.ParseIP(\"192.168.50.21\"),\n\t\t\tRemotePort:    3306,\n\t\t\tProto:         \"TCP\",\n\t\t\tWeight:        100,\n\t\t\tActiveConn:    1498,\n\t\t\tInactConn:     0,\n\t\t},\n\t\t{\n\t\t\tLocalAddress:  net.ParseIP(\"192.168.0.55\"),\n\t\t\tLocalPort:     3306,\n\t\t\tRemoteAddress: net.ParseIP(\"192.168.50.26\"),\n\t\t\tRemotePort:    3306,\n\t\t\tProto:         \"TCP\",\n\t\t\tWeight:        0,\n\t\t\tActiveConn:    0,\n\t\t\tInactConn:     0,\n\t\t},\n\t\t{\n\t\t\tLocalAddress:  net.ParseIP(\"192.168.0.55\"),\n\t\t\tLocalPort:     3306,\n\t\t\tRemoteAddress: net.ParseIP(\"192.168.49.32\"),\n\t\t\tRemotePort:    3306,\n\t\t\tProto:         \"TCP\",\n\t\t\tWeight:        100,\n\t\t\tActiveConn:    0,\n\t\t\tInactConn:     0,\n\t\t},\n\t\t{\n\t\t\tLocalAddress:  net.ParseIP(\"2620::1\"),\n\t\t\tLocalPort:     80,\n\t\t\tRemoteAddress: net.ParseIP(\"2620::2\"),\n\t\t\tRemotePort:    80,\n\t\t\tProto:         \"TCP\",\n\t\t\tWeight:        1,\n\t\t\tActiveConn:    0,\n\t\t\tInactConn:     0,\n\t\t},\n\t\t{\n\t\t\tLocalAddress:  net.ParseIP(\"2620::1\"),\n\t\t\tLocalPort:     80,\n\t\t\tRemoteAddress: net.ParseIP(\"2620::3\"),\n\t\t\tRemotePort:    80,\n\t\t\tProto:         \"TCP\",\n\t\t\tWeight:        1,\n\t\t\tActiveConn:    0,\n\t\t\tInactConn:     0,\n\t\t},\n\t\t{\n\t\t\tLocalAddress:  net.ParseIP(\"2620::1\"),\n\t\t\tLocalPort:     80,\n\t\t\tRemoteAddress: net.ParseIP(\"2620::4\"),\n\t\t\tRemotePort:    80,\n\t\t\tProto:         \"TCP\",\n\t\t\tWeight:        1,\n\t\t\tActiveConn:    1,\n\t\t\tInactConn:     1,\n\t\t},\n\t\t{\n\t\t\tLocalMark:     \"10001000\",\n\t\t\tRemoteAddress: net.ParseIP(\"192.168.50.26\"),\n\t\t\tRemotePort:    
3306,\n\t\t\tProto:         \"FWM\",\n\t\t\tWeight:        0,\n\t\t\tActiveConn:    0,\n\t\t\tInactConn:     1,\n\t\t},\n\t\t{\n\t\t\tLocalMark:     \"10001000\",\n\t\t\tRemoteAddress: net.ParseIP(\"192.168.50.21\"),\n\t\t\tRemotePort:    3306,\n\t\t\tProto:         \"FWM\",\n\t\t\tWeight:        0,\n\t\t\tActiveConn:    0,\n\t\t\tInactConn:     2,\n\t\t},\n\t}\n)\n\nfunc TestIPVSStats(t *testing.T) {\n\tstats, err := FS(\"fixtures\").NewIPVSStats()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif stats != expectedIPVSStats {\n\t\tt.Errorf(\"want %+v, have %+v\", expectedIPVSStats, stats)\n\t}\n}\n\nfunc TestParseIPPort(t *testing.T) {\n\tip := net.ParseIP(\"192.168.0.22\")\n\tport := uint16(3306)\n\n\tgotIP, gotPort, err := parseIPPort(\"C0A80016:0CEA\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !(gotIP.Equal(ip) && port == gotPort) {\n\t\tt.Errorf(\"want %s:%d, have %s:%d\", ip, port, gotIP, gotPort)\n\t}\n}\n\nfunc TestParseIPPortInvalid(t *testing.T) {\n\ttestcases := []string{\n\t\t\"\",\n\t\t\"C0A80016\",\n\t\t\"C0A800:1234\",\n\t\t\"FOOBARBA:1234\",\n\t\t\"C0A80016:0CEA:1234\",\n\t}\n\n\tfor _, s := range testcases {\n\t\tip, port, err := parseIPPort(s)\n\t\tif ip != nil || port != uint16(0) || err == nil {\n\t\t\tt.Errorf(\"Expected error for input %s, have ip = %s, port = %v, err = %v\", s, ip, port, err)\n\t\t}\n\t}\n}\n\nfunc TestParseIPPortIPv6(t *testing.T) {\n\tip := net.ParseIP(\"dead:beef::1\")\n\tport := uint16(8080)\n\n\tgotIP, gotPort, err := parseIPPort(\"[DEAD:BEEF:0000:0000:0000:0000:0000:0001]:1F90\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !(gotIP.Equal(ip) && port == gotPort) {\n\t\tt.Errorf(\"want %s:%d, have %s:%d\", ip, port, gotIP, gotPort)\n\t}\n}\n\nfunc TestIPVSBackendStatus(t *testing.T) {\n\tbackendStats, err := FS(\"fixtures\").NewIPVSBackendStatus()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif want, have := len(expectedIPVSBackendStatuses), len(backendStats); want != have {\n\t\tt.Fatalf(\"want %d backend statuses, have %d\", want, have)\n\t}\n\n\tfor idx, expect := range expectedIPVSBackendStatuses {\n\t\tif !backendStats[idx].LocalAddress.Equal(expect.LocalAddress) {\n\t\t\tt.Errorf(\"want LocalAddress %s, have %s\", expect.LocalAddress, backendStats[idx].LocalAddress)\n\t\t}\n\t\tif backendStats[idx].LocalPort != expect.LocalPort {\n\t\t\tt.Errorf(\"want LocalPort %d, have %d\", expect.LocalPort, backendStats[idx].LocalPort)\n\t\t}\n\t\tif !backendStats[idx].RemoteAddress.Equal(expect.RemoteAddress) {\n\t\t\tt.Errorf(\"want RemoteAddress %s, have %s\", expect.RemoteAddress, backendStats[idx].RemoteAddress)\n\t\t}\n\t\tif backendStats[idx].RemotePort != expect.RemotePort {\n\t\t\tt.Errorf(\"want RemotePort %d, have %d\", expect.RemotePort, backendStats[idx].RemotePort)\n\t\t}\n\t\tif backendStats[idx].Proto != expect.Proto {\n\t\t\tt.Errorf(\"want Proto %s, have %s\", expect.Proto, backendStats[idx].Proto)\n\t\t}\n\t\tif backendStats[idx].Weight != expect.Weight {\n\t\t\tt.Errorf(\"want Weight %d, have %d\", expect.Weight, backendStats[idx].Weight)\n\t\t}\n\t\tif backendStats[idx].ActiveConn != expect.ActiveConn {\n\t\t\tt.Errorf(\"want ActiveConn %d, have %d\", expect.ActiveConn, backendStats[idx].ActiveConn)\n\t\t}\n\t\tif backendStats[idx].InactConn != expect.InactConn {\n\t\t\tt.Errorf(\"want InactConn %d, have %d\", expect.InactConn, backendStats[idx].InactConn)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/procfs/mdstat.go",
    "content": "package procfs\n\nimport (\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar (\n\tstatuslineRE = regexp.MustCompile(`(\\d+) blocks .*\\[(\\d+)/(\\d+)\\] \\[[U_]+\\]`)\n\tbuildlineRE  = regexp.MustCompile(`\\((\\d+)/\\d+\\)`)\n)\n\n// MDStat holds info parsed from /proc/mdstat.\ntype MDStat struct {\n\t// Name of the device.\n\tName string\n\t// activity-state of the device.\n\tActivityState string\n\t// Number of active disks.\n\tDisksActive int64\n\t// Total number of disks the device consists of.\n\tDisksTotal int64\n\t// Number of blocks the device holds.\n\tBlocksTotal int64\n\t// Number of blocks on the device that are in sync.\n\tBlocksSynced int64\n}\n\n// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos.\nfunc (fs FS) ParseMDStat() (mdstates []MDStat, err error) {\n\tmdStatusFilePath := fs.Path(\"mdstat\")\n\tcontent, err := ioutil.ReadFile(mdStatusFilePath)\n\tif err != nil {\n\t\treturn []MDStat{}, fmt.Errorf(\"error parsing %s: %s\", mdStatusFilePath, err)\n\t}\n\n\tmdStates := []MDStat{}\n\tlines := strings.Split(string(content), \"\\n\")\n\tfor i, l := range lines {\n\t\tif l == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tif l[0] == ' ' {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasPrefix(l, \"Personalities\") || strings.HasPrefix(l, \"unused\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tmainLine := strings.Split(l, \" \")\n\t\tif len(mainLine) < 3 {\n\t\t\treturn mdStates, fmt.Errorf(\"error parsing mdline: %s\", l)\n\t\t}\n\t\tmdName := mainLine[0]\n\t\tactivityState := mainLine[2]\n\n\t\tif len(lines) <= i+3 {\n\t\t\treturn mdStates, fmt.Errorf(\n\t\t\t\t\"error parsing %s: too few lines for md device %s\",\n\t\t\t\tmdStatusFilePath,\n\t\t\t\tmdName,\n\t\t\t)\n\t\t}\n\n\t\tactive, total, size, err := evalStatusline(lines[i+1])\n\t\tif err != nil {\n\t\t\treturn mdStates, fmt.Errorf(\"error parsing %s: %s\", mdStatusFilePath, err)\n\t\t}\n\n\t\t// j is the line number of the syncing-line.\n\t\tj := i + 2\n\t\tif strings.Contains(lines[i+2], \"bitmap\") { // skip bitmap line\n\t\t\tj = i + 3\n\t\t}\n\n\t\t// If device is syncing at the moment, get the number of currently\n\t\t// synced bytes, otherwise that number equals the size of the device.\n\t\tsyncedBlocks := size\n\t\tif strings.Contains(lines[j], \"recovery\") || strings.Contains(lines[j], \"resync\") {\n\t\t\tsyncedBlocks, err = evalBuildline(lines[j])\n\t\t\tif err != nil {\n\t\t\t\treturn mdStates, fmt.Errorf(\"error parsing %s: %s\", mdStatusFilePath, err)\n\t\t\t}\n\t\t}\n\n\t\tmdStates = append(mdStates, MDStat{\n\t\t\tName:          mdName,\n\t\t\tActivityState: activityState,\n\t\t\tDisksActive:   active,\n\t\t\tDisksTotal:    total,\n\t\t\tBlocksTotal:   size,\n\t\t\tBlocksSynced:  syncedBlocks,\n\t\t})\n\t}\n\n\treturn mdStates, nil\n}\n\nfunc evalStatusline(statusline string) (active, total, size int64, err error) {\n\tmatches := statuslineRE.FindStringSubmatch(statusline)\n\tif len(matches) != 4 {\n\t\treturn 0, 0, 0, fmt.Errorf(\"unexpected statusline: %s\", statusline)\n\t}\n\n\tsize, err = strconv.ParseInt(matches[1], 10, 64)\n\tif err != nil {\n\t\treturn 0, 0, 0, fmt.Errorf(\"unexpected statusline %s: %s\", statusline, err)\n\t}\n\n\ttotal, err = strconv.ParseInt(matches[2], 10, 64)\n\tif err != nil {\n\t\treturn 0, 0, 0, fmt.Errorf(\"unexpected statusline %s: %s\", statusline, err)\n\t}\n\n\tactive, err = strconv.ParseInt(matches[3], 10, 64)\n\tif err != nil {\n\t\treturn 0, 0, 0, fmt.Errorf(\"unexpected statusline %s: %s\", statusline, 
err)\n\t}\n\n\treturn active, total, size, nil\n}\n\nfunc evalBuildline(buildline string) (syncedBlocks int64, err error) {\n\tmatches := buildlineRE.FindStringSubmatch(buildline)\n\tif len(matches) != 2 {\n\t\treturn 0, fmt.Errorf(\"unexpected buildline: %s\", buildline)\n\t}\n\n\tsyncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"%s in buildline: %s\", err, buildline)\n\t}\n\n\treturn syncedBlocks, nil\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/procfs/mdstat_test.go",
    "content": "package procfs\n\nimport (\n\t\"testing\"\n)\n\nfunc TestMDStat(t *testing.T) {\n\tmdStates, err := FS(\"fixtures\").ParseMDStat()\n\tif err != nil {\n\t\tt.Fatalf(\"parsing of reference-file failed entirely: %s\", err)\n\t}\n\n\trefs := map[string]MDStat{\n\t\t\"md3\":   {\"md3\", \"active\", 8, 8, 5853468288, 5853468288},\n\t\t\"md127\": {\"md127\", \"active\", 2, 2, 312319552, 312319552},\n\t\t\"md0\":   {\"md0\", \"active\", 2, 2, 248896, 248896},\n\t\t\"md4\":   {\"md4\", \"inactive\", 2, 2, 4883648, 4883648},\n\t\t\"md6\":   {\"md6\", \"active\", 1, 2, 195310144, 16775552},\n\t\t\"md8\":   {\"md8\", \"active\", 2, 2, 195310144, 16775552},\n\t\t\"md7\":   {\"md7\", \"active\", 3, 4, 7813735424, 7813735424},\n\t}\n\n\tif want, have := len(refs), len(mdStates); want != have {\n\t\tt.Errorf(\"want %d parsed md-devices, have %d\", want, have)\n\t}\n\tfor _, md := range mdStates {\n\t\tif want, have := refs[md.Name], md; want != have {\n\t\t\tt.Errorf(\"%s: want %v, have %v\", md.Name, want, have)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/procfs/mountstats.go",
    "content": "package procfs\n\n// While implementing parsing of /proc/[pid]/mountstats, this blog was used\n// heavily as a reference:\n//   https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex\n//\n// Special thanks to Chris Siebenmann for all of his posts explaining the\n// various statistics available for NFS.\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\n// Constants shared between multiple functions.\nconst (\n\tdeviceEntryLen = 8\n\n\tfieldBytesLen  = 8\n\tfieldEventsLen = 27\n\n\tstatVersion10 = \"1.0\"\n\tstatVersion11 = \"1.1\"\n\n\tfieldTransport10Len = 10\n\tfieldTransport11Len = 13\n)\n\n// A Mount is a device mount parsed from /proc/[pid]/mountstats.\ntype Mount struct {\n\t// Name of the device.\n\tDevice string\n\t// The mount point of the device.\n\tMount string\n\t// The filesystem type used by the device.\n\tType string\n\t// If available additional statistics related to this Mount.\n\t// Use a type assertion to determine if additional statistics are available.\n\tStats MountStats\n}\n\n// A MountStats is a type which contains detailed statistics for a specific\n// type of Mount.\ntype MountStats interface {\n\tmountStats()\n}\n\n// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts.\ntype MountStatsNFS struct {\n\t// The version of statistics provided.\n\tStatVersion string\n\t// The age of the NFS mount.\n\tAge time.Duration\n\t// Statistics related to byte counters for various operations.\n\tBytes NFSBytesStats\n\t// Statistics related to various NFS event occurrences.\n\tEvents NFSEventsStats\n\t// Statistics broken down by filesystem operation.\n\tOperations []NFSOperationStats\n\t// Statistics about the NFS RPC transport.\n\tTransport NFSTransportStats\n}\n\n// mountStats implements MountStats.\nfunc (m MountStatsNFS) mountStats() {}\n\n// A NFSBytesStats contains statistics about the number of bytes read and written\n// by an NFS client to and from an NFS server.\ntype NFSBytesStats struct {\n\t// Number of bytes read using the read() syscall.\n\tRead uint64\n\t// Number of bytes written using the write() syscall.\n\tWrite uint64\n\t// Number of bytes read using the read() syscall in O_DIRECT mode.\n\tDirectRead uint64\n\t// Number of bytes written using the write() syscall in O_DIRECT mode.\n\tDirectWrite uint64\n\t// Number of bytes read from the NFS server, in total.\n\tReadTotal uint64\n\t// Number of bytes written to the NFS server, in total.\n\tWriteTotal uint64\n\t// Number of pages read directly via mmap()'d files.\n\tReadPages uint64\n\t// Number of pages written directly via mmap()'d files.\n\tWritePages uint64\n}\n\n// A NFSEventsStats contains statistics about NFS event occurrences.\ntype NFSEventsStats struct {\n\t// Number of times cached inode attributes are re-validated from the server.\n\tInodeRevalidate uint64\n\t// Number of times cached dentry nodes are re-validated from the server.\n\tDnodeRevalidate uint64\n\t// Number of times an inode cache is cleared.\n\tDataInvalidate uint64\n\t// Number of times cached inode attributes are invalidated.\n\tAttributeInvalidate uint64\n\t// Number of times files or directories have been open()'d.\n\tVFSOpen uint64\n\t// Number of times a directory lookup has occurred.\n\tVFSLookup uint64\n\t// Number of times permissions have been checked.\n\tVFSAccess uint64\n\t// Number of updates (and potential writes) to pages.\n\tVFSUpdatePage uint64\n\t// Number of pages read directly via mmap()'d files.\n\tVFSReadPage uint64\n\t// 
Number of times a group of pages have been read.\n\tVFSReadPages uint64\n\t// Number of pages written directly via mmap()'d files.\n\tVFSWritePage uint64\n\t// Number of times a group of pages have been written.\n\tVFSWritePages uint64\n\t// Number of times directory entries have been read with getdents().\n\tVFSGetdents uint64\n\t// Number of times attributes have been set on inodes.\n\tVFSSetattr uint64\n\t// Number of pending writes that have been forcefully flushed to the server.\n\tVFSFlush uint64\n\t// Number of times fsync() has been called on directories and files.\n\tVFSFsync uint64\n\t// Number of times locking has been attempted on a file.\n\tVFSLock uint64\n\t// Number of times files have been closed and released.\n\tVFSFileRelease uint64\n\t// Unknown.  Possibly unused.\n\tCongestionWait uint64\n\t// Number of times files have been truncated.\n\tTruncation uint64\n\t// Number of times a file has been grown due to writes beyond its existing end.\n\tWriteExtension uint64\n\t// Number of times a file was removed while still open by another process.\n\tSillyRename uint64\n\t// Number of times the NFS server gave less data than expected while reading.\n\tShortRead uint64\n\t// Number of times the NFS server wrote less data than expected while writing.\n\tShortWrite uint64\n\t// Number of times the NFS server indicated EJUKEBOX; retrieving data from\n\t// offline storage.\n\tJukeboxDelay uint64\n\t// Number of NFS v4.1+ pNFS reads.\n\tPNFSRead uint64\n\t// Number of NFS v4.1+ pNFS writes.\n\tPNFSWrite uint64\n}\n\n// A NFSOperationStats contains statistics for a single operation.\ntype NFSOperationStats struct {\n\t// The name of the operation.\n\tOperation string\n\t// Number of requests performed for this operation.\n\tRequests uint64\n\t// Number of times an actual RPC request has been transmitted for this operation.\n\tTransmissions uint64\n\t// Number of times a request has had a major timeout.\n\tMajorTimeouts uint64\n\t// Number of bytes sent for this operation, including RPC headers and payload.\n\tBytesSent uint64\n\t// Number of bytes received for this operation, including RPC headers and payload.\n\tBytesReceived uint64\n\t// Duration all requests spent queued for transmission before they were sent.\n\tCumulativeQueueTime time.Duration\n\t// Duration it took to get a reply back after the request was transmitted.\n\tCumulativeTotalResponseTime time.Duration\n\t// Duration from when a request was enqueued to when it was completely handled.\n\tCumulativeTotalRequestTime time.Duration\n}\n\n// A NFSTransportStats contains statistics for the NFS mount RPC requests and\n// responses.\ntype NFSTransportStats struct {\n\t// The local port used for the NFS mount.\n\tPort uint64\n\t// Number of times the client has had to establish a connection from scratch\n\t// to the NFS server.\n\tBind uint64\n\t// Number of times the client has made a TCP connection to the NFS server.\n\tConnect uint64\n\t// Duration (in jiffies, a kernel internal unit of time) the NFS mount has\n\t// spent waiting for connections to the server to be established.\n\tConnectIdleTime uint64\n\t// Duration since the NFS mount last saw any RPC traffic.\n\tIdleTime time.Duration\n\t// Number of RPC requests for this mount sent to the NFS server.\n\tSends uint64\n\t// Number of RPC responses for this mount received from the NFS server.\n\tReceives uint64\n\t// Number of times the NFS server sent a response with a transaction ID\n\t// unknown to this client.\n\tBadTransactionIDs uint64\n\t// A running counter, 
incremented on each request as the current difference\n\t// between sends and receives.\n\tCumulativeActiveRequests uint64\n\t// A running counter, incremented on each request by the current backlog\n\t// queue size.\n\tCumulativeBacklog uint64\n\n\t// Stats below only available with stat version 1.1.\n\n\t// Maximum number of simultaneously active RPC requests ever used.\n\tMaximumRPCSlotsUsed uint64\n\t// A running counter, incremented on each request as the current size of the\n\t// sending queue.\n\tCumulativeSendingQueue uint64\n\t// A running counter, incremented on each request as the current size of the\n\t// pending queue.\n\tCumulativePendingQueue uint64\n}\n\n// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice\n// of Mount structures containing detailed information about each mount.\n// If available, statistics for each mount are parsed as well.\nfunc parseMountStats(r io.Reader) ([]*Mount, error) {\n\tconst (\n\t\tdevice            = \"device\"\n\t\tstatVersionPrefix = \"statvers=\"\n\n\t\tnfs3Type = \"nfs\"\n\t\tnfs4Type = \"nfs4\"\n\t)\n\n\tvar mounts []*Mount\n\n\ts := bufio.NewScanner(r)\n\tfor s.Scan() {\n\t\t// Only look for device entries in this function\n\t\tss := strings.Fields(string(s.Bytes()))\n\t\tif len(ss) == 0 || ss[0] != device {\n\t\t\tcontinue\n\t\t}\n\n\t\tm, err := parseMount(ss)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Does this mount also possess statistics information?\n\t\tif len(ss) > deviceEntryLen {\n\t\t\t// Only NFSv3 and v4 are supported for parsing statistics\n\t\t\tif m.Type != nfs3Type && m.Type != nfs4Type {\n\t\t\t\treturn nil, fmt.Errorf(\"cannot parse MountStats for fstype %q\", m.Type)\n\t\t\t}\n\n\t\t\tstatVersion := strings.TrimPrefix(ss[8], statVersionPrefix)\n\n\t\t\tstats, err := parseMountStatsNFS(s, statVersion)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tm.Stats = stats\n\t\t}\n\n\t\tmounts = append(mounts, m)\n\t}\n\n\treturn mounts, s.Err()\n}\n\n// parseMount parses an entry in /proc/[pid]/mountstats in the format:\n//   device [device] mounted on [mount] with fstype [type]\nfunc parseMount(ss []string) (*Mount, error) {\n\tif len(ss) < deviceEntryLen {\n\t\treturn nil, fmt.Errorf(\"invalid device entry: %v\", ss)\n\t}\n\n\t// Check for specific words appearing at specific indices to ensure\n\t// the format is consistent with what we expect\n\tformat := []struct {\n\t\ti int\n\t\ts string\n\t}{\n\t\t{i: 0, s: \"device\"},\n\t\t{i: 2, s: \"mounted\"},\n\t\t{i: 3, s: \"on\"},\n\t\t{i: 5, s: \"with\"},\n\t\t{i: 6, s: \"fstype\"},\n\t}\n\n\tfor _, f := range format {\n\t\tif ss[f.i] != f.s {\n\t\t\treturn nil, fmt.Errorf(\"invalid device entry: %v\", ss)\n\t\t}\n\t}\n\n\treturn &Mount{\n\t\tDevice: ss[1],\n\t\tMount:  ss[4],\n\t\tType:   ss[7],\n\t}, nil\n}\n\n// parseMountStatsNFS parses a MountStatsNFS by scanning additional information\n// related to NFS statistics.\nfunc parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) {\n\t// Field indicators for parsing specific types of data\n\tconst (\n\t\tfieldAge        = \"age:\"\n\t\tfieldBytes      = \"bytes:\"\n\t\tfieldEvents     = \"events:\"\n\t\tfieldPerOpStats = \"per-op\"\n\t\tfieldTransport  = \"xprt:\"\n\t)\n\n\tstats := &MountStatsNFS{\n\t\tStatVersion: statVersion,\n\t}\n\n\tfor s.Scan() {\n\t\tss := strings.Fields(string(s.Bytes()))\n\t\tif len(ss) == 0 {\n\t\t\tbreak\n\t\t}\n\t\tif len(ss) < 2 {\n\t\t\treturn nil, fmt.Errorf(\"not enough information for NFS stats: %v\", 
ss)\n\t\t}\n\n\t\tswitch ss[0] {\n\t\tcase fieldAge:\n\t\t\t// Age integer is in seconds\n\t\t\td, err := time.ParseDuration(ss[1] + \"s\")\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tstats.Age = d\n\t\tcase fieldBytes:\n\t\t\tbstats, err := parseNFSBytesStats(ss[1:])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tstats.Bytes = *bstats\n\t\tcase fieldEvents:\n\t\t\testats, err := parseNFSEventsStats(ss[1:])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tstats.Events = *estats\n\t\tcase fieldTransport:\n\t\t\tif len(ss) < 3 {\n\t\t\t\treturn nil, fmt.Errorf(\"not enough information for NFS transport stats: %v\", ss)\n\t\t\t}\n\n\t\t\ttstats, err := parseNFSTransportStats(ss[2:], statVersion)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tstats.Transport = *tstats\n\t\t}\n\n\t\t// When encountering \"per-operation statistics\", we must break this\n\t\t// loop and parse them separately to ensure we can terminate parsing\n\t\t// before reaching another device entry; hence why this 'if' statement\n\t\t// is not just another switch case\n\t\tif ss[0] == fieldPerOpStats {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif err := s.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// NFS per-operation stats appear last before the next device entry\n\tperOpStats, err := parseNFSOperationStats(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstats.Operations = perOpStats\n\n\treturn stats, nil\n}\n\n// parseNFSBytesStats parses a NFSBytesStats line using an input set of\n// integer fields.\nfunc parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {\n\tif len(ss) != fieldBytesLen {\n\t\treturn nil, fmt.Errorf(\"invalid NFS bytes stats: %v\", ss)\n\t}\n\n\tns := make([]uint64, 0, fieldBytesLen)\n\tfor _, s := range ss {\n\t\tn, err := strconv.ParseUint(s, 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tns = append(ns, n)\n\t}\n\n\treturn &NFSBytesStats{\n\t\tRead:        ns[0],\n\t\tWrite:       ns[1],\n\t\tDirectRead:  ns[2],\n\t\tDirectWrite: ns[3],\n\t\tReadTotal:   ns[4],\n\t\tWriteTotal:  ns[5],\n\t\tReadPages:   ns[6],\n\t\tWritePages:  ns[7],\n\t}, nil\n}\n\n// parseNFSEventsStats parses a NFSEventsStats line using an input set of\n// integer fields.\nfunc parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {\n\tif len(ss) != fieldEventsLen {\n\t\treturn nil, fmt.Errorf(\"invalid NFS events stats: %v\", ss)\n\t}\n\n\tns := make([]uint64, 0, fieldEventsLen)\n\tfor _, s := range ss {\n\t\tn, err := strconv.ParseUint(s, 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tns = append(ns, n)\n\t}\n\n\treturn &NFSEventsStats{\n\t\tInodeRevalidate:     ns[0],\n\t\tDnodeRevalidate:     ns[1],\n\t\tDataInvalidate:      ns[2],\n\t\tAttributeInvalidate: ns[3],\n\t\tVFSOpen:             ns[4],\n\t\tVFSLookup:           ns[5],\n\t\tVFSAccess:           ns[6],\n\t\tVFSUpdatePage:       ns[7],\n\t\tVFSReadPage:         ns[8],\n\t\tVFSReadPages:        ns[9],\n\t\tVFSWritePage:        ns[10],\n\t\tVFSWritePages:       ns[11],\n\t\tVFSGetdents:         ns[12],\n\t\tVFSSetattr:          ns[13],\n\t\tVFSFlush:            ns[14],\n\t\tVFSFsync:            ns[15],\n\t\tVFSLock:             ns[16],\n\t\tVFSFileRelease:      ns[17],\n\t\tCongestionWait:      ns[18],\n\t\tTruncation:          ns[19],\n\t\tWriteExtension:      ns[20],\n\t\tSillyRename:         ns[21],\n\t\tShortRead:           ns[22],\n\t\tShortWrite:          ns[23],\n\t\tJukeboxDelay:        ns[24],\n\t\tPNFSRead:            
ns[25],\n\t\tPNFSWrite:           ns[26],\n\t}, nil\n}\n\n// parseNFSOperationStats parses a slice of NFSOperationStats by scanning\n// additional information about per-operation statistics until an empty\n// line is reached.\nfunc parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {\n\tconst (\n\t\t// Number of expected fields in each per-operation statistics set\n\t\tnumFields = 9\n\t)\n\n\tvar ops []NFSOperationStats\n\n\tfor s.Scan() {\n\t\tss := strings.Fields(string(s.Bytes()))\n\t\tif len(ss) == 0 {\n\t\t\t// Must break when reading a blank line after per-operation stats to\n\t\t\t// enable top-level function to parse the next device entry\n\t\t\tbreak\n\t\t}\n\n\t\tif len(ss) != numFields {\n\t\t\treturn nil, fmt.Errorf(\"invalid NFS per-operations stats: %v\", ss)\n\t\t}\n\n\t\t// Skip string operation name for integers\n\t\tns := make([]uint64, 0, numFields-1)\n\t\tfor _, st := range ss[1:] {\n\t\t\tn, err := strconv.ParseUint(st, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tns = append(ns, n)\n\t\t}\n\n\t\tops = append(ops, NFSOperationStats{\n\t\t\tOperation:                   strings.TrimSuffix(ss[0], \":\"),\n\t\t\tRequests:                    ns[0],\n\t\t\tTransmissions:               ns[1],\n\t\t\tMajorTimeouts:               ns[2],\n\t\t\tBytesSent:                   ns[3],\n\t\t\tBytesReceived:               ns[4],\n\t\t\tCumulativeQueueTime:         time.Duration(ns[5]) * time.Millisecond,\n\t\t\tCumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond,\n\t\t\tCumulativeTotalRequestTime:  time.Duration(ns[7]) * time.Millisecond,\n\t\t})\n\t}\n\n\treturn ops, s.Err()\n}\n\n// parseNFSTransportStats parses a NFSTransportStats line using an input set of\n// integer fields matched to a specific stats version.\nfunc parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) {\n\tswitch statVersion {\n\tcase statVersion10:\n\t\tif len(ss) != fieldTransport10Len {\n\t\t\treturn nil, fmt.Errorf(\"invalid NFS transport stats 1.0 statement: %v\", ss)\n\t\t}\n\tcase statVersion11:\n\t\tif len(ss) != fieldTransport11Len {\n\t\t\treturn nil, fmt.Errorf(\"invalid NFS transport stats 1.1 statement: %v\", ss)\n\t\t}\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unrecognized NFS transport stats version: %q\", statVersion)\n\t}\n\n\t// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay\n\t// in a v1.0 response.\n\t//\n\t// Note: slice length must be set to length of v1.1 stats to avoid a panic when\n\t// only v1.0 stats are present.\n\t// See: https://github.com/prometheus/node_exporter/issues/571.\n\tns := make([]uint64, fieldTransport11Len)\n\tfor i, s := range ss {\n\t\tn, err := strconv.ParseUint(s, 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tns[i] = n\n\t}\n\n\treturn &NFSTransportStats{\n\t\tPort:                     ns[0],\n\t\tBind:                     ns[1],\n\t\tConnect:                  ns[2],\n\t\tConnectIdleTime:          ns[3],\n\t\tIdleTime:                 time.Duration(ns[4]) * time.Second,\n\t\tSends:                    ns[5],\n\t\tReceives:                 ns[6],\n\t\tBadTransactionIDs:        ns[7],\n\t\tCumulativeActiveRequests: ns[8],\n\t\tCumulativeBacklog:        ns[9],\n\t\tMaximumRPCSlotsUsed:      ns[10],\n\t\tCumulativeSendingQueue:   ns[11],\n\t\tCumulativePendingQueue:   ns[12],\n\t}, nil\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/procfs/mountstats_test.go",
    "content": "package procfs\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestMountStats(t *testing.T) {\n\ttests := []struct {\n\t\tname    string\n\t\ts       string\n\t\tmounts  []*Mount\n\t\tinvalid bool\n\t}{\n\t\t{\n\t\t\tname: \"no devices\",\n\t\t\ts:    `hello`,\n\t\t},\n\t\t{\n\t\t\tname:    \"device has too few fields\",\n\t\t\ts:       `device foo`,\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname:    \"device incorrect format\",\n\t\t\ts:       `device rootfs BAD on / with fstype rootfs`,\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname:    \"device incorrect format\",\n\t\t\ts:       `device rootfs mounted BAD / with fstype rootfs`,\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname:    \"device incorrect format\",\n\t\t\ts:       `device rootfs mounted on / BAD fstype rootfs`,\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname:    \"device incorrect format\",\n\t\t\ts:       `device rootfs mounted on / with BAD rootfs`,\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname:    \"device rootfs cannot have stats\",\n\t\t\ts:       `device rootfs mounted on / with fstype rootfs stats`,\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname:    \"NFSv4 device with too little info\",\n\t\t\ts:       \"device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=1.1\\nhello\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname:    \"NFSv4 device with bad bytes\",\n\t\t\ts:       \"device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=1.1\\nbytes: 0\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname:    \"NFSv4 device with bad events\",\n\t\t\ts:       \"device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=1.1\\nevents: 0\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname:    \"NFSv4 device with bad per-op stats\",\n\t\t\ts:       \"device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=1.1\\nper-op statistics\\nFOO 0\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname:    \"NFSv4 device with bad transport stats\",\n\t\t\ts:       \"device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=1.1\\nxprt: tcp\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname:    \"NFSv4 device with bad transport version\",\n\t\t\ts:       \"device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=foo\\nxprt: tcp 0\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname:    \"NFSv4 device with bad transport stats version 1.0\",\n\t\t\ts:       \"device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=1.0\\nxprt: tcp 0 0 0 0 0 0 0 0 0 0 0 0 0\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname:    \"NFSv4 device with bad transport stats version 1.1\",\n\t\t\ts:       \"device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=1.1\\nxprt: tcp 0 0 0 0 0 0 0 0 0 0\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"NFSv3 device with transport stats version 1.0 OK\",\n\t\t\ts:    \"device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs statvers=1.0\\nxprt: tcp 1 2 3 4 5 6 7 8 9 10\",\n\t\t\tmounts: []*Mount{{\n\t\t\t\tDevice: \"192.168.1.1:/srv\",\n\t\t\t\tMount:  \"/mnt/nfs\",\n\t\t\t\tType:   \"nfs\",\n\t\t\t\tStats: &MountStatsNFS{\n\t\t\t\t\tStatVersion: \"1.0\",\n\t\t\t\t\tTransport: NFSTransportStats{\n\t\t\t\t\t\tPort:                     1,\n\t\t\t\t\t\tBind:                     2,\n\t\t\t\t\t\tConnect:                  3,\n\t\t\t\t\t\tConnectIdleTime:          4,\n\t\t\t\t\t\tIdleTime:                 5 * time.Second,\n\t\t\t\t\t\tSends:                   
 6,\n\t\t\t\t\t\tReceives:                 7,\n\t\t\t\t\t\tBadTransactionIDs:        8,\n\t\t\t\t\t\tCumulativeActiveRequests: 9,\n\t\t\t\t\t\tCumulativeBacklog:        10,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t\t{\n\t\t\tname: \"device rootfs OK\",\n\t\t\ts:    `device rootfs mounted on / with fstype rootfs`,\n\t\t\tmounts: []*Mount{{\n\t\t\t\tDevice: \"rootfs\",\n\t\t\t\tMount:  \"/\",\n\t\t\t\tType:   \"rootfs\",\n\t\t\t}},\n\t\t},\n\t\t{\n\t\t\tname: \"NFSv3 device with minimal stats OK\",\n\t\t\ts:    `device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs statvers=1.1`,\n\t\t\tmounts: []*Mount{{\n\t\t\t\tDevice: \"192.168.1.1:/srv\",\n\t\t\t\tMount:  \"/mnt/nfs\",\n\t\t\t\tType:   \"nfs\",\n\t\t\t\tStats: &MountStatsNFS{\n\t\t\t\t\tStatVersion: \"1.1\",\n\t\t\t\t},\n\t\t\t}},\n\t\t},\n\t\t{\n\t\t\tname: \"fixtures OK\",\n\t\t\tmounts: []*Mount{\n\t\t\t\t{\n\t\t\t\t\tDevice: \"rootfs\",\n\t\t\t\t\tMount:  \"/\",\n\t\t\t\t\tType:   \"rootfs\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tDevice: \"sysfs\",\n\t\t\t\t\tMount:  \"/sys\",\n\t\t\t\t\tType:   \"sysfs\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tDevice: \"proc\",\n\t\t\t\t\tMount:  \"/proc\",\n\t\t\t\t\tType:   \"proc\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tDevice: \"/dev/sda1\",\n\t\t\t\t\tMount:  \"/\",\n\t\t\t\t\tType:   \"ext4\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tDevice: \"192.168.1.1:/srv/test\",\n\t\t\t\t\tMount:  \"/mnt/nfs/test\",\n\t\t\t\t\tType:   \"nfs4\",\n\t\t\t\t\tStats: &MountStatsNFS{\n\t\t\t\t\t\tStatVersion: \"1.1\",\n\t\t\t\t\t\tAge:         13968 * time.Second,\n\t\t\t\t\t\tBytes: NFSBytesStats{\n\t\t\t\t\t\t\tRead:      1207640230,\n\t\t\t\t\t\t\tReadTotal: 1210214218,\n\t\t\t\t\t\t\tReadPages: 295483,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tEvents: NFSEventsStats{\n\t\t\t\t\t\t\tInodeRevalidate: 52,\n\t\t\t\t\t\t\tDnodeRevalidate: 226,\n\t\t\t\t\t\t\tVFSOpen:         1,\n\t\t\t\t\t\t\tVFSLookup:       13,\n\t\t\t\t\t\t\tVFSAccess:       398,\n\t\t\t\t\t\t\tVFSReadPages:    331,\n\t\t\t\t\t\t\tVFSWritePages:   47,\n\t\t\t\t\t\t\tVFSFlush:        77,\n\t\t\t\t\t\t\tVFSFileRelease:  77,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tOperations: []NFSOperationStats{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tOperation: \"NULL\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tOperation:                   \"READ\",\n\t\t\t\t\t\t\t\tRequests:                    1298,\n\t\t\t\t\t\t\t\tTransmissions:               1298,\n\t\t\t\t\t\t\t\tBytesSent:                   207680,\n\t\t\t\t\t\t\t\tBytesReceived:               1210292152,\n\t\t\t\t\t\t\t\tCumulativeQueueTime:         6 * time.Millisecond,\n\t\t\t\t\t\t\t\tCumulativeTotalResponseTime: 79386 * time.Millisecond,\n\t\t\t\t\t\t\t\tCumulativeTotalRequestTime:  79407 * time.Millisecond,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tOperation: \"WRITE\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tTransport: NFSTransportStats{\n\t\t\t\t\t\t\tPort:                     832,\n\t\t\t\t\t\t\tConnect:                  1,\n\t\t\t\t\t\t\tIdleTime:                 11 * time.Second,\n\t\t\t\t\t\t\tSends:                    6428,\n\t\t\t\t\t\t\tReceives:                 6428,\n\t\t\t\t\t\t\tCumulativeActiveRequests: 12154,\n\t\t\t\t\t\t\tMaximumRPCSlotsUsed:      24,\n\t\t\t\t\t\t\tCumulativeSendingQueue:   26,\n\t\t\t\t\t\t\tCumulativePendingQueue:   5726,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor i, tt := range tests {\n\t\tt.Logf(\"[%02d] test %q\", i, tt.name)\n\n\t\tvar mounts []*Mount\n\t\tvar err error\n\n\t\tif tt.s != \"\" {\n\t\t\tmounts, err = 
parseMountStats(strings.NewReader(tt.s))\n\t\t} else {\n\t\t\tproc, e := FS(\"fixtures\").NewProc(26231)\n\t\t\tif e != nil {\n\t\t\t\tt.Fatalf(\"failed to create proc: %v\", e)\n\t\t\t}\n\n\t\t\tmounts, err = proc.MountStats()\n\t\t}\n\n\t\tif tt.invalid && err == nil {\n\t\t\tt.Error(\"expected an error, but none occurred\")\n\t\t}\n\t\tif !tt.invalid && err != nil {\n\t\t\tt.Errorf(\"unexpected error: %v\", err)\n\t\t}\n\n\t\tif want, have := tt.mounts, mounts; !reflect.DeepEqual(want, have) {\n\t\t\tt.Errorf(\"mounts:\\nwant:\\n%v\\nhave:\\n%v\", mountsStr(want), mountsStr(have))\n\t\t}\n\t}\n}\n\nfunc mountsStr(mounts []*Mount) string {\n\tvar out string\n\tfor i, m := range mounts {\n\t\tout += fmt.Sprintf(\"[%d] %q on %q (%q)\", i, m.Device, m.Mount, m.Type)\n\n\t\tstats, ok := m.Stats.(*MountStatsNFS)\n\t\tif !ok {\n\t\t\tout += \"\\n\"\n\t\t\tcontinue\n\t\t}\n\n\t\tout += fmt.Sprintf(\"\\n\\t- v%s, age: %s\", stats.StatVersion, stats.Age)\n\t\tout += fmt.Sprintf(\"\\n\\t- bytes: %v\", stats.Bytes)\n\t\tout += fmt.Sprintf(\"\\n\\t- events: %v\", stats.Events)\n\t\tout += fmt.Sprintf(\"\\n\\t- transport: %v\", stats.Transport)\n\t\tout += \"\\n\\t- per-operation stats:\"\n\n\t\tfor _, o := range stats.Operations {\n\t\t\tout += fmt.Sprintf(\"\\n\\t\\t- %v\", o)\n\t\t}\n\n\t\tout += \"\\n\"\n\t}\n\n\treturn out\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/procfs/proc.go",
    "content": "package procfs\n\nimport (\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n// Proc provides information about a running process.\ntype Proc struct {\n\t// The process ID.\n\tPID int\n\n\tfs FS\n}\n\n// Procs represents a list of Proc structs.\ntype Procs []Proc\n\nfunc (p Procs) Len() int           { return len(p) }\nfunc (p Procs) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }\nfunc (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }\n\n// Self returns a process for the current process read via /proc/self.\nfunc Self() (Proc, error) {\n\tfs, err := NewFS(DefaultMountPoint)\n\tif err != nil {\n\t\treturn Proc{}, err\n\t}\n\treturn fs.Self()\n}\n\n// NewProc returns a process for the given pid under /proc.\nfunc NewProc(pid int) (Proc, error) {\n\tfs, err := NewFS(DefaultMountPoint)\n\tif err != nil {\n\t\treturn Proc{}, err\n\t}\n\treturn fs.NewProc(pid)\n}\n\n// AllProcs returns a list of all currently available processes under /proc.\nfunc AllProcs() (Procs, error) {\n\tfs, err := NewFS(DefaultMountPoint)\n\tif err != nil {\n\t\treturn Procs{}, err\n\t}\n\treturn fs.AllProcs()\n}\n\n// Self returns a process for the current process.\nfunc (fs FS) Self() (Proc, error) {\n\tp, err := os.Readlink(fs.Path(\"self\"))\n\tif err != nil {\n\t\treturn Proc{}, err\n\t}\n\tpid, err := strconv.Atoi(strings.Replace(p, string(fs), \"\", -1))\n\tif err != nil {\n\t\treturn Proc{}, err\n\t}\n\treturn fs.NewProc(pid)\n}\n\n// NewProc returns a process for the given pid.\nfunc (fs FS) NewProc(pid int) (Proc, error) {\n\tif _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil {\n\t\treturn Proc{}, err\n\t}\n\treturn Proc{PID: pid, fs: fs}, nil\n}\n\n// AllProcs returns a list of all currently available processes.\nfunc (fs FS) AllProcs() (Procs, error) {\n\td, err := os.Open(fs.Path())\n\tif err != nil {\n\t\treturn Procs{}, err\n\t}\n\tdefer d.Close()\n\n\tnames, err := d.Readdirnames(-1)\n\tif err != nil {\n\t\treturn Procs{}, fmt.Errorf(\"could not read %s: %s\", d.Name(), err)\n\t}\n\n\tp := Procs{}\n\tfor _, n := range names {\n\t\tpid, err := strconv.ParseInt(n, 10, 64)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tp = append(p, Proc{PID: int(pid), fs: fs})\n\t}\n\n\treturn p, nil\n}\n\n// CmdLine returns the command line of a process.\nfunc (p Proc) CmdLine() ([]string, error) {\n\tf, err := os.Open(p.path(\"cmdline\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\tdata, err := ioutil.ReadAll(f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(data) < 1 {\n\t\treturn []string{}, nil\n\t}\n\n\treturn strings.Split(string(data[:len(data)-1]), string(byte(0))), nil\n}\n\n// Comm returns the command name of a process.\nfunc (p Proc) Comm() (string, error) {\n\tf, err := os.Open(p.path(\"comm\"))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer f.Close()\n\n\tdata, err := ioutil.ReadAll(f)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn strings.TrimSpace(string(data)), nil\n}\n\n// Executable returns the absolute path of the executable command of a process.\nfunc (p Proc) Executable() (string, error) {\n\texe, err := os.Readlink(p.path(\"exe\"))\n\tif os.IsNotExist(err) {\n\t\treturn \"\", nil\n\t}\n\n\treturn exe, err\n}\n\n// FileDescriptors returns the currently open file descriptors of a process.\nfunc (p Proc) FileDescriptors() ([]uintptr, error) {\n\tnames, err := p.fileDescriptors()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfds := make([]uintptr, len(names))\n\tfor i, n := range names 
{\n\t\tfd, err := strconv.ParseInt(n, 10, 32)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not parse fd %s: %s\", n, err)\n\t\t}\n\t\tfds[i] = uintptr(fd)\n\t}\n\n\treturn fds, nil\n}\n\n// FileDescriptorTargets returns the targets of all file descriptors of a process.\n// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string.\nfunc (p Proc) FileDescriptorTargets() ([]string, error) {\n\tnames, err := p.fileDescriptors()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttargets := make([]string, len(names))\n\n\tfor i, name := range names {\n\t\ttarget, err := os.Readlink(p.path(\"fd\", name))\n\t\tif err == nil {\n\t\t\ttargets[i] = target\n\t\t}\n\t}\n\n\treturn targets, nil\n}\n\n// FileDescriptorsLen returns the number of currently open file descriptors of\n// a process.\nfunc (p Proc) FileDescriptorsLen() (int, error) {\n\tfds, err := p.fileDescriptors()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn len(fds), nil\n}\n\n// MountStats retrieves statistics and configuration for mount points in a\n// process's namespace.\nfunc (p Proc) MountStats() ([]*Mount, error) {\n\tf, err := os.Open(p.path(\"mountstats\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\n\treturn parseMountStats(f)\n}\n\nfunc (p Proc) fileDescriptors() ([]string, error) {\n\td, err := os.Open(p.path(\"fd\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer d.Close()\n\n\tnames, err := d.Readdirnames(-1)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not read %s: %s\", d.Name(), err)\n\t}\n\n\treturn names, nil\n}\n\nfunc (p Proc) path(pa ...string) string {\n\treturn p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...)\n}\n"
  },
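  {
    "path": "vendor/github.com/prometheus/procfs/proc_example_test.go",
    "content": "package procfs\n\nimport \"fmt\"\n\n// ExampleProc is an illustrative usage sketch (not part of upstream procfs).\n// It resolves the current process via /proc/self and prints a few details,\n// using only APIs defined in the accompanying sources. Because it has no\n// Output comment, it is compiled but not executed by go test.\nfunc ExampleProc() {\n\tp, err := Self()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tcomm, err := p.Comm()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tfds, err := p.FileDescriptorsLen()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tfmt.Printf(\"pid %d (%s) has %d open file descriptors\\n\", p.PID, comm, fds)\n}\n"
  },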
  {
    "path": "vendor/github.com/prometheus/procfs/proc_io.go",
    "content": "package procfs\n\nimport (\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n)\n\n// ProcIO models the content of /proc/<pid>/io.\ntype ProcIO struct {\n\t// Chars read.\n\tRChar uint64\n\t// Chars written.\n\tWChar uint64\n\t// Read syscalls.\n\tSyscR uint64\n\t// Write syscalls.\n\tSyscW uint64\n\t// Bytes read.\n\tReadBytes uint64\n\t// Bytes written.\n\tWriteBytes uint64\n\t// Bytes written, but taking into account truncation. See\n\t// Documentation/filesystems/proc.txt in the kernel sources for\n\t// detailed explanation.\n\tCancelledWriteBytes int64\n}\n\n// NewIO creates a new ProcIO instance from a given Proc instance.\nfunc (p Proc) NewIO() (ProcIO, error) {\n\tpio := ProcIO{}\n\n\tf, err := os.Open(p.path(\"io\"))\n\tif err != nil {\n\t\treturn pio, err\n\t}\n\tdefer f.Close()\n\n\tdata, err := ioutil.ReadAll(f)\n\tif err != nil {\n\t\treturn pio, err\n\t}\n\n\tioFormat := \"rchar: %d\\nwchar: %d\\nsyscr: %d\\nsyscw: %d\\n\" +\n\t\t\"read_bytes: %d\\nwrite_bytes: %d\\n\" +\n\t\t\"cancelled_write_bytes: %d\\n\"\n\n\t_, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR,\n\t\t&pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes)\n\tif err != nil {\n\t\treturn pio, err\n\t}\n\n\treturn pio, nil\n}\n"
  },
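  {
    "path": "vendor/github.com/prometheus/procfs/proc_io_example_test.go",
    "content": "package procfs\n\nimport \"fmt\"\n\n// ExampleProc_NewIO is an illustrative sketch (not part of upstream procfs)\n// showing how the /proc/<pid>/io counters parsed by NewIO can be read. It is\n// compiled but not executed, since it has no Output comment.\nfunc ExampleProc_NewIO() {\n\tp, err := Self()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tpio, err := p.NewIO()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\t// ReadBytes/WriteBytes count actual storage I/O, while RChar/WChar count\n\t// all read/write traffic, including data served from the page cache.\n\tfmt.Printf(\"read %d bytes, wrote %d bytes (rchar %d, wchar %d)\\n\",\n\t\tpio.ReadBytes, pio.WriteBytes, pio.RChar, pio.WChar)\n}\n"
  },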
  {
    "path": "vendor/github.com/prometheus/procfs/proc_io_test.go",
    "content": "package procfs\n\nimport \"testing\"\n\nfunc TestProcIO(t *testing.T) {\n\tp, err := FS(\"fixtures\").NewProc(26231)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ts, err := p.NewIO()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor _, test := range []struct {\n\t\tname string\n\t\twant int64\n\t\thave int64\n\t}{\n\t\t{name: \"RChar\", want: 750339, have: int64(s.RChar)},\n\t\t{name: \"WChar\", want: 818609, have: int64(s.WChar)},\n\t\t{name: \"SyscR\", want: 7405, have: int64(s.SyscR)},\n\t\t{name: \"SyscW\", want: 5245, have: int64(s.SyscW)},\n\t\t{name: \"ReadBytes\", want: 1024, have: int64(s.ReadBytes)},\n\t\t{name: \"WriteBytes\", want: 2048, have: int64(s.WriteBytes)},\n\t\t{name: \"CancelledWriteBytes\", want: -1024, have: s.CancelledWriteBytes},\n\t} {\n\t\tif test.want != test.have {\n\t\t\tt.Errorf(\"want %s %d, have %d\", test.name, test.want, test.have)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/procfs/proc_limits.go",
    "content": "package procfs\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n)\n\n// ProcLimits represents the soft limits for each of the process's resource\n// limits. For more information see getrlimit(2):\n// http://man7.org/linux/man-pages/man2/getrlimit.2.html.\ntype ProcLimits struct {\n\t// CPU time limit in seconds.\n\tCPUTime int\n\t// Maximum size of files that the process may create.\n\tFileSize int\n\t// Maximum size of the process's data segment (initialized data,\n\t// uninitialized data, and heap).\n\tDataSize int\n\t// Maximum size of the process stack in bytes.\n\tStackSize int\n\t// Maximum size of a core file.\n\tCoreFileSize int\n\t// Limit of the process's resident set in pages.\n\tResidentSet int\n\t// Maximum number of processes that can be created for the real user ID of\n\t// the calling process.\n\tProcesses int\n\t// Value one greater than the maximum file descriptor number that can be\n\t// opened by this process.\n\tOpenFiles int\n\t// Maximum number of bytes of memory that may be locked into RAM.\n\tLockedMemory int\n\t// Maximum size of the process's virtual memory address space in bytes.\n\tAddressSpace int\n\t// Limit on the combined number of flock(2) locks and fcntl(2) leases that\n\t// this process may establish.\n\tFileLocks int\n\t// Limit of signals that may be queued for the real user ID of the calling\n\t// process.\n\tPendingSignals int\n\t// Limit on the number of bytes that can be allocated for POSIX message\n\t// queues for the real user ID of the calling process.\n\tMsqqueueSize int\n\t// Limit of the nice priority set using setpriority(2) or nice(2).\n\tNicePriority int\n\t// Limit of the real-time priority set using sched_setscheduler(2) or\n\t// sched_setparam(2).\n\tRealtimePriority int\n\t// Limit (in microseconds) on the amount of CPU time that a process\n\t// scheduled under a real-time scheduling policy may consume without making\n\t// a blocking system call.\n\tRealtimeTimeout int\n}\n\nconst (\n\tlimitsFields    = 3\n\tlimitsUnlimited = \"unlimited\"\n)\n\nvar (\n\tlimitsDelimiter = regexp.MustCompile(\"  +\")\n)\n\n// NewLimits returns the current soft limits of the process.\nfunc (p Proc) NewLimits() (ProcLimits, error) {\n\tf, err := os.Open(p.path(\"limits\"))\n\tif err != nil {\n\t\treturn ProcLimits{}, err\n\t}\n\tdefer f.Close()\n\n\tvar (\n\t\tl = ProcLimits{}\n\t\ts = bufio.NewScanner(f)\n\t)\n\tfor s.Scan() {\n\t\tfields := limitsDelimiter.Split(s.Text(), limitsFields)\n\t\tif len(fields) != limitsFields {\n\t\t\treturn ProcLimits{}, fmt.Errorf(\n\t\t\t\t\"couldn't parse %s line %s\", f.Name(), s.Text())\n\t\t}\n\n\t\tswitch fields[0] {\n\t\tcase \"Max cpu time\":\n\t\t\tl.CPUTime, err = parseInt(fields[1])\n\t\tcase \"Max file size\":\n\t\t\tl.FileSize, err = parseInt(fields[1])\n\t\tcase \"Max data size\":\n\t\t\tl.DataSize, err = parseInt(fields[1])\n\t\tcase \"Max stack size\":\n\t\t\tl.StackSize, err = parseInt(fields[1])\n\t\tcase \"Max core file size\":\n\t\t\tl.CoreFileSize, err = parseInt(fields[1])\n\t\tcase \"Max resident set\":\n\t\t\tl.ResidentSet, err = parseInt(fields[1])\n\t\tcase \"Max processes\":\n\t\t\tl.Processes, err = parseInt(fields[1])\n\t\tcase \"Max open files\":\n\t\t\tl.OpenFiles, err = parseInt(fields[1])\n\t\tcase \"Max locked memory\":\n\t\t\tl.LockedMemory, err = parseInt(fields[1])\n\t\tcase \"Max address space\":\n\t\t\tl.AddressSpace, err = parseInt(fields[1])\n\t\tcase \"Max file locks\":\n\t\t\tl.FileLocks, err = parseInt(fields[1])\n\t\tcase \"Max pending 
signals\":\n\t\t\tl.PendingSignals, err = parseInt(fields[1])\n\t\tcase \"Max msgqueue size\":\n\t\t\tl.MsqqueueSize, err = parseInt(fields[1])\n\t\tcase \"Max nice priority\":\n\t\t\tl.NicePriority, err = parseInt(fields[1])\n\t\tcase \"Max realtime priority\":\n\t\t\tl.RealtimePriority, err = parseInt(fields[1])\n\t\tcase \"Max realtime timeout\":\n\t\t\tl.RealtimeTimeout, err = parseInt(fields[1])\n\t\t}\n\t\tif err != nil {\n\t\t\treturn ProcLimits{}, err\n\t\t}\n\t}\n\n\treturn l, s.Err()\n}\n\nfunc parseInt(s string) (int, error) {\n\tif s == limitsUnlimited {\n\t\treturn -1, nil\n\t}\n\ti, err := strconv.ParseInt(s, 10, 32)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"couldn't parse value %s: %s\", s, err)\n\t}\n\treturn int(i), nil\n}\n"
  },
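  {
    "path": "vendor/github.com/prometheus/procfs/proc_limits_example_test.go",
    "content": "package procfs\n\nimport \"fmt\"\n\n// ExampleProc_NewLimits is an illustrative sketch (not part of upstream\n// procfs). NewLimits parses /proc/<pid>/limits and reports -1 for values\n// listed as \"unlimited\", so callers should treat negative results specially.\nfunc ExampleProc_NewLimits() {\n\tp, err := Self()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tl, err := p.NewLimits()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tif l.OpenFiles < 0 {\n\t\tfmt.Println(\"open files: unlimited\")\n\t} else {\n\t\tfmt.Printf(\"open files: %d\\n\", l.OpenFiles)\n\t}\n}\n"
  },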
  {
    "path": "vendor/github.com/prometheus/procfs/proc_limits_test.go",
    "content": "package procfs\n\nimport \"testing\"\n\nfunc TestNewLimits(t *testing.T) {\n\tp, err := FS(\"fixtures\").NewProc(26231)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tl, err := p.NewLimits()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor _, test := range []struct {\n\t\tname string\n\t\twant int\n\t\thave int\n\t}{\n\t\t{name: \"cpu time\", want: -1, have: l.CPUTime},\n\t\t{name: \"open files\", want: 2048, have: l.OpenFiles},\n\t\t{name: \"msgqueue size\", want: 819200, have: l.MsqqueueSize},\n\t\t{name: \"nice priority\", want: 0, have: l.NicePriority},\n\t\t{name: \"address space\", want: -1, have: l.AddressSpace},\n\t} {\n\t\tif test.want != test.have {\n\t\t\tt.Errorf(\"want %s %d, have %d\", test.name, test.want, test.have)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/procfs/proc_stat.go",
    "content": "package procfs\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n)\n\n// Originally, this USER_HZ value was dynamically retrieved via a sysconf call\n// which required cgo. However, that caused a lot of problems regarding\n// cross-compilation. Alternatives such as running a binary to determine the\n// value, or trying to derive it in some other way were all problematic.  After\n// much research it was determined that USER_HZ is actually hardcoded to 100 on\n// all Go-supported platforms as of the time of this writing. This is why we\n// decided to hardcode it here as well. It is not impossible that there could\n// be systems with exceptions, but they should be very exotic edge cases, and\n// in that case, the worst outcome will be two misreported metrics.\n//\n// See also the following discussions:\n//\n// - https://github.com/prometheus/node_exporter/issues/52\n// - https://github.com/prometheus/procfs/pull/2\n// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue\nconst userHZ = 100\n\n// ProcStat provides status information about the process,\n// read from /proc/[pid]/stat.\ntype ProcStat struct {\n\t// The process ID.\n\tPID int\n\t// The filename of the executable.\n\tComm string\n\t// The process state.\n\tState string\n\t// The PID of the parent of this process.\n\tPPID int\n\t// The process group ID of the process.\n\tPGRP int\n\t// The session ID of the process.\n\tSession int\n\t// The controlling terminal of the process.\n\tTTY int\n\t// The ID of the foreground process group of the controlling terminal of\n\t// the process.\n\tTPGID int\n\t// The kernel flags word of the process.\n\tFlags uint\n\t// The number of minor faults the process has made which have not required\n\t// loading a memory page from disk.\n\tMinFlt uint\n\t// The number of minor faults that the process's waited-for children have\n\t// made.\n\tCMinFlt uint\n\t// The number of major faults the process has made which have required\n\t// loading a memory page from disk.\n\tMajFlt uint\n\t// The number of major faults that the process's waited-for children have\n\t// made.\n\tCMajFlt uint\n\t// Amount of time that this process has been scheduled in user mode,\n\t// measured in clock ticks.\n\tUTime uint\n\t// Amount of time that this process has been scheduled in kernel mode,\n\t// measured in clock ticks.\n\tSTime uint\n\t// Amount of time that this process's waited-for children have been\n\t// scheduled in user mode, measured in clock ticks.\n\tCUTime uint\n\t// Amount of time that this process's waited-for children have been\n\t// scheduled in kernel mode, measured in clock ticks.\n\tCSTime uint\n\t// For processes running a real-time scheduling policy, this is the negated\n\t// scheduling priority, minus one.\n\tPriority int\n\t// The nice value, a value in the range 19 (low priority) to -20 (high\n\t// priority).\n\tNice int\n\t// Number of threads in this process.\n\tNumThreads int\n\t// The time the process started after system boot, the value is expressed\n\t// in clock ticks.\n\tStarttime uint64\n\t// Virtual memory size in bytes.\n\tVSize int\n\t// Resident set size in pages.\n\tRSS int\n\n\tfs FS\n}\n\n// NewStat returns the current status information of the process.\nfunc (p Proc) NewStat() (ProcStat, error) {\n\tf, err := os.Open(p.path(\"stat\"))\n\tif err != nil {\n\t\treturn ProcStat{}, err\n\t}\n\tdefer f.Close()\n\n\tdata, err := ioutil.ReadAll(f)\n\tif err != nil {\n\t\treturn ProcStat{}, err\n\t}\n\n\tvar (\n\t\tignore 
int\n\n\t\ts = ProcStat{PID: p.PID, fs: p.fs}\n\t\tl = bytes.Index(data, []byte(\"(\"))\n\t\tr = bytes.LastIndex(data, []byte(\")\"))\n\t)\n\n\tif l < 0 || r < 0 {\n\t\treturn ProcStat{}, fmt.Errorf(\n\t\t\t\"unexpected format, couldn't extract comm: %s\",\n\t\t\tdata,\n\t\t)\n\t}\n\n\ts.Comm = string(data[l+1 : r])\n\t_, err = fmt.Fscan(\n\t\tbytes.NewBuffer(data[r+2:]),\n\t\t&s.State,\n\t\t&s.PPID,\n\t\t&s.PGRP,\n\t\t&s.Session,\n\t\t&s.TTY,\n\t\t&s.TPGID,\n\t\t&s.Flags,\n\t\t&s.MinFlt,\n\t\t&s.CMinFlt,\n\t\t&s.MajFlt,\n\t\t&s.CMajFlt,\n\t\t&s.UTime,\n\t\t&s.STime,\n\t\t&s.CUTime,\n\t\t&s.CSTime,\n\t\t&s.Priority,\n\t\t&s.Nice,\n\t\t&s.NumThreads,\n\t\t&ignore,\n\t\t&s.Starttime,\n\t\t&s.VSize,\n\t\t&s.RSS,\n\t)\n\tif err != nil {\n\t\treturn ProcStat{}, err\n\t}\n\n\treturn s, nil\n}\n\n// VirtualMemory returns the virtual memory size in bytes.\nfunc (s ProcStat) VirtualMemory() int {\n\treturn s.VSize\n}\n\n// ResidentMemory returns the resident memory size in bytes.\nfunc (s ProcStat) ResidentMemory() int {\n\treturn s.RSS * os.Getpagesize()\n}\n\n// StartTime returns the unix timestamp of the process in seconds.\nfunc (s ProcStat) StartTime() (float64, error) {\n\tstat, err := s.fs.NewStat()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil\n}\n\n// CPUTime returns the total CPU user and system time in seconds.\nfunc (s ProcStat) CPUTime() float64 {\n\treturn float64(s.UTime+s.STime) / userHZ\n}\n"
  },
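  {
    "path": "vendor/github.com/prometheus/procfs/proc_stat_example_test.go",
    "content": "package procfs\n\nimport \"fmt\"\n\n// ExampleProcStat is an illustrative sketch (not part of upstream procfs).\n// The helpers on ProcStat convert the raw /proc/<pid>/stat counters: CPUTime\n// divides clock ticks by USER_HZ into seconds, and ResidentMemory multiplies\n// RSS pages by the system page size into bytes.\nfunc ExampleProcStat() {\n\tp, err := Self()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\ts, err := p.NewStat()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tfmt.Printf(\"%s: %.2fs CPU, %d bytes resident\\n\", s.Comm, s.CPUTime(), s.ResidentMemory())\n}\n"
  },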
  {
    "path": "vendor/github.com/prometheus/procfs/proc_stat_test.go",
    "content": "package procfs\n\nimport (\n\t\"os\"\n\t\"testing\"\n)\n\nfunc TestProcStat(t *testing.T) {\n\tp, err := FS(\"fixtures\").NewProc(26231)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ts, err := p.NewStat()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor _, test := range []struct {\n\t\tname string\n\t\twant int\n\t\thave int\n\t}{\n\t\t{name: \"pid\", want: 26231, have: s.PID},\n\t\t{name: \"user time\", want: 1677, have: int(s.UTime)},\n\t\t{name: \"system time\", want: 44, have: int(s.STime)},\n\t\t{name: \"start time\", want: 82375, have: int(s.Starttime)},\n\t\t{name: \"virtual memory size\", want: 56274944, have: s.VSize},\n\t\t{name: \"resident set size\", want: 1981, have: s.RSS},\n\t} {\n\t\tif test.want != test.have {\n\t\t\tt.Errorf(\"want %s %d, have %d\", test.name, test.want, test.have)\n\t\t}\n\t}\n}\n\nfunc TestProcStatComm(t *testing.T) {\n\ts1, err := testProcStat(26231)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif want, have := \"vim\", s1.Comm; want != have {\n\t\tt.Errorf(\"want comm %s, have %s\", want, have)\n\t}\n\n\ts2, err := testProcStat(584)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif want, have := \"(a b ) ( c d) \", s2.Comm; want != have {\n\t\tt.Errorf(\"want comm %s, have %s\", want, have)\n\t}\n}\n\nfunc TestProcStatVirtualMemory(t *testing.T) {\n\ts, err := testProcStat(26231)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif want, have := 56274944, s.VirtualMemory(); want != have {\n\t\tt.Errorf(\"want virtual memory %d, have %d\", want, have)\n\t}\n}\n\nfunc TestProcStatResidentMemory(t *testing.T) {\n\ts, err := testProcStat(26231)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif want, have := 1981*os.Getpagesize(), s.ResidentMemory(); want != have {\n\t\tt.Errorf(\"want resident memory %d, have %d\", want, have)\n\t}\n}\n\nfunc TestProcStatStartTime(t *testing.T) {\n\ts, err := testProcStat(26231)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\ttime, err := s.StartTime()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif want, have := 1418184099.75, time; want != have {\n\t\tt.Errorf(\"want start time %f, have %f\", want, have)\n\t}\n}\n\nfunc TestProcStatCPUTime(t *testing.T) {\n\ts, err := testProcStat(26231)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif want, have := 17.21, s.CPUTime(); want != have {\n\t\tt.Errorf(\"want cpu time %f, have %f\", want, have)\n\t}\n}\n\nfunc testProcStat(pid int) (ProcStat, error) {\n\tp, err := FS(\"fixtures\").NewProc(pid)\n\tif err != nil {\n\t\treturn ProcStat{}, err\n\t}\n\n\treturn p.NewStat()\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/procfs/proc_test.go",
    "content": "package procfs\n\nimport (\n\t\"reflect\"\n\t\"sort\"\n\t\"testing\"\n)\n\nfunc TestSelf(t *testing.T) {\n\tfs := FS(\"fixtures\")\n\n\tp1, err := fs.NewProc(26231)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tp2, err := fs.Self()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !reflect.DeepEqual(p1, p2) {\n\t\tt.Errorf(\"want process %v, have %v\", p1, p2)\n\t}\n}\n\nfunc TestAllProcs(t *testing.T) {\n\tprocs, err := FS(\"fixtures\").AllProcs()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsort.Sort(procs)\n\tfor i, p := range []*Proc{{PID: 584}, {PID: 26231}} {\n\t\tif want, have := p.PID, procs[i].PID; want != have {\n\t\t\tt.Errorf(\"want processes %d, have %d\", want, have)\n\t\t}\n\t}\n}\n\nfunc TestCmdLine(t *testing.T) {\n\tfor _, tt := range []struct {\n\t\tprocess int\n\t\twant    []string\n\t}{\n\t\t{process: 26231, want: []string{\"vim\", \"test.go\", \"+10\"}},\n\t\t{process: 26232, want: []string{}},\n\t} {\n\t\tp1, err := FS(\"fixtures\").NewProc(tt.process)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tc1, err := p1.CmdLine()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif !reflect.DeepEqual(tt.want, c1) {\n\t\t\tt.Errorf(\"want cmdline %v, have %v\", tt.want, c1)\n\t\t}\n\t}\n}\n\nfunc TestComm(t *testing.T) {\n\tfor _, tt := range []struct {\n\t\tprocess int\n\t\twant    string\n\t}{\n\t\t{process: 26231, want: \"vim\"},\n\t\t{process: 26232, want: \"ata_sff\"},\n\t} {\n\t\tp1, err := FS(\"fixtures\").NewProc(tt.process)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tc1, err := p1.Comm()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif !reflect.DeepEqual(tt.want, c1) {\n\t\t\tt.Errorf(\"want comm %v, have %v\", tt.want, c1)\n\t\t}\n\t}\n}\n\nfunc TestExecutable(t *testing.T) {\n\tfor _, tt := range []struct {\n\t\tprocess int\n\t\twant    string\n\t}{\n\t\t{process: 26231, want: \"/usr/bin/vim\"},\n\t\t{process: 26232, want: \"\"},\n\t} {\n\t\tp, err := FS(\"fixtures\").NewProc(tt.process)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\texe, err := p.Executable()\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tif !reflect.DeepEqual(tt.want, exe) {\n\t\t\tt.Errorf(\"want absolute path to cmdline %v, have %v\", tt.want, exe)\n\t\t}\n\t}\n}\n\nfunc TestFileDescriptors(t *testing.T) {\n\tp1, err := FS(\"fixtures\").NewProc(26231)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfds, err := p1.FileDescriptors()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsort.Sort(byUintptr(fds))\n\tif want := []uintptr{0, 1, 2, 3, 10}; !reflect.DeepEqual(want, fds) {\n\t\tt.Errorf(\"want fds %v, have %v\", want, fds)\n\t}\n}\n\nfunc TestFileDescriptorTargets(t *testing.T) {\n\tp1, err := FS(\"fixtures\").NewProc(26231)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfds, err := p1.FileDescriptorTargets()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsort.Strings(fds)\n\tvar want = []string{\n\t\t\"../../symlinktargets/abc\",\n\t\t\"../../symlinktargets/def\",\n\t\t\"../../symlinktargets/ghi\",\n\t\t\"../../symlinktargets/uvw\",\n\t\t\"../../symlinktargets/xyz\",\n\t}\n\tif !reflect.DeepEqual(want, fds) {\n\t\tt.Errorf(\"want fds %v, have %v\", want, fds)\n\t}\n}\n\nfunc TestFileDescriptorsLen(t *testing.T) {\n\tp1, err := FS(\"fixtures\").NewProc(26231)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tl, err := p1.FileDescriptorsLen()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif want, have := 5, l; want != have {\n\t\tt.Errorf(\"want fds %d, have %d\", want, have)\n\t}\n}\n\ntype byUintptr []uintptr\n\nfunc (a byUintptr) Len() int           { 
return len(a) }\nfunc (a byUintptr) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\nfunc (a byUintptr) Less(i, j int) bool { return a[i] < a[j] }\n"
  },
  {
    "path": "vendor/github.com/prometheus/procfs/stat.go",
    "content": "package procfs\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n// CPUStat shows how much time the cpu spend in various stages.\ntype CPUStat struct {\n\tUser      float64\n\tNice      float64\n\tSystem    float64\n\tIdle      float64\n\tIowait    float64\n\tIRQ       float64\n\tSoftIRQ   float64\n\tSteal     float64\n\tGuest     float64\n\tGuestNice float64\n}\n\n// SoftIRQStat represent the softirq statistics as exported in the procfs stat file.\n// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html\n// It is possible to get per-cpu stats by reading /proc/softirqs\ntype SoftIRQStat struct {\n\tHi          uint64\n\tTimer       uint64\n\tNetTx       uint64\n\tNetRx       uint64\n\tBlock       uint64\n\tBlockIoPoll uint64\n\tTasklet     uint64\n\tSched       uint64\n\tHrtimer     uint64\n\tRcu         uint64\n}\n\n// Stat represents kernel/system statistics.\ntype Stat struct {\n\t// Boot time in seconds since the Epoch.\n\tBootTime uint64\n\t// Summed up cpu statistics.\n\tCPUTotal CPUStat\n\t// Per-CPU statistics.\n\tCPU []CPUStat\n\t// Number of times interrupts were handled, which contains numbered and unnumbered IRQs.\n\tIRQTotal uint64\n\t// Number of times a numbered IRQ was triggered.\n\tIRQ []uint64\n\t// Number of times a context switch happened.\n\tContextSwitches uint64\n\t// Number of times a process was created.\n\tProcessCreated uint64\n\t// Number of processes currently running.\n\tProcessesRunning uint64\n\t// Number of processes currently blocked (waiting for IO).\n\tProcessesBlocked uint64\n\t// Number of times a softirq was scheduled.\n\tSoftIRQTotal uint64\n\t// Detailed softirq statistics.\n\tSoftIRQ SoftIRQStat\n}\n\n// NewStat returns kernel/system statistics read from /proc/stat.\nfunc NewStat() (Stat, error) {\n\tfs, err := NewFS(DefaultMountPoint)\n\tif err != nil {\n\t\treturn Stat{}, err\n\t}\n\n\treturn fs.NewStat()\n}\n\n// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum).\nfunc parseCPUStat(line string) (CPUStat, int64, error) {\n\tcpuStat := CPUStat{}\n\tvar cpu string\n\n\tcount, err := fmt.Sscanf(line, \"%s %f %f %f %f %f %f %f %f %f %f\",\n\t\t&cpu,\n\t\t&cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle,\n\t\t&cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal,\n\t\t&cpuStat.Guest, &cpuStat.GuestNice)\n\n\tif err != nil && err != io.EOF {\n\t\treturn CPUStat{}, -1, fmt.Errorf(\"couldn't parse %s (cpu): %s\", line, err)\n\t}\n\tif count == 0 {\n\t\treturn CPUStat{}, -1, fmt.Errorf(\"couldn't parse %s (cpu): 0 elements parsed\", line)\n\t}\n\n\tcpuStat.User /= userHZ\n\tcpuStat.Nice /= userHZ\n\tcpuStat.System /= userHZ\n\tcpuStat.Idle /= userHZ\n\tcpuStat.Iowait /= userHZ\n\tcpuStat.IRQ /= userHZ\n\tcpuStat.SoftIRQ /= userHZ\n\tcpuStat.Steal /= userHZ\n\tcpuStat.Guest /= userHZ\n\tcpuStat.GuestNice /= userHZ\n\n\tif cpu == \"cpu\" {\n\t\treturn cpuStat, -1, nil\n\t}\n\n\tcpuID, err := strconv.ParseInt(cpu[3:], 10, 64)\n\tif err != nil {\n\t\treturn CPUStat{}, -1, fmt.Errorf(\"couldn't parse %s (cpu/cpuid): %s\", line, err)\n\t}\n\n\treturn cpuStat, cpuID, nil\n}\n\n// Parse a softirq line.\nfunc parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {\n\tsoftIRQStat := SoftIRQStat{}\n\tvar total uint64\n\tvar prefix string\n\n\t_, err := fmt.Sscanf(line, \"%s %d %d %d %d %d %d %d %d %d %d %d\",\n\t\t&prefix, &total,\n\t\t&softIRQStat.Hi, &softIRQStat.Timer, 
&softIRQStat.NetTx, &softIRQStat.NetRx,\n\t\t&softIRQStat.Block, &softIRQStat.BlockIoPoll,\n\t\t&softIRQStat.Tasklet, &softIRQStat.Sched,\n\t\t&softIRQStat.Hrtimer, &softIRQStat.Rcu)\n\n\tif err != nil {\n\t\treturn SoftIRQStat{}, 0, fmt.Errorf(\"couldn't parse %s (softirq): %s\", line, err)\n\t}\n\n\treturn softIRQStat, total, nil\n}\n\n// NewStat returns an information about current kernel/system statistics.\nfunc (fs FS) NewStat() (Stat, error) {\n\t// See https://www.kernel.org/doc/Documentation/filesystems/proc.txt\n\n\tf, err := os.Open(fs.Path(\"stat\"))\n\tif err != nil {\n\t\treturn Stat{}, err\n\t}\n\tdefer f.Close()\n\n\tstat := Stat{}\n\n\tscanner := bufio.NewScanner(f)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tparts := strings.Fields(scanner.Text())\n\t\t// require at least <key> <value>\n\t\tif len(parts) < 2 {\n\t\t\tcontinue\n\t\t}\n\t\tswitch {\n\t\tcase parts[0] == \"btime\":\n\t\t\tif stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {\n\t\t\t\treturn Stat{}, fmt.Errorf(\"couldn't parse %s (btime): %s\", parts[1], err)\n\t\t\t}\n\t\tcase parts[0] == \"intr\":\n\t\t\tif stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {\n\t\t\t\treturn Stat{}, fmt.Errorf(\"couldn't parse %s (intr): %s\", parts[1], err)\n\t\t\t}\n\t\t\tnumberedIRQs := parts[2:]\n\t\t\tstat.IRQ = make([]uint64, len(numberedIRQs))\n\t\t\tfor i, count := range numberedIRQs {\n\t\t\t\tif stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {\n\t\t\t\t\treturn Stat{}, fmt.Errorf(\"couldn't parse %s (intr%d): %s\", count, i, err)\n\t\t\t\t}\n\t\t\t}\n\t\tcase parts[0] == \"ctxt\":\n\t\t\tif stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {\n\t\t\t\treturn Stat{}, fmt.Errorf(\"couldn't parse %s (ctxt): %s\", parts[1], err)\n\t\t\t}\n\t\tcase parts[0] == \"processes\":\n\t\t\tif stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {\n\t\t\t\treturn Stat{}, fmt.Errorf(\"couldn't parse %s (processes): %s\", parts[1], err)\n\t\t\t}\n\t\tcase parts[0] == \"procs_running\":\n\t\t\tif stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {\n\t\t\t\treturn Stat{}, fmt.Errorf(\"couldn't parse %s (procs_running): %s\", parts[1], err)\n\t\t\t}\n\t\tcase parts[0] == \"procs_blocked\":\n\t\t\tif stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {\n\t\t\t\treturn Stat{}, fmt.Errorf(\"couldn't parse %s (procs_blocked): %s\", parts[1], err)\n\t\t\t}\n\t\tcase parts[0] == \"softirq\":\n\t\t\tsoftIRQStats, total, err := parseSoftIRQStat(line)\n\t\t\tif err != nil {\n\t\t\t\treturn Stat{}, err\n\t\t\t}\n\t\t\tstat.SoftIRQTotal = total\n\t\t\tstat.SoftIRQ = softIRQStats\n\t\tcase strings.HasPrefix(parts[0], \"cpu\"):\n\t\t\tcpuStat, cpuID, err := parseCPUStat(line)\n\t\t\tif err != nil {\n\t\t\t\treturn Stat{}, err\n\t\t\t}\n\t\t\tif cpuID == -1 {\n\t\t\t\tstat.CPUTotal = cpuStat\n\t\t\t} else {\n\t\t\t\tfor int64(len(stat.CPU)) <= cpuID {\n\t\t\t\t\tstat.CPU = append(stat.CPU, CPUStat{})\n\t\t\t\t}\n\t\t\t\tstat.CPU[cpuID] = cpuStat\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\treturn Stat{}, fmt.Errorf(\"couldn't parse %s: %s\", f.Name(), err)\n\t}\n\n\treturn stat, nil\n}\n"
  },
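  {
    "path": "vendor/github.com/prometheus/procfs/stat_example_test.go",
    "content": "package procfs\n\nimport \"fmt\"\n\n// ExampleNewStat is an illustrative sketch (not part of upstream procfs)\n// reading the system-wide counters from /proc/stat. Note that parseCPUStat\n// has already divided the per-CPU times by USER_HZ, so CPUTotal is in\n// seconds.\nfunc ExampleNewStat() {\n\tstat, err := NewStat()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tfmt.Printf(\"booted at %d, %d context switches, %.2fs total user time\\n\",\n\t\tstat.BootTime, stat.ContextSwitches, stat.CPUTotal.User)\n}\n"
  },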
  {
    "path": "vendor/github.com/prometheus/procfs/stat_test.go",
    "content": "package procfs\n\nimport \"testing\"\n\nfunc TestStat(t *testing.T) {\n\ts, err := FS(\"fixtures\").NewStat()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// cpu\n\tif want, have := float64(301854)/userHZ, s.CPUTotal.User; want != have {\n\t\tt.Errorf(\"want cpu/user %v, have %v\", want, have)\n\t}\n\tif want, have := float64(31)/userHZ, s.CPU[7].SoftIRQ; want != have {\n\t\tt.Errorf(\"want cpu7/softirq %v, have %v\", want, have)\n\t}\n\n\t// intr\n\tif want, have := uint64(8885917), s.IRQTotal; want != have {\n\t\tt.Errorf(\"want irq/total %d, have %d\", want, have)\n\t}\n\tif want, have := uint64(1), s.IRQ[8]; want != have {\n\t\tt.Errorf(\"want irq8 %d, have %d\", want, have)\n\t}\n\n\t// ctxt\n\tif want, have := uint64(38014093), s.ContextSwitches; want != have {\n\t\tt.Errorf(\"want context switches (ctxt) %d, have %d\", want, have)\n\t}\n\n\t// btime\n\tif want, have := uint64(1418183276), s.BootTime; want != have {\n\t\tt.Errorf(\"want boot time (btime) %d, have %d\", want, have)\n\t}\n\n\t// processes\n\tif want, have := uint64(26442), s.ProcessCreated; want != have {\n\t\tt.Errorf(\"want process created (processes) %d, have %d\", want, have)\n\t}\n\n\t// procs_running\n\tif want, have := uint64(2), s.ProcessesRunning; want != have {\n\t\tt.Errorf(\"want processes running (procs_running) %d, have %d\", want, have)\n\t}\n\n\t// procs_blocked\n\tif want, have := uint64(1), s.ProcessesBlocked; want != have {\n\t\tt.Errorf(\"want processes blocked (procs_blocked) %d, have %d\", want, have)\n\t}\n\n\t// softirq\n\tif want, have := uint64(5057579), s.SoftIRQTotal; want != have {\n\t\tt.Errorf(\"want softirq total %d, have %d\", want, have)\n\t}\n\n\tif want, have := uint64(508444), s.SoftIRQ.Rcu; want != have {\n\t\tt.Errorf(\"want softirq RCU %d, have %d\", want, have)\n\t}\n\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/procfs/ttar",
    "content": "#!/usr/bin/env bash\n# Purpose: plain text tar format\n# Limitations: - only suitable for text files, directories, and symlinks\n#              - stores only filename, content, and mode\n#              - not designed for untrusted input\n\n# Note: must work with bash version 3.2 (macOS)\n\nset -o errexit -o nounset\n\n# Sanitize environment (for instance, standard sorting of glob matches)\nexport LC_ALL=C\n\npath=\"\"\nCMD=\"\"\n\nfunction usage {\n    bname=$(basename \"$0\")\n    cat << USAGE\nUsage:   $bname [-C <DIR>] -c -f <ARCHIVE> <FILE...> (create archive)\n         $bname            -t -f <ARCHIVE>           (list archive contents)\n         $bname [-C <DIR>] -x -f <ARCHIVE>           (extract archive)\n\nOptions:\n         -C <DIR>                                    (change directory)\n\nExample: Change to sysfs directory, create ttar file from fixtures directory\n         $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/\nUSAGE\nexit \"$1\"\n}\n\nfunction vecho {\n    if [ \"${VERBOSE:-}\" == \"yes\" ]; then\n        echo >&7 \"$@\"\n    fi\n}\n\nfunction set_cmd {\n    if [ -n \"$CMD\" ]; then\n        echo \"ERROR: more than one command given\"\n        echo\n        usage 2\n    fi\n    CMD=$1\n}\n\nwhile getopts :cf:htxvC: opt; do\n    case $opt in\n        c)\n            set_cmd \"create\"\n            ;;\n        f)\n            ARCHIVE=$OPTARG\n            ;;\n        h)\n            usage 0\n            ;;\n        t)\n            set_cmd \"list\"\n            ;;\n        x)\n            set_cmd \"extract\"\n            ;;\n        v)\n            VERBOSE=yes\n            exec 7>&1\n            ;;\n        C)\n            CDIR=$OPTARG\n            ;;\n        *)\n            echo >&2 \"ERROR: invalid option -$OPTARG\"\n            echo\n            usage 1\n            ;;\n    esac\ndone\n\n# Remove processed options from arguments\nshift $(( OPTIND - 1 ));\n\nif [ \"${CMD:-}\" == \"\" ]; then\n    echo >&2 \"ERROR: no command given\"\n    echo\n    usage 1\nelif [ \"${ARCHIVE:-}\" == \"\" ]; then\n    echo >&2 \"ERROR: no archive name given\"\n    echo\n    usage 1\nfi\n\nfunction list {\n    local path=\"\"\n    local size=0\n    local line_no=0\n    local ttar_file=$1\n    if [ -n \"${2:-}\" ]; then\n        echo >&2 \"ERROR: too many arguments.\"\n        echo\n        usage 1\n    fi\n    if [ ! -e \"$ttar_file\" ]; then\n        echo >&2 \"ERROR: file not found ($ttar_file)\"\n        echo\n        usage 1\n    fi\n    while read -r line; do\n        line_no=$(( line_no + 1 ))\n        if [ $size -gt 0 ]; then\n            size=$(( size - 1 ))\n            continue\n        fi\n        if [[ $line =~ ^Path:\\ (.*)$ ]]; then\n            path=${BASH_REMATCH[1]}\n        elif [[ $line =~ ^Lines:\\ (.*)$ ]]; then\n            size=${BASH_REMATCH[1]}\n            echo \"$path\"\n        elif [[ $line =~ ^Directory:\\ (.*)$ ]]; then\n            path=${BASH_REMATCH[1]}\n            echo \"$path/\"\n        elif [[ $line =~ ^SymlinkTo:\\ (.*)$ ]]; then\n            echo  \"$path -> ${BASH_REMATCH[1]}\"\n        fi\n    done < \"$ttar_file\"\n}\n\nfunction extract {\n    local path=\"\"\n    local size=0\n    local line_no=0\n    local ttar_file=$1\n    if [ -n \"${2:-}\" ]; then\n        echo >&2 \"ERROR: too many arguments.\"\n        echo\n        usage 1\n    fi\n    if [ ! 
-e \"$ttar_file\" ]; then\n        echo >&2 \"ERROR: file not found ($ttar_file)\"\n        echo\n        usage 1\n    fi\n    while IFS= read -r line; do\n        line_no=$(( line_no + 1 ))\n        if [ \"$size\" -gt 0 ]; then\n            echo \"$line\" >> \"$path\"\n            size=$(( size - 1 ))\n            continue\n        fi\n        if [[ $line =~ ^Path:\\ (.*)$ ]]; then\n            path=${BASH_REMATCH[1]}\n            if [ -e \"$path\" ] || [ -L \"$path\" ]; then\n                rm \"$path\"\n            fi\n        elif [[ $line =~ ^Lines:\\ (.*)$ ]]; then\n            size=${BASH_REMATCH[1]}\n            # Create file even if it is zero-length.\n            touch \"$path\"\n            vecho \"    $path\"\n        elif [[ $line =~ ^Mode:\\ (.*)$ ]]; then\n            mode=${BASH_REMATCH[1]}\n            chmod \"$mode\" \"$path\"\n            vecho \"$mode\"\n        elif [[ $line =~ ^Directory:\\ (.*)$ ]]; then\n            path=${BASH_REMATCH[1]}\n            mkdir -p \"$path\"\n            vecho \"    $path/\"\n        elif [[ $line =~ ^SymlinkTo:\\ (.*)$ ]]; then\n            ln -s \"${BASH_REMATCH[1]}\" \"$path\"\n            vecho \"    $path -> ${BASH_REMATCH[1]}\"\n        elif [[ $line =~ ^# ]]; then\n            # Ignore comments between files\n            continue\n        else\n            echo >&2 \"ERROR: Unknown keyword on line $line_no: $line\"\n            exit 1\n        fi\n    done < \"$ttar_file\"\n}\n\nfunction div {\n    echo \"# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\" \\\n         \"- - - - - -\"\n}\n\nfunction get_mode {\n    local mfile=$1\n    if [ -z \"${STAT_OPTION:-}\" ]; then\n        if stat -c '%a' \"$mfile\" >/dev/null 2>&1; then\n            STAT_OPTION='-c'\n            STAT_FORMAT='%a'\n        else\n            STAT_OPTION='-f'\n            STAT_FORMAT='%A'\n        fi\n    fi\n    stat \"${STAT_OPTION}\" \"${STAT_FORMAT}\" \"$mfile\"\n}\n\nfunction _create {\n    shopt -s nullglob\n    local mode\n    while (( \"$#\" )); do\n        file=$1\n        if [ -L \"$file\" ]; then\n            echo \"Path: $file\"\n            symlinkTo=$(readlink \"$file\")\n            echo \"SymlinkTo: $symlinkTo\"\n            vecho \"    $file -> $symlinkTo\"\n            div\n        elif [ -d \"$file\" ]; then\n            # Strip trailing slash (if there is one)\n            file=${file%/}\n            echo \"Directory: $file\"\n            mode=$(get_mode \"$file\")\n            echo \"Mode: $mode\"\n            vecho \"$mode $file/\"\n            div\n            # Find all files and dirs, including hidden/dot files\n            for x in \"$file/\"{*,.[^.]*}; do\n                _create \"$x\"\n            done\n        elif [ -f \"$file\" ]; then\n            echo \"Path: $file\"\n            lines=$(wc -l \"$file\"|awk '{print $1}')\n            echo \"Lines: $lines\"\n            cat \"$file\"\n            mode=$(get_mode \"$file\")\n            echo \"Mode: $mode\"\n            vecho \"$mode $file\"\n            div\n        else\n            echo >&2 \"ERROR: file not found ($file in $(pwd))\"\n            exit 2\n        fi\n        shift\n    done\n}\n\nfunction create {\n    ttar_file=$1\n    shift\n    if [ -z \"${1:-}\" ]; then\n        echo >&2 \"ERROR: missing arguments.\"\n        echo\n        usage 1\n    fi\n    if [ -e \"$ttar_file\" ]; then\n        rm \"$ttar_file\"\n    fi\n    exec > \"$ttar_file\"\n    _create \"$@\"\n}\n\nif [ -n \"${CDIR:-}\" ]; then\n    if [[ \"$ARCHIVE\" != /* ]]; then\n        
# Relative path: preserve the archive's location before changing\n        # directory\n        ARCHIVE=\"$(pwd)/$ARCHIVE\"\n    fi\n    cd \"$CDIR\"\nfi\n\n\"$CMD\" \"$ARCHIVE\" \"$@\"\n"
  },
  {
    "path": "vendor/github.com/prometheus/procfs/xfrm.go",
    "content": "// Copyright 2017 Prometheus Team\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage procfs\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n// XfrmStat models the contents of /proc/net/xfrm_stat.\ntype XfrmStat struct {\n\t// All errors which are not matched by other\n\tXfrmInError int\n\t// No buffer is left\n\tXfrmInBufferError int\n\t// Header Error\n\tXfrmInHdrError int\n\t// No state found\n\t// i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong\n\tXfrmInNoStates int\n\t// Transformation protocol specific error\n\t// e.g. SA Key is wrong\n\tXfrmInStateProtoError int\n\t// Transformation mode specific error\n\tXfrmInStateModeError int\n\t// Sequence error\n\t// e.g. sequence number is out of window\n\tXfrmInStateSeqError int\n\t// State is expired\n\tXfrmInStateExpired int\n\t// State has mismatch option\n\t// e.g. UDP encapsulation type is mismatched\n\tXfrmInStateMismatch int\n\t// State is invalid\n\tXfrmInStateInvalid int\n\t// No matching template for states\n\t// e.g. Inbound SAs are correct but SP rule is wrong\n\tXfrmInTmplMismatch int\n\t// No policy is found for states\n\t// e.g. Inbound SAs are correct but no SP is found\n\tXfrmInNoPols int\n\t// Policy discards\n\tXfrmInPolBlock int\n\t// Policy error\n\tXfrmInPolError int\n\t// All errors which are not matched by others\n\tXfrmOutError int\n\t// Bundle generation error\n\tXfrmOutBundleGenError int\n\t// Bundle check error\n\tXfrmOutBundleCheckError int\n\t// No state was found\n\tXfrmOutNoStates int\n\t// Transformation protocol specific error\n\tXfrmOutStateProtoError int\n\t// Transportation mode specific error\n\tXfrmOutStateModeError int\n\t// Sequence error\n\t// i.e sequence number overflow\n\tXfrmOutStateSeqError int\n\t// State is expired\n\tXfrmOutStateExpired int\n\t// Policy discads\n\tXfrmOutPolBlock int\n\t// Policy is dead\n\tXfrmOutPolDead int\n\t// Policy Error\n\tXfrmOutPolError     int\n\tXfrmFwdHdrError     int\n\tXfrmOutStateInvalid int\n\tXfrmAcquireError    int\n}\n\n// NewXfrmStat reads the xfrm_stat statistics.\nfunc NewXfrmStat() (XfrmStat, error) {\n\tfs, err := NewFS(DefaultMountPoint)\n\tif err != nil {\n\t\treturn XfrmStat{}, err\n\t}\n\n\treturn fs.NewXfrmStat()\n}\n\n// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.\nfunc (fs FS) NewXfrmStat() (XfrmStat, error) {\n\tfile, err := os.Open(fs.Path(\"net/xfrm_stat\"))\n\tif err != nil {\n\t\treturn XfrmStat{}, err\n\t}\n\tdefer file.Close()\n\n\tvar (\n\t\tx = XfrmStat{}\n\t\ts = bufio.NewScanner(file)\n\t)\n\n\tfor s.Scan() {\n\t\tfields := strings.Fields(s.Text())\n\n\t\tif len(fields) != 2 {\n\t\t\treturn XfrmStat{}, fmt.Errorf(\n\t\t\t\t\"couldnt parse %s line %s\", file.Name(), s.Text())\n\t\t}\n\n\t\tname := fields[0]\n\t\tvalue, err := strconv.Atoi(fields[1])\n\t\tif err != nil {\n\t\t\treturn XfrmStat{}, err\n\t\t}\n\n\t\tswitch name {\n\t\tcase \"XfrmInError\":\n\t\t\tx.XfrmInError = value\n\t\tcase 
\"XfrmInBufferError\":\n\t\t\tx.XfrmInBufferError = value\n\t\tcase \"XfrmInHdrError\":\n\t\t\tx.XfrmInHdrError = value\n\t\tcase \"XfrmInNoStates\":\n\t\t\tx.XfrmInNoStates = value\n\t\tcase \"XfrmInStateProtoError\":\n\t\t\tx.XfrmInStateProtoError = value\n\t\tcase \"XfrmInStateModeError\":\n\t\t\tx.XfrmInStateModeError = value\n\t\tcase \"XfrmInStateSeqError\":\n\t\t\tx.XfrmInStateSeqError = value\n\t\tcase \"XfrmInStateExpired\":\n\t\t\tx.XfrmInStateExpired = value\n\t\tcase \"XfrmInStateInvalid\":\n\t\t\tx.XfrmInStateInvalid = value\n\t\tcase \"XfrmInTmplMismatch\":\n\t\t\tx.XfrmInTmplMismatch = value\n\t\tcase \"XfrmInNoPols\":\n\t\t\tx.XfrmInNoPols = value\n\t\tcase \"XfrmInPolBlock\":\n\t\t\tx.XfrmInPolBlock = value\n\t\tcase \"XfrmInPolError\":\n\t\t\tx.XfrmInPolError = value\n\t\tcase \"XfrmOutError\":\n\t\t\tx.XfrmOutError = value\n\t\tcase \"XfrmInStateMismatch\":\n\t\t\tx.XfrmInStateMismatch = value\n\t\tcase \"XfrmOutBundleGenError\":\n\t\t\tx.XfrmOutBundleGenError = value\n\t\tcase \"XfrmOutBundleCheckError\":\n\t\t\tx.XfrmOutBundleCheckError = value\n\t\tcase \"XfrmOutNoStates\":\n\t\t\tx.XfrmOutNoStates = value\n\t\tcase \"XfrmOutStateProtoError\":\n\t\t\tx.XfrmOutStateProtoError = value\n\t\tcase \"XfrmOutStateModeError\":\n\t\t\tx.XfrmOutStateModeError = value\n\t\tcase \"XfrmOutStateSeqError\":\n\t\t\tx.XfrmOutStateSeqError = value\n\t\tcase \"XfrmOutStateExpired\":\n\t\t\tx.XfrmOutStateExpired = value\n\t\tcase \"XfrmOutPolBlock\":\n\t\t\tx.XfrmOutPolBlock = value\n\t\tcase \"XfrmOutPolDead\":\n\t\t\tx.XfrmOutPolDead = value\n\t\tcase \"XfrmOutPolError\":\n\t\t\tx.XfrmOutPolError = value\n\t\tcase \"XfrmFwdHdrError\":\n\t\t\tx.XfrmFwdHdrError = value\n\t\tcase \"XfrmOutStateInvalid\":\n\t\t\tx.XfrmOutStateInvalid = value\n\t\tcase \"XfrmAcquireError\":\n\t\t\tx.XfrmAcquireError = value\n\t\t}\n\n\t}\n\n\treturn x, s.Err()\n}\n"
  },
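  {
    "path": "vendor/github.com/prometheus/procfs/xfrm_example_test.go",
    "content": "package procfs\n\nimport \"fmt\"\n\n// ExampleNewXfrmStat is an illustrative sketch (not part of upstream procfs)\n// reading the IPsec transformation error counters from /proc/net/xfrm_stat.\nfunc ExampleNewXfrmStat() {\n\tx, err := NewXfrmStat()\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tfmt.Printf(\"in errors: %d, out errors: %d, in no-policy: %d\\n\",\n\t\tx.XfrmInError, x.XfrmOutError, x.XfrmInNoPols)\n}\n"
  },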
  {
    "path": "vendor/github.com/prometheus/procfs/xfrm_test.go",
    "content": "// Copyright 2017 Prometheus Team\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage procfs\n\nimport (\n\t\"testing\"\n)\n\nfunc TestXfrmStats(t *testing.T) {\n\txfrmStats, err := FS(\"fixtures\").NewXfrmStat()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tfor _, test := range []struct {\n\t\tname string\n\t\twant int\n\t\tgot  int\n\t}{\n\t\t{name: \"XfrmInError\", want: 1, got: xfrmStats.XfrmInError},\n\t\t{name: \"XfrmInBufferError\", want: 2, got: xfrmStats.XfrmInBufferError},\n\t\t{name: \"XfrmInHdrError\", want: 4, got: xfrmStats.XfrmInHdrError},\n\t\t{name: \"XfrmInNoStates\", want: 3, got: xfrmStats.XfrmInNoStates},\n\t\t{name: \"XfrmInStateProtoError\", want: 40, got: xfrmStats.XfrmInStateProtoError},\n\t\t{name: \"XfrmInStateModeError\", want: 100, got: xfrmStats.XfrmInStateModeError},\n\t\t{name: \"XfrmInStateSeqError\", want: 6000, got: xfrmStats.XfrmInStateSeqError},\n\t\t{name: \"XfrmInStateExpired\", want: 4, got: xfrmStats.XfrmInStateExpired},\n\t\t{name: \"XfrmInStateMismatch\", want: 23451, got: xfrmStats.XfrmInStateMismatch},\n\t\t{name: \"XfrmInStateInvalid\", want: 55555, got: xfrmStats.XfrmInStateInvalid},\n\t\t{name: \"XfrmInTmplMismatch\", want: 51, got: xfrmStats.XfrmInTmplMismatch},\n\t\t{name: \"XfrmInNoPols\", want: 65432, got: xfrmStats.XfrmInNoPols},\n\t\t{name: \"XfrmInPolBlock\", want: 100, got: xfrmStats.XfrmInPolBlock},\n\t\t{name: \"XfrmInPolError\", want: 10000, got: xfrmStats.XfrmInPolError},\n\t\t{name: \"XfrmOutError\", want: 1000000, got: xfrmStats.XfrmOutError},\n\t\t{name: \"XfrmOutBundleGenError\", want: 43321, got: xfrmStats.XfrmOutBundleGenError},\n\t\t{name: \"XfrmOutBundleCheckError\", want: 555, got: xfrmStats.XfrmOutBundleCheckError},\n\t\t{name: \"XfrmOutNoStates\", want: 869, got: xfrmStats.XfrmOutNoStates},\n\t\t{name: \"XfrmOutStateProtoError\", want: 4542, got: xfrmStats.XfrmOutStateProtoError},\n\t\t{name: \"XfrmOutStateModeError\", want: 4, got: xfrmStats.XfrmOutStateModeError},\n\t\t{name: \"XfrmOutStateSeqError\", want: 543, got: xfrmStats.XfrmOutStateSeqError},\n\t\t{name: \"XfrmOutStateExpired\", want: 565, got: xfrmStats.XfrmOutStateExpired},\n\t\t{name: \"XfrmOutPolBlock\", want: 43456, got: xfrmStats.XfrmOutPolBlock},\n\t\t{name: \"XfrmOutPolDead\", want: 7656, got: xfrmStats.XfrmOutPolDead},\n\t\t{name: \"XfrmOutPolError\", want: 1454, got: xfrmStats.XfrmOutPolError},\n\t\t{name: \"XfrmFwdHdrError\", want: 6654, got: xfrmStats.XfrmFwdHdrError},\n\t\t{name: \"XfrmOutStateInvaliad\", want: 28765, got: xfrmStats.XfrmOutStateInvalid},\n\t\t{name: \"XfrmAcquireError\", want: 24532, got: xfrmStats.XfrmAcquireError},\n\t\t{name: \"XfrmInStateInvalid\", want: 55555, got: xfrmStats.XfrmInStateInvalid},\n\t\t{name: \"XfrmOutError\", want: 1000000, got: xfrmStats.XfrmOutError},\n\t} {\n\t\tif test.want != test.got {\n\t\t\tt.Errorf(\"Want %s %d, have %d\", test.name, test.want, test.got)\n\t\t}\n\t}\n}\n"
  },
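  {
    "path": "vendor/github.com/prometheus/procfs/xfs/example_parse_test.go",
    "content": "package xfs\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\n// ExampleParseStats is an illustrative sketch (not part of upstream procfs).\n// ParseStats accepts any io.Reader with /proc/fs/xfs/stat formatted data, so\n// a fixed string stands in for the real file here.\nfunc ExampleParseStats() {\n\tconst input = \"rw 1 2\\nextent_alloc 1 2 3 4\\n\"\n\n\tstats, err := ParseStats(strings.NewReader(input))\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\tfmt.Printf(\"reads: %d, writes: %d\\n\", stats.ReadWrite.Read, stats.ReadWrite.Write)\n\t// Output: reads: 1, writes: 2\n}\n"
  },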
  {
    "path": "vendor/github.com/prometheus/procfs/xfs/parse.go",
    "content": "// Copyright 2017 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage xfs\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n// ParseStats parses a Stats from an input io.Reader, using the format\n// found in /proc/fs/xfs/stat.\nfunc ParseStats(r io.Reader) (*Stats, error) {\n\tconst (\n\t\t// Fields parsed into stats structures.\n\t\tfieldExtentAlloc = \"extent_alloc\"\n\t\tfieldAbt         = \"abt\"\n\t\tfieldBlkMap      = \"blk_map\"\n\t\tfieldBmbt        = \"bmbt\"\n\t\tfieldDir         = \"dir\"\n\t\tfieldTrans       = \"trans\"\n\t\tfieldIg          = \"ig\"\n\t\tfieldLog         = \"log\"\n\t\tfieldRw          = \"rw\"\n\t\tfieldAttr        = \"attr\"\n\t\tfieldIcluster    = \"icluster\"\n\t\tfieldVnodes      = \"vnodes\"\n\t\tfieldBuf         = \"buf\"\n\t\tfieldXpc         = \"xpc\"\n\n\t\t// Unimplemented at this time due to lack of documentation.\n\t\tfieldPushAil = \"push_ail\"\n\t\tfieldXstrat  = \"xstrat\"\n\t\tfieldAbtb2   = \"abtb2\"\n\t\tfieldAbtc2   = \"abtc2\"\n\t\tfieldBmbt2   = \"bmbt2\"\n\t\tfieldIbt2    = \"ibt2\"\n\t\tfieldFibt2   = \"fibt2\"\n\t\tfieldQm      = \"qm\"\n\t\tfieldDebug   = \"debug\"\n\t)\n\n\tvar xfss Stats\n\n\ts := bufio.NewScanner(r)\n\tfor s.Scan() {\n\t\t// Expect at least a string label and a single integer value, ex:\n\t\t//   - abt 0\n\t\t//   - rw 1 2\n\t\tss := strings.Fields(string(s.Bytes()))\n\t\tif len(ss) < 2 {\n\t\t\tcontinue\n\t\t}\n\t\tlabel := ss[0]\n\n\t\t// Extended precision counters are uint64 values.\n\t\tif label == fieldXpc {\n\t\t\tus, err := parseUint64s(ss[1:])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\txfss.ExtendedPrecision, err = extendedPrecisionStats(us)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\t// All other counters are uint32 values.\n\t\tus, err := parseUint32s(ss[1:])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tswitch label {\n\t\tcase fieldExtentAlloc:\n\t\t\txfss.ExtentAllocation, err = extentAllocationStats(us)\n\t\tcase fieldAbt:\n\t\t\txfss.AllocationBTree, err = btreeStats(us)\n\t\tcase fieldBlkMap:\n\t\t\txfss.BlockMapping, err = blockMappingStats(us)\n\t\tcase fieldBmbt:\n\t\t\txfss.BlockMapBTree, err = btreeStats(us)\n\t\tcase fieldDir:\n\t\t\txfss.DirectoryOperation, err = directoryOperationStats(us)\n\t\tcase fieldTrans:\n\t\t\txfss.Transaction, err = transactionStats(us)\n\t\tcase fieldIg:\n\t\t\txfss.InodeOperation, err = inodeOperationStats(us)\n\t\tcase fieldLog:\n\t\t\txfss.LogOperation, err = logOperationStats(us)\n\t\tcase fieldRw:\n\t\t\txfss.ReadWrite, err = readWriteStats(us)\n\t\tcase fieldAttr:\n\t\t\txfss.AttributeOperation, err = attributeOperationStats(us)\n\t\tcase fieldIcluster:\n\t\t\txfss.InodeClustering, err = inodeClusteringStats(us)\n\t\tcase fieldVnodes:\n\t\t\txfss.Vnode, err = vnodeStats(us)\n\t\tcase fieldBuf:\n\t\t\txfss.Buffer, err = bufferStats(us)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, 
err\n\t\t}\n\t}\n\n\treturn &xfss, s.Err()\n}\n\n// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s.\nfunc extentAllocationStats(us []uint32) (ExtentAllocationStats, error) {\n\tif l := len(us); l != 4 {\n\t\treturn ExtentAllocationStats{}, fmt.Errorf(\"incorrect number of values for XFS extent allocation stats: %d\", l)\n\t}\n\n\treturn ExtentAllocationStats{\n\t\tExtentsAllocated: us[0],\n\t\tBlocksAllocated:  us[1],\n\t\tExtentsFreed:     us[2],\n\t\tBlocksFreed:      us[3],\n\t}, nil\n}\n\n// btreeStats builds a BTreeStats from a slice of uint32s.\nfunc btreeStats(us []uint32) (BTreeStats, error) {\n\tif l := len(us); l != 4 {\n\t\treturn BTreeStats{}, fmt.Errorf(\"incorrect number of values for XFS btree stats: %d\", l)\n\t}\n\n\treturn BTreeStats{\n\t\tLookups:         us[0],\n\t\tCompares:        us[1],\n\t\tRecordsInserted: us[2],\n\t\tRecordsDeleted:  us[3],\n\t}, nil\n}\n\n// blockMappingStats builds a BlockMappingStats from a slice of uint32s.\nfunc blockMappingStats(us []uint32) (BlockMappingStats, error) {\n\tif l := len(us); l != 7 {\n\t\treturn BlockMappingStats{}, fmt.Errorf(\"incorrect number of values for XFS block mapping stats: %d\", l)\n\t}\n\n\treturn BlockMappingStats{\n\t\tReads:                us[0],\n\t\tWrites:               us[1],\n\t\tUnmaps:               us[2],\n\t\tExtentListInsertions: us[3],\n\t\tExtentListDeletions:  us[4],\n\t\tExtentListLookups:    us[5],\n\t\tExtentListCompares:   us[6],\n\t}, nil\n}\n\n// directoryOperationStats builds a DirectoryOperationStats from a slice of uint32s.\nfunc directoryOperationStats(us []uint32) (DirectoryOperationStats, error) {\n\tif l := len(us); l != 4 {\n\t\treturn DirectoryOperationStats{}, fmt.Errorf(\"incorrect number of values for XFS directory operation stats: %d\", l)\n\t}\n\n\treturn DirectoryOperationStats{\n\t\tLookups:  us[0],\n\t\tCreates:  us[1],\n\t\tRemoves:  us[2],\n\t\tGetdents: us[3],\n\t}, nil\n}\n\n// transactionStats builds a TransactionStats from a slice of uint32s.\nfunc transactionStats(us []uint32) (TransactionStats, error) {\n\tif l := len(us); l != 3 {\n\t\treturn TransactionStats{}, fmt.Errorf(\"incorrect number of values for XFS transaction stats: %d\", l)\n\t}\n\n\treturn TransactionStats{\n\t\tSync:  us[0],\n\t\tAsync: us[1],\n\t\tEmpty: us[2],\n\t}, nil\n}\n\n// inodeOperationStats builds an InodeOperationStats from a slice of uint32s.\nfunc inodeOperationStats(us []uint32) (InodeOperationStats, error) {\n\tif l := len(us); l != 7 {\n\t\treturn InodeOperationStats{}, fmt.Errorf(\"incorrect number of values for XFS inode operation stats: %d\", l)\n\t}\n\n\treturn InodeOperationStats{\n\t\tAttempts:        us[0],\n\t\tFound:           us[1],\n\t\tRecycle:         us[2],\n\t\tMissed:          us[3],\n\t\tDuplicate:       us[4],\n\t\tReclaims:        us[5],\n\t\tAttributeChange: us[6],\n\t}, nil\n}\n\n// logOperationStats builds a LogOperationStats from a slice of uint32s.\nfunc logOperationStats(us []uint32) (LogOperationStats, error) {\n\tif l := len(us); l != 5 {\n\t\treturn LogOperationStats{}, fmt.Errorf(\"incorrect number of values for XFS log operation stats: %d\", l)\n\t}\n\n\treturn LogOperationStats{\n\t\tWrites:            us[0],\n\t\tBlocks:            us[1],\n\t\tNoInternalBuffers: us[2],\n\t\tForce:             us[3],\n\t\tForceSleep:        us[4],\n\t}, nil\n}\n\n// readWriteStats builds a ReadWriteStats from a slice of uint32s.\nfunc readWriteStats(us []uint32) (ReadWriteStats, error) {\n\tif l := len(us); l != 2 {\n\t\treturn 
ReadWriteStats{}, fmt.Errorf(\"incorrect number of values for XFS read write stats: %d\", l)\n\t}\n\n\treturn ReadWriteStats{\n\t\tRead:  us[0],\n\t\tWrite: us[1],\n\t}, nil\n}\n\n// attributeOperationStats builds an AttributeOperationStats from a slice of uint32s.\nfunc attributeOperationStats(us []uint32) (AttributeOperationStats, error) {\n\tif l := len(us); l != 4 {\n\t\treturn AttributeOperationStats{}, fmt.Errorf(\"incorrect number of values for XFS attribute operation stats: %d\", l)\n\t}\n\n\treturn AttributeOperationStats{\n\t\tGet:    us[0],\n\t\tSet:    us[1],\n\t\tRemove: us[2],\n\t\tList:   us[3],\n\t}, nil\n}\n\n// inodeClusteringStats builds an InodeClusteringStats from a slice of uint32s.\nfunc inodeClusteringStats(us []uint32) (InodeClusteringStats, error) {\n\tif l := len(us); l != 3 {\n\t\treturn InodeClusteringStats{}, fmt.Errorf(\"incorrect number of values for XFS inode clustering stats: %d\", l)\n\t}\n\n\treturn InodeClusteringStats{\n\t\tIflush:     us[0],\n\t\tFlush:      us[1],\n\t\tFlushInode: us[2],\n\t}, nil\n}\n\n// vnodeStats builds a VnodeStats from a slice of uint32s.\nfunc vnodeStats(us []uint32) (VnodeStats, error) {\n\t// The attribute \"Free\" appears to not be available on older XFS\n\t// stats versions.  Therefore, 7 or 8 elements may appear in\n\t// this slice.\n\tl := len(us)\n\tif l != 7 && l != 8 {\n\t\treturn VnodeStats{}, fmt.Errorf(\"incorrect number of values for XFS vnode stats: %d\", l)\n\t}\n\n\ts := VnodeStats{\n\t\tActive:   us[0],\n\t\tAllocate: us[1],\n\t\tGet:      us[2],\n\t\tHold:     us[3],\n\t\tRelease:  us[4],\n\t\tReclaim:  us[5],\n\t\tRemove:   us[6],\n\t}\n\n\t// Skip adding free, unless it is present. The zero value will\n\t// be used in place of an actual count.\n\tif l == 7 {\n\t\treturn s, nil\n\t}\n\n\ts.Free = us[7]\n\treturn s, nil\n}\n\n// bufferStats builds a BufferStats from a slice of uint32s.\nfunc bufferStats(us []uint32) (BufferStats, error) {\n\tif l := len(us); l != 9 {\n\t\treturn BufferStats{}, fmt.Errorf(\"incorrect number of values for XFS buffer stats: %d\", l)\n\t}\n\n\treturn BufferStats{\n\t\tGet:             us[0],\n\t\tCreate:          us[1],\n\t\tGetLocked:       us[2],\n\t\tGetLockedWaited: us[3],\n\t\tBusyLocked:      us[4],\n\t\tMissLocked:      us[5],\n\t\tPageRetries:     us[6],\n\t\tPageFound:       us[7],\n\t\tGetRead:         us[8],\n\t}, nil\n}\n\n// extendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint64s.\nfunc extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) {\n\tif l := len(us); l != 3 {\n\t\treturn ExtendedPrecisionStats{}, fmt.Errorf(\"incorrect number of values for XFS extended precision stats: %d\", l)\n\t}\n\n\treturn ExtendedPrecisionStats{\n\t\tFlushBytes: us[0],\n\t\tWriteBytes: us[1],\n\t\tReadBytes:  us[2],\n\t}, nil\n}\n\n// parseUint32s parses a slice of strings into a slice of uint32s.\nfunc parseUint32s(ss []string) ([]uint32, error) {\n\tus := make([]uint32, 0, len(ss))\n\tfor _, s := range ss {\n\t\tu, err := strconv.ParseUint(s, 10, 32)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tus = append(us, uint32(u))\n\t}\n\n\treturn us, nil\n}\n\n// parseUint64s parses a slice of strings into a slice of uint64s.\nfunc parseUint64s(ss []string) ([]uint64, error) {\n\tus := make([]uint64, 0, len(ss))\n\tfor _, s := range ss {\n\t\tu, err := strconv.ParseUint(s, 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tus = append(us, u)\n\t}\n\n\treturn us, nil\n}\n"
  },
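A minimal usage sketch for the parser above (illustrative only, not part of the vendored package): it opens /proc/fs/xfs/stat, the aggregated per-host file named in the ParseStats comment, so it assumes a Linux host with XFS in use.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/prometheus/procfs/xfs"
)

func main() {
	// /proc/fs/xfs/stat holds host-wide XFS counters in the
	// "label value value ..." format that ParseStats expects.
	f, err := os.Open("/proc/fs/xfs/stat")
	if err != nil {
		log.Fatal(err) // e.g. XFS not present on this host
	}
	defer f.Close()

	stats, err := xfs.ParseStats(f)
	if err != nil {
		log.Fatal(err)
	}

	// ReadWrite is parsed from the "rw" line.
	fmt.Printf("read syscalls: %d, write syscalls: %d\n",
		stats.ReadWrite.Read, stats.ReadWrite.Write)
}
```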
  {
    "path": "vendor/github.com/prometheus/procfs/xfs/parse_test.go",
    "content": "// Copyright 2017 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\npackage xfs_test\n\nimport (\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/prometheus/procfs\"\n\t\"github.com/prometheus/procfs/xfs\"\n)\n\nfunc TestParseStats(t *testing.T) {\n\ttests := []struct {\n\t\tname    string\n\t\ts       string\n\t\tfs      bool\n\t\tstats   *xfs.Stats\n\t\tinvalid bool\n\t}{\n\t\t{\n\t\t\tname: \"empty file OK\",\n\t\t},\n\t\t{\n\t\t\tname:  \"short or empty lines and unknown labels ignored\",\n\t\t\ts:     \"one\\n\\ntwo 1 2 3\\n\",\n\t\t\tstats: &xfs.Stats{},\n\t\t},\n\t\t{\n\t\t\tname:    \"bad uint32\",\n\t\t\ts:       \"extent_alloc XXX\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname:    \"bad uint64\",\n\t\t\ts:       \"xpc XXX\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname:    \"extent_alloc bad\",\n\t\t\ts:       \"extent_alloc 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"extent_alloc OK\",\n\t\t\ts:    \"extent_alloc 1 2 3 4\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tExtentAllocation: xfs.ExtentAllocationStats{\n\t\t\t\t\tExtentsAllocated: 1,\n\t\t\t\t\tBlocksAllocated:  2,\n\t\t\t\t\tExtentsFreed:     3,\n\t\t\t\t\tBlocksFreed:      4,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"abt bad\",\n\t\t\ts:       \"abt 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"abt OK\",\n\t\t\ts:    \"abt 1 2 3 4\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tAllocationBTree: xfs.BTreeStats{\n\t\t\t\t\tLookups:         1,\n\t\t\t\t\tCompares:        2,\n\t\t\t\t\tRecordsInserted: 3,\n\t\t\t\t\tRecordsDeleted:  4,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"blk_map bad\",\n\t\t\ts:       \"blk_map 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"blk_map OK\",\n\t\t\ts:    \"blk_map 1 2 3 4 5 6 7\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tBlockMapping: xfs.BlockMappingStats{\n\t\t\t\t\tReads:                1,\n\t\t\t\t\tWrites:               2,\n\t\t\t\t\tUnmaps:               3,\n\t\t\t\t\tExtentListInsertions: 4,\n\t\t\t\t\tExtentListDeletions:  5,\n\t\t\t\t\tExtentListLookups:    6,\n\t\t\t\t\tExtentListCompares:   7,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"bmbt bad\",\n\t\t\ts:       \"bmbt 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"bmbt OK\",\n\t\t\ts:    \"bmbt 1 2 3 4\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tBlockMapBTree: xfs.BTreeStats{\n\t\t\t\t\tLookups:         1,\n\t\t\t\t\tCompares:        2,\n\t\t\t\t\tRecordsInserted: 3,\n\t\t\t\t\tRecordsDeleted:  4,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"dir bad\",\n\t\t\ts:       \"dir 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"dir OK\",\n\t\t\ts:    \"dir 1 2 3 4\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tDirectoryOperation: xfs.DirectoryOperationStats{\n\t\t\t\t\tLookups:  1,\n\t\t\t\t\tCreates:  2,\n\t\t\t\t\tRemoves:  3,\n\t\t\t\t\tGetdents: 4,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"trans bad\",\n\t\t\ts:       \"trans 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: 
\"trans OK\",\n\t\t\ts:    \"trans 1 2 3\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tTransaction: xfs.TransactionStats{\n\t\t\t\t\tSync:  1,\n\t\t\t\t\tAsync: 2,\n\t\t\t\t\tEmpty: 3,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"ig bad\",\n\t\t\ts:       \"ig 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"ig OK\",\n\t\t\ts:    \"ig 1 2 3 4 5 6 7\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tInodeOperation: xfs.InodeOperationStats{\n\t\t\t\t\tAttempts:        1,\n\t\t\t\t\tFound:           2,\n\t\t\t\t\tRecycle:         3,\n\t\t\t\t\tMissed:          4,\n\t\t\t\t\tDuplicate:       5,\n\t\t\t\t\tReclaims:        6,\n\t\t\t\t\tAttributeChange: 7,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"log bad\",\n\t\t\ts:       \"log 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"log OK\",\n\t\t\ts:    \"log 1 2 3 4 5\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tLogOperation: xfs.LogOperationStats{\n\t\t\t\t\tWrites:            1,\n\t\t\t\t\tBlocks:            2,\n\t\t\t\t\tNoInternalBuffers: 3,\n\t\t\t\t\tForce:             4,\n\t\t\t\t\tForceSleep:        5,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"rw bad\",\n\t\t\ts:       \"rw 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"rw OK\",\n\t\t\ts:    \"rw 1 2\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tReadWrite: xfs.ReadWriteStats{\n\t\t\t\t\tRead:  1,\n\t\t\t\t\tWrite: 2,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"attr bad\",\n\t\t\ts:       \"attr 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"attr OK\",\n\t\t\ts:    \"attr 1 2 3 4\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tAttributeOperation: xfs.AttributeOperationStats{\n\t\t\t\t\tGet:    1,\n\t\t\t\t\tSet:    2,\n\t\t\t\t\tRemove: 3,\n\t\t\t\t\tList:   4,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"icluster bad\",\n\t\t\ts:       \"icluster 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"icluster OK\",\n\t\t\ts:    \"icluster 1 2 3\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tInodeClustering: xfs.InodeClusteringStats{\n\t\t\t\t\tIflush:     1,\n\t\t\t\t\tFlush:      2,\n\t\t\t\t\tFlushInode: 3,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"vnodes bad\",\n\t\t\ts:       \"vnodes 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"vnodes (missing free) OK\",\n\t\t\ts:    \"vnodes 1 2 3 4 5 6 7\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tVnode: xfs.VnodeStats{\n\t\t\t\t\tActive:   1,\n\t\t\t\t\tAllocate: 2,\n\t\t\t\t\tGet:      3,\n\t\t\t\t\tHold:     4,\n\t\t\t\t\tRelease:  5,\n\t\t\t\t\tReclaim:  6,\n\t\t\t\t\tRemove:   7,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"vnodes (with free) OK\",\n\t\t\ts:    \"vnodes 1 2 3 4 5 6 7 8\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tVnode: xfs.VnodeStats{\n\t\t\t\t\tActive:   1,\n\t\t\t\t\tAllocate: 2,\n\t\t\t\t\tGet:      3,\n\t\t\t\t\tHold:     4,\n\t\t\t\t\tRelease:  5,\n\t\t\t\t\tReclaim:  6,\n\t\t\t\t\tRemove:   7,\n\t\t\t\t\tFree:     8,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"buf bad\",\n\t\t\ts:       \"buf 1\",\n\t\t\tinvalid: true,\n\t\t},\n\t\t{\n\t\t\tname: \"buf OK\",\n\t\t\ts:    \"buf 1 2 3 4 5 6 7 8 9\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tBuffer: xfs.BufferStats{\n\t\t\t\t\tGet:             1,\n\t\t\t\t\tCreate:          2,\n\t\t\t\t\tGetLocked:       3,\n\t\t\t\t\tGetLockedWaited: 4,\n\t\t\t\t\tBusyLocked:      5,\n\t\t\t\t\tMissLocked:      6,\n\t\t\t\t\tPageRetries:     7,\n\t\t\t\t\tPageFound:       8,\n\t\t\t\t\tGetRead:         9,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"xpc bad\",\n\t\t\ts:       \"xpc 1\",\n\t\t\tinvalid: 
true,\n\t\t},\n\t\t{\n\t\t\tname: \"xpc OK\",\n\t\t\ts:    \"xpc 1 2 3\",\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tExtendedPrecision: xfs.ExtendedPrecisionStats{\n\t\t\t\t\tFlushBytes: 1,\n\t\t\t\t\tWriteBytes: 2,\n\t\t\t\t\tReadBytes:  3,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"fixtures OK\",\n\t\t\tfs:   true,\n\t\t\tstats: &xfs.Stats{\n\t\t\t\tExtentAllocation: xfs.ExtentAllocationStats{\n\t\t\t\t\tExtentsAllocated: 92447,\n\t\t\t\t\tBlocksAllocated:  97589,\n\t\t\t\t\tExtentsFreed:     92448,\n\t\t\t\t\tBlocksFreed:      93751,\n\t\t\t\t},\n\t\t\t\tAllocationBTree: xfs.BTreeStats{\n\t\t\t\t\tLookups:         0,\n\t\t\t\t\tCompares:        0,\n\t\t\t\t\tRecordsInserted: 0,\n\t\t\t\t\tRecordsDeleted:  0,\n\t\t\t\t},\n\t\t\t\tBlockMapping: xfs.BlockMappingStats{\n\t\t\t\t\tReads:                1767055,\n\t\t\t\t\tWrites:               188820,\n\t\t\t\t\tUnmaps:               184891,\n\t\t\t\t\tExtentListInsertions: 92447,\n\t\t\t\t\tExtentListDeletions:  92448,\n\t\t\t\t\tExtentListLookups:    2140766,\n\t\t\t\t\tExtentListCompares:   0,\n\t\t\t\t},\n\t\t\t\tBlockMapBTree: xfs.BTreeStats{\n\t\t\t\t\tLookups:         0,\n\t\t\t\t\tCompares:        0,\n\t\t\t\t\tRecordsInserted: 0,\n\t\t\t\t\tRecordsDeleted:  0,\n\t\t\t\t},\n\t\t\t\tDirectoryOperation: xfs.DirectoryOperationStats{\n\t\t\t\t\tLookups:  185039,\n\t\t\t\t\tCreates:  92447,\n\t\t\t\t\tRemoves:  92444,\n\t\t\t\t\tGetdents: 136422,\n\t\t\t\t},\n\t\t\t\tTransaction: xfs.TransactionStats{\n\t\t\t\t\tSync:  706,\n\t\t\t\t\tAsync: 944304,\n\t\t\t\t\tEmpty: 0,\n\t\t\t\t},\n\t\t\t\tInodeOperation: xfs.InodeOperationStats{\n\t\t\t\t\tAttempts:        185045,\n\t\t\t\t\tFound:           58807,\n\t\t\t\t\tRecycle:         0,\n\t\t\t\t\tMissed:          126238,\n\t\t\t\t\tDuplicate:       0,\n\t\t\t\t\tReclaims:        33637,\n\t\t\t\t\tAttributeChange: 22,\n\t\t\t\t},\n\t\t\t\tLogOperation: xfs.LogOperationStats{\n\t\t\t\t\tWrites:            2883,\n\t\t\t\t\tBlocks:            113448,\n\t\t\t\t\tNoInternalBuffers: 9,\n\t\t\t\t\tForce:             17360,\n\t\t\t\t\tForceSleep:        739,\n\t\t\t\t},\n\t\t\t\tReadWrite: xfs.ReadWriteStats{\n\t\t\t\t\tRead:  107739,\n\t\t\t\t\tWrite: 94045,\n\t\t\t\t},\n\t\t\t\tAttributeOperation: xfs.AttributeOperationStats{\n\t\t\t\t\tGet:    4,\n\t\t\t\t\tSet:    0,\n\t\t\t\t\tRemove: 0,\n\t\t\t\t\tList:   0,\n\t\t\t\t},\n\t\t\t\tInodeClustering: xfs.InodeClusteringStats{\n\t\t\t\t\tIflush:     8677,\n\t\t\t\t\tFlush:      7849,\n\t\t\t\t\tFlushInode: 135802,\n\t\t\t\t},\n\t\t\t\tVnode: xfs.VnodeStats{\n\t\t\t\t\tActive:   92601,\n\t\t\t\t\tAllocate: 0,\n\t\t\t\t\tGet:      0,\n\t\t\t\t\tHold:     0,\n\t\t\t\t\tRelease:  92444,\n\t\t\t\t\tReclaim:  92444,\n\t\t\t\t\tRemove:   92444,\n\t\t\t\t\tFree:     0,\n\t\t\t\t},\n\t\t\t\tBuffer: xfs.BufferStats{\n\t\t\t\t\tGet:             2666287,\n\t\t\t\t\tCreate:          7122,\n\t\t\t\t\tGetLocked:       2659202,\n\t\t\t\t\tGetLockedWaited: 3599,\n\t\t\t\t\tBusyLocked:      2,\n\t\t\t\t\tMissLocked:      7085,\n\t\t\t\t\tPageRetries:     0,\n\t\t\t\t\tPageFound:       10297,\n\t\t\t\t\tGetRead:         7085,\n\t\t\t\t},\n\t\t\t\tExtendedPrecision: xfs.ExtendedPrecisionStats{\n\t\t\t\t\tFlushBytes: 399724544,\n\t\t\t\t\tWriteBytes: 92823103,\n\t\t\t\t\tReadBytes:  86219234,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tvar (\n\t\t\tstats *xfs.Stats\n\t\t\terr   error\n\t\t)\n\n\t\tif tt.s != \"\" {\n\t\t\tstats, err = xfs.ParseStats(strings.NewReader(tt.s))\n\t\t}\n\t\tif tt.fs {\n\t\t\tstats, err = 
procfs.FS(\"../fixtures\").XFSStats()\n\t\t}\n\n\t\tif tt.invalid && err == nil {\n\t\t\tt.Error(\"expected an error, but none occurred\")\n\t\t}\n\t\tif !tt.invalid && err != nil {\n\t\t\tt.Errorf(\"unexpected error: %v\", err)\n\t\t}\n\n\t\tif want, have := tt.stats, stats; !reflect.DeepEqual(want, have) {\n\t\t\tt.Errorf(\"unexpected XFS stats:\\nwant:\\n%v\\nhave:\\n%v\", want, have)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/prometheus/procfs/xfs/xfs.go",
    "content": "// Copyright 2017 The Prometheus Authors\n// Licensed under the Apache License, Version 2.0 (the \"License\");\n// you may not use this file except in compliance with the License.\n// You may obtain a copy of the License at\n//\n// http://www.apache.org/licenses/LICENSE-2.0\n//\n// Unless required by applicable law or agreed to in writing, software\n// distributed under the License is distributed on an \"AS IS\" BASIS,\n// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n// See the License for the specific language governing permissions and\n// limitations under the License.\n\n// Package xfs provides access to statistics exposed by the XFS filesystem.\npackage xfs\n\n// Stats contains XFS filesystem runtime statistics, parsed from\n// /proc/fs/xfs/stat.\n//\n// The names and meanings of each statistic were taken from\n// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux\n// kernel source. Most counters are uint32s (same data types used in\n// xfs_stats.h), but some of the \"extended precision stats\" are uint64s.\ntype Stats struct {\n\t// The name of the filesystem used to source these statistics.\n\t// If empty, this indicates aggregated statistics for all XFS\n\t// filesystems on the host.\n\tName string\n\n\tExtentAllocation   ExtentAllocationStats\n\tAllocationBTree    BTreeStats\n\tBlockMapping       BlockMappingStats\n\tBlockMapBTree      BTreeStats\n\tDirectoryOperation DirectoryOperationStats\n\tTransaction        TransactionStats\n\tInodeOperation     InodeOperationStats\n\tLogOperation       LogOperationStats\n\tReadWrite          ReadWriteStats\n\tAttributeOperation AttributeOperationStats\n\tInodeClustering    InodeClusteringStats\n\tVnode              VnodeStats\n\tBuffer             BufferStats\n\tExtendedPrecision  ExtendedPrecisionStats\n}\n\n// ExtentAllocationStats contains statistics regarding XFS extent allocations.\ntype ExtentAllocationStats struct {\n\tExtentsAllocated uint32\n\tBlocksAllocated  uint32\n\tExtentsFreed     uint32\n\tBlocksFreed      uint32\n}\n\n// BTreeStats contains statistics regarding an XFS internal B-tree.\ntype BTreeStats struct {\n\tLookups         uint32\n\tCompares        uint32\n\tRecordsInserted uint32\n\tRecordsDeleted  uint32\n}\n\n// BlockMappingStats contains statistics regarding XFS block maps.\ntype BlockMappingStats struct {\n\tReads                uint32\n\tWrites               uint32\n\tUnmaps               uint32\n\tExtentListInsertions uint32\n\tExtentListDeletions  uint32\n\tExtentListLookups    uint32\n\tExtentListCompares   uint32\n}\n\n// DirectoryOperationStats contains statistics regarding XFS directory entries.\ntype DirectoryOperationStats struct {\n\tLookups  uint32\n\tCreates  uint32\n\tRemoves  uint32\n\tGetdents uint32\n}\n\n// TransactionStats contains statistics regarding XFS metadata transactions.\ntype TransactionStats struct {\n\tSync  uint32\n\tAsync uint32\n\tEmpty uint32\n}\n\n// InodeOperationStats contains statistics regarding XFS inode operations.\ntype InodeOperationStats struct {\n\tAttempts        uint32\n\tFound           uint32\n\tRecycle         uint32\n\tMissed          uint32\n\tDuplicate       uint32\n\tReclaims        uint32\n\tAttributeChange uint32\n}\n\n// LogOperationStats contains statistics regarding the XFS log buffer.\ntype LogOperationStats struct {\n\tWrites            uint32\n\tBlocks            uint32\n\tNoInternalBuffers uint32\n\tForce             uint32\n\tForceSleep        uint32\n}\n\n// ReadWriteStats contains 
statistics regarding the number of read and write\n// system calls for XFS filesystems.\ntype ReadWriteStats struct {\n\tRead  uint32\n\tWrite uint32\n}\n\n// AttributeOperationStats contains statistics regarding manipulation of\n// XFS extended file attributes.\ntype AttributeOperationStats struct {\n\tGet    uint32\n\tSet    uint32\n\tRemove uint32\n\tList   uint32\n}\n\n// InodeClusteringStats contains statistics regarding XFS inode clustering\n// operations.\ntype InodeClusteringStats struct {\n\tIflush     uint32\n\tFlush      uint32\n\tFlushInode uint32\n}\n\n// VnodeStats contains statistics regarding XFS vnode operations.\ntype VnodeStats struct {\n\tActive   uint32\n\tAllocate uint32\n\tGet      uint32\n\tHold     uint32\n\tRelease  uint32\n\tReclaim  uint32\n\tRemove   uint32\n\tFree     uint32\n}\n\n// BufferStats contains statistics regarding XFS read/write I/O buffers.\ntype BufferStats struct {\n\tGet             uint32\n\tCreate          uint32\n\tGetLocked       uint32\n\tGetLockedWaited uint32\n\tBusyLocked      uint32\n\tMissLocked      uint32\n\tPageRetries     uint32\n\tPageFound       uint32\n\tGetRead         uint32\n}\n\n// ExtendedPrecisionStats contains high precision counters used to track the\n// total number of bytes read, written, or flushed, during XFS operations.\ntype ExtendedPrecisionStats struct {\n\tFlushBytes uint64\n\tWriteBytes uint64\n\tReadBytes  uint64\n}\n"
  },
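Because ParseStats takes any io.Reader, the structs above can also be exercised without a live XFS mount; a self-contained sketch (illustrative, not part of the package) showing how stat lines map onto the fields:

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/prometheus/procfs/xfs"
)

func main() {
	// Two lines in the /proc/fs/xfs/stat format: "rw" carries uint32
	// counters, "xpc" the uint64 extended precision byte counters.
	sample := "rw 107739 94045\nxpc 399724544 92823103 86219234\n"

	stats, err := xfs.ParseStats(strings.NewReader(sample))
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println("read syscalls:", stats.ReadWrite.Read)               // 107739
	fmt.Println("bytes written:", stats.ExtendedPrecision.WriteBytes) // 92823103
}
```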
  {
    "path": "vendor/github.com/stretchr/testify/.gitignore",
    "content": "# Compiled Object files, Static and Dynamic libs (Shared Objects)\n*.o\n*.a\n*.so\n\n# Folders\n_obj\n_test\n\n# Architecture specific extensions/prefixes\n*.[568vq]\n[568vq].out\n\n*.cgo1.go\n*.cgo2.c\n_cgo_defun.c\n_cgo_gotypes.go\n_cgo_export.*\n\n_testmain.go\n\n*.exe\n\n.DS_Store\n"
  },
  {
    "path": "vendor/github.com/stretchr/testify/.travis.yml",
    "content": "language: go\n\nsudo: false\n\ngo:\n  - 1.1\n  - 1.2\n  - 1.3\n  - 1.4\n  - 1.5\n  - 1.6\n  - 1.7\n  - tip\n\nscript:\n  - go test -v ./...\n"
  },
  {
    "path": "vendor/github.com/stretchr/testify/LICENCE.txt",
    "content": "Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell\n\nPlease consider promoting this project if you find it useful.\n\nPermission is hereby granted, free of charge, to any person \nobtaining a copy of this software and associated documentation \nfiles (the \"Software\"), to deal in the Software without restriction, \nincluding without limitation the rights to use, copy, modify, merge, \npublish, distribute, sublicense, and/or sell copies of the Software, \nand to permit persons to whom the Software is furnished to do so, \nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included\nin all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, \nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES \nOF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. \nIN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, \nDAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT \nOR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE \nOR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"
  },
  {
    "path": "vendor/github.com/stretchr/testify/LICENSE",
    "content": "Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell\n\nPlease consider promoting this project if you find it useful.\n\nPermission is hereby granted, free of charge, to any person \nobtaining a copy of this software and associated documentation \nfiles (the \"Software\"), to deal in the Software without restriction, \nincluding without limitation the rights to use, copy, modify, merge, \npublish, distribute, sublicense, and/or sell copies of the Software, \nand to permit persons to whom the Software is furnished to do so, \nsubject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included\nin all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, \nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES \nOF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. \nIN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, \nDAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT \nOR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE \nOR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"
  },
  {
    "path": "vendor/github.com/stretchr/testify/README.md",
    "content": "Testify - Thou Shalt Write Tests\n================================\n\n[![Build Status](https://travis-ci.org/stretchr/testify.svg)](https://travis-ci.org/stretchr/testify) [![Go Report Card](https://goreportcard.com/badge/github.com/stretchr/testify)](https://goreportcard.com/report/github.com/stretchr/testify) [![GoDoc](https://godoc.org/github.com/stretchr/testify?status.svg)](https://godoc.org/github.com/stretchr/testify)\n\nGo code (golang) set of packages that provide many tools for testifying that your code will behave as you intend.\n\nFeatures include:\n\n  * [Easy assertions](#assert-package)\n  * [Mocking](#mock-package)\n  * [HTTP response trapping](#http-package)\n  * [Testing suite interfaces and functions](#suite-package)\n\nGet started:\n\n  * Install testify with [one line of code](#installation), or [update it with another](#staying-up-to-date)\n  * For an introduction to writing test code in Go, see http://golang.org/doc/code.html#Testing\n  * Check out the API Documentation http://godoc.org/github.com/stretchr/testify\n  * To make your testing life easier, check out our other project, [gorc](http://github.com/stretchr/gorc)\n  * A little about [Test-Driven Development (TDD)](http://en.wikipedia.org/wiki/Test-driven_development)\n\n\n\n[`assert`](http://godoc.org/github.com/stretchr/testify/assert \"API documentation\") package\n-------------------------------------------------------------------------------------------\n\nThe `assert` package provides some helpful methods that allow you to write better test code in Go.\n\n  * Prints friendly, easy to read failure descriptions\n  * Allows for very readable code\n  * Optionally annotate each assertion with a message\n\nSee it in action:\n\n```go\npackage yours\n\nimport (\n  \"testing\"\n  \"github.com/stretchr/testify/assert\"\n)\n\nfunc TestSomething(t *testing.T) {\n\n  // assert equality\n  assert.Equal(t, 123, 123, \"they should be equal\")\n\n  // assert inequality\n  assert.NotEqual(t, 123, 456, \"they should not be equal\")\n\n  // assert for nil (good for errors)\n  assert.Nil(t, object)\n\n  // assert for not nil (good when you expect something)\n  if assert.NotNil(t, object) {\n\n    // now we know that object isn't nil, we are safe to make\n    // further assertions without causing any errors\n    assert.Equal(t, \"Something\", object.Value)\n\n  }\n\n}\n```\n\n  * Every assert func takes the `testing.T` object as the first argument.  
This is how it writes the errors out through the normal `go test` capabilities.\n  * Every assert func returns a bool indicating whether the assertion was successful; this is useful if you want to go on making further assertions under certain conditions.\n\nIf you assert many times, use the form below:\n\n```go\npackage yours\n\nimport (\n  \"testing\"\n  \"github.com/stretchr/testify/assert\"\n)\n\nfunc TestSomething(t *testing.T) {\n  assert := assert.New(t)\n\n  // assert equality\n  assert.Equal(123, 123, \"they should be equal\")\n\n  // assert inequality\n  assert.NotEqual(123, 456, \"they should not be equal\")\n\n  // assert for nil (good for errors)\n  assert.Nil(object)\n\n  // assert for not nil (good when you expect something)\n  if assert.NotNil(object) {\n\n    // now we know that object isn't nil, we are safe to make\n    // further assertions without causing any errors\n    assert.Equal(\"Something\", object.Value)\n  }\n}\n```\n\n[`require`](http://godoc.org/github.com/stretchr/testify/require \"API documentation\") package\n---------------------------------------------------------------------------------------------\n\nThe `require` package provides the same global functions as the `assert` package, but instead of returning a boolean result they terminate the current test.\n\nSee [t.FailNow](http://golang.org/pkg/testing/#T.FailNow) for details.\n\n\n[`http`](http://godoc.org/github.com/stretchr/testify/http \"API documentation\") package\n---------------------------------------------------------------------------------------\n\nThe `http` package contains test objects useful for testing code that relies on the `net/http` package.  Check out the [(deprecated) API documentation for the `http` package](http://godoc.org/github.com/stretchr/testify/http).\n\nWe recommend you use [httptest](http://golang.org/pkg/net/http/httptest) instead.\n\n[`mock`](http://godoc.org/github.com/stretchr/testify/mock \"API documentation\") package\n----------------------------------------------------------------------------------------\n\nThe `mock` package provides a mechanism for easily writing mock objects that can be used in place of real objects when writing test code.\n\nAn example test function that tests a piece of code that relies on an external object `testObj` can set up expectations (with testify) and assert that they indeed happened:\n\n```go\npackage yours\n\nimport (\n  \"testing\"\n  \"github.com/stretchr/testify/mock\"\n)\n\n/*\n  Test objects\n*/\n\n// MyMockedObject is a mocked object that implements an interface\n// that describes an object that the code I am testing relies on.\ntype MyMockedObject struct{\n  mock.Mock\n}\n\n// DoSomething is a method on MyMockedObject that implements some interface\n// and just records the activity, and returns what the Mock object tells it to.\n//\n// In the real object, this method would do something useful, but since this\n// is a mocked object - we're just going to stub it out.\n//\n// NOTE: This method is not being tested here; the code that uses this object is.\nfunc (m *MyMockedObject) DoSomething(number int) (bool, error) {\n\n  args := m.Called(number)\n  return args.Bool(0), args.Error(1)\n\n}\n\n/*\n  Actual test functions\n*/\n\n// TestSomething is an example of how to use our test object to\n// make assertions about some target code we are testing.\nfunc TestSomething(t *testing.T) {\n\n  // create an instance of our test object\n  testObj := new(MyMockedObject)\n\n  // setup expectations\n  testObj.On(\"DoSomething\", 123).Return(true, 
nil)\n\n  // call the code we are testing\n  targetFuncThatDoesSomethingWithObj(testObj)\n\n  // assert that the expectations were met\n  testObj.AssertExpectations(t)\n\n}\n```\n\nFor more information on how to write mock code, check out the [API documentation for the `mock` package](http://godoc.org/github.com/stretchr/testify/mock).\n\nYou can use the [mockery tool](http://github.com/vektra/mockery) to autogenerate the mock code against an interface as well, making using mocks much quicker.\n\n[`suite`](http://godoc.org/github.com/stretchr/testify/suite \"API documentation\") package\n-----------------------------------------------------------------------------------------\n\nThe `suite` package provides functionality that you might be used to from more common object oriented languages.  With it, you can build a testing suite as a struct, build setup/teardown methods and testing methods on your struct, and run them with 'go test' as per normal.\n\nAn example suite is shown below:\n\n```go\n// Basic imports\nimport (\n    \"testing\"\n    \"github.com/stretchr/testify/assert\"\n    \"github.com/stretchr/testify/suite\"\n)\n\n// Define the suite, and absorb the built-in basic suite\n// functionality from testify - including a T() method which\n// returns the current testing context\ntype ExampleTestSuite struct {\n    suite.Suite\n    VariableThatShouldStartAtFive int\n}\n\n// Make sure that VariableThatShouldStartAtFive is set to five\n// before each test\nfunc (suite *ExampleTestSuite) SetupTest() {\n    suite.VariableThatShouldStartAtFive = 5\n}\n\n// All methods that begin with \"Test\" are run as tests within a\n// suite.\nfunc (suite *ExampleTestSuite) TestExample() {\n    assert.Equal(suite.T(), 5, suite.VariableThatShouldStartAtFive)\n}\n\n// In order for 'go test' to run this suite, we need to create\n// a normal test function and pass our suite to suite.Run\nfunc TestExampleTestSuite(t *testing.T) {\n    suite.Run(t, new(ExampleTestSuite))\n}\n```\n\nFor a more complete example, using all of the functionality provided by the suite package, look at our [example testing suite](https://github.com/stretchr/testify/blob/master/suite/suite_test.go)\n\nFor more information on writing suites, check out the [API documentation for the `suite` package](http://godoc.org/github.com/stretchr/testify/suite).\n\n`Suite` object has assertion methods:\n\n```go\n// Basic imports\nimport (\n    \"testing\"\n    \"github.com/stretchr/testify/suite\"\n)\n\n// Define the suite, and absorb the built-in basic suite\n// functionality from testify - including assertion methods.\ntype ExampleTestSuite struct {\n    suite.Suite\n    VariableThatShouldStartAtFive int\n}\n\n// Make sure that VariableThatShouldStartAtFive is set to five\n// before each test\nfunc (suite *ExampleTestSuite) SetupTest() {\n    suite.VariableThatShouldStartAtFive = 5\n}\n\n// All methods that begin with \"Test\" are run as tests within a\n// suite.\nfunc (suite *ExampleTestSuite) TestExample() {\n    suite.Equal(suite.VariableThatShouldStartAtFive, 5)\n}\n\n// In order for 'go test' to run this suite, we need to create\n// a normal test function and pass our suite to suite.Run\nfunc TestExampleTestSuite(t *testing.T) {\n    suite.Run(t, new(ExampleTestSuite))\n}\n```\n\n------\n\nInstallation\n============\n\nTo install Testify, use `go get`:\n\n    * Latest version: go get github.com/stretchr/testify\n    * Specific version: go get gopkg.in/stretchr/testify.v1\n\nThis will then make the following packages available to you:\n\n    
github.com/stretchr/testify/assert\n    github.com/stretchr/testify/mock\n    github.com/stretchr/testify/http\n\nImport the `testify/assert` package into your code using this template:\n\n```go\npackage yours\n\nimport (\n  \"testing\"\n  \"github.com/stretchr/testify/assert\"\n)\n\nfunc TestSomething(t *testing.T) {\n\n  assert.True(t, true, \"True is true!\")\n\n}\n```\n\n------\n\nStaying up to date\n==================\n\nTo update Testify to the latest version, use `go get -u github.com/stretchr/testify`.\n\n------\n\nVersion History\n===============\n\n   * 1.0 - New package versioning strategy adopted.\n\n------\n\nContributing\n============\n\nPlease feel free to submit issues, fork the repository and send pull requests!\n\nWhen submitting an issue, we ask that you please include a complete test function that demonstrates the issue.  Extra credit for those using Testify to write the test code that demonstrates it.\n\n------\n\nLicence\n=======\nCopyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell\n\nPlease consider promoting this project if you find it useful.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n"
  },
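The README above describes the `require` package without showing it in use; a short sketch of the difference from `assert`, using a hypothetical `SomeFunction` under test:

```go
package yours

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestSomethingCritical(t *testing.T) {
	obj, err := SomeFunction() // hypothetical code under test

	// Unlike assert.NoError, require.NoError calls t.FailNow() on
	// failure, so execution stops here if err is non-nil and the
	// assertions below can rely on obj being valid.
	require.NoError(t, err)
	require.NotNil(t, obj)
	require.Equal(t, "Something", obj.Value)
}
```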
  {
    "path": "vendor/github.com/stretchr/testify/assert/assertion_forward.go",
    "content": "/*\n* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen\n* THIS FILE MUST NOT BE EDITED BY HAND\n*/\n\npackage assert\n\nimport (\n\n\thttp \"net/http\"\n\turl \"net/url\"\n\ttime \"time\"\n)\n\n\n// Condition uses a Comparison to assert a complex condition.\nfunc (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool {\n\treturn Condition(a.t, comp, msgAndArgs...)\n}\n\n\n// Contains asserts that the specified string, list(array, slice...) or map contains the\n// specified substring or element.\n// \n//    a.Contains(\"Hello World\", \"World\", \"But 'Hello World' does contain 'World'\")\n//    a.Contains([\"Hello\", \"World\"], \"World\", \"But [\"Hello\", \"World\"] does contain 'World'\")\n//    a.Contains({\"Hello\": \"World\"}, \"Hello\", \"But {'Hello': 'World'} does contain 'Hello'\")\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {\n\treturn Contains(a.t, s, contains, msgAndArgs...)\n}\n\n\n// Empty asserts that the specified object is empty.  I.e. nil, \"\", false, 0 or either\n// a slice or a channel with len == 0.\n// \n//  a.Empty(obj)\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {\n\treturn Empty(a.t, object, msgAndArgs...)\n}\n\n\n// Equal asserts that two objects are equal.\n// \n//    a.Equal(123, 123, \"123 and 123 should be equal\")\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {\n\treturn Equal(a.t, expected, actual, msgAndArgs...)\n}\n\n\n// EqualError asserts that a function returned an error (i.e. not `nil`)\n// and that it is equal to the provided error.\n// \n//   actualObj, err := SomeFunction()\n//   if assert.Error(t, err, \"An error was expected\") {\n// \t   assert.Equal(t, err, expectedError)\n//   }\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool {\n\treturn EqualError(a.t, theError, errString, msgAndArgs...)\n}\n\n\n// EqualValues asserts that two objects are equal or convertable to the same types\n// and equal.\n// \n//    a.EqualValues(uint32(123), int32(123), \"123 and 123 should be equal\")\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {\n\treturn EqualValues(a.t, expected, actual, msgAndArgs...)\n}\n\n\n// Error asserts that a function returned an error (i.e. 
not `nil`).\n// \n//   actualObj, err := SomeFunction()\n//   if a.Error(err, \"An error was expected\") {\n// \t   assert.Equal(t, err, expectedError)\n//   }\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {\n\treturn Error(a.t, err, msgAndArgs...)\n}\n\n\n// Exactly asserts that two objects are equal is value and type.\n// \n//    a.Exactly(int32(123), int64(123), \"123 and 123 should NOT be equal\")\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {\n\treturn Exactly(a.t, expected, actual, msgAndArgs...)\n}\n\n\n// Fail reports a failure through\nfunc (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool {\n\treturn Fail(a.t, failureMessage, msgAndArgs...)\n}\n\n\n// FailNow fails test\nfunc (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool {\n\treturn FailNow(a.t, failureMessage, msgAndArgs...)\n}\n\n\n// False asserts that the specified value is false.\n// \n//    a.False(myBool, \"myBool should be false\")\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {\n\treturn False(a.t, value, msgAndArgs...)\n}\n\n\n// HTTPBodyContains asserts that a specified handler returns a\n// body that contains a string.\n// \n//  a.HTTPBodyContains(myHandler, \"www.google.com\", nil, \"I'm Feeling Lucky\")\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool {\n\treturn HTTPBodyContains(a.t, handler, method, url, values, str)\n}\n\n\n// HTTPBodyNotContains asserts that a specified handler returns a\n// body that does not contain a string.\n// \n//  a.HTTPBodyNotContains(myHandler, \"www.google.com\", nil, \"I'm Feeling Lucky\")\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool {\n\treturn HTTPBodyNotContains(a.t, handler, method, url, values, str)\n}\n\n\n// HTTPError asserts that a specified handler returns an error status code.\n// \n//  a.HTTPError(myHandler, \"POST\", \"/a/b/c\", url.Values{\"a\": []string{\"b\", \"c\"}}\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) bool {\n\treturn HTTPError(a.t, handler, method, url, values)\n}\n\n\n// HTTPRedirect asserts that a specified handler returns a redirect status code.\n// \n//  a.HTTPRedirect(myHandler, \"GET\", \"/a/b/c\", url.Values{\"a\": []string{\"b\", \"c\"}}\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) bool {\n\treturn HTTPRedirect(a.t, handler, method, url, values)\n}\n\n\n// HTTPSuccess asserts that a specified handler returns a success status code.\n// \n//  a.HTTPSuccess(myHandler, \"POST\", \"http://www.google.com\", nil)\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, 
values url.Values) bool {\n\treturn HTTPSuccess(a.t, handler, method, url, values)\n}\n\n\n// Implements asserts that an object is implemented by the specified interface.\n// \n//    a.Implements((*MyInterface)(nil), new(MyObject), \"MyObject\")\nfunc (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {\n\treturn Implements(a.t, interfaceObject, object, msgAndArgs...)\n}\n\n\n// InDelta asserts that the two numerals are within delta of each other.\n// \n// \t a.InDelta(math.Pi, (22 / 7.0), 0.01)\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {\n\treturn InDelta(a.t, expected, actual, delta, msgAndArgs...)\n}\n\n\n// InDeltaSlice is the same as InDelta, except it compares two slices.\nfunc (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {\n\treturn InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)\n}\n\n\n// InEpsilon asserts that expected and actual have a relative error less than epsilon\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {\n\treturn InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...)\n}\n\n\n// InEpsilonSlice is the same as InEpsilon, except it compares two slices.\nfunc (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {\n\treturn InEpsilonSlice(a.t, expected, actual, delta, msgAndArgs...)\n}\n\n\n// IsType asserts that the specified objects are of the same type.\nfunc (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {\n\treturn IsType(a.t, expectedType, object, msgAndArgs...)\n}\n\n\n// JSONEq asserts that two JSON strings are equivalent.\n// \n//  a.JSONEq(`{\"hello\": \"world\", \"foo\": \"bar\"}`, `{\"foo\": \"bar\", \"hello\": \"world\"}`)\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool {\n\treturn JSONEq(a.t, expected, actual, msgAndArgs...)\n}\n\n\n// Len asserts that the specified object has specific length.\n// Len also fails if the object has a type that len() not accept.\n// \n//    a.Len(mySlice, 3, \"The size of slice is not 3\")\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool {\n\treturn Len(a.t, object, length, msgAndArgs...)\n}\n\n\n// Nil asserts that the specified object is nil.\n// \n//    a.Nil(err, \"err should be nothing\")\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool {\n\treturn Nil(a.t, object, msgAndArgs...)\n}\n\n\n// NoError asserts that a function returned no error (i.e. 
`nil`).\n// \n//   actualObj, err := SomeFunction()\n//   if a.NoError(err) {\n// \t   assert.Equal(t, actualObj, expectedObj)\n//   }\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool {\n\treturn NoError(a.t, err, msgAndArgs...)\n}\n\n\n// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the\n// specified substring or element.\n// \n//    a.NotContains(\"Hello World\", \"Earth\", \"But 'Hello World' does NOT contain 'Earth'\")\n//    a.NotContains([\"Hello\", \"World\"], \"Earth\", \"But ['Hello', 'World'] does NOT contain 'Earth'\")\n//    a.NotContains({\"Hello\": \"World\"}, \"Earth\", \"But {'Hello': 'World'} does NOT contain 'Earth'\")\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {\n\treturn NotContains(a.t, s, contains, msgAndArgs...)\n}\n\n\n// NotEmpty asserts that the specified object is NOT empty.  I.e. not nil, \"\", false, 0 or either\n// a slice or a channel with len == 0.\n// \n//  if a.NotEmpty(obj) {\n//    assert.Equal(t, \"two\", obj[1])\n//  }\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool {\n\treturn NotEmpty(a.t, object, msgAndArgs...)\n}\n\n\n// NotEqual asserts that the specified values are NOT equal.\n// \n//    a.NotEqual(obj1, obj2, \"two objects shouldn't be equal\")\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {\n\treturn NotEqual(a.t, expected, actual, msgAndArgs...)\n}\n\n\n// NotNil asserts that the specified object is not nil.\n// \n//    a.NotNil(err, \"err should be something\")\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool {\n\treturn NotNil(a.t, object, msgAndArgs...)\n}\n\n\n// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.\n// \n//   a.NotPanics(func(){\n//     RemainCalm()\n//   }, \"Calling RemainCalm() should NOT panic\")\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool {\n\treturn NotPanics(a.t, f, msgAndArgs...)\n}\n\n\n// NotRegexp asserts that a specified regexp does not match a string.\n// \n//  a.NotRegexp(regexp.MustCompile(\"starts\"), \"it's starting\")\n//  a.NotRegexp(\"^start\", \"it's not starting\")\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {\n\treturn NotRegexp(a.t, rx, str, msgAndArgs...)\n}\n\n\n// NotZero asserts that i is not the zero value for its type and returns the truth.\nfunc (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool {\n\treturn NotZero(a.t, i, msgAndArgs...)\n}\n\n\n// Panics asserts that the code inside the specified PanicTestFunc panics.\n// \n//   a.Panics(func(){\n//     GoCrazy()\n//   }, \"Calling GoCrazy() should panic\")\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {\n\treturn 
Panics(a.t, f, msgAndArgs...)\n}\n\n\n// Regexp asserts that a specified regexp matches a string.\n// \n//  a.Regexp(regexp.MustCompile(\"start\"), \"it's starting\")\n//  a.Regexp(\"start...$\", \"it's not starting\")\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {\n\treturn Regexp(a.t, rx, str, msgAndArgs...)\n}\n\n\n// True asserts that the specified value is true.\n// \n//    a.True(myBool, \"myBool should be true\")\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool {\n\treturn True(a.t, value, msgAndArgs...)\n}\n\n\n// WithinDuration asserts that the two times are within duration delta of each other.\n// \n//   a.WithinDuration(time.Now(), time.Now(), 10*time.Second, \"The difference should not be more than 10s\")\n// \n// Returns whether the assertion was successful (true) or not (false).\nfunc (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {\n\treturn WithinDuration(a.t, expected, actual, delta, msgAndArgs...)\n}\n\n\n// Zero asserts that i is the zero value for its type and returns the truth.\nfunc (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool {\n\treturn Zero(a.t, i, msgAndArgs...)\n}\n"
  },
  {
    "path": "vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl",
    "content": "{{.CommentWithoutT \"a\"}}\nfunc (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool {\n\treturn {{.DocInfo.Name}}(a.t, {{.ForwardedParams}})\n}\n"
  },
  {
    "path": "vendor/github.com/stretchr/testify/assert/assertions.go",
    "content": "package assert\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"math\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\t\"unicode\"\n\t\"unicode/utf8\"\n\n\t\"github.com/davecgh/go-spew/spew\"\n\t\"github.com/pmezard/go-difflib/difflib\"\n)\n\nfunc init() {\n\tspew.Config.SortKeys = true\n}\n\n// TestingT is an interface wrapper around *testing.T\ntype TestingT interface {\n\tErrorf(format string, args ...interface{})\n}\n\n// Comparison a custom function that returns true on success and false on failure\ntype Comparison func() (success bool)\n\n/*\n\tHelper functions\n*/\n\n// ObjectsAreEqual determines if two objects are considered equal.\n//\n// This function does no assertion of any kind.\nfunc ObjectsAreEqual(expected, actual interface{}) bool {\n\n\tif expected == nil || actual == nil {\n\t\treturn expected == actual\n\t}\n\n\treturn reflect.DeepEqual(expected, actual)\n\n}\n\n// ObjectsAreEqualValues gets whether two objects are equal, or if their\n// values are equal.\nfunc ObjectsAreEqualValues(expected, actual interface{}) bool {\n\tif ObjectsAreEqual(expected, actual) {\n\t\treturn true\n\t}\n\n\tactualType := reflect.TypeOf(actual)\n\tif actualType == nil {\n\t\treturn false\n\t}\n\texpectedValue := reflect.ValueOf(expected)\n\tif expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) {\n\t\t// Attempt comparison after type conversion\n\t\treturn reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual)\n\t}\n\n\treturn false\n}\n\n/* CallerInfo is necessary because the assert functions use the testing object\ninternally, causing it to print the file:line of the assert method, rather than where\nthe problem actually occurred in calling code.*/\n\n// CallerInfo returns an array of strings containing the file and line number\n// of each stack frame leading from the current test to the assert call that\n// failed.\nfunc CallerInfo() []string {\n\n\tpc := uintptr(0)\n\tfile := \"\"\n\tline := 0\n\tok := false\n\tname := \"\"\n\n\tcallers := []string{}\n\tfor i := 0; ; i++ {\n\t\tpc, file, line, ok = runtime.Caller(i)\n\t\tif !ok {\n\t\t\t// The breaks below failed to terminate the loop, and we ran off the\n\t\t\t// end of the call stack.\n\t\t\tbreak\n\t\t}\n\n\t\t// This is a huge edge case, but it will panic if this is the case, see #180\n\t\tif file == \"<autogenerated>\" {\n\t\t\tbreak\n\t\t}\n\n\t\tf := runtime.FuncForPC(pc)\n\t\tif f == nil {\n\t\t\tbreak\n\t\t}\n\t\tname = f.Name()\n\n\t\t// testing.tRunner is the standard library function that calls\n\t\t// tests. 
Subtests are called directly by tRunner, without going through\n\t\t// the Test/Benchmark/Example function that contains the t.Run calls, so\n\t\t// with subtests we should break when we hit tRunner, without adding it\n\t\t// to the list of callers.\n\t\tif name == \"testing.tRunner\" {\n\t\t\tbreak\n\t\t}\n\n\t\tparts := strings.Split(file, \"/\")\n\t\tdir := parts[len(parts)-2]\n\t\tfile = parts[len(parts)-1]\n\t\tif (dir != \"assert\" && dir != \"mock\" && dir != \"require\") || file == \"mock_test.go\" {\n\t\t\tcallers = append(callers, fmt.Sprintf(\"%s:%d\", file, line))\n\t\t}\n\n\t\t// Drop the package\n\t\tsegments := strings.Split(name, \".\")\n\t\tname = segments[len(segments)-1]\n\t\tif isTest(name, \"Test\") ||\n\t\t\tisTest(name, \"Benchmark\") ||\n\t\t\tisTest(name, \"Example\") {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn callers\n}\n\n// Stolen from the `go test` tool.\n// isTest tells whether name looks like a test (or benchmark, according to prefix).\n// It is a Test (say) if there is a character after Test that is not a lower-case letter.\n// We don't want TesticularCancer.\nfunc isTest(name, prefix string) bool {\n\tif !strings.HasPrefix(name, prefix) {\n\t\treturn false\n\t}\n\tif len(name) == len(prefix) { // \"Test\" is ok\n\t\treturn true\n\t}\n\trune, _ := utf8.DecodeRuneInString(name[len(prefix):])\n\treturn !unicode.IsLower(rune)\n}\n\n// getWhitespaceString returns a string that is long enough to overwrite the default\n// output from the go testing framework.\nfunc getWhitespaceString() string {\n\n\t_, file, line, ok := runtime.Caller(1)\n\tif !ok {\n\t\treturn \"\"\n\t}\n\tparts := strings.Split(file, \"/\")\n\tfile = parts[len(parts)-1]\n\n\treturn strings.Repeat(\" \", len(fmt.Sprintf(\"%s:%d:      \", file, line)))\n\n}\n\nfunc messageFromMsgAndArgs(msgAndArgs ...interface{}) string {\n\tif len(msgAndArgs) == 0 || msgAndArgs == nil {\n\t\treturn \"\"\n\t}\n\tif len(msgAndArgs) == 1 {\n\t\treturn msgAndArgs[0].(string)\n\t}\n\tif len(msgAndArgs) > 1 {\n\t\treturn fmt.Sprintf(msgAndArgs[0].(string), msgAndArgs[1:]...)\n\t}\n\treturn \"\"\n}\n\n// Indents all lines of the message by appending a number of tabs to each line, in an output format compatible with Go's\n// test printing (see inner comment for specifics)\nfunc indentMessageLines(message string, tabs int) string {\n\toutBuf := new(bytes.Buffer)\n\n\tfor i, scanner := 0, bufio.NewScanner(strings.NewReader(message)); scanner.Scan(); i++ {\n\t\tif i != 0 {\n\t\t\toutBuf.WriteRune('\\n')\n\t\t}\n\t\tfor ii := 0; ii < tabs; ii++ {\n\t\t\toutBuf.WriteRune('\\t')\n\t\t\t// Bizarrely, all lines except the first need one fewer tabs prepended, so deliberately advance the counter\n\t\t\t// by 1 prematurely.\n\t\t\tif ii == 0 && i > 0 {\n\t\t\t\tii++\n\t\t\t}\n\t\t}\n\t\toutBuf.WriteString(scanner.Text())\n\t}\n\n\treturn outBuf.String()\n}\n\ntype failNower interface {\n\tFailNow()\n}\n\n// FailNow fails test\nfunc FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {\n\tFail(t, failureMessage, msgAndArgs...)\n\n\t// We cannot extend TestingT with FailNow() and\n\t// maintain backwards compatibility, so we fallback\n\t// to panicking when FailNow is not available in\n\t// TestingT.\n\t// See issue #263\n\n\tif t, ok := t.(failNower); ok {\n\t\tt.FailNow()\n\t} else {\n\t\tpanic(\"test failed and t is missing `FailNow()`\")\n\t}\n\treturn false\n}\n\n// Fail reports a failure through\nfunc Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {\n\n\tmessage := 
messageFromMsgAndArgs(msgAndArgs...)\n\n\terrorTrace := strings.Join(CallerInfo(), \"\\n\\r\\t\\t\\t\")\n\tif len(message) > 0 {\n\t\tt.Errorf(\"\\r%s\\r\\tError Trace:\\t%s\\n\"+\n\t\t\t\"\\r\\tError:%s\\n\"+\n\t\t\t\"\\r\\tMessages:\\t%s\\n\\r\",\n\t\t\tgetWhitespaceString(),\n\t\t\terrorTrace,\n\t\t\tindentMessageLines(failureMessage, 2),\n\t\t\tmessage)\n\t} else {\n\t\tt.Errorf(\"\\r%s\\r\\tError Trace:\\t%s\\n\"+\n\t\t\t\"\\r\\tError:%s\\n\\r\",\n\t\t\tgetWhitespaceString(),\n\t\t\terrorTrace,\n\t\t\tindentMessageLines(failureMessage, 2))\n\t}\n\n\treturn false\n}\n\n// Implements asserts that an object implements the specified interface.\n//\n//    assert.Implements(t, (*MyInterface)(nil), new(MyObject), \"MyObject\")\nfunc Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {\n\n\tinterfaceType := reflect.TypeOf(interfaceObject).Elem()\n\n\tif !reflect.TypeOf(object).Implements(interfaceType) {\n\t\treturn Fail(t, fmt.Sprintf(\"%T must implement %v\", object, interfaceType), msgAndArgs...)\n\t}\n\n\treturn true\n\n}\n\n// IsType asserts that the specified objects are of the same type.\nfunc IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {\n\n\tif !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) {\n\t\treturn Fail(t, fmt.Sprintf(\"Object expected to be of type %v, but was %v\", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...)\n\t}\n\n\treturn true\n}\n\n// Equal asserts that two objects are equal.\n//\n//    assert.Equal(t, 123, 123, \"123 and 123 should be equal\")\n//\n// Returns whether the assertion was successful (true) or not (false).\nfunc Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {\n\n\tif !ObjectsAreEqual(expected, actual) {\n\t\tdiff := diff(expected, actual)\n\t\texpected, actual = formatUnequalValues(expected, actual)\n\t\treturn Fail(t, fmt.Sprintf(\"Not equal: %s (expected)\\n\"+\n\t\t\t\"        != %s (actual)%s\", expected, actual, diff), msgAndArgs...)\n\t}\n\n\treturn true\n\n}\n\n// formatUnequalValues takes two values of arbitrary types and returns string\n// representations appropriate to be presented to the user.\n//\n// If the values are not of like type, the returned strings will be prefixed\n// with the type name, and the value will be enclosed in parentheses similar\n// to a type conversion in the Go grammar.\nfunc formatUnequalValues(expected, actual interface{}) (e string, a string) {\n\taType := reflect.TypeOf(expected)\n\tbType := reflect.TypeOf(actual)\n\n\tif aType != bType && isNumericType(aType) && isNumericType(bType) {\n\t\treturn fmt.Sprintf(\"%v(%#v)\", aType, expected),\n\t\t\tfmt.Sprintf(\"%v(%#v)\", bType, actual)\n\t}\n\n\treturn fmt.Sprintf(\"%#v\", expected),\n\t\tfmt.Sprintf(\"%#v\", actual)\n}\n\nfunc isNumericType(t reflect.Type) bool {\n\tswitch t.Kind() {\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn true\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\treturn true\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn true\n\t}\n\n\treturn false\n}\n\n// EqualValues asserts that two objects are equal or convertible to the same types\n// and equal.\n//\n//    assert.EqualValues(t, uint32(123), int32(123), \"123 and 123 should be equal\")\n//\n// Returns whether the assertion was successful (true) or not (false).\nfunc EqualValues(t TestingT, expected, actual 
interface{}, msgAndArgs ...interface{}) bool {\n\n\tif !ObjectsAreEqualValues(expected, actual) {\n\t\treturn Fail(t, fmt.Sprintf(\"Not equal: %#v (expected)\\n\"+\n\t\t\t\"        != %#v (actual)\", expected, actual), msgAndArgs...)\n\t}\n\n\treturn true\n\n}\n\n// Exactly asserts that two objects are equal in value and type.\n//\n//    assert.Exactly(t, int32(123), int64(123), \"123 and 123 should NOT be equal\")\n//\n// Returns whether the assertion was successful (true) or not (false).\nfunc Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {\n\n\taType := reflect.TypeOf(expected)\n\tbType := reflect.TypeOf(actual)\n\n\tif aType != bType {\n\t\treturn Fail(t, fmt.Sprintf(\"Types expected to match exactly\\n\\r\\t%v != %v\", aType, bType), msgAndArgs...)\n\t}\n\n\treturn Equal(t, expected, actual, msgAndArgs...)\n\n}\n\n// NotNil asserts that the specified object is not nil.\n//\n//    assert.NotNil(t, err, \"err should be something\")\n//\n// Returns whether the assertion was successful (true) or not (false).\nfunc NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {\n\tif !isNil(object) {\n\t\treturn true\n\t}\n\treturn Fail(t, \"Expected value not to be nil.\", msgAndArgs...)\n}\n\n// isNil checks if a specified object is nil or not, without Failing.\nfunc isNil(object interface{}) bool {\n\tif object == nil {\n\t\treturn true\n\t}\n\n\tvalue := reflect.ValueOf(object)\n\tkind := value.Kind()\n\tif kind >= reflect.Chan && kind <= reflect.Slice && value.IsNil() {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\n// Nil asserts that the specified object is nil.\n//\n//    assert.Nil(t, err, \"err should be nothing\")\n//\n// Returns whether the assertion was successful (true) or not (false).\nfunc Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {\n\tif isNil(object) {\n\t\treturn true\n\t}\n\treturn Fail(t, fmt.Sprintf(\"Expected nil, but got: %#v\", object), msgAndArgs...)\n}\n\nvar numericZeros = []interface{}{\n\tint(0),\n\tint8(0),\n\tint16(0),\n\tint32(0),\n\tint64(0),\n\tuint(0),\n\tuint8(0),\n\tuint16(0),\n\tuint32(0),\n\tuint64(0),\n\tfloat32(0),\n\tfloat64(0),\n}\n\n// isEmpty gets whether the specified object is considered empty or not.\nfunc isEmpty(object interface{}) bool {\n\n\tif object == nil {\n\t\treturn true\n\t} else if object == \"\" {\n\t\treturn true\n\t} else if object == false {\n\t\treturn true\n\t}\n\n\tfor _, v := range numericZeros {\n\t\tif object == v {\n\t\t\treturn true\n\t\t}\n\t}\n\n\tobjValue := reflect.ValueOf(object)\n\n\tswitch objValue.Kind() {\n\tcase reflect.Map:\n\t\tfallthrough\n\tcase reflect.Slice, reflect.Chan:\n\t\t{\n\t\t\treturn (objValue.Len() == 0)\n\t\t}\n\tcase reflect.Struct:\n\t\tswitch object.(type) {\n\t\tcase time.Time:\n\t\t\treturn object.(time.Time).IsZero()\n\t\t}\n\tcase reflect.Ptr:\n\t\t{\n\t\t\tif objValue.IsNil() {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tswitch object.(type) {\n\t\t\tcase *time.Time:\n\t\t\t\treturn object.(*time.Time).IsZero()\n\t\t\tdefault:\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n\n// Empty asserts that the specified object is empty.  I.e. 
nil, \"\", false, 0 or either\n// a slice or a channel with len == 0.\n//\n//  assert.Empty(t, obj)\n//\n// Returns whether the assertion was successful (true) or not (false).\nfunc Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {\n\n\tpass := isEmpty(object)\n\tif !pass {\n\t\tFail(t, fmt.Sprintf(\"Should be empty, but was %v\", object), msgAndArgs...)\n\t}\n\n\treturn pass\n\n}\n\n// NotEmpty asserts that the specified object is NOT empty.  I.e. not nil, \"\", false, 0 or either\n// a slice or a channel with len == 0.\n//\n//  if assert.NotEmpty(t, obj) {\n//    assert.Equal(t, \"two\", obj[1])\n//  }\n//\n// Returns whether the assertion was successful (true) or not (false).\nfunc NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {\n\n\tpass := !isEmpty(object)\n\tif !pass {\n\t\tFail(t, fmt.Sprintf(\"Should NOT be empty, but was %v\", object), msgAndArgs...)\n\t}\n\n\treturn pass\n\n}\n\n// getLen try to get length of object.\n// return (false, 0) if impossible.\nfunc getLen(x interface{}) (ok bool, length int) {\n\tv := reflect.ValueOf(x)\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\tok = false\n\t\t}\n\t}()\n\treturn true, v.Len()\n}\n\n// Len asserts that the specified object has specific length.\n// Len also fails if the object has a type that len() not accept.\n//\n//    assert.Len(t, mySlice, 3, \"The size of slice is not 3\")\n//\n// Returns whether the assertion was successful (true) or not (false).\nfunc Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool {\n\tok, l := getLen(object)\n\tif !ok {\n\t\treturn Fail(t, fmt.Sprintf(\"\\\"%s\\\" could not be applied builtin len()\", object), msgAndArgs...)\n\t}\n\n\tif l != length {\n\t\treturn Fail(t, fmt.Sprintf(\"\\\"%s\\\" should have %d item(s), but has %d\", object, length, l), msgAndArgs...)\n\t}\n\treturn true\n}\n\n// True asserts that the specified value is true.\n//\n//    assert.True(t, myBool, \"myBool should be true\")\n//\n// Returns whether the assertion was successful (true) or not (false).\nfunc True(t TestingT, value bool, msgAndArgs ...interface{}) bool {\n\n\tif value != true {\n\t\treturn Fail(t, \"Should be true\", msgAndArgs...)\n\t}\n\n\treturn true\n\n}\n\n// False asserts that the specified value is false.\n//\n//    assert.False(t, myBool, \"myBool should be false\")\n//\n// Returns whether the assertion was successful (true) or not (false).\nfunc False(t TestingT, value bool, msgAndArgs ...interface{}) bool {\n\n\tif value != false {\n\t\treturn Fail(t, \"Should be false\", msgAndArgs...)\n\t}\n\n\treturn true\n\n}\n\n// NotEqual asserts that the specified values are NOT equal.\n//\n//    assert.NotEqual(t, obj1, obj2, \"two objects shouldn't be equal\")\n//\n// Returns whether the assertion was successful (true) or not (false).\nfunc NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {\n\n\tif ObjectsAreEqual(expected, actual) {\n\t\treturn Fail(t, fmt.Sprintf(\"Should not be: %#v\\n\", actual), msgAndArgs...)\n\t}\n\n\treturn true\n\n}\n\n// containsElement try loop over the list check if the list includes the element.\n// return (false, false) if impossible.\n// return (true, false) if element was not found.\n// return (true, true) if element was found.\nfunc includeElement(list interface{}, element interface{}) (ok, found bool) {\n\n\tlistValue := reflect.ValueOf(list)\n\telementValue := reflect.ValueOf(element)\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\tok = 
false\n\t\t\tfound = false\n\t\t}\n\t}()\n\n\tif reflect.TypeOf(list).Kind() == reflect.String {\n\t\treturn true, strings.Contains(listValue.String(), elementValue.String())\n\t}\n\n\tif reflect.TypeOf(list).Kind() == reflect.Map {\n\t\tmapKeys := listValue.MapKeys()\n\t\tfor i := 0; i < len(mapKeys); i++ {\n\t\t\tif ObjectsAreEqual(mapKeys[i].Interface(), element) {\n\t\t\t\treturn true, true\n\t\t\t}\n\t\t}\n\t\treturn true, false\n\t}\n\n\tfor i := 0; i < listValue.Len(); i++ {\n\t\tif ObjectsAreEqual(listValue.Index(i).Interface(), element) {\n\t\t\treturn true, true\n\t\t}\n\t}\n\treturn true, false\n\n}\n\n// Contains asserts that the specified string, list(array, slice...) or map contains the\n// specified substring or element.\n//\n//    assert.Contains(t, \"Hello World\", \"World\", \"But 'Hello World' does contain 'World'\")\n//    assert.Contains(t, [\"Hello\", \"World\"], \"World\", \"But [\"Hello\", \"World\"] does contain 'World'\")\n//    assert.Contains(t, {\"Hello\": \"World\"}, \"Hello\", \"But {'Hello': 'World'} does contain 'Hello'\")\n//\n// Returns whether the assertion was successful (true) or not (false).\nfunc Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {\n\n\tok, found := includeElement(s, contains)\n\tif !ok {\n\t\treturn Fail(t, fmt.Sprintf(\"\\\"%s\\\" could not be applied to builtin len()\", s), msgAndArgs...)\n\t}\n\tif !found {\n\t\treturn Fail(t, fmt.Sprintf(\"\\\"%s\\\" does not contain \\\"%s\\\"\", s, contains), msgAndArgs...)\n\t}\n\n\treturn true\n\n}\n\n// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the\n// specified substring or element.\n//\n//    assert.NotContains(t, \"Hello World\", \"Earth\", \"But 'Hello World' does NOT contain 'Earth'\")\n//    assert.NotContains(t, [\"Hello\", \"World\"], \"Earth\", \"But ['Hello', 'World'] does NOT contain 'Earth'\")\n//    assert.NotContains(t, {\"Hello\": \"World\"}, \"Earth\", \"But {'Hello': 'World'} does NOT contain 'Earth'\")\n//\n// Returns whether the assertion was successful (true) or not (false).\nfunc NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {\n\n\tok, found := includeElement(s, contains)\n\tif !ok {\n\t\treturn Fail(t, fmt.Sprintf(\"\\\"%s\\\" could not be applied to builtin len()\", s), msgAndArgs...)\n\t}\n\tif found {\n\t\treturn Fail(t, fmt.Sprintf(\"\\\"%s\\\" should not contain \\\"%s\\\"\", s, contains), msgAndArgs...)\n\t}\n\n\treturn true\n\n}\n\n// Condition uses a Comparison to assert a complex condition.\nfunc Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {\n\tresult := comp()\n\tif !result {\n\t\tFail(t, \"Condition failed!\", msgAndArgs...)\n\t}\n\treturn result\n}\n\n// PanicTestFunc defines a func that should be passed to the assert.Panics and assert.NotPanics\n// methods, and represents a simple func that takes no arguments, and returns nothing.\ntype PanicTestFunc func()\n\n// didPanic returns true if the function passed to it panics. 
Otherwise, it returns false.\nfunc didPanic(f PanicTestFunc) (bool, interface{}) {\n\n\tdidPanic := false\n\tvar message interface{}\n\tfunc() {\n\n\t\tdefer func() {\n\t\t\tif message = recover(); message != nil {\n\t\t\t\tdidPanic = true\n\t\t\t}\n\t\t}()\n\n\t\t// call the target function\n\t\tf()\n\n\t}()\n\n\treturn didPanic, message\n\n}\n\n// Panics asserts that the code inside the specified PanicTestFunc panics.\n//\n//   assert.Panics(t, func(){\n//     GoCrazy()\n//   }, \"Calling GoCrazy() should panic\")\n//\n// Returns whether the assertion was successful (true) or not (false).\nfunc Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {\n\n\tif funcDidPanic, panicValue := didPanic(f); !funcDidPanic {\n\t\treturn Fail(t, fmt.Sprintf(\"func %#v should panic\\n\\r\\tPanic value:\\t%v\", f, panicValue), msgAndArgs...)\n\t}\n\n\treturn true\n}\n\n// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.\n//\n//   assert.NotPanics(t, func(){\n//     RemainCalm()\n//   }, \"Calling RemainCalm() should NOT panic\")\n//\n// Returns whether the assertion was successful (true) or not (false).\nfunc NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {\n\n\tif funcDidPanic, panicValue := didPanic(f); funcDidPanic {\n\t\treturn Fail(t, fmt.Sprintf(\"func %#v should not panic\\n\\r\\tPanic value:\\t%v\", f, panicValue), msgAndArgs...)\n\t}\n\n\treturn true\n}\n\n// WithinDuration asserts that the two times are within duration delta of each other.\n//\n//   assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, \"The difference should not be more than 10s\")\n//\n// Returns whether the assertion was successful (true) or not (false).\nfunc WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {\n\n\tdt := expected.Sub(actual)\n\tif dt < -delta || dt > delta {\n\t\treturn Fail(t, fmt.Sprintf(\"Max difference between %v and %v allowed is %v, but difference was %v\", expected, actual, delta, dt), msgAndArgs...)\n\t}\n\n\treturn true\n}\n\nfunc toFloat(x interface{}) (float64, bool) {\n\tvar xf float64\n\txok := true\n\n\tswitch xn := x.(type) {\n\tcase uint8:\n\t\txf = float64(xn)\n\tcase uint16:\n\t\txf = float64(xn)\n\tcase uint32:\n\t\txf = float64(xn)\n\tcase uint64:\n\t\txf = float64(xn)\n\tcase int:\n\t\txf = float64(xn)\n\tcase int8:\n\t\txf = float64(xn)\n\tcase int16:\n\t\txf = float64(xn)\n\tcase int32:\n\t\txf = float64(xn)\n\tcase int64:\n\t\txf = float64(xn)\n\tcase float32:\n\t\txf = float64(xn)\n\tcase float64:\n\t\txf = float64(xn)\n\tdefault:\n\t\txok = false\n\t}\n\n\treturn xf, xok\n}\n\n// InDelta asserts that the two numerals are within delta of each other.\n//\n// \t assert.InDelta(t, math.Pi, (22 / 7.0), 0.01)\n//\n// Returns whether the assertion was successful (true) or not (false).\nfunc InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {\n\n\taf, aok := toFloat(expected)\n\tbf, bok := toFloat(actual)\n\n\tif !aok || !bok {\n\t\treturn Fail(t, \"Parameters must be numerical\", msgAndArgs...)\n\t}\n\n\tif math.IsNaN(af) {\n\t\treturn Fail(t, \"Expected must not be NaN\", msgAndArgs...)\n\t}\n\n\tif math.IsNaN(bf) {\n\t\treturn Fail(t, fmt.Sprintf(\"Expected %v with delta %v, but was NaN\", expected, delta), msgAndArgs...)\n\t}\n\n\tdt := af - bf\n\tif dt < -delta || dt > delta {\n\t\treturn Fail(t, fmt.Sprintf(\"Max difference between %v and %v allowed is %v, but difference 
was %v\", expected, actual, delta, dt), msgAndArgs...)\n\t}\n\n\treturn true\n}\n\n// InDeltaSlice is the same as InDelta, except it compares two slices.\nfunc InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool {\n\tif expected == nil || actual == nil ||\n\t\treflect.TypeOf(actual).Kind() != reflect.Slice ||\n\t\treflect.TypeOf(expected).Kind() != reflect.Slice {\n\t\treturn Fail(t, fmt.Sprintf(\"Parameters must be slice\"), msgAndArgs...)\n\t}\n\n\tactualSlice := reflect.ValueOf(actual)\n\texpectedSlice := reflect.ValueOf(expected)\n\n\tfor i := 0; i < actualSlice.Len(); i++ {\n\t\tresult := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta)\n\t\tif !result {\n\t\t\treturn result\n\t\t}\n\t}\n\n\treturn true\n}\n\nfunc calcRelativeError(expected, actual interface{}) (float64, error) {\n\taf, aok := toFloat(expected)\n\tif !aok {\n\t\treturn 0, fmt.Errorf(\"expected value %q cannot be converted to float\", expected)\n\t}\n\tif af == 0 {\n\t\treturn 0, fmt.Errorf(\"expected value must have a value other than zero to calculate the relative error\")\n\t}\n\tbf, bok := toFloat(actual)\n\tif !bok {\n\t\treturn 0, fmt.Errorf(\"expected value %q cannot be converted to float\", actual)\n\t}\n\n\treturn math.Abs(af-bf) / math.Abs(af), nil\n}\n\n// InEpsilon asserts that expected and actual have a relative error less than epsilon\n//\n// Returns whether the assertion was successful (true) or not (false).\nfunc InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {\n\tactualEpsilon, err := calcRelativeError(expected, actual)\n\tif err != nil {\n\t\treturn Fail(t, err.Error(), msgAndArgs...)\n\t}\n\tif actualEpsilon > epsilon {\n\t\treturn Fail(t, fmt.Sprintf(\"Relative error is too high: %#v (expected)\\n\"+\n\t\t\t\"        < %#v (actual)\", actualEpsilon, epsilon), msgAndArgs...)\n\t}\n\n\treturn true\n}\n\n// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices.\nfunc InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool {\n\tif expected == nil || actual == nil ||\n\t\treflect.TypeOf(actual).Kind() != reflect.Slice ||\n\t\treflect.TypeOf(expected).Kind() != reflect.Slice {\n\t\treturn Fail(t, fmt.Sprintf(\"Parameters must be slice\"), msgAndArgs...)\n\t}\n\n\tactualSlice := reflect.ValueOf(actual)\n\texpectedSlice := reflect.ValueOf(expected)\n\n\tfor i := 0; i < actualSlice.Len(); i++ {\n\t\tresult := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon)\n\t\tif !result {\n\t\t\treturn result\n\t\t}\n\t}\n\n\treturn true\n}\n\n/*\n\tErrors\n*/\n\n// NoError asserts that a function returned no error (i.e. `nil`).\n//\n//   actualObj, err := SomeFunction()\n//   if assert.NoError(t, err) {\n//\t   assert.Equal(t, actualObj, expectedObj)\n//   }\n//\n// Returns whether the assertion was successful (true) or not (false).\nfunc NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {\n\tif err != nil {\n\t\treturn Fail(t, fmt.Sprintf(\"Received unexpected error %+v\", err), msgAndArgs...)\n\t}\n\n\treturn true\n}\n\n// Error asserts that a function returned an error (i.e. 
not `nil`).\n//\n//   actualObj, err := SomeFunction()\n//   if assert.Error(t, err, \"An error was expected\") {\n//\t   assert.Equal(t, err, expectedError)\n//   }\n//\n// Returns whether the assertion was successful (true) or not (false).\nfunc Error(t TestingT, err error, msgAndArgs ...interface{}) bool {\n\n\tif err == nil {\n\t\treturn Fail(t, \"An error is expected but got nil.\", msgAndArgs...)\n\t}\n\n\treturn true\n}\n\n// EqualError asserts that a function returned an error (i.e. not `nil`)\n// and that it is equal to the provided error.\n//\n//   actualObj, err := SomeFunction()\n//   assert.EqualError(t, err,  expectedErrorString, \"An error was expected\")\n//\n// Returns whether the assertion was successful (true) or not (false).\nfunc EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool {\n\n\tmessage := messageFromMsgAndArgs(msgAndArgs...)\n\tif !NotNil(t, theError, \"An error is expected but got nil. %s\", message) {\n\t\treturn false\n\t}\n\ts := \"An error with value \\\"%s\\\" is expected but got \\\"%s\\\". %s\"\n\treturn Equal(t, errString, theError.Error(),\n\t\ts, errString, theError.Error(), message)\n}\n\n// matchRegexp returns true if a specified regexp matches a string.\nfunc matchRegexp(rx interface{}, str interface{}) bool {\n\n\tvar r *regexp.Regexp\n\tif rr, ok := rx.(*regexp.Regexp); ok {\n\t\tr = rr\n\t} else {\n\t\tr = regexp.MustCompile(fmt.Sprint(rx))\n\t}\n\n\treturn (r.FindStringIndex(fmt.Sprint(str)) != nil)\n\n}\n\n// Regexp asserts that a specified regexp matches a string.\n//\n//  assert.Regexp(t, regexp.MustCompile(\"start\"), \"it's starting\")\n//  assert.Regexp(t, \"start...$\", \"it's not starting\")\n//\n// Returns whether the assertion was successful (true) or not (false).\nfunc Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {\n\n\tmatch := matchRegexp(rx, str)\n\n\tif !match {\n\t\tFail(t, fmt.Sprintf(\"Expect \\\"%v\\\" to match \\\"%v\\\"\", str, rx), msgAndArgs...)\n\t}\n\n\treturn match\n}\n\n// NotRegexp asserts that a specified regexp does not match a string.\n//\n//  assert.NotRegexp(t, regexp.MustCompile(\"starts\"), \"it's starting\")\n//  assert.NotRegexp(t, \"^start\", \"it's not starting\")\n//\n// Returns whether the assertion was successful (true) or not (false).\nfunc NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) bool {\n\tmatch := matchRegexp(rx, str)\n\n\tif match {\n\t\tFail(t, fmt.Sprintf(\"Expect \\\"%v\\\" to NOT match \\\"%v\\\"\", str, rx), msgAndArgs...)\n\t}\n\n\treturn !match\n\n}\n\n// Zero asserts that i is the zero value for its type and returns the truth.\nfunc Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {\n\tif i != nil && !reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) {\n\t\treturn Fail(t, fmt.Sprintf(\"Should be zero, but was %v\", i), msgAndArgs...)\n\t}\n\treturn true\n}\n\n// NotZero asserts that i is not the zero value for its type and returns the truth.\nfunc NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) bool {\n\tif i == nil || reflect.DeepEqual(i, reflect.Zero(reflect.TypeOf(i)).Interface()) {\n\t\treturn Fail(t, fmt.Sprintf(\"Should not be zero, but was %v\", i), msgAndArgs...)\n\t}\n\treturn true\n}\n\n// JSONEq asserts that two JSON strings are equivalent.\n//\n//  assert.JSONEq(t, `{\"hello\": \"world\", \"foo\": \"bar\"}`, `{\"foo\": \"bar\", \"hello\": \"world\"}`)\n//\n// Returns whether the assertion was successful (true) or not 
(false).\nfunc JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) bool {\n\tvar expectedJSONAsInterface, actualJSONAsInterface interface{}\n\n\tif err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil {\n\t\treturn Fail(t, fmt.Sprintf(\"Expected value ('%s') is not valid json.\\nJSON parsing error: '%s'\", expected, err.Error()), msgAndArgs...)\n\t}\n\n\tif err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil {\n\t\treturn Fail(t, fmt.Sprintf(\"Input ('%s') needs to be valid json.\\nJSON parsing error: '%s'\", actual, err.Error()), msgAndArgs...)\n\t}\n\n\treturn Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...)\n}\n\nfunc typeAndKind(v interface{}) (reflect.Type, reflect.Kind) {\n\tt := reflect.TypeOf(v)\n\tk := t.Kind()\n\n\tif k == reflect.Ptr {\n\t\tt = t.Elem()\n\t\tk = t.Kind()\n\t}\n\treturn t, k\n}\n\n// diff returns a diff of both values as long as both are of the same type and\n// are a struct, map, slice or array. Otherwise it returns an empty string.\nfunc diff(expected interface{}, actual interface{}) string {\n\tif expected == nil || actual == nil {\n\t\treturn \"\"\n\t}\n\n\tet, ek := typeAndKind(expected)\n\tat, _ := typeAndKind(actual)\n\n\tif et != at {\n\t\treturn \"\"\n\t}\n\n\tif ek != reflect.Struct && ek != reflect.Map && ek != reflect.Slice && ek != reflect.Array {\n\t\treturn \"\"\n\t}\n\n\te := spew.Sdump(expected)\n\ta := spew.Sdump(actual)\n\n\tdiff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{\n\t\tA:        difflib.SplitLines(e),\n\t\tB:        difflib.SplitLines(a),\n\t\tFromFile: \"Expected\",\n\t\tFromDate: \"\",\n\t\tToFile:   \"Actual\",\n\t\tToDate:   \"\",\n\t\tContext:  1,\n\t})\n\n\treturn \"\\n\\nDiff:\\n\" + diff\n}\n"
  },
  {
    "path": "vendor/github.com/stretchr/testify/assert/assertions_test.go",
    "content": "package assert\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"math\"\n\t\"os\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"testing\"\n\t\"time\"\n)\n\nvar (\n\ti     interface{}\n\tzeros = []interface{}{\n\t\tfalse,\n\t\tbyte(0),\n\t\tcomplex64(0),\n\t\tcomplex128(0),\n\t\tfloat32(0),\n\t\tfloat64(0),\n\t\tint(0),\n\t\tint8(0),\n\t\tint16(0),\n\t\tint32(0),\n\t\tint64(0),\n\t\trune(0),\n\t\tuint(0),\n\t\tuint8(0),\n\t\tuint16(0),\n\t\tuint32(0),\n\t\tuint64(0),\n\t\tuintptr(0),\n\t\t\"\",\n\t\t[0]interface{}{},\n\t\t[]interface{}(nil),\n\t\tstruct{ x int }{},\n\t\t(*interface{})(nil),\n\t\t(func())(nil),\n\t\tnil,\n\t\tinterface{}(nil),\n\t\tmap[interface{}]interface{}(nil),\n\t\t(chan interface{})(nil),\n\t\t(<-chan interface{})(nil),\n\t\t(chan<- interface{})(nil),\n\t}\n\tnonZeros = []interface{}{\n\t\ttrue,\n\t\tbyte(1),\n\t\tcomplex64(1),\n\t\tcomplex128(1),\n\t\tfloat32(1),\n\t\tfloat64(1),\n\t\tint(1),\n\t\tint8(1),\n\t\tint16(1),\n\t\tint32(1),\n\t\tint64(1),\n\t\trune(1),\n\t\tuint(1),\n\t\tuint8(1),\n\t\tuint16(1),\n\t\tuint32(1),\n\t\tuint64(1),\n\t\tuintptr(1),\n\t\t\"s\",\n\t\t[1]interface{}{1},\n\t\t[]interface{}{},\n\t\tstruct{ x int }{1},\n\t\t(*interface{})(&i),\n\t\t(func())(func() {}),\n\t\tinterface{}(1),\n\t\tmap[interface{}]interface{}{},\n\t\t(chan interface{})(make(chan interface{})),\n\t\t(<-chan interface{})(make(chan interface{})),\n\t\t(chan<- interface{})(make(chan interface{})),\n\t}\n)\n\n// AssertionTesterInterface defines an interface to be used for testing assertion methods\ntype AssertionTesterInterface interface {\n\tTestMethod()\n}\n\n// AssertionTesterConformingObject is an object that conforms to the AssertionTesterInterface interface\ntype AssertionTesterConformingObject struct {\n}\n\nfunc (a *AssertionTesterConformingObject) TestMethod() {\n}\n\n// AssertionTesterNonConformingObject is an object that does not conform to the AssertionTesterInterface interface\ntype AssertionTesterNonConformingObject struct {\n}\n\nfunc TestObjectsAreEqual(t *testing.T) {\n\n\tif !ObjectsAreEqual(\"Hello World\", \"Hello World\") {\n\t\tt.Error(\"objectsAreEqual should return true\")\n\t}\n\tif !ObjectsAreEqual(123, 123) {\n\t\tt.Error(\"objectsAreEqual should return true\")\n\t}\n\tif !ObjectsAreEqual(123.5, 123.5) {\n\t\tt.Error(\"objectsAreEqual should return true\")\n\t}\n\tif !ObjectsAreEqual([]byte(\"Hello World\"), []byte(\"Hello World\")) {\n\t\tt.Error(\"objectsAreEqual should return true\")\n\t}\n\tif !ObjectsAreEqual(nil, nil) {\n\t\tt.Error(\"objectsAreEqual should return true\")\n\t}\n\tif ObjectsAreEqual(map[int]int{5: 10}, map[int]int{10: 20}) {\n\t\tt.Error(\"objectsAreEqual should return false\")\n\t}\n\tif ObjectsAreEqual('x', \"x\") {\n\t\tt.Error(\"objectsAreEqual should return false\")\n\t}\n\tif ObjectsAreEqual(\"x\", 'x') {\n\t\tt.Error(\"objectsAreEqual should return false\")\n\t}\n\tif ObjectsAreEqual(0, 0.1) {\n\t\tt.Error(\"objectsAreEqual should return false\")\n\t}\n\tif ObjectsAreEqual(0.1, 0) {\n\t\tt.Error(\"objectsAreEqual should return false\")\n\t}\n\tif ObjectsAreEqual(uint32(10), int32(10)) {\n\t\tt.Error(\"objectsAreEqual should return false\")\n\t}\n\tif !ObjectsAreEqualValues(uint32(10), int32(10)) {\n\t\tt.Error(\"ObjectsAreEqualValues should return true\")\n\t}\n\tif ObjectsAreEqualValues(0, nil) {\n\t\tt.Fail()\n\t}\n\tif ObjectsAreEqualValues(nil, 0) {\n\t\tt.Fail()\n\t}\n\n}\n\nfunc TestImplements(t *testing.T) {\n\n\tmockT := new(testing.T)\n\n\tif !Implements(mockT, (*AssertionTesterInterface)(nil), 
new(AssertionTesterConformingObject)) {\n\t\tt.Error(\"Implements method should return true: AssertionTesterConformingObject implements AssertionTesterInterface\")\n\t}\n\tif Implements(mockT, (*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) {\n\t\tt.Error(\"Implements method should return false: AssertionTesterNonConformingObject does not implement AssertionTesterInterface\")\n\t}\n\n}\n\nfunc TestIsType(t *testing.T) {\n\n\tmockT := new(testing.T)\n\n\tif !IsType(mockT, new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) {\n\t\tt.Error(\"IsType should return true: AssertionTesterConformingObject is the same type as AssertionTesterConformingObject\")\n\t}\n\tif IsType(mockT, new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) {\n\t\tt.Error(\"IsType should return false: AssertionTesterConformingObject is not the same type as AssertionTesterNonConformingObject\")\n\t}\n\n}\n\nfunc TestEqual(t *testing.T) {\n\n\tmockT := new(testing.T)\n\n\tif !Equal(mockT, \"Hello World\", \"Hello World\") {\n\t\tt.Error(\"Equal should return true\")\n\t}\n\tif !Equal(mockT, 123, 123) {\n\t\tt.Error(\"Equal should return true\")\n\t}\n\tif !Equal(mockT, 123.5, 123.5) {\n\t\tt.Error(\"Equal should return true\")\n\t}\n\tif !Equal(mockT, []byte(\"Hello World\"), []byte(\"Hello World\")) {\n\t\tt.Error(\"Equal should return true\")\n\t}\n\tif !Equal(mockT, nil, nil) {\n\t\tt.Error(\"Equal should return true\")\n\t}\n\tif !Equal(mockT, int32(123), int32(123)) {\n\t\tt.Error(\"Equal should return true\")\n\t}\n\tif !Equal(mockT, uint64(123), uint64(123)) {\n\t\tt.Error(\"Equal should return true\")\n\t}\n\n}\n\nfunc TestFormatUnequalValues(t *testing.T) {\n\texpected, actual := formatUnequalValues(\"foo\", \"bar\")\n\tEqual(t, `\"foo\"`, expected, \"value should not include type\")\n\tEqual(t, `\"bar\"`, actual, \"value should not include type\")\n\n\texpected, actual = formatUnequalValues(123, 123)\n\tEqual(t, `123`, expected, \"value should not include type\")\n\tEqual(t, `123`, actual, \"value should not include type\")\n\n\texpected, actual = formatUnequalValues(int64(123), int32(123))\n\tEqual(t, `int64(123)`, expected, \"value should include type\")\n\tEqual(t, `int32(123)`, actual, \"value should include type\")\n\n\ttype testStructType struct {\n\t\tVal string\n\t}\n\n\texpected, actual = formatUnequalValues(&testStructType{Val: \"test\"}, &testStructType{Val: \"test\"})\n\tEqual(t, `&assert.testStructType{Val:\"test\"}`, expected, \"value should not include type annotation\")\n\tEqual(t, `&assert.testStructType{Val:\"test\"}`, actual, \"value should not include type annotation\")\n}\n\nfunc TestNotNil(t *testing.T) {\n\n\tmockT := new(testing.T)\n\n\tif !NotNil(mockT, new(AssertionTesterConformingObject)) {\n\t\tt.Error(\"NotNil should return true: object is not nil\")\n\t}\n\tif NotNil(mockT, nil) {\n\t\tt.Error(\"NotNil should return false: object is nil\")\n\t}\n\tif NotNil(mockT, (*struct{})(nil)) {\n\t\tt.Error(\"NotNil should return false: object is (*struct{})(nil)\")\n\t}\n\n}\n\nfunc TestNil(t *testing.T) {\n\n\tmockT := new(testing.T)\n\n\tif !Nil(mockT, nil) {\n\t\tt.Error(\"Nil should return true: object is nil\")\n\t}\n\tif !Nil(mockT, (*struct{})(nil)) {\n\t\tt.Error(\"Nil should return true: object is (*struct{})(nil)\")\n\t}\n\tif Nil(mockT, new(AssertionTesterConformingObject)) {\n\t\tt.Error(\"Nil should return false: object is not nil\")\n\t}\n\n}\n\nfunc TestTrue(t *testing.T) {\n\n\tmockT := 
new(testing.T)\n\n\tif !True(mockT, true) {\n\t\tt.Error(\"True should return true\")\n\t}\n\tif True(mockT, false) {\n\t\tt.Error(\"True should return false\")\n\t}\n\n}\n\nfunc TestFalse(t *testing.T) {\n\n\tmockT := new(testing.T)\n\n\tif !False(mockT, false) {\n\t\tt.Error(\"False should return true\")\n\t}\n\tif False(mockT, true) {\n\t\tt.Error(\"False should return false\")\n\t}\n\n}\n\nfunc TestExactly(t *testing.T) {\n\n\tmockT := new(testing.T)\n\n\ta := float32(1)\n\tb := float64(1)\n\tc := float32(1)\n\td := float32(2)\n\n\tif Exactly(mockT, a, b) {\n\t\tt.Error(\"Exactly should return false\")\n\t}\n\tif Exactly(mockT, a, d) {\n\t\tt.Error(\"Exactly should return false\")\n\t}\n\tif !Exactly(mockT, a, c) {\n\t\tt.Error(\"Exactly should return true\")\n\t}\n\n\tif Exactly(mockT, nil, a) {\n\t\tt.Error(\"Exactly should return false\")\n\t}\n\tif Exactly(mockT, a, nil) {\n\t\tt.Error(\"Exactly should return false\")\n\t}\n\n}\n\nfunc TestNotEqual(t *testing.T) {\n\n\tmockT := new(testing.T)\n\n\tif !NotEqual(mockT, \"Hello World\", \"Hello World!\") {\n\t\tt.Error(\"NotEqual should return true\")\n\t}\n\tif !NotEqual(mockT, 123, 1234) {\n\t\tt.Error(\"NotEqual should return true\")\n\t}\n\tif !NotEqual(mockT, 123.5, 123.55) {\n\t\tt.Error(\"NotEqual should return true\")\n\t}\n\tif !NotEqual(mockT, []byte(\"Hello World\"), []byte(\"Hello World!\")) {\n\t\tt.Error(\"NotEqual should return true\")\n\t}\n\tif !NotEqual(mockT, nil, new(AssertionTesterConformingObject)) {\n\t\tt.Error(\"NotEqual should return true\")\n\t}\n\tfuncA := func() int { return 23 }\n\tfuncB := func() int { return 42 }\n\tif !NotEqual(mockT, funcA, funcB) {\n\t\tt.Error(\"NotEqual should return true\")\n\t}\n\n\tif NotEqual(mockT, \"Hello World\", \"Hello World\") {\n\t\tt.Error(\"NotEqual should return false\")\n\t}\n\tif NotEqual(mockT, 123, 123) {\n\t\tt.Error(\"NotEqual should return false\")\n\t}\n\tif NotEqual(mockT, 123.5, 123.5) {\n\t\tt.Error(\"NotEqual should return false\")\n\t}\n\tif NotEqual(mockT, []byte(\"Hello World\"), []byte(\"Hello World\")) {\n\t\tt.Error(\"NotEqual should return false\")\n\t}\n\tif NotEqual(mockT, new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) {\n\t\tt.Error(\"NotEqual should return false\")\n\t}\n}\n\ntype A struct {\n\tName, Value string\n}\n\nfunc TestContains(t *testing.T) {\n\n\tmockT := new(testing.T)\n\tlist := []string{\"Foo\", \"Bar\"}\n\tcomplexList := []*A{\n\t\t{\"b\", \"c\"},\n\t\t{\"d\", \"e\"},\n\t\t{\"g\", \"h\"},\n\t\t{\"j\", \"k\"},\n\t}\n\tsimpleMap := map[interface{}]interface{}{\"Foo\": \"Bar\"}\n\n\tif !Contains(mockT, \"Hello World\", \"Hello\") {\n\t\tt.Error(\"Contains should return true: \\\"Hello World\\\" contains \\\"Hello\\\"\")\n\t}\n\tif Contains(mockT, \"Hello World\", \"Salut\") {\n\t\tt.Error(\"Contains should return false: \\\"Hello World\\\" does not contain \\\"Salut\\\"\")\n\t}\n\n\tif !Contains(mockT, list, \"Bar\") {\n\t\tt.Error(\"Contains should return true: \\\"[\\\"Foo\\\", \\\"Bar\\\"]\\\" contains \\\"Bar\\\"\")\n\t}\n\tif Contains(mockT, list, \"Salut\") {\n\t\tt.Error(\"Contains should return false: \\\"[\\\"Foo\\\", \\\"Bar\\\"]\\\" does not contain \\\"Salut\\\"\")\n\t}\n\tif !Contains(mockT, complexList, &A{\"g\", \"h\"}) {\n\t\tt.Error(\"Contains should return true: complexList contains {\\\"g\\\", \\\"h\\\"}\")\n\t}\n\tif Contains(mockT, complexList, &A{\"g\", \"e\"}) {\n\t\tt.Error(\"Contains should return false: complexList contains {\\\"g\\\", \\\"e\\\"}\")\n\t}\n\tif !Contains(mockT, simpleMap, \"Foo\") {\n\t\tt.Error(\"Contains should return true: \\\"{\\\"Foo\\\": \\\"Bar\\\"}\\\" contains \\\"Foo\\\"\")\n\t}\n\tif Contains(mockT, simpleMap, \"Bar\") {\n\t\tt.Error(\"Contains should return false: \\\"{\\\"Foo\\\": \\\"Bar\\\"}\\\" does not contain \\\"Bar\\\"\")\n\t}\n}\n\nfunc TestNotContains(t *testing.T) {\n\n\tmockT := new(testing.T)\n\tlist := []string{\"Foo\", \"Bar\"}\n\tsimpleMap := map[interface{}]interface{}{\"Foo\": \"Bar\"}\n\n\tif !NotContains(mockT, \"Hello World\", \"Hello!\") {\n\t\tt.Error(\"NotContains should return true: \\\"Hello World\\\" does not contain \\\"Hello!\\\"\")\n\t}\n\tif NotContains(mockT, \"Hello World\", \"Hello\") {\n\t\tt.Error(\"NotContains should return false: \\\"Hello World\\\" contains \\\"Hello\\\"\")\n\t}\n\n\tif !NotContains(mockT, list, \"Foo!\") {\n\t\tt.Error(\"NotContains should return true: \\\"[\\\"Foo\\\", \\\"Bar\\\"]\\\" does not contain \\\"Foo!\\\"\")\n\t}\n\tif NotContains(mockT, list, \"Foo\") {\n\t\tt.Error(\"NotContains should return false: \\\"[\\\"Foo\\\", \\\"Bar\\\"]\\\" contains \\\"Foo\\\"\")\n\t}\n\tif NotContains(mockT, simpleMap, \"Foo\") {\n\t\tt.Error(\"NotContains should return false: \\\"{\\\"Foo\\\": \\\"Bar\\\"}\\\" contains \\\"Foo\\\"\")\n\t}\n\tif !NotContains(mockT, simpleMap, \"Bar\") {\n\t\tt.Error(\"NotContains should return true: \\\"{\\\"Foo\\\": \\\"Bar\\\"}\\\" does not contain \\\"Bar\\\"\")\n\t}\n}\n\nfunc Test_includeElement(t *testing.T) {\n\n\tlist1 := []string{\"Foo\", \"Bar\"}\n\tlist2 := []int{1, 2}\n\tsimpleMap := map[interface{}]interface{}{\"Foo\": \"Bar\"}\n\n\tok, found := includeElement(\"Hello World\", \"World\")\n\tTrue(t, ok)\n\tTrue(t, found)\n\n\tok, found = includeElement(list1, \"Foo\")\n\tTrue(t, ok)\n\tTrue(t, found)\n\n\tok, found = includeElement(list1, \"Bar\")\n\tTrue(t, ok)\n\tTrue(t, found)\n\n\tok, found = includeElement(list2, 1)\n\tTrue(t, ok)\n\tTrue(t, found)\n\n\tok, found = includeElement(list2, 2)\n\tTrue(t, ok)\n\tTrue(t, found)\n\n\tok, found = includeElement(list1, \"Foo!\")\n\tTrue(t, ok)\n\tFalse(t, found)\n\n\tok, found = includeElement(list2, 3)\n\tTrue(t, ok)\n\tFalse(t, found)\n\n\tok, found = includeElement(list2, \"1\")\n\tTrue(t, ok)\n\tFalse(t, found)\n\n\tok, found = includeElement(simpleMap, \"Foo\")\n\tTrue(t, ok)\n\tTrue(t, found)\n\n\tok, found = includeElement(simpleMap, \"Bar\")\n\tTrue(t, ok)\n\tFalse(t, found)\n\n\tok, found = includeElement(1433, \"1\")\n\tFalse(t, ok)\n\tFalse(t, found)\n}\n\nfunc TestCondition(t *testing.T) {\n\tmockT := new(testing.T)\n\n\tif !Condition(mockT, func() bool { return true }, \"Truth\") {\n\t\tt.Error(\"Condition should return true\")\n\t}\n\n\tif Condition(mockT, func() bool { return false }, \"Lie\") {\n\t\tt.Error(\"Condition should return false\")\n\t}\n\n}\n\nfunc TestDidPanic(t *testing.T) {\n\n\tif funcDidPanic, _ := didPanic(func() {\n\t\tpanic(\"Panic!\")\n\t}); !funcDidPanic {\n\t\tt.Error(\"didPanic should return true\")\n\t}\n\n\tif funcDidPanic, _ := didPanic(func() {\n\t}); funcDidPanic {\n\t\tt.Error(\"didPanic should return false\")\n\t}\n\n}\n\nfunc TestPanics(t *testing.T) {\n\n\tmockT := new(testing.T)\n\n\tif !Panics(mockT, func() {\n\t\tpanic(\"Panic!\")\n\t}) {\n\t\tt.Error(\"Panics should return true\")\n\t}\n\n\tif Panics(mockT, func() {\n\t}) {\n\t\tt.Error(\"Panics should return false\")\n\t}\n\n}\n\nfunc 
TestNotPanics(t *testing.T) {\n\n\tmockT := new(testing.T)\n\n\tif !NotPanics(mockT, func() {\n\t}) {\n\t\tt.Error(\"NotPanics should return true\")\n\t}\n\n\tif NotPanics(mockT, func() {\n\t\tpanic(\"Panic!\")\n\t}) {\n\t\tt.Error(\"NotPanics should return false\")\n\t}\n\n}\n\nfunc TestNoError(t *testing.T) {\n\n\tmockT := new(testing.T)\n\n\t// start with a nil error\n\tvar err error\n\n\tTrue(t, NoError(mockT, err), \"NoError should return True for nil arg\")\n\n\t// now set an error\n\terr = errors.New(\"some error\")\n\n\tFalse(t, NoError(mockT, err), \"NoError with error should return False\")\n\n\t// returning an empty error interface\n\terr = func() error {\n\t\tvar err *customError\n\t\tif err != nil {\n\t\t\tt.Fatal(\"err should be nil here\")\n\t\t}\n\t\treturn err\n\t}()\n\n\tif err == nil { // err is not nil here!\n\t\tt.Errorf(\"Error should be nil due to empty interface: %v\", err)\n\t}\n\n\tFalse(t, NoError(mockT, err), \"NoError should fail with empty error interface\")\n}\n\ntype customError struct{}\n\nfunc (*customError) Error() string { return \"fail\" }\n\nfunc TestError(t *testing.T) {\n\n\tmockT := new(testing.T)\n\n\t// start with a nil error\n\tvar err error\n\n\tFalse(t, Error(mockT, err), \"Error should return False for nil arg\")\n\n\t// now set an error\n\terr = errors.New(\"some error\")\n\n\tTrue(t, Error(mockT, err), \"Error with error should return True\")\n\n\t// returning an empty error interface\n\terr = func() error {\n\t\tvar err *customError\n\t\tif err != nil {\n\t\t\tt.Fatal(\"err should be nil here\")\n\t\t}\n\t\treturn err\n\t}()\n\n\tif err == nil { // err is not nil here!\n\t\tt.Errorf(\"Error should be nil due to empty interface: %v\", err)\n\t}\n\n\tTrue(t, Error(mockT, err), \"Error should pass with empty error interface\")\n}\n\nfunc TestEqualError(t *testing.T) {\n\tmockT := new(testing.T)\n\n\t// start with a nil error\n\tvar err error\n\tFalse(t, EqualError(mockT, err, \"\"),\n\t\t\"EqualError should return false for nil arg\")\n\n\t// now set an error\n\terr = errors.New(\"some error\")\n\tFalse(t, EqualError(mockT, err, \"Not some error\"),\n\t\t\"EqualError should return false for different error string\")\n\tTrue(t, EqualError(mockT, err, \"some error\"),\n\t\t\"EqualError should return true\")\n}\n\nfunc Test_isEmpty(t *testing.T) {\n\n\tchWithValue := make(chan struct{}, 1)\n\tchWithValue <- struct{}{}\n\n\tTrue(t, isEmpty(\"\"))\n\tTrue(t, isEmpty(nil))\n\tTrue(t, isEmpty([]string{}))\n\tTrue(t, isEmpty(0))\n\tTrue(t, isEmpty(int32(0)))\n\tTrue(t, isEmpty(int64(0)))\n\tTrue(t, isEmpty(false))\n\tTrue(t, isEmpty(map[string]string{}))\n\tTrue(t, isEmpty(new(time.Time)))\n\tTrue(t, isEmpty(time.Time{}))\n\tTrue(t, isEmpty(make(chan struct{})))\n\tFalse(t, isEmpty(\"something\"))\n\tFalse(t, isEmpty(errors.New(\"something\")))\n\tFalse(t, isEmpty([]string{\"something\"}))\n\tFalse(t, isEmpty(1))\n\tFalse(t, isEmpty(true))\n\tFalse(t, isEmpty(map[string]string{\"Hello\": \"World\"}))\n\tFalse(t, isEmpty(chWithValue))\n\n}\n\nfunc TestEmpty(t *testing.T) {\n\n\tmockT := new(testing.T)\n\tchWithValue := make(chan struct{}, 1)\n\tchWithValue <- struct{}{}\n\tvar tiP *time.Time\n\tvar tiNP time.Time\n\tvar s *string\n\tvar f *os.File\n\n\tTrue(t, Empty(mockT, \"\"), \"Empty string is empty\")\n\tTrue(t, Empty(mockT, nil), \"Nil is empty\")\n\tTrue(t, Empty(mockT, []string{}), \"Empty string array is empty\")\n\tTrue(t, Empty(mockT, 0), \"Zero int value is empty\")\n\tTrue(t, Empty(mockT, false), \"False value is empty\")\n\tTrue(t, Empty(mockT, 
make(chan struct{})), \"Channel without values is empty\")\n\tTrue(t, Empty(mockT, s), \"Nil string pointer is empty\")\n\tTrue(t, Empty(mockT, f), \"Nil os.File pointer is empty\")\n\tTrue(t, Empty(mockT, tiP), \"Nil time.Time pointer is empty\")\n\tTrue(t, Empty(mockT, tiNP), \"time.Time is empty\")\n\n\tFalse(t, Empty(mockT, \"something\"), \"Non Empty string is not empty\")\n\tFalse(t, Empty(mockT, errors.New(\"something\")), \"Non nil object is not empty\")\n\tFalse(t, Empty(mockT, []string{\"something\"}), \"Non empty string array is not empty\")\n\tFalse(t, Empty(mockT, 1), \"Non-zero int value is not empty\")\n\tFalse(t, Empty(mockT, true), \"True value is not empty\")\n\tFalse(t, Empty(mockT, chWithValue), \"Channel with values is not empty\")\n}\n\nfunc TestNotEmpty(t *testing.T) {\n\n\tmockT := new(testing.T)\n\tchWithValue := make(chan struct{}, 1)\n\tchWithValue <- struct{}{}\n\n\tFalse(t, NotEmpty(mockT, \"\"), \"Empty string is empty\")\n\tFalse(t, NotEmpty(mockT, nil), \"Nil is empty\")\n\tFalse(t, NotEmpty(mockT, []string{}), \"Empty string array is empty\")\n\tFalse(t, NotEmpty(mockT, 0), \"Zero int value is empty\")\n\tFalse(t, NotEmpty(mockT, false), \"False value is empty\")\n\tFalse(t, NotEmpty(mockT, make(chan struct{})), \"Channel without values is empty\")\n\n\tTrue(t, NotEmpty(mockT, \"something\"), \"Non Empty string is not empty\")\n\tTrue(t, NotEmpty(mockT, errors.New(\"something\")), \"Non nil object is not empty\")\n\tTrue(t, NotEmpty(mockT, []string{\"something\"}), \"Non empty string array is not empty\")\n\tTrue(t, NotEmpty(mockT, 1), \"Non-zero int value is not empty\")\n\tTrue(t, NotEmpty(mockT, true), \"True value is not empty\")\n\tTrue(t, NotEmpty(mockT, chWithValue), \"Channel with values is not empty\")\n}\n\nfunc Test_getLen(t *testing.T) {\n\tfalseCases := []interface{}{\n\t\tnil,\n\t\t0,\n\t\ttrue,\n\t\tfalse,\n\t\t'A',\n\t\tstruct{}{},\n\t}\n\tfor _, v := range falseCases {\n\t\tok, l := getLen(v)\n\t\tFalse(t, ok, \"Expected getLen to fail to get length of %#v\", v)\n\t\tEqual(t, 0, l, \"getLen should return 0 for %#v\", v)\n\t}\n\n\tch := make(chan int, 5)\n\tch <- 1\n\tch <- 2\n\tch <- 3\n\ttrueCases := []struct {\n\t\tv interface{}\n\t\tl int\n\t}{\n\t\t{[]int{1, 2, 3}, 3},\n\t\t{[...]int{1, 2, 3}, 3},\n\t\t{\"ABC\", 3},\n\t\t{map[int]int{1: 2, 2: 4, 3: 6}, 3},\n\t\t{ch, 3},\n\n\t\t{[]int{}, 0},\n\t\t{map[int]int{}, 0},\n\t\t{make(chan int), 0},\n\n\t\t{[]int(nil), 0},\n\t\t{map[int]int(nil), 0},\n\t\t{(chan int)(nil), 0},\n\t}\n\n\tfor _, c := range trueCases {\n\t\tok, l := getLen(c.v)\n\t\tTrue(t, ok, \"Expected getLen to succeed in getting length of %#v\", c.v)\n\t\tEqual(t, c.l, l)\n\t}\n}\n\nfunc TestLen(t *testing.T) {\n\tmockT := new(testing.T)\n\n\tFalse(t, Len(mockT, nil, 0), \"nil does not have length\")\n\tFalse(t, Len(mockT, 0, 0), \"int does not have length\")\n\tFalse(t, Len(mockT, true, 0), \"true does not have length\")\n\tFalse(t, Len(mockT, false, 0), \"false does not have length\")\n\tFalse(t, Len(mockT, 'A', 0), \"Rune does not have length\")\n\tFalse(t, Len(mockT, struct{}{}, 0), \"Struct does not have length\")\n\n\tch := make(chan int, 5)\n\tch <- 1\n\tch <- 2\n\tch <- 3\n\n\tcases := []struct {\n\t\tv interface{}\n\t\tl int\n\t}{\n\t\t{[]int{1, 2, 3}, 3},\n\t\t{[...]int{1, 2, 3}, 3},\n\t\t{\"ABC\", 3},\n\t\t{map[int]int{1: 2, 2: 4, 3: 6}, 3},\n\t\t{ch, 3},\n\n\t\t{[]int{}, 0},\n\t\t{map[int]int{}, 0},\n\t\t{make(chan int), 0},\n\n\t\t{[]int(nil), 0},\n\t\t{map[int]int(nil), 0},\n\t\t{(chan int)(nil), 0},\n\t}\n\n\tfor _, c := 
range cases {\n\t\tTrue(t, Len(mockT, c.v, c.l), \"%#v should have %d items\", c.v, c.l)\n\t}\n\n\tcases = []struct {\n\t\tv interface{}\n\t\tl int\n\t}{\n\t\t{[]int{1, 2, 3}, 4},\n\t\t{[...]int{1, 2, 3}, 2},\n\t\t{\"ABC\", 2},\n\t\t{map[int]int{1: 2, 2: 4, 3: 6}, 4},\n\t\t{ch, 2},\n\n\t\t{[]int{}, 1},\n\t\t{map[int]int{}, 1},\n\t\t{make(chan int), 1},\n\n\t\t{[]int(nil), 1},\n\t\t{map[int]int(nil), 1},\n\t\t{(chan int)(nil), 1},\n\t}\n\n\tfor _, c := range cases {\n\t\tFalse(t, Len(mockT, c.v, c.l), \"%#v should have %d items\", c.v, c.l)\n\t}\n}\n\nfunc TestWithinDuration(t *testing.T) {\n\n\tmockT := new(testing.T)\n\ta := time.Now()\n\tb := a.Add(10 * time.Second)\n\n\tTrue(t, WithinDuration(mockT, a, b, 10*time.Second), \"A 10s difference is within a 10s time difference\")\n\tTrue(t, WithinDuration(mockT, b, a, 10*time.Second), \"A 10s difference is within a 10s time difference\")\n\n\tFalse(t, WithinDuration(mockT, a, b, 9*time.Second), \"A 10s difference is not within a 9s time difference\")\n\tFalse(t, WithinDuration(mockT, b, a, 9*time.Second), \"A 10s difference is not within a 9s time difference\")\n\n\tFalse(t, WithinDuration(mockT, a, b, -9*time.Second), \"A 10s difference is not within a -9s time difference\")\n\tFalse(t, WithinDuration(mockT, b, a, -9*time.Second), \"A 10s difference is not within a -9s time difference\")\n\n\tFalse(t, WithinDuration(mockT, a, b, -11*time.Second), \"A 10s difference is not within a -11s time difference\")\n\tFalse(t, WithinDuration(mockT, b, a, -11*time.Second), \"A 10s difference is not within a -11s time difference\")\n}\n\nfunc TestInDelta(t *testing.T) {\n\tmockT := new(testing.T)\n\n\tTrue(t, InDelta(mockT, 1.001, 1, 0.01), \"|1.001 - 1| <= 0.01\")\n\tTrue(t, InDelta(mockT, 1, 1.001, 0.01), \"|1 - 1.001| <= 0.01\")\n\tTrue(t, InDelta(mockT, 1, 2, 1), \"|1 - 2| <= 1\")\n\tFalse(t, InDelta(mockT, 1, 2, 0.5), \"Expected |1 - 2| <= 0.5 to fail\")\n\tFalse(t, InDelta(mockT, 2, 1, 0.5), \"Expected |2 - 1| <= 0.5 to fail\")\n\tFalse(t, InDelta(mockT, \"\", nil, 1), \"Expected non numerals to fail\")\n\tFalse(t, InDelta(mockT, 42, math.NaN(), 0.01), \"Expected NaN for actual to fail\")\n\tFalse(t, InDelta(mockT, math.NaN(), 42, 0.01), \"Expected NaN for expected to fail\")\n\n\tcases := []struct {\n\t\ta, b  interface{}\n\t\tdelta float64\n\t}{\n\t\t{uint8(2), uint8(1), 1},\n\t\t{uint16(2), uint16(1), 1},\n\t\t{uint32(2), uint32(1), 1},\n\t\t{uint64(2), uint64(1), 1},\n\n\t\t{int(2), int(1), 1},\n\t\t{int8(2), int8(1), 1},\n\t\t{int16(2), int16(1), 1},\n\t\t{int32(2), int32(1), 1},\n\t\t{int64(2), int64(1), 1},\n\n\t\t{float32(2), float32(1), 1},\n\t\t{float64(2), float64(1), 1},\n\t}\n\n\tfor _, tc := range cases {\n\t\tTrue(t, InDelta(mockT, tc.a, tc.b, tc.delta), \"Expected |%v - %v| <= %v\", tc.a, tc.b, tc.delta)\n\t}\n}\n\nfunc TestInDeltaSlice(t *testing.T) {\n\tmockT := new(testing.T)\n\n\tTrue(t, InDeltaSlice(mockT,\n\t\t[]float64{1.001, 0.999},\n\t\t[]float64{1, 1},\n\t\t0.1), \"{1.001, 0.999} is element-wise close to {1, 1} in delta=0.1\")\n\n\tTrue(t, InDeltaSlice(mockT,\n\t\t[]float64{1, 2},\n\t\t[]float64{0, 3},\n\t\t1), \"{1, 2} is element-wise close to {0, 3} in delta=1\")\n\n\tFalse(t, InDeltaSlice(mockT,\n\t\t[]float64{1, 2},\n\t\t[]float64{0, 3},\n\t\t0.1), \"{1, 2} is not element-wise close to {0, 3} in delta=0.1\")\n\n\tFalse(t, InDeltaSlice(mockT, \"\", nil, 1), \"Expected non numeral slices to fail\")\n}\n\nfunc TestInEpsilon(t *testing.T) {\n\tmockT := new(testing.T)\n\n\tcases := []struct {\n\t\ta, b    interface{}\n\t\tepsilon 
float64\n\t}{\n\t\t{uint8(2), uint16(2), .001},\n\t\t{2.1, 2.2, 0.1},\n\t\t{2.2, 2.1, 0.1},\n\t\t{-2.1, -2.2, 0.1},\n\t\t{-2.2, -2.1, 0.1},\n\t\t{uint64(100), uint8(101), 0.01},\n\t\t{0.1, -0.1, 2},\n\t\t{0.1, 0, 2},\n\t}\n\n\tfor _, tc := range cases {\n\t\tTrue(t, InEpsilon(t, tc.a, tc.b, tc.epsilon, \"Expected %v and %v to have a relative difference of %v\", tc.a, tc.b, tc.epsilon), \"test: %q\", tc)\n\t}\n\n\tcases = []struct {\n\t\ta, b    interface{}\n\t\tepsilon float64\n\t}{\n\t\t{uint8(2), int16(-2), .001},\n\t\t{uint64(100), uint8(102), 0.01},\n\t\t{2.1, 2.2, 0.001},\n\t\t{2.2, 2.1, 0.001},\n\t\t{2.1, -2.2, 1},\n\t\t{2.1, \"bla-bla\", 0},\n\t\t{0.1, -0.1, 1.99},\n\t\t{0, 0.1, 2}, // expected must be different to zero\n\t}\n\n\tfor _, tc := range cases {\n\t\tFalse(t, InEpsilon(mockT, tc.a, tc.b, tc.epsilon, \"Expected %v and %v to have a relative difference of %v\", tc.a, tc.b, tc.epsilon))\n\t}\n\n}\n\nfunc TestInEpsilonSlice(t *testing.T) {\n\tmockT := new(testing.T)\n\n\tTrue(t, InEpsilonSlice(mockT,\n\t\t[]float64{2.2, 2.0},\n\t\t[]float64{2.1, 2.1},\n\t\t0.06), \"{2.2, 2.0} is element-wise close to {2.1, 2.1} in epsilon=0.06\")\n\n\tFalse(t, InEpsilonSlice(mockT,\n\t\t[]float64{2.2, 2.0},\n\t\t[]float64{2.1, 2.1},\n\t\t0.04), \"{2.2, 2.0} is not element-wise close to {2.1, 2.1} in epsilon=0.04\")\n\n\tFalse(t, InEpsilonSlice(mockT, \"\", nil, 1), \"Expected non numeral slices to fail\")\n}\n\nfunc TestRegexp(t *testing.T) {\n\tmockT := new(testing.T)\n\n\tcases := []struct {\n\t\trx, str string\n\t}{\n\t\t{\"^start\", \"start of the line\"},\n\t\t{\"end$\", \"in the end\"},\n\t\t{\"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}\", \"My phone number is 650.12.34\"},\n\t}\n\n\tfor _, tc := range cases {\n\t\tTrue(t, Regexp(mockT, tc.rx, tc.str))\n\t\tTrue(t, Regexp(mockT, regexp.MustCompile(tc.rx), tc.str))\n\t\tFalse(t, NotRegexp(mockT, tc.rx, tc.str))\n\t\tFalse(t, NotRegexp(mockT, regexp.MustCompile(tc.rx), tc.str))\n\t}\n\n\tcases = []struct {\n\t\trx, str string\n\t}{\n\t\t{\"^asdfastart\", \"Not the start of the line\"},\n\t\t{\"end$\", \"in the end.\"},\n\t\t{\"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}\", \"My phone number is 650.12a.34\"},\n\t}\n\n\tfor _, tc := range cases {\n\t\tFalse(t, Regexp(mockT, tc.rx, tc.str), \"Expected \\\"%s\\\" to not match \\\"%s\\\"\", tc.rx, tc.str)\n\t\tFalse(t, Regexp(mockT, regexp.MustCompile(tc.rx), tc.str))\n\t\tTrue(t, NotRegexp(mockT, tc.rx, tc.str))\n\t\tTrue(t, NotRegexp(mockT, regexp.MustCompile(tc.rx), tc.str))\n\t}\n}\n\nfunc testAutogeneratedFunction() {\n\tdefer func() {\n\t\tif err := recover(); err == nil {\n\t\t\tpanic(\"did not panic\")\n\t\t}\n\t\tCallerInfo()\n\t}()\n\tt := struct {\n\t\tio.Closer\n\t}{}\n\tvar c io.Closer\n\tc = t\n\tc.Close()\n}\n\nfunc TestCallerInfoWithAutogeneratedFunctions(t *testing.T) {\n\tNotPanics(t, func() {\n\t\ttestAutogeneratedFunction()\n\t})\n}\n\nfunc TestZero(t *testing.T) {\n\tmockT := new(testing.T)\n\n\tfor _, test := range zeros {\n\t\tTrue(t, Zero(mockT, test, \"%#v is not the %v zero value\", test, reflect.TypeOf(test)))\n\t}\n\n\tfor _, test := range nonZeros {\n\t\tFalse(t, Zero(mockT, test, \"%#v is not the %v zero value\", test, reflect.TypeOf(test)))\n\t}\n}\n\nfunc TestNotZero(t *testing.T) {\n\tmockT := new(testing.T)\n\n\tfor _, test := range zeros {\n\t\tFalse(t, NotZero(mockT, test, \"%#v is not the %v zero value\", test, reflect.TypeOf(test)))\n\t}\n\n\tfor _, test := range nonZeros {\n\t\tTrue(t, NotZero(mockT, test, \"%#v is not the %v zero value\", test, 
reflect.TypeOf(test)))\n\t}\n}\n\nfunc TestJSONEq_EqualJSONString(t *testing.T) {\n\tmockT := new(testing.T)\n\tTrue(t, JSONEq(mockT, `{\"hello\": \"world\", \"foo\": \"bar\"}`, `{\"hello\": \"world\", \"foo\": \"bar\"}`))\n}\n\nfunc TestJSONEq_EquivalentButNotEqual(t *testing.T) {\n\tmockT := new(testing.T)\n\tTrue(t, JSONEq(mockT, `{\"hello\": \"world\", \"foo\": \"bar\"}`, `{\"foo\": \"bar\", \"hello\": \"world\"}`))\n}\n\nfunc TestJSONEq_HashOfArraysAndHashes(t *testing.T) {\n\tmockT := new(testing.T)\n\tTrue(t, JSONEq(mockT, \"{\\r\\n\\t\\\"numeric\\\": 1.5,\\r\\n\\t\\\"array\\\": [{\\\"foo\\\": \\\"bar\\\"}, 1, \\\"string\\\", [\\\"nested\\\", \\\"array\\\", 5.5]],\\r\\n\\t\\\"hash\\\": {\\\"nested\\\": \\\"hash\\\", \\\"nested_slice\\\": [\\\"this\\\", \\\"is\\\", \\\"nested\\\"]},\\r\\n\\t\\\"string\\\": \\\"foo\\\"\\r\\n}\",\n\t\t\"{\\r\\n\\t\\\"numeric\\\": 1.5,\\r\\n\\t\\\"hash\\\": {\\\"nested\\\": \\\"hash\\\", \\\"nested_slice\\\": [\\\"this\\\", \\\"is\\\", \\\"nested\\\"]},\\r\\n\\t\\\"string\\\": \\\"foo\\\",\\r\\n\\t\\\"array\\\": [{\\\"foo\\\": \\\"bar\\\"}, 1, \\\"string\\\", [\\\"nested\\\", \\\"array\\\", 5.5]]\\r\\n}\"))\n}\n\nfunc TestJSONEq_Array(t *testing.T) {\n\tmockT := new(testing.T)\n\tTrue(t, JSONEq(mockT, `[\"foo\", {\"hello\": \"world\", \"nested\": \"hash\"}]`, `[\"foo\", {\"nested\": \"hash\", \"hello\": \"world\"}]`))\n}\n\nfunc TestJSONEq_HashAndArrayNotEquivalent(t *testing.T) {\n\tmockT := new(testing.T)\n\tFalse(t, JSONEq(mockT, `[\"foo\", {\"hello\": \"world\", \"nested\": \"hash\"}]`, `{\"foo\": \"bar\", {\"nested\": \"hash\", \"hello\": \"world\"}}`))\n}\n\nfunc TestJSONEq_HashesNotEquivalent(t *testing.T) {\n\tmockT := new(testing.T)\n\tFalse(t, JSONEq(mockT, `{\"foo\": \"bar\"}`, `{\"foo\": \"bar\", \"hello\": \"world\"}`))\n}\n\nfunc TestJSONEq_ActualIsNotJSON(t *testing.T) {\n\tmockT := new(testing.T)\n\tFalse(t, JSONEq(mockT, `{\"foo\": \"bar\"}`, \"Not JSON\"))\n}\n\nfunc TestJSONEq_ExpectedIsNotJSON(t *testing.T) {\n\tmockT := new(testing.T)\n\tFalse(t, JSONEq(mockT, \"Not JSON\", `{\"foo\": \"bar\", \"hello\": \"world\"}`))\n}\n\nfunc TestJSONEq_ExpectedAndActualNotJSON(t *testing.T) {\n\tmockT := new(testing.T)\n\tFalse(t, JSONEq(mockT, \"Not JSON\", \"Not JSON\"))\n}\n\nfunc TestJSONEq_ArraysOfDifferentOrder(t *testing.T) {\n\tmockT := new(testing.T)\n\tFalse(t, JSONEq(mockT, `[\"foo\", {\"hello\": \"world\", \"nested\": \"hash\"}]`, `[{ \"hello\": \"world\", \"nested\": \"hash\"}, \"foo\"]`))\n}\n\nfunc TestDiff(t *testing.T) {\n\texpected := `\n\nDiff:\n--- Expected\n+++ Actual\n@@ -1,3 +1,3 @@\n (struct { foo string }) {\n- foo: (string) (len=5) \"hello\"\n+ foo: (string) (len=3) \"bar\"\n }\n`\n\tactual := diff(\n\t\tstruct{ foo string }{\"hello\"},\n\t\tstruct{ foo string }{\"bar\"},\n\t)\n\tEqual(t, expected, actual)\n\n\texpected = `\n\nDiff:\n--- Expected\n+++ Actual\n@@ -2,5 +2,5 @@\n  (int) 1,\n- (int) 2,\n  (int) 3,\n- (int) 4\n+ (int) 5,\n+ (int) 7\n }\n`\n\tactual = diff(\n\t\t[]int{1, 2, 3, 4},\n\t\t[]int{1, 3, 5, 7},\n\t)\n\tEqual(t, expected, actual)\n\n\texpected = `\n\nDiff:\n--- Expected\n+++ Actual\n@@ -2,4 +2,4 @@\n  (int) 1,\n- (int) 2,\n- (int) 3\n+ (int) 3,\n+ (int) 5\n }\n`\n\tactual = diff(\n\t\t[]int{1, 2, 3, 4}[0:3],\n\t\t[]int{1, 3, 5, 7}[0:3],\n\t)\n\tEqual(t, expected, actual)\n\n\texpected = `\n\nDiff:\n--- Expected\n+++ Actual\n@@ -1,6 +1,6 @@\n (map[string]int) (len=4) {\n- (string) (len=4) \"four\": (int) 4,\n+ (string) (len=4) \"five\": (int) 5,\n  (string) (len=3) \"one\": (int) 1,\n- (string) (len=5) 
\"three\": (int) 3,\n- (string) (len=3) \"two\": (int) 2\n+ (string) (len=5) \"seven\": (int) 7,\n+ (string) (len=5) \"three\": (int) 3\n }\n`\n\n\tactual = diff(\n\t\tmap[string]int{\"one\": 1, \"two\": 2, \"three\": 3, \"four\": 4},\n\t\tmap[string]int{\"one\": 1, \"three\": 3, \"five\": 5, \"seven\": 7},\n\t)\n\tEqual(t, expected, actual)\n}\n\nfunc TestDiffEmptyCases(t *testing.T) {\n\tEqual(t, \"\", diff(nil, nil))\n\tEqual(t, \"\", diff(struct{ foo string }{}, nil))\n\tEqual(t, \"\", diff(nil, struct{ foo string }{}))\n\tEqual(t, \"\", diff(1, 2))\n\tEqual(t, \"\", diff(1, 2))\n\tEqual(t, \"\", diff([]int{1}, []bool{true}))\n}\n\n// Ensure there are no data races\nfunc TestDiffRace(t *testing.T) {\n\tt.Parallel()\n\n\texpected := map[string]string{\n\t\t\"a\": \"A\",\n\t\t\"b\": \"B\",\n\t\t\"c\": \"C\",\n\t}\n\n\tactual := map[string]string{\n\t\t\"d\": \"D\",\n\t\t\"e\": \"E\",\n\t\t\"f\": \"F\",\n\t}\n\n\t// run diffs in parallel simulating tests with t.Parallel()\n\tnumRoutines := 10\n\trChans := make([]chan string, numRoutines)\n\tfor idx := range rChans {\n\t\trChans[idx] = make(chan string)\n\t\tgo func(ch chan string) {\n\t\t\tdefer close(ch)\n\t\t\tch <- diff(expected, actual)\n\t\t}(rChans[idx])\n\t}\n\n\tfor _, ch := range rChans {\n\t\tfor msg := range ch {\n\t\t\tNotZero(t, msg) // dummy assert\n\t\t}\n\t}\n}\n\ntype mockTestingT struct {\n}\n\nfunc (m *mockTestingT) Errorf(format string, args ...interface{}) {}\n\nfunc TestFailNowWithPlainTestingT(t *testing.T) {\n\tmockT := &mockTestingT{}\n\n\tPanics(t, func() {\n\t\tFailNow(mockT, \"failed\")\n\t}, \"should panic since mockT is missing FailNow()\")\n}\n\ntype mockFailNowTestingT struct {\n}\n\nfunc (m *mockFailNowTestingT) Errorf(format string, args ...interface{}) {}\n\nfunc (m *mockFailNowTestingT) FailNow() {}\n\nfunc TestFailNowWithFullTestingT(t *testing.T) {\n\tmockT := &mockFailNowTestingT{}\n\n\tNotPanics(t, func() {\n\t\tFailNow(mockT, \"failed\")\n\t}, \"should call mockT.FailNow() rather than panicking\")\n}\n"
  },
  {
    "path": "vendor/github.com/stretchr/testify/assert/doc.go",
    "content": "// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system.\n//\n// Example Usage\n//\n// The following is a complete example using assert in a standard test function:\n//    import (\n//      \"testing\"\n//      \"github.com/stretchr/testify/assert\"\n//    )\n//\n//    func TestSomething(t *testing.T) {\n//\n//      var a string = \"Hello\"\n//      var b string = \"Hello\"\n//\n//      assert.Equal(t, a, b, \"The two words should be the same.\")\n//\n//    }\n//\n// if you assert many times, use the format below:\n//\n//    import (\n//      \"testing\"\n//      \"github.com/stretchr/testify/assert\"\n//    )\n//\n//    func TestSomething(t *testing.T) {\n//      assert := assert.New(t)\n//\n//      var a string = \"Hello\"\n//      var b string = \"Hello\"\n//\n//      assert.Equal(a, b, \"The two words should be the same.\")\n//    }\n//\n// Assertions\n//\n// Assertions allow you to easily write test code, and are global funcs in the `assert` package.\n// All assertion functions take, as the first argument, the `*testing.T` object provided by the\n// testing framework. This allows the assertion funcs to write the failings and other details to\n// the correct place.\n//\n// Every assertion function also takes an optional string message as the final argument,\n// allowing custom error messages to be appended to the message the assertion method outputs.\npackage assert\n"
  },
  {
    "path": "vendor/github.com/stretchr/testify/assert/errors.go",
    "content": "package assert\n\nimport (\n\t\"errors\"\n)\n\n// AnError is an error instance useful for testing.  If the code does not care\n// about error specifics, and only needs to return the error for example, this\n// error should be used to make the test code more readable.\nvar AnError = errors.New(\"assert.AnError general error for testing\")\n"
  },
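  {
    "path": "examples/anerror_example_test.go",
    "content": "// Package examples contains illustrative sketches added for documentation\n// purposes; it is not part of the original repository. This file shows the\n// intended use of assert.AnError from the vendored testify/assert package:\n// when a test only cares that some error is propagated, returning\n// assert.AnError keeps the test readable. fetchValue is a hypothetical\n// helper invented for this sketch.\npackage examples\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\n// fetchValue is a hypothetical function that fails on demand. The caller\n// does not care about error specifics, so assert.AnError is returned.\nfunc fetchValue(fail bool) (string, error) {\n\tif fail {\n\t\treturn \"\", assert.AnError\n\t}\n\treturn \"ok\", nil\n}\n\nfunc TestFetchValuePropagatesError(t *testing.T) {\n\t// The error is propagated unchanged, so it compares equal to AnError.\n\t_, err := fetchValue(true)\n\tassert.Error(t, err)\n\tassert.Equal(t, assert.AnError, err)\n\n\t// The happy path returns the value and no error.\n\tv, err := fetchValue(false)\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"ok\", v)\n}\n"
  },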
  {
    "path": "vendor/github.com/stretchr/testify/assert/forward_assertions.go",
    "content": "package assert\n\n// Assertions provides assertion methods around the\n// TestingT interface.\ntype Assertions struct {\n\tt TestingT\n}\n\n// New makes a new Assertions object for the specified TestingT.\nfunc New(t TestingT) *Assertions {\n\treturn &Assertions{\n\t\tt: t,\n\t}\n}\n\n//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl\n"
  },
  {
    "path": "vendor/github.com/stretchr/testify/assert/forward_assertions_test.go",
    "content": "package assert\n\nimport (\n\t\"errors\"\n\t\"regexp\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestImplementsWrapper(t *testing.T) {\n\tassert := New(new(testing.T))\n\n\tif !assert.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterConformingObject)) {\n\t\tt.Error(\"Implements method should return true: AssertionTesterConformingObject implements AssertionTesterInterface\")\n\t}\n\tif assert.Implements((*AssertionTesterInterface)(nil), new(AssertionTesterNonConformingObject)) {\n\t\tt.Error(\"Implements method should return false: AssertionTesterNonConformingObject does not implements AssertionTesterInterface\")\n\t}\n}\n\nfunc TestIsTypeWrapper(t *testing.T) {\n\tassert := New(new(testing.T))\n\n\tif !assert.IsType(new(AssertionTesterConformingObject), new(AssertionTesterConformingObject)) {\n\t\tt.Error(\"IsType should return true: AssertionTesterConformingObject is the same type as AssertionTesterConformingObject\")\n\t}\n\tif assert.IsType(new(AssertionTesterConformingObject), new(AssertionTesterNonConformingObject)) {\n\t\tt.Error(\"IsType should return false: AssertionTesterConformingObject is not the same type as AssertionTesterNonConformingObject\")\n\t}\n\n}\n\nfunc TestEqualWrapper(t *testing.T) {\n\tassert := New(new(testing.T))\n\n\tif !assert.Equal(\"Hello World\", \"Hello World\") {\n\t\tt.Error(\"Equal should return true\")\n\t}\n\tif !assert.Equal(123, 123) {\n\t\tt.Error(\"Equal should return true\")\n\t}\n\tif !assert.Equal(123.5, 123.5) {\n\t\tt.Error(\"Equal should return true\")\n\t}\n\tif !assert.Equal([]byte(\"Hello World\"), []byte(\"Hello World\")) {\n\t\tt.Error(\"Equal should return true\")\n\t}\n\tif !assert.Equal(nil, nil) {\n\t\tt.Error(\"Equal should return true\")\n\t}\n}\n\nfunc TestEqualValuesWrapper(t *testing.T) {\n\tassert := New(new(testing.T))\n\n\tif !assert.EqualValues(uint32(10), int32(10)) {\n\t\tt.Error(\"EqualValues should return true\")\n\t}\n}\n\nfunc TestNotNilWrapper(t *testing.T) {\n\tassert := New(new(testing.T))\n\n\tif !assert.NotNil(new(AssertionTesterConformingObject)) {\n\t\tt.Error(\"NotNil should return true: object is not nil\")\n\t}\n\tif assert.NotNil(nil) {\n\t\tt.Error(\"NotNil should return false: object is nil\")\n\t}\n\n}\n\nfunc TestNilWrapper(t *testing.T) {\n\tassert := New(new(testing.T))\n\n\tif !assert.Nil(nil) {\n\t\tt.Error(\"Nil should return true: object is nil\")\n\t}\n\tif assert.Nil(new(AssertionTesterConformingObject)) {\n\t\tt.Error(\"Nil should return false: object is not nil\")\n\t}\n\n}\n\nfunc TestTrueWrapper(t *testing.T) {\n\tassert := New(new(testing.T))\n\n\tif !assert.True(true) {\n\t\tt.Error(\"True should return true\")\n\t}\n\tif assert.True(false) {\n\t\tt.Error(\"True should return false\")\n\t}\n\n}\n\nfunc TestFalseWrapper(t *testing.T) {\n\tassert := New(new(testing.T))\n\n\tif !assert.False(false) {\n\t\tt.Error(\"False should return true\")\n\t}\n\tif assert.False(true) {\n\t\tt.Error(\"False should return false\")\n\t}\n\n}\n\nfunc TestExactlyWrapper(t *testing.T) {\n\tassert := New(new(testing.T))\n\n\ta := float32(1)\n\tb := float64(1)\n\tc := float32(1)\n\td := float32(2)\n\n\tif assert.Exactly(a, b) {\n\t\tt.Error(\"Exactly should return false\")\n\t}\n\tif assert.Exactly(a, d) {\n\t\tt.Error(\"Exactly should return false\")\n\t}\n\tif !assert.Exactly(a, c) {\n\t\tt.Error(\"Exactly should return true\")\n\t}\n\n\tif assert.Exactly(nil, a) {\n\t\tt.Error(\"Exactly should return false\")\n\t}\n\tif assert.Exactly(a, nil) {\n\t\tt.Error(\"Exactly should return 
false\")\n\t}\n\n}\n\nfunc TestNotEqualWrapper(t *testing.T) {\n\n\tassert := New(new(testing.T))\n\n\tif !assert.NotEqual(\"Hello World\", \"Hello World!\") {\n\t\tt.Error(\"NotEqual should return true\")\n\t}\n\tif !assert.NotEqual(123, 1234) {\n\t\tt.Error(\"NotEqual should return true\")\n\t}\n\tif !assert.NotEqual(123.5, 123.55) {\n\t\tt.Error(\"NotEqual should return true\")\n\t}\n\tif !assert.NotEqual([]byte(\"Hello World\"), []byte(\"Hello World!\")) {\n\t\tt.Error(\"NotEqual should return true\")\n\t}\n\tif !assert.NotEqual(nil, new(AssertionTesterConformingObject)) {\n\t\tt.Error(\"NotEqual should return true\")\n\t}\n}\n\nfunc TestContainsWrapper(t *testing.T) {\n\n\tassert := New(new(testing.T))\n\tlist := []string{\"Foo\", \"Bar\"}\n\n\tif !assert.Contains(\"Hello World\", \"Hello\") {\n\t\tt.Error(\"Contains should return true: \\\"Hello World\\\" contains \\\"Hello\\\"\")\n\t}\n\tif assert.Contains(\"Hello World\", \"Salut\") {\n\t\tt.Error(\"Contains should return false: \\\"Hello World\\\" does not contain \\\"Salut\\\"\")\n\t}\n\n\tif !assert.Contains(list, \"Foo\") {\n\t\tt.Error(\"Contains should return true: \\\"[\\\"Foo\\\", \\\"Bar\\\"]\\\" contains \\\"Foo\\\"\")\n\t}\n\tif assert.Contains(list, \"Salut\") {\n\t\tt.Error(\"Contains should return false: \\\"[\\\"Foo\\\", \\\"Bar\\\"]\\\" does not contain \\\"Salut\\\"\")\n\t}\n\n}\n\nfunc TestNotContainsWrapper(t *testing.T) {\n\n\tassert := New(new(testing.T))\n\tlist := []string{\"Foo\", \"Bar\"}\n\n\tif !assert.NotContains(\"Hello World\", \"Hello!\") {\n\t\tt.Error(\"NotContains should return true: \\\"Hello World\\\" does not contain \\\"Hello!\\\"\")\n\t}\n\tif assert.NotContains(\"Hello World\", \"Hello\") {\n\t\tt.Error(\"NotContains should return false: \\\"Hello World\\\" contains \\\"Hello\\\"\")\n\t}\n\n\tif !assert.NotContains(list, \"Foo!\") {\n\t\tt.Error(\"NotContains should return true: \\\"[\\\"Foo\\\", \\\"Bar\\\"]\\\" does not contain \\\"Foo!\\\"\")\n\t}\n\tif assert.NotContains(list, \"Foo\") {\n\t\tt.Error(\"NotContains should return false: \\\"[\\\"Foo\\\", \\\"Bar\\\"]\\\" contains \\\"Foo\\\"\")\n\t}\n\n}\n\nfunc TestConditionWrapper(t *testing.T) {\n\n\tassert := New(new(testing.T))\n\n\tif !assert.Condition(func() bool { return true }, \"Truth\") {\n\t\tt.Error(\"Condition should return true\")\n\t}\n\n\tif assert.Condition(func() bool { return false }, \"Lie\") {\n\t\tt.Error(\"Condition should return false\")\n\t}\n\n}\n\nfunc TestDidPanicWrapper(t *testing.T) {\n\n\tif funcDidPanic, _ := didPanic(func() {\n\t\tpanic(\"Panic!\")\n\t}); !funcDidPanic {\n\t\tt.Error(\"didPanic should return true\")\n\t}\n\n\tif funcDidPanic, _ := didPanic(func() {\n\t}); funcDidPanic {\n\t\tt.Error(\"didPanic should return false\")\n\t}\n\n}\n\nfunc TestPanicsWrapper(t *testing.T) {\n\n\tassert := New(new(testing.T))\n\n\tif !assert.Panics(func() {\n\t\tpanic(\"Panic!\")\n\t}) {\n\t\tt.Error(\"Panics should return true\")\n\t}\n\n\tif assert.Panics(func() {\n\t}) {\n\t\tt.Error(\"Panics should return false\")\n\t}\n\n}\n\nfunc TestNotPanicsWrapper(t *testing.T) {\n\n\tassert := New(new(testing.T))\n\n\tif !assert.NotPanics(func() {\n\t}) {\n\t\tt.Error(\"NotPanics should return true\")\n\t}\n\n\tif assert.NotPanics(func() {\n\t\tpanic(\"Panic!\")\n\t}) {\n\t\tt.Error(\"NotPanics should return false\")\n\t}\n\n}\n\nfunc TestNoErrorWrapper(t *testing.T) {\n\tassert := New(t)\n\tmockAssert := New(new(testing.T))\n\n\t// start with a nil error\n\tvar err error\n\n\tassert.True(mockAssert.NoError(err), 
\"NoError should return True for nil arg\")\n\n\t// now set an error\n\terr = errors.New(\"Some error\")\n\n\tassert.False(mockAssert.NoError(err), \"NoError with error should return False\")\n\n}\n\nfunc TestErrorWrapper(t *testing.T) {\n\tassert := New(t)\n\tmockAssert := New(new(testing.T))\n\n\t// start with a nil error\n\tvar err error\n\n\tassert.False(mockAssert.Error(err), \"Error should return False for nil arg\")\n\n\t// now set an error\n\terr = errors.New(\"Some error\")\n\n\tassert.True(mockAssert.Error(err), \"Error with error should return True\")\n\n}\n\nfunc TestEqualErrorWrapper(t *testing.T) {\n\tassert := New(t)\n\tmockAssert := New(new(testing.T))\n\n\t// start with a nil error\n\tvar err error\n\tassert.False(mockAssert.EqualError(err, \"\"),\n\t\t\"EqualError should return false for nil arg\")\n\n\t// now set an error\n\terr = errors.New(\"some error\")\n\tassert.False(mockAssert.EqualError(err, \"Not some error\"),\n\t\t\"EqualError should return false for different error string\")\n\tassert.True(mockAssert.EqualError(err, \"some error\"),\n\t\t\"EqualError should return true\")\n}\n\nfunc TestEmptyWrapper(t *testing.T) {\n\tassert := New(t)\n\tmockAssert := New(new(testing.T))\n\n\tassert.True(mockAssert.Empty(\"\"), \"Empty string is empty\")\n\tassert.True(mockAssert.Empty(nil), \"Nil is empty\")\n\tassert.True(mockAssert.Empty([]string{}), \"Empty string array is empty\")\n\tassert.True(mockAssert.Empty(0), \"Zero int value is empty\")\n\tassert.True(mockAssert.Empty(false), \"False value is empty\")\n\n\tassert.False(mockAssert.Empty(\"something\"), \"Non Empty string is not empty\")\n\tassert.False(mockAssert.Empty(errors.New(\"something\")), \"Non nil object is not empty\")\n\tassert.False(mockAssert.Empty([]string{\"something\"}), \"Non empty string array is not empty\")\n\tassert.False(mockAssert.Empty(1), \"Non-zero int value is not empty\")\n\tassert.False(mockAssert.Empty(true), \"True value is not empty\")\n\n}\n\nfunc TestNotEmptyWrapper(t *testing.T) {\n\tassert := New(t)\n\tmockAssert := New(new(testing.T))\n\n\tassert.False(mockAssert.NotEmpty(\"\"), \"Empty string is empty\")\n\tassert.False(mockAssert.NotEmpty(nil), \"Nil is empty\")\n\tassert.False(mockAssert.NotEmpty([]string{}), \"Empty string array is empty\")\n\tassert.False(mockAssert.NotEmpty(0), \"Zero int value is empty\")\n\tassert.False(mockAssert.NotEmpty(false), \"False value is empty\")\n\n\tassert.True(mockAssert.NotEmpty(\"something\"), \"Non Empty string is not empty\")\n\tassert.True(mockAssert.NotEmpty(errors.New(\"something\")), \"Non nil object is not empty\")\n\tassert.True(mockAssert.NotEmpty([]string{\"something\"}), \"Non empty string array is not empty\")\n\tassert.True(mockAssert.NotEmpty(1), \"Non-zero int value is not empty\")\n\tassert.True(mockAssert.NotEmpty(true), \"True value is not empty\")\n\n}\n\nfunc TestLenWrapper(t *testing.T) {\n\tassert := New(t)\n\tmockAssert := New(new(testing.T))\n\n\tassert.False(mockAssert.Len(nil, 0), \"nil does not have length\")\n\tassert.False(mockAssert.Len(0, 0), \"int does not have length\")\n\tassert.False(mockAssert.Len(true, 0), \"true does not have length\")\n\tassert.False(mockAssert.Len(false, 0), \"false does not have length\")\n\tassert.False(mockAssert.Len('A', 0), \"Rune does not have length\")\n\tassert.False(mockAssert.Len(struct{}{}, 0), \"Struct does not have length\")\n\n\tch := make(chan int, 5)\n\tch <- 1\n\tch <- 2\n\tch <- 3\n\n\tcases := []struct {\n\t\tv interface{}\n\t\tl int\n\t}{\n\t\t{[]int{1, 2, 3}, 
3},\n\t\t{[...]int{1, 2, 3}, 3},\n\t\t{\"ABC\", 3},\n\t\t{map[int]int{1: 2, 2: 4, 3: 6}, 3},\n\t\t{ch, 3},\n\n\t\t{[]int{}, 0},\n\t\t{map[int]int{}, 0},\n\t\t{make(chan int), 0},\n\n\t\t{[]int(nil), 0},\n\t\t{map[int]int(nil), 0},\n\t\t{(chan int)(nil), 0},\n\t}\n\n\tfor _, c := range cases {\n\t\tassert.True(mockAssert.Len(c.v, c.l), \"%#v have %d items\", c.v, c.l)\n\t}\n}\n\nfunc TestWithinDurationWrapper(t *testing.T) {\n\tassert := New(t)\n\tmockAssert := New(new(testing.T))\n\ta := time.Now()\n\tb := a.Add(10 * time.Second)\n\n\tassert.True(mockAssert.WithinDuration(a, b, 10*time.Second), \"A 10s difference is within a 10s time difference\")\n\tassert.True(mockAssert.WithinDuration(b, a, 10*time.Second), \"A 10s difference is within a 10s time difference\")\n\n\tassert.False(mockAssert.WithinDuration(a, b, 9*time.Second), \"A 10s difference is not within a 9s time difference\")\n\tassert.False(mockAssert.WithinDuration(b, a, 9*time.Second), \"A 10s difference is not within a 9s time difference\")\n\n\tassert.False(mockAssert.WithinDuration(a, b, -9*time.Second), \"A 10s difference is not within a 9s time difference\")\n\tassert.False(mockAssert.WithinDuration(b, a, -9*time.Second), \"A 10s difference is not within a 9s time difference\")\n\n\tassert.False(mockAssert.WithinDuration(a, b, -11*time.Second), \"A 10s difference is not within a 9s time difference\")\n\tassert.False(mockAssert.WithinDuration(b, a, -11*time.Second), \"A 10s difference is not within a 9s time difference\")\n}\n\nfunc TestInDeltaWrapper(t *testing.T) {\n\tassert := New(new(testing.T))\n\n\tTrue(t, assert.InDelta(1.001, 1, 0.01), \"|1.001 - 1| <= 0.01\")\n\tTrue(t, assert.InDelta(1, 1.001, 0.01), \"|1 - 1.001| <= 0.01\")\n\tTrue(t, assert.InDelta(1, 2, 1), \"|1 - 2| <= 1\")\n\tFalse(t, assert.InDelta(1, 2, 0.5), \"Expected |1 - 2| <= 0.5 to fail\")\n\tFalse(t, assert.InDelta(2, 1, 0.5), \"Expected |2 - 1| <= 0.5 to fail\")\n\tFalse(t, assert.InDelta(\"\", nil, 1), \"Expected non numerals to fail\")\n\n\tcases := []struct {\n\t\ta, b  interface{}\n\t\tdelta float64\n\t}{\n\t\t{uint8(2), uint8(1), 1},\n\t\t{uint16(2), uint16(1), 1},\n\t\t{uint32(2), uint32(1), 1},\n\t\t{uint64(2), uint64(1), 1},\n\n\t\t{int(2), int(1), 1},\n\t\t{int8(2), int8(1), 1},\n\t\t{int16(2), int16(1), 1},\n\t\t{int32(2), int32(1), 1},\n\t\t{int64(2), int64(1), 1},\n\n\t\t{float32(2), float32(1), 1},\n\t\t{float64(2), float64(1), 1},\n\t}\n\n\tfor _, tc := range cases {\n\t\tTrue(t, assert.InDelta(tc.a, tc.b, tc.delta), \"Expected |%V - %V| <= %v\", tc.a, tc.b, tc.delta)\n\t}\n}\n\nfunc TestInEpsilonWrapper(t *testing.T) {\n\tassert := New(new(testing.T))\n\n\tcases := []struct {\n\t\ta, b    interface{}\n\t\tepsilon float64\n\t}{\n\t\t{uint8(2), uint16(2), .001},\n\t\t{2.1, 2.2, 0.1},\n\t\t{2.2, 2.1, 0.1},\n\t\t{-2.1, -2.2, 0.1},\n\t\t{-2.2, -2.1, 0.1},\n\t\t{uint64(100), uint8(101), 0.01},\n\t\t{0.1, -0.1, 2},\n\t}\n\n\tfor _, tc := range cases {\n\t\tTrue(t, assert.InEpsilon(tc.a, tc.b, tc.epsilon, \"Expected %V and %V to have a relative difference of %v\", tc.a, tc.b, tc.epsilon))\n\t}\n\n\tcases = []struct {\n\t\ta, b    interface{}\n\t\tepsilon float64\n\t}{\n\t\t{uint8(2), int16(-2), .001},\n\t\t{uint64(100), uint8(102), 0.01},\n\t\t{2.1, 2.2, 0.001},\n\t\t{2.2, 2.1, 0.001},\n\t\t{2.1, -2.2, 1},\n\t\t{2.1, \"bla-bla\", 0},\n\t\t{0.1, -0.1, 1.99},\n\t}\n\n\tfor _, tc := range cases {\n\t\tFalse(t, assert.InEpsilon(tc.a, tc.b, tc.epsilon, \"Expected %V and %V to have a relative difference of %v\", tc.a, tc.b, 
tc.epsilon))\n\t}\n}\n\nfunc TestRegexpWrapper(t *testing.T) {\n\n\tassert := New(new(testing.T))\n\n\tcases := []struct {\n\t\trx, str string\n\t}{\n\t\t{\"^start\", \"start of the line\"},\n\t\t{\"end$\", \"in the end\"},\n\t\t{\"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}\", \"My phone number is 650.12.34\"},\n\t}\n\n\tfor _, tc := range cases {\n\t\tTrue(t, assert.Regexp(tc.rx, tc.str))\n\t\tTrue(t, assert.Regexp(regexp.MustCompile(tc.rx), tc.str))\n\t\tFalse(t, assert.NotRegexp(tc.rx, tc.str))\n\t\tFalse(t, assert.NotRegexp(regexp.MustCompile(tc.rx), tc.str))\n\t}\n\n\tcases = []struct {\n\t\trx, str string\n\t}{\n\t\t{\"^asdfastart\", \"Not the start of the line\"},\n\t\t{\"end$\", \"in the end.\"},\n\t\t{\"[0-9]{3}[.-]?[0-9]{2}[.-]?[0-9]{2}\", \"My phone number is 650.12a.34\"},\n\t}\n\n\tfor _, tc := range cases {\n\t\tFalse(t, assert.Regexp(tc.rx, tc.str), \"Expected \\\"%s\\\" to not match \\\"%s\\\"\", tc.rx, tc.str)\n\t\tFalse(t, assert.Regexp(regexp.MustCompile(tc.rx), tc.str))\n\t\tTrue(t, assert.NotRegexp(tc.rx, tc.str))\n\t\tTrue(t, assert.NotRegexp(regexp.MustCompile(tc.rx), tc.str))\n\t}\n}\n\nfunc TestZeroWrapper(t *testing.T) {\n\tassert := New(t)\n\tmockAssert := New(new(testing.T))\n\n\tfor _, test := range zeros {\n\t\tassert.True(mockAssert.Zero(test), \"Zero should return true for %v\", test)\n\t}\n\n\tfor _, test := range nonZeros {\n\t\tassert.False(mockAssert.Zero(test), \"Zero should return false for %v\", test)\n\t}\n}\n\nfunc TestNotZeroWrapper(t *testing.T) {\n\tassert := New(t)\n\tmockAssert := New(new(testing.T))\n\n\tfor _, test := range zeros {\n\t\tassert.False(mockAssert.NotZero(test), \"Zero should return true for %v\", test)\n\t}\n\n\tfor _, test := range nonZeros {\n\t\tassert.True(mockAssert.NotZero(test), \"Zero should return false for %v\", test)\n\t}\n}\n\nfunc TestJSONEqWrapper_EqualSONString(t *testing.T) {\n\tassert := New(new(testing.T))\n\tif !assert.JSONEq(`{\"hello\": \"world\", \"foo\": \"bar\"}`, `{\"hello\": \"world\", \"foo\": \"bar\"}`) {\n\t\tt.Error(\"JSONEq should return true\")\n\t}\n\n}\n\nfunc TestJSONEqWrapper_EquivalentButNotEqual(t *testing.T) {\n\tassert := New(new(testing.T))\n\tif !assert.JSONEq(`{\"hello\": \"world\", \"foo\": \"bar\"}`, `{\"foo\": \"bar\", \"hello\": \"world\"}`) {\n\t\tt.Error(\"JSONEq should return true\")\n\t}\n\n}\n\nfunc TestJSONEqWrapper_HashOfArraysAndHashes(t *testing.T) {\n\tassert := New(new(testing.T))\n\tif !assert.JSONEq(\"{\\r\\n\\t\\\"numeric\\\": 1.5,\\r\\n\\t\\\"array\\\": [{\\\"foo\\\": \\\"bar\\\"}, 1, \\\"string\\\", [\\\"nested\\\", \\\"array\\\", 5.5]],\\r\\n\\t\\\"hash\\\": {\\\"nested\\\": \\\"hash\\\", \\\"nested_slice\\\": [\\\"this\\\", \\\"is\\\", \\\"nested\\\"]},\\r\\n\\t\\\"string\\\": \\\"foo\\\"\\r\\n}\",\n\t\t\"{\\r\\n\\t\\\"numeric\\\": 1.5,\\r\\n\\t\\\"hash\\\": {\\\"nested\\\": \\\"hash\\\", \\\"nested_slice\\\": [\\\"this\\\", \\\"is\\\", \\\"nested\\\"]},\\r\\n\\t\\\"string\\\": \\\"foo\\\",\\r\\n\\t\\\"array\\\": [{\\\"foo\\\": \\\"bar\\\"}, 1, \\\"string\\\", [\\\"nested\\\", \\\"array\\\", 5.5]]\\r\\n}\") {\n\t\tt.Error(\"JSONEq should return true\")\n\t}\n}\n\nfunc TestJSONEqWrapper_Array(t *testing.T) {\n\tassert := New(new(testing.T))\n\tif !assert.JSONEq(`[\"foo\", {\"hello\": \"world\", \"nested\": \"hash\"}]`, `[\"foo\", {\"nested\": \"hash\", \"hello\": \"world\"}]`) {\n\t\tt.Error(\"JSONEq should return true\")\n\t}\n\n}\n\nfunc TestJSONEqWrapper_HashAndArrayNotEquivalent(t *testing.T) {\n\tassert := New(new(testing.T))\n\tif assert.JSONEq(`[\"foo\", {\"hello\": 
\"world\", \"nested\": \"hash\"}]`, `{\"foo\": \"bar\", {\"nested\": \"hash\", \"hello\": \"world\"}}`) {\n\t\tt.Error(\"JSONEq should return false\")\n\t}\n}\n\nfunc TestJSONEqWrapper_HashesNotEquivalent(t *testing.T) {\n\tassert := New(new(testing.T))\n\tif assert.JSONEq(`{\"foo\": \"bar\"}`, `{\"foo\": \"bar\", \"hello\": \"world\"}`) {\n\t\tt.Error(\"JSONEq should return false\")\n\t}\n}\n\nfunc TestJSONEqWrapper_ActualIsNotJSON(t *testing.T) {\n\tassert := New(new(testing.T))\n\tif assert.JSONEq(`{\"foo\": \"bar\"}`, \"Not JSON\") {\n\t\tt.Error(\"JSONEq should return false\")\n\t}\n}\n\nfunc TestJSONEqWrapper_ExpectedIsNotJSON(t *testing.T) {\n\tassert := New(new(testing.T))\n\tif assert.JSONEq(\"Not JSON\", `{\"foo\": \"bar\", \"hello\": \"world\"}`) {\n\t\tt.Error(\"JSONEq should return false\")\n\t}\n}\n\nfunc TestJSONEqWrapper_ExpectedAndActualNotJSON(t *testing.T) {\n\tassert := New(new(testing.T))\n\tif assert.JSONEq(\"Not JSON\", \"Not JSON\") {\n\t\tt.Error(\"JSONEq should return false\")\n\t}\n}\n\nfunc TestJSONEqWrapper_ArraysOfDifferentOrder(t *testing.T) {\n\tassert := New(new(testing.T))\n\tif assert.JSONEq(`[\"foo\", {\"hello\": \"world\", \"nested\": \"hash\"}]`, `[{ \"hello\": \"world\", \"nested\": \"hash\"}, \"foo\"]`) {\n\t\tt.Error(\"JSONEq should return false\")\n\t}\n}\n"
  },
  {
    "path": "vendor/github.com/stretchr/testify/assert/http_assertions.go",
    "content": "package assert\n\nimport (\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"strings\"\n)\n\n// httpCode is a helper that returns HTTP code of the response. It returns -1\n// if building a new request fails.\nfunc httpCode(handler http.HandlerFunc, method, url string, values url.Values) int {\n\tw := httptest.NewRecorder()\n\treq, err := http.NewRequest(method, url+\"?\"+values.Encode(), nil)\n\tif err != nil {\n\t\treturn -1\n\t}\n\thandler(w, req)\n\treturn w.Code\n}\n\n// HTTPSuccess asserts that a specified handler returns a success status code.\n//\n//  assert.HTTPSuccess(t, myHandler, \"POST\", \"http://www.google.com\", nil)\n//\n// Returns whether the assertion was successful (true) or not (false).\nfunc HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool {\n\tcode := httpCode(handler, method, url, values)\n\tif code == -1 {\n\t\treturn false\n\t}\n\treturn code >= http.StatusOK && code <= http.StatusPartialContent\n}\n\n// HTTPRedirect asserts that a specified handler returns a redirect status code.\n//\n//  assert.HTTPRedirect(t, myHandler, \"GET\", \"/a/b/c\", url.Values{\"a\": []string{\"b\", \"c\"}}\n//\n// Returns whether the assertion was successful (true) or not (false).\nfunc HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool {\n\tcode := httpCode(handler, method, url, values)\n\tif code == -1 {\n\t\treturn false\n\t}\n\treturn code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect\n}\n\n// HTTPError asserts that a specified handler returns an error status code.\n//\n//  assert.HTTPError(t, myHandler, \"POST\", \"/a/b/c\", url.Values{\"a\": []string{\"b\", \"c\"}}\n//\n// Returns whether the assertion was successful (true) or not (false).\nfunc HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool {\n\tcode := httpCode(handler, method, url, values)\n\tif code == -1 {\n\t\treturn false\n\t}\n\treturn code >= http.StatusBadRequest\n}\n\n// HTTPBody is a helper that returns HTTP body of the response. 
It returns\n// empty string if building a new request fails.\nfunc HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {\n\tw := httptest.NewRecorder()\n\treq, err := http.NewRequest(method, url+\"?\"+values.Encode(), nil)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\thandler(w, req)\n\treturn w.Body.String()\n}\n\n// HTTPBodyContains asserts that a specified handler returns a\n// body that contains a string.\n//\n//  assert.HTTPBodyContains(t, myHandler, \"www.google.com\", nil, \"I'm Feeling Lucky\")\n//\n// Returns whether the assertion was successful (true) or not (false).\nfunc HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool {\n\tbody := HTTPBody(handler, method, url, values)\n\n\tcontains := strings.Contains(body, fmt.Sprint(str))\n\tif !contains {\n\t\tFail(t, fmt.Sprintf(\"Expected response body for \\\"%s\\\" to contain \\\"%s\\\" but found \\\"%s\\\"\", url+\"?\"+values.Encode(), str, body))\n\t}\n\n\treturn contains\n}\n\n// HTTPBodyNotContains asserts that a specified handler returns a\n// body that does not contain a string.\n//\n//  assert.HTTPBodyNotContains(t, myHandler, \"www.google.com\", nil, \"I'm Feeling Lucky\")\n//\n// Returns whether the assertion was successful (true) or not (false).\nfunc HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool {\n\tbody := HTTPBody(handler, method, url, values)\n\n\tcontains := strings.Contains(body, fmt.Sprint(str))\n\tif contains {\n\t\tFail(t, fmt.Sprintf(\"Expected response body for \\\"%s\\\" to NOT contain \\\"%s\\\" but found \\\"%s\\\"\", url+\"?\"+values.Encode(), str, body))\n\t}\n\n\treturn !contains\n}\n"
  },
  {
    "path": "vendor/github.com/stretchr/testify/assert/http_assertions_test.go",
    "content": "package assert\n\nimport (\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"testing\"\n)\n\nfunc httpOK(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n}\n\nfunc httpRedirect(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusTemporaryRedirect)\n}\n\nfunc httpError(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusInternalServerError)\n}\n\nfunc TestHTTPStatuses(t *testing.T) {\n\tassert := New(t)\n\tmockT := new(testing.T)\n\n\tassert.Equal(HTTPSuccess(mockT, httpOK, \"GET\", \"/\", nil), true)\n\tassert.Equal(HTTPSuccess(mockT, httpRedirect, \"GET\", \"/\", nil), false)\n\tassert.Equal(HTTPSuccess(mockT, httpError, \"GET\", \"/\", nil), false)\n\n\tassert.Equal(HTTPRedirect(mockT, httpOK, \"GET\", \"/\", nil), false)\n\tassert.Equal(HTTPRedirect(mockT, httpRedirect, \"GET\", \"/\", nil), true)\n\tassert.Equal(HTTPRedirect(mockT, httpError, \"GET\", \"/\", nil), false)\n\n\tassert.Equal(HTTPError(mockT, httpOK, \"GET\", \"/\", nil), false)\n\tassert.Equal(HTTPError(mockT, httpRedirect, \"GET\", \"/\", nil), false)\n\tassert.Equal(HTTPError(mockT, httpError, \"GET\", \"/\", nil), true)\n}\n\nfunc TestHTTPStatusesWrapper(t *testing.T) {\n\tassert := New(t)\n\tmockAssert := New(new(testing.T))\n\n\tassert.Equal(mockAssert.HTTPSuccess(httpOK, \"GET\", \"/\", nil), true)\n\tassert.Equal(mockAssert.HTTPSuccess(httpRedirect, \"GET\", \"/\", nil), false)\n\tassert.Equal(mockAssert.HTTPSuccess(httpError, \"GET\", \"/\", nil), false)\n\n\tassert.Equal(mockAssert.HTTPRedirect(httpOK, \"GET\", \"/\", nil), false)\n\tassert.Equal(mockAssert.HTTPRedirect(httpRedirect, \"GET\", \"/\", nil), true)\n\tassert.Equal(mockAssert.HTTPRedirect(httpError, \"GET\", \"/\", nil), false)\n\n\tassert.Equal(mockAssert.HTTPError(httpOK, \"GET\", \"/\", nil), false)\n\tassert.Equal(mockAssert.HTTPError(httpRedirect, \"GET\", \"/\", nil), false)\n\tassert.Equal(mockAssert.HTTPError(httpError, \"GET\", \"/\", nil), true)\n}\n\nfunc httpHelloName(w http.ResponseWriter, r *http.Request) {\n\tname := r.FormValue(\"name\")\n\tw.Write([]byte(fmt.Sprintf(\"Hello, %s!\", name)))\n}\n\nfunc TestHttpBody(t *testing.T) {\n\tassert := New(t)\n\tmockT := new(testing.T)\n\n\tassert.True(HTTPBodyContains(mockT, httpHelloName, \"GET\", \"/\", url.Values{\"name\": []string{\"World\"}}, \"Hello, World!\"))\n\tassert.True(HTTPBodyContains(mockT, httpHelloName, \"GET\", \"/\", url.Values{\"name\": []string{\"World\"}}, \"World\"))\n\tassert.False(HTTPBodyContains(mockT, httpHelloName, \"GET\", \"/\", url.Values{\"name\": []string{\"World\"}}, \"world\"))\n\n\tassert.False(HTTPBodyNotContains(mockT, httpHelloName, \"GET\", \"/\", url.Values{\"name\": []string{\"World\"}}, \"Hello, World!\"))\n\tassert.False(HTTPBodyNotContains(mockT, httpHelloName, \"GET\", \"/\", url.Values{\"name\": []string{\"World\"}}, \"World\"))\n\tassert.True(HTTPBodyNotContains(mockT, httpHelloName, \"GET\", \"/\", url.Values{\"name\": []string{\"World\"}}, \"world\"))\n}\n\nfunc TestHttpBodyWrappers(t *testing.T) {\n\tassert := New(t)\n\tmockAssert := New(new(testing.T))\n\n\tassert.True(mockAssert.HTTPBodyContains(httpHelloName, \"GET\", \"/\", url.Values{\"name\": []string{\"World\"}}, \"Hello, World!\"))\n\tassert.True(mockAssert.HTTPBodyContains(httpHelloName, \"GET\", \"/\", url.Values{\"name\": []string{\"World\"}}, \"World\"))\n\tassert.False(mockAssert.HTTPBodyContains(httpHelloName, \"GET\", \"/\", url.Values{\"name\": []string{\"World\"}}, 
\"world\"))\n\n\tassert.False(mockAssert.HTTPBodyNotContains(httpHelloName, \"GET\", \"/\", url.Values{\"name\": []string{\"World\"}}, \"Hello, World!\"))\n\tassert.False(mockAssert.HTTPBodyNotContains(httpHelloName, \"GET\", \"/\", url.Values{\"name\": []string{\"World\"}}, \"World\"))\n\tassert.True(mockAssert.HTTPBodyNotContains(httpHelloName, \"GET\", \"/\", url.Values{\"name\": []string{\"World\"}}, \"world\"))\n\n}\n"
  },
  {
    "path": "vendor/github.com/stretchr/testify/doc.go",
    "content": "// Package testify is a set of packages that provide many tools for testifying that your code will behave as you intend.\n//\n// testify contains the following packages:\n//\n// The assert package provides a comprehensive set of assertion functions that tie in to the Go testing system.\n//\n// The http package contains tools to make it easier to test http activity using the Go testing system.\n//\n// The mock package provides a system by which it is possible to mock your objects and verify calls are happening as expected.\n//\n// The suite package provides a basic structure for using structs as testing suites, and methods on those structs as tests.  It includes setup/teardown functionality in the way of interfaces.\npackage testify\n\n// blank imports help docs.\nimport (\n\t// assert package\n\t_ \"github.com/stretchr/testify/assert\"\n\t// http package\n\t_ \"github.com/stretchr/testify/http\"\n\t// mock package\n\t_ \"github.com/stretchr/testify/mock\"\n)\n"
  },
  {
    "path": "vendor/github.com/stretchr/testify/package_test.go",
    "content": "package testify\n\nimport (\n\t\"github.com/stretchr/testify/assert\"\n\t\"testing\"\n)\n\nfunc TestImports(t *testing.T) {\n\tif assert.Equal(t, 1, 1) != true {\n\t\tt.Error(\"Something is wrong.\")\n\t}\n}\n"
  },
  {
    "path": "window.go",
    "content": "package morgoth\n\ntype Window struct {\n\tData []float64\n}\n\nfunc (self *Window) Copy() *Window {\n\tdata := make([]float64, len(self.Data))\n\tcopy(data, self.Data)\n\n\treturn &Window{\n\t\tData: data,\n\t}\n}\n"
  }
]