[
  {
    "path": "LICENSE",
    "content": "Copyright (c) 2015 The Go Authors. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n   * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n   * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n   * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n"
  },
  {
    "path": "abi/abi.go",
    "content": "// Copyright 2020 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\n// To analyze kubelet:\n//\n//     ( X=$PWD; cd -q ~/s/kubernetes && $X/abi $(go list -deps ./cmd/kubelet) )\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"go/types\"\n\t\"io\"\n\t\"log\"\n\t\"math\"\n\t\"os\"\n\t\"reflect\"\n\t\"sort\"\n\n\t\"golang.org/x/tools/go/packages\"\n)\n\nconst (\n\tminIntRegs = 0\n\tmaxIntRegs = 16\n\n\t// The number of floating-point registers has little\n\t// effect. Just fix it at 8.\n\tminFloatRegs = 8\n\tmaxFloatRegs = 8\n\n\t// Comparison mode.\n\tmodeCompare = false\n)\n\nfunc main() {\n\tflag.Parse()\n\tpkgPaths := flag.Args()\n\n\t// Get the package count to give the user some feedback.\n\tcfg := &packages.Config{}\n\tcfg.Mode = packages.NeedName\n\tpkgs, err := packages.Load(cfg, pkgPaths...)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif packages.PrintErrors(pkgs) > 0 {\n\t\tos.Exit(1)\n\t}\n\tfmt.Fprintf(os.Stderr, \"checking %d packages...\\n\", len(pkgs))\n\n\t// Parse and type-check the packages.\n\tcfg.Mode = packages.NeedName | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedTypesSizes\n\tpkgs, err = packages.Load(cfg, pkgPaths...)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif packages.PrintErrors(pkgs) > 0 {\n\t\tos.Exit(1)\n\t}\n\n\t// Extract all the functions.\n\tvar funcs []*types.Func\n\tvar sizes types.Sizes\n\tfor _, pkg := range pkgs {\n\t\tsizes = pkg.TypesSizes\n\t\tfor _, obj := range pkg.TypesInfo.Defs {\n\t\t\tif obj, ok := obj.(*types.Func); ok {\n\t\t\t\tfuncs = append(funcs, obj)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Analyze.\n\tqtiles := []float64{0.5, 0.95, 0.99}\n\tqtileLabels := []string{\"p50\", \"p95\", \"p99\"}\n\ttable := [][]interface{}{\n\t\t{\"\", \"\", \"\", \"stack args\", \"spills\", \"stack total\"},\n\t\t{\"ints\", \"floats\", \"% fit\", qtileLabels, qtileLabels, qtileLabels},\n\t}\n\tif 
modeCompare {\n\t\tqtiles = []float64{0.5, 0.95, 0.99, 1.0}\n\t\tqtileLabels = []string{\"p50\", \"p95\", \"p99\", \"max\"}\n\t\ttable = [][]interface{}{\n\t\t\t{\"\", \"\", \"\", \"\", \"Δ stack bytes\"},\n\t\t\t{\"ints\", \"floats\", \"Δ % fit\", \"diff\", qtileLabels, \"% bigger\"},\n\t\t}\n\t}\n\topts := ABIOptions{\n\t\tEmptyArray:   true,\n\t\tOneArray:     true,\n\t\tSplitArrays:  false,\n\t\tIgnoreBlank:  false,\n\t\tSpillRegs:    false,\n\t\tEmptyOnStack: true,\n\t}\n\tcmp := opts\n\tcmp.ABI0 = true\n\n\tconst infinity = math.MaxInt32\n\tanalyze := func(opts, cmp ABIOptions) {\n\t\tvar stackBytes []int\n\t\tvar spillBytes []int\n\t\tvar stackTotal []int\n\t\tvar overheads []int // Stack bytes vs ABI0\n\t\tfit := 0            // # functions that fit entirely in registers\n\t\tcmpFit := 0\n\t\tcmpDiff := 0   // # functions with any frame difference\n\t\tcmpLarger := 0 // # functions with larger stack frames in cmp\n\n\t\tfor _, f := range funcs {\n\t\t\tsig := f.Type().(*types.Signature)\n\n\t\t\tframe := opts.Assign(sig, sizes)\n\n\t\t\tstackBytes = append(stackBytes, frame.StackBytes)\n\t\t\tspillBytes = append(spillBytes, frame.StackSpillBytes)\n\t\t\tstackTotal = append(stackTotal, frame.StackTotal)\n\t\t\tif frame.StackBytes == 0 {\n\t\t\t\tfit++\n\t\t\t}\n\n\t\t\tif modeCompare {\n\t\t\t\t// Compare to alternate options.\n\t\t\t\tframeCmp := cmp.Assign(sig, sizes)\n\t\t\t\toverhead := frameCmp.StackTotal - frame.StackTotal\n\t\t\t\toverheads = append(overheads, overhead)\n\t\t\t\tif frameCmp.StackBytes == 0 {\n\t\t\t\t\tcmpFit++\n\t\t\t\t}\n\t\t\t\tif frame != frameCmp {\n\t\t\t\t\tcmpDiff++\n\t\t\t\t}\n\t\t\t\tif overhead > 0 {\n\t\t\t\t\tcmpLarger++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\trow := []interface{}{opts.IntRegs, opts.FloatRegs}\n\t\tif opts.IntRegs == infinity {\n\t\t\trow[0] = \"∞\"\n\t\t}\n\t\tif opts.FloatRegs == infinity {\n\t\t\trow[1] = \"∞\"\n\t\t}\n\n\t\tif modeCompare {\n\t\t\tpct := func(n int) string {\n\t\t\t\treturn 
fmt.Sprintf(\"%5.2f%%\", 100*float64(n)/float64(len(funcs)))\n\t\t\t}\n\t\t\trow = append(row, pct(cmpFit-fit))\n\t\t\trow = append(row, []interface{}{cmpDiff, pct(cmpDiff)})\n\t\t\trow = append(row, intQuantiles(overheads, qtiles...))\n\t\t\trow = append(row, pct(cmpLarger))\n\t\t} else {\n\t\t\trow = append(row, fmt.Sprintf(\"%4.1f%%\", 100*float64(fit)/float64(len(funcs))))\n\t\t\trow = append(row, intQuantiles(stackBytes, qtiles...))\n\t\t\trow = append(row, intQuantiles(spillBytes, qtiles...))\n\t\t\trow = append(row, intQuantiles(stackTotal, qtiles...))\n\t\t}\n\n\t\ttable = append(table, row)\n\t}\n\tanalyze(opts, cmp)\n\tfor opts.IntRegs = minIntRegs; opts.IntRegs <= maxIntRegs; opts.IntRegs++ {\n\t\tfor opts.FloatRegs = minFloatRegs; opts.FloatRegs <= maxFloatRegs; opts.FloatRegs++ {\n\t\t\tcmp.IntRegs, cmp.FloatRegs = opts.IntRegs, opts.FloatRegs\n\t\t\tanalyze(opts, cmp)\n\t\t}\n\t}\n\topts.IntRegs, opts.FloatRegs = infinity, maxFloatRegs\n\tcmp.IntRegs, cmp.FloatRegs = opts.IntRegs, opts.FloatRegs\n\tanalyze(opts, cmp)\n\n\t// Print results.\n\tprintTable(os.Stdout, table)\n}\n\ntype ABIOptions struct {\n\tIntRegs, FloatRegs int\n\n\tABI0 bool // Use ABI0 (other options are ignored)\n\n\tEmptyArray   bool // Empty arrays don't stack-assign\n\tOneArray     bool // Size-1 arrays don't stack-assign\n\tSplitArrays  bool // Stack-assign arrays separately from rest of arg\n\tIgnoreBlank  bool // Skip assigning blank fields\n\tSpillRegs    bool // Structure spill space as register words\n\tEmptyOnStack bool // Stack-assign zero-sized values\n}\n\ntype frameBuilder struct {\n\topts    *ABIOptions\n\tsizes   types.Sizes\n\tptrSize int\n\n\tints, floats int\n\n\tFrame\n}\n\ntype Frame struct {\n\tArgInts, ArgFloats int\n\tResInts, ResFloats int\n\n\tStackBytes      int // Stack bytes without spill slots\n\tStackSpillBytes int // Stack bytes of spill slots\n\tStackTotal      int // Stack bytes for complete argument frame.\n}\n\nfunc (a *ABIOptions) Assign(sig 
*types.Signature, sizes types.Sizes) Frame {\n\tptrSize := int(sizes.Sizeof(types.Typ[types.Uintptr]))\n\tf := frameBuilder{opts: a, sizes: sizes, ptrSize: ptrSize}\n\n\t// Arguments\n\tif r := sig.Recv(); r != nil {\n\t\tf.AddArg(r.Type(), true)\n\t}\n\tps := sig.Params()\n\tfor i := 0; i < ps.Len(); i++ {\n\t\tf.AddArg(ps.At(i).Type(), true)\n\t}\n\tf.ArgInts, f.ArgFloats = f.ints, f.floats\n\tf.StackBytes = align(f.StackBytes, ptrSize)\n\tf.StackSpillBytes = align(f.StackSpillBytes, ptrSize)\n\n\t// Results\n\tf.ints, f.floats = 0, 0\n\trs := sig.Results()\n\tfor i := 0; i < rs.Len(); i++ {\n\t\tf.AddArg(rs.At(i).Type(), false)\n\t}\n\tf.StackBytes = align(f.StackBytes, ptrSize)\n\tf.ResInts, f.ResFloats = f.ints, f.floats\n\n\tf.StackTotal = f.StackBytes + f.StackSpillBytes\n\n\treturn f.Frame\n}\n\nfunc (f *frameBuilder) AddArg(arg types.Type, needsSpill bool) {\n\tif f.opts.ABI0 {\n\t\tf.StackAssign(arg)\n\t\treturn\n\t}\n\n\tsi, sf, sb := f.ints, f.floats, f.StackBytes\n\tif f.RegAssign(arg, true) {\n\t\tif needsSpill {\n\t\t\t// Assign spill space.\n\t\t\tif f.opts.SpillRegs {\n\t\t\t\tf.StackSpillBytes += (f.ints-si)*f.ptrSize + (f.floats-sf)*8\n\t\t\t} else {\n\t\t\t\tf.StackSpillBytes = align(f.StackSpillBytes, int(f.sizes.Alignof(arg)))\n\t\t\t\tf.StackSpillBytes += int(f.sizes.Sizeof(arg))\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// Stack-assign the whole thing.\n\t\tf.ints, f.floats, f.StackBytes = si, sf, sb\n\t\tf.StackAssign(arg)\n\t}\n}\n\nfunc (f *frameBuilder) RegAssign(arg types.Type, top bool) bool {\n\tswitch arg := arg.(type) {\n\tdefault:\n\t\tlog.Fatal(\"unknown type: \", arg)\n\t\treturn false\n\n\tcase *types.Named:\n\t\treturn f.RegAssign(arg.Underlying(), top)\n\n\tcase *types.Array:\n\t\tif f.opts.EmptyArray && arg.Len() == 0 {\n\t\t\t// Special-case empty arrays.\n\t\t\treturn true\n\t\t}\n\t\tif f.opts.OneArray && arg.Len() == 1 {\n\t\t\t// Special-case length-1 arrays.\n\t\t\treturn f.RegAssign(arg.Elem(), false)\n\t\t}\n\t\tif 
f.opts.SplitArrays {\n\t\t\t// Arrays can go on the stack without failing\n\t\t\t// the whole argument.\n\t\t\tf.StackAssign(arg)\n\t\t\treturn true\n\t\t} else {\n\t\t\t// Arrays fail the whole argument.\n\t\t\treturn false\n\t\t}\n\n\tcase *types.Struct:\n\t\tfor i := 0; i < arg.NumFields(); i++ {\n\t\t\tif f.opts.IgnoreBlank && arg.Field(i).Name() == \"_\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !f.RegAssign(arg.Field(i).Type(), false) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\n\tcase *types.Basic:\n\t\tswitch arg.Kind() {\n\t\tcase types.Bool, types.Int, types.Int8, types.Int16, types.Int32, types.Int64,\n\t\t\ttypes.Uint, types.Uint8, types.Uint16, types.Uint32, types.Uint64, types.Uintptr:\n\t\t\t// TODO: 64-bit on 32-bit arch needs two regs.\n\t\t\tf.ints++\n\n\t\tcase types.Float32, types.Float64:\n\t\t\tf.floats++\n\n\t\tcase types.Complex64, types.Complex128:\n\t\t\tf.floats += 2\n\n\t\tcase types.String:\n\t\t\tf.ints += 2\n\n\t\tcase types.UnsafePointer:\n\t\t\tf.ints++\n\n\t\tdefault:\n\t\t\tlog.Fatal(\"unknown basic kind: \", arg)\n\t\t}\n\n\tcase *types.Chan, *types.Map, *types.Pointer, *types.Signature:\n\t\t// These are all represented as a single pointer word.\n\t\tf.ints++\n\n\tcase *types.Interface:\n\t\t// Two pointer words.\n\t\tf.ints += 2\n\n\tcase *types.Slice:\n\t\t// One pointer word plus two scalar words.\n\t\tf.ints += 3\n\t}\n\n\t// Check for out-of-registers.\n\treturn f.ints <= f.opts.IntRegs && f.floats <= f.opts.FloatRegs\n}\n\nfunc (f *frameBuilder) StackAssign(arg types.Type) {\n\tf.StackBytes = align(f.StackBytes, int(f.sizes.Alignof(arg)))\n\tf.StackBytes += int(f.sizes.Sizeof(arg))\n}\n\nfunc align(x, n int) int {\n\treturn (x + n - 1) &^ (n - 1)\n}\n\nfunc intQuantiles(xs []int, qs ...float64) []int {\n\tsort.Ints(xs)\n\tvs := make([]int, 0, len(qs))\n\tfor _, q := range qs {\n\t\ti := int(q * float64(len(xs)))\n\t\tif i < 0 {\n\t\t\ti = 0\n\t\t} else if i >= len(xs) {\n\t\t\ti = len(xs) - 1\n\t\t}\n\t\tvs = append(vs, 
xs[i])\n\t}\n\treturn vs\n}\n\nfunc floatQuantiles(xs []float64, qs ...float64) []float64 {\n\tsort.Float64s(xs)\n\tvs := make([]float64, 0, len(qs))\n\tfor _, q := range qs {\n\t\ti := int(q * float64(len(xs)))\n\t\tif i < 0 {\n\t\t\ti = 0\n\t\t} else if i >= len(xs) {\n\t\t\ti = len(xs) - 1\n\t\t}\n\t\tvs = append(vs, xs[i])\n\t}\n\treturn vs\n}\n\nfunc printTable(w io.Writer, table [][]interface{}) {\n\ttype layoutNode struct {\n\t\tw        int\n\t\tchildren []*layoutNode\n\t}\n\ttype cellKey struct {\n\t\trow int\n\t\tcol *layoutNode\n\t}\n\n\t// Stringify cells and construct layout\n\tcells := make(map[cellKey]string)\n\tlayout := &layoutNode{}\n\tvar walk func(ri int, row reflect.Value, node *layoutNode) int\n\twalk = func(ri int, row reflect.Value, node *layoutNode) int {\n\t\tif row.Kind() == reflect.Interface {\n\t\t\trow = row.Elem()\n\t\t}\n\n\t\tif row.Kind() != reflect.Slice {\n\t\t\t// This is a cell.\n\t\t\tval := fmt.Sprint(row)\n\t\t\tif len(val) > node.w {\n\t\t\t\tnode.w = len(val)\n\t\t\t}\n\t\t\tcells[cellKey{ri, node}] = val\n\t\t\treturn node.w\n\t\t}\n\n\t\t// This is a slice.\n\t\ttotalW := 0\n\t\trowLen := row.Len()\n\t\tfor vi := 0; vi < rowLen; vi++ {\n\t\t\tvar child *layoutNode\n\t\t\tif vi < len(node.children) {\n\t\t\t\tchild = node.children[vi]\n\t\t\t} else {\n\t\t\t\tchild = &layoutNode{}\n\t\t\t\tnode.children = append(node.children, child)\n\t\t\t}\n\t\t\ttotalW += walk(ri, row.Index(vi), child)\n\t\t}\n\t\t// Add in interior column spacing.\n\t\ttotalW += 3 * (rowLen - 1)\n\t\tif totalW > node.w {\n\t\t\tnode.w = totalW\n\t\t}\n\t\treturn node.w\n\t}\n\tfor ri, row := range table {\n\t\twalk(ri, reflect.ValueOf(row), layout)\n\t}\n\n\t// Print table\n\tvar printNode func(ri int, node *layoutNode, fillW int)\n\tprintNode = func(ri int, node *layoutNode, fillW int) {\n\t\tif val, ok := cells[cellKey{ri, node}]; ok {\n\t\t\tif fillW < node.w {\n\t\t\t\tfillW = node.w\n\t\t\t}\n\t\t\tfmt.Fprintf(w, \"| %*s \", fillW, 
val)\n\t\t\treturn\n\t\t}\n\n\t\tfor ci, child := range node.children {\n\t\t\tparentW := 0\n\t\t\tif ci == len(node.children)-1 {\n\t\t\t\tparentW = fillW\n\t\t\t} else {\n\t\t\t\tfillW -= child.w\n\t\t\t}\n\t\t\tprintNode(ri, child, parentW)\n\t\t}\n\t}\n\tfor ri := range table {\n\t\tprintNode(ri, layout, 0)\n\t\tfmt.Fprintf(w, \"|\\n\")\n\t}\n}\n"
  },
  {
    "path": "abi/go.mod",
    "content": "module abi\n\ngo 1.15\n\nrequire golang.org/x/tools v0.0.0-20200815165600-90abf76919f3\n"
  },
  {
    "path": "abi/go.sum",
    "content": "github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngolang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=\ngolang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=\ngolang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=\ngolang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=\ngolang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20200815165600-90abf76919f3 h1:0aScV/0rLmANzEYIhjCOi2pTvDyhZNduBUMD2q3iqs4=\ngolang.org/x/tools v0.0.0-20200815165600-90abf76919f3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=\ngolang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors 
v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=\ngolang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\n"
  },
  {
    "path": "bench/parse.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package bench reads and writes Go benchmarks results files.\n//\n// This format is specified at:\n// https://github.com/golang/proposal/blob/master/design/14313-benchmark-format.md\npackage bench\n\nimport (\n\t\"bufio\"\n\t\"io\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\t\"unicode\"\n\t\"unicode/utf8\"\n)\n\n// Benchmark records the configuration and results of a single\n// benchmark run (a single line of a benchmark results file).\ntype Benchmark struct {\n\t// Name is the name of the benchmark, without the \"Benchmark\"\n\t// prefix and without the trailing GOMAXPROCS number.\n\tName string\n\n\t// Iterations is the number of times this benchmark executed.\n\tIterations int\n\n\t// Config is the set of configuration pairs for this\n\t// Benchmark. These can be specified in both configuration\n\t// blocks and in individual benchmark lines. If the benchmark\n\t// name is of the form \"BenchmarkX-N\", the N is stripped out\n\t// and stored as \"gomaxprocs\" here.\n\tConfig map[string]*Config\n\n\t// Result is the set of (unit, value) metrics for this\n\t// benchmark run.\n\tResult map[string]float64\n}\n\n// Config represents a single key/value configuration pair.\ntype Config struct {\n\t// Value is the parsed value of this configuration value.\n\tValue interface{}\n\n\t// RawValue is the value of this configuration value, exactly\n\t// as written in the original benchmark file.\n\tRawValue string\n\n\t// InBlock indicates that this configuration value was\n\t// specified in a configuration block line. 
Otherwise, it was\n\t// specified in the benchmark line.\n\tInBlock bool\n}\n\nvar configRe = regexp.MustCompile(`^(\\p{Ll}[^\\p{Lu}\\s\\x85\\xa0\\x{1680}\\x{2000}-\\x{200a}\\x{2028}\\x{2029}\\x{202f}\\x{205f}\\x{3000}]*):(?:[ \\t]+(.*))?$`)\n\n// Parse parses a standard Go benchmark results file from r. It\n// returns a *Benchmark for each benchmark result line in the file.\n// There may be many result lines for the same benchmark name and\n// configuration, indicating that the benchmark was run multiple\n// times.\n//\n// In the returned Benchmarks, RawValue is set, but Value is always\n// nil. Use ParseValues to convert raw values to structured types.\nfunc Parse(r io.Reader) ([]*Benchmark, error) {\n\tbenchmarks := []*Benchmark{}\n\tconfig := make(map[string]*Config)\n\n\tscanner := bufio.NewScanner(r)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\n\t\tif line == \"testing: warning: no tests to run\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Configuration lines.\n\t\tm := configRe.FindStringSubmatch(line)\n\t\tif m != nil {\n\t\t\tconfig[m[1]] = &Config{RawValue: m[2], InBlock: true}\n\t\t\tcontinue\n\t\t}\n\n\t\t// Benchmark lines.\n\t\tif strings.HasPrefix(line, \"Benchmark\") {\n\t\t\tb := parseBenchmark(line, config)\n\t\t\tif b != nil {\n\t\t\t\tbenchmarks = append(benchmarks, b)\n\t\t\t}\n\t\t}\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn benchmarks, nil\n}\n\nfunc parseBenchmark(line string, gconfig map[string]*Config) *Benchmark {\n\t// TODO: Consider using scanner to avoid the slice allocation.\n\tf := strings.Fields(line)\n\tif len(f) < 4 {\n\t\treturn nil\n\t}\n\tif f[0] != \"Benchmark\" {\n\t\tnext, _ := utf8.DecodeRuneInString(f[0][len(\"Benchmark\"):])\n\t\tif !unicode.IsUpper(next) {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tb := &Benchmark{\n\t\tConfig: make(map[string]*Config),\n\t\tResult: make(map[string]float64),\n\t}\n\n\t// Copy global config.\n\tfor k, v := range gconfig {\n\t\tb.Config[k] = v\n\t}\n\n\t// 
Parse name and configuration.\n\tname := strings.TrimPrefix(f[0], \"Benchmark\")\n\tif strings.Contains(name, \"/\") {\n\t\tparts := strings.Split(name, \"/\")\n\t\tb.Name = parts[0]\n\t\tfor _, part := range parts[1:] {\n\t\t\tif i := strings.Index(part, \":\"); i >= 0 {\n\t\t\t\tk, v := part[:i], part[i+1:]\n\t\t\t\tb.Config[k] = &Config{RawValue: v}\n\t\t\t}\n\t\t}\n\t} else if i := strings.LastIndex(name, \"-\"); i >= 0 {\n\t\t_, err := strconv.Atoi(name[i+1:])\n\t\tif err == nil {\n\t\t\tb.Name = name[:i]\n\t\t\tb.Config[\"gomaxprocs\"] = &Config{RawValue: name[i+1:]}\n\t\t} else {\n\t\t\tb.Name = name\n\t\t}\n\t} else {\n\t\tb.Name = name\n\t}\n\tif b.Config[\"gomaxprocs\"] == nil {\n\t\tb.Config[\"gomaxprocs\"] = &Config{RawValue: \"1\"}\n\t}\n\n\t// Parse iterations.\n\tn, err := strconv.Atoi(f[1])\n\tif err != nil || n <= 0 {\n\t\treturn nil\n\t}\n\tb.Iterations = n\n\n\t// Parse results.\n\tfor i := 2; i+2 <= len(f); i += 2 {\n\t\tval, err := strconv.ParseFloat(f[i], 64)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tb.Result[f[i+1]] = val\n\t}\n\n\treturn b\n}\n\n// ValueParser is a function that parses a string value into a\n// structured type or returns an error if the string cannot be parsed.\ntype ValueParser func(string) (interface{}, error)\n\n// DefaultValueParsers is the default sequence of value parsers used\n// by ParseValues if no parsers are specified.\nvar DefaultValueParsers = []ValueParser{\n\tfunc(s string) (interface{}, error) { return strconv.Atoi(s) },\n\tfunc(s string) (interface{}, error) { return strconv.ParseFloat(s, 64) },\n\tfunc(s string) (interface{}, error) { return time.ParseDuration(s) },\n}\n\n// ParseValues parses the raw configuration values in benchmarks into\n// structured types using best-effort pattern-based parsing.\n//\n// If all of the raw values for a given configuration key can be\n// parsed by one of the valueParsers, ParseValues sets the parsed\n// values to the results of that ValueParser. 
If multiple ValueParsers\n// can parse all of the raw values, it uses the earliest such parser\n// in the valueParsers list.\n//\n// If valueParsers is nil, it uses DefaultValueParsers.\nfunc ParseValues(benchmarks []*Benchmark, valueParsers []ValueParser) {\n\tif valueParsers == nil {\n\t\tvalueParsers = DefaultValueParsers\n\t}\n\n\t// Collect all configuration keys.\n\tkeys := map[string]bool{}\n\tfor _, b := range benchmarks {\n\t\tfor k := range b.Config {\n\t\t\tkeys[k] = true\n\t\t}\n\t}\n\n\t// For each configuration key, try value parsers in priority order.\n\tfor key := range keys {\n\t\tgood := false\n\ttryParsers:\n\t\tfor _, vp := range valueParsers {\n\t\t\t// Clear all values. This way we can detect\n\t\t\t// aliasing and not parse the same value\n\t\t\t// multiple times.\n\t\t\tfor _, b := range benchmarks {\n\t\t\t\tc, ok := b.Config[key]\n\t\t\t\tif ok {\n\t\t\t\t\tc.Value = nil\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tgood = true\n\t\ttryValues:\n\t\t\tfor _, b := range benchmarks {\n\t\t\t\tc, ok := b.Config[key]\n\t\t\t\tif !ok || c.Value != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tres, err := vp(c.RawValue)\n\t\t\t\tif err != nil {\n\t\t\t\t\t// Parse error. Fail this parser.\n\t\t\t\t\tgood = false\n\t\t\t\t\tbreak tryValues\n\t\t\t\t}\n\t\t\t\tc.Value = res\n\t\t\t}\n\n\t\t\tif good {\n\t\t\t\t// This ValueParser converted all of\n\t\t\t\t// the values.\n\t\t\t\tbreak tryParsers\n\t\t\t}\n\t\t}\n\t\tif !good {\n\t\t\t// All of the value parsers failed. Fall back\n\t\t\t// to strings.\n\t\t\tfor _, b := range benchmarks {\n\t\t\t\tc, ok := b.Config[key]\n\t\t\t\tif ok {\n\t\t\t\t\tc.Value = nil\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, b := range benchmarks {\n\t\t\t\tc, ok := b.Config[key]\n\t\t\t\tif ok && c.Value == nil {\n\t\t\t\t\tc.Value = c.RawValue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "bench/parse_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage bench\n\nimport (\n\t\"bytes\"\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestParse(t *testing.T) {\n\tfor _, test := range []struct {\n\t\tinput string\n\t\twant  []*Benchmark\n\t}{\n\t\t// Test basic line.\n\t\t{`\nBenchmarkX\t1\t2 ns/op 3 MB/s`,\n\t\t\t[]*Benchmark{\n\t\t\t\t{\"X\", 1, map[string]*Config{}, map[string]float64{\"ns/op\": 2, \"MB/s\": 3}},\n\t\t\t},\n\t\t},\n\n\t\t// Test short name.\n\t\t{`\nBenchmark\t1\t2 ns/op`,\n\t\t\t[]*Benchmark{\n\t\t\t\t{\"\", 1, map[string]*Config{}, map[string]float64{\"ns/op\": 2}},\n\t\t\t},\n\t\t},\n\n\t\t// Test bad names.\n\t\t{`\nBenchmarkx\t1\t2 ns/op\nbenchmarkx\t1\t2 ns/op\nbenchmarkX\t1\t2 ns/op`,\n\t\t\t[]*Benchmark{},\n\t\t},\n\n\t\t// Test short lines.\n\t\t{`\nBenchmarkX\nBenchmarkX\t1\nBenchmarkX\t1\t2`,\n\t\t\t[]*Benchmark{},\n\t\t},\n\n\t\t// Test -N.\n\t\t{`\nBenchmarkX-4\t1\t2 ns/op`,\n\t\t\t[]*Benchmark{\n\t\t\t\t{\"X\", 1, map[string]*Config{\n\t\t\t\t\t\"gomaxprocs\": &Config{\"gomaxprocs\", \"4\", \"4\", false},\n\t\t\t\t}, map[string]float64{\"ns/op\": 2}},\n\t\t\t},\n\t\t},\n\n\t\t// Test per-benchmark config.\n\t\t{`\nBenchmarkX/a:20/b:abc\t1\t2 ns/op\nBenchmarkY/c:123\t2\t4 ns/op`,\n\t\t\t[]*Benchmark{\n\t\t\t\t{\"X\", 1, map[string]*Config{\n\t\t\t\t\t\"a\": &Config{\"a\", \"20\", \"20\", false},\n\t\t\t\t\t\"b\": &Config{\"b\", \"abc\", \"abc\", false},\n\t\t\t\t}, map[string]float64{\"ns/op\": 2}},\n\t\t\t\t{\"Y\", 2, map[string]*Config{\n\t\t\t\t\t\"c\": &Config{\"c\", \"123\", \"123\", false},\n\t\t\t\t}, map[string]float64{\"ns/op\": 4}},\n\t\t\t},\n\t\t},\n\n\t\t// Test block config.\n\t\t{`\ncommit: 123456\ndate: Jan 1\ncolon:colon: 42\nblank:\n#not-config: x\nspa ce: x\nfunny space: x\nNot-config: x\nBenchmarkX\t1\t2 ns/op`,\n\t\t\t[]*Benchmark{\n\t\t\t\t{\"X\", 1, 
map[string]*Config{\n\t\t\t\t\t\"commit\":      &Config{nil, \"123456\", true},\n\t\t\t\t\t\"date\":        &Config{nil, \"Jan 1\", true},\n\t\t\t\t\t\"colon:colon\": &Config{nil, \"42\", true},\n\t\t\t\t\t\"blank\":       &Config{nil, \"\", true},\n\t\t\t\t}, map[string]float64{\"ns/op\": 2}},\n\t\t\t},\n\t\t},\n\n\t\t// Test benchmark config overriding block config.\n\t\t{`\ncommit: 123456\ndate: Jan 1\nBenchmarkX/commit:abcdef\t1\t2 ns/op`,\n\t\t\t[]*Benchmark{\n\t\t\t\t{\"X\", 1, map[string]*Config{\n\t\t\t\t\t\"commit\": &Config{nil, \"abcdef\", false},\n\t\t\t\t\t\"date\":   &Config{nil, \"Jan 1\", true},\n\t\t\t\t}, map[string]float64{\"ns/op\": 2}},\n\t\t\t},\n\t\t},\n\n\t\t// Test block config overriding block config.\n\t\t{`\ncommit: 123456\ncommit: abcdef\ndate: Jan 1\nBenchmarkX\t1\t2 ns/op`,\n\t\t\t[]*Benchmark{\n\t\t\t\t{\"X\", 1, map[string]*Config{\n\t\t\t\t\t\"commit\": &Config{nil, \"abcdef\", true},\n\t\t\t\t\t\"date\":   &Config{nil, \"Jan 1\", true},\n\t\t\t\t}, map[string]float64{\"ns/op\": 2}},\n\t\t\t},\n\t\t},\n\t} {\n\t\tr := bytes.NewBufferString(test.input)\n\t\tbs, err := Parse(r)\n\t\tif err != nil {\n\t\t\tt.Error(\"unexpected Parse error\", err)\n\t\t\tcontinue\n\t\t}\n\t\tif !reflect.DeepEqual(bs, test.want) {\n\t\t\tt.Log(\"want:\")\n\t\t\tfor _, b := range test.want {\n\t\t\t\tt.Logf(\"%#v\", b)\n\t\t\t}\n\t\t\tt.Log(\"got:\")\n\t\t\tfor _, b := range bs {\n\t\t\t\tt.Logf(\"%#v\", b)\n\t\t\t}\n\t\t\tt.Fail()\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "bench/print.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage bench\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"sort\"\n\t\"strings\"\n)\n\nfunc Print(bs []*Benchmark) error {\n\treturn Fprint(os.Stdout, bs)\n}\n\nfunc Fprint(w io.Writer, bs []*Benchmark) error {\n\ttype kv struct {\n\t\tk, v string\n\t}\n\ttype block struct {\n\t\tconfig []kv\n\t\tbs     []*Benchmark\n\t}\n\n\tconfigKeys := func(b *Benchmark, inBlock bool) []string {\n\t\tvar keys []string\n\t\tfor k, config := range b.Config {\n\t\t\tif config.InBlock == inBlock {\n\t\t\t\tkeys = append(keys, k)\n\t\t\t}\n\t\t}\n\t\tsort.Strings(keys)\n\t\treturn keys\n\t}\n\n\t// Split bs into configuration blocks.\n\tblocks := []block{}\n\tlastConfig := map[string]string{}\n\tfor _, b := range bs {\n\t\t// Find changed block configuration.\n\t\tvar changed []kv\n\t\tfor _, k := range configKeys(b, true) {\n\t\t\tconfig := b.Config[k]\n\t\t\tlc, ok := lastConfig[k]\n\t\t\tif ok && lc == config.RawValue {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tchanged = append(changed, kv{k, config.RawValue})\n\t\t\tlastConfig[k] = config.RawValue\n\t\t}\n\n\t\tif len(blocks) == 0 || changed != nil {\n\t\t\t// Start a new configuration block.\n\t\t\tblocks = append(blocks, block{changed, nil})\n\t\t}\n\n\t\t// Add benchmark to latest block.\n\t\tbbs := &blocks[len(blocks)-1].bs\n\t\t*bbs = append(*bbs, b)\n\t}\n\n\t// Format each configuration block.\n\tfor i, block := range blocks {\n\t\t// Print configuration values.\n\t\tif i > 0 {\n\t\t\tif _, err := fmt.Fprint(w, \"\\n\"); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tfor _, kv := range block.config {\n\t\t\t// TODO: Syntax check.\n\t\t\tif _, err := fmt.Fprintf(w, \"%s: %s\\n\", kv.k, kv.v); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif len(block.config) > 0 {\n\t\t\tif _, err := fmt.Fprint(w, \"\\n\"); err != nil 
{\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\t// Construct benchmark lines.\n\t\tlines := make([][]string, 0, len(block.bs))\n\t\tfor _, b := range block.bs {\n\t\t\t// Construct benchmark name.\n\t\t\tname := []string{\"Benchmark\" + b.Name}\n\t\t\tgomaxprocs, haveGMP := \"\", false\n\t\t\tfor _, k := range configKeys(b, false) {\n\t\t\t\tconfig := b.Config[k]\n\t\t\t\tif k == \"gomaxprocs\" {\n\t\t\t\t\tgomaxprocs = config.RawValue\n\t\t\t\t\thaveGMP = true\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// TODO: Syntax check.\n\t\t\t\tname = append(name, fmt.Sprintf(\"%s:%s\", k, config.RawValue))\n\t\t\t}\n\t\t\tif haveGMP && gomaxprocs != \"1\" {\n\t\t\t\tif len(name) == 1 {\n\t\t\t\t\t// Use short form.\n\t\t\t\t\tname[0] = fmt.Sprintf(\"%s-%s\", name[0], gomaxprocs)\n\t\t\t\t} else {\n\t\t\t\t\tname = append(name, fmt.Sprintf(\"gomaxprocs:%s\", gomaxprocs))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Construct results.\n\t\t\tline := []string{\n\t\t\t\tstrings.Join(name, \"/\"),\n\t\t\t\tfmt.Sprint(b.Iterations),\n\t\t\t}\n\t\t\tresultKeys := []string{}\n\t\t\tfor k := range b.Result {\n\t\t\t\tresultKeys = append(resultKeys, k)\n\t\t\t}\n\t\t\tsort.Sort(resultKeySorter(resultKeys))\n\t\t\tfor _, k := range resultKeys {\n\t\t\t\tresult := b.Result[k]\n\t\t\t\t// TODO: Syntax check.\n\t\t\t\tline = append(line, fmt.Sprint(result), k)\n\t\t\t}\n\n\t\t\tlines = append(lines, line)\n\t\t}\n\n\t\t// Compute column widths.\n\t\twidths := make([]int, 0)\n\t\tfor _, line := range lines {\n\t\t\tfor i, elt := range line {\n\t\t\t\tif i >= len(widths) {\n\t\t\t\t\twidths = append(widths, len(elt))\n\t\t\t\t} else if len(elt) > widths[i] {\n\t\t\t\t\twidths[i] = len(elt)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Print lines.\n\t\tfor _, line := range lines {\n\t\t\tfor i, elt := range line {\n\t\t\t\tvar err error\n\t\t\t\tp := widths[i]\n\t\t\t\tif i == 1 || i >= 2 && i%2 == 0 {\n\t\t\t\t\t// Right align.\n\t\t\t\t\t_, err = fmt.Fprintf(w, \"%*s  \", p, elt)\n\t\t\t\t} else if i < len(line)-1 
{\n\t\t\t\t\t// Left align and pad.\n\t\t\t\t\t_, err = fmt.Fprintf(w, \"%-*s  \", p, elt)\n\t\t\t\t} else {\n\t\t\t\t\t// Left align, no pad, EOL.\n\t\t\t\t\t_, err = fmt.Fprintf(w, \"%s\\n\", elt)\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nvar fixedKeys = map[string]int{\n\t\"ns/op\": -2,\n\t\"MB/s\":  -1,\n}\n\ntype resultKeySorter []string\n\nfunc (s resultKeySorter) Len() int {\n\treturn len(s)\n}\n\nfunc (s resultKeySorter) Less(i, j int) bool {\n\tif fixedKeys[s[i]] != fixedKeys[s[j]] {\n\t\treturn fixedKeys[s[i]] < fixedKeys[s[j]]\n\t}\n\n\treturn s[i] < s[j]\n}\n\nfunc (s resultKeySorter) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n}\n"
  },
  {
    "path": "benchcmd/main.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Command benchcmd times a shell command using Go benchmark format.\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"os/exec\"\n\t\"time\"\n)\n\nfunc main() {\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage: %s [-n iters] benchname cmd...\\n\", os.Args[0])\n\t\tflag.PrintDefaults()\n\t}\n\tn := flag.Int(\"n\", 5, \"iterations\")\n\tflag.Parse()\n\tif flag.NArg() < 2 {\n\t\tflag.Usage()\n\t\tos.Exit(2)\n\t}\n\tbenchname := flag.Arg(0)\n\targs := flag.Args()[1:]\n\n\tfor i := 0; i < *n; i++ {\n\t\tcmd := exec.Command(args[0], args[1:]...)\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stderr\n\t\tbefore := time.Now()\n\t\tif err := cmd.Run(); err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tafter := time.Now()\n\t\tfmt.Printf(\"Benchmark%s\\t\", benchname)\n\t\tfmt.Printf(\"%d\\t%d ns/op\", 1, after.Sub(before))\n\t\tfmt.Printf(\"\\t%d user-ns/op\\t%d sys-ns/op\", cmd.ProcessState.UserTime(), cmd.ProcessState.SystemTime())\n\t\tif maxrss, ok := getMaxRSS(cmd.ProcessState); ok {\n\t\t\tfmt.Printf(\"\\t%d peak-RSS-bytes\", maxrss)\n\t\t}\n\t\tfmt.Printf(\"\\n\")\n\t}\n}\n"
  },
  {
    "path": "benchcmd/rss_nounix.go",
    "content": "// Copyright 2025 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n//go:build !unix\n\npackage main\n\nimport \"os\"\n\nfunc getMaxRSS(ps *os.ProcessState) (bytes uint64, ok bool) {\n\treturn 0, false\n}\n"
  },
  {
    "path": "benchcmd/rss_unix.go",
    "content": "// Copyright 2025 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n//go:build unix\n\npackage main\n\nimport (\n\t\"os\"\n\t\"runtime\"\n\t\"syscall\"\n)\n\nfunc getMaxRSS(ps *os.ProcessState) (bytes uint64, ok bool) {\n\tru, ok := ps.SysUsage().(*syscall.Rusage)\n\tif !ok {\n\t\treturn 0, false\n\t}\n\n\tvar rssToBytes uint64\n\tswitch runtime.GOOS {\n\tdefault:\n\t\treturn 0, false\n\tcase \"aix\", \"android\", \"dragonfly\", \"freebsd\", \"linux\", \"netbsd\", \"openbsd\":\n\t\trssToBytes = 1 << 10\n\tcase \"darwin\", \"ios\":\n\t\trssToBytes = 1\n\tcase \"illumos\", \"solaris\":\n\t\trssToBytes = uint64(syscall.Getpagesize())\n\t}\n\treturn uint64(ru.Maxrss) * rssToBytes, true\n}\n"
  },
  {
    "path": "benchmany/benchmany.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Benchmany runs Go benchmarks across many git commits.\n//\n// Usage:\n//\n//      benchmany [-C git-dir] [-n iterations] <commit or range>...\n//\n// benchmany runs the benchmarks in the current directory <iterations>\n// times for each commit in <commit or range> and writes the benchmark\n// results to bench.log. Benchmarks may be Go testing framework\n// benchmarks or benchmarks from golang.org/x/benchmarks.\n//\n// <commit or range>... can be either a list of individual commits or\n// a revision range. For the spelling of a revision range, see\n// \"SPECIFYING RANGES\" in gitrevisions(7). For exact details, see the\n// --no-walk option to git-rev-list(1).\n//\n// Benchmany will check out each revision in git-dir. The current\n// directory may or may not be in the same git repository as git-dir.\n// If git-dir refers to a Go installation, benchmany will run\n// make.bash at each revision; otherwise, it assumes go test can\n// rebuild the necessary dependencies. Benchmany also supports using\n// gover (https://godoc.org/github.com/aclements/go-misc/gover) to\n// save and reuse Go build trees. This is useful for saving time\n// across multiple benchmark runs and for benchmarks that depend on\n// the Go tree itself (such as compiler benchmarks).\n//\n// Benchmany supports multiple ways of prioritizing the order in which\n// individual iterations are run. By default, it runs in \"sequential\"\n// mode: it runs the first iteration of all benchmarks, then the\n// second, and so forth. It also supports a \"spread\" mode designed to\n// quickly get coverage for large sets of revisions. 
This mode\n// randomizes the order to run iterations in, but biases this order\n// toward covering an evenly distributed set of revisions early and\n// finishing all of the iterations of the revisions it has started on\n// before moving on to new revisions. This way, if benchmany is\n// interrupted, the revisions benchmarked cover the space more-or-less\n// evenly. Finally, it supports a \"metric\" mode, which zeroes in on\n// changes in a benchmark metric by selecting the commit half way\n// between the pair of commits with the biggest difference in the\n// metric. This is like \"git bisect\", but for performance.\n//\n// Benchmany is safe to interrupt. If it is restarted, it will parse\n// the benchmark log files to recover its state.\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"os/exec\"\n\t\"strings\"\n)\n\nvar gitDir string\nvar dryRun bool\n\n// maxFails is the maximum number of benchmark run failures to\n// tolerate for a commit before giving up on trying to benchmark that\n// commit. Build failures always disqualify a commit.\nconst maxFails = 5\n\nfunc main() {\n\tflag.Parse()\n\tdoRun()\n}\n\n// git runs git subcommand subcmd and returns its stdout. 
If git\n// fails, it prints the failure and exits.\nfunc git(subcmd string, args ...string) string {\n\tgitargs := []string{}\n\tif gitDir != \"\" {\n\t\tgitargs = append(gitargs, \"-C\", gitDir)\n\t}\n\tgitargs = append(gitargs, subcmd)\n\tgitargs = append(gitargs, args...)\n\tcmd := exec.Command(\"git\", gitargs...)\n\tcmd.Stderr = os.Stderr\n\tif dryRun {\n\t\tdryPrint(cmd)\n\t\tif !(subcmd == \"rev-parse\" || subcmd == \"rev-list\" || subcmd == \"show\") {\n\t\t\treturn \"\"\n\t\t}\n\t}\n\tout, err := cmd.Output()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"git %s failed: %s\\n\", shellEscapeList(gitargs), err)\n\t\tos.Exit(1)\n\t}\n\treturn string(out)\n}\n\nfunc dryPrint(cmd *exec.Cmd) {\n\tout := shellEscape(cmd.Path)\n\tfor _, a := range cmd.Args[1:] {\n\t\tout += \" \" + shellEscape(a)\n\t}\n\tif cmd.Dir != \"\" {\n\t\tout = fmt.Sprintf(\"(cd %s && %s)\", shellEscape(cmd.Dir), out)\n\t}\n\tfmt.Fprintln(os.Stderr, out)\n}\n\nfunc shellEscape(x string) string {\n\tif len(x) == 0 {\n\t\treturn \"''\"\n\t}\n\tfor _, r := range x {\n\t\tif 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || '0' <= r && r <= '9' || strings.ContainsRune(\"@%_-+:,./\", r) {\n\t\t\tcontinue\n\t\t}\n\t\t// Unsafe character.\n\t\treturn \"'\" + strings.Replace(x, \"'\", \"'\\\"'\\\"'\", -1) + \"'\"\n\t}\n\treturn x\n}\n\nfunc shellEscapeList(xs []string) string {\n\tout := make([]string, len(xs))\n\tfor i, x := range xs {\n\t\tout[i] = shellEscape(x)\n\t}\n\treturn strings.Join(out, \" \")\n}\n\nfunc exists(path string) bool {\n\t_, err := os.Stat(path)\n\treturn !os.IsNotExist(err)\n}\n\nfunc trimNL(s string) string {\n\treturn strings.TrimRight(s, \"\\n\")\n}\n\n// indent returns s with each line indented by four spaces. 
If s is\n// non-empty, the returned string is guaranteed to end in a \"\\n\".\nfunc indent(s string) string {\n\tif len(s) == 0 {\n\t\treturn s\n\t}\n\tif strings.HasSuffix(s, \"\\n\") {\n\t\ts = s[:len(s)-1]\n\t}\n\treturn \"    \" + strings.Replace(s, \"\\n\", \"\\n    \", -1) + \"\\n\"\n}\n\n// lines splits s in to lines. It omits a final blank line, if any.\nfunc lines(s string) []string {\n\tl := strings.Split(s, \"\\n\")\n\tif len(l) > 0 && l[len(l)-1] == \"\" {\n\t\tl = l[:len(l)-1]\n\t}\n\treturn l\n}\n"
  },
  {
    "path": "benchmany/commits.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os/user\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype commitInfo struct {\n\thash         string\n\tcommitDate   time.Time\n\tgover        bool\n\tlogPath      string\n\tcount, fails int\n\tbuildFailed  bool\n}\n\n// getCommits returns the commit info for all of the revisions in the\n// given git revision range, where the revision range is spelled as\n// documented in gitrevisions(7). Commits are returned in reverse\n// chronological order, most recent commit first (the same as\n// git-rev-list(1)).\nfunc getCommits(revRange []string, logPath string) []*commitInfo {\n\t// Get commit sequence.\n\targs := append(append([]string{\"--no-walk\"}, revRange...), \"--\")\n\thashes := lines(git(\"rev-list\", args...))\n\tcommits := make([]*commitInfo, len(hashes))\n\tcommitMap := make(map[string]*commitInfo)\n\tfor i, hash := range hashes {\n\t\tcommits[i] = &commitInfo{\n\t\t\thash:    hash,\n\t\t\tlogPath: logPath,\n\t\t}\n\t\tcommitMap[hash] = commits[i]\n\t}\n\n\t// Get commit dates.\n\t//\n\t// TODO: This can produce a huge command line.\n\targs = append([]string{\"-s\", \"--format=format:%cI\"}, hashes...)\n\tdates := lines(git(\"show\", args...))\n\tfor i := range commits {\n\t\td, err := time.Parse(time.RFC3339, dates[i])\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"cannot parse commit date: %v\", err)\n\t\t}\n\t\tcommits[i].commitDate = d\n\t}\n\n\t// Get gover-cached builds. 
It's okay if this fails.\n\tif fis, err := ioutil.ReadDir(goverDir()); err == nil {\n\t\tfor _, fi := range fis {\n\t\t\tif ci := commitMap[fi.Name()]; ci != nil && fi.IsDir() {\n\t\t\t\tci.gover = true\n\t\t\t}\n\t\t}\n\t}\n\n\t// Load current benchmark state.\n\tlogf, err := os.Open(logPath)\n\tif err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\tlog.Fatalf(\"opening %s: %v\", logPath, err)\n\t\t}\n\t} else {\n\t\tdefer logf.Close()\n\t\tparseLog(commitMap, logf)\n\t}\n\n\treturn commits\n}\n\n// goverDir returns the directory containing gover-cached builds.\nfunc goverDir() string {\n\tcache := os.Getenv(\"XDG_CACHE_HOME\")\n\tif cache == \"\" {\n\t\thome := os.Getenv(\"HOME\")\n\t\tif home == \"\" {\n\t\t\tu, err := user.Current()\n\t\t\tif err != nil {\n\t\t\t\thome = u.HomeDir\n\t\t\t}\n\t\t}\n\t\tcache = filepath.Join(home, \".cache\")\n\t}\n\treturn filepath.Join(cache, \"gover\")\n}\n\n// parseLog parses benchmark runs and failures from r and updates\n// commits in commitMap.\nfunc parseLog(commitMap map[string]*commitInfo, r io.Reader) {\n\tscanner := bufio.NewScanner(r)\n\tfor scanner.Scan() {\n\t\tb := scanner.Bytes()\n\t\tswitch {\n\t\tcase bytes.HasPrefix(b, []byte(\"commit: \")):\n\t\t\thash := scanner.Text()[len(\"commit: \"):]\n\t\t\tif ci := commitMap[hash]; ci != nil {\n\t\t\t\tci.count++\n\t\t\t}\n\n\t\tcase bytes.HasPrefix(b, []byte(\"# FAILED at \")):\n\t\t\thash := scanner.Text()[len(\"# FAILED at \"):]\n\t\t\tif ci := commitMap[hash]; ci != nil {\n\t\t\t\tci.fails++\n\t\t\t}\n\n\t\tcase bytes.HasPrefix(b, []byte(\"# BUILD FAILED at \")):\n\t\t\thash := scanner.Text()[len(\"# BUILD FAILED at \"):]\n\t\t\tif ci := commitMap[hash]; ci != nil {\n\t\t\t\tci.buildFailed = true\n\t\t\t}\n\t\t}\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\tlog.Fatal(\"parsing benchmark log: \", err)\n\t}\n}\n\n// binPath returns the file name of the binary for this commit.\nfunc (c *commitInfo) binPath() string {\n\t// TODO: This assumes the short commit hash 
is unique.\n\treturn fmt.Sprintf(\"bench.%s\", c.hash[:7])\n}\n\n// failed returns whether commit c has failed and should not be run\n// any more.\nfunc (c *commitInfo) failed() bool {\n\treturn c.buildFailed || c.fails >= maxFails\n}\n\n// runnable returns whether commit c needs to be benchmarked at least\n// one more time.\nfunc (c *commitInfo) runnable() bool {\n\treturn !c.buildFailed && c.fails < maxFails && c.count < run.iterations\n}\n\n// partial returns true if this commit is both runnable and already\n// has some runs.\nfunc (c *commitInfo) partial() bool {\n\treturn c.count > 0 && c.runnable()\n}\n\nvar commitRe = regexp.MustCompile(`^commit: |^# FAILED|^# BUILD FAILED`)\n\n// cleanLog escapes lines in l that may confuse the log parser and\n// makes sure l is newline terminated.\nfunc cleanLog(l string) string {\n\tl = commitRe.ReplaceAllString(l, \"# $0\")\n\tif !strings.HasSuffix(l, \"\\n\") {\n\t\tl += \"\\n\"\n\t}\n\treturn l\n}\n\n// logRun updates c with a successful run.\nfunc (c *commitInfo) logRun(out string) {\n\tvar log bytes.Buffer\n\tfmt.Fprintf(&log, \"commit: %s\\n\", c.hash)\n\tfmt.Fprintf(&log, \"commit-time: %s\\n\", c.commitDate.UTC().Format(time.RFC3339))\n\tfmt.Fprintf(&log, \"\\n%s\\n\", cleanLog(out))\n\tc.writeLog(log.String())\n\tc.count++\n}\n\n// logFailed updates c with a failed run. If buildFailed is true, this\n// is considered a permanent failure and buildFailed is set.\nfunc (c *commitInfo) logFailed(buildFailed bool, out string) {\n\ttyp := \"FAILED\"\n\tif buildFailed {\n\t\ttyp = \"BUILD FAILED\"\n\t}\n\tc.writeLog(fmt.Sprintf(\"# %s at %s\\n# %s\\n\", typ, c.hash, strings.Replace(cleanLog(out), \"\\n\", \"\\n# \", -1)))\n\tif buildFailed {\n\t\tc.buildFailed = true\n\t} else {\n\t\tc.fails++\n\t}\n}\n\n// writeLog appends msg to c's log file. 
The caller is responsible for\n// properly formatting it.\nfunc (c *commitInfo) writeLog(msg string) {\n\tlogFile, err := os.OpenFile(c.logPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)\n\tif err != nil {\n\t\tlog.Fatalf(\"opening %s: %v\", c.logPath, err)\n\t}\n\tif _, err := logFile.WriteString(msg); err != nil {\n\t\tlog.Fatalf(\"writing to %s: %v\", c.logPath, err)\n\t}\n\tif err := logFile.Close(); err != nil {\n\t\tlog.Fatalf(\"closing %s: %v\", c.logPath, err)\n\t}\n}\n"
  },
  {
    "path": "benchmany/readlog.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"io/ioutil\"\n\t\"log\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/aclements/go-moremath/stats\"\n)\n\n// ComputeStats updates the derived statistics in s from the raw\n// samples in s.Values.\nfunc (stat *Benchstat) ComputeStats() {\n\tstat.Mean = stats.Mean(stat.Values)\n}\n\n// A Benchstat is the metrics along one axis (e.g., ns/op or MB/s)\n// for all runs of a specific benchmark.\ntype Benchstat struct {\n\tUnit   string\n\tValues []float64 // metrics\n\tMean   float64   // mean of Values\n}\n\n// A BenchKey identifies one metric (e.g., \"ns/op\", \"B/op\") from one\n// benchmark (function name sans \"Benchmark\" prefix) in one\n// configuration (input file name).\ntype BenchKey struct {\n\tConfig, Benchmark, Unit string\n}\n\ntype Collection struct {\n\tStats map[BenchKey]*Benchstat\n\n\t// Keys gives all keys of Stats in the order added.\n\tKeys []BenchKey\n\n\t// Configs, Benchmarks, and Units give the set of configs,\n\t// benchmarks, and units from the keys in Stats in an order\n\t// meant to match the order the benchmarks were read in.\n\tConfigs, Benchmarks, Units []string\n\n\t// ConfigSet, BenchmarkSet, and UnitSet are set\n\t// representations of Configs, Benchmarks, and Units.\n\tConfigSet, BenchmarkSet, UnitSet map[string]bool\n}\n\nfunc (c *Collection) AddStat(key BenchKey) *Benchstat {\n\tif stat, ok := c.Stats[key]; ok {\n\t\treturn stat\n\t}\n\n\tc.addKey(key)\n\tstat := &Benchstat{Unit: key.Unit}\n\tc.Stats[key] = stat\n\treturn stat\n}\n\nfunc (c *Collection) addKey(key BenchKey) {\n\taddString := func(strings *[]string, set map[string]bool, add string) {\n\t\tif set[add] {\n\t\t\treturn\n\t\t}\n\t\t*strings = append(*strings, add)\n\t\tset[add] = true\n\t}\n\tc.Keys = append(c.Keys, key)\n\taddString(&c.Configs, c.ConfigSet, 
key.Config)\n\taddString(&c.Benchmarks, c.BenchmarkSet, key.Benchmark)\n\taddString(&c.Units, c.UnitSet, key.Unit)\n}\n\nfunc (c *Collection) Filter(key BenchKey) *Collection {\n\tc2 := NewCollection()\n\tfor _, k := range c.Keys {\n\t\tif (key.Config == \"\" || key.Config == k.Config) &&\n\t\t\t(key.Benchmark == \"\" || key.Benchmark == k.Benchmark) &&\n\t\t\t(key.Unit == \"\" || key.Unit == k.Unit) {\n\t\t\tc2.addKey(k)\n\t\t\tc2.Stats[k] = c.Stats[k]\n\t\t}\n\t}\n\treturn c2\n}\n\nfunc NewCollection() *Collection {\n\treturn &Collection{\n\t\tStats:        make(map[BenchKey]*Benchstat),\n\t\tConfigSet:    make(map[string]bool),\n\t\tBenchmarkSet: make(map[string]bool),\n\t\tUnitSet:      make(map[string]bool),\n\t}\n}\n\n// readFiles reads a set of benchmark files as a Collection.\nfunc readFiles(files ...string) *Collection {\n\tc := NewCollection()\n\tfor _, file := range files {\n\t\treadFile(file, c)\n\t}\n\treturn c\n}\n\nvar unitOfXMetric = map[string]string{\n\t\"time\":           \"ns/op\",\n\t\"allocated\":      \"allocated bytes/op\",      // ΔMemStats.TotalAlloc / N\n\t\"allocs\":         \"allocs/op\",               // ΔMemStats.Mallocs / N\n\t\"sys-total\":      \"bytes from system\",       // MemStats.Sys\n\t\"sys-heap\":       \"heap bytes from system\",  // MemStats.HeapSys\n\t\"sys-stack\":      \"stack bytes from system\", // MemStats.StackSys\n\t\"sys-gc\":         \"GC bytes from system\",    // MemStats.GCSys\n\t\"sys-other\":      \"other bytes from system\", // MemStats.OtherSys+MSpanSys+MCacheSys+BuckHashSys\n\t\"gc-pause-total\": \"STW ns/op\",               // ΔMemStats.PauseTotalNs / N\n\t\"gc-pause-one\":   \"STW ns/GC\",               // ΔMemStats.PauseTotalNs / ΔNumGC\n\t\"rss\":            \"max RSS bytes\",           // Rusage.Maxrss * 1<<10\n\t\"cputime\":        \"user+sys ns/op\",          // Rusage.Utime+Stime\n\t\"virtual-mem\":    \"peak VM bytes\",           // /proc/self/status VmPeak\n}\n\n// readFile reads a set of 
benchmarks from a file in to a Collection.\nfunc readFile(file string, c *Collection) {\n\tc.Configs = append(c.Configs, file)\n\tkey := BenchKey{Config: file}\n\n\ttext, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfor _, line := range strings.Split(string(text), \"\\n\") {\n\t\tif strings.HasPrefix(line, \"GOPERF-METRIC:\") {\n\t\t\t// x/benchmarks-style output.\n\t\t\tline := line[14:]\n\t\t\tf := strings.Split(line, \"=\")\n\t\t\tval, err := strconv.ParseFloat(f[1], 64)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tkey.Benchmark = f[0]\n\t\t\tkey.Unit = unitOfXMetric[f[0]]\n\t\t\tif key.Unit == \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tstat := c.AddStat(key)\n\t\t\tstat.Values = append(stat.Values, val)\n\t\t\tcontinue\n\t\t}\n\n\t\tf := strings.Fields(line)\n\t\tif len(f) < 4 {\n\t\t\tcontinue\n\t\t}\n\t\tname := f[0]\n\t\tif !strings.HasPrefix(name, \"Benchmark\") {\n\t\t\tcontinue\n\t\t}\n\t\tname = strings.TrimPrefix(name, \"Benchmark\")\n\t\tn, _ := strconv.Atoi(f[1])\n\t\tif n == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tkey.Benchmark = name\n\t\tfor i := 2; i+2 <= len(f); i += 2 {\n\t\t\tval, err := strconv.ParseFloat(f[i], 64)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tkey.Unit = f[i+1]\n\t\t\tstat := c.AddStat(key)\n\t\t\tstat.Values = append(stat.Values, val)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "benchmany/run.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"math\"\n\t\"math/rand\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/aclements/go-misc/bench\"\n\t\"github.com/aclements/go-moremath/stats\"\n)\n\n// TODO: Check CPU performance governor before each benchmark.\n\n// TODO: Support running pre-built binaries without specific hashes.\n// This is useful for testing things that aren't yet committed or that\n// require unusual build steps.\n\nvar run struct {\n\torder      string\n\tmetric     string\n\tbenchFlags string\n\tbuildCmd   string\n\titerations int\n\tsaveTree   bool\n\ttimeout    time.Duration\n\tclean      bool\n\tcleanFlags string\n\n\tlogPath string\n\tbinDir  string\n}\n\nfunc init() {\n\t// TODO: This makes a mess of flags during testing.\n\tisXBenchmark := false\n\tif abs, _ := os.Getwd(); strings.HasSuffix(abs, \"golang.org/x/benchmarks/bench\") {\n\t\tisXBenchmark = true\n\t}\n\n\tf := flag.CommandLine\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage: %s [flags] <revision range>\\n\", os.Args[0])\n\t\tf.PrintDefaults()\n\t}\n\tf.StringVar(&run.order, \"order\", \"seq\", \"run benchmarks in `order`, which must be one of: seq, spread, metric\")\n\tf.StringVar(&run.metric, \"metric\", \"ns/op\", \"for -order metric, the benchmark metric to find differences in\")\n\tf.StringVar(&gitDir, \"C\", \"\", \"run git in `dir`\")\n\tdefaultBenchFlags := \"-test.run NONE -test.bench .\"\n\tif isXBenchmark {\n\t\tdefaultBenchFlags = \"\"\n\t}\n\tf.StringVar(&run.benchFlags, \"benchflags\", defaultBenchFlags, \"pass `flags` to benchmark\")\n\tdefaultBuildCmd := \"go test -c\"\n\tif isXBenchmark {\n\t\tdefaultBuildCmd = \"go build\"\n\t}\n\tf.StringVar(&run.buildCmd, 
\"buildcmd\", defaultBuildCmd, \"build benchmark using \\\"`cmd` -o <bin>\\\"\")\n\tf.IntVar(&run.iterations, \"n\", 5, \"run each benchmark `N` times\")\n\tf.StringVar(&run.logPath, \"o\", \"\", \"write benchmark results to `file` (default \\\"bench.log\\\" in -d directory)\")\n\tf.StringVar(&run.binDir, \"d\", \".\", \"write binaries to `directory`\")\n\tf.BoolVar(&run.saveTree, \"save-tree\", false, \"save Go trees using gover and run benchmarks under saved trees\")\n\tf.DurationVar(&run.timeout, \"timeout\", 30*time.Minute, \"time out a run after `duration`\")\n\tf.BoolVar(&dryRun, \"dry-run\", false, \"print commands but do not run them\")\n\tf.BoolVar(&run.clean, \"clean\", false, \"run \\\"git clean -f\\\" after every checkout\")\n\tf.StringVar(&run.cleanFlags, \"cleanflags\", \"\", \"add `flags` to git clean command\")\n}\n\nfunc doRun() {\n\tif flag.NArg() < 1 {\n\t\tflag.Usage()\n\t\tos.Exit(2)\n\t}\n\n\tvar pickCommit func([]*commitInfo) *commitInfo\n\tswitch run.order {\n\tcase \"seq\":\n\t\tpickCommit = pickCommitSeq\n\tcase \"spread\":\n\t\tpickCommit = pickCommitSpread\n\tcase \"metric\":\n\t\tpickCommit = pickCommitMetric\n\tdefault:\n\t\tfmt.Fprintf(os.Stderr, \"unknown order: %s\\n\", run.order)\n\t\tflag.Usage()\n\t\tos.Exit(2)\n\t}\n\n\tif run.logPath == \"\" {\n\t\trun.logPath = filepath.Join(run.binDir, \"bench.log\")\n\t}\n\n\tcommits := getCommits(flag.Args(), run.logPath)\n\n\t// Write header block to log.\n\tif len(commits) > 0 {\n\t\theader := new(bytes.Buffer)\n\t\tfmt.Fprintf(header, \"# Run started at %s\\n\", time.Now())\n\t\twriteHeader(header)\n\t\tfmt.Fprintf(header, \"\\n\")\n\t\tcommits[0].writeLog(header.String())\n\t}\n\n\t// Always run git from the top level of the git tree. 
Some\n\t// commands, like git clean, care about this.\n\tgitDir = trimNL(git(\"rev-parse\", \"--show-toplevel\"))\n\n\tstatus := NewStatusReporter()\n\tdefer status.Stop()\n\n\tfor {\n\t\tdoneIters, totalIters, partialCommits, doneCommits, failedCommits := runStats(commits)\n\t\tunstartedCommits := len(commits) - (partialCommits + doneCommits + failedCommits)\n\t\tmsg := fmt.Sprintf(\"%d/%d runs, %d unstarted+%d partial+%d done+%d failed commits\", doneIters, totalIters, unstartedCommits, partialCommits, doneCommits, failedCommits)\n\t\t// TODO: Count builds and runs separately.\n\t\tstatus.Progress(msg, float64(doneIters)/float64(totalIters))\n\n\t\tcommit := pickCommit(commits)\n\t\tif commit == nil {\n\t\t\tbreak\n\t\t}\n\t\trunBenchmark(commit, status)\n\t}\n}\n\nfunc writeHeader(w io.Writer) {\n\tgoos, err := exec.Command(\"go\", \"env\", \"GOOS\").Output()\n\tif err != nil {\n\t\tlog.Fatalf(\"error running go env GOOS: %s\", err)\n\t}\n\tfmt.Fprintf(w, \"goos: %s\\n\", strings.TrimSpace(string(goos)))\n\n\tgoarch, err := exec.Command(\"go\", \"env\", \"GOARCH\").Output()\n\tif err != nil {\n\t\tlog.Fatalf(\"error running go env GOARCH: %s\", err)\n\t}\n\tfmt.Fprintf(w, \"goarch: %s\\n\", strings.TrimSpace(string(goarch)))\n\n\tkernel, err := exec.Command(\"uname\", \"-sr\").Output()\n\tif err != nil {\n\t\tlog.Fatalf(\"error running uname -sr: %s\", err)\n\t}\n\tfmt.Fprintf(w, \"uname-sr: %s\\n\", strings.TrimSpace(string(kernel)))\n\n\tcpuinfo, err := ioutil.ReadFile(\"/proc/cpuinfo\")\n\tif err == nil {\n\t\tsubs := regexp.MustCompile(`(?m)^model name\\s*:\\s*(.*)`).FindSubmatch(cpuinfo)\n\t\tif subs != nil {\n\t\t\tfmt.Fprintf(w, \"cpu: %s\\n\", string(subs[1]))\n\t\t}\n\t}\n\n\tfmt.Fprintf(w, \"tool: benchmany\\n\")\n}\n\nfunc runStats(commits []*commitInfo) (doneIters, totalIters, partialCommits, doneCommits, failedCommits int) {\n\tfor _, c := range commits {\n\t\tif c.count >= run.iterations {\n\t\t\t// Don't care if it failed.\n\t\t\tdoneIters += 
c.count\n\t\t\ttotalIters += c.count\n\t\t} else if c.runnable() {\n\t\t\tdoneIters += c.count\n\t\t\ttotalIters += run.iterations\n\t\t}\n\n\t\tif c.count == run.iterations {\n\t\t\tdoneCommits++\n\t\t} else if c.runnable() {\n\t\t\tif c.count != 0 {\n\t\t\t\tpartialCommits++\n\t\t\t}\n\t\t} else {\n\t\t\tfailedCommits++\n\t\t}\n\t}\n\treturn\n}\n\n// pickCommitSeq picks the next commit to run based on the most recent\n// commit with the fewest iterations.\nfunc pickCommitSeq(commits []*commitInfo) *commitInfo {\n\tvar minCommit *commitInfo\n\tfor _, commit := range commits {\n\t\tif !commit.runnable() {\n\t\t\tcontinue\n\t\t}\n\t\tif minCommit == nil || commit.count < minCommit.count {\n\t\t\tminCommit = commit\n\t\t}\n\t}\n\treturn minCommit\n}\n\n// pickCommitSpread picks the next commit to run from commits using an\n// algorithm that spreads out the runs.\nfunc pickCommitSpread(commits []*commitInfo) *commitInfo {\n\t// Assign weights to each commit. This is thoroughly\n\t// heuristic, but it's geared toward either increasing the\n\t// iteration count of commits that we have, or picking a new\n\t// commit so as to spread out the commits we have.\n\tweights := make([]int, len(commits))\n\ttotalWeight := 0\n\n\tnPartial := 0\n\tfor _, commit := range commits {\n\t\tif commit.partial() {\n\t\t\tnPartial++\n\t\t}\n\t}\n\tif nPartial >= len(commits)/10 {\n\t\t// Limit the number of partially completed revisions\n\t\t// to 10% by only choosing a partial commit in this\n\t\t// case.\n\t\tfor i, commit := range commits {\n\t\t\tif commit.partial() {\n\t\t\t\t// Bias toward commits that are\n\t\t\t\t// further from done.\n\t\t\t\tweights[i] = run.iterations - commit.count\n\t\t\t}\n\t\t}\n\t} else {\n\t\t// Pick a new commit weighted by its distance from a\n\t\t// commit that we already have.\n\n\t\t// Find distance from left to right.\n\t\tdistance := len(commits)\n\t\thaveAny := false\n\t\tfor i, commit := range commits {\n\t\t\tif commit.count > 0 
{\n\t\t\t\tdistance = 1\n\t\t\t\thaveAny = true\n\t\t\t} else if commit.runnable() {\n\t\t\t\tdistance++\n\t\t\t}\n\t\t\tweights[i] = distance\n\t\t}\n\n\t\t// Find distance from right to left.\n\t\tdistance = len(commits)\n\t\tfor i := len(commits) - 1; i >= 0; i-- {\n\t\t\tcommit := commits[i]\n\t\t\tif commit.count > 0 {\n\t\t\t\tdistance = 1\n\t\t\t} else if commit.runnable() {\n\t\t\t\tdistance++\n\t\t\t}\n\n\t\t\tif distance < weights[i] {\n\t\t\t\tweights[i] = distance\n\t\t\t}\n\t\t}\n\n\t\tif !haveAny {\n\t\t\t// We don't have any commits. Pick one uniformly.\n\t\t\tfor i := range commits {\n\t\t\t\tweights[i] = 1\n\t\t\t}\n\t\t}\n\n\t\t// Zero non-runnable commits.\n\t\tfor i, commit := range commits {\n\t\t\tif !commit.runnable() {\n\t\t\t\tweights[i] = 0\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, w := range weights {\n\t\ttotalWeight += w\n\t}\n\tif totalWeight == 0 {\n\t\treturn nil\n\t}\n\n\t// Pick a commit based on the weights.\n\tx := rand.Intn(totalWeight)\n\tcumulative := 0\n\tfor i, w := range weights {\n\t\tcumulative += w\n\t\tif cumulative > x {\n\t\t\treturn commits[i]\n\t\t}\n\t}\n\tpanic(\"unreachable\")\n}\n\nfunc pickCommitMetric(commits []*commitInfo) *commitInfo {\n\t// If there are any partial commits, finish them up.\n\tfor _, c := range commits {\n\t\tif c.partial() {\n\t\t\treturn c\n\t\t}\n\t}\n\n\t// Remove failed commits. This makes it easier to avoid\n\t// picking a failed commit below.\n\tncommits := []*commitInfo{}\n\tfor _, c := range commits {\n\t\tif !c.failed() {\n\t\t\tncommits = append(ncommits, c)\n\t\t}\n\t}\n\tcommits = ncommits\n\tif len(ncommits) == 0 {\n\t\treturn nil\n\t}\n\n\t// Make sure we've run the most recent commit.\n\tif commits[0].runnable() {\n\t\treturn commits[0]\n\t}\n\n\t// Make sure we've run the earliest commit.\n\tif c := commits[len(commits)-1]; c.runnable() {\n\t\treturn c\n\t}\n\n\t// We're bounded from both sides and every commit we've run\n\t// has the best stats we're going to get. 
Parse run.metric\n\t// from the log file.\n\tlogf, err := os.Open(run.logPath)\n\tif err != nil {\n\t\tlog.Fatal(\"opening benchmark log: \", err)\n\t}\n\tdefer logf.Close()\n\tbs, err := bench.Parse(logf)\n\tif err != nil {\n\t\tlog.Fatal(\"parsing benchmark log for metrics: \", err)\n\t}\n\tresults := make(map[string]map[string][]float64)\n\tfor _, b := range bs {\n\t\tvar hash string\n\t\tif commitConfig, ok := b.Config[\"commit\"]; !ok {\n\t\t\tcontinue\n\t\t} else {\n\t\t\thash = commitConfig.RawValue\n\t\t}\n\t\tresult, ok := b.Result[run.metric]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tif results[hash] == nil {\n\t\t\tresults[hash] = make(map[string][]float64)\n\t\t}\n\t\tresults[hash][b.Name] = append(results[hash][b.Name], result)\n\t}\n\tgeomeans := make(map[string]float64)\n\tfor hash, benches := range results {\n\t\tvar means []float64\n\t\tfor _, results := range benches {\n\t\t\tmeans = append(means, stats.Mean(results))\n\t\t}\n\t\tgeomeans[hash] = stats.GeoMean(means)\n\t}\n\n\t// Find the pair of commits with the biggest difference in the\n\t// metric.\n\tprevI := -1\n\tmaxDiff, maxMid := -1.0, (*commitInfo)(nil)\n\tfor i, c := range commits {\n\t\tif c.count == 0 || geomeans[c.hash] == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif prevI == -1 {\n\t\t\tprevI = i\n\t\t\tcontinue\n\t\t}\n\n\t\tif i > prevI+1 {\n\t\t\t// TODO: This isn't branch-aware. We should\n\t\t\t// only compare commits with an ancestry\n\t\t\t// relationship.\n\t\t\tdiff := math.Abs(geomeans[c.hash] - geomeans[commits[prevI].hash])\n\t\t\tif diff > maxDiff {\n\t\t\t\tmaxDiff = diff\n\t\t\t\tmaxMid = commits[(prevI+i)/2]\n\t\t\t}\n\t\t}\n\t\tprevI = i\n\t}\n\treturn maxMid\n}\n\n// runBenchmark runs the benchmark at commit. 
It updates commit.count,\n// commit.fails, and commit.buildFailed as appropriate and writes to\n// the commit log to record the outcome.\nfunc runBenchmark(commit *commitInfo, status *StatusReporter) {\n\t// Build the benchmark if necessary.\n\tbinPath := filepath.Join(run.binDir, commit.binPath())\n\tif !exists(binPath) {\n\t\trunStatus(status, commit, \"building\")\n\n\t\t// Check out the appropriate commit. This is necessary\n\t\t// even if we're using gover because the benchmark\n\t\t// itself might have changed (e.g., bug fixes).\n\t\tgit(\"checkout\", \"-q\", commit.hash)\n\n\t\tif run.clean {\n\t\t\targs := append([]string{\"-f\"}, strings.Fields(run.cleanFlags)...)\n\t\t\tgit(\"clean\", args...)\n\t\t}\n\n\t\tvar buildCmd []string\n\t\tif commit.gover {\n\t\t\tbuildCmd = []string{\"gover\", \"with\", commit.hash}\n\t\t} else {\n\t\t\t// If this is the Go toolchain, do a full\n\t\t\t// make.bash. Otherwise, we assume that go\n\t\t\t// test -c will build the necessary\n\t\t\t// dependencies.\n\t\t\tif exists(filepath.Join(gitDir, \"src\", \"make.bash\")) {\n\t\t\t\tcmd := exec.Command(\"./make.bash\")\n\t\t\t\tcmd.Dir = filepath.Join(gitDir, \"src\")\n\t\t\t\tif dryRun {\n\t\t\t\t\tdryPrint(cmd)\n\t\t\t\t} else if out, err := combinedOutputTimeout(cmd); err != nil {\n\t\t\t\t\tdetail := indent(string(out)) + indent(err.Error())\n\t\t\t\t\tfmt.Fprintf(os.Stderr, \"failed to build toolchain at %s:\\n%s\", commit.hash, detail)\n\t\t\t\t\tcommit.logFailed(true, detail)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif run.saveTree && doGoverSave() == nil {\n\t\t\t\t\tcommit.gover = true\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Assume build command is in $PATH.\n\t\t\t//\n\t\t\t// TODO: Force PATH if we built the toolchain.\n\t\t\tbuildCmd = []string{}\n\t\t}\n\n\t\tbuildCmd = append(buildCmd, strings.Fields(run.buildCmd)...)\n\t\tbuildCmd = append(buildCmd, \"-o\", binPath)\n\t\tcmd := exec.Command(buildCmd[0], buildCmd[1:]...)\n\t\tif dryRun {\n\t\t\tdryPrint(cmd)\n\t\t} else if 
out, err := combinedOutputTimeout(cmd); err != nil {\n\t\t\tdetail := indent(string(out)) + indent(err.Error())\n\t\t\tfmt.Fprintf(os.Stderr, \"failed to build tests at %s:\\n%s\", commit.hash, detail)\n\t\t\tcommit.logFailed(true, detail)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Run the benchmark.\n\trunStatus(status, commit, \"running\")\n\tif filepath.Base(binPath) == binPath {\n\t\t// Make exec.Command treat this as a relative path.\n\t\tbinPath = \"./\" + binPath\n\t}\n\targs := append([]string{binPath}, strings.Fields(run.benchFlags)...)\n\tif run.saveTree {\n\t\targs = append([]string{\"gover\", \"with\", commit.hash}, args...)\n\t}\n\tcmd := exec.Command(args[0], args[1:]...)\n\tif dryRun {\n\t\tdryPrint(cmd)\n\t\tcommit.count++\n\t\treturn\n\t}\n\tout, err := combinedOutputTimeout(cmd)\n\tif err == nil {\n\t\tcommit.logRun(string(out))\n\t} else {\n\t\tdetail := indent(string(out)) + indent(err.Error())\n\t\tfmt.Fprintf(os.Stderr, \"failed to run benchmark at %s:\\n%s\", commit.hash, detail)\n\t\tcommit.logFailed(false, detail)\n\t}\n}\n\nfunc doGoverSave() error {\n\tcmd := exec.Command(\"gover\", \"save\")\n\tcmd.Dir = gitDir\n\tif dryRun {\n\t\tdryPrint(cmd)\n\t\treturn nil\n\t} else {\n\t\tout, err := cmd.CombinedOutput()\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"gover save failed: %s:\\n%s\", err, indent(string(out)))\n\t\t}\n\t\treturn err\n\t}\n}\n\n// runStatus updates the status message for commit.\nfunc runStatus(sr *StatusReporter, commit *commitInfo, status string) {\n\tsr.Message(fmt.Sprintf(\"commit %s, iteration %d/%d: %s...\", commit.hash[:7], commit.count+1, run.iterations, status))\n}\n\n// combinedOutputTimeout is like c.CombinedOutput(), but if\n// run.timeout != 0, it will kill c after run.timeout time expires.\nfunc combinedOutputTimeout(c *exec.Cmd) (out []byte, err error) {\n\tvar b bytes.Buffer\n\tc.Stdout = &b\n\tc.Stderr = &b\n\tif err := c.Start(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif run.timeout == 0 {\n\t\terr := 
c.Wait()\n\t\treturn b.Bytes(), err\n\t}\n\n\ttick := time.NewTimer(run.timeout)\n\ttrace := signalTrace\n\tdone := make(chan error)\n\tgo func() {\n\t\tdone <- c.Wait()\n\t}()\nloop:\n\tfor {\n\t\tselect {\n\t\tcase err = <-done:\n\t\t\tbreak loop\n\t\tcase <-tick.C:\n\t\t\tif trace != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"command timed out; sending %v\\n\", trace)\n\t\t\t\tc.Process.Signal(trace)\n\t\t\t\ttick = time.NewTimer(5 * time.Second)\n\t\t\t\ttrace = nil\n\t\t\t} else {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"command timed out; killing\\n\")\n\t\t\t\tc.Process.Kill()\n\t\t\t}\n\t\t}\n\t}\n\ttick.Stop()\n\treturn b.Bytes(), err\n}\n"
  },
  {
    "path": "benchmany/run_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"math/rand\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"testing\"\n\n\t\"github.com/aclements/go-misc/bench\"\n)\n\nfunc TestPickSpread(t *testing.T) {\n\trun.iterations = 5\n\n\tfor iter := 0; iter < 10; iter++ {\n\t\tcommits := []*commitInfo{}\n\t\tfor i := 0; i < 100; i++ {\n\t\t\tcommits = append(commits, &commitInfo{})\n\t\t}\n\n\t\tfor {\n\t\t\tcommit := pickCommitSpread(commits)\n\t\t\tif commit == nil {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif rand.Intn(50) == 0 {\n\t\t\t\tcommit.buildFailed = true\n\t\t\t} else if rand.Intn(50) == 1 {\n\t\t\t\tcommit.fails++\n\t\t\t} else {\n\t\t\t\tcommit.count++\n\t\t\t}\n\t\t}\n\n\t\t// Test that all of the commits ran the expected\n\t\t// number of times.\n\t\tfor _, c := range commits {\n\t\t\tif c.runnable() {\n\t\t\t\tt.Fatalf(\"commit still runnable %+v\", c)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestRun(t *testing.T) {\n\t// Create a git repo for testing.\n\trepo, err := ioutil.TempDir(\"\", \"benchmany-test\")\n\tif err != nil {\n\t\tt.Fatal(\"creating temp dir: \", err)\n\t}\n\tdefer os.RemoveAll(repo)\n\ttgit(t, repo, \"init\")\n\ttgit(t, repo, \"config\", \"user.name\", \"gopher\")\n\ttgit(t, repo, \"config\", \"user.email\", \"gopher@example.com\")\n\n\t// Write benchmark.\n\terr = ioutil.WriteFile(filepath.Join(repo, \"x_test.go\"), []byte(`\npackage main\n\nimport \"testing\"\n\nfunc TestMain(m *testing.M) {\n\tprintln(\"BenchmarkX 1 100 ns/op\")\n}`), 0666)\n\tif err != nil {\n\t\tt.Fatal(\"writing x_test.go: \", err)\n\t}\n\ttgit(t, repo, \"add\", \"x_test.go\")\n\ttgit(t, repo, \"commit\", \"-m\", \"initial\")\n\n\t// Create several commits.\n\tvar revs []string\n\tfor i := 0; i < 3; i++ {\n\t\tstr := fmt.Sprintf(\"%d\", i)\n\t\terr = ioutil.WriteFile(filepath.Join(repo, 
\"x\"), []byte(str), 0666)\n\t\tif err != nil {\n\t\t\tt.Fatal(\"writing x: \", err)\n\t\t}\n\t\ttgit(t, repo, \"add\", \"x\")\n\t\ttgit(t, repo, \"commit\", \"-m\", str)\n\t\trevs = append(revs, trimNL(tgit(t, repo, \"rev-parse\", \"HEAD\")))\n\t}\n\n\tfor iters := 4; iters <= 5; iters++ {\n\t\t// Run benchmark.\n\t\ttgit(t, repo, \"checkout\", \"master\")\n\t\toldArgs := os.Args\n\t\toldWD, err := os.Getwd()\n\t\tif err != nil {\n\t\t\tt.Fatal(\"Getwd: \", err)\n\t\t}\n\t\tos.Args = []string{os.Args[0], \"-n\", fmt.Sprintf(\"%d\", iters), \"HEAD~3..HEAD\"}\n\t\tos.Chdir(repo)\n\t\tdefer func() {\n\t\t\tos.Args = oldArgs\n\t\t\tos.Chdir(oldWD)\n\t\t}()\n\t\tmain()\n\n\t\t// Check results.\n\t\tf, err := os.Open(filepath.Join(repo, \"bench.log\"))\n\t\tif err != nil {\n\t\t\tt.Fatal(\"opening bench.log: \", err)\n\t\t}\n\t\tdefer f.Close()\n\t\tbs, err := bench.Parse(f)\n\t\tif err != nil {\n\t\t\tt.Fatal(\"malformed benchmark log: \", err)\n\t\t}\n\t\tcounts := make(map[string]int)\n\t\tfor _, b := range bs {\n\t\t\tt.Log(b, b.Config[\"commit\"].RawValue)\n\t\t\tcounts[b.Config[\"commit\"].RawValue]++\n\n\t\t\tif uname, ok := b.Config[\"uname-sr\"]; !ok {\n\t\t\t\tt.Errorf(\"missing uname-sr config\")\n\t\t\t} else {\n\t\t\t\tt.Logf(\"uname-sr: %s\", uname)\n\t\t\t}\n\t\t}\n\t\tfor _, rev := range revs {\n\t\t\tif counts[rev] != iters {\n\t\t\t\tt.Errorf(\"expected %d results for %s, got %d\", iters, rev, counts[rev])\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc tgit(t *testing.T, repo string, args ...string) string {\n\tcmd := exec.Command(\"git\", args...)\n\tcmd.Dir = repo\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"git %s failed: %v\\n%s\", args, err, out)\n\t}\n\treturn string(out)\n}\n"
  },
  {
    "path": "benchmany/signal_notunix.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build plan9 windows\n\npackage main\n\nimport \"os\"\n\nvar signalTrace os.Signal = nil\n"
  },
  {
    "path": "benchmany/signal_unix.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build !plan9,!windows\n\npackage main\n\nimport (\n\t\"os\"\n\t\"syscall\"\n)\n\nvar signalTrace os.Signal = syscall.SIGQUIT\n"
  },
  {
    "path": "benchmany/status.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/aclements/go-moremath/fit\"\n\t\"golang.org/x/crypto/ssh/terminal\"\n)\n\ntype StatusReporter struct {\n\tupdate chan<- statusUpdate\n\tdone   chan bool\n}\n\ntype statusUpdate struct {\n\tprogress float64\n\tmessage  string\n}\n\nfunc NewStatusReporter() *StatusReporter {\n\tif os.Getenv(\"TERM\") == \"dumb\" || !terminal.IsTerminal(1) {\n\t\treturn &StatusReporter{}\n\t}\n\tupdate := make(chan statusUpdate)\n\tsr := &StatusReporter{update: update}\n\tgo sr.loop(update)\n\treturn sr\n}\n\nfunc (sr *StatusReporter) Progress(msg string, frac float64) {\n\tif sr.update != nil {\n\t\tsr.update <- statusUpdate{message: msg, progress: frac}\n\t}\n}\n\nfunc (sr *StatusReporter) Message(msg string) {\n\tif sr.update == nil {\n\t\tfmt.Println(msg)\n\t} else {\n\t\tsr.update <- statusUpdate{message: msg, progress: -1}\n\t}\n}\n\nfunc (sr *StatusReporter) Stop() {\n\tif sr.update != nil {\n\t\tsr.done = make(chan bool)\n\t\tclose(sr.update)\n\t\t<-sr.done\n\t\tsr.update = nil\n\t}\n}\n\nfunc (sr *StatusReporter) loop(updates <-chan statusUpdate) {\n\tconst resetLine = \"\\r\\x1b[2K\"\n\tconst wrapOff = \"\\x1b[?7l\"\n\tconst wrapOn = \"\\x1b[?7h\"\n\n\ttick := time.NewTicker(time.Second / 4)\n\tdefer tick.Stop()\n\n\tvar end time.Time\n\tt0 := time.Now()\n\n\tvar times, progress, weights []float64\n\tvar msg string\n\tfor {\n\t\tselect {\n\t\tcase update, ok := <-updates:\n\t\t\tif !ok {\n\t\t\t\tfmt.Print(resetLine)\n\t\t\t\tclose(sr.done)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif update.progress == -1 {\n\t\t\t\tfmt.Print(resetLine)\n\t\t\t\tfmt.Println(update.message)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tnow := float64(time.Now().Sub(t0))\n\t\t\ttimes = append(times, float64(now))\n\t\t\tprogress = append(progress, 
update.progress)\n\t\t\tweights = append(weights, 0)\n\t\t\tmsg = update.message\n\n\t\t\t// Compute ETA using linear regression with\n\t\t\t// exponentially decaying weights.\n\t\t\tconst halfLife = 150 * time.Second\n\t\t\tfor i, t := range times {\n\t\t\t\tweights[i] = math.Exp(-1 / float64(halfLife) * (now - t))\n\t\t\t}\n\t\t\treg := fit.PolynomialRegression(times, progress, weights, 1)\n\t\t\ta, b := reg.Coefficients[0], reg.Coefficients[1]\n\n\t\t\t// The intercept of a + b*x - 1 is the ending\n\t\t\t// time.\n\t\t\tif b == 0 {\n\t\t\t\tend = time.Time{}\n\t\t\t} else {\n\t\t\t\tend = t0.Add(time.Duration((1 - a) / b))\n\t\t\t}\n\n\t\tcase <-tick.C:\n\t\t}\n\n\t\tvar eta string\n\n\t\tif end.IsZero() {\n\t\t\teta = \"unknown\"\n\t\t} else {\n\t\t\tetaDur := end.Sub(time.Now())\n\t\t\t// Trim off sub-second precision.\n\t\t\tetaDur -= etaDur % time.Second\n\t\t\tif etaDur <= 0 {\n\t\t\t\teta = \"0s\"\n\t\t\t} else {\n\t\t\t\teta = etaDur.String()\n\t\t\t}\n\t\t}\n\t\tif msg == \"\" {\n\t\t\teta = \"ETA \" + eta\n\t\t} else {\n\t\t\teta = \", ETA \" + eta\n\t\t}\n\t\t// TODO: This isn't quite right. If we hit the right\n\t\t// edge of the terminal, it won't wrap, but the\n\t\t// right-most character will be the *last* character\n\t\t// in the string, since terminal keeps overwriting it.\n\t\tfmt.Printf(\"%s%s%s%s%s\", resetLine, wrapOff, msg, eta, wrapOn)\n\t}\n}\n"
  },
  {
    "path": "benchplot/git.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"log\"\n\t\"os\"\n\t\"os/exec\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n)\n\ntype CommitInfo struct {\n\tHash, Subject, Branch  string\n\tAuthorDate, CommitDate time.Time\n\n\tParents, Children []string\n}\n\nfunc Commits(repo string, revs ...string) (commits []CommitInfo) {\n\targs := []string{\"-C\", repo, \"log\", \"-s\",\n\t\t\"--format=format:%H %aI %cI %P\\n%s\\n\"}\n\tif len(revs) == 0 {\n\t\targs = append(args, \"--all\")\n\t} else {\n\t\targs = append(append(args, \"--\"), revs...)\n\t}\n\tcmd := exec.Command(\"git\", args...)\n\tcmd.Stderr = os.Stderr\n\tout, err := cmd.Output()\n\tif err != nil {\n\t\tlog.Fatal(\"git show failed: \", err)\n\t}\n\tfor _, line := range strings.Split(string(out), \"\\n\\n\") {\n\t\tparts := strings.Split(line, \"\\n\")\n\t\tsubject := parts[1]\n\t\tparts = strings.Split(parts[0], \" \")\n\n\t\tadate, err := time.Parse(time.RFC3339, parts[1])\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"cannot parse author date: \", err)\n\t\t}\n\t\tcdate, err := time.Parse(time.RFC3339, parts[2])\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"cannot parse commit date: \", err)\n\t\t}\n\n\t\tcommits = append(commits, CommitInfo{\n\t\t\tparts[0], subject, \"\", adate, cdate,\n\t\t\tparts[3:], nil,\n\t\t})\n\t}\n\n\t// Compute hash indexes.\n\thashset := make(map[string]*CommitInfo)\n\tfor i := range commits {\n\t\thashset[commits[i].Hash] = &commits[i]\n\t}\n\n\t// Compute children hashes.\n\tfor h, ci := range hashset {\n\t\tfor _, parent := range ci.Parents {\n\t\t\tif ci2, ok := hashset[parent]; ok {\n\t\t\t\tci2.Children = append(ci2.Children, h)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Compute branch names.\n\tvar branchRe = regexp.MustCompile(`^\\[[^] ]+\\] `)\n\tvar branchOf func(ci *CommitInfo) string\n\tbranchOf = func(ci *CommitInfo) string 
{\n\t\tsubject := ci.Subject\n\t\tif strings.HasPrefix(subject, \"[\") {\n\t\t\tm := branchRe.FindString(subject)\n\t\t\tif m != \"\" {\n\t\t\t\treturn m[1 : len(m)-2]\n\t\t\t}\n\t\t}\n\t\tif strings.HasPrefix(subject, \"Merge\") || strings.HasPrefix(subject, \"Revert\") {\n\t\t\t// Walk children looking for a branch name.\n\t\t\tfor _, child := range ci.Children {\n\t\t\t\tif ci2 := hashset[child]; ci2 != nil {\n\t\t\t\t\tbranch := branchOf(ci2)\n\t\t\t\t\tif branch != \"master\" {\n\t\t\t\t\t\treturn branch\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn \"master\"\n\t}\n\tfor _, ci := range hashset {\n\t\tci.Branch = branchOf(ci)\n\t}\n\t// Clean up missing branch tags: if all parents and children\n\t// of a commit have the same non-master branch, that commit\n\t// must also have been from that branch.\ncleanBranches:\n\tfor _, ci := range hashset {\n\t\tif ci.Branch == \"master\" {\n\t\t\talt := \"\"\n\t\t\tfor _, child := range ci.Children {\n\t\t\t\tif ci2 := hashset[child]; ci2 != nil {\n\t\t\t\t\tif alt == \"\" {\n\t\t\t\t\t\talt = ci2.Branch\n\t\t\t\t\t} else if ci2.Branch != alt {\n\t\t\t\t\t\tcontinue cleanBranches\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tfor _, parent := range ci.Parents {\n\t\t\t\tif ci2 := hashset[parent]; ci2 != nil {\n\t\t\t\t\tif alt == \"\" {\n\t\t\t\t\t\talt = ci2.Branch\n\t\t\t\t\t} else if ci2.Branch != alt {\n\t\t\t\t\t\tcontinue cleanBranches\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif alt != \"\" {\n\t\t\t\tci.Branch = alt\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}\n"
  },
  {
    "path": "benchplot/kza.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport \"math\"\n\n// TODO: This all assumes that data is sampled at a regular interval\n// and there are no missing values. It could be generalized to accept\n// missing values (perhaps represented by NaN), or generalized much\n// further by accepting (t, x) pairs and a vector of times at which to\n// evaluate the filter (and an arbitrary window size). I would have to\n// figure out how that affects the difference array in KZA.\n\n// TODO: These can generate a lot of garbage. Perhaps the caller\n// should pass in the target slice? Or these should just overwrite the\n// input array and leave it to the caller to copy if necessary?\n\n// MovingAverage performs a moving average (MA) filter of xs with\n// window size m. m must be a positive odd integer.\n//\n// Note that this is filter is often described in terms of the half\n// length of the window (m-1)/2.\nfunc MovingAverage(xs []float64, m int) []float64 {\n\tif m <= 0 || m%2 != 1 {\n\t\tpanic(\"m must be a positive, odd integer\")\n\t}\n\tys := make([]float64, len(xs))\n\tsum, n := 0.0, 0\n\tfor l, i, r := -m, -(m-1)/2, 0; i < len(ys); l, i, r = l+1, i+1, r+1 {\n\t\tif l >= 0 {\n\t\t\tsum -= xs[l]\n\t\t\tn--\n\t\t}\n\t\tif r < len(xs) {\n\t\t\tsum += xs[r]\n\t\t\tn++\n\t\t}\n\t\tif i >= 0 {\n\t\t\tys[i] = sum / float64(n)\n\t\t}\n\t}\n\treturn ys\n}\n\n// KolmogorovZurbenko performs a Kolmogorov-Zurbenko (KZ) filter of xs\n// with window size m and k iterations. m must be a positive odd\n// integer. 
k must be positive.\nfunc KolmogorovZurbenko(xs []float64, m, k int) []float64 {\n\t// k is typically small, and MA is quite efficient, so just do\n\t// the iterated moving average rather than bothering to\n\t// compute the binomial coefficient kernel.\n\tfor i := 0; i < k; i++ {\n\t\t// TODO: Generate less garbage.\n\t\txs = MovingAverage(xs, m)\n\t}\n\treturn xs\n}\n\n// AdaptiveKolmogorovZurbenko performs an adaptive Kolmogorov-Zurbenko\n// (KZA) filter of xs using an initial window size m and k iterations.\n// m must be a positive odd integer. k must be positive.\n//\n// See Zurbenko, et al. 1996: Detecting discontinuities in time series\n// of upper air data: Demonstration of an adaptive filter technique.\n// Journal of Climate, 9, 3548–3560.\nfunc AdaptiveKolmogorovZurbenko(xs []float64, m, k int) []float64 {\n\t// Perform initial KZ filter.\n\tz := KolmogorovZurbenko(xs, m, k)\n\n\t// Compute differenced values.\n\tq := (m - 1) / 2\n\td := make([]float64, len(z)+1)\n\tmaxD := 0.0\n\tfor i := q; i < len(z)-q; i++ {\n\t\td[i] = math.Abs(z[i+q] - z[i-q])\n\t\tif d[i] > maxD {\n\t\t\tmaxD = d[i]\n\t\t}\n\t}\n\n\tif maxD == 0 {\n\t\t// xs is constant, so no amount of filtering will do\n\t\t// anything. 
Avoid dividing 0/0 below.\n\t\treturn xs\n\t}\n\n\t// Compute adaptive filter.\n\tys := make([]float64, len(xs))\n\tfor t := range ys {\n\t\tdPrime := d[t+1] - d[t]\n\t\tf := 1 - d[t]/maxD\n\n\t\tqt := q\n\t\tif dPrime <= 0 {\n\t\t\t// Zurbenko doesn't specify what to do with\n\t\t\t// the fractional part of qt and qh, so we\n\t\t\t// interpret this as summing all points of xs\n\t\t\t// between qt and qh.\n\t\t\tqt = int(math.Ceil(float64(q) * f))\n\t\t}\n\t\tif t-qt < 0 {\n\t\t\tqt = t\n\t\t}\n\n\t\tqh := q\n\t\tif dPrime >= 0 {\n\t\t\tqh = int(math.Floor(float64(q) * f))\n\t\t}\n\t\tif t+qh >= len(xs) {\n\t\t\tqh = len(xs) - t - 1\n\t\t}\n\n\t\tsum := 0.0\n\t\tfor i := t - qt; i <= t+qh; i++ {\n\t\t\tsum += xs[i]\n\t\t}\n\t\t// Zurbenko divides by qh+qt, but this undercounts the\n\t\t// number of terms in the sum by 1.\n\t\tys[t] = sum / float64(qh+qt+1)\n\t}\n\n\treturn ys\n}\n"
  },
  {
    "path": "benchplot/kza_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"math/rand\"\n\t\"testing\"\n)\n\n// Aeq returns true if expect and got are equal to 8 significant\n// figures (1 part in 100 million).\nfunc Aeq(expect, got float64) bool {\n\tif expect < 0 && got < 0 {\n\t\texpect, got = -expect, -got\n\t}\n\treturn expect*0.99999999 <= got && got*0.99999999 <= expect\n}\n\nfunc TestMovingAverage(t *testing.T) {\n\t// Test MovingAverage against the obvious (but slow)\n\t// implementation.\n\txs := make([]float64, 100)\n\tfor iter := 0; iter < 10; iter++ {\n\t\tfor i := range xs {\n\t\t\txs[i] = rand.Float64()\n\t\t}\n\t\tm := 1 + 2*rand.Intn(100)\n\t\tys1, ys2 := MovingAverage(xs, m), slowMovingAverage(xs, m)\n\n\t\t// TODO: Use stuff from mathtest.\n\t\tfor i, y1 := range ys1 {\n\t\t\tif !Aeq(y1, ys2[i]) {\n\t\t\t\tt.Fatalf(\"want %v, got %v\", ys2, ys1)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc slowMovingAverage(xs []float64, m int) []float64 {\n\tys := make([]float64, len(xs))\n\tfor i := range ys {\n\t\tpsum, n := 0.0, 0\n\t\tfor j := i - (m-1)/2; j <= i+(m-1)/2; j++ {\n\t\t\tif 0 <= j && j < len(xs) {\n\t\t\t\tpsum += xs[j]\n\t\t\t\tn++\n\t\t\t}\n\t\t}\n\t\tys[i] = psum / float64(n)\n\t}\n\treturn ys\n}\n"
  },
  {
    "path": "benchplot/main.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Command benchplot plots the results of benchmarks over time.\n//\n// benchplot takes an input file in Go benchmark format [1]. Each\n// benchmark result must have a \"commit\" configuration key that gives\n// the full commit hash of the revision that gave that result.\n// benchplot will cross-reference these hashes against the specified\n// Git repository and plot each metric over time for each benchmark.\n//\n// [1] https://github.com/golang/proposal/blob/master/design/14313-benchmark-format.md\npackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os/exec\"\n\t\"runtime\"\n\t\"runtime/pprof\"\n\t\"strings\"\n\n\t\"github.com/aclements/go-gg/gg\"\n\t\"github.com/aclements/go-gg/table\"\n\t\"github.com/aclements/go-misc/bench\"\n)\n\nfunc main() {\n\tlog.SetPrefix(\"benchplot: \")\n\tlog.SetFlags(0)\n\n\tdefaultGitDir, _ := exec.Command(\"git\", \"rev-parse\", \"--show-toplevel\").Output()\n\tdefaultGitDir = bytes.TrimRight(defaultGitDir, \"\\n\")\n\tvar (\n\t\tflagCPUProfile = flag.String(\"cpuprofile\", \"\", \"write CPU profile to `file`\")\n\t\tflagMemProfile = flag.String(\"memprofile\", \"\", \"write heap profile to `file`\")\n\t\tflagGitDir     = flag.String(\"C\", string(defaultGitDir), \"run git in `dir`\")\n\t\tflagOut        = flag.String(\"o\", \"\", \"write output to `file` (default: stdout)\")\n\t\tflagTable      = flag.Bool(\"table\", false, \"output a table instead of a plot\")\n\t)\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage: %s [flags] [inputs...]\\n\", os.Args[0])\n\t\tflag.PrintDefaults()\n\t}\n\tflag.Parse()\n\n\tif *flagCPUProfile != \"\" {\n\t\tf, err := os.Create(*flagCPUProfile)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tpprof.StartCPUProfile(f)\n\t\tdefer pprof.StopCPUProfile()\n\t}\n\n\tif 
*flagMemProfile != \"\" {\n\t\tdefer func() {\n\t\t\truntime.GC()\n\t\t\tf, err := os.Create(*flagMemProfile)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tpprof.WriteHeapProfile(f)\n\t\t\tf.Close()\n\t\t}()\n\t}\n\n\t// Parse benchmark inputs.\n\tpaths := flag.Args()\n\tif len(paths) == 0 {\n\t\tpaths = []string{\"-\"}\n\t}\n\tvar benchmarks []*bench.Benchmark\n\tfor _, path := range paths {\n\t\tfunc() {\n\t\t\tf := os.Stdin\n\t\t\tif path != \"-\" {\n\t\t\t\tvar err error\n\t\t\t\tf, err = os.Open(path)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t\tdefer f.Close()\n\t\t\t}\n\n\t\t\tbs, err := bench.Parse(f)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tbenchmarks = append(benchmarks, bs...)\n\t\t}()\n\t}\n\tbench.ParseValues(benchmarks, nil)\n\n\t// Prepare gg tables.\n\tvar tab table.Grouping\n\tbtab, configCols, resultCols := benchmarksToTable(benchmarks)\n\tif btab.Column(\"commit\") == nil {\n\t\ttab = btab\n\t} else {\n\t\tgtab := commitsToTable(Commits(*flagGitDir))\n\t\ttab = table.Join(btab, \"commit\", gtab, \"commit\")\n\t}\n\n\t// Prepare for output.\n\tf := os.Stdout\n\tif *flagOut != \"\" {\n\t\tvar err error\n\t\tf, err = os.Create(*flagOut)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tdefer f.Close()\n\t}\n\n\t// Output table.\n\tif *flagTable {\n\t\ttable.Fprint(f, tab)\n\t\treturn\n\t}\n\n\t// Plot.\n\t//\n\t// TODO: Collect nrows/ncols from the plot itself.\n\tp, nrows, ncols := plot(tab, configCols, resultCols)\n\tif !(len(paths) == 1 && paths[0] == \"-\") {\n\t\tp.Add(gg.Title(strings.Join(paths, \" \")))\n\t}\n\n\t// Render plot.\n\tp.WriteSVG(f, 500*ncols, 350*nrows)\n}\n"
  },
  {
    "path": "benchplot/plot.go",
    "content": "package main\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\n\t\"github.com/aclements/go-gg/generic/slice\"\n\t\"github.com/aclements/go-gg/gg\"\n\t\"github.com/aclements/go-gg/ggstat\"\n\t\"github.com/aclements/go-gg/table\"\n)\n\n// TODO: Support plotting non-normalized results.\n\nfunc plot(t table.Grouping, configCols, resultCols []string) (*gg.Plot, int, int) {\n\t//t = table.Flatten(table.HeadTables(table.GroupBy(t, \"name\"), 9))\n\n\t// Filter to just the master branch.\n\t//\n\t// TODO: Flag to control this? Or separate filter command? Or\n\t// accept a filter expression in the argument?\n\tt = table.FilterEq(t, \"branch\", \"master\")\n\n\t// Compute rows and columns.\n\tncols := len(resultCols)\n\tnrows := len(table.GroupBy(t, \"name\").Tables())\n\n\tplot := gg.NewPlot(t)\n\n\t// Turn ordered commit date into a \"commit index\" column.\n\tplot.SortBy(\"commit date\")\n\tplot.Stat(commitIndex{})\n\n\t// Average each result at each commit (but keep columns names\n\t// the same to keep things easier to read).\n\tplot.Stat(ggstat.Agg(\"commit\", \"name\")(ggstat.AggMean(resultCols...)))\n\tfor _, rcol := range resultCols {\n\t\tplot.SetData(table.Rename(plot.Data(), \"mean \"+rcol, rcol))\n\t}\n\n\t// Unpivot all of the metrics into one column.\n\tplot.Stat(convertFloat{resultCols})\n\tplot.SetData(table.Unpivot(plot.Data(), \"metric\", \"result\", resultCols...))\n\ty := \"result\"\n\n\t// Normalize to earliest commit on master. 
It's important to\n\t// do this before the geomean if there are commits missing.\n\t// Unfortunately, that also means we have to *temporarily*\n\t// group by name and metric, since the geomean needs to be\n\t// done on a different grouping.\n\tplot.GroupBy(\"name\", \"metric\")\n\tplot.Stat(ggstat.Normalize{X: \"branch\", By: firstMasterIndex, Cols: []string{\"result\"}})\n\ty = \"normalized \" + y\n\tplot.SetData(table.Remove(plot.Data(), \"result\"))\n\tplot.SetData(table.Ungroup(table.Ungroup(plot.Data())))\n\n\t// Compute geomean for each metric at each commit if there's\n\t// more than one benchmark.\n\tif len(table.GroupBy(t, \"name\").Tables()) > 1 {\n\t\tgt := removeNaNs(plot.Data(), y)\n\t\tgt = ggstat.Agg(\"commit\", \"metric\")(ggstat.AggGeoMean(y)).F(gt)\n\t\tgt = table.MapTables(gt, func(_ table.GroupID, t *table.Table) *table.Table {\n\t\t\treturn table.NewBuilder(t).AddConst(\"name\", \" geomean\").Done()\n\t\t})\n\t\tgt = table.Rename(gt, \"geomean \"+y, y)\n\t\tplot.SetData(table.Concat(plot.Data(), gt))\n\t\tnrows++\n\t}\n\n\t// Facet by name and metric.\n\tplot.Add(gg.FacetY{Col: \"name\"}, gg.FacetX{Col: \"metric\"})\n\n\t// Filter the data to reduce noise.\n\tplot.Stat(kza{y, 15, 3})\n\ty = \"filtered \" + y\n\n\t// Always show Y=0.\n\tplot.SetScale(\"y\", gg.NewLinearScaler().Include(0))\n\n\tplot.Add(gg.LayerLines{\n\t\tX: \"commit index\",\n\t\tY: y,\n\t\t//Color: \"branch\",\n\t})\n\t// plot.Add(gg.LayerTags{X: \"commit index\", Y: y, Label: \"branch\"})\n\n\t// Interactive tooltip with short hash.\n\tplot.Stat(tooltip{y})\n\tplot.Add(gg.LayerTooltips{X: \"commit index\", Y: y, Label: \"tooltip\"})\n\n\treturn plot, nrows, ncols\n}\n\nfunc firstMasterIndex(bs []string) int {\n\treturn slice.Index(bs, \"master\")\n}\n\ntype commitIndex struct{}\n\nfunc (commitIndex) F(g table.Grouping) table.Grouping {\n\treturn table.MapTables(g, func(_ table.GroupID, t *table.Table) *table.Table {\n\t\tidxs := make([]int, t.Len())\n\t\tlast, idx := \"\", 
-1\n\t\tfor i, hash := range t.MustColumn(\"commit\").([]string) {\n\t\t\tif hash != last {\n\t\t\t\tidx++\n\t\t\t\tlast = hash\n\t\t\t}\n\t\t\tidxs[i] = idx\n\t\t}\n\t\tt = table.NewBuilder(t).Add(\"commit index\", idxs).Done()\n\n\t\treturn t\n\t})\n}\n\ntype convertFloat struct {\n\tcols []string\n}\n\nfunc (c convertFloat) F(g table.Grouping) table.Grouping {\n\treturn table.MapTables(g, func(_ table.GroupID, t *table.Table) *table.Table {\n\t\tb := table.NewBuilder(t)\n\t\tfor _, col := range c.cols {\n\t\t\tvar ncol []float64\n\t\t\tslice.Convert(&ncol, t.MustColumn(col))\n\t\t\tb.Add(col, ncol)\n\t\t}\n\t\treturn b.Done()\n\t})\n}\n\nfunc removeNaNs(g table.Grouping, col string) table.Grouping {\n\treturn table.Filter(g, func(result float64) bool {\n\t\treturn !math.IsNaN(result)\n\t}, col)\n}\n\ntype kza struct {\n\tX    string\n\tM, K int\n}\n\nfunc (k kza) F(g table.Grouping) table.Grouping {\n\treturn table.MapTables(g, func(_ table.GroupID, t *table.Table) *table.Table {\n\t\tvar xs []float64\n\t\tslice.Convert(&xs, t.MustColumn(k.X))\n\t\tnxs := AdaptiveKolmogorovZurbenko(xs, k.M, k.K)\n\t\treturn table.NewBuilder(t).Add(\"filtered \"+k.X, nxs).Done()\n\t})\n}\n\ntype tooltip struct {\n\tY string\n}\n\nfunc (t tooltip) F(g table.Grouping) table.Grouping {\n\treturn table.MapCols(g,\n\t\tfunc(commit []string, result []float64, tooltip []string) {\n\t\t\tfor i, c := range commit {\n\t\t\t\ttooltip[i] = fmt.Sprintf(\"%s %.2fX\", c[:7], result[i])\n\t\t\t}\n\t\t}, \"commit\", t.Y)(\"tooltip\")\n}\n"
  },
  {
    "path": "benchplot/table.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"math\"\n\t\"reflect\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/aclements/go-gg/table\"\n\t\"github.com/aclements/go-misc/bench\"\n)\n\nfunc benchmarksToTable(bs []*bench.Benchmark) (t *table.Table, configCols, resultCols []string) {\n\t// Gather name, config, and result columns.\n\tnan := math.NaN()\n\tnames := make([]string, len(bs))\n\tconfigs, results := map[string]reflect.Value{}, map[string][]float64{}\n\tfor i, b := range bs {\n\t\tnames[i] = b.Name\n\n\t\tfor k, c := range b.Config {\n\t\t\tseq, ok := configs[k]\n\t\t\tif !ok {\n\t\t\t\tt := reflect.SliceOf(reflect.TypeOf(c.Value))\n\t\t\t\tseq = reflect.MakeSlice(t, len(bs), len(bs))\n\t\t\t\tconfigs[k] = seq\n\t\t\t}\n\t\t\tseq.Index(i).Set(reflect.ValueOf(c.Value))\n\t\t}\n\n\t\tfor k, v := range b.Result {\n\t\t\tseq, ok := results[k]\n\t\t\tif !ok {\n\t\t\t\tseq = make([]float64, len(bs))\n\t\t\t\tfor i := range seq {\n\t\t\t\t\tseq[i] = nan\n\t\t\t\t}\n\t\t\t\tresults[k] = seq\n\t\t\t}\n\t\t\tseq[i] = v\n\t\t}\n\t}\n\n\t// Build table.\n\ttab := new(table.Builder).Add(\"name\", names)\n\n\tkeys := make([]string, 0, len(configs))\n\tfor k := range configs {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\tfor _, key := range keys {\n\t\tnicekey := strings.Replace(key, \"-\", \" \", -1)\n\t\tniceval := configs[key].Interface()\n\t\tif n, ok := niceval.([]time.Time); ok {\n\t\t\tniceval = byTime(n)\n\t\t}\n\n\t\ttab.Add(nicekey, niceval)\n\t\tconfigCols = append(configCols, nicekey)\n\t}\n\n\tkeys = make([]string, 0, len(results))\n\tfor k := range results {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\tfor _, key := range keys {\n\t\tnicekey := strings.Replace(key, \"-\", \" \", -1)\n\t\tif nicekey == \"ns/op\" {\n\t\t\t// TODO: Use the unit parser from 
benchstat.\n\t\t\tnicekey = \"time/op\"\n\t\t\tdurations := make([]time.Duration, len(results[key]))\n\t\t\tfor i, x := range results[key] {\n\t\t\t\tdurations[i] = time.Duration(x)\n\t\t\t}\n\t\t\ttab.Add(nicekey, durations)\n\t\t} else {\n\t\t\ttab.Add(nicekey, results[key])\n\t\t}\n\t\tresultCols = append(resultCols, nicekey)\n\t}\n\n\treturn tab.Done(), configCols, resultCols\n}\n\nfunc commitsToTable(commits []CommitInfo) *table.Table {\n\thashCol := make([]string, len(commits))\n\tauthorDateCol := make(byTime, len(commits))\n\tcommitDateCol := make(byTime, len(commits))\n\tbranchCol := make([]string, len(commits))\n\tj := 0\n\tfor i := range commits {\n\t\tci := &commits[i]\n\n\t\thashCol[j] = ci.Hash\n\t\tauthorDateCol[j] = ci.AuthorDate\n\t\tcommitDateCol[j] = ci.CommitDate\n\t\tbranchCol[j] = ci.Branch\n\t\tj++\n\t}\n\n\treturn new(table.Builder).\n\t\tAdd(\"commit\", hashCol).\n\t\tAdd(\"author date\", authorDateCol).\n\t\tAdd(\"commit date\", commitDateCol).\n\t\tAdd(\"branch\", branchCol).\n\t\tDone()\n}\n\ntype byTime []time.Time\n\nfunc (s byTime) Len() int {\n\treturn len(s)\n}\n\nfunc (s byTime) Less(i, j int) bool {\n\treturn s[i].Before(s[j])\n}\n\nfunc (s byTime) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/LICENSE",
    "content": "Copyright (c) 2016 The Go Authors. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n   * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n   * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n   * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/README.md",
    "content": "# gg [![](https://godoc.org/github.com/aclements/go-gg?status.svg)](https://godoc.org/github.com/aclements/go-gg)\n\ngg is a plotting package for Go inspired by the Grammar of Graphics.\n\nNote that gg is currently very experimental and the API is still in\nflux. Please vendor this package before using it.\n\nTo fetch gg, run\n\n    go get github.com/aclements/go-gg\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/generic/doc.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package generic provides type-generic functions.\npackage generic\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/generic/error.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage generic\n\nimport \"reflect\"\n\ntype TypeError struct {\n\tType1, Type2 reflect.Type\n\tExtra        string\n}\n\nfunc (e TypeError) Error() string {\n\tmsg := e.Type1.String()\n\tif e.Type2 != nil {\n\t\tmsg += \" and \" + e.Type2.String()\n\t}\n\tmsg += \" \" + e.Extra\n\treturn msg\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/generic/order.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage generic\n\nimport \"reflect\"\n\n// CanOrder returns whether the values a and b are orderable according\n// to the Go language specification.\nfunc CanOrder(a, b interface{}) bool {\n\tak, bk := reflect.ValueOf(a).Kind(), reflect.ValueOf(b).Kind()\n\tif ak != bk {\n\t\treturn false\n\t}\n\treturn CanOrderR(ak)\n}\n\nvar orderable = map[reflect.Kind]bool{\n\treflect.Int:     true,\n\treflect.Int8:    true,\n\treflect.Int16:   true,\n\treflect.Int32:   true,\n\treflect.Int64:   true,\n\treflect.Uint:    true,\n\treflect.Uintptr: true,\n\treflect.Uint8:   true,\n\treflect.Uint16:  true,\n\treflect.Uint32:  true,\n\treflect.Uint64:  true,\n\treflect.Float32: true,\n\treflect.Float64: true,\n\treflect.String:  true,\n}\n\n// CanOrderR returns whether two values of kind k are orderable\n// according to the Go language specification.\nfunc CanOrderR(k reflect.Kind) bool {\n\treturn orderable[k]\n}\n\n// Order returns the order of values a and b: -1 if a < b, 0 if a ==\n// b, 1 if a > b. 
The results are undefined if either a or b is NaN.\n//\n// Order panics if a and b are not orderable according to the Go\n// language specification.\nfunc Order(a, b interface{}) int {\n\treturn OrderR(reflect.ValueOf(a), reflect.ValueOf(b))\n}\n\n// OrderR is equivalent to Order, but operates on reflect.Values.\nfunc OrderR(a, b reflect.Value) int {\n\tif a.Kind() != b.Kind() {\n\t\tpanic(&TypeError{a.Type(), b.Type(), \"are not orderable because they are different kinds\"})\n\t}\n\n\tswitch a.Kind() {\n\tcase reflect.Float32, reflect.Float64:\n\t\ta, b := a.Float(), b.Float()\n\t\tif a < b {\n\t\t\treturn -1\n\t\t} else if a > b {\n\t\t\treturn 1\n\t\t}\n\t\treturn 0\n\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\ta, b := a.Int(), b.Int()\n\t\tif a < b {\n\t\t\treturn -1\n\t\t} else if a > b {\n\t\t\treturn 1\n\t\t}\n\t\treturn 0\n\n\tcase reflect.Uint, reflect.Uintptr, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\ta, b := a.Uint(), b.Uint()\n\t\tif a < b {\n\t\t\treturn -1\n\t\t} else if a > b {\n\t\t\treturn 1\n\t\t}\n\t\treturn 0\n\n\tcase reflect.String:\n\t\ta, b := a.String(), b.String()\n\t\tif a < b {\n\t\t\treturn -1\n\t\t} else if a > b {\n\t\t\treturn 1\n\t\t}\n\t\treturn 0\n\t}\n\n\tpanic(&TypeError{a.Type(), nil, \"is not orderable\"})\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/generic/slice/concat.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage slice\n\nimport (\n\t\"reflect\"\n\n\t\"github.com/aclements/go-gg/generic\"\n)\n\n// Concat returns the concatenation of all of ss. The types of all of\n// the arguments must be identical or Concat will panic with a\n// *generic.TypeError. The returned slice will have the same type as the\n// inputs. If there are 0 arguments, Concat returns nil. Concat does\n// not modify any of the input slices.\nfunc Concat(ss ...T) T {\n\tif len(ss) == 0 {\n\t\treturn nil\n\t}\n\n\trvs := make([]reflect.Value, len(ss))\n\ttotal := 0\n\tvar typ reflect.Type\n\tfor i, s := range ss {\n\t\trvs[i] = reflectSlice(s)\n\t\ttotal += rvs[i].Len()\n\t\tif i == 0 {\n\t\t\ttyp = rvs[i].Type()\n\t\t} else if rvs[i].Type() != typ {\n\t\t\tpanic(&generic.TypeError{typ, rvs[i].Type(), \"have different types\"})\n\t\t}\n\t}\n\n\tout := reflect.MakeSlice(typ, 0, total)\n\tfor _, rv := range rvs {\n\t\tout = reflect.AppendSlice(out, rv)\n\t}\n\treturn out.Interface()\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/generic/slice/concat_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage slice\n\nimport \"testing\"\n\nfunc TestConcat(t *testing.T) {\n\tif g := Concat(); g != nil {\n\t\tt.Errorf(\"Concat() should be nil; got %v\", g)\n\t}\n\n\tif g, w := Concat([]int{}), []int{}; !de(w, g) {\n\t\tt.Errorf(\"want %v; got %v\", w, g)\n\t}\n\n\tif g, w := Concat([]int(nil)), []int{}; !de(w, g) {\n\t\tt.Errorf(\"want %v; got %v\", w, g)\n\t}\n\n\tif g, w := Concat([]int{1, 2}, []int{3, 4}), []int{1, 2, 3, 4}; !de(w, g) {\n\t\tt.Errorf(\"want %v; got %v\", w, g)\n\t}\n\n\tshouldPanic(t, \"have different types\", func() {\n\t\tConcat([]int{}, []string{})\n\t})\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/generic/slice/convert.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage slice\n\nimport (\n\t\"reflect\"\n\n\t\"github.com/aclements/go-gg/generic\"\n)\n\n// Convert converts each element in from and assigns it to *to. to\n// must be a pointer to a slice. Convert slices or extends *to to\n// len(from) and then assigns to[i] = T(from[i]) where T is the type\n// of *to's elements. If from and *to have the same element type, it\n// simply assigns *to = from.\nfunc Convert(to interface{}, from T) {\n\tfv := reflectSlice(from)\n\ttv := reflect.ValueOf(to)\n\tif tv.Kind() != reflect.Ptr {\n\t\tpanic(&generic.TypeError{tv.Type(), nil, \"is not a *[]T\"})\n\t}\n\ttst := tv.Type().Elem()\n\tif tst.Kind() != reflect.Slice {\n\t\tpanic(&generic.TypeError{tv.Type(), nil, \"is not a *[]T\"})\n\t}\n\n\tif fv.Type().AssignableTo(tst) {\n\t\ttv.Elem().Set(fv)\n\t\treturn\n\t}\n\n\teltt := tst.Elem()\n\tif !fv.Type().Elem().ConvertibleTo(eltt) {\n\t\tpanic(&generic.TypeError{fv.Type(), tst, \"cannot be converted\"})\n\t}\n\n\tswitch to := to.(type) {\n\tcase *[]float64:\n\t\t// This is extremely common.\n\t\t*to = (*to)[:0]\n\t\tfor i, len := 0, fv.Len(); i < len; i++ {\n\t\t\t*to = append(*to, fv.Index(i).Convert(eltt).Float())\n\t\t}\n\n\tdefault:\n\t\ttsv := tv.Elem()\n\t\ttsv.SetLen(0)\n\t\tfor i, len := 0, fv.Len(); i < len; i++ {\n\t\t\ttsv = reflect.Append(tsv, fv.Index(i).Convert(eltt))\n\t\t}\n\t\ttv.Elem().Set(tsv)\n\t}\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/generic/slice/convert_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage slice\n\nimport \"testing\"\n\nfunc TestConvert(t *testing.T) {\n\tvar is []int\n\tConvert(&is, []int{1, 2, 3})\n\tif w := []int{1, 2, 3}; !de(w, is) {\n\t\tt.Errorf(\"want %v; got %v\", w, is)\n\t}\n\tConvert(&is, []float64{1, 2, 3})\n\tif w := []int{1, 2, 3}; !de(w, is) {\n\t\tt.Errorf(\"want %v; got %v\", w, is)\n\t}\n\n\tvar fs []float64\n\tConvert(&fs, []int{1, 2, 3})\n\tif w := []float64{1, 2, 3}; !de(w, fs) {\n\t\tt.Errorf(\"want %v; got %v\", w, fs)\n\t}\n\tConvert(&fs, []float64{1, 2, 3})\n\tif w := []float64{1, 2, 3}; !de(w, fs) {\n\t\tt.Errorf(\"want %v; got %v\", w, fs)\n\t}\n\n\tshouldPanic(t, \"cannot be converted\", func() {\n\t\tConvert(&is, []string{\"1\", \"2\", \"3\"})\n\t})\n\tshouldPanic(t, `is not a \\*\\[\\]T`, func() {\n\t\tConvert(is, []int{1, 2, 3})\n\t})\n\tshouldPanic(t, `is not a \\*\\[\\]T`, func() {\n\t\tx := 1\n\t\tConvert(&x, []int{1, 2, 3})\n\t})\n\tshouldPanic(t, \"is not a slice\", func() {\n\t\tConvert(&is, 1)\n\t})\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/generic/slice/cycle.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage slice\n\nimport \"reflect\"\n\n// Cycle constructs a slice of length length by repeatedly\n// concatenating s to itself. If len(s) >= length, it returns\n// s[:length]. Otherwise, it allocates a new slice. If len(s) == 0 and\n// length != 0, Cycle panics.\nfunc Cycle(s T, length int) T {\n\trv := reflectSlice(s)\n\tif rv.Len() >= length {\n\t\treturn rv.Slice(0, length).Interface()\n\t}\n\n\tif rv.Len() == 0 {\n\t\tpanic(\"empty slice\")\n\t}\n\n\t// Allocate a new slice of the appropriate length.\n\tout := reflect.MakeSlice(rv.Type(), length, length)\n\n\t// Copy elements to out.\n\tfor pos := 0; pos < length; {\n\t\tpos += reflect.Copy(out.Slice(pos, length), rv)\n\t}\n\n\treturn out.Interface()\n}\n\n// Repeat returns a slice consisting of length copies of v.\nfunc Repeat(v interface{}, length int) T {\n\tif length < 0 {\n\t\tlength = 0\n\t}\n\trv := reflect.ValueOf(v)\n\tout := reflect.MakeSlice(reflect.SliceOf(rv.Type()), length, length)\n\tfor i := 0; i < length; i++ {\n\t\tout.Index(i).Set(rv)\n\t}\n\treturn out.Interface()\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/generic/slice/doc.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package slice provides generic slice functions.\npackage slice\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/generic/slice/find.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage slice\n\nimport (\n\t\"reflect\"\n\n\t\"github.com/aclements/go-gg/generic\"\n)\n\n// Index returns the index of the first instance of val in s, or -1 if\n// val is not present in s. val's type must be s's element type.\nfunc Index(s T, val interface{}) int {\n\trs := reflectSlice(s)\n\tif vt := reflect.TypeOf(val); rs.Type().Elem() != vt {\n\t\t// TODO: Better \"<seq> is not a sequence of <val>\".\n\t\tpanic(&generic.TypeError{rs.Type(), vt, \"cannot find\"})\n\t}\n\n\tfor i, l := 0, rs.Len(); i < l; i++ {\n\t\tif rs.Index(i).Interface() == val {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}\n\n// LastIndex returns the index of the last instance of val in s, or -1\n// if val is not present in s. val's type must be s's element type.\nfunc LastIndex(s T, val interface{}) int {\n\trs := reflectSlice(s)\n\tif vt := reflect.TypeOf(val); rs.Type().Elem() != vt {\n\t\t// TODO: Better \"<seq> is not a sequence of <val>\".\n\t\tpanic(&generic.TypeError{rs.Type(), vt, \"cannot find\"})\n\t}\n\n\tfor i := rs.Len() - 1; i >= 0; i-- {\n\t\tif rs.Index(i).Interface() == val {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}\n\n// Contains reports whether val is within s. val's type must be s's\n// element type.\nfunc Contains(s T, val interface{}) bool {\n\treturn Index(s, val) >= 0\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/generic/slice/index.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage slice\n\nimport (\n\t\"reflect\"\n\n\t\"github.com/aclements/go-gg/generic\"\n)\n\n// Select returns a slice w such that w[i] = v[indexes[i]].\nfunc Select(v T, indexes []int) T {\n\tswitch v := v.(type) {\n\tcase []int:\n\t\tres := make([]int, len(indexes))\n\t\tfor i, x := range indexes {\n\t\t\tres[i] = v[x]\n\t\t}\n\t\treturn res\n\n\tcase []float64:\n\t\tres := make([]float64, len(indexes))\n\t\tfor i, x := range indexes {\n\t\t\tres[i] = v[x]\n\t\t}\n\t\treturn res\n\n\tcase []string:\n\t\tres := make([]string, len(indexes))\n\t\tfor i, x := range indexes {\n\t\t\tres[i] = v[x]\n\t\t}\n\t\treturn res\n\t}\n\n\trv := reflectSlice(v)\n\tres := reflect.MakeSlice(rv.Type(), len(indexes), len(indexes))\n\tfor i, x := range indexes {\n\t\tres.Index(i).Set(rv.Index(x))\n\t}\n\treturn res.Interface()\n}\n\n// SelectInto assigns out[i] = in[indexes[i]]. in and out must have\n// the same types and len(out) must be >= len(indexes). If in and out\n// overlap, the results are undefined.\nfunc SelectInto(out, in T, indexes []int) {\n\t// TODO: Maybe they should only have to be assignable?\n\tif it, ot := reflect.TypeOf(in), reflect.TypeOf(out); it != ot {\n\t\tpanic(&generic.TypeError{it, ot, \"must be the same type\"})\n\t}\n\n\tswitch in := in.(type) {\n\tcase []int:\n\t\tout := out.([]int)\n\t\tfor i, x := range indexes {\n\t\t\tout[i] = in[x]\n\t\t}\n\t\treturn\n\n\tcase []float64:\n\t\tout := out.([]float64)\n\t\tfor i, x := range indexes {\n\t\t\tout[i] = in[x]\n\t\t}\n\t\treturn\n\n\tcase []string:\n\t\tout := out.([]string)\n\t\tfor i, x := range indexes {\n\t\t\tout[i] = in[x]\n\t\t}\n\t\treturn\n\t}\n\n\tinv, outv := reflectSlice(in), reflectSlice(out)\n\tfor i, x := range indexes {\n\t\toutv.Index(i).Set(inv.Index(x))\n\t}\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/generic/slice/min.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage slice\n\nimport (\n\t\"reflect\"\n\t\"sort\"\n\n\t\"github.com/aclements/go-gg/generic\"\n)\n\n// Min returns the minimum value in v. v must either implement\n// sort.Interface or its elements must be orderable. Min panics if v\n// is empty.\nfunc Min(v T) interface{} {\n\tx, _ := minmax(v, -1, true)\n\treturn x.Interface()\n}\n\n// ArgMin returns the index of the minimum value in v. If there are\n// multiple indexes equal to the minimum value, ArgMin returns the\n// lowest of them. v must be a slice whose elements are orderable, or\n// must implement sort.Interface. ArgMin panics if v is empty.\nfunc ArgMin(v interface{}) int {\n\t_, i := minmax(v, -1, false)\n\treturn i\n}\n\n// Max returns the maximum value in v. v must either implement\n// sort.Interface or its elements must be orderable. Max panics if v\n// is empty.\nfunc Max(v T) interface{} {\n\tx, _ := minmax(v, 1, true)\n\treturn x.Interface()\n}\n\n// ArgMax returns the index of the maximum value in v. If there are\n// multiple indexes equal to the maximum value, ArgMax returns the\n// lowest of them. v must be a slice whose elements are orderable, or\n// must implement sort.Interface. 
ArgMax panics if v is empty.\nfunc ArgMax(v interface{}) int {\n\t_, i := minmax(v, 1, false)\n\treturn i\n}\n\nfunc minmax(v interface{}, keep int, val bool) (reflect.Value, int) {\n\tswitch v := v.(type) {\n\tcase sort.Interface:\n\t\tif v.Len() == 0 {\n\t\t\tif keep < 0 {\n\t\t\t\tpanic(\"zero-length sequence has no minimum\")\n\t\t\t} else {\n\t\t\t\tpanic(\"zero-length sequence has no maximum\")\n\t\t\t}\n\t\t}\n\t\tmaxi := 0\n\t\tif keep < 0 {\n\t\t\tfor i, len := 0, v.Len(); i < len; i++ {\n\t\t\t\tif v.Less(i, maxi) {\n\t\t\t\t\tmaxi = i\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tfor i, len := 0, v.Len(); i < len; i++ {\n\t\t\t\tif v.Less(maxi, i) {\n\t\t\t\t\tmaxi = i\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif !val {\n\t\t\treturn reflect.Value{}, maxi\n\t\t}\n\n\t\trv := reflectSlice(v)\n\t\treturn rv.Index(maxi), maxi\n\t}\n\n\trv := reflectSlice(v)\n\tif !generic.CanOrderR(rv.Type().Elem().Kind()) {\n\t\tpanic(&generic.TypeError{rv.Type().Elem(), nil, \"is not orderable\"})\n\t}\n\tif rv.Len() == 0 {\n\t\tif keep < 0 {\n\t\t\tpanic(\"zero-length slice has no minimum\")\n\t\t} else {\n\t\t\tpanic(\"zero-length slice has no maximum\")\n\t\t}\n\t}\n\tmax, maxi := rv.Index(0), 0\n\tfor i, len := 1, rv.Len(); i < len; i++ {\n\t\tif elt := rv.Index(i); generic.OrderR(elt, max) == keep {\n\t\t\tmax, maxi = elt, i\n\t\t}\n\t}\n\treturn max, maxi\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/generic/slice/min_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage slice\n\nimport (\n\t\"math\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestMin(t *testing.T) {\n\tshouldPanic(t, \"no min\", func() { Min([]float64{}) })\n\tshouldPanic(t, \"no min\", func() { ArgMin([]float64{}) })\n\tshouldPanic(t, \"no max\", func() { Max([]float64{}) })\n\tshouldPanic(t, \"no max\", func() { ArgMax([]float64{}) })\n\n\txs := []float64{5, 1, 8, 1, 8, 3}\n\tif x := Min(xs); x != 1.0 {\n\t\tt.Errorf(\"Min should be 1, got %v\", x)\n\t}\n\tif x := ArgMin(xs); x != 1 {\n\t\tt.Errorf(\"ArgMin should be 1, got %v\", x)\n\t}\n\tif x := Max(xs); x != 8.0 {\n\t\tt.Errorf(\"Max should be 8, got %v\", x)\n\t}\n\tif x := ArgMax(xs); x != 2 {\n\t\tt.Errorf(\"ArgMax should be 2, got %v\", x)\n\t}\n\n\txs = []float64{1, 5, math.NaN()}\n\tif x := Min(xs); x != 1.0 {\n\t\tt.Errorf(\"Min should be 1, got %v\", x)\n\t}\n\tif x := Max(xs); x != 5.0 {\n\t\tt.Errorf(\"Max should be 5, got %v\", x)\n\t}\n}\n\ntype fakeSortInterface struct {\n\tlen int\n}\n\nfunc (f fakeSortInterface) Len() int {\n\treturn f.len\n}\n\nfunc (f fakeSortInterface) Swap(i, j int) {\n\tpanic(\"can't\")\n}\n\nfunc (f fakeSortInterface) Less(i, j int) bool {\n\treturn i < j\n}\n\ntype timeSlice []time.Time\n\nfunc (s timeSlice) Len() int {\n\treturn len(s)\n}\n\nfunc (s timeSlice) Less(i, j int) bool {\n\treturn s[i].Before(s[j])\n}\n\nfunc (s timeSlice) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n}\n\nfunc TestMinSort(t *testing.T) {\n\tshouldPanic(t, \"no min\", func() { Min(fakeSortInterface{0}) })\n\tshouldPanic(t, \"no min\", func() { ArgMin(fakeSortInterface{0}) })\n\tshouldPanic(t, \"no max\", func() { Max(fakeSortInterface{0}) })\n\tshouldPanic(t, \"no max\", func() { ArgMax(fakeSortInterface{0}) })\n\n\tf := fakeSortInterface{5}\n\tif x := ArgMin(f); x != 0 {\n\t\tt.Errorf(\"ArgMin should be 0, got 
%v\", x)\n\t}\n\tif x := ArgMax(f); x != 4 {\n\t\tt.Errorf(\"ArgMax should be 4, got %v\", x)\n\t}\n\n\tz := time.Unix(0, 0)\n\tts := timeSlice{z.Add(time.Hour), z, z.Add(2 * time.Hour), z.Add(time.Hour)}\n\tif x := Min(ts); x != ts[1] {\n\t\tt.Errorf(\"Min should be %v, got %v\", ts[1], x)\n\t}\n\tif x := ArgMin(ts); x != 1 {\n\t\tt.Errorf(\"ArgMin should be 1, got %v\", x)\n\t}\n\tif x := Max(ts); x != ts[2] {\n\t\tt.Errorf(\"Max should be %v, got %v\", ts[2], x)\n\t}\n\tif x := ArgMax(ts); x != 2 {\n\t\tt.Errorf(\"ArgMax should be 2, got %v\", x)\n\t}\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/generic/slice/nub.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage slice\n\nimport \"reflect\"\n\nvar trueVal = reflect.ValueOf(true)\n\n// Nub returns v with duplicates removed. It keeps the first instance\n// of each distinct value and preserves their order.\nfunc Nub(v T) T {\n\trv := reflectSlice(v)\n\tindexes := make([]int, 0)\n\tset := reflect.MakeMap(reflect.MapOf(rv.Type().Elem(), trueVal.Type()))\n\tfor i, l := 0, rv.Len(); i < l; i++ {\n\t\tx := rv.Index(i)\n\t\tif set.MapIndex(x).IsValid() {\n\t\t\tcontinue\n\t\t}\n\t\tset.SetMapIndex(x, trueVal)\n\t\tindexes = append(indexes, i)\n\t}\n\treturn Select(v, indexes)\n}\n\n// NubAppend is equivalent to appending all of the slices in vs and\n// then calling Nub on the result, but more efficient.\nfunc NubAppend(vs ...T) T {\n\tif len(vs) == 0 {\n\t\treturn nil\n\t}\n\n\trv := reflectSlice(vs[0])\n\tset := reflect.MakeMap(reflect.MapOf(rv.Type().Elem(), trueVal.Type()))\n\tout := reflect.MakeSlice(rv.Type(), 0, 0)\n\n\tfor _, v := range vs {\n\t\trv := reflectSlice(v)\n\t\tfor i, l := 0, rv.Len(); i < l; i++ {\n\t\t\tx := rv.Index(i)\n\t\t\tif set.MapIndex(x).IsValid() {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tset.SetMapIndex(x, trueVal)\n\t\t\tout = reflect.Append(out, x)\n\t\t}\n\t}\n\n\treturn out.Interface()\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/generic/slice/select_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage slice\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n)\n\nfunc TestSelect(t *testing.T) {\n\tx1 := []int{1, 2, 3}\n\tgot := Select(x1, []int{2, 1, 0})\n\tif want := []int{3, 2, 1}; !reflect.DeepEqual(got, want) {\n\t\tt.Fatalf(\"expected %v, got %v\", want, got)\n\t}\n\tgot = Select(x1, []int{1, 1, 1, 1})\n\tif want := []int{2, 2, 2, 2}; !reflect.DeepEqual(got, want) {\n\t\tt.Fatalf(\"expected %v, got %v\", want, got)\n\t}\n\n\ttype T struct{ x int }\n\tx2 := []T{{1}, {2}, {3}}\n\tgot = Select(x2, []int{2, 1, 0})\n\tif want := []T{{3}, {2}, {1}}; !reflect.DeepEqual(got, want) {\n\t\tt.Fatalf(\"expected %v, got %v\", want, got)\n\t}\n}\n\nfunc TestSelectType(t *testing.T) {\n\ttype T []float64\n\tx1 := T{1, 2, 3}\n\ty1 := Select(x1, []int{})\n\tif _, ok := y1.(T); !ok {\n\t\tt.Fatalf(\"result has wrong type; expected T, got %T\", y1)\n\t}\n\n\ttype U int\n\tx2 := []U{1, 2, 3}\n\ty2 := Select(x2, []int{})\n\tif _, ok := y2.([]U); !ok {\n\t\tt.Fatalf(\"result has wrong type; expected []U, got %T\", y2)\n\t}\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/generic/slice/seq.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage slice\n\nimport (\n\t\"reflect\"\n\n\t\"github.com/aclements/go-gg/generic\"\n)\n\n// T is a Go slice value of type []U.\n//\n// This is primarily for documentation. There is no way to statically\n// enforce this in Go; however, functions that expect a slice will\n// panic with a *generic.TypeError if passed a non-slice value.\ntype T interface{}\n\n// reflectSlice checks that s is a slice and returns its\n// reflect.Value. It panics with a *generic.TypeError if s is not a slice.\nfunc reflectSlice(s T) reflect.Value {\n\trv := reflect.ValueOf(s)\n\tif rv.Kind() != reflect.Slice {\n\t\tpanic(&generic.TypeError{rv.Type(), nil, \"is not a slice\"})\n\t}\n\treturn rv\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/generic/slice/sort.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage slice\n\nimport (\n\t\"reflect\"\n\t\"sort\"\n\t\"time\"\n\n\t\"github.com/aclements/go-gg/generic\"\n)\n\n// CanSort returns whether the value v can be sorted.\nfunc CanSort(v interface{}) bool {\n\tswitch v.(type) {\n\tcase sort.Interface, []time.Time:\n\t\treturn true\n\t}\n\treturn generic.CanOrderR(reflect.TypeOf(v).Elem().Kind())\n}\n\n// Sort sorts v in increasing order. v must implement sort.Interface,\n// be a slice whose elements are orderable, or be a []time.Time.\nfunc Sort(v interface{}) {\n\tsort.Sort(Sorter(v))\n}\n\n// Sorter returns a sort.Interface for sorting v. v must implement\n// sort.Interface, be a slice whose elements are orderable, or be a\n// []time.Time.\nfunc Sorter(v interface{}) sort.Interface {\n\tswitch v := v.(type) {\n\tcase []int:\n\t\treturn sort.IntSlice(v)\n\tcase []float64:\n\t\treturn sort.Float64Slice(v)\n\tcase []string:\n\t\treturn sort.StringSlice(v)\n\tcase []time.Time:\n\t\treturn sortTimeSlice(v)\n\tcase sort.Interface:\n\t\treturn v\n\t}\n\n\trv := reflectSlice(v)\n\tswitch rv.Type().Elem().Kind() {\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\treturn sortIntSlice{rv}\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:\n\t\treturn sortUintSlice{rv}\n\tcase reflect.Float32, reflect.Float64:\n\t\treturn sortFloatSlice{rv}\n\tcase reflect.String:\n\t\treturn sortStringSlice{rv}\n\t}\n\tpanic(&generic.TypeError{rv.Type().Elem(), nil, \"is not orderable\"})\n}\n\ntype sortIntSlice struct {\n\treflect.Value\n}\n\nfunc (s sortIntSlice) Len() int {\n\treturn s.Value.Len()\n}\n\nfunc (s sortIntSlice) Less(i, j int) bool {\n\treturn s.Index(i).Int() < s.Index(j).Int()\n}\n\nfunc (s sortIntSlice) Swap(i, j int) {\n\ta, b := s.Index(i).Int(), 
s.Index(j).Int()\n\ts.Index(i).SetInt(b)\n\ts.Index(j).SetInt(a)\n}\n\ntype sortUintSlice struct {\n\treflect.Value\n}\n\nfunc (s sortUintSlice) Len() int {\n\treturn s.Value.Len()\n}\n\nfunc (s sortUintSlice) Less(i, j int) bool {\n\treturn s.Index(i).Uint() < s.Index(j).Uint()\n}\n\nfunc (s sortUintSlice) Swap(i, j int) {\n\ta, b := s.Index(i).Uint(), s.Index(j).Uint()\n\ts.Index(i).SetUint(b)\n\ts.Index(j).SetUint(a)\n}\n\ntype sortFloatSlice struct {\n\treflect.Value\n}\n\nfunc (s sortFloatSlice) Len() int {\n\treturn s.Value.Len()\n}\n\nfunc (s sortFloatSlice) Less(i, j int) bool {\n\treturn s.Index(i).Float() < s.Index(j).Float()\n}\n\nfunc (s sortFloatSlice) Swap(i, j int) {\n\ta, b := s.Index(i).Float(), s.Index(j).Float()\n\ts.Index(i).SetFloat(b)\n\ts.Index(j).SetFloat(a)\n}\n\ntype sortStringSlice struct {\n\treflect.Value\n}\n\nfunc (s sortStringSlice) Len() int {\n\treturn s.Value.Len()\n}\n\nfunc (s sortStringSlice) Less(i, j int) bool {\n\treturn s.Index(i).String() < s.Index(j).String()\n}\n\nfunc (s sortStringSlice) Swap(i, j int) {\n\ta, b := s.Index(i).String(), s.Index(j).String()\n\ts.Index(i).SetString(b)\n\ts.Index(j).SetString(a)\n}\n\ntype sortTimeSlice []time.Time\n\nfunc (s sortTimeSlice) Len() int           { return len(s) }\nfunc (s sortTimeSlice) Less(i, j int) bool { return s[i].Before(s[j]) }\nfunc (s sortTimeSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/generic/slice/util_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage slice\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"testing\"\n)\n\nfunc de(x, y interface{}) bool {\n\treturn reflect.DeepEqual(x, y)\n}\n\nfunc shouldPanic(t *testing.T, re string, f func()) {\n\tr := regexp.MustCompile(re)\n\tdefer func() {\n\t\terr := recover()\n\t\tif err == nil {\n\t\t\tt.Fatalf(\"want panic matching %q; got no panic\", re)\n\t\t} else if !r.MatchString(fmt.Sprintf(\"%s\", err)) {\n\t\t\tt.Fatalf(\"want panic matching %q; got %s\", re, err)\n\t\t}\n\t}()\n\tf()\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/gg/example_scale_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage gg\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/aclements/go-gg/table\"\n)\n\nfunc ExampleNewTimeScaler() {\n\tvar x []time.Time\n\tvar y []float64\n\tvar steps []time.Duration\n\tfor _, step := range []time.Duration{\n\t\t1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,\n\t\ttime.Minute, time.Hour, 24 * time.Hour, 7 * 24 * time.Hour,\n\t} {\n\t\tt := time.Now()\n\t\tfor i := 0; i < 100; i++ {\n\t\t\tx = append(x, t)\n\t\t\ty = append(y, rand.Float64()-.5)\n\t\t\tsteps = append(steps, 100*step)\n\t\t\tt = t.Add(-step)\n\t\t}\n\t}\n\n\ttb := table.NewBuilder(nil)\n\ttb.Add(\"x\", x).Add(\"y\", y).Add(\"steps\", steps)\n\n\tplot := NewPlot(tb.Done())\n\n\tplot.SetScale(\"x\", NewTimeScaler())\n\n\tplot.Add(FacetY{\n\t\tCol:          \"steps\",\n\t\tSplitXScales: true,\n\t})\n\n\tplot.Add(LayerLines{\n\t\tX: \"x\",\n\t\tY: \"y\",\n\t})\n\n\tf, err := os.Create(\"scale_time.svg\")\n\tif err != nil {\n\t\tpanic(\"unable to create scale_time.svg\")\n\t}\n\tdefer f.Close()\n\tplot.WriteSVG(f, 800, 1000)\n\tfmt.Println(\"ok\")\n\t// output:\n\t// ok\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/gg/facet.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage gg\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"reflect\"\n\n\t\"github.com/aclements/go-gg/generic\"\n\t\"github.com/aclements/go-gg/generic/slice\"\n\t\"github.com/aclements/go-gg/table\"\n)\n\n// TODO: What if there are already layers? Maybe they should be\n// repeated in all facets. ggplot2 apparently does this when the\n// faceting variable isn't in one of the data frames.\n\n// TODO: Subplot is getting rather complicated. If I want to make\n// facets only use public APIs, perhaps gg itself should only know\n// about some interface for table group labels that provides a layout\n// manager and the layout logic should live with the facets.\n\n// TODO: This is very nearly flexible enough to make pairwise plots.\n\n// TODO: Is this flexible enough to make marginal distribution plots?\n\n// TODO: There's logical overlap between how a facet chooses to\n// position and label a subplot and a discrete-ranged scalar. Perhaps\n// facets should use scalars to chose positions and labels?\n\n// FacetCommon is the base type for plot faceting operations. Faceting\n// is a grouping operation that subdivides a plot into subplots based\n// on the values in data column. Faceting operations may be composed:\n// if a faceting operation has already divided the plot into subplots,\n// a further faceting operation will subdivide each of those subplots.\ntype FacetCommon struct {\n\t// Col names the column to facet by. Each distinct value of\n\t// this column will become a separate plot. If Col is\n\t// orderable, the facets will be in value order; otherwise,\n\t// they will be in index order.\n\tCol string\n\n\t// SplitXScales indicates that each band (column for FacetX;\n\t// row for FacetY) created by this faceting operation should\n\t// have separate X axis scales. 
The default, false, indicates\n\t// that subplots should continue to share X scales.\n\t//\n\t// SplitXScales and SplitYScales, combined with facet\n\t// composition, give a great deal of control over how scales\n\t// are shared. Suppose you want to create an X/Y facet grid by\n\t// first performing a FacetX and then a FacetY. Here are some\n\t// common ways to share or split the scales:\n\t//\n\t// * To share the same scales between all subplots, set both\n\t// flags to false in both facet operations.\n\t//\n\t// * To have independent scales in all subplots, set both\n\t// flags to true in the FacetY (and it doesn't matter what\n\t// they are in the FacetX).\n\t//\n\t// * To share the X scale within each column and the Y scale\n\t// within each row, set SplitXScales in the FacetX and\n\t// SplitYScales in the FacetY.\n\tSplitXScales bool\n\n\t// SplitYScales is the equivalent of SplitXScales for Y axis\n\t// scales.\n\tSplitYScales bool\n\n\t// Labeler is a function that constructs facet labels from\n\t// data values. If this is nil, the default is fmt.Sprint.\n\t//\n\t// TODO: Call this through reflect to get the argument type\n\t// right?\n\tLabeler func(interface{}) string\n\n\t// Rows and Cols specify the number of rows or columns for\n\t// FacetWrap. If both are zero, FacetWrap chooses reasonable\n\t// defaults. 
Otherwise, one or the other should be zero.\n\tRows, Cols int\n\n\t// TODO: Wrap order and label side for FacetWrap.\n}\n\n// FacetX splits a plot into columns.\ntype FacetX FacetCommon\n\n// FacetY splits a plot into rows.\ntype FacetY FacetCommon\n\n// FacetWrap splits a plot into a grid of rows and columns.\ntype FacetWrap FacetCommon\n\nfunc (f FacetX) Apply(p *Plot) {\n\t(*FacetCommon)(&f).apply(p, \"x\")\n}\n\nfunc (f FacetY) Apply(p *Plot) {\n\t(*FacetCommon)(&f).apply(p, \"y\")\n}\n\nfunc (f FacetWrap) Apply(p *Plot) {\n\t(*FacetCommon)(&f).apply(p, \"-\")\n}\n\nfunc (f *FacetCommon) apply(p *Plot, dir string) {\n\tif f.Labeler == nil {\n\t\tf.Labeler = func(x interface{}) string { return fmt.Sprint(x) }\n\t}\n\n\tgrouped := table.GroupBy(p.Data(), f.Col)\n\n\t// TODO: What should this do if there are multiple faceting\n\t// operations and the results aren't a complete cross-product?\n\t// Using GroupBy to form the initial faceting groups will\n\t// leave out subplots with no data. Alternatively, I could\n\t// base this on the total set of values and force there to be\n\t// a complete cross-product.\n\n\t// TODO: If this is, say, and X faceting and different\n\t// existing columns have different sets of values, should I\n\t// only split a column on the values it has? Doing that right\n\t// would require grouping existing subplots in potentially\n\t// complex ways (for example, if I do a FacetWrap and then a\n\t// FacetX, grouping subplots by column alone will be wrong.)\n\n\t// Collect grouped values. 
If there was already grouping\n\t// structure, it's possible we'll have multiple groups with\n\t// the same value for Col.\n\ttype valInfo struct {\n\t\tindex int\n\t\tlabel string\n\t}\n\tvar valType reflect.Type\n\tvals := make(map[interface{}]*valInfo)\n\tfor i, gid := range grouped.Tables() {\n\t\tval := gid.Label()\n\t\tif _, ok := vals[val]; !ok {\n\t\t\tvals[val] = &valInfo{len(vals), f.Labeler(val)}\n\t\t}\n\t\tif i == 0 {\n\t\t\tvalType = reflect.TypeOf(val)\n\t\t}\n\t}\n\n\t// If f.Col is orderable, order and re-index values.\n\tif generic.CanOrderR(valType.Kind()) {\n\t\tvalSeq := reflect.MakeSlice(reflect.SliceOf(valType), 0, len(vals))\n\t\tfor val := range vals {\n\t\t\tvalSeq = reflect.Append(valSeq, reflect.ValueOf(val))\n\t\t}\n\t\tslice.Sort(valSeq.Interface())\n\t\tfor i := 0; i < valSeq.Len(); i++ {\n\t\t\tvals[valSeq.Index(i).Interface()].index = i\n\t\t}\n\t}\n\n\t// Compute FacetWrap rows and cols.\n\tif dir == \"-\" {\n\t\tcells := float64(len(vals))\n\t\tif f.Cols == 0 {\n\t\t\tif f.Rows == 0 {\n\t\t\t\t// Chose default Rows and Cols.\n\t\t\t\tf.Rows = int(math.Ceil(math.Sqrt(cells)))\n\t\t\t}\n\t\t\t// Compute Cols from Rows.\n\t\t\tf.Cols = int(math.Ceil(cells / float64(f.Rows)))\n\t\t} else {\n\t\t\t// Compute Rows from Cols.\n\t\t\tf.Rows = int(math.Ceil(cells / float64(f.Cols)))\n\t\t}\n\t}\n\n\t// Find existing subplots, split existing subplots and bands\n\t// into len(vals) new subplots and bands, and transform each\n\t// GroupBy group into its new subplot.\n\ttype bandKey struct {\n\t\t// band1 is the primary band. band2 is only used by\n\t\t// FacetWrap.\n\t\tband1, band2 *subplotBand\n\n\t\t// X and Y of band. 
This is a necessary part of the\n\t\t// key because FacetWrap creates rows but does not\n\t\t// create distant horizontal bands for them.\n\t\tx, y int\n\t}\n\ttype bandScale struct {\n\t\tband  *subplotBand\n\t\tscale Scaler\n\t}\n\tsubplots := make(map[*subplot][]*subplot)\n\tbands := make(map[bandKey][]*subplotBand)\n\tscales := make(map[bandScale]Scaler)\n\tvar ndata table.GroupingBuilder\n\tfor _, gid := range grouped.Tables() {\n\t\t// Find subplot by walking up group hierarchy.\n\t\tsub := subplotOf(gid)\n\n\t\t// Split old band into len(vals) new bands in the\n\t\t// orthogonal axis.\n\t\tvar obandKey bandKey\n\t\tif dir == \"x\" {\n\t\t\tobandKey = bandKey{band1: sub.vBand, x: sub.x}\n\t\t} else if dir == \"y\" {\n\t\t\tobandKey = bandKey{band1: sub.hBand, y: sub.y}\n\t\t} else {\n\t\t\tobandKey = bandKey{sub.vBand, sub.hBand, sub.x, sub.y}\n\t\t}\n\t\tnbands := bands[obandKey]\n\t\tif nbands == nil {\n\t\t\tnbands = make([]*subplotBand, len(vals))\n\t\t\tfor _, val := range vals {\n\t\t\t\tnb := &subplotBand{parent: obandKey.band1, label: val.label}\n\t\t\t\tnbands[val.index] = nb\n\t\t\t}\n\t\t\tbands[obandKey] = nbands\n\t\t}\n\n\t\t// Split old subplot into len(vals) new subplots.\n\t\tnsubplots := subplots[sub]\n\t\tif nsubplots == nil {\n\t\t\tnsubplots = make([]*subplot, len(vals))\n\t\t\tfor _, val := range vals {\n\t\t\t\tns := &subplot{parent: sub, x: sub.x, y: sub.y,\n\t\t\t\t\tvBand: sub.vBand, hBand: sub.hBand}\n\t\t\t\tif dir == \"x\" {\n\t\t\t\t\tns.x = sub.x*len(vals) + val.index\n\t\t\t\t\tns.vBand = nbands[val.index]\n\t\t\t\t} else if dir == \"y\" {\n\t\t\t\t\tns.y = sub.y*len(vals) + val.index\n\t\t\t\t\tns.hBand = nbands[val.index]\n\t\t\t\t} else {\n\t\t\t\t\tns.x = sub.x*f.Cols + val.index%f.Cols\n\t\t\t\t\tns.y = sub.y*f.Rows + val.index/f.Cols\n\t\t\t\t\tns.vBand = nbands[val.index]\n\t\t\t\t}\n\t\t\t\tnsubplots[val.index] = ns\n\t\t\t}\n\t\t\tsubplots[sub] = nsubplots\n\t\t}\n\n\t\t// Map this group to its new 
subplot.\n\t\tnsubplot := nsubplots[vals[gid.Label()].index]\n\t\tngid := gid.Parent().Extend(nsubplot)\n\t\tndata.Add(ngid, grouped.Table(gid))\n\n\t\t// Split scales if requested. At a high level, we want\n\t\t// to give each band a new scale, but there may\n\t\t// already be multiple scales within a band, so we\n\t\t// find the set of scales within a band and split each\n\t\t// distinct scale up.\n\t\tvar nband *subplotBand\n\t\tif dir == \"x\" {\n\t\t\tnband = nsubplot.vBand\n\t\t} else if dir == \"y\" {\n\t\t\tnband = nsubplot.hBand\n\t\t} else {\n\t\t\tif f.SplitXScales || f.SplitYScales {\n\t\t\t\t// TODO: I probably need to rephrase\n\t\t\t\t// this whole scale splitting\n\t\t\t\t// operation in terms of subplot X and\n\t\t\t\t// Y and possibly do it as a second\n\t\t\t\t// pass once all of the subplots are\n\t\t\t\t// created.\n\t\t\t\tpanic(\"not implemented: scale splitting for FacetWrap\")\n\t\t\t}\n\t\t}\n\t\tif f.SplitXScales {\n\t\t\tscaler := p.GetScaleAt(\"x\", gid)\n\t\t\tnscaler := scales[bandScale{nband, scaler}]\n\t\t\tif nscaler == nil {\n\t\t\t\tnscaler = scaler.CloneScaler()\n\t\t\t\tscales[bandScale{nband, scaler}] = nscaler\n\t\t\t}\n\t\t\tp.SetScaleAt(\"x\", nscaler, ngid)\n\t\t}\n\t\tif f.SplitYScales {\n\t\t\tscaler := p.GetScaleAt(\"y\", gid)\n\t\t\tnscaler := scales[bandScale{nband, scaler}]\n\t\t\tif nscaler == nil {\n\t\t\t\tnscaler = scaler.CloneScaler()\n\t\t\t\tscales[bandScale{nband, scaler}] = nscaler\n\t\t\t}\n\t\t\tp.SetScaleAt(\"y\", nscaler, ngid)\n\t\t}\n\t}\n\n\tp.SetData(ndata.Done())\n}\n\n// subplotBand represents a rectangular group of subplots in either a\n// vertical group (with a label on top) or a horizontal group (with a\n// label to the right).\ntype subplotBand struct {\n\tparent *subplotBand\n\tlabel  string\n}\n\ntype subplot struct {\n\tparent *subplot\n\n\t// x and y are the position of this subplot, where 0, 0 is the\n\t// top left.\n\tx, y int\n\n\tvBand, hBand *subplotBand\n}\n\nvar rootSubplot = 
&subplot{}\n\nfunc subplotOf(gid table.GroupID) *subplot {\n\tfor ; gid != table.RootGroupID; gid = gid.Parent() {\n\t\tsub, ok := gid.Label().(*subplot)\n\t\tif ok {\n\t\t\treturn sub\n\t\t}\n\t}\n\treturn rootSubplot\n}\n\nfunc (s subplot) String() string {\n\treturn fmt.Sprintf(\"[%d %d]\", s.x, s.y)\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/gg/group.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage gg\n\nimport \"github.com/aclements/go-gg/table\"\n\n// TODO: GroupByKey? Would the key function only work on one binding?\n// With a first-class row representation we could pass that.\n\n// GroupBy sub-divides all groups such that all of the rows in each\n// group have equal values for all of the named columns.\nfunc (p *Plot) GroupBy(cols ...string) *Plot {\n\t// TODO: Should this accept column expressions, like layers?\n\treturn p.SetData(table.GroupBy(p.Data(), cols...))\n}\n\n// GroupAuto groups p's data table on all columns that are comparable\n// but are not numeric (that is, all categorical columns).\n//\n// TODO: Maybe there should be a CategoricalBindings that returns the\n// set of categorical bindings, which callers could just pass to\n// GroupBy, possibly after manipulating.\n//\n// TODO: Does implementing sort.Interface make an otherwise cardinal\n// column ordinal?\nfunc (p *Plot) GroupAuto() *Plot {\n\t// Find the categorical columns.\n\tcategorical := []string{}\n\tg := p.Data()\n\tfor _, col := range g.Columns() {\n\t\tet := table.ColType(g, col).Elem()\n\t\tif et.Comparable() && !isCardinal(et.Kind()) {\n\t\t\tcategorical = append(categorical, col)\n\t\t}\n\t}\n\n\treturn p.GroupBy(categorical...)\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/gg/layer.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage gg\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/aclements/go-gg/table\"\n)\n\nfunc defaultCols(p *Plot, cols ...*string) {\n\tdcols := p.Data().Columns()\n\tfor i, colp := range cols {\n\t\tif *colp == \"\" {\n\t\t\tif i >= len(dcols) {\n\t\t\t\tpanic(fmt.Sprintf(\"cannot get default column %d; table has only %d columns\", i, len(dcols)))\n\t\t\t}\n\t\t\t*colp = dcols[i]\n\t\t}\n\t}\n}\n\n// LayerLines is like LayerPaths, but connects data points in order by\n// the \"x\" property.\ntype LayerLines LayerPaths\n\nfunc (l LayerLines) Apply(p *Plot) {\n\tLayerPaths(l).apply(p, true)\n}\n\n//go:generate stringer -type StepMode\n\n// StepMode controls how LayerSteps connects subsequent points.\ntype StepMode int\n\nconst (\n\t// StepHV makes LayerSteps connect subsequent points with a\n\t// horizontal segment and then a vertical segment.\n\tStepHV StepMode = iota\n\n\t// StepVH makes LayerSteps connect subsequent points with a\n\t// vertical segment and then a horizontal segment.\n\tStepVH\n\n\t// StepHMid makes LayerSteps connect subsequent points A and B\n\t// with three segments: a horizontal segment from A to the\n\t// midpoint between A and B, followed by vertical segment,\n\t// followed by a horizontal segment from the midpoint to B.\n\tStepHMid\n\n\t// StepVMid makes LayerSteps connect subsequent points A and B\n\t// with three segments: a vertical segment from A to the\n\t// midpoint between A and B, followed by horizontal segment,\n\t// followed by a vertical segment from the midpoint to B.\n\tStepVMid\n)\n\n// LayerSteps is like LayerPaths, but connects data points with a path\n// consisting only of horizontal and vertical segments.\ntype LayerSteps struct {\n\tLayerPaths\n\n\tStep StepMode\n}\n\nfunc (l LayerSteps) Apply(p *Plot) {\n\t// TODO: Should this also support only 
showing horizontal or\n\t// vertical segments?\n\t//\n\t// TODO: This could be a data transform instead of a layer.\n\t// Then it could be used in conjunction with, for example,\n\t// ribbons.\n\n\tdefaultCols(p, &l.X, &l.Y)\n\tp.marks = append(p.marks, plotMark{&markSteps{\n\t\tl.Step,\n\t\tp.use(\"x\", l.X),\n\t\tp.use(\"y\", l.Y),\n\t\tp.use(\"stroke\", l.Color),\n\t\tp.use(\"fill\", l.Fill),\n\t}, p.Data().Tables()})\n}\n\n// LayerPaths groups by Color and Fill, and then connects successive\n// data points in each group with a path and/or a filled polygon.\ntype LayerPaths struct {\n\t// X and Y name columns that define the input and response of\n\t// each point on the path. If these are empty, they default to\n\t// the first and second columns, respectively.\n\tX, Y string\n\n\t// Color names a column that defines the stroke color of each\n\t// path. If Color is \"\", it defaults to constant black.\n\t// Otherwise, the data is grouped by Color.\n\tColor string\n\n\t// Fill names a column that defines the fill color of each\n\t// path. If Fill is \"\", it defaults to none. Otherwise, the\n\t// data is grouped by Fill.\n\tFill string\n\n\t// XXX Perhaps the theme should provide default values for\n\t// things like \"color\". That would suggest we need to resolve\n\t// defaults like that at render time. Possibly a special scale\n\t// that gets values from the theme could be used to resolve\n\t// them.\n\t//\n\t// XXX strokeOpacity, fillOpacity, strokeWidth, what other\n\t// properties do SVG strokes have?\n\t//\n\t// XXX Should the set of known styling bindings be fixed, and\n\t// all possible rendering targets have to know what to do with\n\t// them, or should the rendering target be able to have\n\t// different styling bindings they understand (presumably with\n\t// some reasonable base set)? 
If the renderer can determine\n\t// the known bindings, we would probably just capture the\n\t// environment here (and make it so a captured environment\n\t// does not change) and hand that to the renderer later.\n}\n\nfunc (l LayerPaths) Apply(p *Plot) {\n\tl.apply(p, false)\n}\n\nfunc (l LayerPaths) apply(p *Plot, sort bool) {\n\tdefaultCols(p, &l.X, &l.Y)\n\tif l.Color != \"\" {\n\t\tp.GroupBy(l.Color)\n\t}\n\tif l.Fill != \"\" {\n\t\tp.GroupBy(l.Fill)\n\t}\n\tif sort {\n\t\tdefer p.Save().Restore()\n\t\tp = p.SortBy(l.X)\n\t}\n\n\tp.marks = append(p.marks, plotMark{&markPath{\n\t\tp.use(\"x\", l.X),\n\t\tp.use(\"y\", l.Y),\n\t\tp.use(\"stroke\", l.Color),\n\t\tp.use(\"fill\", l.Fill),\n\t}, p.Data().Tables()})\n}\n\n// LayerArea shades the area between two columns with a polygon. It is\n// useful in conjunction with ggstat.AggMax and ggstat.AggMin for\n// drawing the extents of data.\ntype LayerArea struct {\n\t// X names the column that defines the input of each point. If\n\t// this is empty, it defaults to the first column.\n\tX string\n\n\t// Upper and Lower name columns that define the range of\n\t// response to shade. If either is \"\", it defaults to a\n\t// constant 0 value.\n\tUpper, Lower string\n\n\t// Fill names a column that defines the fill color of each\n\t// area. If Fill is \"\", it defaults to black. Otherwise, the\n\t// data is grouped by Fill.\n\tFill string\n\n\t// FillOpacity names a column that defines the fill opacity of\n\t// each area. 
If FillOpacity is \"\", it defaults to 0.5.\n\t// Otherwise, the data is grouped by FillOpacity.\n\tFillOpacity string\n}\n\nfunc (l LayerArea) Apply(p *Plot) {\n\tdefaultCols(p, &l.X)\n\tif l.Fill != \"\" {\n\t\tp.GroupBy(l.Fill)\n\t}\n\tif l.FillOpacity != \"\" {\n\t\tp.GroupBy(l.FillOpacity)\n\t}\n\tdefer p.Save().Restore()\n\tp = p.SortBy(l.X)\n\tupper, lower := l.Upper, l.Lower\n\tif upper == \"\" {\n\t\tupper = p.Const(0)\n\t}\n\tif lower == \"\" {\n\t\tlower = p.Const(0)\n\t}\n\tp.marks = append(p.marks, plotMark{&markArea{\n\t\tp.use(\"x\", l.X),\n\t\tp.use(\"y\", upper),\n\t\tp.use(\"y\", lower),\n\t\tp.use(\"fill\", l.Fill),\n\t\tp.use(\"opacity\", l.FillOpacity),\n\t}, p.Data().Tables()})\n}\n\n// LayerPoints layers a point mark at each data point.\ntype LayerPoints struct {\n\t// X and Y name columns that define input and response of each\n\t// point. If these are empty, they default to the first and\n\t// second columns, respectively.\n\tX, Y string\n\n\t// Color names the column that defines the fill color of each\n\t// point. If Color is \"\", it defaults to constant black.\n\tColor string\n\n\t// Opacity names the column that defines the opacity of each\n\t// point. If Opacity is \"\", it defaults to fully opaque. This\n\t// is multiplied by any alpha value specified by Color.\n\tOpacity string\n\n\t// Size names the column that defines the size of each point.\n\t// If Size is \"\", it defaults to 1% of the smallest plot\n\t// dimension.\n\tSize string\n\n\t// XXX fill vs stroke, shape\n}\n\nfunc (l LayerPoints) Apply(p *Plot) {\n\tdefaultCols(p, &l.X, &l.Y)\n\tp.marks = append(p.marks, plotMark{&markPoint{\n\t\tp.use(\"x\", l.X),\n\t\tp.use(\"y\", l.Y),\n\t\t// TODO: It's actually the fill color, but I generally\n\t\t// want it to match things that are stroke colors.\n\t\t// Maybe I should have a \"color\" aesthetic for the\n\t\t// \"primary\" color? 
Or I could have a hierarchy of\n\t\t// aesthetics, in which this uses \"stroke\" if it has a\n\t\t// scale, but otherwise uses \"color\".\n\t\tp.use(\"stroke\", l.Color),\n\t\t// TODO: What scale for opacity? Or should I assume\n\t\t// callers will use PreScaled values if they want\n\t\t// specific opacities? What's the physical type?\n\t\tp.use(\"opacity\", l.Opacity),\n\t\tp.use(\"size\", l.Size),\n\t}, p.Data().Tables()})\n}\n\n// LayerTiles layers a rectangle at each data point. The rectangle is\n// specified by its center, width, and height.\ntype LayerTiles struct {\n\t// X and Y name columns that define the input and response at\n\t// the center of each rectangle. If they are \"\", they default\n\t// to the first and second columns, respectively.\n\tX, Y string\n\n\t// Width and Height name columns that define the width and\n\t// height of each rectangle. If they are \"\", the width and/or\n\t// height are automatically determined from the smallest\n\t// spacing between distinct X and Y points.\n\tWidth, Height string\n\n\t// Fill names a column that defines the fill color of each\n\t// rectangle. If it is \"\", the default fill is black.\n\tFill string\n\n\t// XXX Stroke color/width, opacity, center adjustment.\n}\n\nfunc (l LayerTiles) Apply(p *Plot) {\n\tdefaultCols(p, &l.X, &l.Y)\n\tif l.Width != \"\" || l.Height != \"\" {\n\t\t// TODO: What scale are these in? (x+width) is in the\n\t\t// X scale, but width itself is not. It doesn't make\n\t\t// sense to train the X scale on width, and if there's\n\t\t// a scale transform, (x+width) has to happen before\n\t\t// the transform. OTOH, if x is discrete, I can't do\n\t\t// (x+width); maybe in that case you just can't\n\t\t// specify a width. 
OTOOH, if width is specified and\n\t\t// the value is unscaled, I could still do something\n\t\t// reasonable with that if x is discrete.\n\t\tpanic(\"not implemented: non-default width/height\")\n\t}\n\tp.marks = append(p.marks, plotMark{&markTiles{\n\t\tp.use(\"x\", l.X),\n\t\tp.use(\"y\", l.Y),\n\t\tp.use(\"fill\", l.Fill),\n\t}, p.Data().Tables()})\n}\n\n// LayerTags attaches text annotations to data points.\n//\n// TODO: Currently this groups by label and makes one annotation per\n// group. This should be a controllable.\ntype LayerTags struct {\n\t// X and Y name columns that define the input and response\n\t// each tag is attached to. If they are \"\", they default to\n\t// the first and second columns, respectively.\n\tX, Y string\n\n\t// Label names the column that gives the text to put in the\n\t// tag at X, Y. Label is required.\n\tLabel string\n\n\t// HPos controls the horizontal position of the tag if\n\t// multiple points have the same Label. The label will be\n\t// attached to the point closest to HPos between the left-most\n\t// (HPos == 0) and the right-most (HPos == 1) points on this\n\t// curve.\n\tHPos float64\n\n\t// Offset controls the pixel offset of the tag from the point\n\t// it is attached to. 
If these are both zero, they are treated\n\t// as -20, -20.\n\tOffsetX, OffsetY int\n}\n\nfunc (l LayerTags) Apply(p *Plot) {\n\t// TODO: Should there be special \"annotation marks\" that are\n\t// always on top and can perhaps extend outside the plot area?\n\n\tdefaultCols(p, &l.X, &l.Y)\n\tif l.OffsetX == 0 && l.OffsetY == 0 {\n\t\tl.OffsetX, l.OffsetY = -20, -20\n\t}\n\tdefer p.Save().Restore()\n\tp.GroupBy(l.Label)\n\t// TODO: I keep wanting an abstraction for a column across\n\t// groups like this.\n\tlabels := make(map[table.GroupID]table.Slice)\n\tfor _, gid := range p.Data().Tables() {\n\t\tlabels[gid] = p.Data().Table(gid).MustColumn(l.Label)\n\t}\n\n\tp.marks = append(p.marks, plotMark{&markTags{\n\t\tp.use(\"x\", l.X),\n\t\tp.use(\"y\", l.Y),\n\t\tlabels,\n\t\tl.HPos,\n\t\tl.OffsetX,\n\t\tl.OffsetY,\n\t}, p.Data().Tables()})\n}\n\n// LayerTooltips attaches hover tooltips to data points.\ntype LayerTooltips struct {\n\t// X and Y name columns that define locations of tooltips. 
If\n\t// they are \"\", they default to the first and second columns,\n\t// respectively.\n\tX, Y string\n\n\t// Label names the column that gives the text of the tooltip.\n\tLabel string\n\n\t// TODO: Text styling, closest X or closest point, multiple\n\t// tooltips if there are multiple points at the same X with\n\t// different Ys?\n}\n\nfunc (l LayerTooltips) Apply(p *Plot) {\n\tdefer p.Save().Restore()\n\n\tdefaultCols(p, &l.X, &l.Y)\n\n\t// Split up by subplot and flatten each subplot.\n\ttables := map[*subplot][]*table.Table{}\n\tgids := map[*subplot]table.GroupID{}\n\tfor _, gid := range p.Data().Tables() {\n\t\ts := subplotOf(gid)\n\t\ttables[s] = append(tables[s], p.Data().Table(gid))\n\t\tgids[s] = gid\n\t}\n\tvar ng table.GroupingBuilder\n\tfor k, ts := range tables {\n\t\tvar subg table.GroupingBuilder\n\t\tfor i, t := range ts {\n\t\t\tsubg.Add(table.RootGroupID.Extend(i), t)\n\t\t}\n\t\tngid := table.RootGroupID.Extend(k)\n\t\tng.Add(ngid, table.Flatten(subg.Done()))\n\t\tp.copyScales(gids[k], ngid)\n\t}\n\tp.SetData(ng.Done())\n\n\tlabels := make(map[table.GroupID]table.Slice)\n\tfor _, gid := range p.Data().Tables() {\n\t\tlabels[gid] = p.Data().Table(gid).MustColumn(l.Label)\n\t}\n\tp.marks = append(p.marks, plotMark{&markTooltips{\n\t\tp.use(\"x\", l.X),\n\t\tp.use(\"y\", l.Y),\n\t\tlabels,\n\t}, p.Data().Tables()})\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/gg/layout/grid.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage layout\n\nimport \"sort\"\n\n// Grid lays out elements in a two dimensional table. Each child is\n// assigned to a cell in the table and may optionally span multiple\n// rows and/or columns.\ntype Grid struct {\n\telts       []*gridElement\n\tcols, rows int\n\tx, y, w, h float64\n}\n\ntype gridElement struct {\n\te                      Element\n\tx, y, colSpan, rowSpan int\n}\n\n// Add adds Element e to Grid g, spanning cells (x,y) up to but not\n// including (x+colSpan, y+colSpan).\nfunc (g *Grid) Add(e Element, x, y, colSpan, rowSpan int) {\n\tif x+colSpan > g.cols {\n\t\tg.cols = x + colSpan\n\t}\n\tif y+rowSpan > g.rows {\n\t\tg.rows = y + rowSpan\n\t}\n\tg.elts = append(g.elts, &gridElement{e, x, y, colSpan, rowSpan})\n}\n\nfunc (g *Grid) Children() []Element {\n\tres := make([]Element, len(g.elts))\n\tfor i, elt := range g.elts {\n\t\tres[i] = elt.e\n\t}\n\treturn res\n}\n\nfunc (g *Grid) doLayout(byRow bool, allocated float64) (dims []float64, flexes []bool) {\n\tseq := func(n int) []int {\n\t\tres := make([]int, n)\n\t\tfor i := range res {\n\t\t\tres[i] = i\n\t\t}\n\t\treturn res\n\t}\n\tmax := func(x, y float64) float64 {\n\t\tif x > y {\n\t\t\treturn x\n\t\t}\n\t\treturn y\n\t}\n\n\tif byRow {\n\t\tdims = make([]float64, g.rows)\n\t\tflexes = make([]bool, g.rows)\n\t} else {\n\t\tdims = make([]float64, g.cols)\n\t\tflexes = make([]bool, g.cols)\n\t}\n\tfor i := range flexes {\n\t\t// TODO: Should empty columns be set to false?\n\t\tflexes[i] = true\n\t}\n\n\t// Sort elements by colSpan or rowSpan.\n\teltOrder := seq(len(g.elts))\n\tsort.Sort(&gridElementSorter{g.elts, eltOrder, byRow})\n\n\t// Add a fake element that spans everything and uses the\n\t// allocated space.\n\tif allocated > 0 {\n\t\teltOrder = append(eltOrder, -1)\n\t}\n\n\t// Process elements by increasing 
span.\n\tfor _, i := range eltOrder {\n\t\tvar (\n\t\t\tedim  float64\n\t\t\teflex bool\n\t\t\tepos  int\n\t\t\tespan int\n\t\t)\n\t\tif i == -1 {\n\t\t\t// Fake element for final space allocation.\n\t\t\tedim, eflex, epos, espan = allocated, true, 0, len(dims)\n\t\t} else {\n\t\t\te := g.elts[i]\n\t\t\t// TODO: We need to make one pass and get both size\n\t\t\t// hints or this will be exponential.\n\t\t\tif byRow {\n\t\t\t\t_, edim, _, eflex = e.e.SizeHint()\n\t\t\t\tepos, espan = e.y, e.rowSpan\n\t\t\t} else {\n\t\t\t\tedim, _, eflex, _ = e.e.SizeHint()\n\t\t\t\tepos, espan = e.x, e.colSpan\n\t\t\t}\n\t\t}\n\n\t\tif espan == 1 {\n\t\t\tdims[epos] = max(dims[epos], edim)\n\t\t\tif !eflex {\n\t\t\t\tflexes[epos] = false\n\t\t\t}\n\t\t} else if espan > 1 {\n\t\t\ttotal := edim\n\n\t\t\t// Expand flexible columns so that the total\n\t\t\t// dim is >= e's dim, and so the rows/columns\n\t\t\t// we do expand get equal dims. We don't\n\t\t\t// shrink any row/column. If all rows/columns\n\t\t\t// are fixed, we treat them all as flexible.\n\t\t\tvar subdims []float64\n\t\t\tforceFlex := false\n\t\t\tfor i := epos; i < epos+espan; i++ {\n\t\t\t\tif flexes[i] {\n\t\t\t\t\tsubdims = append(subdims, dims[i])\n\t\t\t\t} else {\n\t\t\t\t\t// This space is accounted for.\n\t\t\t\t\ttotal -= dims[i]\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(subdims) == 0 {\n\t\t\t\t// All rows/columns are fixed, so treat\n\t\t\t\t// them all as flexible.\n\t\t\t\tforceFlex = true\n\t\t\t\tsubdims = append(subdims, dims[epos:epos+espan]...)\n\t\t\t\ttotal = edim\n\t\t\t}\n\n\t\t\tif total <= 0 {\n\t\t\t\t// Fixed columns already take e's space.\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Remove flex columns already wider than\n\t\t\t// total/count from consideration.\n\t\t\tcount := len(subdims)\n\t\t\tsort.Sort(sort.Reverse(sort.Float64Slice(subdims)))\n\t\t\tfor _, dim := range dims {\n\t\t\t\tif dim > total/float64(count) {\n\t\t\t\t\ttotal -= dim\n\t\t\t\t\tcount--\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Expand 
remaining rows/columns to total/count.\n\t\t\tif count <= 0 {\n\t\t\t\t// Flex columns already take e's space.\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdim := total / float64(count)\n\t\t\tfor i := epos; i < epos+espan; i++ {\n\t\t\t\tif flexes[i] || forceFlex {\n\t\t\t\t\tdims[i] = max(dims[i], dim)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// TODO: What do I do with e's flex? Clearly\n\t\t\t// if a fixed element spans the whole grid,\n\t\t\t// the grid should be fixed, so I shouldn't\n\t\t\t// ignore it.\n\t\t}\n\t}\n\treturn\n}\n\nfunc (g *Grid) SizeHint() (w, h float64, flexw, flexh bool) {\n\tsum := func(xs []float64) float64 {\n\t\ts := 0.0\n\t\tfor _, x := range xs {\n\t\t\ts += x\n\t\t}\n\t\treturn s\n\t}\n\tany := func(xs []bool) bool {\n\t\tfor _, x := range xs {\n\t\t\tif x {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\txdims, xflexes := g.doLayout(false, 0)\n\tydims, yflexes := g.doLayout(true, 0)\n\treturn sum(xdims), sum(ydims), any(xflexes), any(yflexes)\n}\n\nfunc (g *Grid) SetLayout(x, y, w, h float64) {\n\t// Record layout.\n\tg.x, g.y, g.w, g.h = x, y, w, h\n\n\t// Layout children.\n\tcsum := func(xs []float64) []float64 {\n\t\tres, csum := make([]float64, len(xs)+1), 0.0\n\t\tfor i, x := range xs {\n\t\t\tres[i+1] = csum + x\n\t\t\tcsum += x\n\t\t}\n\t\treturn res\n\t}\n\txdims, _ := g.doLayout(false, w)\n\tydims, _ := g.doLayout(true, h)\n\txpos := csum(xdims)\n\typos := csum(ydims)\n\tfor _, elt := range g.elts {\n\t\telt.e.SetLayout(xpos[elt.x], ypos[elt.y], xpos[elt.x+elt.colSpan]-xpos[elt.x], ypos[elt.y+elt.rowSpan]-ypos[elt.y])\n\t}\n}\n\nfunc (g *Grid) Layout() (x, y, w, h float64) {\n\treturn g.x, g.y, g.w, g.h\n}\n\ntype gridElementSorter struct {\n\telts      []*gridElement\n\tseq       []int\n\tbyRowSpan bool\n}\n\nfunc (g *gridElementSorter) Len() int {\n\treturn len(g.seq)\n}\n\nfunc (g *gridElementSorter) Less(i, j int) bool {\n\te1, e2 := g.elts[g.seq[i]], g.elts[g.seq[j]]\n\tif g.byRowSpan {\n\t\treturn e1.rowSpan < 
e2.rowSpan\n\t}\n\treturn e1.colSpan < e2.colSpan\n}\n\nfunc (g *gridElementSorter) Swap(i, j int) {\n\tg.seq[i], g.seq[j] = g.seq[j], g.seq[i]\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/gg/layout/layout.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package layout provides helpers for laying out hierarchies of\n// rectangular elements in two dimensional space.\npackage layout\n\n// TODO: If I want to handle wrapped text, this API is insufficient.\n// In that case, I may need something more like Android where the\n// parent can pass in Unspecified, (Exactly x), or (AtMost x) for both\n// dimensions and make multiple calls. I would probably start out with\n// AtMost the allocated dimension for everything and if the total came\n// back too large, I would cut back space (possibly causing the other\n// dimension to grow if text wraps).\n\n// An Element is a rectangular feature in a layout.\ntype Element interface {\n\t// SizeHint returns this Element's desired size and whether it\n\t// can expand from that size in either direction.\n\tSizeHint() (w, h float64, flexw, flexh bool)\n\n\t// SetLayout sets this Element's layout relative to its parent\n\t// and, if this Element is a container, recursively lays out\n\t// this Element's children.\n\t//\n\t// w and h may be smaller than SizeHint() if the space is\n\t// constrained. They may also be larger, even if the element\n\t// isn't flexible, in which case the Element will position\n\t// itself within the assigned size using some gravity.\n\t//\n\t// TODO: Or should the parent be responsible for gravity if it\n\t// allocates too much space to a fixed element?\n\t//\n\t// TODO: Since an Element doesn't know its parent, it's\n\t// difficult to turn local coordinates into absolute\n\t// coordinates. 
These should either be absolute coordinates,\n\t// or Element should have a parent and it should be easy to\n\t// get absolute coordinates.\n\tSetLayout(x, y, w, h float64)\n\n\t// Layout returns this Element's layout.\n\tLayout() (x, y, w, h float64)\n}\n\n// A Group is an Element that manages the layout of child Elements.\ntype Group interface {\n\tElement\n\n\t// Children returns the child Elements laid out by this Group.\n\tChildren() []Element\n}\n\n// Leaf is a leaf in a layout hierarchy. It is meant for embedding: it\n// partially implements Element, leaving SizeHint to the embedding\n// type.\ntype Leaf struct {\n\tx, y, w, h float64\n}\n\nfunc (l *Leaf) SetLayout(x, y, w, h float64) {\n\tl.x, l.y, l.w, l.h = x, y, w, h\n}\n\nfunc (l *Leaf) Layout() (x, y, w, h float64) {\n\treturn l.x, l.y, l.w, l.h\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/gg/layout.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage gg\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"sort\"\n\n\t\"github.com/aclements/go-gg/gg/layout\"\n\t\"github.com/aclements/go-gg/table\"\n\t\"github.com/ajstarks/svgo\"\n)\n\n// A plotElt is a high-level element of a plot layout.\n//\n// plotElts are arranged in a 2D grid. Coordinates in the grid are\n// specified by a pair of \"paths\" rather than a simple pair of\n// indexes. For example, element A is to the left of element B if A's\n// X path is less than B's X path, where paths are compared as tuples\n// with an infinite number of trailing 0's. This makes it easy to, for\n// example, place an element to the right of another element without\n// having to renumber all of the elements that are already to its\n// right.\n//\n// The first level of the hierarchy is simply the coordinate of the\n// plot in the grid. 
Within this, we layout plot elements as follows:\n//\n//                           +----------------------+\n//                           | Label (x, y/-3/-1)   |\n//                           +----------------------+\n//                           | Label (x, y/-3/0)    |\n//                           +----------------------+\n//                           | Padding (x, y/-2)    |\n//    +-----------+----------+----------------------+----------+------------+\n//    | Padding   | YTicks   |                      | Padding  | Label      |\n//    | (x/-2, y) | (x/-1,y) | Subplot (x, y)       | (x/2, y) | (x/3/0, y) |\n//    |           |          |                      |          |            |\n//    +-----------+----------+----------------------+----------+------------+\n//                           | XTicks (x, y/1)      |\n//                           +----------------------+\n//                           | Padding (x, y/2)     |\n//                           +----------------------+\n//\n// TODO: Should I instead think of this as specifying the edges rather\n// than the cells?\ntype plotElt interface {\n\tlayout.Element\n\n\t// paths returns the top-left and bottom-right cells of this\n\t// element. 
x2Path and y2Path may be nil, indicating that they\n\t// are the same as xPath and yPath.\n\tpaths() (xPath, yPath, x2Path, y2Path eltPath)\n\n\t// render draws this plot element to r.svg.\n\trender(r *eltRender)\n}\n\ntype eltRender struct {\n\tsvg *svg.SVG\n\tid  int\n}\n\nfunc (r *eltRender) genid(prefix string) (id, ref string) {\n\tid = fmt.Sprintf(\"%s%d\", prefix, r.id)\n\tref = \"url(#\" + id + \")\"\n\tr.id++\n\treturn\n}\n\ntype eltCommon struct {\n\txPath, yPath, x2Path, y2Path eltPath\n}\n\nfunc (c *eltCommon) paths() (xPath, yPath, x2Path, y2Path eltPath) {\n\treturn c.xPath, c.yPath, c.x2Path, c.y2Path\n}\n\ntype eltSubplot struct {\n\teltCommon\n\tlayout.Leaf\n\n\tsubplot *subplot\n\tmarks   []plotMark\n\tscales  map[string]map[Scaler]bool\n\n\txTicks, yTicks *eltTicks\n\n\tplotMargins struct {\n\t\tt, r, b, l float64\n\t}\n}\n\nfunc newEltSubplot(s *subplot) *eltSubplot {\n\treturn &eltSubplot{\n\t\teltCommon: eltCommon{xPath: eltPath{s.x}, yPath: eltPath{s.y}},\n\t\tsubplot:   s,\n\t\tscales:    make(map[string]map[Scaler]bool),\n\t}\n}\n\nfunc (e *eltSubplot) SizeHint() (w, h float64, flexw, flexh bool) {\n\treturn 0, 0, true, true\n}\n\nfunc (e *eltSubplot) SetLayout(x, y, w, h float64) {\n\te.Leaf.SetLayout(x, y, w, h)\n\tm := &e.plotMargins\n\tm.t, m.r, m.b, m.l = plotMargins(w, h)\n}\n\ntype eltTicks struct {\n\teltCommon\n\tlayout.Leaf\n\n\taxis     rune        // 'x' or 'y'\n\tticksFor *eltSubplot // Subplot to which this is directly attached\n\tticks    map[Scaler]plotEltTicks\n}\n\ntype plotEltTicks struct {\n\tmajor  table.Slice\n\tminor  table.Slice\n\tlabels []string\n}\n\nfunc newEltTicks(axis rune, s *eltSubplot) *eltTicks {\n\telt := &eltTicks{\n\t\teltCommon: s.eltCommon,\n\t\taxis:      axis,\n\t\tticksFor:  s,\n\t}\n\tswitch axis {\n\tcase 'x':\n\t\telt.yPath = eltPath{s.subplot.y, 1}\n\tcase 'y':\n\t\telt.xPath = eltPath{s.subplot.x, -1}\n\tdefault:\n\t\tpanic(\"bad axis\")\n\t}\n\treturn elt\n}\n\nfunc (e *eltTicks) scales() 
map[Scaler]bool {\n\tswitch e.axis {\n\tcase 'x':\n\t\treturn e.ticksFor.scales[\"x\"]\n\tcase 'y':\n\t\treturn e.ticksFor.scales[\"y\"]\n\tdefault:\n\t\tpanic(\"bad axis\")\n\t}\n}\n\nfunc (e *eltTicks) mapTicks(s Scaler, ticks table.Slice) (pixels []float64) {\n\tx, y, w, h := e.Layout()\n\t// TODO: This doesn't show ticks in the margin area. This may\n\t// be fine with niced tick labels, but it tends to look bad\n\t// with un-niced ticks. Ideally we would expand the input\n\t// domain instead, but this isn't well-defined for discrete\n\t// scales. We could use Unmap to try to find the expanded\n\t// input domain on both sides, but fall back to expanding the\n\t// ranger if Unmap fails (which it would for a discrete\n\t// scale).\n\tm := e.ticksFor.plotMargins\n\tswitch e.axis {\n\tcase 'x':\n\t\ts.Ranger(NewFloatRanger(x+m.l, x+w-m.r))\n\tcase 'y':\n\t\ts.Ranger(NewFloatRanger(y+h-m.b, y+m.t))\n\t}\n\treturn mapMany(s, ticks).([]float64)\n}\n\n// computeTicks computes the location and labels of the ticks in\n// element e based on the dimensions of e.ticksFor (which must have\n// been laid out prior to calling this).\nfunc (e *eltTicks) computeTicks() {\n\tconst tickDistance = 30 // TODO: Theme. 
Min pixels between tick labels.\n\n\t_, _, w, h := e.ticksFor.Layout()\n\tvar dim float64\n\tswitch e.axis {\n\tcase 'x':\n\t\tdim = w\n\tcase 'y':\n\t\tdim = h\n\t}\n\n\t// Compute max ticks assuming the labels are zero sized.\n\tmaxTicks := int(dim / tickDistance)\n\n\t// Optimize ticks, keeping labels at least tickDistance apart.\n\te.ticks = make(map[Scaler]plotEltTicks)\n\tfor s := range e.scales() {\n\t\tpred := func(ticks, _ table.Slice, labels []string) bool {\n\t\t\tif len(labels) <= 1 {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\t// Check distance between labels.\n\t\t\tpos := e.mapTicks(s, ticks)\n\t\t\t// Ticks are in value order, but we need them\n\t\t\t// in position order.\n\t\t\tsort.Float64s(pos)\n\t\t\tvar last float64\n\t\t\tfor i, p := range pos {\n\t\t\t\tif i > 0 && p-last < tickDistance {\n\t\t\t\t\t// Labels i-1 and i are too close.\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tmetrics := measureString(fontSize, labels[i])\n\t\t\t\tswitch e.axis {\n\t\t\t\tcase 'x':\n\t\t\t\t\tlast = p + metrics.width\n\t\t\t\tcase 'y':\n\t\t\t\t\tlast = p + metrics.leading\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn true\n\t\t}\n\t\tmajor, minor, labels := s.Ticks(maxTicks, pred)\n\t\te.ticks[s] = plotEltTicks{major, minor, labels}\n\t}\n}\n\nfunc (e *eltTicks) SizeHint() (w, h float64, flexw, flexh bool) {\n\tif len(e.ticks) == 0 {\n\t\t// Ticks haven't been computed yet or there are none.\n\t\t// Assume this takes up no space.\n\t\tswitch e.axis {\n\t\tcase 'x':\n\t\t\treturn 0, 0, true, false\n\t\tcase 'y':\n\t\t\treturn 0, 0, false, true\n\t\tdefault:\n\t\t\tpanic(\"bad axis\")\n\t\t}\n\t}\n\n\tvar maxWidth, maxHeight float64\n\tfor s := range e.scales() {\n\t\tfor _, label := range e.ticks[s].labels {\n\t\t\tmetrics := measureString(fontSize, label)\n\t\t\tmaxHeight = math.Max(maxHeight, metrics.leading)\n\t\t\tmaxWidth = math.Max(maxWidth, metrics.width)\n\t\t}\n\t}\n\tswitch e.axis {\n\tcase 'x':\n\t\tmaxHeight += xTickSep\n\tcase 'y':\n\t\tmaxWidth += 
yTickSep\n\t}\n\treturn maxWidth, maxHeight, e.axis == 'x', e.axis == 'y'\n}\n\ntype eltLabel struct {\n\teltCommon\n\tlayout.Leaf\n\n\tside  rune // 't', 'b', 'l', 'r'\n\tlabel string\n\tfill  string\n}\n\nfunc newEltLabelFacet(side rune, label string, x1, y1, x2, y2 int, level int) *eltLabel {\n\telt := &eltLabel{\n\t\tside:  side,\n\t\tlabel: label,\n\t\tfill:  \"#ccc\", // TODO: Theme.\n\t}\n\tswitch side {\n\tcase 't':\n\t\telt.eltCommon = eltCommon{\n\t\t\txPath:  eltPath{x1},\n\t\t\tyPath:  eltPath{y1, -3, -level},\n\t\t\tx2Path: eltPath{x2},\n\t\t}\n\tcase 'r':\n\t\telt.eltCommon = eltCommon{\n\t\t\txPath:  eltPath{x2, 3, level},\n\t\t\tyPath:  eltPath{y1},\n\t\t\ty2Path: eltPath{y2},\n\t\t}\n\tdefault:\n\t\tpanic(\"bad side\")\n\t}\n\treturn elt\n}\n\nfunc newEltLabelAxis(side rune, label string, x, y, span int) *eltLabel {\n\telt := &eltLabel{\n\t\teltCommon: eltCommon{xPath: eltPath{x}, yPath: eltPath{y}},\n\t\tside:      side,\n\t\tlabel:     label,\n\t\tfill:      \"none\",\n\t}\n\tswitch side {\n\tcase 'T', 'b':\n\t\telt.x2Path = eltPath{x + span}\n\tcase 'l':\n\t\telt.y2Path = eltPath{y + span}\n\tdefault:\n\t\tpanic(\"bad side\")\n\t}\n\treturn elt\n}\n\nfunc (e *eltLabel) SizeHint() (w, h float64, flexw, flexh bool) {\n\t// TODO: We actually want the height of the text, which could\n\t// be N*leading if there are multiple lines.\n\tdim := measureString(fontSize, e.label).leading * facetLabelHeight\n\tswitch e.side {\n\tcase 't', 'b':\n\t\treturn 0, dim, true, false\n\tcase 'T': // Titles\n\t\treturn 0, 1.5 * dim, true, false\n\tcase 'l', 'r':\n\t\treturn dim, 0, false, true\n\tdefault:\n\t\tpanic(\"bad side\")\n\t}\n}\n\ntype eltPadding struct {\n\teltCommon\n\tlayout.Leaf\n\n\tside rune // 't', 'b', 'l', 'r'\n}\n\nfunc newEltPadding(side rune, x, y int) *eltPadding {\n\telt := &eltPadding{\n\t\teltCommon: eltCommon{xPath: eltPath{x}, yPath: eltPath{y}},\n\t\tside:      side,\n\t}\n\tswitch side {\n\tcase 't':\n\t\telt.yPath = eltPath{y, 
-2}\n\tcase 'r':\n\t\telt.xPath = eltPath{x, 2}\n\tcase 'b':\n\t\telt.yPath = eltPath{y, 2}\n\tcase 'l':\n\t\telt.xPath = eltPath{x, -2}\n\tdefault:\n\t\tpanic(\"bad side\")\n\t}\n\treturn elt\n}\n\nfunc (e *eltPadding) SizeHint() (w, h float64, flexw, flexh bool) {\n\tconst padding = 4 // TODO: Theme.\n\n\tswitch e.side {\n\tcase 't', 'b':\n\t\treturn 0, padding, true, false\n\tcase 'l', 'r':\n\t\treturn padding, 0, false, true\n\tdefault:\n\t\tpanic(\"bad side\")\n\t}\n}\n\nfunc addSubplotLabels(elts []plotElt) []plotElt {\n\t// Find the regions covered by each subplot band.\n\tvBands := make(map[*subplotBand]subplotRegion)\n\thBands := make(map[*subplotBand]subplotRegion)\n\tfor _, elt := range elts {\n\t\telt, ok := elt.(*eltSubplot)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\ts := elt.subplot\n\n\t\tlevel := 0\n\t\tfor vBand := s.vBand; vBand != nil; vBand = vBand.parent {\n\t\t\tr := vBands[vBand]\n\t\t\tr.update(s, level)\n\t\t\tvBands[vBand] = r\n\t\t\tlevel++\n\t\t}\n\n\t\tlevel = 0\n\t\tfor hBand := s.hBand; hBand != nil; hBand = hBand.parent {\n\t\t\tr := hBands[hBand]\n\t\t\tr.update(s, level)\n\t\t\thBands[hBand] = r\n\t\t\tlevel++\n\t\t}\n\t}\n\n\t// Create ticks.\n\t//\n\t// TODO: If the facet grid isn't total, this can add ticks to\n\t// the side of a plot that's in the middle of the grid and\n\t// that creates a gap between all of the plots. 
This seems\n\t// like a fundamental limitation of treating this as a grid.\n\t// We could either abandon the grid and instead use a\n\t// hierarchy of left-of/right-of/above/below relations, or we\n\t// could make facets produce a total grid.\n\tvar prev *eltSubplot\n\tvar curTicks *eltTicks\n\tsorter := newSubplotSorter(elts, 'x')\n\tsort.Sort(sorter)\n\tfor _, elt := range sorter.elts {\n\t\tif prev == nil || prev.subplot.y != elt.subplot.y || !eqScales(prev, elt, \"y\") {\n\t\t\t// Show Y axis ticks.\n\t\t\tcurTicks = newEltTicks('y', elt)\n\t\t\telts = append(elts, curTicks)\n\t\t}\n\t\telt.yTicks = curTicks\n\t\tprev = elt\n\t}\n\tsorter.dir = 'y'\n\tsort.Sort(sorter)\n\tprev, curTicks = nil, nil\n\tfor _, elt := range sorter.elts {\n\t\tif prev == nil || prev.subplot.x != elt.subplot.x || !eqScales(prev, elt, \"x\") {\n\t\t\t// Show X axis ticks.\n\t\t\tcurTicks = newEltTicks('x', elt)\n\t\t\telts = append(elts, curTicks)\n\t\t}\n\t\telt.xTicks = curTicks\n\t\tprev = elt\n\t}\n\n\t// Create labels.\n\tfor vBand, r := range vBands {\n\t\telts = append(elts, newEltLabelFacet('t', vBand.label, r.x1, r.y1, r.x2, r.y2, r.level))\n\t}\n\tfor hBand, r := range hBands {\n\t\telts = append(elts, newEltLabelFacet('r', hBand.label, r.x1, r.y1, r.x2, r.y2, r.level))\n\t}\n\treturn elts\n}\n\nfunc addAxisLabels(elts []plotElt, title, xlabel, ylabel string) []plotElt {\n\t// Find the region covered by subplots.\n\tvar r subplotRegion\n\tfor _, elt := range elts {\n\t\telt, ok := elt.(*eltSubplot)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tr.update(elt.subplot, 0)\n\t}\n\tif !r.valid {\n\t\treturn elts\n\t}\n\n\t// Add title.\n\t// TODO: Make this larger.\n\tif title != \"\" {\n\t\telts = append(elts,\n\t\t\tnewEltLabelAxis('T', title, r.x1, r.y1-1, r.x2-r.x1))\n\t}\n\n\t// Add labels.\n\telts = append(elts,\n\t\tnewEltLabelAxis('b', xlabel, r.x1, r.y2+1, r.x2-r.x1),\n\t\tnewEltLabelAxis('l', ylabel, r.x1-1, r.y1, r.y2-r.y1))\n\treturn elts\n}\n\ntype subplotRegion struct 
{\n\tvalid                 bool\n\tx1, x2, y1, y2, level int\n}\n\nfunc (r *subplotRegion) update(s *subplot, level int) {\n\tif !r.valid {\n\t\tr.x1, r.x2, r.y1, r.y2, r.level = s.x, s.x, s.y, s.y, level\n\t\tr.valid = true\n\t\treturn\n\t}\n\tif s.x < r.x1 {\n\t\tr.x1 = s.x\n\t} else if s.x > r.x2 {\n\t\tr.x2 = s.x\n\t}\n\tif s.y < r.y1 {\n\t\tr.y1 = s.y\n\t} else if s.y > r.y2 {\n\t\tr.y2 = s.y\n\t}\n\tif level > r.level {\n\t\tr.level = level\n\t}\n}\n\n// subplotSorter sorts eltSubplots by subplot (x, y) position.\ntype subplotSorter struct {\n\telts []*eltSubplot\n\n\t// dir indicates primary sorting direction: 'x' means to sort\n\t// left-to-right, top-to-bottom; 'y' means to sort\n\t// bottom-to-top, left-to-right.\n\tdir rune\n}\n\nfunc newSubplotSorter(elts []plotElt, dir rune) *subplotSorter {\n\tselts := []*eltSubplot{}\n\tfor _, elt := range elts {\n\t\tif s, ok := elt.(*eltSubplot); ok {\n\t\t\tselts = append(selts, s)\n\t\t}\n\t}\n\treturn &subplotSorter{selts, dir}\n}\n\nfunc (s subplotSorter) Len() int {\n\treturn len(s.elts)\n}\n\nfunc (s subplotSorter) Less(i, j int) bool {\n\ta, b := s.elts[i], s.elts[j]\n\tif s.dir == 'x' {\n\t\tif a.subplot.y != b.subplot.y {\n\t\t\treturn a.subplot.y < b.subplot.y\n\t\t}\n\t\treturn a.subplot.x < b.subplot.x\n\t} else {\n\t\tif a.subplot.x != b.subplot.x {\n\t\t\treturn a.subplot.x < b.subplot.x\n\t\t}\n\t\treturn a.subplot.y > b.subplot.y\n\t}\n}\n\nfunc (s subplotSorter) Swap(i, j int) {\n\ts.elts[i], s.elts[j] = s.elts[j], s.elts[i]\n}\n\nfunc eqScales(a, b *eltSubplot, aes string) bool {\n\tsa, sb := a.scales[aes], b.scales[aes]\n\tif len(sa) != len(sb) {\n\t\treturn false\n\t}\n\tfor k, v := range sa {\n\t\tif sb[k] != v {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\ntype eltPath []int\n\nfunc (a eltPath) cmp(b eltPath) int {\n\tfor len(a) > 0 || len(b) > 0 {\n\t\tvar ax, bx int\n\t\tif len(a) > 0 {\n\t\t\tax, a = a[0], a[1:]\n\t\t}\n\t\tif len(b) > 0 {\n\t\t\tbx, b = b[0], b[1:]\n\t\t}\n\t\tif 
ax != bx {\n\t\t\tif ax < bx {\n\t\t\t\treturn -1\n\t\t\t} else {\n\t\t\t\treturn 1\n\t\t\t}\n\t\t}\n\t}\n\treturn 0\n}\n\ntype eltPaths []eltPath\n\nfunc (s eltPaths) Len() int {\n\treturn len(s)\n}\n\nfunc (s eltPaths) Less(i, j int) bool {\n\treturn s[i].cmp(s[j]) < 0\n}\n\nfunc (s eltPaths) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n}\n\nfunc (s eltPaths) nub() eltPaths {\n\tvar i, o int\n\tfor i, o = 1, 1; i < len(s); i++ {\n\t\tif s[i].cmp(s[i-1]) != 0 {\n\t\t\ts[o] = s[i]\n\t\t\to++\n\t\t}\n\t}\n\treturn s[:o]\n}\n\nfunc (s eltPaths) find(p eltPath) int {\n\treturn sort.Search(len(s), func(i int) bool {\n\t\treturn s[i].cmp(p) >= 0\n\t})\n}\n\n// layoutPlotElts returns a layout containing all of the elements in\n// elts.\n//\n// layoutPlotElts flattens the X and Y paths of elts into simple\n// coordinate indexes and constructs a layout.Grid.\nfunc layoutPlotElts(elts []plotElt) layout.Element {\n\t// Add padding elements to each subplot.\n\t//\n\t// TODO: Should there be padding between labels and the plot?\n\tfor _, elt := range elts {\n\t\telt, ok := elt.(*eltSubplot)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tx, y := elt.xPath[0], elt.yPath[0]\n\t\telts = append(elts,\n\t\t\tnewEltPadding('t', x, y),\n\t\t\tnewEltPadding('r', x, y),\n\t\t\tnewEltPadding('b', x, y),\n\t\t\tnewEltPadding('l', x, y),\n\t\t)\n\t}\n\n\t// Construct the global element grid from coordinate paths by\n\t// sorting the sets of X paths and Y paths to each leaf and\n\t// computing a global (x,y) for each leaf from these orders.\n\ttype eltPos struct {\n\t\tx, y, xSpan, ySpan int\n\t}\n\tflat := map[plotElt]eltPos{}\n\tdir := func(get func(plotElt) (p, p2 eltPath), set func(p *eltPos, pos, span int)) {\n\t\tvar paths eltPaths\n\t\tfor _, elt := range elts {\n\t\t\tp, p2 := get(elt)\n\t\t\tpaths = append(paths, p)\n\t\t\tif p2 != nil {\n\t\t\t\tpaths = append(paths, p2)\n\t\t\t}\n\t\t}\n\t\tsort.Sort(paths)\n\t\tpaths = paths.nub()\n\t\tfor _, elt := range elts {\n\t\t\tp, p2 := 
get(elt)\n\t\t\tpos, span := paths.find(p), 1\n\t\t\tif p2 != nil {\n\t\t\t\tspan = paths.find(p2) - pos + 1\n\t\t\t}\n\t\t\teltPos := flat[elt]\n\t\t\tset(&eltPos, pos, span)\n\t\t\tflat[elt] = eltPos\n\t\t}\n\t}\n\tdir(func(e plotElt) (p, p2 eltPath) {\n\t\tp, _, p2, _ = e.paths()\n\t\treturn\n\t}, func(p *eltPos, pos, span int) {\n\t\tp.x, p.xSpan = pos, span\n\t})\n\tdir(func(e plotElt) (p, p2 eltPath) {\n\t\t_, p, _, p2 = e.paths()\n\t\treturn\n\t}, func(p *eltPos, pos, span int) {\n\t\tp.y, p.ySpan = pos, span\n\t})\n\n\t// Construct the grid layout.\n\tl := new(layout.Grid)\n\tfor elt, pos := range flat {\n\t\tl.Add(elt, pos.x, pos.y, pos.xSpan, pos.ySpan)\n\t}\n\treturn l\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/gg/mark.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage gg\n\nimport (\n\t\"bytes\"\n\t\"encoding/base64\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"image\"\n\t\"image/color\"\n\t\"image/png\"\n\t\"math\"\n\t\"reflect\"\n\t\"sort\"\n\t\"strconv\"\n\n\t\"github.com/aclements/go-gg/generic/slice\"\n\t\"github.com/aclements/go-gg/table\"\n\t\"github.com/aclements/go-moremath/stats\"\n\t\"github.com/ajstarks/svgo\"\n)\n\n// TODO: Audit all of this for inf and NaN.\n\ntype marker interface {\n\tmark(env *renderEnv, canvas *svg.SVG)\n}\n\nfunc isFinite(x float64) bool {\n\treturn !(math.IsNaN(x) || math.IsInf(x, 0))\n}\n\ntype plotMark struct {\n\tm      marker\n\tgroups []table.GroupID\n}\n\ntype markPath struct {\n\tx, y, stroke, fill *scaledData\n}\n\nfunc (m *markPath) mark(env *renderEnv, canvas *svg.SVG) {\n\t// XXX What ensures these type assertions will succeed,\n\t// especially if it's an identity scale? 
Maybe identity scales\n\t// still need to coerce their results to the right type.\n\txs, ys := env.get(m.x).([]float64), env.get(m.y).([]float64)\n\tvar stroke color.Color = color.Black\n\tif m.stroke != nil {\n\t\tstroke = env.getFirst(m.stroke).(color.Color)\n\t}\n\tvar fill color.Color = color.Transparent\n\tif m.fill != nil {\n\t\tfill = env.getFirst(m.fill).(color.Color)\n\t}\n\n\tdrawPath(canvas, xs, ys, stroke, fill)\n}\n\ntype markArea struct {\n\tx, upper, lower, fill, fillOpacity *scaledData\n}\n\nfunc reversed(data []float64) []float64 {\n\tvar rev []float64\n\tfor i := len(data) - 1; i >= 0; i-- {\n\t\trev = append(rev, data[i])\n\t}\n\treturn rev\n}\n\nfunc (m *markArea) mark(env *renderEnv, canvas *svg.SVG) {\n\txs := env.get(m.x).([]float64)\n\tupper := env.get(m.upper).([]float64)\n\tlower := env.get(m.lower).([]float64)\n\tvar fill color.Color = color.Black\n\tif m.fill != nil {\n\t\tfill = env.getFirst(m.fill).(color.Color)\n\t}\n\tfillOpacity := 0.5\n\tif m.fillOpacity != nil {\n\t\tfillOpacity = env.getFirst(m.fillOpacity).(float64)\n\t}\n\tr, g, b, a := fill.RGBA()\n\tfill = color.RGBA64{\n\t\tuint16(float64(r) * fillOpacity),\n\t\tuint16(float64(g) * fillOpacity),\n\t\tuint16(float64(b) * fillOpacity),\n\t\tuint16(float64(a) * fillOpacity)}\n\n\txs = append(xs, reversed(xs)...)\n\tys := append(upper, reversed(lower)...)\n\n\tdrawPath(canvas, xs, ys, color.Transparent, fill)\n}\n\ntype markSteps struct {\n\tdir StepMode\n\n\tx, y, stroke, fill *scaledData\n}\n\nfunc (m *markSteps) mark(env *renderEnv, canvas *svg.SVG) {\n\txs, ys := env.get(m.x).([]float64), env.get(m.y).([]float64)\n\tvar stroke color.Color = color.Black\n\tif m.stroke != nil {\n\t\tstroke = env.getFirst(m.stroke).(color.Color)\n\t}\n\tvar fill color.Color = color.Transparent\n\tif m.fill != nil {\n\t\tfill = env.getFirst(m.fill).(color.Color)\n\t}\n\n\tif len(xs) == 0 {\n\t\treturn\n\t}\n\n\t// Create intermediate points.\n\txs2, ys2 := make([]float64, 2*len(xs)), 
make([]float64, 2*len(ys))\n\tfor i := range xs2 {\n\t\tswitch m.dir {\n\t\tcase StepHV, StepVH:\n\t\t\txs2[i], ys2[i] = xs[i/2], ys[i/2]\n\t\tcase StepHMid, StepVMid:\n\t\t\tif i == 0 || i == len(xs2)-1 {\n\t\t\t\txs2[i], ys2[i] = xs[i/2], ys[i/2]\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tvar p1, p2 int\n\t\t\tif i%2 == 0 {\n\t\t\t\t// Interpolate i/2-1 and i/2.\n\t\t\t\tp1, p2 = i/2-1, i/2\n\t\t\t} else {\n\t\t\t\t// Interpolate i/2 and i/2+1.\n\t\t\t\tp1, p2 = i/2, i/2+1\n\t\t\t}\n\t\t\tif m.dir == StepHMid {\n\t\t\t\txs2[i], ys2[i] = (xs[p1]+xs[p2])/2, ys[i/2]\n\t\t\t} else {\n\t\t\t\txs2[i], ys2[i] = xs[i/2], (ys[p1]+ys[p2])/2\n\t\t\t}\n\t\t}\n\t}\n\tif m.dir == StepHV {\n\t\txs2 = xs2[1:]\n\t} else if m.dir == StepVH {\n\t\tys2 = ys2[1:]\n\t}\n\n\tdrawPath(canvas, xs2, ys2, stroke, fill)\n}\n\nfunc drawPath(canvas *svg.SVG, xs, ys []float64, stroke color.Color, fill color.Color) {\n\tswitch len(xs) {\n\tcase 0:\n\t\treturn\n\tcase 1:\n\t\t// TODO: Depending on the stroke cap, this *could* be\n\t\t// well-defined.\n\t\tWarning.Print(\"cannot draw path through 1 point; ignoring\")\n\t\treturn\n\t}\n\n\t// Build path.\n\tvar path []byte\n\tinLine := false\n\tfor i := range xs {\n\t\tif !isFinite(xs[i]) || !isFinite(ys[i]) {\n\t\t\tinLine = false\n\t\t\tcontinue\n\t\t}\n\t\tif !inLine {\n\t\t\tpath = append(path, 'M')\n\t\t\tinLine = true\n\t\t}\n\t\tpath = append(path, ' ')\n\t\tpath = strconv.AppendFloat(path, xs[i], 'g', 6, 64)\n\t\tpath = append(path, ' ')\n\t\tpath = strconv.AppendFloat(path, ys[i], 'g', 6, 64)\n\t}\n\tif len(path) == 0 {\n\t\treturn\n\t}\n\n\t// XXX Stroke width\n\n\tstyle := cssPaint(\"stroke\", stroke) + \";\" + cssPaint(\"fill\", fill) + \";stroke-width:3\"\n\tcanvas.Path(wrapPath(string(path)), style)\n}\n\ntype markPoint struct {\n\tx, y, color, opacity, size *scaledData\n}\n\nfunc (m *markPoint) mark(env *renderEnv, canvas *svg.SVG) {\n\txs, ys := env.get(m.x).([]float64), env.get(m.y).([]float64)\n\tvar colors []color.Color\n\tif m.color != nil 
{\n\t\tslice.Convert(&colors, env.get(m.color))\n\t}\n\tvar opacities []float64\n\tif m.opacity != nil {\n\t\topacities = env.get(m.opacity).([]float64)\n\t}\n\tvar sizes []float64\n\tif m.size != nil {\n\t\tsizes = env.get(m.size).([]float64)\n\t}\n\tmindim := math.Min(env.Size())\n\n\tfor i := range xs {\n\t\tif !isFinite(xs[i]) || !isFinite(ys[i]) {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar style string\n\t\tif colors != nil {\n\t\t\tstyle = cssPaint(\"fill\", colors[i])\n\t\t}\n\t\tif opacities != nil {\n\t\t\tif style != \"\" {\n\t\t\t\tstyle += \";\"\n\t\t\t}\n\t\t\tstyle += fmt.Sprintf(\"opacity:%.6g\", opacities[i])\n\t\t}\n\t\tr := mindim * 0.01\n\t\tif sizes != nil {\n\t\t\tr = mindim * sizes[i]\n\t\t}\n\t\tcanvas.Circle(int(xs[i]), int(ys[i]), int(r), style)\n\t}\n}\n\ntype markTiles struct {\n\tx, y, fill *scaledData\n}\n\nfunc (m *markTiles) mark(env *renderEnv, canvas *svg.SVG) {\n\txs, ys := env.get(m.x).([]float64), env.get(m.y).([]float64)\n\t// TODO: Should the Scaler (or Ranger) ensure that the values\n\t// are color.Color? How would this work with an identity\n\t// scaler?\n\tvar fills []color.Color\n\tif m.fill != nil {\n\t\tslice.Convert(&fills, env.get(m.fill))\n\t}\n\n\t// TODO: We can't use an <image> this if the width and height\n\t// are specified, or if there is a stroke.\n\t// minx, maxx := stats.Bounds(xs)\n\t// miny, maxy := stats.Bounds(ys)\n\n\t// Compute image bounds.\n\timageBounds := func(vals []float64) (float64, float64, float64, bool) {\n\t\t// Reduce to unique values.\n\t\tunique := []float64{}\n\t\tuset := map[float64]bool{}\n\t\tfor _, v := range vals {\n\t\t\tif !uset[v] {\n\t\t\t\tif !isFinite(v) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tunique = append(unique, v)\n\t\t\t\tuset[v] = true\n\t\t\t}\n\t\t}\n\n\t\tvar minGap float64\n\t\tregular := true\n\t\tswitch len(unique) {\n\t\tcase 0:\n\t\t\treturn 0, 0, -1, false\n\t\tcase 1:\n\t\t\t// TODO: In this case we'll produce a 1 pixel\n\t\t\t// wide/high line. 
That's probably not what's\n\t\t\t// desired. Maybe we want it to be the\n\t\t\t// width/height of the plot area?\n\t\t\tminGap = 1.0\n\t\tdefault:\n\t\t\tsort.Float64s(unique)\n\t\t\tminGap = unique[1] - unique[0]\n\t\t\tfor i, u := range unique[1:] {\n\t\t\t\tminGap = math.Min(minGap, u-unique[i])\n\t\t\t}\n\t\t\t// Consider the spacing \"regular\" if every\n\t\t\t// point is within a 1000th of a multiple of\n\t\t\t// minGap.\n\t\t\tfor _, u := range unique {\n\t\t\t\t_, error := math.Modf((u - unique[0]) / minGap)\n\t\t\t\tif 0.001 <= error && error <= 0.999 {\n\t\t\t\t\tregular = false\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn unique[0], unique[len(unique)-1], minGap, regular\n\t}\n\txmin, xmax, xgap, xreg := imageBounds(xs)\n\tymin, ymax, ygap, yreg := imageBounds(ys)\n\tif xgap == -1 || ygap == -1 {\n\t\treturn\n\t}\n\tif !xreg || !yreg {\n\t\t// TODO: Can't use an image.\n\t\tpanic(\"not implemented: irregular tile spacing\")\n\t}\n\n\t// TODO: If there are a small number of cells, just make the\n\t// rectangles since it's hard to reliably disable\n\t// interpolation (e.g., the below doesn't work in rsvg).\n\n\t// Create the image.\n\tiw, ih := round((xmax-xmin+xgap)/xgap), round((ymax-ymin+ygap)/ygap)\n\timg := image.NewRGBA(image.Rect(0, 0, iw, ih))\n\tfill := color.Color(color.Black)\n\tfor i := range xs {\n\t\tif !isFinite(xs[i]) || !isFinite(ys[i]) {\n\t\t\tcontinue\n\t\t}\n\t\tif fills != nil {\n\t\t\tfill = fills[i]\n\t\t}\n\t\timg.Set(round((xs[i]-xmin)/xgap), round((ys[i]-ymin)/ygap), fill)\n\t}\n\n\t// Encode the image.\n\turi := bytes.NewBufferString(\"data:image/png;base64,\")\n\tw := base64.NewEncoder(base64.StdEncoding, uri)\n\tif err := png.Encode(w, img); err != nil {\n\t\tWarning.Println(\"error encoding image:\", err)\n\t\treturn\n\t}\n\tw.Close()\n\tcanvas.Image(round(xmin-xgap/2), round(ymin-ygap/2),\n\t\tround(xmax-xmin+xgap), int(ymax-ymin+ygap),\n\t\turi.String(), `preserveAspectRatio=\"none\" 
style=\"image-rendering:optimizeSpeed;image-rendering:-moz-crisp-edges;image-rendering:-webkit-optimize-contrast;image-rendering:pixelated\"`)\n}\n\ntype markTags struct {\n\tx, y   *scaledData\n\tlabels map[table.GroupID]table.Slice\n\thpos   float64\n\n\toffsetX, offsetY int\n}\n\nfunc (m *markTags) mark(env *renderEnv, canvas *svg.SVG) {\n\tconst padX = 5\n\n\txs, ys := env.get(m.x).([]float64), env.get(m.y).([]float64)\n\tif len(xs) == 0 {\n\t\treturn\n\t}\n\n\t// Find the point closest to hpos between the min and max.\n\t//\n\t// TODO: Give the user control over this.\n\tminx, maxx := stats.Bounds(xs)\n\ttargetx := minx + (maxx-minx)*m.hpos\n\tmidi, middelta := 0, math.Abs(xs[0]-targetx)\n\tfor i, x := range xs {\n\t\tdelta := math.Abs(x - targetx)\n\t\tif delta < middelta {\n\t\t\tmidi, middelta = i, delta\n\t\t}\n\t}\n\n\t// Get label.\n\tlabel := fmt.Sprint(reflect.ValueOf(m.labels[env.gid]).Index(midi).Interface())\n\n\t// Attach tag to this point.\n\t//\n\t// TODO: More user control.\n\t//\n\t// TODO: Make automatic positioning account for bounds of plot.\n\t//\n\t// TODO: Adjust positions to avoid overlap. 
Unfortunately,\n\t// this requires some global optimization, but mark only sees\n\t// one tag at a time.\n\t//\n\t// TODO: Re-enable the tag box when I have decent text metrics.\n\t//t := measureString(fontSize, label)\n\t//canvas.Rect(int(xs[midi]+offsetX-t.width), int(ys[midi]+offsetY-0.75*t.leading), int(t.width), int(1.5*t.leading), `rx=\"4\"`, `fill=\"white\"`, `stroke=\"black\"`)\n\tif m.offsetX > 0 {\n\t\t// To the right, left-aligned.\n\t\tcanvas.Text(int(xs[midi])+m.offsetX+padX, int(ys[midi])+m.offsetY, label, `dy=\".3em\"`)\n\t} else {\n\t\tcanvas.Text(int(xs[midi])+m.offsetX-padX, int(ys[midi])+m.offsetY, label, `dy=\".3em\"`, `text-anchor=\"end\"`)\n\t}\n\tcanvas.Path(fmt.Sprintf(\"M%.6g %.6gc%.6g %.6g,%.6g %.6g,%.6g %.6g\", xs[midi], ys[midi], 0.8*float64(m.offsetX), 0.0, 0.2*float64(m.offsetX), float64(m.offsetY), float64(m.offsetX), float64(m.offsetY)), `fill=\"none\"`, `stroke=\"black\"`, `stroke-dasharray=\"2, 3\"`, `stroke-width=\"2\"`)\n}\n\ntype markTooltips struct {\n\tx, y   *scaledData\n\tlabels map[table.GroupID]table.Slice\n}\n\nfunc (m *markTooltips) mark(env *renderEnv, canvas *svg.SVG) {\n\t// Construct JSON for data.\n\txs, ys := env.get(m.x).([]float64), env.get(m.y).([]float64)\n\tif len(xs) == 0 {\n\t\treturn\n\t}\n\tvar labels []string\n\tswitch l2 := m.labels[env.gid].(type) {\n\tcase []string:\n\t\tlabels = l2\n\tdefault:\n\t\tl2v := reflect.ValueOf(l2)\n\t\tlabels = make([]string, l2v.Len())\n\t\tfor i := range labels {\n\t\t\tlabels[i] = fmt.Sprint(l2v.Index(i).Interface())\n\t\t}\n\t}\n\n\t// TODO: Make env able to generate IDs.\n\t//\n\t// TODO: Sort by x and use binary search in Javascript.\n\t//\n\t// TODO: Remove points that round to the same coordinate.\n\t//\n\t// TODO: Put on the left if we're close to the right edge.\n\tid := fmt.Sprintf(\"tooltips%p\", env)\n\tvar buf bytes.Buffer\n\tfmt.Fprintf(&buf, \"var %s = \", id)\n\tdata := struct {\n\t\tX []int    `json:\"x\"`\n\t\tY []int    `json:\"y\"`\n\t\tL []string 
`json:\"l\"`\n\t}{make([]int, 0, len(xs)), make([]int, 0, len(ys)), labels}\n\tfor i := range xs {\n\t\tif !isFinite(xs[i]) || !isFinite(ys[i]) {\n\t\t\tcontinue\n\t\t}\n\t\t// Round data to an int to save space.\n\t\tdata.X = append(data.X, int(xs[i]+0.5))\n\t\tdata.Y = append(data.Y, int(ys[i]+0.5))\n\t}\n\tif len(data.X) == 0 {\n\t\treturn\n\t}\n\tjson.NewEncoder(&buf).Encode(data)\n\tcanvas.Script(\"text/javascript\", buf.String())\n\n\tcanvas.Path(\"\", `display=\"none\"`, `fill=\"white\"`, `stroke=\"black\"`, fmt.Sprintf(`id=\"%s-p\"`, id))\n\tcanvas.Text(0, 0, \"\", `display=\"none\"`, fmt.Sprintf(`id=\"%s-t\"`, id))\n\n\tpx, _, pw, _ := env.Area()\n\tcanvas.Rect(int(env.area[0]), int(env.area[1]), int(env.area[2]), int(env.area[3]), `fill-opacity=\"0\"`, fmt.Sprintf(`onmousemove=\"tooltipMove(evt,%s,&quot;%s&quot;,%v,%v)\"`, id, id, px, px+pw), fmt.Sprintf(`onmouseout=\"tooltipOut(evt,%s,&quot;%s&quot;)\"`, id, id))\n\n\t// TODO: Only write this once per SVG.\n\tcanvas.Script(\"text/javascript\", `\nfunction tooltipMove(evt, data, tid, minx, maxx) {\n\t// Convert evt.x to an SVG coordinate.\n\tvar svg = document.rootElement;\n\tvar pt = svg.createSVGPoint();\n\tpt.x = evt.clientX;\n\tpt.y = evt.clientY;\n\tvar epos = pt.matrixTransform(svg.getScreenCTM().inverse());\n\n\t// Find data point closest to event coordinate.\n\tvar cd = Math.sqrt(Math.pow(epos.x-data.x[0], 2) + Math.pow(epos.y-data.y[0], 2)), ci = 0;\n\tfor (var i = 1; i < data.x.length; i++) {\n\t\tvar d = Math.sqrt(Math.pow(epos.x-data.x[i], 2) + Math.pow(epos.y-data.y[i], 2));\n\t\tif (d < cd) { cd = d; ci = i; }\n\t}\n\n\t// Update text content and position.\n\tvar text = document.getElementById(tid+\"-t\");\n\ttext.textContent = data.l[ci];\n\ttext.style.display = \"block\";\n\ttext.setAttribute(\"x\", 0);\n\ttext.setAttribute(\"y\", 0);\n\tvar bb = text.getBBox();\n\tvar hm = 2, r = 3;\n\tvar tx = data.x[ci] + bb.height/4 + hm;\n\tvar flip = false;\n\tif (tx + bb.width + 2*hm + r > maxx) 
{\n\t\tvar tx2 = data.x[ci] - bb.height/4 - hm - bb.width;\n\t\tif (tx2 - 2*hm - r >= minx) {\n\t\t\t// Position left of point.\n\t\t\ttx = tx2;\n\t\t\tflip = true;\n\t\t}\n\t}\n\ttext.setAttribute(\"x\", tx);\n\ttext.setAttribute(\"y\", data.y[ci] - (bb.y + bb.height/2));\n\n\t// Update marker.\n\tvar p = document.getElementById(tid+\"-p\");\n\tif (flip) {\n\t\tp.setAttribute(\"transform\", \"translate(\"+2*data.x[ci]+\",0) scale(-1,1)\")\n\t} else {\n\t\tp.setAttribute(\"transform\", \"\")\n\t}\n\tp.setAttribute(\"d\", \"M\"+data.x[ci]+\",\"+data.y[ci]+\n\t\t\"l\"+(bb.height/4)+\",\"+(-bb.height/2)+\n\t\t\"h\"+(bb.width+2*hm)+\n\t\t\"a\"+r+\",\"+r+\",90,0,1,\"+r+\",\"+r+\n\t\t\"v\"+(bb.height-2*r)+\n\t\t\"a\"+r+\",\"+r+\",90,0,1,\"+(-r)+\",\"+r+\n\t\t\"h\"+(-bb.width-2*hm)+\"z\");\n\tp.style.display = \"block\";\n}\nfunction tooltipOut(evt, data, tid) {\n\tvar text = document.getElementById(tid+\"-t\");\n\ttext.style.display = \"none\";\n\tvar p = document.getElementById(tid+\"-p\");\n\tp.style.display = \"none\";\n}\n`)\n}\n\n// cssPaint returns a CSS fragment for setting CSS property prop to\n// color c.\nfunc cssPaint(prop string, c color.Color) string {\n\tr, g, b, a := c.RGBA()\n\tif a == 0 {\n\t\t// No paint.\n\t\treturn prop + \":none\"\n\t}\n\n\tif a != 0xffff {\n\t\t// Undo alpha pre-multiplication.\n\t\tr = r * 0xffff / a\n\t\tg = g * 0xffff / a\n\t\tb = b * 0xffff / a\n\t}\n\tr, g, b = r>>8, g>>8, b>>8\n\n\tcss := prop\n\tif r>>4 == r&0xF && g>>4 == g&0xF && b>>4 == b&0xF {\n\t\t// Use #rgb form.\n\t\tcss += fmt.Sprintf(\":#%x%x%x\", r>>4, g>>4, b>>4)\n\t} else {\n\t\t// Use #rrggbb form.\n\t\tcss += fmt.Sprintf(\":#%02x%02x%02x\", r, g, b)\n\t}\n\n\tif a != 0xffff {\n\t\t// SVG 1.1 only supports CSS2 color formats, which\n\t\t// unfortunately does not include rgba, so we have to\n\t\t// use a separate CSS property.\n\t\tcss += \";\" + prop + \"-opacity:\" + strconv.FormatFloat(float64(a)/0xffff, 'g', 0, 64)\n\t}\n\treturn css\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/gg/package.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package gg creates plots using the Grammar of Graphics.\n//\n// WARNING: This API is highly unstable. For now, please vendor this\n// package.\n//\n// gg creates statistical visualizations. It's designed to help users\n// quickly navigate and explore complex data in different ways, both\n// in terms of what they're plotting and how they're plotting it. This\n// focus on rapid exploration of complex data leads to a very\n// different design than typical plotting packages.\n//\n// gg is heavily inspired by Wilkinson's Grammar of Graphics [1]. A\n// key observation of the Grammar of Graphics is that there are many\n// motifs across different types of plots. The Grammar of Graphics\n// separates these motifs into orthogonal concerns that can be\n// manipulated and extended independently, enabling the creation of\n// traditional plot types from their fundamental components as well as\n// the creation of entirely new plot types.\n//\n// Data model\n//\n// Central to gg is its data model. At the most basic level, the input\n// data consists of a table with a set of named columns, with the rows\n// organized into one or more groups. At a higher level, because gg\n// makes it easy to restructure data before plotting it, it expects to\n// start with regularized input data, where each column represents a\n// distinct independent or dependent variable. In other words, any two\n// values that make sense to plot on the same axis should be in the\n// same column.\n//\n// For example, to express a line graph with several series of\n// different colors in gg, you would say \"plot column A against column\n// B, grouped into series and colored according to column C\". 
In\n// contrast, typical plotting packages use a \"spreadsheet\" model,\n// where each data series is a separate column, so expressing the same\n// graph requires saying \"plot column A against column B in color 1\n// and plot column A against column C in color 2\" and so on.\n//\n// gg's approach is suited to exploratory data analysis because you\n// don't have to restructure the data to see it in a different way. In\n// the traditional spreadsheet model, you have to structure the data\n// to match the plot. In gg, you tell the plot what structure to\n// extract from the data.\n//\n// Layers and scales\n//\n// To visualize data, gg provides a set of composable plot building\n// blocks. There are no fixed \"plot types\" in gg. The main building\n// block is a \"layer\", which transforms a data set into a set of\n// visual marks, such as lines, points, or rectangles. Each layer is\n// configured by mapping columns of the data set to different\n// \"aesthetics\". An aesthetic is a generalization of a dimension: X\n// and Y are aesthetics, but so are color and stroke width and point\n// shape. Unlike typical plotting packages, these various aesthetics\n// are treated symmetrically and any aesthetic can be fed from any\n// column of the data.\n//\n// Layers work in close concert with \"scales\", which map from values\n// in the data space to values in the visual space. Scales can map\n// from continuous or discrete data values (such as numbers or\n// strings) to continuous or discrete visual values (such as pixel\n// offsets or point shapes). Each aesthetic has an associated scale.\n// If the user hasn't provided a specific scale for an aesthetic, gg\n// uses a default scale that guesses what to do based on the data type\n// and aesthetic.\n//\n// Stats\n//\n// Data can be pre-processed prior to rendering it with a layer using\n// a \"stat\". 
A stat can be an arbitrary data transformation, but it's\n// typically used to compute statistical summaries, such as the\n// five-number summary (e.g., for a box plot), a linear regression, or\n// a density estimate.\n//\n// TODO: \"Compound\" layers?\n//\n// Facets\n//\n// TODO.\n//\n// Aesthetics\n//\n// gg understands the following aesthetics.\n//\n// \"x\" and \"y\" give the offset from the lower-left corner of a plot.\n// Their ranges are always set to the pixel coordinates of the X and Y\n// axes, respectively, and cannot be overridden.\n//\n// \"stroke\" and \"fill\" give the stroke and fill colors of paths and\n// points. Their ranger must have type color.Color. The default ranger\n// returns a single-hue gradient for continuous data, or a categorical\n// palette for discrete data.\n//\n// \"opacity\" gives the overall opacity of a mark. Its ranger must have\n// type float64 and give values between 0 and 1, inclusive. The\n// default ranger ranges from 10% opaque (0.1) to fully opaque (1.0).\n//\n// \"size\" gives the size of marks. Its ranger must have type float64\n// and yields values that are relative to the smallest dimension of\n// the plot area (e.g., a value of 0.5 creates a point that covers half\n// of the plot width or height, whichever is smaller). The default\n// ranger ranges from 1% (0.01) to 10% (0.1).\n//\n// Related work\n//\n// gg draws ideas and inspiration from many sources. The core\n// principle of a Grammar of Graphics was introduced by Wilkinson [1].\n// There have been many implementations in many languages. The most\n// popular is certainly Wickham's ggplot2 for R [2]. gg draws most\n// heavily on Wickham's follow-up work on ggvis for R [3].\n//\n// [1] Leland Wilkinson, The Grammar of Graphics, Springer, 1999.\n//\n// [2] Hadley Wickham, ggplot2: Elegant Graphics for Data Analysis,\n// Springer, 2009.\n//\n// [3] Hadley Wickham, ggvis, http://ggvis.rstudio.com/.\n//\n// TODO: Scale transforms, coordinate spaces.\npackage gg\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/gg/plot.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage gg\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\n\t\"github.com/aclements/go-gg/table\"\n)\n\n// TODO: Split transforms, scalers, and layers into their own packages\n// to clean up the name spaces and un-prefix their names?\n\n// Warning is a logger for reporting conditions that don't prevent the\n// production of a plot, but may lead to unexpected results.\nvar Warning = log.New(os.Stderr, \"[gg] \", log.Lshortfile)\n\n// Plot represents a single (potentially faceted) plot.\ntype Plot struct {\n\tenv    *plotEnv\n\tscales map[string]scalerTree\n\n\tscaledData map[scaledDataKey]*scaledData\n\tscaleSet   map[scaleKey]bool\n\tmarks      []plotMark\n\n\taxisLabels     map[string]string\n\tautoAxisLabels map[string][]string\n\n\ttitle string\n\n\tconstNonce int\n}\n\n// NewPlot returns a new Plot backed by data. It has no layers, one\n// facet, and all scales are default.\nfunc NewPlot(data table.Grouping) *Plot {\n\tp := &Plot{\n\t\tenv: &plotEnv{\n\t\t\tdata: data,\n\t\t},\n\t\tscales:         make(map[string]scalerTree),\n\t\tscaledData:     make(map[scaledDataKey]*scaledData),\n\t\tscaleSet:       make(map[scaleKey]bool),\n\t\taxisLabels:     make(map[string]string),\n\t\tautoAxisLabels: make(map[string][]string),\n\t}\n\treturn p\n}\n\ntype plotEnv struct {\n\tparent *plotEnv\n\tdata   table.Grouping\n}\n\ntype scaleKey struct {\n\tgid   table.GroupID\n\taes   string\n\tscale Scaler\n}\n\n// SetData sets p's current data table. 
The caller must not modify\n// data in this table after this point.\nfunc (p *Plot) SetData(data table.Grouping) *Plot {\n\tp.env.data = data\n\treturn p\n}\n\n// Data returns p's current data table.\nfunc (p *Plot) Data() table.Grouping {\n\treturn p.env.data\n}\n\n// Const creates a new constant column bound to val in all groups and\n// returns the generated column name. This is a convenient way to pass\n// constant values to layers as columns.\n//\n// TODO: Typically this should be used with PreScaled or physical types.\nfunc (p *Plot) Const(val interface{}) string {\n\ttab := p.Data()\n\nretry:\n\tcol := fmt.Sprintf(\"[gg-const-%d]\", p.constNonce)\n\tp.constNonce++\n\tfor _, col2 := range tab.Columns() {\n\t\tif col == col2 {\n\t\t\tgoto retry\n\t\t}\n\t}\n\n\tp.SetData(table.MapTables(tab, func(_ table.GroupID, t *table.Table) *table.Table {\n\t\treturn table.NewBuilder(t).AddConst(col, val).Done()\n\t}))\n\n\treturn col\n}\n\ntype scalerTree struct {\n\tscales map[table.GroupID]Scaler\n}\n\nfunc newScalerTree() scalerTree {\n\treturn scalerTree{map[table.GroupID]Scaler{\n\t\ttable.RootGroupID: &defaultScale{},\n\t}}\n}\n\nfunc (t scalerTree) bind(gid table.GroupID, s Scaler) {\n\t// Unbind scales under gid.\n\tfor ogid := range t.scales {\n\t\tif gid == table.RootGroupID {\n\t\t\t// Optimize binding the root GID.\n\t\t\tdelete(t.scales, ogid)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor p := ogid; ; p = p.Parent() {\n\t\t\tif p == gid {\n\t\t\t\tdelete(t.scales, ogid)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif p == table.RootGroupID {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tt.scales[gid] = s\n}\n\nfunc (t scalerTree) find(gid table.GroupID) Scaler {\n\tfor {\n\t\tif s, ok := t.scales[gid]; ok {\n\t\t\treturn s\n\t\t}\n\t\tif gid == table.RootGroupID {\n\t\t\t// This should never happen.\n\t\t\tpanic(\"no scale for group \" + gid.String())\n\t\t}\n\t\tgid = gid.Parent()\n\t}\n}\n\nfunc (p *Plot) getScales(aes string) scalerTree {\n\tst, ok := p.scales[aes]\n\tif !ok {\n\t\tst = 
newScalerTree()\n\t\tp.scales[aes] = st\n\t}\n\treturn st\n}\n\nfunc (p *Plot) copyScales(old, new table.GroupID) {\n\tfor _, st := range p.scales {\n\t\tst.scales[new] = st.find(old)\n\t}\n}\n\n// SetScale binds a scale to the given visual aesthetic. SetScale is\n// shorthand for SetScaleAt(aes, s, table.RootGroupID). SetScale must\n// be called before Add.\n//\n// SetScale returns p for ease of chaining.\nfunc (p *Plot) SetScale(aes string, s Scaler) *Plot {\n\treturn p.SetScaleAt(aes, s, table.RootGroupID)\n}\n\n// SetScaleAt binds a scale to the given visual aesthetic for all data\n// in group gid or descendants of gid. SetScaleAt must be called\n// before Add.\nfunc (p *Plot) SetScaleAt(aes string, s Scaler, gid table.GroupID) *Plot {\n\t// TODO: Should aes be an enum so you can't mix up aesthetics\n\t// and column names?\n\tp.getScales(aes).bind(gid, s)\n\treturn p\n}\n\n// GetScale returns the scale for the given visual aesthetic used for\n// data in the root group.\nfunc (p *Plot) GetScale(aes string) Scaler {\n\treturn p.GetScaleAt(aes, table.RootGroupID)\n}\n\n// GetScaleAt returns the scale for the given visual aesthetic used\n// for data in group gid.\nfunc (p *Plot) GetScaleAt(aes string, gid table.GroupID) Scaler {\n\treturn p.getScales(aes).find(gid)\n}\n\ntype scaledDataKey struct {\n\taes  string\n\tdata table.Grouping\n\tcol  string\n}\n\n// use binds a column of data to an aesthetic. It expands the domain\n// of the aesthetic's scale to include the data in col, and returns\n// the scaled data.\n//\n// col may be \"\", in which case it simply returns nil.\n//\n// TODO: Should aes be an enum?\nfunc (p *Plot) use(aes string, col string) *scaledData {\n\tif col == \"\" {\n\t\treturn nil\n\t}\n\n\t// TODO: This is wrong. If the scale tree for aes changes,\n\t// this may return a stale scaledData bound to the wrong\n\t// scalers. If I got rid of scale trees, I could just put the\n\t// scaler in the key. 
Or I could clean up the cache when the\n\t// scale tree changes.\n\n\tsd := p.scaledData[scaledDataKey{aes, p.Data(), col}]\n\tif sd == nil {\n\t\t// Construct the scaledData.\n\t\tsd = &scaledData{\n\t\t\tseqs: make(map[table.GroupID]scaledSeq),\n\t\t}\n\n\t\t// Get the scale tree.\n\t\tst := p.getScales(aes)\n\n\t\tfor _, gid := range p.Data().Tables() {\n\t\t\ttable := p.Data().Table(gid)\n\n\t\t\t// Get the data.\n\t\t\tseq := table.MustColumn(col)\n\n\t\t\t// Find the scale.\n\t\t\tscaler := st.find(gid)\n\n\t\t\t// Add the scale to the scale set.\n\t\t\tp.scaleSet[scaleKey{gid, aes, scaler}] = true\n\n\t\t\t// Train the scale.\n\t\t\tif _, ok := seq.([]Unscaled); !ok {\n\t\t\t\tscaler.ExpandDomain(seq)\n\t\t\t}\n\n\t\t\t// Add it to the scaledData.\n\t\t\tsd.seqs[gid] = scaledSeq{seq, scaler}\n\t\t}\n\n\t\tp.scaledData[scaledDataKey{aes, p.Data(), col}] = sd\n\t}\n\n\t// Update axis labels.\n\tif aes == \"x\" || aes == \"y\" {\n\t\tp.autoAxisLabels[aes] = append(p.autoAxisLabels[aes], col)\n\t}\n\n\treturn sd\n}\n\n// Save saves the current data table of p to a stack.\nfunc (p *Plot) Save() *Plot {\n\tp.env = &plotEnv{\n\t\tparent: p.env,\n\t\tdata:   p.env.data,\n\t}\n\treturn p\n}\n\n// Restore restores the data table of p from the save stack.\nfunc (p *Plot) Restore() *Plot {\n\tif p.env.parent == nil {\n\t\tpanic(\"unbalanced Save/Restore\")\n\t}\n\tp.env = p.env.parent\n\treturn p\n}\n\n// A Plotter is an operation that can modify a Plot.\ntype Plotter interface {\n\tApply(*Plot)\n}\n\n// Add applies each of plotters to Plot in order.\nfunc (p *Plot) Add(plotters ...Plotter) *Plot {\n\tfor _, plotter := range plotters {\n\t\tplotter.Apply(p)\n\t}\n\treturn p\n}\n\n// AxisLabel returns a Plotter that sets the label of an axis on a\n// Plot. 
By default, Plot constructs automatic axis labels from column\n// names, but AxisLabel lets callers override these.\n//\n// TODO: Should labels be attached to aesthetics, generally?\n//\n// TODO: Should this really be a Plotter or just a method of Plot?\nfunc AxisLabel(axis, label string) Plotter {\n\treturn axisLabel{axis, label}\n}\n\ntype axisLabel struct {\n\taxis, label string\n}\n\nfunc (a axisLabel) Apply(p *Plot) {\n\tp.axisLabels[a.axis] = a.label\n}\n\n// Title returns a Plotter that sets the title of a Plot.\nfunc Title(label string) Plotter {\n\treturn titlePlotter{label}\n}\n\ntype titlePlotter struct {\n\tlabel string\n}\n\nfunc (t titlePlotter) Apply(p *Plot) {\n\tp.title = t.label\n}\n\n// A Stat transforms a table.Grouping.\ntype Stat interface {\n\tF(table.Grouping) table.Grouping\n}\n\n// Stat applies each of stats in order to p's data.\n//\n// TODO: Perform scale transforms before applying stats.\nfunc (p *Plot) Stat(stats ...Stat) *Plot {\n\tdata := p.Data()\n\tfor _, stat := range stats {\n\t\tdata = stat.F(data)\n\t}\n\treturn p.SetData(data)\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/gg/render.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage gg\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"reflect\"\n\t\"strings\"\n\n\t\"github.com/aclements/go-gg/generic/slice\"\n\t\"github.com/aclements/go-gg/table\"\n\t\"github.com/ajstarks/svgo\"\n)\n\n// fontSize is the font size in pixels.\n//\n// TODO: Theme.\nconst fontSize float64 = 14\n\n// facetLabelHeight is the height of facet labels, as a multiple of\n// the font height.\n//\n// TODO: Should this be a multiple of fontSize, em height, leading?\n// Currently it's leading.\n//\n// TODO: Theme.\nconst facetLabelHeight = 1.3\n\nconst xTickSep = 5 // TODO: Theme.\n\nconst yTickSep = 5 // TODO: Theme.\n\n// plotMargins returns the top, right, bottom, and left margins for a\n// plot of the given width and height.\n//\n// By default, this adds a 5% margin based on the smaller of width and\n// height. This ensures that (with automatic scales), the extremes of\n// the data and its tick labels don't appear right at the edge of the\n// plot area.\n//\n// TODO: Theme.\nvar plotMargins = func(w, h float64) (t, r, b, l float64) {\n\tmargin := 0.05 * math.Min(w, h)\n\treturn margin, margin, margin, margin\n}\n\nfunc (p *Plot) WriteSVG(w io.Writer, width, height int) error {\n\t// TODO: Legend, title.\n\n\t// TODO: Check if the same scaler is used for multiple\n\t// aesthetics with conflicting rangers. Alternatively, if we\n\t// just computed the scaled data eagerly here, it wouldn't\n\t// matter if the same Scaler was used for multiple things\n\t// because we would just change its Ranger between scaling\n\t// different data. 
We could still optimize for get/get1 by\n\t// specifying whether we care about all of the values or just\n\t// the first when fetching the scaledData (arguably this\n\t// should also affect scale training, so this is necessary\n\t// anyway).\n\n\t// TODO: Rather than finding these scales here and giving them\n\t// Rangers, we could use special \"Width\"/\"Height\" Rangers and\n\t// assign them much earlier (e.g., when they are Used). We\n\t// could then either find all of the scales that have those\n\t// Rangers and configure them at this point, or we could pass\n\t// the renderEnv in when ranging.\n\n\t// TODO: Default ranges for other things like color.\n\n\t// TODO: Expose the layout so a package user can put together\n\t// multiple Plots.\n\t//\n\t// What if the user wants multiple aligned plots, but as\n\t// *different* images (e.g., flipping from one slide to\n\t// another)?\n\n\t// TODO: Let the user alternatively specify the width and\n\t// height of the subplots, rather than the whole plot.\n\n\t// TODO: Automatic aspect ratio by averaging slopes.\n\n\t// TODO: Custom tick breaks.\n\n\t// TODO: Make sure *all* Scalers have Rangers or the user will\n\t// get confusing panics.\n\n\t// TODO: If the user restricts, say, the X range, should that\n\t// only train the Y axis on what's in the X range?\n\n\t// Assign default Rangers to scales that don't have them.\n\t//\n\t// TODO: Do this on a clone of the scale so this doesn't\n\t// persist.\n\tfor aes, scales := range p.scales {\n\t\tif aes == \"x\" || aes == \"y\" {\n\t\t\t// We'll assign these when we render each\n\t\t\t// subplot.\n\t\t\tcontinue\n\t\t}\n\t\tfor _, scale := range scales.scales {\n\t\t\tif scale.Ranger(nil) == nil {\n\t\t\t\tscale.Ranger(defaultRanger(aes))\n\t\t\t}\n\t\t}\n\t}\n\n\t// Find all of the subplots and subdivide the marks.\n\t//\n\t// TODO: If a mark was done in a parent subplot, broadcast it\n\t// to all child leafs of that subplot.\n\tsubplots := 
make(map[*subplot]*eltSubplot)\n\tplotElts := []plotElt{}\n\tfor _, mark := range p.marks {\n\t\tsubmarks := make(map[*eltSubplot]plotMark)\n\t\tfor _, gid := range mark.groups {\n\t\t\tsubplot := subplotOf(gid)\n\t\t\telt := subplots[subplot]\n\t\t\tif elt == nil {\n\t\t\t\telt = newEltSubplot(subplot)\n\t\t\t\tplotElts = append(plotElts, elt)\n\t\t\t\tsubplots[subplot] = elt\n\t\t\t}\n\n\t\t\tsubmark := submarks[elt]\n\t\t\tsubmark.m = mark.m\n\t\t\tsubmark.groups = append(submark.groups, gid)\n\t\t\tsubmarks[elt] = submark\n\t\t}\n\t\tfor subplot, submark := range submarks {\n\t\t\tsubplot.marks = append(subplot.marks, submark)\n\t\t}\n\t}\n\t// Subdivide the scales.\n\tfor sk := range p.scaleSet {\n\t\tsubplot := subplotOf(sk.gid)\n\t\telt := subplots[subplot]\n\t\tif elt == nil {\n\t\t\tcontinue\n\t\t}\n\t\tss := elt.scales[sk.aes]\n\t\tif ss == nil {\n\t\t\tss = make(map[Scaler]bool)\n\t\t\telt.scales[sk.aes] = ss\n\t\t}\n\t\tss[sk.scale] = true\n\t}\n\n\t// Add ticks and facet labels.\n\tplotElts = addSubplotLabels(plotElts)\n\n\t// Add axis labels and title.\n\tvar xlabel, ylabel string\n\tif l, ok := p.axisLabels[\"x\"]; ok {\n\t\txlabel = l\n\t} else {\n\t\txlabel = strings.Join(slice.Nub(p.autoAxisLabels[\"x\"]).([]string), \"\\n\")\n\t}\n\tif l, ok := p.axisLabels[\"y\"]; ok {\n\t\tylabel = l\n\t} else {\n\t\tylabel = strings.Join(slice.Nub(p.autoAxisLabels[\"y\"]).([]string), \"\\n\")\n\t}\n\tplotElts = addAxisLabels(plotElts, p.title, xlabel, ylabel)\n\n\t// Compute plot element layout.\n\tlayout := layoutPlotElts(plotElts)\n\n\t// Perform layout. There's a cyclic dependency involving tick\n\t// labels here: the tick labels depend on how many ticks there\n\t// are, how many ticks there are depends on the size of the\n\t// plot, the size of the plot depends on its surrounding\n\t// content, and the size of the surrounding content depends on\n\t// the tick labels. 
There may not be a fixed point here, so we\n\t// compromise around the number of ticks.\n\t//\n\t// 1) Lay out the graphs without ticks.\n\tlayout.SetLayout(0, 0, float64(width), float64(height))\n\t// 2) Compute the number of ticks and tick labels for each\n\t// tick element.\n\tfor _, elt := range plotElts {\n\t\tif elt, ok := elt.(*eltTicks); ok {\n\t\t\telt.computeTicks()\n\t\t}\n\t}\n\t// 3) Re-layout the plot and stick with the ticks we computed.\n\tlayout.SetLayout(0, 0, float64(width), float64(height))\n\n\t// Draw.\n\tsvg := svg.New(w)\n\tsvg.Start(width, height, fmt.Sprintf(`font-size=\"%.6gpx\" font-family=\"Roboto,&quot;Helvetica Neue&quot;,Helvetica,Arial,sans-serif\"`, fontSize))\n\tdefer svg.End()\n\n\t// Render each plot element.\n\tr := &eltRender{svg, 0}\n\tfor _, elt := range plotElts {\n\t\telt.render(r)\n\t}\n\n\treturn nil\n}\n\nfunc (e *eltSubplot) render(r *eltRender) {\n\tsvg := r.svg\n\tx, y, w, h := e.Layout()\n\tm := e.plotMargins\n\n\t// Round the bounds rectangle in.\n\tx2i, y2i := int(x+w), int(y+h)\n\txi, yi := int(math.Ceil(x)), int(math.Ceil(y))\n\twi, hi := x2i-xi, y2i-yi\n\n\t// Create clip region for plot area.\n\tclipId, clipRef := r.genid(\"clip\")\n\tsvg.ClipPath(`id=\"` + clipId + `\"`)\n\tsvg.Rect(xi, yi, wi, hi)\n\tsvg.ClipEnd()\n\tsvg.Group(`clip-path=\"` + clipRef + `\"`)\n\n\t// Set scale ranges.\n\txRanger := NewFloatRanger(float64(xi)+m.l, float64(x2i)-m.r)\n\tyRanger := NewFloatRanger(float64(y2i)-m.b, float64(yi)+m.t)\n\tfor s := range e.scales[\"x\"] {\n\t\ts.Ranger(xRanger)\n\t}\n\tfor s := range e.scales[\"y\"] {\n\t\ts.Ranger(yRanger)\n\t}\n\n\t// Render grid.\n\trenderBackground(svg, xi, yi, wi, hi)\n\tfor s := range e.scales[\"x\"] {\n\t\trenderGrid(svg, 'x', s, e.xTicks.ticks[s], yi, y2i)\n\t}\n\tfor s := range e.scales[\"y\"] {\n\t\trenderGrid(svg, 'y', s, e.yTicks.ticks[s], xi, x2i)\n\t}\n\n\t// Create rendering environment.\n\tenv := &renderEnv{\n\t\tcache: make(map[renderCacheKey]table.Slice),\n\t\tarea:  
[4]float64{float64(xi), float64(yi), float64(wi), float64(hi)},\n\t}\n\n\t// Render marks.\n\tfor _, mark := range e.marks {\n\t\tfor _, gid := range mark.groups {\n\t\t\tenv.gid = gid\n\t\t\tmark.m.mark(env, svg)\n\t\t}\n\t}\n\n\t// End clip region.\n\tsvg.Gend()\n\n\t// Draw border and scale ticks.\n\t//\n\t// TODO: Theme.\n\n\t// Render border.\n\tsvg.Path(fmt.Sprintf(\"M%d %dV%dH%d\", xi, yi, y2i, x2i), \"stroke:#888; fill:none; stroke-width:2\") // TODO: Theme.\n\n\t// Render scale ticks.\n\tfor s := range e.scales[\"x\"] {\n\t\trenderScale(svg, 'x', s, e.xTicks.ticks[s], y2i)\n\t}\n\tfor s := range e.scales[\"y\"] {\n\t\trenderScale(svg, 'y', s, e.yTicks.ticks[s], xi)\n\t}\n}\n\n// TODO: Use shape-rendering: crispEdges?\n\nfunc renderBackground(svg *svg.SVG, x, y, w, h int) {\n\tsvg.Rect(x, y, w, h, \"fill:#eee\") // TODO: Theme.\n}\n\nfunc renderGrid(svg *svg.SVG, dir rune, scale Scaler, ticks plotEltTicks, start, end int) {\n\tmajor := mapMany(scale, ticks.major).([]float64)\n\n\tr := func(x float64) float64 {\n\t\t// Round to nearest N.\n\t\treturn math.Floor(x + 0.5)\n\t}\n\n\tvar path []string\n\tfor _, p := range major {\n\t\tif dir == 'x' {\n\t\t\tpath = append(path, fmt.Sprintf(\"M%.6g %dv%d\", r(p), start, end-start))\n\t\t} else {\n\t\t\tpath = append(path, fmt.Sprintf(\"M%d %.6gh%d\", start, r(p), end-start))\n\t\t}\n\t}\n\n\tsvg.Path(wrapPath(strings.Join(path, \"\")), \"stroke: #fff; stroke-width:2\") // TODO: Theme.\n}\n\nfunc renderScale(svg *svg.SVG, dir rune, scale Scaler, ticks plotEltTicks, pos int) {\n\tconst length float64 = 4 // TODO: Theme\n\n\tvar path bytes.Buffer\n\thave := map[float64]bool{}\n\tfor _, t := range []struct {\n\t\tlength float64\n\t\ts      table.Slice\n\t}{\n\t\t{length * 2, ticks.major},\n\t\t{length, ticks.minor},\n\t} {\n\t\tticks := mapMany(scale, t.s).([]float64)\n\n\t\tr := func(x float64) float64 {\n\t\t\t// Round to nearest N.\n\t\t\treturn math.Floor(x + 0.5)\n\t\t}\n\t\tfor _, p := range ticks {\n\t\t\tp = 
r(p)\n\t\t\tif have[p] {\n\t\t\t\t// Avoid overplotting the same tick\n\t\t\t\t// marks.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\thave[p] = true\n\t\t\tif dir == 'x' {\n\t\t\t\tfmt.Fprintf(&path, \"M%.6g %dv%.6g\", p, pos, -t.length)\n\t\t\t} else {\n\t\t\t\tfmt.Fprintf(&path, \"M%d %.6gh%.6g\", pos, p, t.length)\n\t\t\t}\n\t\t}\n\n\t}\n\tsvg.Path(wrapPath(path.String()), \"stroke:#888; stroke-width:2\") // TODO: Theme\n}\n\nfunc (e *eltTicks) render(r *eltRender) {\n\tsvg := r.svg\n\tx, y, w, _ := e.Layout()\n\tfor s := range e.scales() {\n\t\tpos := e.mapTicks(s, e.ticks[s].major)\n\t\tfor i, label := range e.ticks[s].labels {\n\t\t\ttick := pos[i]\n\t\t\tif e.axis == 'x' {\n\t\t\t\tsvg.Text(int(tick), int(y+xTickSep), label, `text-anchor=\"middle\" dy=\"1em\" fill=\"#666\"`) // TODO: Theme.\n\t\t\t} else {\n\t\t\t\tsvg.Text(int(x+w-yTickSep), int(tick), label, `text-anchor=\"end\" dy=\".3em\" fill=\"#666\"`)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (e *eltLabel) render(r *eltRender) {\n\tsvg := r.svg\n\tx, y, w, h := e.Layout()\n\n\t// Clip to label region.\n\tclipId, clipRef := r.genid(\"clip\")\n\tsvg.ClipPath(`id=\"` + clipId + `\"`)\n\tsvg.Rect(int(x), int(y), int(w), int(h))\n\tsvg.ClipEnd()\n\tsvg.Group(`clip-path=\"` + clipRef + `\"`)\n\tdefer svg.Gend()\n\n\tif e.fill != \"none\" {\n\t\tsvg.Rect(int(x), int(y), int(w), int(h), \"fill: \"+e.fill)\n\t}\n\t// Vertical centering is very poorly\n\t// supported. 
dy is the best chance.\n\tstyle := `text-anchor=\"middle\" dy=\".3em\"`\n\tswitch e.side {\n\tcase 'l':\n\t\tstyle += fmt.Sprintf(` transform=\"rotate(-90 %d %d)\"`, int(x+w/2), int(y+h/2))\n\tcase 'r':\n\t\tstyle += fmt.Sprintf(` transform=\"rotate(90 %d %d)\"`, int(x+w/2), int(y+h/2))\n\t}\n\tsvg.Text(int(x+w/2), int(y+h/2), e.label, style)\n}\n\nfunc (e *eltPadding) render(r *eltRender) {\n}\n\ntype renderEnv struct {\n\tgid   table.GroupID\n\tcache map[renderCacheKey]table.Slice\n\tarea  [4]float64\n}\n\ntype renderCacheKey struct {\n\tsd  *scaledData\n\tgid table.GroupID\n}\n\n// scaledData is a key for retrieving scaled data from a renderEnv. It\n// is the result of using a binding and can be thought of as a lazy\n// representation of the visually-mapped data that becomes available\n// once all of the scales have been trained.\ntype scaledData struct {\n\tseqs map[table.GroupID]scaledSeq\n}\n\ntype scaledSeq struct {\n\tseq    table.Slice\n\tscaler Scaler\n}\n\nfunc (env *renderEnv) get(sd *scaledData) table.Slice {\n\tcacheKey := renderCacheKey{sd, env.gid}\n\tif mapped := env.cache[cacheKey]; mapped != nil {\n\t\treturn mapped\n\t}\n\n\tv := sd.seqs[env.gid]\n\tmapped := mapMany(v.scaler, v.seq)\n\tenv.cache[cacheKey] = mapped\n\treturn mapped\n}\n\nfunc (env *renderEnv) getFirst(sd *scaledData) interface{} {\n\tif mapped := env.cache[renderCacheKey{sd, env.gid}]; mapped != nil {\n\t\tmv := reflect.ValueOf(mapped)\n\t\tif mv.Len() == 0 {\n\t\t\treturn nil\n\t\t}\n\t\treturn mv.Index(0).Interface()\n\t}\n\n\tv := sd.seqs[env.gid]\n\trv := reflect.ValueOf(v.seq)\n\tif rv.Len() == 0 {\n\t\treturn nil\n\t}\n\treturn v.scaler.Map(rv.Index(0).Interface())\n}\n\nfunc (env *renderEnv) Area() (x, y, w, h float64) {\n\treturn env.area[0], env.area[1], env.area[2], env.area[3]\n}\n\nfunc (env *renderEnv) Size() (w, h float64) {\n\treturn env.area[2], env.area[3]\n}\n\nfunc round(x float64) int {\n\treturn int(math.Floor(x + 0.5))\n}\n\n// wrapPath wraps path data p to 
avoid exceeding SVG's recommended\n// line length limit of 255 characters.\nfunc wrapPath(p string) string {\n\tconst width = 70\n\tif len(p) <= width {\n\t\treturn p\n\t}\n\t// Chop up p until we get below the width limit.\n\tparts := make([]string, 0, 16)\n\tfor len(p) > width {\n\t\t// Find the last command or space before exceeding width.\n\t\tlastCmd, lastSpace := 0, 0\n\t\tfor i, ch := range p {\n\t\t\tif i >= width && (lastCmd != 0 || lastSpace != 0) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' {\n\t\t\t\tlastCmd = i\n\t\t\t} else if ch == ' ' {\n\t\t\t\tlastSpace = i\n\t\t\t}\n\t\t}\n\t\tsplit := len(p)\n\t\t// Prefer splitting at commands, but take spaces in\n\t\t// case it's a huge command.\n\t\tif lastCmd != 0 {\n\t\t\tsplit = lastCmd\n\t\t} else if lastSpace != 0 {\n\t\t\tsplit = lastSpace\n\t\t}\n\t\tparts, p = append(parts, p[:split]), p[split:]\n\t}\n\tif len(p) > 0 {\n\t\tparts = append(parts, p)\n\t}\n\treturn strings.Join(parts, \"\\n\")\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/gg/scale.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage gg\n\nimport (\n\t\"fmt\"\n\t\"image/color\"\n\t\"math\"\n\t\"reflect\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/aclements/go-gg/generic\"\n\t\"github.com/aclements/go-gg/generic/slice\"\n\t\"github.com/aclements/go-gg/palette\"\n\t\"github.com/aclements/go-gg/table\"\n\t\"github.com/aclements/go-moremath/scale\"\n)\n\n// Continuous -> Interpolatable? Definitely.\n//\n// Continuous -> Discrete? Can always discretize the input either in\n// value order or in index order. In this case the transform (linear,\n// log, etc) doesn't matter as long as it's order-preserving. OTOH, a\n// continuous input scale can be asked to map *any* value of its input\n// type, but if I do this I can only map values that were trained.\n// That suggests that I have to just bin the range to do this mapping.\n//\n// Discrete -> Interpolatable? Pick evenly spaced values on [0,1].\n//\n// Discrete -> Discrete? Definitely. Cycle the range if it's not long\n// enough. If the input range is a VarNominal, concatenate the\n// sequences and use index ordering.\n//\n// It's not really \"continuous\", it's more specifically cardinal.\n\n// TODO: time.Time and time.Duration scalers. For time.Duration,\n// handle sub-second as (10 seconds)^n (for n <= 0), handle seconds to\n// minutes at multiples of 10 seconds, and likewise minutes to hours\n// as multiples of 10 minutes, and handle hours as (10 hours)^n.\n\n// XXX\n//\n// A Scaler can be cardinal, discrete, or identity.\n//\n// A cardinal Scaler has a VarCardinal input domain. If its output\n// range is continuous, it maps an interval over the input to an\n// interval of the output (possibly through a transformation such as a\n// logarithm). 
If its output range is discrete, the input is\n// discretized in value order and it acts like a discrete scale.\n//\n// XXX The cardinal -> discrete rule means we need to keep all of the\n// input data, rather than just its bounds, just in case the range is\n// discrete. Maybe it should just be a bucketing rule?\n//\n// A discrete Scaler has a VarNominal input domain. If the input is\n// VarOrdinal, its order is used; otherwise, index order is imposed.\n// If the output range is continuous, a discrete Scaler maps its input\n// to the centers of equal sub-intervals of [0, 1] and then applies\n// the Ranger. If the output range is discrete, the Scaler maps the\n// Nth input level to the N%len(range)th output value.\n//\n// An identity Scaler ignores its input domain and output range and\n// uses an identity function for mapping input to output. This is\n// useful for specifying aesthetics directly, such as color or size,\n// and is especially useful for constant Vars.\n//\n// XXX Should identity Scalers map numeric types to float64? Maybe it\n// should depend on the range type of the ranger?\n//\n// XXX Arrange documentation as X -> Y?\ntype Scaler interface {\n\t// XXX\n\n\tExpandDomain(table.Slice)\n\n\t// Ranger sets this Scaler's output range if r is non-nil and\n\t// returns the previous range. If a scale's Ranger is nil, it\n\t// will be assigned a default Ranger based on its aesthetic\n\t// when the Plot is rendered.\n\tRanger(r Ranger) Ranger\n\n\t// XXX Should RangeType be implied by the aesthetic?\n\t//\n\t// XXX Should this be a method of Ranger instead?\n\tRangeType() reflect.Type\n\n\t// XXX\n\t//\n\t// x must be of the same type as the values in the domain Var.\n\t//\n\t// XXX Or should this take a slice? Or even a Var? That would\n\t// also eliminate RangeType(), though then Map would need to\n\t// know how to make the right type of return slice. 
Unless we\n\t// pushed slice mapping all the way to Ranger.\n\t//\n\t// XXX We could eliminate ExpandDomain if the caller was\n\t// required to pass everything to this at once and this did\n\t// the scale training. That would also make it easy to\n\t// implement the cardinal -> discrete by value order rule.\n\t// This would probably also make Map much faster.\n\t//\n\t// XXX If x is Unscaled, Map must only apply the ranger.\n\tMap(x interface{}) interface{}\n\n\t// Ticks returns a set of \"nice\" major and minor tick marks\n\t// spanning this Scaler's domain. The returned tick locations\n\t// are values in this Scaler's domain type in increasing\n\t// order. labels[i] gives the label of the major tick at\n\t// major[i]. The minor ticks are a superset of the major\n\t// ticks.\n\t//\n\t// max and pred constrain the ticks returned by Ticks. If\n\t// possible, Ticks returns the largest set of ticks such that\n\t// there are no more than max major ticks and the ticks\n\t// satisfy pred. Both are hints, since for some scale types\n\t// there's no clear way to reduce the number of ticks.\n\t//\n\t// pred should return true if the given set of ticks is\n\t// acceptable. pred must be \"monotonic\" in the following\n\t// sense: if pred is true for a given set of ticks, it must be\n\t// true for any subset of those ticks and if pred is false for\n\t// a given set of ticks, it must be false for any superset of\n\t// those ticks. In other words, pred should return false if\n\t// there are \"too many\" ticks or they are \"too close\n\t// together\". If pred is nil, it is assumed to always be\n\t// satisfied.\n\t//\n\t// If no tick marks can be produced (for example, there are no\n\t// values in this Scaler's domain or the predicate cannot be\n\t// satisfied), Ticks returns nil, nil, nil.\n\t//\n\t// TODO: Should this return ticks in the input space, the\n\t// intermediate space, or the output space? moremath returns\n\t// values in the input space. 
Input space values doesn't work\n\t// for discrete scales if I want the ticks between values.\n\t// Intermediate space works for continuous and discrete\n\t// inputs, but not for discrete ranges (maybe that's okay) and\n\t// it's awkward for a caller to do anything with an\n\t// intermediate space value. Output space doesn't work with\n\t// this API because I change the plot location in the course\n\t// of layout without recomputing ticks. However, output space\n\t// could work if Scaler exposed tick levels, since I could\n\t// save the computed tick level across a re-layout and\n\t// recompute the output space ticks from that.\n\tTicks(max int, pred func(major, minor table.Slice, labels []string) bool) (major, minor table.Slice, labels []string)\n\n\t// SetFormatter sets the formatter for values on this scale.\n\t//\n\t// f may be nil, which makes this Scaler use the default\n\t// formatting. Otherwise, f must be a func(T) string where T\n\t// is convertible from the Scaler's input type (note that this\n\t// is weaker than typical Go function calls, which require\n\t// that the argument be assignable; this makes it possible to\n\t// use general-purpose functions like func(float64) string\n\t// even for more specific input types).\n\tSetFormatter(f interface{})\n\n\tCloneScaler() Scaler\n}\n\ntype ContinuousScaler interface {\n\tScaler\n\n\t// TODO: There are two variations on min/max. 1) We can force\n\t// the min/max, even if there's data beyond it. 2) We can say\n\t// cap the scale to some min/max, but a smaller range is okay.\n\t// Currently we can't express 2.\n\n\t// SetMin and SetMax set the minimum and maximum values of\n\t// this Scalar's domain and return the Scalar. If v is nil, it\n\t// unsets the bound.\n\t//\n\t// v must be convertible to the Scaler's domain type. For\n\t// example, if this is a linear scale, v can be of any\n\t// numerical type. 
Unlike ExpandDomain, these do not set the\n\t// Scaler's domain type.\n\tSetMin(v interface{}) ContinuousScaler\n\tSetMax(v interface{}) ContinuousScaler\n\n\t// TODO: Should Include work on any Scaler?\n\n\t// Include requires that v be included in this Scaler's\n\t// domain. Like SetMin/SetMax, this can expand Scaler's\n\t// domain, but unlike SetMin/SetMax, this does not restrict\n\t// it. If v is nil, it does nothing.\n\t//\n\t// v must be convertible to the Scaler's domain type. Unlike\n\t// ExpandDomain, this does not set the Scaler's domain type.\n\tInclude(v interface{}) ContinuousScaler\n}\n\n// Unscaled represents a value that should not be scaled, but instead\n// mapped directly to the output range. For continuous scales, this\n// should be a value between 0 and 1. For discrete scales, this should\n// be an integral value.\n//\n// TODO: This is confusing for opacity and size because it *doesn't*\n// specify an exact opacity or size ratio since their default rangers\n// aren't [0,1]. Maybe Unscaled should bypass scaling altogether (and\n// only work if the range type is float64).\ntype Unscaled float64\n\nvar float64Type = reflect.TypeOf(float64(0))\nvar colorType = reflect.TypeOf((*color.Color)(nil)).Elem()\n\nvar canCardinal = map[reflect.Kind]bool{\n\treflect.Float32: true,\n\treflect.Float64: true,\n\treflect.Int:     true,\n\treflect.Int8:    true,\n\treflect.Int16:   true,\n\treflect.Int32:   true,\n\treflect.Int64:   true,\n\treflect.Uint:    true,\n\treflect.Uintptr: true,\n\treflect.Uint8:   true,\n\treflect.Uint16:  true,\n\treflect.Uint32:  true,\n\treflect.Uint64:  true,\n}\n\nfunc isCardinal(k reflect.Kind) bool {\n\t// XXX Move this to generic.IsCardinalR and rename CanOrderR\n\t// to IsOrderedR. Does complex count? It supports most\n\t// arithmetic operators. Maybe cardinal is a plot concept and\n\t// not a generic concept? 
If sort.Interface influences this,\n\t// this may need to be a question about a Slice, not a\n\t// reflect.Kind.\n\treturn canCardinal[k]\n}\n\ntype defaultScale struct {\n\tscale Scaler\n\n\t// Pre-instantiation state.\n\tr         Ranger\n\tformatter interface{}\n}\n\nfunc (s *defaultScale) String() string {\n\treturn fmt.Sprintf(\"default (%s)\", s.scale)\n}\n\nfunc (s *defaultScale) ExpandDomain(v table.Slice) {\n\tif s.scale == nil {\n\t\tvar err error\n\t\ts.scale, err = DefaultScale(v)\n\t\tif err != nil {\n\t\t\tpanic(&generic.TypeError{reflect.TypeOf(v), nil, err.Error()})\n\t\t}\n\t\ts.instantiate()\n\t}\n\ts.scale.ExpandDomain(v)\n}\n\nfunc (s *defaultScale) ensure() Scaler {\n\tif s.scale == nil {\n\t\ts.scale = NewLinearScaler()\n\t\ts.instantiate()\n\t}\n\treturn s.scale\n}\n\n// instantiate applies the pre-instantiation state to the newly\n// instantiated s.scale and clears the state in s.\nfunc (s *defaultScale) instantiate() {\n\tif s.r != nil {\n\t\ts.scale.Ranger(s.r)\n\t\ts.r = nil\n\t}\n\tif s.formatter != nil {\n\t\ts.scale.SetFormatter(s.formatter)\n\t\ts.formatter = nil\n\t}\n}\n\nfunc (s *defaultScale) Ranger(r Ranger) Ranger {\n\t// If there's no underlying scale yet, record the Ranger\n\t// locally rather than trying to guess a scale. 
This way users\n\t// can easily set Rangers before training any data.\n\tif s.scale == nil {\n\t\told := s.r\n\t\ts.r = r\n\t\treturn old\n\t}\n\treturn s.scale.Ranger(r)\n}\n\nfunc (s *defaultScale) RangeType() reflect.Type {\n\tif s.scale == nil {\n\t\treturn s.r.RangeType()\n\t}\n\treturn s.scale.RangeType()\n}\n\nfunc (s *defaultScale) Map(x interface{}) interface{} {\n\treturn s.ensure().Map(x)\n}\n\nfunc (s *defaultScale) Ticks(max int, pred func(major, minor table.Slice, labels []string) bool) (major, minor table.Slice, labels []string) {\n\treturn s.ensure().Ticks(max, pred)\n}\n\nfunc (s *defaultScale) SetFormatter(f interface{}) {\n\tif s.scale == nil {\n\t\ts.formatter = f\n\t\treturn\n\t}\n\ts.scale.SetFormatter(f)\n}\n\nfunc (s *defaultScale) CloneScaler() Scaler {\n\tif s.scale == nil {\n\t\treturn &defaultScale{nil, s.r, s.formatter}\n\t}\n\treturn &defaultScale{s.scale.CloneScaler(), nil, s.formatter}\n}\n\nfunc DefaultScale(seq table.Slice) (Scaler, error) {\n\t// Handle common case types.\n\tswitch seq.(type) {\n\tcase []float64, []int, []uint:\n\t\treturn NewLinearScaler(), nil\n\n\tcase []string:\n\t\t// TODO: Ordinal scale\n\n\tcase []time.Time:\n\t\treturn NewTimeScaler(), nil\n\t}\n\n\trt := reflect.TypeOf(seq).Elem()\n\trtk := rt.Kind()\n\n\tswitch {\n\tcase rt.Implements(colorType):\n\t\t// For things that are already visual values, use an\n\t\t// identity scale.\n\t\treturn NewIdentityScale(), nil\n\n\t\t// TODO: GroupAuto needs to make similar\n\t\t// cardinal/ordinal/nominal decisions. 
Deduplicate\n\t\t// these better.\n\tcase isCardinal(rtk):\n\t\treturn NewLinearScaler(), nil\n\n\tcase slice.CanSort(seq):\n\t\treturn NewOrdinalScale(), nil\n\n\tcase rt.Comparable():\n\t\t// TODO: Nominal scale\n\t\tpanic(\"not implemented\")\n\t}\n\n\treturn nil, fmt.Errorf(\"no default scale type for %T\", seq)\n}\n\n// defaultRanger returns the default Ranger for the given aesthetic.\n// If aes is an axis aesthetic, it returns nil (since these Rangers\n// are assigned at render time). If aes is unknown, it panics.\nfunc defaultRanger(aes string) Ranger {\n\tswitch aes {\n\tcase \"x\", \"y\":\n\t\treturn nil\n\n\tcase \"stroke\", \"fill\":\n\t\treturn &defaultColorRanger{}\n\n\tcase \"opacity\":\n\t\treturn NewFloatRanger(0.1, 1)\n\n\tcase \"size\":\n\t\t// Default to ranging between 1% and 10% of the\n\t\t// minimum plot dimension.\n\t\treturn NewFloatRanger(0.01, 0.1)\n\t}\n\n\tpanic(fmt.Sprintf(\"unknown aesthetic %q\", aes))\n}\n\n// TODO: I'd like to remove identity scales and expose only Unscaled,\n// but I use identity scales for physical types like color.Color right\n// now. Probably that should bypass Scaler altogether.\n\nfunc NewIdentityScale() Scaler {\n\treturn &identityScale{}\n}\n\ntype identityScale struct {\n\trangeType reflect.Type\n}\n\nfunc (s *identityScale) ExpandDomain(v table.Slice) {\n\ts.rangeType = reflect.TypeOf(v).Elem()\n}\n\nfunc (s *identityScale) RangeType() reflect.Type {\n\treturn s.rangeType\n}\n\nfunc (s *identityScale) Ranger(r Ranger) Ranger        { return nil }\nfunc (s *identityScale) Map(x interface{}) interface{} { return x }\n\nfunc (s *identityScale) Ticks(max int, pred func(major, minor table.Slice, labels []string) bool) (major, minor table.Slice, labels []string) {\n\treturn nil, nil, nil\n}\n\nfunc (s *identityScale) SetFormatter(f interface{}) {}\n\nfunc (s *identityScale) CloneScaler() Scaler {\n\ts2 := *s\n\treturn &s2\n}\n\n// NewLinearScaler returns a continuous linear scale. 
The domain must\n// be a VarCardinal.\n//\n// XXX If I return a Scaler, I can't have methods for setting fixed\n// bounds and such. I don't really want to expose the whole type.\n// Maybe a sub-interface for continuous Scalers?\nfunc NewLinearScaler() ContinuousScaler {\n\t// TODO: Control over base.\n\treturn &moremathScale{\n\t\tmin:     math.NaN(),\n\t\tmax:     math.NaN(),\n\t\tdataMin: math.NaN(),\n\t\tdataMax: math.NaN(),\n\t}\n}\n\nfunc NewLogScaler(base int) ContinuousScaler {\n\treturn &moremathScale{\n\t\tmin:     math.NaN(),\n\t\tmax:     math.NaN(),\n\t\tbase:    base,\n\t\tdataMin: math.NaN(),\n\t\tdataMax: math.NaN(),\n\t}\n}\n\ntype moremathScale struct {\n\tr Ranger\n\tf interface{}\n\n\tdomainType       reflect.Type\n\tbase             int\n\tmin, max         float64\n\tdataMin, dataMax float64\n}\n\nfunc (s *moremathScale) String() string {\n\tif s.base > 0 {\n\t\treturn fmt.Sprintf(\"log [%d,%g,%g] => %s\", s.base, s.min, s.max, s.r)\n\t}\n\treturn fmt.Sprintf(\"linear [%g,%g] => %s\", s.min, s.max, s.r)\n}\n\nfunc (s *moremathScale) ExpandDomain(vs table.Slice) {\n\tif s.domainType == nil {\n\t\ts.domainType = reflect.TypeOf(vs).Elem()\n\t}\n\n\tvar data []float64\n\tslice.Convert(&data, vs)\n\tmin, max := s.dataMin, s.dataMax\n\tfor _, v := range data {\n\t\tif math.IsNaN(v) || math.IsInf(v, 0) {\n\t\t\tcontinue\n\t\t}\n\t\tif v < min || math.IsNaN(min) {\n\t\t\tmin = v\n\t\t}\n\t\tif v > max || math.IsNaN(max) {\n\t\t\tmax = v\n\t\t}\n\t}\n\ts.dataMin, s.dataMax = min, max\n}\n\nfunc (s *moremathScale) SetMin(v interface{}) ContinuousScaler {\n\tif v == nil {\n\t\ts.min = math.NaN()\n\t\treturn s\n\t}\n\tvfloat := reflect.ValueOf(v).Convert(float64Type).Float()\n\ts.min = vfloat\n\treturn s\n}\n\nfunc (s *moremathScale) SetMax(v interface{}) ContinuousScaler {\n\tif v == nil {\n\t\ts.max = math.NaN()\n\t\treturn s\n\t}\n\tvfloat := reflect.ValueOf(v).Convert(float64Type).Float()\n\ts.max = vfloat\n\treturn s\n}\n\nfunc (s *moremathScale) 
Include(v interface{}) ContinuousScaler {\n\tif v == nil {\n\t\treturn s\n\t}\n\tvfloat := reflect.ValueOf(v).Convert(float64Type).Float()\n\tif math.IsNaN(vfloat) || math.IsInf(vfloat, 0) {\n\t\treturn s\n\t}\n\tif math.IsNaN(s.dataMin) {\n\t\ts.dataMin, s.dataMax = vfloat, vfloat\n\t} else {\n\t\ts.dataMin = math.Min(s.dataMin, vfloat)\n\t\ts.dataMax = math.Max(s.dataMax, vfloat)\n\t}\n\treturn s\n}\n\ntype tickMapper interface {\n\tscale.Ticker\n\tMap(float64) float64\n}\n\nfunc (s *moremathScale) get() tickMapper {\n\tmin, max := s.min, s.max\n\tif min > max {\n\t\tmin, max = max, min\n\t}\n\tif math.IsNaN(min) {\n\t\tmin = s.dataMin\n\t}\n\tif math.IsNaN(max) {\n\t\tmax = s.dataMax\n\t}\n\tif math.IsNaN(min) {\n\t\t// Only possible if both dataMin and dataMax are NaN.\n\t\tmin, max = -1, 1\n\t}\n\tif s.base > 0 {\n\t\tls, err := scale.NewLog(min, max, s.base)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tls.SetClamp(true)\n\t\treturn &ls\n\t}\n\treturn &scale.Linear{\n\t\tMin: min, Max: max,\n\t}\n}\n\nfunc (s *moremathScale) Ranger(r Ranger) Ranger {\n\told := s.r\n\tif r != nil {\n\t\ts.r = r\n\t}\n\treturn old\n}\n\nfunc (s *moremathScale) RangeType() reflect.Type {\n\treturn s.r.RangeType()\n}\n\nfunc (s *moremathScale) Map(x interface{}) interface{} {\n\tls := s.get()\n\tvar scaled float64\n\tswitch x := x.(type) {\n\tcase float64:\n\t\tscaled = ls.Map(x)\n\tcase Unscaled:\n\t\tscaled = float64(x)\n\tdefault:\n\t\tv := reflect.ValueOf(x).Convert(float64Type).Float()\n\t\tscaled = ls.Map(v)\n\t}\n\n\tswitch r := s.r.(type) {\n\tcase ContinuousRanger:\n\t\treturn r.Map(scaled)\n\n\tcase DiscreteRanger:\n\t\t_, levels := r.Levels()\n\t\t// Bin the scaled value into 'levels' bins.\n\t\tlevel := int(scaled * float64(levels))\n\t\tif level < 0 {\n\t\t\tlevel = 0\n\t\t} else if level >= levels {\n\t\t\tlevel = levels - 1\n\t\t}\n\t\treturn r.MapLevel(level, levels)\n\n\tdefault:\n\t\tpanic(\"Ranger must be a ContinuousRanger or 
DiscreteRanger\")\n\t}\n}\n\nfunc (s *moremathScale) Ticks(max int, pred func(major, minor table.Slice, labels []string) bool) (major, minor table.Slice, labels []string) {\n\ttype Stringer interface {\n\t\tString() string\n\t}\n\tif s.domainType == nil {\n\t\t// There are no values and no domain type, so we can't\n\t\t// compute ticks or return slices of the domain type.\n\t\treturn nil, nil, nil\n\t}\n\n\to := scale.TickOptions{Max: max}\n\n\t// If the domain type is integral, don't let the tick level go\n\t// below 0. This is particularly important if the domain type\n\t// is a Stringer since the conversion back to the domain type\n\t// will cut off any fractional part.\n\tswitch s.domainType.Kind() {\n\tcase reflect.Int, reflect.Uint, reflect.Uintptr,\n\t\treflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,\n\t\treflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:\n\t\to.MinLevel, o.MaxLevel = 0, 1000\n\tdefault:\n\t\t// Set bounds for the pred loop below.\n\t\to.MinLevel, o.MaxLevel = -1000, 1000\n\t}\n\tls := s.get()\n\tlevel, ok := o.FindLevel(ls, 0)\n\tif !ok {\n\t\treturn nil, nil, nil\n\t}\n\n\tmkLabels := func(major []float64) []string {\n\t\t// Compute labels.\n\t\tlabels = make([]string, len(major))\n\t\tif s.f != nil {\n\t\t\t// Use custom formatter.\n\t\t\tif f, ok := s.f.(func(float64) string); ok {\n\t\t\t\t// Fast path.\n\t\t\t\tfor i, x := range major {\n\t\t\t\t\tlabels[i] = f(x)\n\t\t\t\t}\n\t\t\t\treturn labels\n\t\t\t}\n\t\t\t// TODO: Type check for better error.\n\t\t\tfv := reflect.ValueOf(s.f)\n\t\t\tat := fv.Type().In(0)\n\t\t\tvar avs [1]reflect.Value\n\t\t\tfor i, x := range major {\n\t\t\t\tavs[0] = reflect.ValueOf(x).Convert(at)\n\t\t\t\trvs := fv.Call(avs[:])\n\t\t\t\tlabels[i] = rvs[0].Interface().(string)\n\t\t\t}\n\t\t\treturn labels\n\t\t}\n\t\tif s.domainType != nil {\n\t\t\tz := reflect.Zero(s.domainType).Interface()\n\t\t\tif _, ok := z.(Stringer); ok {\n\t\t\t\t// Convert the ticks back into the domain 
type\n\t\t\t\t// and use its String method.\n\t\t\t\tfor i, x := range major {\n\t\t\t\t\tv := reflect.ValueOf(x).Convert(s.domainType)\n\t\t\t\t\tlabels[i] = v.Interface().(Stringer).String()\n\t\t\t\t}\n\t\t\t\treturn labels\n\t\t\t}\n\t\t}\n\t\t// Otherwise, just format them as floats.\n\t\tfor i, x := range major {\n\t\t\tlabels[i] = fmt.Sprintf(\"%.6g\", x)\n\t\t}\n\t\treturn labels\n\t}\n\t// Adjust level to satisfy pred.\n\tfor ; level <= o.MaxLevel; level++ {\n\t\tmajorx := ls.TicksAtLevel(level)\n\t\tminorx := ls.TicksAtLevel(level - 1)\n\t\tlabels := mkLabels(majorx.([]float64))\n\n\t\t// Convert to domain type.\n\t\tmajorv := reflect.New(reflect.SliceOf(s.domainType))\n\t\tminorv := reflect.New(reflect.SliceOf(s.domainType))\n\t\tslice.Convert(majorv.Interface(), majorx)\n\t\tslice.Convert(minorv.Interface(), minorx)\n\t\tmajor, minor = majorv.Elem().Interface(), minorv.Elem().Interface()\n\n\t\tif pred == nil || pred(major, minor, labels) {\n\t\t\treturn major, minor, labels\n\t\t}\n\t}\n\tWarning.Printf(\"%s: unable to compute satisfactory ticks, axis will be empty\", s)\n\treturn nil, nil, nil\n}\n\nfunc (s *moremathScale) SetFormatter(f interface{}) {\n\ts.f = f\n}\n\nfunc (s *moremathScale) CloneScaler() Scaler {\n\ts2 := *s\n\treturn &s2\n}\n\n// NewTimeScaler returns a continuous linear scale. 
The domain must\n// be time.Time.\nfunc NewTimeScaler() *timeScale {\n\treturn &timeScale{}\n}\n\ntype timeScale struct {\n\tr                Ranger\n\tf                func(time.Time) string\n\tmin, max         time.Time\n\tdataMin, dataMax time.Time\n}\n\nfunc (s *timeScale) String() string {\n\treturn fmt.Sprintf(\"time [%g,%g] => %s\", s.min, s.max, s.r)\n}\n\nfunc (s *timeScale) ExpandDomain(vs table.Slice) {\n\tvar data []time.Time\n\tslice.Convert(&data, vs)\n\tmin, max := s.dataMin, s.dataMax\n\tfor _, v := range data {\n\t\tif v.Before(min) || min.IsZero() {\n\t\t\tmin = v\n\t\t}\n\t\tif v.After(max) || max.IsZero() {\n\t\t\tmax = v\n\t\t}\n\t}\n\ts.dataMin, s.dataMax = min, max\n}\n\nfunc (s *timeScale) SetMin(v interface{}) ContinuousScaler {\n\ts.min = v.(time.Time)\n\treturn s\n}\n\nfunc (s *timeScale) SetMax(v interface{}) ContinuousScaler {\n\ts.max = v.(time.Time)\n\treturn s\n}\n\nfunc (s *timeScale) Include(v interface{}) ContinuousScaler {\n\ttv := v.(time.Time)\n\tif s.dataMin.IsZero() {\n\t\ts.dataMin, s.dataMax = tv, tv\n\t} else {\n\t\tif tv.Before(s.dataMin) {\n\t\t\ts.dataMin = tv\n\t\t}\n\t\tif tv.After(s.dataMax) {\n\t\t\ts.dataMax = tv\n\t\t}\n\t}\n\treturn s\n}\n\nfunc (s *timeScale) Ranger(r Ranger) Ranger {\n\told := s.r\n\tif r != nil {\n\t\ts.r = r\n\t}\n\treturn old\n}\n\nfunc (s *timeScale) RangeType() reflect.Type {\n\treturn s.r.RangeType()\n}\n\nfunc (s *timeScale) getMinMax() (time.Time, time.Time) {\n\tmin := s.min\n\tif min.IsZero() {\n\t\tmin = s.dataMin\n\t}\n\tmax := s.max\n\tif max.IsZero() {\n\t\tmax = s.dataMax\n\t}\n\treturn min, max\n}\n\nfunc (s *timeScale) Map(x interface{}) interface{} {\n\tmin, max := s.getMinMax()\n\tt := x.(time.Time)\n\tvar scaled float64 = float64(t.Sub(min)) / float64(max.Sub(min))\n\n\tswitch r := s.r.(type) {\n\tcase ContinuousRanger:\n\t\treturn r.Map(scaled)\n\n\tcase DiscreteRanger:\n\t\t_, levels := r.Levels()\n\t\t// Bin the scaled value into 'levels' bins.\n\t\tlevel := int(scaled * 
float64(levels))\n\t\tif level < 0 {\n\t\t\tlevel = 0\n\t\t} else if level >= levels {\n\t\t\tlevel = levels - 1\n\t\t}\n\t\treturn r.MapLevel(level, levels)\n\n\tdefault:\n\t\tpanic(\"Ranger must be a ContinuousRanger or DiscreteRanger\")\n\t}\n}\n\ntype durationTicks time.Duration\n\nfunc (d durationTicks) Next(t time.Time) time.Time {\n\tif d == 0 {\n\t\tpanic(\"invalid zero duration\")\n\t}\n\treturn t.Add(time.Duration(d)).Truncate(time.Duration(d))\n}\n\nvar timeTickerLevels = []struct {\n\tmin  time.Duration\n\tnext func(t time.Time) time.Time\n}{\n\t{time.Minute, durationTicks(time.Minute).Next},\n\t{10 * time.Minute, durationTicks(10 * time.Minute).Next},\n\t{time.Hour, func(t time.Time) time.Time {\n\t\tyear, month, day := t.Date()\n\t\t// N.B. This will skip an hour at some DST transitions.\n\t\treturn time.Date(year, month, day, t.Hour()+1, 0, 0, 0, t.Location())\n\t}},\n\t{6 * time.Hour, func(t time.Time) time.Time {\n\t\tyear, month, day := t.Date()\n\t\t// N.B. This will skip an hour if the DST transition\n\t\t// happens at a multiple of 6 hours.\n\t\treturn time.Date(year, month, day, ((t.Hour()+6)/6)*6, 0, 0, 0, t.Location())\n\t}},\n\t{24 * time.Hour, func(t time.Time) time.Time {\n\t\tyear, month, day := t.Date()\n\t\treturn time.Date(year, month, day+1, 0, 0, 0, 0, t.Location())\n\t}},\n\t{7 * 24 * time.Hour, func(t time.Time) time.Time {\n\t\tyear, month, day := t.Date()\n\t\tloc := t.Location()\n\t\t_, week1 := t.ISOWeek()\n\t\tfor {\n\t\t\tday++\n\t\t\tt = time.Date(year, month, day, 0, 0, 0, 0, loc)\n\t\t\tif _, week2 := t.ISOWeek(); week1 != week2 {\n\t\t\t\treturn t\n\t\t\t}\n\t\t}\n\t}},\n\t{30 * 24 * time.Hour, func(t time.Time) time.Time {\n\t\tyear, month, _ := t.Date()\n\t\treturn time.Date(year, month+1, 1, 0, 0, 0, 0, t.Location())\n\t}},\n\t{365 * 24 * time.Hour, func(t time.Time) time.Time {\n\t\treturn time.Date(t.Year()+1, time.January, 1, 0, 0, 0, 0, t.Location())\n\t}},\n}\n\n// timeTicker calculates the ticks between min and 
max. levels >= 0\n// refer to entries in timeTickerLevels. levels < 0 start with -1 at\n// every 10 seconds and then alternate dividing by 2 and 5. So level\n// -3 is 1s, -9 is 1ms, -12 is 1us, etc.\n// https://play.golang.org/p/xUv4P25Wxi will print the level step\n// sizes.\ntype timeTicker struct {\n\tmin, max time.Time\n}\n\nfunc (t *timeTicker) getNextTick(level int) func(time.Time) time.Time {\n\tif level >= 0 {\n\t\tif level >= len(timeTickerLevels) {\n\t\t\t// TODO: larger ticks should do multiples of\n\t\t\t// the year, like the linear scale does.\n\t\t\tpanic(fmt.Sprintf(\"invalid level %d\", level))\n\t\t}\n\t\treturn timeTickerLevels[level].next\n\t} else {\n\t\texp, double := level/2+1, (level%2 == 0)\n\t\tstep := math.Pow10(exp) * 1e9\n\t\tif double {\n\t\t\tstep = step * 5\n\t\t}\n\t\treturn durationTicks(time.Duration(step)).Next\n\t}\n}\n\nfunc (t *timeTicker) CountTicks(level int) int {\n\tnext := t.getNextTick(level)\n\tvar i int\n\t// N.B. We cut off at 1e5 ticks. If your plot is larger than\n\t// that, you're on your own.\n\tfor x := next(t.min.Add(-1)); !x.After(t.max) && i < 1e5; x = next(x) {\n\t\ti++\n\t}\n\treturn i\n}\n\nfunc (t *timeTicker) TicksAtLevel(level int) interface{} {\n\tvar ticks []time.Time\n\tnext := t.getNextTick(level)\n\tfor x := next(t.min.Add(-1)); !x.After(t.max); x = next(x) {\n\t\tticks = append(ticks, x)\n\t}\n\treturn ticks\n}\n\nfunc (t *timeTicker) GuessLevel() int {\n\tdur := t.max.Sub(t.min)\n\tfor i := len(timeTickerLevels) - 1; i >= 0; i-- {\n\t\tif dur > timeTickerLevels[i].min {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn int(2 * (math.Log10(float64(dur)/1e9) - 2))\n}\n\nfunc (timeTicker) MaxLevel() int {\n\treturn len(timeTickerLevels) - 1\n}\n\nfunc (timeTicker) Label(cur, prev time.Time, level int) string {\n\tdateFmt := \"2006\"\n\tswitch {\n\tcase level < 6:\n\t\tdateFmt = \"2006/1/2\"\n\t\tif !prev.IsZero() {\n\t\t\tif prev.Year() == cur.Year() {\n\t\t\t\tdateFmt = \"Jan 2\"\n\t\t\t\t_, prevweek := 
prev.ISOWeek()\n\t\t\t\t_, curweek := cur.ISOWeek()\n\t\t\t\tif prevweek == curweek {\n\t\t\t\t\tdateFmt = \"Mon\"\n\t\t\t\t\tif prev.YearDay() == cur.YearDay() {\n\t\t\t\t\t\tdateFmt = \"\"\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\tcase level < 7:\n\t\tdateFmt = \"2006/1\"\n\t\tif !prev.IsZero() && prev.Year() == cur.Year() {\n\t\t\tdateFmt = \"Jan\"\n\t\t}\n\t}\n\ttimeFmt := \"\"\n\tswitch {\n\tcase level < -3: // < 1s\n\t\tdigits := (-level - 2) / 2\n\t\ttimeFmt = \"15:04:05.\" + strings.Repeat(\"0\", digits)\n\tcase level < 0: // < 1m\n\t\ttimeFmt = \"15:04:05\"\n\tcase level < 4: // < 1d\n\t\ttimeFmt = \"15:04\"\n\t}\n\treturn cur.Format(strings.TrimSpace(dateFmt + \" \" + timeFmt))\n}\n\nfunc (s *timeScale) Ticks(maxTicks int, pred func(major, minor table.Slice, labels []string) bool) (table.Slice, table.Slice, []string) {\n\tmin, max := s.getMinMax()\n\tticker := &timeTicker{min, max}\n\to := scale.TickOptions{Max: maxTicks, MinLevel: -21, MaxLevel: ticker.MaxLevel()}\n\tlevel, ok := o.FindLevel(ticker, ticker.GuessLevel())\n\tif !ok {\n\t\t// TODO(quentin): Better handling of too-large time range.\n\t\treturn nil, nil, nil\n\t}\n\tmkLabels := func(major []time.Time) []string {\n\t\t// TODO(quentin): Pick a format based on which parts\n\t\t// of the time have changed and are non-zero.\n\t\tlabels := make([]string, len(major))\n\t\tif s.f != nil {\n\t\t\t// Use custom formatter.\n\t\t\tfor i, x := range major {\n\t\t\t\tlabels[i] = s.f(x)\n\t\t\t}\n\t\t\treturn labels\n\t\t}\n\t\tvar prev time.Time\n\t\tfor i, t := range major {\n\t\t\tlabels[i] = ticker.Label(t, prev, level)\n\t\t\tprev = t\n\t\t}\n\t\treturn labels\n\t}\n\tvar majors, minors []time.Time\n\tvar labels []string\n\tfor ; level <= o.MaxLevel; level++ {\n\t\tmajors = ticker.TicksAtLevel(level).([]time.Time)\n\t\tif level > o.MinLevel {\n\t\t\tminors = ticker.TicksAtLevel(level - 1).([]time.Time)\n\t\t}\n\t\tlabels = mkLabels(majors)\n\t\tif pred == nil || pred(majors, minors, labels) 
{\n\t\t\tbreak\n\t\t}\n\t}\n\treturn majors, minors, labels\n}\n\nfunc (s *timeScale) SetFormatter(f interface{}) {\n\ts.f = f.(func(time.Time) string)\n}\n\nfunc (s *timeScale) CloneScaler() Scaler {\n\ts2 := *s\n\treturn &s2\n}\n\n// TODO: The ordinal scale can only work with values it actually sees\n// in the data. It has no sense of the type's actual domain. If the\n// type is an enumerated type, we could fill in intermediate values\n// and the caller could set a min and max for the scale to enumerate\n// between.\n\nfunc NewOrdinalScale() Scaler {\n\treturn &ordinalScale{}\n}\n\ntype ordinalScale struct {\n\tallData []slice.T\n\tr       Ranger\n\tf       interface{}\n\tordered table.Slice\n\tindex   map[interface{}]int\n}\n\nfunc (s *ordinalScale) ExpandDomain(v table.Slice) {\n\t// TODO: Type-check? For example, if I try to use a cardinal\n\t// type for \"Color\" and then a continuous type, this will\n\t// crash confusingly only once Map calls makeIndex and\n\t// NubAppend tries to make a consistently typed slice.\n\ts.allData = append(s.allData, slice.T(v))\n\ts.ordered, s.index = nil, nil\n}\n\nfunc (s *ordinalScale) Ranger(r Ranger) Ranger {\n\told := s.r\n\tif r != nil {\n\t\ts.r = r\n\t}\n\treturn old\n}\n\nfunc (s *ordinalScale) RangeType() reflect.Type {\n\treturn s.r.RangeType()\n}\n\nfunc (s *ordinalScale) makeIndex() {\n\tif s.index != nil {\n\t\treturn\n\t}\n\n\t// Compute ordered data index and cache.\n\ts.ordered = slice.NubAppend(s.allData...)\n\tslice.Sort(s.ordered)\n\tov := reflect.ValueOf(s.ordered)\n\ts.index = make(map[interface{}]int, ov.Len())\n\tfor i, len := 0, ov.Len(); i < len; i++ {\n\t\ts.index[ov.Index(i).Interface()] = i\n\t}\n}\n\nfunc (s *ordinalScale) Map(x interface{}) interface{} {\n\tvar i int\n\tswitch x := x.(type) {\n\tcase Unscaled:\n\t\ti = int(x)\n\tdefault:\n\t\ts.makeIndex()\n\t\ti = s.index[x]\n\t}\n\n\tswitch r := s.r.(type) {\n\tcase DiscreteRanger:\n\t\tminLevels, maxLevels := r.Levels()\n\t\tif len(s.index) <= 
minLevels {\n\t\t\treturn r.MapLevel(i, minLevels)\n\t\t} else if len(s.index) <= maxLevels {\n\t\t\treturn r.MapLevel(i, len(s.index))\n\t\t} else {\n\t\t\t// TODO: Binning would also be a reasonable\n\t\t\t// policy.\n\t\t\treturn r.MapLevel(i%maxLevels, maxLevels)\n\t\t}\n\n\tcase ContinuousRanger:\n\t\t// Map i to the \"middle\" of the ith equal j-way\n\t\t// subdivision of [0, 1].\n\t\tj := len(s.index)\n\t\tx := (float64(i) + 0.5) / float64(j)\n\t\treturn r.Map(x)\n\n\tdefault:\n\t\tpanic(\"Ranger must be a ContinuousRanger or DiscreteRanger\")\n\t}\n}\n\nfunc (s *ordinalScale) Ticks(max int, pred func(major, minor table.Slice, labels []string) bool) (major, minor table.Slice, labels []string) {\n\t// TODO: Return *no* ticks and only labels. Can't currently\n\t// express this.\n\n\t// TODO: Honor constraints.\n\n\ts.makeIndex()\n\tlabels = make([]string, len(s.index))\n\tov := reflect.ValueOf(s.ordered)\n\n\tif s.f != nil {\n\t\t// Use custom formatter.\n\t\t// TODO: Type check for better error.\n\t\tfv := reflect.ValueOf(s.f)\n\t\tat := fv.Type().In(0)\n\t\tvar avs [1]reflect.Value\n\t\tfor i, len := 0, ov.Len(); i < len; i++ {\n\t\t\tavs[0] = ov.Index(i).Convert(at)\n\t\t\trvs := fv.Call(avs[:])\n\t\t\tlabels[i] = rvs[0].Interface().(string)\n\t\t}\n\t} else {\n\t\t// Use String() method or standard format.\n\t\tfor i, len := 0, ov.Len(); i < len; i++ {\n\t\t\tlabels[i] = fmt.Sprintf(\"%v\", ov.Index(i).Interface())\n\t\t}\n\t}\n\treturn s.ordered, nil, labels\n}\n\nfunc (s *ordinalScale) SetFormatter(f interface{}) {\n\ts.f = f\n}\n\nfunc (s *ordinalScale) CloneScaler() Scaler {\n\tns := &ordinalScale{\n\t\tallData: make([]slice.T, len(s.allData)),\n\t\tr:       s.r,\n\t}\n\tfor i, v := range s.allData {\n\t\tns.allData[i] = v\n\t}\n\treturn s\n}\n\n// XXX\n//\n// A Ranger must be either a ContinuousRanger or a DiscreteRanger.\ntype Ranger interface {\n\tRangeType() reflect.Type\n}\n\ntype ContinuousRanger interface {\n\tRanger\n\tMap(x float64) (y 
interface{})\n\tUnmap(y interface{}) (x float64, ok bool)\n}\n\ntype DiscreteRanger interface {\n\tRanger\n\tLevels() (min, max int)\n\tMapLevel(i, j int) interface{}\n}\n\nfunc NewFloatRanger(lo, hi float64) ContinuousRanger {\n\treturn &floatRanger{lo, hi - lo}\n}\n\ntype floatRanger struct {\n\tlo, w float64\n}\n\nfunc (r *floatRanger) String() string {\n\treturn fmt.Sprintf(\"[%g,%g]\", r.lo, r.lo+r.w)\n}\n\nfunc (r *floatRanger) RangeType() reflect.Type {\n\treturn float64Type\n}\n\nfunc (r *floatRanger) Map(x float64) interface{} {\n\treturn x*r.w + r.lo\n}\n\nfunc (r *floatRanger) Unmap(y interface{}) (float64, bool) {\n\tswitch y := y.(type) {\n\tdefault:\n\t\treturn 0, false\n\n\tcase float64:\n\t\treturn (y - r.lo) / r.w, true\n\t}\n}\n\nfunc NewColorRanger(palette []color.Color) DiscreteRanger {\n\t// TODO: Support continuous palettes.\n\t//\n\t// TODO: Support discrete palettes that vary depending on the\n\t// number of levels.\n\treturn &colorRanger{palette}\n}\n\ntype colorRanger struct {\n\tpalette []color.Color\n}\n\nfunc (r *colorRanger) RangeType() reflect.Type {\n\treturn colorType\n}\n\nfunc (r *colorRanger) Levels() (min, max int) {\n\treturn len(r.palette), len(r.palette)\n}\n\nfunc (r *colorRanger) MapLevel(i, j int) interface{} {\n\tif i < 0 {\n\t\ti = 0\n\t} else if i >= len(r.palette) {\n\t\ti = len(r.palette) - 1\n\t}\n\treturn r.palette[i]\n}\n\n// defaultColorRanger is the default color ranger. 
It is both a\n// ContinuousRanger and a DiscreteRanger.\ntype defaultColorRanger struct{}\n\n// autoPalette is the discrete palette used by defaultColorRanger.\nvar autoPalette = []color.Color{\n\tcolor.RGBA{0x4c, 0x72, 0xb0, 0xff},\n\tcolor.RGBA{0x55, 0xa8, 0x68, 0xff},\n\tcolor.RGBA{0xc4, 0x4e, 0x52, 0xff},\n\tcolor.RGBA{0x81, 0x72, 0xb2, 0xff},\n\tcolor.RGBA{0xcc, 0xb9, 0x74, 0xff},\n\tcolor.RGBA{0x64, 0xb5, 0xcd, 0xff},\n}\n\nfunc (r *defaultColorRanger) RangeType() reflect.Type {\n\treturn colorType\n}\n\nfunc (r *defaultColorRanger) Map(x float64) interface{} {\n\treturn palette.Viridis.Map(x)\n}\n\nfunc (r *defaultColorRanger) Unmap(y interface{}) (float64, bool) {\n\tswitch y := y.(type) {\n\tdefault:\n\t\treturn 0, false\n\n\tcase color.RGBA:\n\t\treturn float64(y.G) / float64(226), true\n\t}\n}\n\nfunc (r *defaultColorRanger) Levels() (min, max int) {\n\treturn len(autoPalette), len(autoPalette)\n}\n\nfunc (r *defaultColorRanger) MapLevel(i, j int) interface{} {\n\tif i < 0 {\n\t\ti = 0\n\t} else if i >= len(autoPalette) {\n\t\ti = len(autoPalette) - 1\n\t}\n\treturn autoPalette[i]\n}\n\n// mapMany applies scaler.Map to all of the values in seq and returns\n// a slice of the results.\n//\n// TODO: Maybe this should just be how Scaler.Map works.\nfunc mapMany(scaler Scaler, seq table.Slice) table.Slice {\n\tsv := reflect.ValueOf(seq)\n\trt := reflect.SliceOf(scaler.RangeType())\n\tif seq == nil {\n\t\treturn reflect.MakeSlice(rt, 0, 0).Interface()\n\t}\n\tres := reflect.MakeSlice(rt, sv.Len(), sv.Len())\n\tfor i, len := 0, sv.Len(); i < len; i++ {\n\t\tval := scaler.Map(sv.Index(i).Interface())\n\t\tres.Index(i).Set(reflect.ValueOf(val))\n\t}\n\treturn res.Interface()\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/gg/stepmode_string.go",
    "content": "// Code generated by \"stringer -type StepMode\"; DO NOT EDIT\n\npackage gg\n\nimport \"fmt\"\n\nconst _StepMode_name = \"StepHVStepVHStepHMidStepVMid\"\n\nvar _StepMode_index = [...]uint8{0, 6, 12, 20, 28}\n\nfunc (i StepMode) String() string {\n\tif i < 0 || i >= StepMode(len(_StepMode_index)-1) {\n\t\treturn fmt.Sprintf(\"StepMode(%d)\", i)\n\t}\n\treturn _StepMode_name[_StepMode_index[i]:_StepMode_index[i+1]]\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/gg/testmain.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\npackage main\n\nimport (\n\t\"math\"\n\t\"math/rand\"\n\t\"os\"\n\n\t\"github.com/aclements/go-gg/gg\"\n\t\"github.com/aclements/go-gg/ggstat\"\n\t\"github.com/aclements/go-gg/table\"\n\t\"github.com/aclements/go-moremath/vec\"\n)\n\nfunc main() {\n\txs1 := vec.Linspace(-10, 10, 100)\n\tfor i := range xs1 {\n\t\txs1[i] = rand.Float64()*20 - 10\n\t}\n\tys1 := vec.Map(math.Sin, xs1)\n\n\txs2 := vec.Linspace(-10, 10, 100)\n\tys2 := vec.Map(math.Cos, xs2)\n\n\twhich := []string{}\n\tfor range xs1 {\n\t\twhich = append(which, \"sin\")\n\t}\n\tfor range xs2 {\n\t\twhich = append(which, \"cos\")\n\t}\n\n\txs := vec.Concat(xs1, xs2)\n\tys := vec.Concat(ys1, ys2)\n\n\ttab := table.NewBuilder(nil).Add(\"x\", xs).Add(\"y\", ys).Add(\"which\", which).Done()\n\n\tplot := gg.NewPlot(tab)\n\tplot.GroupAuto()\n\tplot.Add(gg.FacetX{Col: \"which\"})\n\tplot.Add(gg.FacetY{Col: \"which\"})\n\tplot.Add(gg.LayerLines{X: \"x\", Y: \"y\"})\n\n\tplot.Save()\n\tplot.SetData(ggstat.ECDF{X: \"x\"}.F(plot.Data()))\n\tplot.Add(gg.LayerSteps{Step: gg.StepHV})\n\t//plot.Add(gg.LayerSteps{Step:gg.StepHMid})\n\tplot.Restore()\n\n\tplot.Save()\n\tplot.SetData(ggstat.Density{X: \"x\"}.F(plot.Data()))\n\tplot.Add(gg.LayerPaths{})\n\tplot.Restore()\n\n\tplot.WriteSVG(os.Stdout, 400, 300)\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/gg/text.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage gg\n\nimport \"unicode/utf8\"\n\ntype textMetrics struct {\n\twidth   float64\n\tleading float64\n}\n\n// measureString returns the metrics in pixels of s rendered in a font\n// with pixel size pxSize.\n//\n// TODO: Often all I want is the leading, which is much cheaper to get\n// than the width. Maybe textMetrics should have methods?\nfunc measureString(pxSize float64, s string) textMetrics {\n\t// TODO: This is absolutely horribly awful. Make it real,\n\t// perhaps using the freetype package.\n\n\t// Chrome's default font-size is 16px, so 20px is a reasonable\n\t// leading.\n\treturn textMetrics{\n\t\twidth:   0.5 * pxSize * float64(utf8.RuneCountInString(s)),\n\t\tleading: 1.25 * pxSize,\n\t}\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/gg/transform.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage gg\n\nimport \"github.com/aclements/go-gg/table\"\n\n// SortBy sorts each group by the named columns. If a column's type\n// implements sort.Interface, rows will be sorted according to that\n// order. Otherwise, the values in the column must be naturally\n// ordered (their types must be orderable by the Go specification). If\n// neither is true, SortBy panics with a *generic.TypeError. If more\n// than one column is given, SortBy sorts by the tuple of the columns;\n// that is, if two values in the first column are equal, they are\n// sorted by the second column, and so on.\nfunc (p *Plot) SortBy(cols ...string) *Plot {\n\treturn p.SetData(table.SortBy(p.Data(), cols...))\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/ggstat/agg.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ggstat\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"github.com/aclements/go-gg/generic/slice\"\n\t\"github.com/aclements/go-gg/table\"\n\t\"github.com/aclements/go-moremath/stats\"\n\t\"github.com/aclements/go-moremath/vec\"\n)\n\n// TODO: AggFirst, AggTukey. StdDev?\n\n// Agg constructs an Aggregate transform from a grouping column and a\n// set of Aggregators.\n//\n// TODO: Does this belong in ggstat? The specific aggregator functions\n// probably do, but the concept could go in package table.\nfunc Agg(xs ...string) func(aggs ...Aggregator) Aggregate {\n\treturn func(aggs ...Aggregator) Aggregate {\n\t\treturn Aggregate{xs, aggs}\n\t}\n}\n\n// Aggregate computes aggregate functions of a table grouped by\n// distinct values of a column or set of columns.\n//\n// Aggregate first groups the table by the Xs columns. Each of these\n// groups produces a single row in the output table, where the unique\n// value of each of the Xs columns appears in the output row, along\n// with constant columns from the input, as well as any columns that\n// have a unique value within every group (they're \"effectively\"\n// constant). Additional columns in the output row are produced by\n// applying the Aggregator functions to the group.\ntype Aggregate struct {\n\t// Xs is the list column names to group values by before\n\t// computing aggregate functions.\n\tXs []string\n\n\t// Aggregators is the set of Aggregator functions to apply to\n\t// each group of values.\n\tAggregators []Aggregator\n}\n\n// An Aggregator is a function that aggregates each group of input\n// into one row and adds it to output. 
It may be based on multiple\n// columns from input and may add multiple columns to output.\ntype Aggregator func(input table.Grouping, output *table.Builder)\n\nfunc (s Aggregate) F(g table.Grouping) table.Grouping {\n\tisConst := make([]bool, len(g.Columns()))\n\tfor i := range isConst {\n\t\tisConst[i] = true\n\t}\n\n\tsubgroups := map[table.GroupID]table.Grouping{}\n\tfor _, gid := range g.Tables() {\n\t\tg := table.GroupBy(g.Table(gid), s.Xs...)\n\t\tsubgroups[gid] = g\n\n\t\tfor i, col := range g.Columns() {\n\t\t\tif !isConst[i] {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Can this column be promoted to constant?\n\t\t\tfor _, gid2 := range g.Tables() {\n\t\t\t\tt := g.Table(gid2)\n\t\t\t\tisConst[i] = isConst[i] && checkConst(t, col)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn table.MapTables(g, func(_ table.GroupID, t *table.Table) *table.Table {\n\t\tg := table.GroupBy(t, s.Xs...)\n\t\tvar nt table.Builder\n\n\t\t// Construct X columns.\n\t\trows := len(g.Tables())\n\t\tfor colidx, xcol := range s.Xs {\n\t\t\txs := reflect.MakeSlice(table.ColType(t, xcol), rows, rows)\n\t\t\tfor i, gid := range g.Tables() {\n\t\t\t\tfor j := 0; j < len(s.Xs)-colidx-1; j++ {\n\t\t\t\t\tgid = gid.Parent()\n\t\t\t\t}\n\t\t\t\txs.Index(i).Set(reflect.ValueOf(gid.Label()))\n\t\t\t}\n\n\t\t\tnt.Add(xcol, xs.Interface())\n\t\t}\n\n\t\t// Apply Aggregators.\n\t\tfor _, agg := range s.Aggregators {\n\t\t\tagg(g, &nt)\n\t\t}\n\n\t\t// Keep constant and effectively constant columns.\n\t\tfor i := range isConst {\n\t\t\tcol := t.Columns()[i]\n\t\t\tif !isConst[i] || nt.Has(col) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif cv, ok := t.Const(col); ok {\n\t\t\t\tnt.AddConst(col, cv)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tncol := reflect.MakeSlice(table.ColType(t, col), len(g.Tables()), len(g.Tables()))\n\t\t\tfor i, gid := range g.Tables() {\n\t\t\t\tv := reflect.ValueOf(g.Table(gid).Column(col))\n\t\t\t\tncol.Index(i).Set(v.Index(0))\n\t\t\t}\n\t\t\tnt.Add(col, ncol.Interface())\n\t\t}\n\t\treturn 
nt.Done()\n\t})\n}\n\nfunc checkConst(t *table.Table, col string) bool {\n\tif _, ok := t.Const(col); ok {\n\t\treturn true\n\t}\n\tv := reflect.ValueOf(t.Column(col))\n\tif v.Len() <= 1 {\n\t\treturn true\n\t}\n\tif !v.Type().Elem().Comparable() {\n\t\treturn false\n\t}\n\telem := v.Index(0).Interface()\n\tfor i, l := 1, v.Len(); i < l; i++ {\n\t\tif elem != v.Index(i).Interface() {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// AggCount returns an aggregate function that computes the number of\n// rows in each group. The resulting column will be named label, or\n// \"count\" if label is \"\".\nfunc AggCount(label string) Aggregator {\n\tif label == \"\" {\n\t\tlabel = \"count\"\n\t}\n\n\treturn func(input table.Grouping, b *table.Builder) {\n\t\tcounts := make([]int, 0, len(input.Tables()))\n\t\tfor _, gid := range input.Tables() {\n\t\t\tcounts = append(counts, input.Table(gid).Len())\n\t\t}\n\t\tb.Add(label, counts)\n\t}\n}\n\n// AggMean returns an aggregate function that computes the mean of\n// each of cols. The resulting columns will be named \"mean <col>\" and\n// will have the same type as <col>.\nfunc AggMean(cols ...string) Aggregator {\n\treturn aggFn(stats.Mean, \"mean \", cols...)\n}\n\n// AggGeoMean returns an aggregate function that computes the\n// geometric mean of each of cols. The resulting columns will be named\n// \"geomean <col>\" and will have the same type as <col>.\nfunc AggGeoMean(cols ...string) Aggregator {\n\treturn aggFn(stats.GeoMean, \"geomean \", cols...)\n}\n\n// AggMin returns an aggregate function that computes the minimum of\n// each of cols. The resulting columns will be named \"min <col>\" and\n// will have the same type as <col>.\nfunc AggMin(cols ...string) Aggregator {\n\tmin := func(xs []float64) float64 {\n\t\tx, _ := stats.Bounds(xs)\n\t\treturn x\n\t}\n\treturn aggFn(min, \"min \", cols...)\n}\n\n// AggMax returns an aggregate function that computes the maximum of\n// each of cols. 
The resulting columns will be named \"max <col>\" and\n// will have the same type as <col>.\nfunc AggMax(cols ...string) Aggregator {\n\tmax := func(xs []float64) float64 {\n\t\t_, x := stats.Bounds(xs)\n\t\treturn x\n\t}\n\treturn aggFn(max, \"max \", cols...)\n}\n\n// AggSum returns an aggregate function that computes the sum of each\n// of cols. The resulting columns will be named \"sum <col>\" and will\n// have the same type as <col>.\nfunc AggSum(cols ...string) Aggregator {\n\treturn aggFn(vec.Sum, \"sum \", cols...)\n}\n\n// AggQuantile returns an aggregate function that computes a quantile\n// of each of cols. quantile has a range of [0,1]. The resulting\n// columns will be named \"<prefix> <col>\" and will have the same type\n// as <col>.\nfunc AggQuantile(prefix string, quantile float64, cols ...string) Aggregator {\n\t// \"prefix\" could be autogenerated (e.g. fmt.Sprintf(\"p%g \",\n\t// quantile * 100)), but then the caller would need to do the\n\t// same fmt.Sprintf to compute the column name they had just\n\t// created. 
Perhaps Aggregator should provide a way to find\n\t// the generated column names.\n\treturn aggFn(func(data []float64) float64 {\n\t\treturn stats.Sample{Xs: data}.Quantile(quantile)\n\t}, prefix+\" \", cols...)\n}\n\nfunc aggFn(f func([]float64) float64, prefix string, cols ...string) Aggregator {\n\tocols := make([]string, len(cols))\n\tfor i, col := range cols {\n\t\tocols[i] = prefix + col\n\t}\n\n\treturn func(input table.Grouping, b *table.Builder) {\n\t\tfor coli, col := range cols {\n\t\t\tmeans := make([]float64, 0, len(input.Tables()))\n\n\t\t\tvar xs []float64\n\t\t\tfor _, gid := range input.Tables() {\n\t\t\t\tv := input.Table(gid).MustColumn(col)\n\t\t\t\tslice.Convert(&xs, v)\n\t\t\t\tmeans = append(means, f(xs))\n\t\t\t}\n\n\t\t\tct := table.ColType(input, col)\n\t\t\tif ct == float64SliceType {\n\t\t\t\tb.Add(ocols[coli], means)\n\t\t\t} else {\n\t\t\t\t// Convert means back to the type of col.\n\t\t\t\toutptr := reflect.New(ct)\n\t\t\t\tslice.Convert(outptr.Interface(), means)\n\t\t\t\tb.Add(ocols[coli], outptr.Elem().Interface())\n\t\t\t}\n\t\t}\n\t}\n}\n\n// AggUnique returns an aggregate function retains the unique value of\n// each of cols within each aggregate group, or panics if some group\n// contains more than one value for one of these columns.\n//\n// Note that Aggregate will automatically retain columns that happen\n// to be unique. 
AggUnique can be used to enforce at aggregation time\n// that certain columns *must* be unique (and get a nice error if they\n// are not).\nfunc AggUnique(cols ...string) Aggregator {\n\treturn func(input table.Grouping, b *table.Builder) {\n\t\tif len(cols) == 0 {\n\t\t\treturn\n\t\t}\n\t\tif len(input.Tables()) == 0 {\n\t\t\tpanic(fmt.Sprintf(\"unknown column: %q\", cols[0]))\n\t\t}\n\n\t\tfor _, col := range cols {\n\t\t\tctype := table.ColType(input, col)\n\t\t\trows := len(input.Tables())\n\t\t\tvs := reflect.MakeSlice(ctype, rows, rows)\n\t\t\tfor i, gid := range input.Tables() {\n\t\t\t\t// Get values in this column.\n\t\t\t\txs := reflect.ValueOf(input.Table(gid).MustColumn(col))\n\n\t\t\t\t// Check for uniqueness.\n\t\t\t\tif xs.Len() == 0 {\n\t\t\t\t\tpanic(fmt.Sprintf(\"cannot AggUnique empty column %q\", col))\n\t\t\t\t}\n\t\t\t\tuniquev := xs.Index(0)\n\t\t\t\tunique := uniquev.Interface()\n\t\t\t\tfor i, len := 1, xs.Len(); i < len; i++ {\n\t\t\t\t\tother := xs.Index(i).Interface()\n\t\t\t\t\tif unique != other {\n\t\t\t\t\t\tpanic(fmt.Sprintf(\"column %q is not unique; contains at least %v and %v\", col, unique, other))\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// Store unique value.\n\t\t\t\tvs.Index(i).Set(uniquev)\n\t\t\t}\n\n\t\t\t// Add unique values slice to output table.\n\t\t\tb.Add(col, vs.Interface())\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/ggstat/bin.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ggstat\n\nimport (\n\t\"math\"\n\t\"reflect\"\n\t\"sort\"\n\n\t\"github.com/aclements/go-gg/generic\"\n\t\"github.com/aclements/go-gg/generic/slice\"\n\t\"github.com/aclements/go-gg/table\"\n\t\"github.com/aclements/go-moremath/vec\"\n)\n\n// XXX If this is just based on the number of bins, it can come up\n// with really ugly boundary numbers. If the bin width is specified,\n// then you could also specify the left edge and bins will be placed\n// at [align+width*N, align+width*(N+1)]. ggplot2 also lets you\n// specify the center alignment.\n//\n// XXX In Matlab and NumPy, bins are open on the right *except* for\n// the last bin, which is closed on both.\n//\n// XXX Number of bins/bin width/specify boundaries, same bins across\n// all groups/separate for each group/based on shared scales (don't\n// have that information here), relative or absolute histogram (Matlab\n// has lots more).\n//\n// XXX Scale transform.\n//\n// The result of Bin has two columns in addition to constant columns from the input:\n//\n// - Column X is the left edge of the bin.\n//\n// - Column W is the sum of the rows' weights, or column \"count\" is\n//   the number of rows in the bin.\ntype Bin struct {\n\t// X is the name of the column to use for samples.\n\tX string\n\n\t// W is the optional name of the column to use for sample\n\t// weights. It may be \"\" to weight each sample as 1.\n\tW string\n\n\t// Width controls how wide each bin should be. If not provided\n\t// or 0, a width will be chosen to produce 30 bins. If X is an\n\t// integer column, this width will be treated as an integer as\n\t// well.\n\tWidth float64\n\n\t// Center controls the center point of each bin. To center on\n\t// integers, for example, you could use {Width: 1, Center:\n\t// 0}.\n\t// XXX What does center mean for integers? 
Should an unspecified center yield an autochosen one, or 0?\n\t//Center float64\n\n\t// Breaks is the set of break points to use as boundaries\n\t// between bins. The interval of each bin is [Breaks[i],\n\t// Breaks[i+1]). Data points before the first break are\n\t// dropped. If provided, Width and Center are ignored.\n\tBreaks table.Slice\n\n\t// SplitGroups indicates that each group in the table should\n\t// have separate bounds based on the data in that group alone.\n\t// The default, false, indicates that the binning function\n\t// should use the bounds of all of the data combined. This\n\t// makes it easier to compare bins across groups.\n\tSplitGroups bool\n}\n\nfunc (b Bin) F(g table.Grouping) table.Grouping {\n\tbreaks := reflect.ValueOf(b.Breaks)\n\tagg := AggCount(\"count\")\n\tif b.W != \"\" {\n\t\tagg = aggFn(vec.Sum, \"\", b.W)\n\t}\n\tif !breaks.IsValid() && !b.SplitGroups {\n\t\tbreaks = b.computeBreaks(g)\n\t}\n\t// Change b.X to the start of the bin.\n\tg = table.MapTables(g, func(_ table.GroupID, t *table.Table) *table.Table {\n\t\tbreaks := breaks\n\t\tif !breaks.IsValid() {\n\t\t\tbreaks = b.computeBreaks(t)\n\t\t}\n\t\tnbreaks := breaks.Len()\n\n\t\tin := reflect.ValueOf(t.MustColumn(b.X))\n\t\tnin := in.Len()\n\n\t\tout := reflect.MakeSlice(breaks.Type(), nin, nin)\n\t\tvar found []int\n\t\tfor i := 0; i < nin; i++ {\n\t\t\telt := in.Index(i)\n\t\t\tbin := sort.Search(nbreaks, func(j int) bool {\n\t\t\t\treturn generic.OrderR(elt, breaks.Index(j)) < 0\n\t\t\t})\n\t\t\t// 0 means the row doesn't fit on the front\n\t\t\t// XXX Allow configuring the first and last bin as infinite or not.\n\t\t\tbin = bin - 1\n\t\t\tif bin >= 0 {\n\t\t\t\tfound = append(found, i)\n\t\t\t\tout.Index(i).Set(breaks.Index(bin))\n\t\t\t}\n\t\t}\n\t\tvar nt table.Builder\n\t\tfor _, col := range t.Columns() {\n\t\t\tif col == b.X {\n\t\t\t\tnt.Add(col, slice.Select(out.Interface(), found))\n\t\t\t} else if c, ok := t.Const(col); ok {\n\t\t\t\tnt.AddConst(col, 
c)\n\t\t\t} else {\n\t\t\t\tnt.Add(col, slice.Select(t.Column(col), found))\n\t\t\t}\n\t\t}\n\t\treturn nt.Done()\n\t})\n\t// Group by the found bin\n\treturn Agg(b.X)(agg).F(g)\n}\n\nfunc (b Bin) computeBreaks(g table.Grouping) reflect.Value {\n\tvar cols []slice.T\n\tfor _, gid := range g.Tables() {\n\t\tcols = append(cols, g.Table(gid).MustColumn(b.X))\n\t}\n\tdata := slice.Concat(cols...)\n\n\tmin := slice.Min(data)\n\tmax := slice.Max(data)\n\n\trv := reflect.ValueOf(min)\n\tswitch rv.Type().Kind() {\n\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:\n\t\tmin, max := rv.Int(), reflect.ValueOf(max).Int()\n\t\twidth := int64(b.Width)\n\t\tif width == 0 {\n\t\t\twidth = (max - min) / 30\n\t\t\tif width < 1 {\n\t\t\t\twidth = 1\n\t\t\t}\n\t\t}\n\t\t// XXX: This assumes boundaries should be aligned with\n\t\t// 0. We should support explicit Center or Boundary\n\t\t// requests.\n\t\tmin -= (min % width)\n\t\tvar breaks []int64\n\t\tfor i := min; i < max; i += width {\n\t\t\tbreaks = append(breaks, i)\n\t\t}\n\t\touts := reflect.New(reflect.ValueOf(cols[0]).Type())\n\t\tslice.Convert(outs.Interface(), breaks)\n\t\treturn outs.Elem()\n\tcase reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:\n\t\tmin, max := rv.Uint(), reflect.ValueOf(max).Uint()\n\t\twidth := uint64(b.Width)\n\t\tif width == 0 {\n\t\t\twidth = (max - min) / 30\n\t\t\tif width < 1 {\n\t\t\t\twidth = 1\n\t\t\t}\n\t\t}\n\t\tmin -= (min % width)\n\t\tvar breaks []uint64\n\t\tfor i := min; i < max; i += width {\n\t\t\tbreaks = append(breaks, i)\n\t\t}\n\t\touts := reflect.New(reflect.ValueOf(cols[0]).Type())\n\t\tslice.Convert(outs.Interface(), breaks)\n\t\treturn outs.Elem()\n\tcase reflect.Float32, reflect.Float64:\n\t\tmin, max := rv.Float(), reflect.ValueOf(max).Float()\n\t\twidth := b.Width\n\t\tif width == 0 {\n\t\t\twidth = (max - min) / 30\n\t\t\tif width == 0 {\n\t\t\t\twidth = 1\n\t\t\t}\n\t\t}\n\t\tmin -= math.Mod(min, 
width)\n\t\tvar breaks []float64\n\t\tfor i := min; i < max; i += width {\n\t\t\tbreaks = append(breaks, i)\n\t\t}\n\t\touts := reflect.New(reflect.ValueOf(cols[0]).Type())\n\t\tslice.Convert(outs.Interface(), breaks)\n\t\treturn outs.Elem()\n\tdefault:\n\t\tpanic(\"can't compute breaks for unknown type\")\n\t}\n}\n\n// TODO: Count for categorical data.\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/ggstat/common.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ggstat\n\nimport \"reflect\"\n\nvar float64Type = reflect.TypeOf(float64(0))\nvar float64SliceType = reflect.TypeOf([]float64(nil))\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/ggstat/density.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ggstat\n\nimport (\n\t\"github.com/aclements/go-gg/generic/slice\"\n\t\"github.com/aclements/go-gg/table\"\n\t\"github.com/aclements/go-moremath/stats\"\n\t\"github.com/aclements/go-moremath/vec\"\n)\n\n// TODO: Default to first (and second) column for X (and Y)?\n\n// Density constructs a probability density estimate from a set of\n// samples using kernel density estimation.\n//\n// X is the only required field. All other fields have reasonable\n// default zero values.\n//\n// The result of Density has three columns in addition to constant\n// columns from the input:\n//\n// - Column X is the points at which the density estimate is sampled.\n//\n// - Column \"probability density\" is the density estimate.\n//\n// - Column \"cumulative density\" is the cumulative density estimate.\ntype Density struct {\n\t// X is the name of the column to use for samples.\n\tX string\n\n\t// W is the optional name of the column to use for sample\n\t// weights. It may be \"\" to uniformly weight samples.\n\tW string\n\n\t// N is the number of points to sample the KDE at. If N is 0,\n\t// a reasonable default is used.\n\t//\n\t// TODO: This is particularly sensitive to the scale\n\t// transform.\n\t//\n\t// TODO: Base the default on the bandwidth. 
If the bandwidth\n\t// is really narrow, we may need a lot of samples to exceed\n\t// the Nyquist rate.\n\tN int\n\n\t// Domain specifies the domain at which to sample this function.\n\t// If Domain is nil, it defaults to DomainData{}.\n\tDomain FunctionDomainer\n\n\t// Kernel is the kernel to use for the KDE.\n\tKernel stats.KDEKernel\n\n\t// Bandwidth is the bandwidth to use for the KDE.\n\t//\n\t// If this is zero, the bandwidth is computed from the data\n\t// using a default bandwidth estimator (currently\n\t// stats.BandwidthScott).\n\tBandwidth float64\n\n\t// BoundaryMethod is the boundary correction method to use for\n\t// the KDE. The default value is BoundaryReflect; however, the\n\t// default bounds are effectively +/-inf, which is equivalent\n\t// to performing no boundary correction.\n\tBoundaryMethod stats.KDEBoundaryMethod\n\n\t// [BoundaryMin, BoundaryMax) specify a bounded support for\n\t// the KDE. If both are 0 (their default values), they are\n\t// treated as +/-inf.\n\t//\n\t// To specify a half-bounded support, set Min to math.Inf(-1)\n\t// or Max to math.Inf(1).\n\tBoundaryMin float64\n\tBoundaryMax float64\n}\n\nfunc (d Density) F(g table.Grouping) table.Grouping {\n\tkde := stats.KDE{\n\t\tKernel:         d.Kernel,\n\t\tBandwidth:      d.Bandwidth,\n\t\tBoundaryMethod: d.BoundaryMethod,\n\t\tBoundaryMin:    d.BoundaryMin,\n\t\tBoundaryMax:    d.BoundaryMax,\n\t}\n\tdname, cname := \"probability density\", \"cumulative density\"\n\n\taddEmpty := func(out *table.Builder) {\n\t\tout.Add(dname, []float64{})\n\t\tout.Add(cname, []float64{})\n\t}\n\n\treturn Function{\n\t\tX: d.X, N: d.N, Domain: d.Domain,\n\t\tFn: func(gid table.GroupID, in *table.Table, sampleAt []float64, out *table.Builder) {\n\t\t\tif len(sampleAt) == 0 {\n\t\t\t\taddEmpty(out)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Get input sample.\n\t\t\tvar sample stats.Sample\n\t\t\tslice.Convert(&sample.Xs, in.MustColumn(d.X))\n\t\t\tif d.W != \"\" 
{\n\t\t\t\tslice.Convert(&sample.Weights, in.MustColumn(d.W))\n\t\t\t\tif sample.Weight() == 0 {\n\t\t\t\t\taddEmpty(out)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Compute KDE.\n\t\t\tkde.Sample = sample\n\t\t\tif d.Bandwidth == 0 {\n\t\t\t\tkde.Bandwidth = stats.BandwidthScott(sample)\n\t\t\t}\n\n\t\t\tout.Add(dname, vec.Map(kde.PDF, sampleAt))\n\t\t\tout.Add(cname, vec.Map(kde.CDF, sampleAt))\n\t\t},\n\t}.F(g)\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/ggstat/domain.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ggstat\n\nimport (\n\t\"math\"\n\n\t\"github.com/aclements/go-gg/generic/slice\"\n\t\"github.com/aclements/go-gg/table\"\n\t\"github.com/aclements/go-moremath/stats\"\n)\n\n// A FunctionDomainer computes the domain over which to evaluate a\n// statistical function.\ntype FunctionDomainer interface {\n\t// FunctionDomain computes the domain of a particular column\n\t// within a table. It takes a Grouping and a column in that\n\t// Grouping to compute the domain of and returns a function\n\t// that computes the domain for a specific group in the\n\t// Grouping. This makes it possible for FunctionDomain to\n\t// easily compute either Grouping-wide domains, or per-Table\n\t// domains.\n\t//\n\t// The returned domain may be (NaN, NaN) to indicate that\n\t// there is no data and the domain is vacuous.\n\tFunctionDomain(g table.Grouping, col string) func(gid table.GroupID) (min, max float64)\n}\n\n// DomainFixed is a FunctionDomainer that returns a fixed domain.\ntype DomainFixed struct {\n\tMin, Max float64\n}\n\nvar _ FunctionDomainer = DomainFixed{}\n\nfunc (r DomainFixed) FunctionDomain(g table.Grouping, col string) func(gid table.GroupID) (min, max float64) {\n\treturn func(table.GroupID) (min, max float64) {\n\t\treturn r.Min, r.Max\n\t}\n}\n\n// DomainData is a FunctionDomainer that computes domains based on the\n// bounds of the data.\ntype DomainData struct {\n\t// Widen expands the domain by Widen times the span of the\n\t// data.\n\t//\n\t// A value of 1.0 means to use exactly the bounds of the data.\n\t// If Widen is 0, it is treated as 1.1 (that is, widen the\n\t// domain by 10%, or 5% on the left and 5% on the right).\n\tWiden float64\n\n\t// SplitGroups indicates that each group in the table should\n\t// have a separate domain based on the data in that group\n\t// alone. 
The default, false, indicates that the domain should\n\t// be based on all of the data in the table combined. This\n\t// makes it possible to stack functions and easier to compare\n\t// them across groups.\n\tSplitGroups bool\n}\n\nvar _ FunctionDomainer = DomainData{}\n\nconst defaultWiden = 1.1\n\nfunc (r DomainData) FunctionDomain(g table.Grouping, col string) func(gid table.GroupID) (min, max float64) {\n\twiden := r.Widen\n\tif widen <= 0 {\n\t\twiden = defaultWiden\n\t}\n\n\tvar xs []float64\n\tif !r.SplitGroups {\n\t\t// Compute combined bounds.\n\t\tgmin, gmax := math.NaN(), math.NaN()\n\t\tfor _, gid := range g.Tables() {\n\t\t\tt := g.Table(gid)\n\t\t\tslice.Convert(&xs, t.MustColumn(col))\n\t\t\txmin, xmax := stats.Bounds(xs)\n\t\t\tif xmin < gmin || math.IsNaN(gmin) {\n\t\t\t\tgmin = xmin\n\t\t\t}\n\t\t\tif xmax > gmax || math.IsNaN(gmax) {\n\t\t\t\tgmax = xmax\n\t\t\t}\n\t\t}\n\n\t\t// Widen bounds.\n\t\tspan := gmax - gmin\n\t\tgmin, gmax = gmin-span*(widen-1)/2, gmax+span*(widen-1)/2\n\n\t\treturn func(table.GroupID) (min, max float64) {\n\t\t\treturn gmin, gmax\n\t\t}\n\t}\n\n\treturn func(gid table.GroupID) (min, max float64) {\n\t\t// Compute bounds.\n\t\tslice.Convert(&xs, g.Table(gid).MustColumn(col))\n\t\tmin, max = stats.Bounds(xs)\n\n\t\t// Widen bounds.\n\t\tspan := max - min\n\t\tmin, max = min-span*(widen-1)/2, max+span*(widen-1)/2\n\t\treturn\n\t}\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/ggstat/ecdf.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ggstat\n\nimport (\n\t\"github.com/aclements/go-gg/generic/slice\"\n\t\"github.com/aclements/go-gg/table\"\n\t\"github.com/aclements/go-moremath/vec\"\n)\n\n// ECDF constructs an empirical CDF from a set of samples.\n//\n// X is the only required field. All other fields have reasonable\n// default zero values.\n//\n// The result of ECDF has three columns in addition to constant\n// columns from the input. The names of the columns depend on whether\n// Label is \"\".\n//\n// - Column X is the points at which the CDF changes (a subset of the\n// samples).\n//\n// - Column \"cumulative density\" or \"cumulative density of <label>\" is\n// the cumulative density estimate.\n//\n// - Column \"cumulative count\" (if W and Label are \"\"), \"cumulative\n// weight\" (if W is not \"\", but Label is \"\") or \"cumulative <label>\"\n// (if Label is not \"\") is the cumulative count or weight of samples.\n// That is, cumulative density times the total weight of the samples.\ntype ECDF struct {\n\t// X is the name of the column to use for samples.\n\tX string\n\n\t// W is the optional name of the column to use for sample\n\t// weights. It may be \"\" to uniformly weight samples.\n\tW string\n\n\t// Label, if not \"\", gives a label for the samples. It is used\n\t// to construct more specific names for the output columns. It\n\t// should be a plural noun.\n\tLabel string\n\n\t// Domain specifies the domain of the returned ECDF. If the\n\t// domain is wider than the bounds of the data in a group,\n\t// ECDF will add a point below the smallest sample and above\n\t// the largest sample to make the 0 and 1 levels clear. 
If\n\t// Domain is nil, it defaults to DomainData{}.\n\tDomain FunctionDomainer\n}\n\nfunc (s ECDF) F(g table.Grouping) table.Grouping {\n\t// Set defaults.\n\tif s.Domain == nil {\n\t\ts.Domain = DomainData{}\n\t}\n\n\t// Construct output column names.\n\tdname, cname := \"cumulative density\", \"cumulative count\"\n\tif s.Label != \"\" {\n\t\tdname += \" of \" + s.Label\n\t\tcname = \"cumulative \" + s.Label\n\t} else if s.W != \"\" {\n\t\tcname = \"cumulative weight\"\n\t}\n\n\tg = table.SortBy(g, s.X)\n\tdomain := s.Domain.FunctionDomain(g, s.X)\n\n\treturn table.MapTables(g, func(gid table.GroupID, t *table.Table) *table.Table {\n\t\t// Get input columns.\n\t\tvar xs, ws []float64\n\t\tslice.Convert(&xs, t.MustColumn(s.X))\n\t\tif s.W != \"\" {\n\t\t\tslice.Convert(&ws, t.MustColumn(s.W))\n\t\t}\n\n\t\t// Ignore empty tables.\n\t\tif len(xs) == 0 {\n\t\t\tnt := new(table.Builder).Add(s.X, []float64{}).Add(cname, []float64{}).Add(dname, []float64{})\n\t\t\tpreserveConsts(nt, t)\n\t\t\treturn nt.Done()\n\t\t}\n\n\t\t// Get domain.\n\t\tmin, max := domain(gid)\n\n\t\t// Create output columns.\n\t\txo, do, co := make([]float64, 0), make([]float64, 0), make([]float64, 0)\n\t\tif min < xs[0] {\n\t\t\t// Extend to the left.\n\t\t\txo = append(xo, min)\n\t\t\tdo = append(do, 0)\n\t\t\tco = append(co, 0)\n\t\t}\n\n\t\t// Compute total weight.\n\t\tvar total float64\n\t\tif ws == nil {\n\t\t\ttotal = float64(t.Len())\n\t\t} else {\n\t\t\ttotal = vec.Sum(ws)\n\t\t}\n\n\t\t// Create ECDF.\n\t\tcum := 0.0\n\t\tfor i := 0; i < len(xs); {\n\t\t\tj := i\n\t\t\tfor j < len(xs) && xs[i] == xs[j] {\n\t\t\t\tif ws == nil {\n\t\t\t\t\tcum += 1\n\t\t\t\t} else {\n\t\t\t\t\tcum += ws[j]\n\t\t\t\t}\n\t\t\t\tj++\n\t\t\t}\n\n\t\t\txo = append(xo, xs[i])\n\t\t\tdo = append(do, cum/total)\n\t\t\tco = append(co, cum)\n\n\t\t\ti = j\n\t\t}\n\n\t\tif xs[len(xs)-1] < max {\n\t\t\t// Extend to the right.\n\t\t\txo = append(xo, max)\n\t\t\tdo = append(do, 1)\n\t\t\tco = append(co, 
cum)\n\t\t}\n\n\t\t// Construct results table.\n\t\tnt := new(table.Builder).Add(s.X, xo).Add(dname, do).Add(cname, co)\n\t\tpreserveConsts(nt, t)\n\t\treturn nt.Done()\n\t})\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/ggstat/fn.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ggstat\n\nimport (\n\t\"math\"\n\t\"reflect\"\n\n\t\"github.com/aclements/go-gg/generic/slice\"\n\t\"github.com/aclements/go-gg/table\"\n\t\"github.com/aclements/go-moremath/vec\"\n)\n\n// Function samples a continuous univariate function at N points in\n// the domain computed by Domain.\n//\n// The result of Function binds column X to the X values at which the\n// function is sampled and retains constant columns from the input.\n// The computed function can add arbitrary columns for its output.\ntype Function struct {\n\t// X is the name of the column to use for input domain of this\n\t// function.\n\tX string\n\n\t// N is the number of points to sample the function at. If N\n\t// is 0, a reasonable default is used.\n\tN int\n\n\t// Domain specifies the domain of which to sample this function.\n\t// If Domain is nil, it defaults to DomainData{}.\n\tDomain FunctionDomainer\n\n\t// Fn is the continuous univariate function to sample. Fn will\n\t// be called with each table in the grouping and the X values\n\t// at which it should be sampled. Fn must add its output\n\t// columns to out. The output table will already contain the\n\t// sample points bound to the X column.\n\tFn func(gid table.GroupID, in *table.Table, sampleAt []float64, out *table.Builder)\n}\n\nconst defaultFunctionSamples = 200\n\nfunc (f Function) F(g table.Grouping) table.Grouping {\n\t// Set defaults.\n\tif f.N <= 0 {\n\t\tf.N = defaultFunctionSamples\n\t}\n\tif f.Domain == nil {\n\t\tf.Domain = DomainData{}\n\t}\n\n\tdomain := f.Domain.FunctionDomain(g, f.X)\n\treturn table.MapTables(g, func(gid table.GroupID, t *table.Table) *table.Table {\n\t\tmin, max := domain(gid)\n\n\t\t// Compute sample points. 
If there's no data, there\n\t\t// are no sample points, but we still have to run the\n\t\t// function to get the right output columns.\n\t\tvar ss []float64\n\t\tif math.IsNaN(min) {\n\t\t\tss = []float64{}\n\t\t} else {\n\t\t\tss = vec.Linspace(min, max, f.N)\n\t\t}\n\n\t\tvar nt table.Builder\n\t\tctype := table.ColType(t, f.X)\n\t\tif ctype == float64Type {\n\t\t\t// Bind output X column.\n\t\t\tnt.Add(f.X, ss)\n\t\t} else {\n\t\t\t// Convert to the column type.\n\t\t\tvsp := reflect.New(ctype)\n\t\t\tslice.Convert(vsp.Interface(), ss)\n\t\t\tvs := vsp.Elem()\n\t\t\t// This may have produced duplicate values.\n\t\t\t// Eliminate those.\n\t\t\tif vs.Len() > 0 {\n\t\t\t\tprev, i := vs.Index(0).Interface(), 1\n\t\t\t\tfor j := 1; j < vs.Len(); j++ {\n\t\t\t\t\tnext := vs.Index(j).Interface()\n\t\t\t\t\tif prev == next {\n\t\t\t\t\t\t// Skip duplicate.\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tif i != j {\n\t\t\t\t\t\tvs.Index(i).Set(vs.Index(j))\n\t\t\t\t\t}\n\t\t\t\t\ti++\n\t\t\t\t\tprev = next\n\t\t\t\t}\n\t\t\t\tvs.SetLen(i)\n\t\t\t}\n\t\t\t// Bind column-typed values to output X.\n\t\t\tnt.Add(f.X, vs.Interface())\n\t\t\t// And convert back to []float64 so we can\n\t\t\t// apply the function.\n\t\t\tslice.Convert(&ss, vs.Interface())\n\t\t}\n\n\t\t// Apply the function to the sample points.\n\t\tf.Fn(gid, t, ss, &nt)\n\n\t\tpreserveConsts(&nt, t)\n\t\treturn nt.Done()\n\t})\n}\n\n// preserveConsts copies the constant columns from t into nt.\nfunc preserveConsts(nt *table.Builder, t *table.Table) {\n\tfor _, col := range t.Columns() {\n\t\tif nt.Has(col) {\n\t\t\t// Don't overwrite existing columns in nt.\n\t\t\tcontinue\n\t\t}\n\t\tif cv, ok := t.Const(col); ok {\n\t\t\tnt.AddConst(col, cv)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/ggstat/loess.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ggstat\n\nimport (\n\t\"github.com/aclements/go-gg/generic/slice\"\n\t\"github.com/aclements/go-gg/table\"\n\t\"github.com/aclements/go-moremath/fit\"\n\t\"github.com/aclements/go-moremath/vec\"\n)\n\n// LOESS constructs a locally-weighted least squares polynomial\n// regression for the data (X, Y).\n//\n// X and Y are required. All other fields have reasonable default zero\n// values.\n//\n// The result of LOESS has two columns in addition to constant columns\n// from the input:\n//\n// - Column X is the points at which the LOESS function is sampled.\n//\n// - Column Y is the result of the LOESS function.\n//\n// TODO: Confidence intervals/bootstrap distributions?\n//\n// TODO: Robust LOESS? See https://www.mathworks.com/help/curvefit/smoothing-data.html#bq_6ys3-3\ntype LOESS struct {\n\t// X and Y are the names of the columns to use for X and Y\n\t// values of data points, respectively.\n\tX, Y string\n\n\t// N is the number of points to sample the regression at. If N\n\t// is 0, a reasonable default is used.\n\tN int\n\n\t// Domain specifies the domain at which to sample this function.\n\t// If Domain is nil, it defaults to DomainData{}.\n\tDomain FunctionDomainer\n\n\t// Degree specifies the degree of the local fit function. If\n\t// it is 0, it is treated as 2.\n\tDegree int\n\n\t// Span controls the smoothness of the fit. If it is 0, it is\n\t// treated as 0.5. 
The span must be between 0 and 1, where\n\t// smaller values fit the data more tightly.\n\tSpan float64\n}\n\nfunc (s LOESS) F(g table.Grouping) table.Grouping {\n\tif s.Degree <= 0 {\n\t\ts.Degree = 2\n\t}\n\tif s.Span <= 0 {\n\t\ts.Span = 0.5\n\t}\n\n\tvar xs, ys []float64\n\treturn Function{\n\t\tX: s.X, N: s.N, Domain: s.Domain,\n\t\tFn: func(gid table.GroupID, in *table.Table, sampleAt []float64, out *table.Builder) {\n\t\t\tif len(sampleAt) == 0 {\n\t\t\t\tout.Add(s.Y, []float64{})\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// TODO: We potentially convert each X column\n\t\t\t// twice, since Function also has to convert\n\t\t\t// them.\n\t\t\tslice.Convert(&xs, in.MustColumn(s.X))\n\t\t\tslice.Convert(&ys, in.MustColumn(s.Y))\n\n\t\t\tloess := fit.LOESS(xs, ys, s.Degree, s.Span)\n\t\t\tout.Add(s.Y, vec.Map(loess, sampleAt))\n\t\t},\n\t}.F(g)\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/ggstat/lsquares.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ggstat\n\nimport (\n\t\"github.com/aclements/go-gg/generic/slice\"\n\t\"github.com/aclements/go-gg/table\"\n\t\"github.com/aclements/go-moremath/fit\"\n\t\"github.com/aclements/go-moremath/vec\"\n)\n\n// LeastSquares constructs a least squares polynomial regression for\n// the data (X, Y).\n//\n// X and Y are required. All other fields have reasonable default zero\n// values.\n//\n// The result of LeastSquares has two columns in addition to constant\n// columns from the input:\n//\n// - Column X is the points at which the fit function is sampled.\n//\n// - Column Y is the result of the fit function.\n//\n// TODO: Confidence intervals/bootstrap distributions?\ntype LeastSquares struct {\n\t// X and Y are the names of the columns to use for X and Y\n\t// values of data points, respectively.\n\tX, Y string\n\n\t// N is the number of points to sample the regression at. If N\n\t// is 0, a reasonable default is used.\n\tN int\n\n\t// Domain specifies the domain at which to sample this function.\n\t// If Domain is nil, it defaults to DomainData{}.\n\tDomain FunctionDomainer\n\n\t// Degree specifies the degree of the fit polynomial. If it is\n\t// 0, it is treated as 1.\n\tDegree int\n}\n\nfunc (s LeastSquares) F(g table.Grouping) table.Grouping {\n\tif s.Degree <= 0 {\n\t\ts.Degree = 1\n\t}\n\n\tvar xs, ys []float64\n\treturn Function{\n\t\tX: s.X, N: s.N, Domain: s.Domain,\n\t\tFn: func(gid table.GroupID, in *table.Table, sampleAt []float64, out *table.Builder) {\n\t\t\tif len(sampleAt) == 0 {\n\t\t\t\tout.Add(s.Y, []float64{})\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tslice.Convert(&xs, in.MustColumn(s.X))\n\t\t\tslice.Convert(&ys, in.MustColumn(s.Y))\n\n\t\t\tr := fit.PolynomialRegression(xs, ys, nil, s.Degree)\n\t\t\tout.Add(s.Y, vec.Map(r.F, sampleAt))\n\t\t},\n\t}.F(g)\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/ggstat/normalize.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage ggstat\n\nimport (\n\t\"reflect\"\n\n\t\"github.com/aclements/go-gg/generic/slice\"\n\t\"github.com/aclements/go-gg/table\"\n)\n\n// Normalize normalizes each group such that some data point is 1.\n//\n// Either X or Index is required (though 0 is a reasonable value of\n// Index).\n//\n// The result of Normalize is the same as the input table, plus\n// additional columns for each normalized column. These columns will\n// be named \"normalized <col>\" where <col> is the name of the original\n// column and will have type []float64.\ntype Normalize struct {\n\t// X is the name of the column to use to find the denominator\n\t// row. If X is \"\", Index is used instead.\n\tX string\n\n\t// Index is the row index of the denominator row if X is \"\"\n\t// (otherwise it is ignored). Index may be negative, in which\n\t// case it is added to the number of rows (e.g., -1 is the\n\t// last row).\n\tIndex int\n\n\t// By is a function func([]T) int that returns the index of\n\t// the denominator row given column X. By may be nil, in which\n\t// case it defaults to generic.ArgMin.\n\tBy interface{}\n\n\t// Cols is a slice of the names of columns to normalize\n\t// relative to the corresponding DenomCols value in the\n\t// denominator row. Cols may be nil, in which case it defaults\n\t// to all integral and floating point columns.\n\tCols []string\n\n\t// DenomCols is a slice of the names of columns used as the\n\t// denominator. DenomCols may be nil, in which case it\n\t// defaults to Cols (i.e. 
each column will be normalized to\n\t// the value from that column in the denominator row.)\n\t// Otherwise, DenomCols must be the same length as Cols.\n\tDenomCols []string\n}\n\nfunc (s Normalize) F(g table.Grouping) table.Grouping {\n\t// Find the columns to normalize.\n\tif s.Cols == nil {\n\t\tcols := []string{}\n\t\tfor i, ct := range colTypes(g) {\n\t\t\tif canNormalize(ct.Elem().Kind()) {\n\t\t\t\tcols = append(cols, g.Columns()[i])\n\t\t\t}\n\t\t}\n\t\ts.Cols = cols\n\t}\n\tif len(s.Cols) == 0 {\n\t\treturn g\n\t}\n\n\t// Construct new column names.\n\tnewcols := make([]string, len(s.Cols))\n\tfor i, col := range s.Cols {\n\t\tnewcols[i] = \"normalized \" + col\n\t}\n\n\t// Get \"by\" function.\n\tvar byv reflect.Value\n\tbyargs := make([]reflect.Value, 1)\n\tif s.By != nil {\n\t\tbyv = reflect.ValueOf(s.By)\n\t\t// TODO: Type check byv better.\n\t}\n\n\treturn table.MapTables(g, func(_ table.GroupID, t *table.Table) *table.Table {\n\t\tif t.Len() == 0 {\n\t\t\treturn t\n\t\t}\n\n\t\t// Find the denominator row.\n\t\tvar drow int\n\t\tif s.X == \"\" {\n\t\t\tdrow = s.Index\n\t\t\tif drow < 0 {\n\t\t\t\tdrow += t.Len()\n\t\t\t}\n\t\t} else {\n\t\t\txs := t.MustColumn(s.X)\n\t\t\tif s.By == nil {\n\t\t\t\tdrow = slice.ArgMin(xs)\n\t\t\t} else {\n\t\t\t\tbyargs[0] = reflect.ValueOf(xs)\n\t\t\t\tbyout := byv.Call(byargs)\n\t\t\t\tdrow = int(byout[0].Int())\n\t\t\t}\n\t\t}\n\n\t\t// Normalize columns.\n\t\tnewt := table.NewBuilder(t)\n\t\tdenomCols := s.DenomCols\n\t\tif denomCols == nil {\n\t\t\tdenomCols = s.Cols\n\t\t}\n\t\tfor coli, col := range s.Cols {\n\t\t\tdenom := denomValue(t.MustColumn(denomCols[coli]), drow)\n\t\t\tout := normalizeTo(t.MustColumn(col), denom)\n\t\t\tnewt.Add(newcols[coli], out)\n\t\t}\n\n\t\treturn newt.Done()\n\t})\n}\n\nfunc colTypes(g table.Grouping) []reflect.Type {\n\tcts := make([]reflect.Type, len(g.Columns()))\n\tfor i, col := range g.Columns() {\n\t\tcts[i] = table.ColType(g, col)\n\t}\n\treturn cts\n}\n\nvar 
canNormalizeKinds = map[reflect.Kind]bool{\n\treflect.Float32: true,\n\treflect.Float64: true,\n\treflect.Int:     true,\n\treflect.Int8:    true,\n\treflect.Int16:   true,\n\treflect.Int32:   true,\n\treflect.Int64:   true,\n\treflect.Uint:    true,\n\treflect.Uintptr: true,\n\treflect.Uint8:   true,\n\treflect.Uint16:  true,\n\treflect.Uint32:  true,\n\treflect.Uint64:  true,\n}\n\nfunc canNormalize(k reflect.Kind) bool {\n\treturn canNormalizeKinds[k]\n}\n\nfunc denomValue(s interface{}, index int) float64 {\n\tswitch s := s.(type) {\n\tcase []float64:\n\t\treturn s[index]\n\t}\n\treturn reflect.ValueOf(s).Index(index).Convert(float64Type).Float()\n}\n\nfunc normalizeTo(s interface{}, denom float64) interface{} {\n\tswitch s := s.(type) {\n\tcase []float64:\n\t\tout := make([]float64, len(s))\n\t\tfor i, numer := range s {\n\t\t\tout[i] = numer / denom\n\t\t}\n\t\treturn out\n\t}\n\n\tsv := reflect.ValueOf(s)\n\n\tout := reflect.MakeSlice(float64SliceType, sv.Len(), sv.Len())\n\tfor i, len := 0, sv.Len(); i < len; i++ {\n\t\tnumer := sv.Index(i).Convert(float64Type).Float()\n\t\tout.Index(i).SetFloat(numer / denom)\n\t}\n\treturn out.Interface()\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/palette/blend.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage palette\n\nimport \"image/color\"\n\n// blendRGBA returns the interpolation between two sRGB colors with\n// pre-multiplied alpha.\nfunc blendRGBA(a, b color.RGBA, x float64) color.RGBA {\n\tconst linThresh = 5\n\tdiff8 := func(a, b uint8) uint8 {\n\t\tif a < b {\n\t\t\treturn b - a\n\t\t}\n\t\treturn a - b\n\t}\n\tif a.A == 255 && b.A == 255 && diff8(a.R, b.R) <= linThresh && diff8(a.G, b.G) <= linThresh && diff8(a.B, b.B) <= linThresh {\n\t\t// Perform a quick linear interpolation.\n\t\tblend8 := func(a, b uint8, x float64) uint8 {\n\t\t\tc := float64(a)*(1-x) + float64(b)*x\n\t\t\tif c <= 0 {\n\t\t\t\treturn 0\n\t\t\t} else if c >= 255 {\n\t\t\t\treturn 255\n\t\t\t}\n\t\t\treturn uint8(c)\n\t\t}\n\t\treturn color.RGBA{\n\t\t\tblend8(a.R, b.R, x),\n\t\t\tblend8(a.G, b.G, x),\n\t\t\tblend8(a.B, b.B, x),\n\t\t\t255,\n\t\t}\n\t}\n\n\tblend := func(a, b uint8, x float64, lim uint8) uint8 {\n\t\t// Map to linear RGB, blend in linear RGB, and map\n\t\t// back to sRGB.\n\t\tal, bl := sRGB8ToLinear(a), sRGB8ToLinear(b)\n\t\tcl := float64(al)*(1-x) + float64(bl)*x\n\t\tif cl < 0 {\n\t\t\treturn 0\n\t\t} else if cl >= 1<<16-1 {\n\t\t\treturn 255\n\t\t}\n\t\tout := linearTosRGB8(uint16(cl))\n\t\tif out > lim {\n\t\t\tout = lim\n\t\t}\n\t\treturn out\n\t}\n\tlinear := func(a, b uint8, x float64) uint8 {\n\t\tc := int(float64(a)*(1-x) + float64(b)*x)\n\t\tif c <= 0 {\n\t\t\treturn 0\n\t\t} else if c >= 255 {\n\t\t\treturn 255\n\t\t}\n\t\treturn uint8(c)\n\t}\n\n\tif a.A == b.A {\n\t\t// No need to undo the alpha pre-multiplication.\n\t\treturn color.RGBA{\n\t\t\tblend(a.R, b.R, x, a.A),\n\t\t\tblend(a.G, b.G, x, a.A),\n\t\t\tblend(a.B, b.B, x, a.A),\n\t\t\ta.A,\n\t\t}\n\t}\n\n\t// Un-premultiply the alpha, map to linear RGB, blend in\n\t// linear RGB, map back to sRGB, and re-premultiply the 
alpha.\n\tif a.A == 0 {\n\t\treturn color.RGBA{b.R, b.G, b.B, linear(a.A, b.A, x)}\n\t} else if b.A == 0 {\n\t\treturn color.RGBA{a.R, a.G, a.B, linear(a.A, b.A, x)}\n\t}\n\t// TODO: This loses precision. Maybe use 16 bit sRGB?\n\ta.R = uint8(uint16(a.R) * 255 / uint16(a.A))\n\ta.G = uint8(uint16(a.G) * 255 / uint16(a.A))\n\ta.B = uint8(uint16(a.B) * 255 / uint16(a.A))\n\tb.R = uint8(uint16(b.R) * 255 / uint16(b.A))\n\tb.G = uint8(uint16(b.G) * 255 / uint16(b.A))\n\tb.B = uint8(uint16(b.B) * 255 / uint16(b.A))\n\tc := color.RGBA{\n\t\tblend(a.R, b.R, x, 255),\n\t\tblend(a.G, b.G, x, 255),\n\t\tblend(a.B, b.B, x, 255),\n\t\tlinear(a.A, b.A, x),\n\t}\n\tc.R = uint8(uint16(c.R) * uint16(c.A) / 255)\n\tc.G = uint8(uint16(c.G) * uint16(c.A) / 255)\n\tc.B = uint8(uint16(c.B) * uint16(c.A) / 255)\n\treturn c\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/palette/brewer/brewer.go",
    "content": "// Generated by genbrewer. DO NOT EDIT.\n// Please see license at http://colorbrewer.org/export/LICENSE.txt\n\npackage brewer\n\nimport \"image/color\"\n\nvar (\n\t// Accent is a qualitative palette.\n\tAccent = map[int][]color.Color{3: Accent_3, 4: Accent_4, 5: Accent_5, 6: Accent_6, 7: Accent_7, 8: Accent_8}\n\tAccent_3 = []color.Color{color.RGBA{127, 201, 127, 255}, color.RGBA{190, 174, 212, 255}, color.RGBA{253, 192, 134, 255}}\n\tAccent_4 = []color.Color{color.RGBA{127, 201, 127, 255}, color.RGBA{190, 174, 212, 255}, color.RGBA{253, 192, 134, 255}, color.RGBA{255, 255, 153, 255}}\n\tAccent_5 = []color.Color{color.RGBA{127, 201, 127, 255}, color.RGBA{190, 174, 212, 255}, color.RGBA{253, 192, 134, 255}, color.RGBA{255, 255, 153, 255}, color.RGBA{56, 108, 176, 255}}\n\tAccent_6 = []color.Color{color.RGBA{127, 201, 127, 255}, color.RGBA{190, 174, 212, 255}, color.RGBA{253, 192, 134, 255}, color.RGBA{255, 255, 153, 255}, color.RGBA{56, 108, 176, 255}, color.RGBA{240, 2, 127, 255}}\n\tAccent_7 = []color.Color{color.RGBA{127, 201, 127, 255}, color.RGBA{190, 174, 212, 255}, color.RGBA{253, 192, 134, 255}, color.RGBA{255, 255, 153, 255}, color.RGBA{56, 108, 176, 255}, color.RGBA{240, 2, 127, 255}, color.RGBA{191, 91, 23, 255}}\n\tAccent_8 = []color.Color{color.RGBA{127, 201, 127, 255}, color.RGBA{190, 174, 212, 255}, color.RGBA{253, 192, 134, 255}, color.RGBA{255, 255, 153, 255}, color.RGBA{56, 108, 176, 255}, color.RGBA{240, 2, 127, 255}, color.RGBA{191, 91, 23, 255}, color.RGBA{102, 102, 102, 255}}\n)\n\nvar (\n\t// Blues is a sequential palette.\n\tBlues = map[int][]color.Color{3: Blues_3, 4: Blues_4, 5: Blues_5, 6: Blues_6, 7: Blues_7, 8: Blues_8, 9: Blues_9}\n\tBlues_3 = []color.Color{color.RGBA{222, 235, 247, 255}, color.RGBA{158, 202, 225, 255}, color.RGBA{49, 130, 189, 255}}\n\tBlues_4 = []color.Color{color.RGBA{239, 243, 255, 255}, color.RGBA{189, 215, 231, 255}, color.RGBA{107, 174, 214, 255}, color.RGBA{33, 113, 181, 255}}\n\tBlues_5 = 
[]color.Color{color.RGBA{239, 243, 255, 255}, color.RGBA{189, 215, 231, 255}, color.RGBA{107, 174, 214, 255}, color.RGBA{49, 130, 189, 255}, color.RGBA{8, 81, 156, 255}}\n\tBlues_6 = []color.Color{color.RGBA{239, 243, 255, 255}, color.RGBA{198, 219, 239, 255}, color.RGBA{158, 202, 225, 255}, color.RGBA{107, 174, 214, 255}, color.RGBA{49, 130, 189, 255}, color.RGBA{8, 81, 156, 255}}\n\tBlues_7 = []color.Color{color.RGBA{239, 243, 255, 255}, color.RGBA{198, 219, 239, 255}, color.RGBA{158, 202, 225, 255}, color.RGBA{107, 174, 214, 255}, color.RGBA{66, 146, 198, 255}, color.RGBA{33, 113, 181, 255}, color.RGBA{8, 69, 148, 255}}\n\tBlues_8 = []color.Color{color.RGBA{247, 251, 255, 255}, color.RGBA{222, 235, 247, 255}, color.RGBA{198, 219, 239, 255}, color.RGBA{158, 202, 225, 255}, color.RGBA{107, 174, 214, 255}, color.RGBA{66, 146, 198, 255}, color.RGBA{33, 113, 181, 255}, color.RGBA{8, 69, 148, 255}}\n\tBlues_9 = []color.Color{color.RGBA{247, 251, 255, 255}, color.RGBA{222, 235, 247, 255}, color.RGBA{198, 219, 239, 255}, color.RGBA{158, 202, 225, 255}, color.RGBA{107, 174, 214, 255}, color.RGBA{66, 146, 198, 255}, color.RGBA{33, 113, 181, 255}, color.RGBA{8, 81, 156, 255}, color.RGBA{8, 48, 107, 255}}\n)\n\nvar (\n\t// BrBG is a diverging palette.\n\tBrBG = map[int][]color.Color{3: BrBG_3, 4: BrBG_4, 5: BrBG_5, 6: BrBG_6, 7: BrBG_7, 8: BrBG_8, 9: BrBG_9, 10: BrBG_10, 11: BrBG_11}\n\tBrBG_3 = []color.Color{color.RGBA{216, 179, 101, 255}, color.RGBA{245, 245, 245, 255}, color.RGBA{90, 180, 172, 255}}\n\tBrBG_4 = []color.Color{color.RGBA{166, 97, 26, 255}, color.RGBA{223, 194, 125, 255}, color.RGBA{128, 205, 193, 255}, color.RGBA{1, 133, 113, 255}}\n\tBrBG_5 = []color.Color{color.RGBA{166, 97, 26, 255}, color.RGBA{223, 194, 125, 255}, color.RGBA{245, 245, 245, 255}, color.RGBA{128, 205, 193, 255}, color.RGBA{1, 133, 113, 255}}\n\tBrBG_6 = []color.Color{color.RGBA{140, 81, 10, 255}, color.RGBA{216, 179, 101, 255}, color.RGBA{246, 232, 195, 255}, color.RGBA{199, 234, 229, 
255}, color.RGBA{90, 180, 172, 255}, color.RGBA{1, 102, 94, 255}}\n\tBrBG_7 = []color.Color{color.RGBA{140, 81, 10, 255}, color.RGBA{216, 179, 101, 255}, color.RGBA{246, 232, 195, 255}, color.RGBA{245, 245, 245, 255}, color.RGBA{199, 234, 229, 255}, color.RGBA{90, 180, 172, 255}, color.RGBA{1, 102, 94, 255}}\n\tBrBG_8 = []color.Color{color.RGBA{140, 81, 10, 255}, color.RGBA{191, 129, 45, 255}, color.RGBA{223, 194, 125, 255}, color.RGBA{246, 232, 195, 255}, color.RGBA{199, 234, 229, 255}, color.RGBA{128, 205, 193, 255}, color.RGBA{53, 151, 143, 255}, color.RGBA{1, 102, 94, 255}}\n\tBrBG_9 = []color.Color{color.RGBA{140, 81, 10, 255}, color.RGBA{191, 129, 45, 255}, color.RGBA{223, 194, 125, 255}, color.RGBA{246, 232, 195, 255}, color.RGBA{245, 245, 245, 255}, color.RGBA{199, 234, 229, 255}, color.RGBA{128, 205, 193, 255}, color.RGBA{53, 151, 143, 255}, color.RGBA{1, 102, 94, 255}}\n\tBrBG_10 = []color.Color{color.RGBA{84, 48, 5, 255}, color.RGBA{140, 81, 10, 255}, color.RGBA{191, 129, 45, 255}, color.RGBA{223, 194, 125, 255}, color.RGBA{246, 232, 195, 255}, color.RGBA{199, 234, 229, 255}, color.RGBA{128, 205, 193, 255}, color.RGBA{53, 151, 143, 255}, color.RGBA{1, 102, 94, 255}, color.RGBA{0, 60, 48, 255}}\n\tBrBG_11 = []color.Color{color.RGBA{84, 48, 5, 255}, color.RGBA{140, 81, 10, 255}, color.RGBA{191, 129, 45, 255}, color.RGBA{223, 194, 125, 255}, color.RGBA{246, 232, 195, 255}, color.RGBA{245, 245, 245, 255}, color.RGBA{199, 234, 229, 255}, color.RGBA{128, 205, 193, 255}, color.RGBA{53, 151, 143, 255}, color.RGBA{1, 102, 94, 255}, color.RGBA{0, 60, 48, 255}}\n)\n\nvar (\n\t// BuGn is a sequential palette.\n\tBuGn = map[int][]color.Color{3: BuGn_3, 4: BuGn_4, 5: BuGn_5, 6: BuGn_6, 7: BuGn_7, 8: BuGn_8, 9: BuGn_9}\n\tBuGn_3 = []color.Color{color.RGBA{229, 245, 249, 255}, color.RGBA{153, 216, 201, 255}, color.RGBA{44, 162, 95, 255}}\n\tBuGn_4 = []color.Color{color.RGBA{237, 248, 251, 255}, color.RGBA{178, 226, 226, 255}, color.RGBA{102, 194, 164, 255}, 
color.RGBA{35, 139, 69, 255}}\n\tBuGn_5 = []color.Color{color.RGBA{237, 248, 251, 255}, color.RGBA{178, 226, 226, 255}, color.RGBA{102, 194, 164, 255}, color.RGBA{44, 162, 95, 255}, color.RGBA{0, 109, 44, 255}}\n\tBuGn_6 = []color.Color{color.RGBA{237, 248, 251, 255}, color.RGBA{204, 236, 230, 255}, color.RGBA{153, 216, 201, 255}, color.RGBA{102, 194, 164, 255}, color.RGBA{44, 162, 95, 255}, color.RGBA{0, 109, 44, 255}}\n\tBuGn_7 = []color.Color{color.RGBA{237, 248, 251, 255}, color.RGBA{204, 236, 230, 255}, color.RGBA{153, 216, 201, 255}, color.RGBA{102, 194, 164, 255}, color.RGBA{65, 174, 118, 255}, color.RGBA{35, 139, 69, 255}, color.RGBA{0, 88, 36, 255}}\n\tBuGn_8 = []color.Color{color.RGBA{247, 252, 253, 255}, color.RGBA{229, 245, 249, 255}, color.RGBA{204, 236, 230, 255}, color.RGBA{153, 216, 201, 255}, color.RGBA{102, 194, 164, 255}, color.RGBA{65, 174, 118, 255}, color.RGBA{35, 139, 69, 255}, color.RGBA{0, 88, 36, 255}}\n\tBuGn_9 = []color.Color{color.RGBA{247, 252, 253, 255}, color.RGBA{229, 245, 249, 255}, color.RGBA{204, 236, 230, 255}, color.RGBA{153, 216, 201, 255}, color.RGBA{102, 194, 164, 255}, color.RGBA{65, 174, 118, 255}, color.RGBA{35, 139, 69, 255}, color.RGBA{0, 109, 44, 255}, color.RGBA{0, 68, 27, 255}}\n)\n\nvar (\n\t// BuPu is a sequential palette.\n\tBuPu = map[int][]color.Color{3: BuPu_3, 4: BuPu_4, 5: BuPu_5, 6: BuPu_6, 7: BuPu_7, 8: BuPu_8, 9: BuPu_9}\n\tBuPu_3 = []color.Color{color.RGBA{224, 236, 244, 255}, color.RGBA{158, 188, 218, 255}, color.RGBA{136, 86, 167, 255}}\n\tBuPu_4 = []color.Color{color.RGBA{237, 248, 251, 255}, color.RGBA{179, 205, 227, 255}, color.RGBA{140, 150, 198, 255}, color.RGBA{136, 65, 157, 255}}\n\tBuPu_5 = []color.Color{color.RGBA{237, 248, 251, 255}, color.RGBA{179, 205, 227, 255}, color.RGBA{140, 150, 198, 255}, color.RGBA{136, 86, 167, 255}, color.RGBA{129, 15, 124, 255}}\n\tBuPu_6 = []color.Color{color.RGBA{237, 248, 251, 255}, color.RGBA{191, 211, 230, 255}, color.RGBA{158, 188, 218, 255}, color.RGBA{140, 
150, 198, 255}, color.RGBA{136, 86, 167, 255}, color.RGBA{129, 15, 124, 255}}\n\tBuPu_7 = []color.Color{color.RGBA{237, 248, 251, 255}, color.RGBA{191, 211, 230, 255}, color.RGBA{158, 188, 218, 255}, color.RGBA{140, 150, 198, 255}, color.RGBA{140, 107, 177, 255}, color.RGBA{136, 65, 157, 255}, color.RGBA{110, 1, 107, 255}}\n\tBuPu_8 = []color.Color{color.RGBA{247, 252, 253, 255}, color.RGBA{224, 236, 244, 255}, color.RGBA{191, 211, 230, 255}, color.RGBA{158, 188, 218, 255}, color.RGBA{140, 150, 198, 255}, color.RGBA{140, 107, 177, 255}, color.RGBA{136, 65, 157, 255}, color.RGBA{110, 1, 107, 255}}\n\tBuPu_9 = []color.Color{color.RGBA{247, 252, 253, 255}, color.RGBA{224, 236, 244, 255}, color.RGBA{191, 211, 230, 255}, color.RGBA{158, 188, 218, 255}, color.RGBA{140, 150, 198, 255}, color.RGBA{140, 107, 177, 255}, color.RGBA{136, 65, 157, 255}, color.RGBA{129, 15, 124, 255}, color.RGBA{77, 0, 75, 255}}\n)\n\nvar (\n\t// Dark2 is a qualitative palette.\n\tDark2 = map[int][]color.Color{3: Dark2_3, 4: Dark2_4, 5: Dark2_5, 6: Dark2_6, 7: Dark2_7, 8: Dark2_8}\n\tDark2_3 = []color.Color{color.RGBA{27, 158, 119, 255}, color.RGBA{217, 95, 2, 255}, color.RGBA{117, 112, 179, 255}}\n\tDark2_4 = []color.Color{color.RGBA{27, 158, 119, 255}, color.RGBA{217, 95, 2, 255}, color.RGBA{117, 112, 179, 255}, color.RGBA{231, 41, 138, 255}}\n\tDark2_5 = []color.Color{color.RGBA{27, 158, 119, 255}, color.RGBA{217, 95, 2, 255}, color.RGBA{117, 112, 179, 255}, color.RGBA{231, 41, 138, 255}, color.RGBA{102, 166, 30, 255}}\n\tDark2_6 = []color.Color{color.RGBA{27, 158, 119, 255}, color.RGBA{217, 95, 2, 255}, color.RGBA{117, 112, 179, 255}, color.RGBA{231, 41, 138, 255}, color.RGBA{102, 166, 30, 255}, color.RGBA{230, 171, 2, 255}}\n\tDark2_7 = []color.Color{color.RGBA{27, 158, 119, 255}, color.RGBA{217, 95, 2, 255}, color.RGBA{117, 112, 179, 255}, color.RGBA{231, 41, 138, 255}, color.RGBA{102, 166, 30, 255}, color.RGBA{230, 171, 2, 255}, color.RGBA{166, 118, 29, 255}}\n\tDark2_8 = 
[]color.Color{color.RGBA{27, 158, 119, 255}, color.RGBA{217, 95, 2, 255}, color.RGBA{117, 112, 179, 255}, color.RGBA{231, 41, 138, 255}, color.RGBA{102, 166, 30, 255}, color.RGBA{230, 171, 2, 255}, color.RGBA{166, 118, 29, 255}, color.RGBA{102, 102, 102, 255}}\n)\n\nvar (\n\t// GnBu is a sequential palette.\n\tGnBu = map[int][]color.Color{3: GnBu_3, 4: GnBu_4, 5: GnBu_5, 6: GnBu_6, 7: GnBu_7, 8: GnBu_8, 9: GnBu_9}\n\tGnBu_3 = []color.Color{color.RGBA{224, 243, 219, 255}, color.RGBA{168, 221, 181, 255}, color.RGBA{67, 162, 202, 255}}\n\tGnBu_4 = []color.Color{color.RGBA{240, 249, 232, 255}, color.RGBA{186, 228, 188, 255}, color.RGBA{123, 204, 196, 255}, color.RGBA{43, 140, 190, 255}}\n\tGnBu_5 = []color.Color{color.RGBA{240, 249, 232, 255}, color.RGBA{186, 228, 188, 255}, color.RGBA{123, 204, 196, 255}, color.RGBA{67, 162, 202, 255}, color.RGBA{8, 104, 172, 255}}\n\tGnBu_6 = []color.Color{color.RGBA{240, 249, 232, 255}, color.RGBA{204, 235, 197, 255}, color.RGBA{168, 221, 181, 255}, color.RGBA{123, 204, 196, 255}, color.RGBA{67, 162, 202, 255}, color.RGBA{8, 104, 172, 255}}\n\tGnBu_7 = []color.Color{color.RGBA{240, 249, 232, 255}, color.RGBA{204, 235, 197, 255}, color.RGBA{168, 221, 181, 255}, color.RGBA{123, 204, 196, 255}, color.RGBA{78, 179, 211, 255}, color.RGBA{43, 140, 190, 255}, color.RGBA{8, 88, 158, 255}}\n\tGnBu_8 = []color.Color{color.RGBA{247, 252, 240, 255}, color.RGBA{224, 243, 219, 255}, color.RGBA{204, 235, 197, 255}, color.RGBA{168, 221, 181, 255}, color.RGBA{123, 204, 196, 255}, color.RGBA{78, 179, 211, 255}, color.RGBA{43, 140, 190, 255}, color.RGBA{8, 88, 158, 255}}\n\tGnBu_9 = []color.Color{color.RGBA{247, 252, 240, 255}, color.RGBA{224, 243, 219, 255}, color.RGBA{204, 235, 197, 255}, color.RGBA{168, 221, 181, 255}, color.RGBA{123, 204, 196, 255}, color.RGBA{78, 179, 211, 255}, color.RGBA{43, 140, 190, 255}, color.RGBA{8, 104, 172, 255}, color.RGBA{8, 64, 129, 255}}\n)\n\nvar (\n\t// Greens is a sequential palette.\n\tGreens = 
map[int][]color.Color{3: Greens_3, 4: Greens_4, 5: Greens_5, 6: Greens_6, 7: Greens_7, 8: Greens_8, 9: Greens_9}\n\tGreens_3 = []color.Color{color.RGBA{229, 245, 224, 255}, color.RGBA{161, 217, 155, 255}, color.RGBA{49, 163, 84, 255}}\n\tGreens_4 = []color.Color{color.RGBA{237, 248, 233, 255}, color.RGBA{186, 228, 179, 255}, color.RGBA{116, 196, 118, 255}, color.RGBA{35, 139, 69, 255}}\n\tGreens_5 = []color.Color{color.RGBA{237, 248, 233, 255}, color.RGBA{186, 228, 179, 255}, color.RGBA{116, 196, 118, 255}, color.RGBA{49, 163, 84, 255}, color.RGBA{0, 109, 44, 255}}\n\tGreens_6 = []color.Color{color.RGBA{237, 248, 233, 255}, color.RGBA{199, 233, 192, 255}, color.RGBA{161, 217, 155, 255}, color.RGBA{116, 196, 118, 255}, color.RGBA{49, 163, 84, 255}, color.RGBA{0, 109, 44, 255}}\n\tGreens_7 = []color.Color{color.RGBA{237, 248, 233, 255}, color.RGBA{199, 233, 192, 255}, color.RGBA{161, 217, 155, 255}, color.RGBA{116, 196, 118, 255}, color.RGBA{65, 171, 93, 255}, color.RGBA{35, 139, 69, 255}, color.RGBA{0, 90, 50, 255}}\n\tGreens_8 = []color.Color{color.RGBA{247, 252, 245, 255}, color.RGBA{229, 245, 224, 255}, color.RGBA{199, 233, 192, 255}, color.RGBA{161, 217, 155, 255}, color.RGBA{116, 196, 118, 255}, color.RGBA{65, 171, 93, 255}, color.RGBA{35, 139, 69, 255}, color.RGBA{0, 90, 50, 255}}\n\tGreens_9 = []color.Color{color.RGBA{247, 252, 245, 255}, color.RGBA{229, 245, 224, 255}, color.RGBA{199, 233, 192, 255}, color.RGBA{161, 217, 155, 255}, color.RGBA{116, 196, 118, 255}, color.RGBA{65, 171, 93, 255}, color.RGBA{35, 139, 69, 255}, color.RGBA{0, 109, 44, 255}, color.RGBA{0, 68, 27, 255}}\n)\n\nvar (\n\t// Greys is a sequential palette.\n\tGreys = map[int][]color.Color{3: Greys_3, 4: Greys_4, 5: Greys_5, 6: Greys_6, 7: Greys_7, 8: Greys_8, 9: Greys_9}\n\tGreys_3 = []color.Color{color.RGBA{240, 240, 240, 255}, color.RGBA{189, 189, 189, 255}, color.RGBA{99, 99, 99, 255}}\n\tGreys_4 = []color.Color{color.RGBA{247, 247, 247, 255}, color.RGBA{204, 204, 204, 255}, 
color.RGBA{150, 150, 150, 255}, color.RGBA{82, 82, 82, 255}}\n\tGreys_5 = []color.Color{color.RGBA{247, 247, 247, 255}, color.RGBA{204, 204, 204, 255}, color.RGBA{150, 150, 150, 255}, color.RGBA{99, 99, 99, 255}, color.RGBA{37, 37, 37, 255}}\n\tGreys_6 = []color.Color{color.RGBA{247, 247, 247, 255}, color.RGBA{217, 217, 217, 255}, color.RGBA{189, 189, 189, 255}, color.RGBA{150, 150, 150, 255}, color.RGBA{99, 99, 99, 255}, color.RGBA{37, 37, 37, 255}}\n\tGreys_7 = []color.Color{color.RGBA{247, 247, 247, 255}, color.RGBA{217, 217, 217, 255}, color.RGBA{189, 189, 189, 255}, color.RGBA{150, 150, 150, 255}, color.RGBA{115, 115, 115, 255}, color.RGBA{82, 82, 82, 255}, color.RGBA{37, 37, 37, 255}}\n\tGreys_8 = []color.Color{color.RGBA{255, 255, 255, 255}, color.RGBA{240, 240, 240, 255}, color.RGBA{217, 217, 217, 255}, color.RGBA{189, 189, 189, 255}, color.RGBA{150, 150, 150, 255}, color.RGBA{115, 115, 115, 255}, color.RGBA{82, 82, 82, 255}, color.RGBA{37, 37, 37, 255}}\n\tGreys_9 = []color.Color{color.RGBA{255, 255, 255, 255}, color.RGBA{240, 240, 240, 255}, color.RGBA{217, 217, 217, 255}, color.RGBA{189, 189, 189, 255}, color.RGBA{150, 150, 150, 255}, color.RGBA{115, 115, 115, 255}, color.RGBA{82, 82, 82, 255}, color.RGBA{37, 37, 37, 255}, color.RGBA{0, 0, 0, 255}}\n)\n\nvar (\n\t// OrRd is a sequential palette.\n\tOrRd = map[int][]color.Color{3: OrRd_3, 4: OrRd_4, 5: OrRd_5, 6: OrRd_6, 7: OrRd_7, 8: OrRd_8, 9: OrRd_9}\n\tOrRd_3 = []color.Color{color.RGBA{254, 232, 200, 255}, color.RGBA{253, 187, 132, 255}, color.RGBA{227, 74, 51, 255}}\n\tOrRd_4 = []color.Color{color.RGBA{254, 240, 217, 255}, color.RGBA{253, 204, 138, 255}, color.RGBA{252, 141, 89, 255}, color.RGBA{215, 48, 31, 255}}\n\tOrRd_5 = []color.Color{color.RGBA{254, 240, 217, 255}, color.RGBA{253, 204, 138, 255}, color.RGBA{252, 141, 89, 255}, color.RGBA{227, 74, 51, 255}, color.RGBA{179, 0, 0, 255}}\n\tOrRd_6 = []color.Color{color.RGBA{254, 240, 217, 255}, color.RGBA{253, 212, 158, 255}, color.RGBA{253, 187, 
132, 255}, color.RGBA{252, 141, 89, 255}, color.RGBA{227, 74, 51, 255}, color.RGBA{179, 0, 0, 255}}\n\tOrRd_7 = []color.Color{color.RGBA{254, 240, 217, 255}, color.RGBA{253, 212, 158, 255}, color.RGBA{253, 187, 132, 255}, color.RGBA{252, 141, 89, 255}, color.RGBA{239, 101, 72, 255}, color.RGBA{215, 48, 31, 255}, color.RGBA{153, 0, 0, 255}}\n\tOrRd_8 = []color.Color{color.RGBA{255, 247, 236, 255}, color.RGBA{254, 232, 200, 255}, color.RGBA{253, 212, 158, 255}, color.RGBA{253, 187, 132, 255}, color.RGBA{252, 141, 89, 255}, color.RGBA{239, 101, 72, 255}, color.RGBA{215, 48, 31, 255}, color.RGBA{153, 0, 0, 255}}\n\tOrRd_9 = []color.Color{color.RGBA{255, 247, 236, 255}, color.RGBA{254, 232, 200, 255}, color.RGBA{253, 212, 158, 255}, color.RGBA{253, 187, 132, 255}, color.RGBA{252, 141, 89, 255}, color.RGBA{239, 101, 72, 255}, color.RGBA{215, 48, 31, 255}, color.RGBA{179, 0, 0, 255}, color.RGBA{127, 0, 0, 255}}\n)\n\nvar (\n\t// Oranges is a sequential palette.\n\tOranges = map[int][]color.Color{3: Oranges_3, 4: Oranges_4, 5: Oranges_5, 6: Oranges_6, 7: Oranges_7, 8: Oranges_8, 9: Oranges_9}\n\tOranges_3 = []color.Color{color.RGBA{254, 230, 206, 255}, color.RGBA{253, 174, 107, 255}, color.RGBA{230, 85, 13, 255}}\n\tOranges_4 = []color.Color{color.RGBA{254, 237, 222, 255}, color.RGBA{253, 190, 133, 255}, color.RGBA{253, 141, 60, 255}, color.RGBA{217, 71, 1, 255}}\n\tOranges_5 = []color.Color{color.RGBA{254, 237, 222, 255}, color.RGBA{253, 190, 133, 255}, color.RGBA{253, 141, 60, 255}, color.RGBA{230, 85, 13, 255}, color.RGBA{166, 54, 3, 255}}\n\tOranges_6 = []color.Color{color.RGBA{254, 237, 222, 255}, color.RGBA{253, 208, 162, 255}, color.RGBA{253, 174, 107, 255}, color.RGBA{253, 141, 60, 255}, color.RGBA{230, 85, 13, 255}, color.RGBA{166, 54, 3, 255}}\n\tOranges_7 = []color.Color{color.RGBA{254, 237, 222, 255}, color.RGBA{253, 208, 162, 255}, color.RGBA{253, 174, 107, 255}, color.RGBA{253, 141, 60, 255}, color.RGBA{241, 105, 19, 255}, color.RGBA{217, 72, 1, 255}, 
color.RGBA{140, 45, 4, 255}}
	Oranges_8 = []color.Color{color.RGBA{255, 245, 235, 255}, color.RGBA{254, 230, 206, 255}, color.RGBA{253, 208, 162, 255}, color.RGBA{253, 174, 107, 255}, color.RGBA{253, 141, 60, 255}, color.RGBA{241, 105, 19, 255}, color.RGBA{217, 72, 1, 255}, color.RGBA{140, 45, 4, 255}}
	Oranges_9 = []color.Color{color.RGBA{255, 245, 235, 255}, color.RGBA{254, 230, 206, 255}, color.RGBA{253, 208, 162, 255}, color.RGBA{253, 174, 107, 255}, color.RGBA{253, 141, 60, 255}, color.RGBA{241, 105, 19, 255}, color.RGBA{217, 72, 1, 255}, color.RGBA{166, 54, 3, 255}, color.RGBA{127, 39, 4, 255}}
)

var (
	// PRGn is a diverging palette; the map is keyed by palette size (3-11).
	PRGn = map[int][]color.Color{3: PRGn_3, 4: PRGn_4, 5: PRGn_5, 6: PRGn_6, 7: PRGn_7, 8: PRGn_8, 9: PRGn_9, 10: PRGn_10, 11: PRGn_11}
	PRGn_3 = []color.Color{color.RGBA{175, 141, 195, 255}, color.RGBA{247, 247, 247, 255}, color.RGBA{127, 191, 123, 255}}
	PRGn_4 = []color.Color{color.RGBA{123, 50, 148, 255}, color.RGBA{194, 165, 207, 255}, color.RGBA{166, 219, 160, 255}, color.RGBA{0, 136, 55, 255}}
	PRGn_5 = []color.Color{color.RGBA{123, 50, 148, 255}, color.RGBA{194, 165, 207, 255}, color.RGBA{247, 247, 247, 255}, color.RGBA{166, 219, 160, 255}, color.RGBA{0, 136, 55, 255}}
	PRGn_6 = []color.Color{color.RGBA{118, 42, 131, 255}, color.RGBA{175, 141, 195, 255}, color.RGBA{231, 212, 232, 255}, color.RGBA{217, 240, 211, 255}, color.RGBA{127, 191, 123, 255}, color.RGBA{27, 120, 55, 255}}
	PRGn_7 = []color.Color{color.RGBA{118, 42, 131, 255}, color.RGBA{175, 141, 195, 255}, color.RGBA{231, 212, 232, 255}, color.RGBA{247, 247, 247, 255}, color.RGBA{217, 240, 211, 255}, color.RGBA{127, 191, 123, 255}, color.RGBA{27, 120, 55, 255}}
	PRGn_8 = []color.Color{color.RGBA{118, 42, 131, 255}, color.RGBA{153, 112, 171, 255}, color.RGBA{194, 165, 207, 255}, color.RGBA{231, 212, 232, 255}, color.RGBA{217, 240, 211, 255}, color.RGBA{166, 219, 160, 255}, color.RGBA{90, 174, 97, 255}, color.RGBA{27, 120, 55, 255}}
	PRGn_9 = []color.Color{color.RGBA{118, 42, 131, 255}, color.RGBA{153, 112, 171, 255}, color.RGBA{194, 165, 207, 255}, color.RGBA{231, 212, 232, 255}, color.RGBA{247, 247, 247, 255}, color.RGBA{217, 240, 211, 255}, color.RGBA{166, 219, 160, 255}, color.RGBA{90, 174, 97, 255}, color.RGBA{27, 120, 55, 255}}
	PRGn_10 = []color.Color{color.RGBA{64, 0, 75, 255}, color.RGBA{118, 42, 131, 255}, color.RGBA{153, 112, 171, 255}, color.RGBA{194, 165, 207, 255}, color.RGBA{231, 212, 232, 255}, color.RGBA{217, 240, 211, 255}, color.RGBA{166, 219, 160, 255}, color.RGBA{90, 174, 97, 255}, color.RGBA{27, 120, 55, 255}, color.RGBA{0, 68, 27, 255}}
	PRGn_11 = []color.Color{color.RGBA{64, 0, 75, 255}, color.RGBA{118, 42, 131, 255}, color.RGBA{153, 112, 171, 255}, color.RGBA{194, 165, 207, 255}, color.RGBA{231, 212, 232, 255}, color.RGBA{247, 247, 247, 255}, color.RGBA{217, 240, 211, 255}, color.RGBA{166, 219, 160, 255}, color.RGBA{90, 174, 97, 255}, color.RGBA{27, 120, 55, 255}, color.RGBA{0, 68, 27, 255}}
)

var (
	// Paired is a qualitative palette; the map is keyed by palette size (3-12).
	Paired = map[int][]color.Color{3: Paired_3, 4: Paired_4, 5: Paired_5, 6: Paired_6, 7: Paired_7, 8: Paired_8, 9: Paired_9, 10: Paired_10, 11: Paired_11, 12: Paired_12}
	Paired_3 = []color.Color{color.RGBA{166, 206, 227, 255}, color.RGBA{31, 120, 180, 255}, color.RGBA{178, 223, 138, 255}}
	Paired_4 = []color.Color{color.RGBA{166, 206, 227, 255}, color.RGBA{31, 120, 180, 255}, color.RGBA{178, 223, 138, 255}, color.RGBA{51, 160, 44, 255}}
	Paired_5 = []color.Color{color.RGBA{166, 206, 227, 255}, color.RGBA{31, 120, 180, 255}, color.RGBA{178, 223, 138, 255}, color.RGBA{51, 160, 44, 255}, color.RGBA{251, 154, 153, 255}}
	Paired_6 = []color.Color{color.RGBA{166, 206, 227, 255}, color.RGBA{31, 120, 180, 255}, color.RGBA{178, 223, 138, 255}, color.RGBA{51, 160, 44, 255}, color.RGBA{251, 154, 153, 255}, color.RGBA{227, 26, 28, 255}}
	Paired_7 = []color.Color{color.RGBA{166, 206, 227, 255}, color.RGBA{31, 120, 180, 255}, color.RGBA{178, 223, 138, 255}, color.RGBA{51, 160, 44, 255}, color.RGBA{251, 154, 153, 255}, color.RGBA{227, 26, 28, 255}, color.RGBA{253, 191, 111, 255}}
	Paired_8 = []color.Color{color.RGBA{166, 206, 227, 255}, color.RGBA{31, 120, 180, 255}, color.RGBA{178, 223, 138, 255}, color.RGBA{51, 160, 44, 255}, color.RGBA{251, 154, 153, 255}, color.RGBA{227, 26, 28, 255}, color.RGBA{253, 191, 111, 255}, color.RGBA{255, 127, 0, 255}}
	Paired_9 = []color.Color{color.RGBA{166, 206, 227, 255}, color.RGBA{31, 120, 180, 255}, color.RGBA{178, 223, 138, 255}, color.RGBA{51, 160, 44, 255}, color.RGBA{251, 154, 153, 255}, color.RGBA{227, 26, 28, 255}, color.RGBA{253, 191, 111, 255}, color.RGBA{255, 127, 0, 255}, color.RGBA{202, 178, 214, 255}}
	Paired_10 = []color.Color{color.RGBA{166, 206, 227, 255}, color.RGBA{31, 120, 180, 255}, color.RGBA{178, 223, 138, 255}, color.RGBA{51, 160, 44, 255}, color.RGBA{251, 154, 153, 255}, color.RGBA{227, 26, 28, 255}, color.RGBA{253, 191, 111, 255}, color.RGBA{255, 127, 0, 255}, color.RGBA{202, 178, 214, 255}, color.RGBA{106, 61, 154, 255}}
	Paired_11 = []color.Color{color.RGBA{166, 206, 227, 255}, color.RGBA{31, 120, 180, 255}, color.RGBA{178, 223, 138, 255}, color.RGBA{51, 160, 44, 255}, color.RGBA{251, 154, 153, 255}, color.RGBA{227, 26, 28, 255}, color.RGBA{253, 191, 111, 255}, color.RGBA{255, 127, 0, 255}, color.RGBA{202, 178, 214, 255}, color.RGBA{106, 61, 154, 255}, color.RGBA{255, 255, 153, 255}}
	Paired_12 = []color.Color{color.RGBA{166, 206, 227, 255}, color.RGBA{31, 120, 180, 255}, color.RGBA{178, 223, 138, 255}, color.RGBA{51, 160, 44, 255}, color.RGBA{251, 154, 153, 255}, color.RGBA{227, 26, 28, 255}, color.RGBA{253, 191, 111, 255}, color.RGBA{255, 127, 0, 255}, color.RGBA{202, 178, 214, 255}, color.RGBA{106, 61, 154, 255}, color.RGBA{255, 255, 153, 255}, color.RGBA{177, 89, 40, 255}}
)

var (
	// Pastel1 is a qualitative palette; the map is keyed by palette size (3-9).
	Pastel1 = map[int][]color.Color{3: Pastel1_3, 4: Pastel1_4, 5: Pastel1_5, 6: Pastel1_6, 7: Pastel1_7, 8: Pastel1_8, 9: Pastel1_9}
	Pastel1_3 = []color.Color{color.RGBA{251, 180, 174, 255}, color.RGBA{179, 205, 227, 255}, color.RGBA{204, 235, 197, 255}}
	Pastel1_4 = []color.Color{color.RGBA{251, 180, 174, 255}, color.RGBA{179, 205, 227, 255}, color.RGBA{204, 235, 197, 255}, color.RGBA{222, 203, 228, 255}}
	Pastel1_5 = []color.Color{color.RGBA{251, 180, 174, 255}, color.RGBA{179, 205, 227, 255}, color.RGBA{204, 235, 197, 255}, color.RGBA{222, 203, 228, 255}, color.RGBA{254, 217, 166, 255}}
	Pastel1_6 = []color.Color{color.RGBA{251, 180, 174, 255}, color.RGBA{179, 205, 227, 255}, color.RGBA{204, 235, 197, 255}, color.RGBA{222, 203, 228, 255}, color.RGBA{254, 217, 166, 255}, color.RGBA{255, 255, 204, 255}}
	Pastel1_7 = []color.Color{color.RGBA{251, 180, 174, 255}, color.RGBA{179, 205, 227, 255}, color.RGBA{204, 235, 197, 255}, color.RGBA{222, 203, 228, 255}, color.RGBA{254, 217, 166, 255}, color.RGBA{255, 255, 204, 255}, color.RGBA{229, 216, 189, 255}}
	Pastel1_8 = []color.Color{color.RGBA{251, 180, 174, 255}, color.RGBA{179, 205, 227, 255}, color.RGBA{204, 235, 197, 255}, color.RGBA{222, 203, 228, 255}, color.RGBA{254, 217, 166, 255}, color.RGBA{255, 255, 204, 255}, color.RGBA{229, 216, 189, 255}, color.RGBA{253, 218, 236, 255}}
	Pastel1_9 = []color.Color{color.RGBA{251, 180, 174, 255}, color.RGBA{179, 205, 227, 255}, color.RGBA{204, 235, 197, 255}, color.RGBA{222, 203, 228, 255}, color.RGBA{254, 217, 166, 255}, color.RGBA{255, 255, 204, 255}, color.RGBA{229, 216, 189, 255}, color.RGBA{253, 218, 236, 255}, color.RGBA{242, 242, 242, 255}}
)

var (
	// Pastel2 is a qualitative palette; the map is keyed by palette size (3-8).
	Pastel2 = map[int][]color.Color{3: Pastel2_3, 4: Pastel2_4, 5: Pastel2_5, 6: Pastel2_6, 7: Pastel2_7, 8: Pastel2_8}
	Pastel2_3 = []color.Color{color.RGBA{179, 226, 205, 255}, color.RGBA{253, 205, 172, 255}, color.RGBA{203, 213, 232, 255}}
	Pastel2_4 = []color.Color{color.RGBA{179, 226, 205, 255}, color.RGBA{253, 205, 172, 255}, color.RGBA{203, 213, 232, 255}, color.RGBA{244, 202, 228, 255}}
	Pastel2_5 = []color.Color{color.RGBA{179, 226, 205, 255}, color.RGBA{253, 205, 172, 255}, color.RGBA{203, 213, 232, 255}, color.RGBA{244, 202, 228, 255}, color.RGBA{230, 245, 201, 255}}
	Pastel2_6 = []color.Color{color.RGBA{179, 226, 205, 255}, color.RGBA{253, 205, 172, 255}, color.RGBA{203, 213, 232, 255}, color.RGBA{244, 202, 228, 255}, color.RGBA{230, 245, 201, 255}, color.RGBA{255, 242, 174, 255}}
	Pastel2_7 = []color.Color{color.RGBA{179, 226, 205, 255}, color.RGBA{253, 205, 172, 255}, color.RGBA{203, 213, 232, 255}, color.RGBA{244, 202, 228, 255}, color.RGBA{230, 245, 201, 255}, color.RGBA{255, 242, 174, 255}, color.RGBA{241, 226, 204, 255}}
	Pastel2_8 = []color.Color{color.RGBA{179, 226, 205, 255}, color.RGBA{253, 205, 172, 255}, color.RGBA{203, 213, 232, 255}, color.RGBA{244, 202, 228, 255}, color.RGBA{230, 245, 201, 255}, color.RGBA{255, 242, 174, 255}, color.RGBA{241, 226, 204, 255}, color.RGBA{204, 204, 204, 255}}
)

var (
	// PiYG is a diverging palette; the map is keyed by palette size (3-11).
	PiYG = map[int][]color.Color{3: PiYG_3, 4: PiYG_4, 5: PiYG_5, 6: PiYG_6, 7: PiYG_7, 8: PiYG_8, 9: PiYG_9, 10: PiYG_10, 11: PiYG_11}
	PiYG_3 = []color.Color{color.RGBA{233, 163, 201, 255}, color.RGBA{247, 247, 247, 255}, color.RGBA{161, 215, 106, 255}}
	PiYG_4 = []color.Color{color.RGBA{208, 28, 139, 255}, color.RGBA{241, 182, 218, 255}, color.RGBA{184, 225, 134, 255}, color.RGBA{77, 172, 38, 255}}
	PiYG_5 = []color.Color{color.RGBA{208, 28, 139, 255}, color.RGBA{241, 182, 218, 255}, color.RGBA{247, 247, 247, 255}, color.RGBA{184, 225, 134, 255}, color.RGBA{77, 172, 38, 255}}
	PiYG_6 = []color.Color{color.RGBA{197, 27, 125, 255}, color.RGBA{233, 163, 201, 255}, color.RGBA{253, 224, 239, 255}, color.RGBA{230, 245, 208, 255}, color.RGBA{161, 215, 106, 255}, color.RGBA{77, 146, 33, 255}}
	PiYG_7 = []color.Color{color.RGBA{197, 27, 125, 255}, color.RGBA{233, 163, 201, 255}, color.RGBA{253, 224, 239, 255}, color.RGBA{247, 247, 247, 255}, color.RGBA{230, 245, 208, 255}, color.RGBA{161, 215, 106, 255}, color.RGBA{77, 146, 33, 255}}
	PiYG_8 = []color.Color{color.RGBA{197, 27, 125, 255}, color.RGBA{222, 119, 174, 255}, color.RGBA{241, 182, 218, 255}, color.RGBA{253, 224, 239, 255}, color.RGBA{230, 245, 208, 255}, color.RGBA{184, 225, 134, 255}, color.RGBA{127, 188, 65, 255}, color.RGBA{77, 146, 33, 255}}
	PiYG_9 = []color.Color{color.RGBA{197, 27, 125, 255}, color.RGBA{222, 119, 174, 255}, color.RGBA{241, 182, 218, 255}, color.RGBA{253, 224, 239, 255}, color.RGBA{247, 247, 247, 255}, color.RGBA{230, 245, 208, 255}, color.RGBA{184, 225, 134, 255}, color.RGBA{127, 188, 65, 255}, color.RGBA{77, 146, 33, 255}}
	PiYG_10 = []color.Color{color.RGBA{142, 1, 82, 255}, color.RGBA{197, 27, 125, 255}, color.RGBA{222, 119, 174, 255}, color.RGBA{241, 182, 218, 255}, color.RGBA{253, 224, 239, 255}, color.RGBA{230, 245, 208, 255}, color.RGBA{184, 225, 134, 255}, color.RGBA{127, 188, 65, 255}, color.RGBA{77, 146, 33, 255}, color.RGBA{39, 100, 25, 255}}
	PiYG_11 = []color.Color{color.RGBA{142, 1, 82, 255}, color.RGBA{197, 27, 125, 255}, color.RGBA{222, 119, 174, 255}, color.RGBA{241, 182, 218, 255}, color.RGBA{253, 224, 239, 255}, color.RGBA{247, 247, 247, 255}, color.RGBA{230, 245, 208, 255}, color.RGBA{184, 225, 134, 255}, color.RGBA{127, 188, 65, 255}, color.RGBA{77, 146, 33, 255}, color.RGBA{39, 100, 25, 255}}
)

var (
	// PuBu is a sequential palette; the map is keyed by palette size (3-9).
	PuBu = map[int][]color.Color{3: PuBu_3, 4: PuBu_4, 5: PuBu_5, 6: PuBu_6, 7: PuBu_7, 8: PuBu_8, 9: PuBu_9}
	PuBu_3 = []color.Color{color.RGBA{236, 231, 242, 255}, color.RGBA{166, 189, 219, 255}, color.RGBA{43, 140, 190, 255}}
	PuBu_4 = []color.Color{color.RGBA{241, 238, 246, 255}, color.RGBA{189, 201, 225, 255}, color.RGBA{116, 169, 207, 255}, color.RGBA{5, 112, 176, 255}}
	PuBu_5 = []color.Color{color.RGBA{241, 238, 246, 255}, color.RGBA{189, 201, 225, 255}, color.RGBA{116, 169, 207, 255}, color.RGBA{43, 140, 190, 255}, color.RGBA{4, 90, 141, 255}}
	PuBu_6 = []color.Color{color.RGBA{241, 238, 246, 255}, color.RGBA{208, 209, 230, 255}, color.RGBA{166, 189, 219, 255}, color.RGBA{116, 169, 207, 255}, color.RGBA{43, 140, 190, 255}, color.RGBA{4, 90, 141, 255}}
	PuBu_7 = []color.Color{color.RGBA{241, 238, 246, 255}, color.RGBA{208, 209, 230, 255}, color.RGBA{166, 189, 219, 255}, color.RGBA{116, 169, 207, 255}, color.RGBA{54, 144, 192, 255}, color.RGBA{5, 112, 176, 255}, color.RGBA{3, 78, 123, 255}}
	PuBu_8 = []color.Color{color.RGBA{255, 247, 251, 255}, color.RGBA{236, 231, 242, 255}, color.RGBA{208, 209, 230, 255}, color.RGBA{166, 189, 219, 255}, color.RGBA{116, 169, 207, 255}, color.RGBA{54, 144, 192, 255}, color.RGBA{5, 112, 176, 255}, color.RGBA{3, 78, 123, 255}}
	PuBu_9 = []color.Color{color.RGBA{255, 247, 251, 255}, color.RGBA{236, 231, 242, 255}, color.RGBA{208, 209, 230, 255}, color.RGBA{166, 189, 219, 255}, color.RGBA{116, 169, 207, 255}, color.RGBA{54, 144, 192, 255}, color.RGBA{5, 112, 176, 255}, color.RGBA{4, 90, 141, 255}, color.RGBA{2, 56, 88, 255}}
)

var (
	// PuBuGn is a sequential palette; the map is keyed by palette size (3-9).
	PuBuGn = map[int][]color.Color{3: PuBuGn_3, 4: PuBuGn_4, 5: PuBuGn_5, 6: PuBuGn_6, 7: PuBuGn_7, 8: PuBuGn_8, 9: PuBuGn_9}
	PuBuGn_3 = []color.Color{color.RGBA{236, 226, 240, 255}, color.RGBA{166, 189, 219, 255}, color.RGBA{28, 144, 153, 255}}
	PuBuGn_4 = []color.Color{color.RGBA{246, 239, 247, 255}, color.RGBA{189, 201, 225, 255}, color.RGBA{103, 169, 207, 255}, color.RGBA{2, 129, 138, 255}}
	PuBuGn_5 = []color.Color{color.RGBA{246, 239, 247, 255}, color.RGBA{189, 201, 225, 255}, color.RGBA{103, 169, 207, 255}, color.RGBA{28, 144, 153, 255}, color.RGBA{1, 108, 89, 255}}
	PuBuGn_6 = []color.Color{color.RGBA{246, 239, 247, 255}, color.RGBA{208, 209, 230, 255}, color.RGBA{166, 189, 219, 255}, color.RGBA{103, 169, 207, 255}, color.RGBA{28, 144, 153, 255}, color.RGBA{1, 108, 89, 255}}
	PuBuGn_7 = []color.Color{color.RGBA{246, 239, 247, 255}, color.RGBA{208, 209, 230, 255}, color.RGBA{166, 189, 219, 255}, color.RGBA{103, 169, 207, 255}, color.RGBA{54, 144, 192, 255}, color.RGBA{2, 129, 138, 255}, color.RGBA{1, 100, 80, 255}}
	PuBuGn_8 = []color.Color{color.RGBA{255, 247, 251, 255}, color.RGBA{236, 226, 240, 255}, color.RGBA{208, 209, 230, 255}, color.RGBA{166, 189, 219, 255}, color.RGBA{103, 169, 207, 255}, color.RGBA{54, 144, 192, 255}, color.RGBA{2, 129, 138, 255}, color.RGBA{1, 100, 80, 255}}
	PuBuGn_9 = []color.Color{color.RGBA{255, 247, 251, 255}, color.RGBA{236, 226, 240, 255}, color.RGBA{208, 209, 230, 255}, color.RGBA{166, 189, 219, 255}, color.RGBA{103, 169, 207, 255}, color.RGBA{54, 144, 192, 255}, color.RGBA{2, 129, 138, 255}, color.RGBA{1, 108, 89, 255}, color.RGBA{1, 70, 54, 255}}
)

var (
	// PuOr is a diverging palette; the map is keyed by palette size (3-11).
	PuOr = map[int][]color.Color{3: PuOr_3, 4: PuOr_4, 5: PuOr_5, 6: PuOr_6, 7: PuOr_7, 8: PuOr_8, 9: PuOr_9, 10: PuOr_10, 11: PuOr_11}
	PuOr_3 = []color.Color{color.RGBA{241, 163, 64, 255}, color.RGBA{247, 247, 247, 255}, color.RGBA{153, 142, 195, 255}}
	PuOr_4 = []color.Color{color.RGBA{230, 97, 1, 255}, color.RGBA{253, 184, 99, 255}, color.RGBA{178, 171, 210, 255}, color.RGBA{94, 60, 153, 255}}
	PuOr_5 = []color.Color{color.RGBA{230, 97, 1, 255}, color.RGBA{253, 184, 99, 255}, color.RGBA{247, 247, 247, 255}, color.RGBA{178, 171, 210, 255}, color.RGBA{94, 60, 153, 255}}
	PuOr_6 = []color.Color{color.RGBA{179, 88, 6, 255}, color.RGBA{241, 163, 64, 255}, color.RGBA{254, 224, 182, 255}, color.RGBA{216, 218, 235, 255}, color.RGBA{153, 142, 195, 255}, color.RGBA{84, 39, 136, 255}}
	PuOr_7 = []color.Color{color.RGBA{179, 88, 6, 255}, color.RGBA{241, 163, 64, 255}, color.RGBA{254, 224, 182, 255}, color.RGBA{247, 247, 247, 255}, color.RGBA{216, 218, 235, 255}, color.RGBA{153, 142, 195, 255}, color.RGBA{84, 39, 136, 255}}
	PuOr_8 = []color.Color{color.RGBA{179, 88, 6, 255}, color.RGBA{224, 130, 20, 255}, color.RGBA{253, 184, 99, 255}, color.RGBA{254, 224, 182, 255}, color.RGBA{216, 218, 235, 255}, color.RGBA{178, 171, 210, 255}, color.RGBA{128, 115, 172, 255}, color.RGBA{84, 39, 136, 255}}
	PuOr_9 = []color.Color{color.RGBA{179, 88, 6, 255}, color.RGBA{224, 130, 20, 255}, color.RGBA{253, 184, 99, 255}, color.RGBA{254, 224, 182, 255}, color.RGBA{247, 247, 247, 255}, color.RGBA{216, 218, 235, 255}, color.RGBA{178, 171, 210, 255}, color.RGBA{128, 115, 172, 255}, color.RGBA{84, 39, 136, 255}}
	PuOr_10 = []color.Color{color.RGBA{127, 59, 8, 255}, color.RGBA{179, 88, 6, 255}, color.RGBA{224, 130, 20, 255}, color.RGBA{253, 184, 99, 255}, color.RGBA{254, 224, 182, 255}, color.RGBA{216, 218, 235, 255}, color.RGBA{178, 171, 210, 255}, color.RGBA{128, 115, 172, 255}, color.RGBA{84, 39, 136, 255}, color.RGBA{45, 0, 75, 255}}
	PuOr_11 = []color.Color{color.RGBA{127, 59, 8, 255}, color.RGBA{179, 88, 6, 255}, color.RGBA{224, 130, 20, 255}, color.RGBA{253, 184, 99, 255}, color.RGBA{254, 224, 182, 255}, color.RGBA{247, 247, 247, 255}, color.RGBA{216, 218, 235, 255}, color.RGBA{178, 171, 210, 255}, color.RGBA{128, 115, 172, 255}, color.RGBA{84, 39, 136, 255}, color.RGBA{45, 0, 75, 255}}
)

var (
	// PuRd is a sequential palette; the map is keyed by palette size (3-9).
	PuRd = map[int][]color.Color{3: PuRd_3, 4: PuRd_4, 5: PuRd_5, 6: PuRd_6, 7: PuRd_7, 8: PuRd_8, 9: PuRd_9}
	PuRd_3 = []color.Color{color.RGBA{231, 225, 239, 255}, color.RGBA{201, 148, 199, 255}, color.RGBA{221, 28, 119, 255}}
	PuRd_4 = []color.Color{color.RGBA{241, 238, 246, 255}, color.RGBA{215, 181, 216, 255}, color.RGBA{223, 101, 176, 255}, color.RGBA{206, 18, 86, 255}}
	PuRd_5 = []color.Color{color.RGBA{241, 238, 246, 255}, color.RGBA{215, 181, 216, 255}, color.RGBA{223, 101, 176, 255}, color.RGBA{221, 28, 119, 255}, color.RGBA{152, 0, 67, 255}}
	PuRd_6 = []color.Color{color.RGBA{241, 238, 246, 255}, color.RGBA{212, 185, 218, 255}, color.RGBA{201, 148, 199, 255}, color.RGBA{223, 101, 176, 255}, color.RGBA{221, 28, 119, 255}, color.RGBA{152, 0, 67, 255}}
	PuRd_7 = []color.Color{color.RGBA{241, 238, 246, 255}, color.RGBA{212, 185, 218, 255}, color.RGBA{201, 148, 199, 255}, color.RGBA{223, 101, 176, 255}, color.RGBA{231, 41, 138, 255}, color.RGBA{206, 18, 86, 255}, color.RGBA{145, 0, 63, 255}}
	PuRd_8 = []color.Color{color.RGBA{247, 244, 249, 255}, color.RGBA{231, 225, 239, 255}, color.RGBA{212, 185, 218, 255}, color.RGBA{201, 148, 199, 255}, color.RGBA{223, 101, 176, 255}, color.RGBA{231, 41, 138, 255}, color.RGBA{206, 18, 86, 255}, color.RGBA{145, 0, 63, 255}}
	PuRd_9 = []color.Color{color.RGBA{247, 244, 249, 255}, color.RGBA{231, 225, 239, 255}, color.RGBA{212, 185, 218, 255}, color.RGBA{201, 148, 199, 255}, color.RGBA{223, 101, 176, 255}, color.RGBA{231, 41, 138, 255}, color.RGBA{206, 18, 86, 255}, color.RGBA{152, 0, 67, 255}, color.RGBA{103, 0, 31, 255}}
)

var (
	// Purples is a sequential palette; the map is keyed by palette size (3-9).
	Purples = map[int][]color.Color{3: Purples_3, 4: Purples_4, 5: Purples_5, 6: Purples_6, 7: Purples_7, 8: Purples_8, 9: Purples_9}
	Purples_3 = []color.Color{color.RGBA{239, 237, 245, 255}, color.RGBA{188, 189, 220, 255}, color.RGBA{117, 107, 177, 255}}
	Purples_4 = []color.Color{color.RGBA{242, 240, 247, 255}, color.RGBA{203, 201, 226, 255}, color.RGBA{158, 154, 200, 255}, color.RGBA{106, 81, 163, 255}}
	Purples_5 = []color.Color{color.RGBA{242, 240, 247, 255}, color.RGBA{203, 201, 226, 255}, color.RGBA{158, 154, 200, 255}, color.RGBA{117, 107, 177, 255}, color.RGBA{84, 39, 143, 255}}
	Purples_6 = []color.Color{color.RGBA{242, 240, 247, 255}, color.RGBA{218, 218, 235, 255}, color.RGBA{188, 189, 220, 255}, color.RGBA{158, 154, 200, 255}, color.RGBA{117, 107, 177, 255}, color.RGBA{84, 39, 143, 255}}
	Purples_7 = []color.Color{color.RGBA{242, 240, 247, 255}, color.RGBA{218, 218, 235, 255}, color.RGBA{188, 189, 220, 255}, color.RGBA{158, 154, 200, 255}, color.RGBA{128, 125, 186, 255}, color.RGBA{106, 81, 163, 255}, color.RGBA{74, 20, 134, 255}}
	Purples_8 = []color.Color{color.RGBA{252, 251, 253, 255}, color.RGBA{239, 237, 245, 255}, color.RGBA{218, 218, 235, 255}, color.RGBA{188, 189, 220, 255}, color.RGBA{158, 154, 200, 255}, color.RGBA{128, 125, 186, 255}, color.RGBA{106, 81, 163, 255}, color.RGBA{74, 20, 134, 255}}
	Purples_9 = []color.Color{color.RGBA{252, 251, 253, 255}, color.RGBA{239, 237, 245, 255}, color.RGBA{218, 218, 235, 255}, color.RGBA{188, 189, 220, 255}, color.RGBA{158, 154, 200, 255}, color.RGBA{128, 125, 186, 255}, color.RGBA{106, 81, 163, 255}, color.RGBA{84, 39, 143, 255}, color.RGBA{63, 0, 125, 255}}
)

var (
	// RdBu is a diverging palette; the map is keyed by palette size (3-11).
	RdBu = map[int][]color.Color{3: RdBu_3, 4: RdBu_4, 5: RdBu_5, 6: RdBu_6, 7: RdBu_7, 8: RdBu_8, 9: RdBu_9, 10: RdBu_10, 11: RdBu_11}
	RdBu_3 = []color.Color{color.RGBA{239, 138, 98, 255}, color.RGBA{247, 247, 247, 255}, color.RGBA{103, 169, 207, 255}}
	RdBu_4 = []color.Color{color.RGBA{202, 0, 32, 255}, color.RGBA{244, 165, 130, 255}, color.RGBA{146, 197, 222, 255}, color.RGBA{5, 113, 176, 255}}
	RdBu_5 = []color.Color{color.RGBA{202, 0, 32, 255}, color.RGBA{244, 165, 130, 255}, color.RGBA{247, 247, 247, 255}, color.RGBA{146, 197, 222, 255}, color.RGBA{5, 113, 176, 255}}
	RdBu_6 = []color.Color{color.RGBA{178, 24, 43, 255}, color.RGBA{239, 138, 98, 255}, color.RGBA{253, 219, 199, 255}, color.RGBA{209, 229, 240, 255}, color.RGBA{103, 169, 207, 255}, color.RGBA{33, 102, 172, 255}}
	RdBu_7 = []color.Color{color.RGBA{178, 24, 43, 255}, color.RGBA{239, 138, 98, 255}, color.RGBA{253, 219, 199, 255}, color.RGBA{247, 247, 247, 255}, color.RGBA{209, 229, 240, 255}, color.RGBA{103, 169, 207, 255}, color.RGBA{33, 102, 172, 255}}
	RdBu_8 = []color.Color{color.RGBA{178, 24, 43, 255}, color.RGBA{214, 96, 77, 255}, color.RGBA{244, 165, 130, 255}, color.RGBA{253, 219, 199, 255}, color.RGBA{209, 229, 240, 255}, color.RGBA{146, 197, 222, 255}, color.RGBA{67, 147, 195, 255}, color.RGBA{33, 102, 172, 255}}
	RdBu_9 = []color.Color{color.RGBA{178, 24, 43, 255}, color.RGBA{214, 96, 77, 255}, color.RGBA{244, 165, 130, 255}, color.RGBA{253, 219, 199, 255}, color.RGBA{247, 247, 247, 255}, color.RGBA{209, 229, 240, 255}, color.RGBA{146, 197, 222, 255}, color.RGBA{67, 147, 195, 255}, color.RGBA{33, 102, 172, 255}}
	RdBu_10 = []color.Color{color.RGBA{103, 0, 31, 255}, color.RGBA{178, 24, 43, 255}, color.RGBA{214, 96, 77, 255}, color.RGBA{244, 165, 130, 255}, color.RGBA{253, 219, 199, 255}, color.RGBA{209, 229, 240, 255}, color.RGBA{146, 197, 222, 255}, color.RGBA{67, 147, 195, 255}, color.RGBA{33, 102, 172, 255}, color.RGBA{5, 48, 97, 255}}
	RdBu_11 = []color.Color{color.RGBA{103, 0, 31, 255}, color.RGBA{178, 24, 43, 255}, color.RGBA{214, 96, 77, 255}, color.RGBA{244, 165, 130, 255}, color.RGBA{253, 219, 199, 255}, color.RGBA{247, 247, 247, 255}, color.RGBA{209, 229, 240, 255}, color.RGBA{146, 197, 222, 255}, color.RGBA{67, 147, 195, 255}, color.RGBA{33, 102, 172, 255}, color.RGBA{5, 48, 97, 255}}
)

var (
	// RdGy is a diverging palette; the map is keyed by palette size (3-11).
	RdGy = map[int][]color.Color{3: RdGy_3, 4: RdGy_4, 5: RdGy_5, 6: RdGy_6, 7: RdGy_7, 8: RdGy_8, 9: RdGy_9, 10: RdGy_10, 11: RdGy_11}
	RdGy_3 = []color.Color{color.RGBA{239, 138, 98, 255}, color.RGBA{255, 255, 255, 255}, color.RGBA{153, 153, 153, 255}}
	RdGy_4 = []color.Color{color.RGBA{202, 0, 32, 255}, color.RGBA{244, 165, 130, 255}, color.RGBA{186, 186, 186, 255}, color.RGBA{64, 64, 64, 255}}
	RdGy_5 = []color.Color{color.RGBA{202, 0, 32, 255}, color.RGBA{244, 165, 130, 255}, color.RGBA{255, 255, 255, 255}, color.RGBA{186, 186, 186, 255}, color.RGBA{64, 64, 64, 255}}
	RdGy_6 = []color.Color{color.RGBA{178, 24, 43, 255}, color.RGBA{239, 138, 98, 255}, color.RGBA{253, 219, 199, 255}, color.RGBA{224, 224, 224, 255}, color.RGBA{153, 153, 153, 255}, color.RGBA{77, 77, 77, 255}}
	RdGy_7 = []color.Color{color.RGBA{178, 24, 43, 255}, color.RGBA{239, 138, 98, 255}, color.RGBA{253, 219, 199, 255}, color.RGBA{255, 255, 255, 255}, color.RGBA{224, 224, 224, 255}, color.RGBA{153, 153, 153, 255}, color.RGBA{77, 77, 77, 255}}
	RdGy_8 = []color.Color{color.RGBA{178, 24, 43, 255}, color.RGBA{214, 96, 77, 255}, color.RGBA{244, 165, 130, 255}, color.RGBA{253, 219, 199, 255}, color.RGBA{224, 224, 224, 255}, color.RGBA{186, 186, 186, 255}, color.RGBA{135, 135, 135, 255}, color.RGBA{77, 77, 77, 255}}
	RdGy_9 = []color.Color{color.RGBA{178, 24, 43, 255}, color.RGBA{214, 96, 77, 255}, color.RGBA{244, 165, 130, 255}, color.RGBA{253, 219, 199, 255}, color.RGBA{255, 255, 255, 255}, color.RGBA{224, 224, 224, 255}, color.RGBA{186, 186, 186, 255}, color.RGBA{135, 135, 135, 255}, color.RGBA{77, 77, 77, 255}}
	RdGy_10 = []color.Color{color.RGBA{103, 0, 31, 255}, color.RGBA{178, 24, 43, 255}, color.RGBA{214, 96, 77, 255}, color.RGBA{244, 165, 130, 255}, color.RGBA{253, 219, 199, 255}, color.RGBA{224, 224, 224, 255}, color.RGBA{186, 186, 186, 255}, color.RGBA{135, 135, 135, 255}, color.RGBA{77, 77, 77, 255}, color.RGBA{26, 26, 26, 255}}
	RdGy_11 = []color.Color{color.RGBA{103, 0, 31, 255}, color.RGBA{178, 24, 43, 255}, color.RGBA{214, 96, 77, 255}, color.RGBA{244, 165, 130, 255}, color.RGBA{253, 219, 199, 255}, color.RGBA{255, 255, 255, 255}, color.RGBA{224, 224, 224, 255}, color.RGBA{186, 186, 186, 255}, color.RGBA{135, 135, 135, 255}, color.RGBA{77, 77, 77, 255}, color.RGBA{26, 26, 26, 255}}
)

var (
	// RdPu is a sequential palette; the map is keyed by palette size (3-9).
	RdPu = map[int][]color.Color{3: RdPu_3, 4: RdPu_4, 5: RdPu_5, 6: RdPu_6, 7: RdPu_7, 8: RdPu_8, 9: RdPu_9}
	RdPu_3 = []color.Color{color.RGBA{253, 224, 221, 255}, color.RGBA{250, 159, 181, 255}, color.RGBA{197, 27, 138, 255}}
	RdPu_4 = []color.Color{color.RGBA{254, 235, 226, 255}, color.RGBA{251, 180, 185, 255}, color.RGBA{247, 104, 161, 255}, color.RGBA{174, 1, 126, 255}}
	RdPu_5 = []color.Color{color.RGBA{254, 235, 226, 255}, color.RGBA{251, 180, 185, 255}, color.RGBA{247, 104, 161, 255}, color.RGBA{197, 27, 138, 255}, color.RGBA{122, 1, 119, 255}}
	RdPu_6 = []color.Color{color.RGBA{254, 235, 226, 255}, color.RGBA{252, 197, 192, 255}, color.RGBA{250, 159, 181, 255}, color.RGBA{247, 104, 161, 255}, color.RGBA{197, 27, 138, 255}, color.RGBA{122, 1, 119, 255}}
	RdPu_7 = []color.Color{color.RGBA{254, 235, 226, 255}, color.RGBA{252, 197, 192, 255}, color.RGBA{250, 159, 181, 255}, color.RGBA{247, 104, 161, 255}, color.RGBA{221, 52, 151, 255}, color.RGBA{174, 1, 126, 255}, color.RGBA{122, 1, 119, 255}}
	RdPu_8 = []color.Color{color.RGBA{255, 247, 243, 255}, color.RGBA{253, 224, 221, 255}, color.RGBA{252, 197, 192, 255}, color.RGBA{250, 159, 181, 255}, color.RGBA{247, 104, 161, 255}, color.RGBA{221, 52, 151, 255}, color.RGBA{174, 1, 126, 255}, color.RGBA{122, 1, 119, 255}}
	RdPu_9 = []color.Color{color.RGBA{255, 247, 243, 255}, color.RGBA{253, 224, 221, 255}, color.RGBA{252, 197, 192, 255}, color.RGBA{250, 159, 181, 255}, color.RGBA{247, 104, 161, 255}, color.RGBA{221, 52, 151, 255}, color.RGBA{174, 1, 126, 255}, color.RGBA{122, 1, 119, 255}, color.RGBA{73, 0, 106, 255}}
)

var (
	// RdYlBu is a diverging palette; the map is keyed by palette size (3-11).
	RdYlBu = map[int][]color.Color{3: RdYlBu_3, 4: RdYlBu_4, 5: RdYlBu_5, 6: RdYlBu_6, 7: RdYlBu_7, 8: RdYlBu_8, 9: RdYlBu_9, 10: RdYlBu_10, 11: RdYlBu_11}
	RdYlBu_3 = []color.Color{color.RGBA{252, 141, 89, 255}, color.RGBA{255, 255, 191, 255}, color.RGBA{145, 191, 219, 255}}
	RdYlBu_4 = []color.Color{color.RGBA{215, 25, 28, 255}, color.RGBA{253, 174, 97, 255}, color.RGBA{171, 217, 233, 255}, color.RGBA{44, 123, 182, 255}}
	RdYlBu_5 = []color.Color{color.RGBA{215, 25, 28, 255}, color.RGBA{253, 174, 97, 255}, color.RGBA{255, 255, 191, 255}, color.RGBA{171, 217, 233, 255}, color.RGBA{44, 123, 182, 255}}
	RdYlBu_6 = []color.Color{color.RGBA{215, 48, 39, 255}, color.RGBA{252, 141, 89, 255}, color.RGBA{254, 224, 144, 255}, color.RGBA{224, 243, 248, 255}, color.RGBA{145, 191, 219, 255}, color.RGBA{69, 117, 180, 255}}
	RdYlBu_7 = []color.Color{color.RGBA{215, 48, 39, 255}, color.RGBA{252, 141, 89, 255}, color.RGBA{254, 224, 144, 255}, color.RGBA{255, 255, 191, 255}, color.RGBA{224, 243, 248, 255}, color.RGBA{145, 191, 219, 255}, color.RGBA{69, 117, 180, 255}}
	RdYlBu_8 = []color.Color{color.RGBA{215, 48, 39, 255}, color.RGBA{244, 109, 67, 255}, color.RGBA{253, 174, 97, 255}, color.RGBA{254, 224, 144, 255}, color.RGBA{224, 243, 248, 255}, color.RGBA{171, 217, 233, 255}, color.RGBA{116, 173, 209, 255}, color.RGBA{69, 117, 180, 255}}
	RdYlBu_9 = []color.Color{color.RGBA{215, 48, 39, 255}, color.RGBA{244, 109, 67, 255}, color.RGBA{253, 174, 97, 255}, color.RGBA{254, 224, 144, 255}, color.RGBA{255, 255, 191, 255}, color.RGBA{224, 243, 248, 255}, color.RGBA{171, 217, 233, 255}, color.RGBA{116, 173, 209, 255}, color.RGBA{69, 117, 180, 255}}
	RdYlBu_10 = []color.Color{color.RGBA{165, 0, 38, 255}, color.RGBA{215, 48, 39, 255}, color.RGBA{244, 109, 67, 255}, color.RGBA{253, 174, 97, 255}, color.RGBA{254, 224, 144, 255}, color.RGBA{224, 243, 248, 255}, color.RGBA{171, 217, 233, 255}, color.RGBA{116, 173, 209, 255}, color.RGBA{69, 117, 180, 255}, color.RGBA{49, 54, 149, 255}}
	RdYlBu_11 = []color.Color{color.RGBA{165, 0, 38, 255}, color.RGBA{215, 48, 39, 255}, color.RGBA{244, 109, 67, 255}, color.RGBA{253, 174, 97, 255}, color.RGBA{254, 224, 144, 255}, color.RGBA{255, 255, 191, 255}, color.RGBA{224, 243, 248, 255}, color.RGBA{171, 217, 233, 255}, color.RGBA{116, 173, 209, 255}, color.RGBA{69, 117, 180, 255}, color.RGBA{49, 54, 149, 255}}
)

var (
	// RdYlGn is a diverging palette; the map is keyed by palette size (3-11).
	RdYlGn = map[int][]color.Color{3: RdYlGn_3, 4: RdYlGn_4, 5: RdYlGn_5, 6: RdYlGn_6, 7: RdYlGn_7, 8: RdYlGn_8, 9: RdYlGn_9, 10: RdYlGn_10, 11: RdYlGn_11}
	RdYlGn_3 = []color.Color{color.RGBA{252, 141, 89, 255}, color.RGBA{255, 255, 191, 255}, color.RGBA{145, 207, 96, 255}}
	RdYlGn_4 = []color.Color{color.RGBA{215, 25, 28, 255}, color.RGBA{253, 174, 97, 255}, color.RGBA{166, 217, 106, 255}, color.RGBA{26, 150, 65, 255}}
	RdYlGn_5 = []color.Color{color.RGBA{215, 25, 28, 255}, color.RGBA{253, 174, 97, 255}, color.RGBA{255, 255, 191, 255}, color.RGBA{166, 217, 106, 255}, color.RGBA{26, 150, 65, 255}}
	RdYlGn_6 = []color.Color{color.RGBA{215, 48, 39, 255}, color.RGBA{252, 141, 89, 255}, color.RGBA{254, 224, 139, 255}, color.RGBA{217, 239, 139, 255}, color.RGBA{145, 207, 96, 255}, color.RGBA{26, 152, 80, 255}}
	RdYlGn_7 = []color.Color{color.RGBA{215, 48, 39, 255}, color.RGBA{252, 141, 89, 255}, color.RGBA{254, 224, 139, 255}, color.RGBA{255, 255, 191, 255}, color.RGBA{217, 239, 139, 255}, color.RGBA{145, 207, 96, 255}, color.RGBA{26, 152, 80, 255}}
	RdYlGn_8 = []color.Color{color.RGBA{215, 48, 39, 255}, color.RGBA{244, 109, 67, 255}, color.RGBA{253, 174, 97, 255}, color.RGBA{254, 224, 139, 255}, color.RGBA{217, 239, 139, 255}, color.RGBA{166, 217, 106, 255}, color.RGBA{102, 189, 99, 255}, color.RGBA{26, 152, 80, 255}}
	RdYlGn_9 = []color.Color{color.RGBA{215, 48, 39, 255}, color.RGBA{244, 109, 67, 255}, color.RGBA{253, 174, 97, 255}, color.RGBA{254, 224, 139, 255}, color.RGBA{255, 255, 191, 255}, color.RGBA{217, 239, 139, 255}, color.RGBA{166, 217, 106, 255}, color.RGBA{102, 189, 99, 255}, color.RGBA{26, 152, 80, 255}}
	RdYlGn_10 = []color.Color{color.RGBA{165, 0, 38, 255}, color.RGBA{215, 48, 39, 255}, color.RGBA{244, 109, 67, 255}, color.RGBA{253, 174, 97, 255}, color.RGBA{254, 224, 139, 255}, color.RGBA{217, 239, 139, 255}, color.RGBA{166, 217, 106, 255}, color.RGBA{102, 189, 99, 255}, color.RGBA{26, 152, 80, 255}, color.RGBA{0, 104, 55, 255}}
	RdYlGn_11 = []color.Color{color.RGBA{165, 0, 38, 255}, color.RGBA{215, 48, 39, 255}, color.RGBA{244, 109, 67, 255}, color.RGBA{253, 174, 97, 255}, color.RGBA{254, 224, 139, 255}, color.RGBA{255, 255, 191, 255}, color.RGBA{217, 239, 139, 255}, color.RGBA{166, 217, 106, 255}, color.RGBA{102, 189, 99, 255}, color.RGBA{26, 152, 80, 255}, color.RGBA{0, 104, 55, 255}}
)

var (
	// Reds is a sequential palette; the map is keyed by palette size (3-9).
	Reds = map[int][]color.Color{3: Reds_3, 4: Reds_4, 5: Reds_5, 6: Reds_6, 7: Reds_7, 8: Reds_8, 9: Reds_9}
	Reds_3 = []color.Color{color.RGBA{254, 224, 210, 255}, color.RGBA{252, 146, 114, 255}, color.RGBA{222, 45, 38, 255}}
	Reds_4 = []color.Color{color.RGBA{254, 229, 217, 255}, color.RGBA{252, 174, 145, 255}, color.RGBA{251, 106, 74, 255}, color.RGBA{203, 24, 29, 255}}
	Reds_5 = []color.Color{color.RGBA{254, 229, 217, 255}, color.RGBA{252, 174, 145, 255}, color.RGBA{251, 106, 74, 255}, color.RGBA{222, 45, 38, 255}, color.RGBA{165, 15, 21, 255}}
	Reds_6 = []color.Color{color.RGBA{254, 229, 217, 255}, color.RGBA{252, 187, 161, 255}, color.RGBA{252, 146, 114, 255}, color.RGBA{251, 106, 74, 255}, color.RGBA{222, 45, 38, 255}, color.RGBA{165, 15, 21, 255}}
	Reds_7 = []color.Color{color.RGBA{254, 229, 217, 255}, color.RGBA{252, 187, 161, 255}, color.RGBA{252, 146, 114, 255}, color.RGBA{251, 106, 74, 255}, color.RGBA{239, 59, 44, 255}, color.RGBA{203, 24, 29, 255}, color.RGBA{153, 0, 13, 255}}
	Reds_8 = []color.Color{color.RGBA{255, 245, 240, 255}, color.RGBA{254, 224, 210, 255}, color.RGBA{252, 187, 161, 255}, color.RGBA{252, 146, 114, 255}, color.RGBA{251, 106, 74, 255}, color.RGBA{239, 59, 44, 255}, color.RGBA{203, 24, 29, 255}, color.RGBA{153, 0, 13, 255}}
	Reds_9 = []color.Color{color.RGBA{255, 245, 240, 255}, color.RGBA{254, 224, 210, 255}, color.RGBA{252, 187, 161, 255}, color.RGBA{252, 146, 114, 255}, color.RGBA{251, 106, 74, 255}, color.RGBA{239, 59, 44, 255}, color.RGBA{203, 24, 29, 255}, color.RGBA{165, 15, 21, 255}, color.RGBA{103, 0, 13, 255}}
)

var (
	// Set1 is a qualitative palette; the map is keyed by palette size (3-9).
	Set1 = map[int][]color.Color{3: Set1_3, 4: Set1_4, 5: Set1_5, 6: Set1_6, 7: Set1_7, 8: Set1_8, 9: Set1_9}
	Set1_3 = []color.Color{color.RGBA{228, 26, 28, 255}, color.RGBA{55, 126, 184, 255}, color.RGBA{77, 175, 74, 255}}
	Set1_4 = []color.Color{color.RGBA{228, 26, 28, 255}, color.RGBA{55, 126, 184, 255}, color.RGBA{77, 175, 74, 255}, color.RGBA{152, 78, 163, 255}}
	Set1_5 = []color.Color{color.RGBA{228, 26, 28, 255}, color.RGBA{55, 126, 184, 255}, color.RGBA{77, 175, 74, 255}, color.RGBA{152, 78, 163, 255}, color.RGBA{255, 127, 0, 255}}
	Set1_6 = []color.Color{color.RGBA{228, 26, 28, 255}, color.RGBA{55, 126, 184, 255}, color.RGBA{77, 175, 74, 255}, color.RGBA{152, 78, 163, 255}, color.RGBA{255, 127, 0, 255}, color.RGBA{255, 255, 51, 255}}
	Set1_7 = []color.Color{color.RGBA{228, 26, 28, 255}, color.RGBA{55, 126, 184, 255}, color.RGBA{77, 175, 74, 255}, color.RGBA{152, 78, 163, 255}, color.RGBA{255, 127, 0, 255}, color.RGBA{255, 255, 51, 255}, color.RGBA{166, 86, 40, 255}}
	Set1_8 = []color.Color{color.RGBA{228, 26, 28, 255}, color.RGBA{55, 126, 184, 255}, color.RGBA{77, 175, 74, 255}, color.RGBA{152, 78, 163, 255}, color.RGBA{255, 127, 0, 255}, color.RGBA{255, 255, 51, 255}, color.RGBA{166, 86, 40, 255}, color.RGBA{247, 129, 191, 255}}
	Set1_9 = []color.Color{color.RGBA{228, 26, 28, 255}, color.RGBA{55, 126, 184, 255}, color.RGBA{77, 175, 74, 255}, color.RGBA{152, 78, 163, 255}, color.RGBA{255, 127, 0, 255}, color.RGBA{255, 255, 51, 255}, color.RGBA{166, 86, 40, 255}, color.RGBA{247, 129, 191, 255}, color.RGBA{153, 153, 153, 255}}
)

var (
	// Set2 is a qualitative palette; the map is keyed by palette size (3-8).
	Set2 = map[int][]color.Color{3: Set2_3, 4: Set2_4, 5: Set2_5, 6: Set2_6, 7: Set2_7, 8: Set2_8}
	Set2_3 = []color.Color{color.RGBA{102, 194, 165, 255}, color.RGBA{252, 141, 98, 255}, color.RGBA{141, 160, 203, 255}}
	Set2_4 = []color.Color{color.RGBA{102, 194, 165, 255}, color.RGBA{252, 141, 98, 255}, color.RGBA{141, 160, 203, 255}, color.RGBA{231, 138, 195, 255}}
	Set2_5 = []color.Color{color.RGBA{102, 194, 165, 255}, color.RGBA{252, 141, 98, 255}, color.RGBA{141, 160, 203, 255}, color.RGBA{231, 138, 195, 255}, color.RGBA{166, 216, 84, 255}}
	Set2_6 = []color.Color{color.RGBA{102, 194, 165, 255}, color.RGBA{252, 141, 98, 255}, color.RGBA{141, 160, 203, 255}, color.RGBA{231, 138, 195, 255}, color.RGBA{166, 216, 84, 255}, color.RGBA{255, 217, 47, 255}}
	Set2_7 = []color.Color{color.RGBA{102, 194, 165, 255}, color.RGBA{252, 141, 98, 255}, color.RGBA{141, 160, 203, 255}, color.RGBA{231, 138, 195, 255}, color.RGBA{166, 216, 84, 255}, color.RGBA{255, 217, 47, 255}, color.RGBA{229, 196, 148, 255}}
	Set2_8 = []color.Color{color.RGBA{102, 194, 165, 255}, color.RGBA{252, 141, 98, 255}, color.RGBA{141, 160, 203, 255}, color.RGBA{231, 138, 195, 255}, color.RGBA{166, 216, 84, 255}, color.RGBA{255, 217, 47, 255}, color.RGBA{229, 196, 148, 255}, color.RGBA{179, 179, 179, 255}}
)

var (
	// Set3 is a qualitative palette; the map is keyed by palette size (3-12).
	Set3 = map[int][]color.Color{3: Set3_3, 4: Set3_4, 5: Set3_5, 6: Set3_6, 7: Set3_7, 8: Set3_8, 9: Set3_9, 10: Set3_10, 11: Set3_11, 12: Set3_12}
	Set3_3 = []color.Color{color.RGBA{141, 211, 199, 255}, color.RGBA{255, 255, 179, 255}, color.RGBA{190, 186, 218, 255}}
	Set3_4 = []color.Color{color.RGBA{141, 211, 199, 255}, color.RGBA{255, 255, 179, 255}, color.RGBA{190, 186, 218, 255}, color.RGBA{251, 128, 114, 255}}
	Set3_5 = []color.Color{color.RGBA{141, 211, 199, 255}, color.RGBA{255, 255, 179, 255}, color.RGBA{190, 186, 218, 255}, color.RGBA{251, 128, 114, 255}, color.RGBA{128, 177, 211, 255}}
	Set3_6 = []color.Color{color.RGBA{141, 211, 199, 255}, color.RGBA{255, 255, 179, 255}, color.RGBA{190, 186, 218, 255}, color.RGBA{251, 128, 114, 255}, color.RGBA{128, 177, 211, 255}, color.RGBA{253, 180, 98, 255}}
	Set3_7 = []color.Color{color.RGBA{141, 211, 199, 255}, color.RGBA{255, 255, 179, 255}, color.RGBA{190, 186, 218, 255}, color.RGBA{251, 128, 114, 255}, color.RGBA{128, 177, 211, 255}, color.RGBA{253, 180, 98, 255}, color.RGBA{179, 222, 105, 255}}
	Set3_8 = []color.Color{color.RGBA{141, 211, 199, 255}, color.RGBA{255, 255, 179, 255}, color.RGBA{190, 186, 218, 255}, color.RGBA{251, 128, 114, 255}, color.RGBA{128, 177, 211, 255}, color.RGBA{253, 180, 98, 255}, color.RGBA{179, 222, 105, 255}, color.RGBA{252, 205, 229, 255}}
	Set3_9 = []color.Color{color.RGBA{141, 211, 199, 255}, color.RGBA{255, 255, 179, 255}, color.RGBA{190, 186, 218, 255}, color.RGBA{251, 128, 114, 255}, color.RGBA{128, 177, 211, 255}, color.RGBA{253, 180, 98, 255}, color.RGBA{179, 222, 105, 255}, color.RGBA{252, 205, 229, 255}, color.RGBA{217, 217, 217, 255}}
	Set3_10 = []color.Color{color.RGBA{141, 211, 199, 255}, color.RGBA{255, 255, 179, 255}, color.RGBA{190, 186, 218, 255}, color.RGBA{251, 128, 114, 255}, color.RGBA{128, 177, 211, 255}, color.RGBA{253, 180, 98, 255}, color.RGBA{179, 222, 105, 255}, color.RGBA{252, 205, 229, 255}, color.RGBA{217, 217, 217, 255}, color.RGBA{188, 128, 189, 255}}
	Set3_11 = []color.Color{color.RGBA{141, 211, 199, 255}, color.RGBA{255, 255, 179, 255}, color.RGBA{190, 186, 218, 255}, color.RGBA{251, 128, 114, 255}, color.RGBA{128, 177, 211, 255}, color.RGBA{253, 180, 98, 255}, color.RGBA{179, 222, 105, 255}, color.RGBA{252, 205, 229, 255}, color.RGBA{217, 217, 217, 255}, color.RGBA{188, 128, 189, 255}, color.RGBA{204, 235, 197, 255}}
	Set3_12 = []color.Color{color.RGBA{141, 211, 199, 255}, color.RGBA{255, 255, 179, 255}, color.RGBA{190, 186, 218, 255}, color.RGBA{251, 128, 114, 255}, color.RGBA{128, 177, 211, 255}, color.RGBA{253, 180, 98, 255}, color.RGBA{179, 222, 105, 255}, color.RGBA{252, 205, 229, 255}, color.RGBA{217, 217, 217, 255}, color.RGBA{188, 128, 189, 255}, color.RGBA{204, 235, 197, 255}, color.RGBA{255, 237, 111, 255}}
)

var (
	// Spectral is a diverging palette; the map is keyed by palette size (3-11).
	Spectral = map[int][]color.Color{3: Spectral_3, 4: Spectral_4, 5: Spectral_5, 6: Spectral_6, 7: Spectral_7, 8: Spectral_8, 9: Spectral_9, 10: Spectral_10, 11: Spectral_11}
	Spectral_3 = []color.Color{color.RGBA{252, 141, 89, 255}, color.RGBA{255, 255, 191, 255}, color.RGBA{153, 213, 148, 255}}
	Spectral_4 = []color.Color{color.RGBA{215, 25, 28, 255}, color.RGBA{253, 174, 97, 255}, color.RGBA{171, 221, 164, 255}, color.RGBA{43, 131, 186, 255}}
	Spectral_5 = []color.Color{color.RGBA{215, 25, 28, 255}, color.RGBA{253, 174, 97, 255}, color.RGBA{255, 255, 191, 255}, color.RGBA{171, 221, 164, 255}, color.RGBA{43, 131, 186, 
255}}\n\tSpectral_6 = []color.Color{color.RGBA{213, 62, 79, 255}, color.RGBA{252, 141, 89, 255}, color.RGBA{254, 224, 139, 255}, color.RGBA{230, 245, 152, 255}, color.RGBA{153, 213, 148, 255}, color.RGBA{50, 136, 189, 255}}\n\tSpectral_7 = []color.Color{color.RGBA{213, 62, 79, 255}, color.RGBA{252, 141, 89, 255}, color.RGBA{254, 224, 139, 255}, color.RGBA{255, 255, 191, 255}, color.RGBA{230, 245, 152, 255}, color.RGBA{153, 213, 148, 255}, color.RGBA{50, 136, 189, 255}}\n\tSpectral_8 = []color.Color{color.RGBA{213, 62, 79, 255}, color.RGBA{244, 109, 67, 255}, color.RGBA{253, 174, 97, 255}, color.RGBA{254, 224, 139, 255}, color.RGBA{230, 245, 152, 255}, color.RGBA{171, 221, 164, 255}, color.RGBA{102, 194, 165, 255}, color.RGBA{50, 136, 189, 255}}\n\tSpectral_9 = []color.Color{color.RGBA{213, 62, 79, 255}, color.RGBA{244, 109, 67, 255}, color.RGBA{253, 174, 97, 255}, color.RGBA{254, 224, 139, 255}, color.RGBA{255, 255, 191, 255}, color.RGBA{230, 245, 152, 255}, color.RGBA{171, 221, 164, 255}, color.RGBA{102, 194, 165, 255}, color.RGBA{50, 136, 189, 255}}\n\tSpectral_10 = []color.Color{color.RGBA{158, 1, 66, 255}, color.RGBA{213, 62, 79, 255}, color.RGBA{244, 109, 67, 255}, color.RGBA{253, 174, 97, 255}, color.RGBA{254, 224, 139, 255}, color.RGBA{230, 245, 152, 255}, color.RGBA{171, 221, 164, 255}, color.RGBA{102, 194, 165, 255}, color.RGBA{50, 136, 189, 255}, color.RGBA{94, 79, 162, 255}}\n\tSpectral_11 = []color.Color{color.RGBA{158, 1, 66, 255}, color.RGBA{213, 62, 79, 255}, color.RGBA{244, 109, 67, 255}, color.RGBA{253, 174, 97, 255}, color.RGBA{254, 224, 139, 255}, color.RGBA{255, 255, 191, 255}, color.RGBA{230, 245, 152, 255}, color.RGBA{171, 221, 164, 255}, color.RGBA{102, 194, 165, 255}, color.RGBA{50, 136, 189, 255}, color.RGBA{94, 79, 162, 255}}\n)\n\nvar (\n\t// YlGn is a sequential palette.\n\tYlGn = map[int][]color.Color{3: YlGn_3, 4: YlGn_4, 5: YlGn_5, 6: YlGn_6, 7: YlGn_7, 8: YlGn_8, 9: YlGn_9}\n\tYlGn_3 = []color.Color{color.RGBA{247, 252, 185, 255}, 
color.RGBA{173, 221, 142, 255}, color.RGBA{49, 163, 84, 255}}\n\tYlGn_4 = []color.Color{color.RGBA{255, 255, 204, 255}, color.RGBA{194, 230, 153, 255}, color.RGBA{120, 198, 121, 255}, color.RGBA{35, 132, 67, 255}}\n\tYlGn_5 = []color.Color{color.RGBA{255, 255, 204, 255}, color.RGBA{194, 230, 153, 255}, color.RGBA{120, 198, 121, 255}, color.RGBA{49, 163, 84, 255}, color.RGBA{0, 104, 55, 255}}\n\tYlGn_6 = []color.Color{color.RGBA{255, 255, 204, 255}, color.RGBA{217, 240, 163, 255}, color.RGBA{173, 221, 142, 255}, color.RGBA{120, 198, 121, 255}, color.RGBA{49, 163, 84, 255}, color.RGBA{0, 104, 55, 255}}\n\tYlGn_7 = []color.Color{color.RGBA{255, 255, 204, 255}, color.RGBA{217, 240, 163, 255}, color.RGBA{173, 221, 142, 255}, color.RGBA{120, 198, 121, 255}, color.RGBA{65, 171, 93, 255}, color.RGBA{35, 132, 67, 255}, color.RGBA{0, 90, 50, 255}}\n\tYlGn_8 = []color.Color{color.RGBA{255, 255, 229, 255}, color.RGBA{247, 252, 185, 255}, color.RGBA{217, 240, 163, 255}, color.RGBA{173, 221, 142, 255}, color.RGBA{120, 198, 121, 255}, color.RGBA{65, 171, 93, 255}, color.RGBA{35, 132, 67, 255}, color.RGBA{0, 90, 50, 255}}\n\tYlGn_9 = []color.Color{color.RGBA{255, 255, 229, 255}, color.RGBA{247, 252, 185, 255}, color.RGBA{217, 240, 163, 255}, color.RGBA{173, 221, 142, 255}, color.RGBA{120, 198, 121, 255}, color.RGBA{65, 171, 93, 255}, color.RGBA{35, 132, 67, 255}, color.RGBA{0, 104, 55, 255}, color.RGBA{0, 69, 41, 255}}\n)\n\nvar (\n\t// YlGnBu is a sequential palette.\n\tYlGnBu = map[int][]color.Color{3: YlGnBu_3, 4: YlGnBu_4, 5: YlGnBu_5, 6: YlGnBu_6, 7: YlGnBu_7, 8: YlGnBu_8, 9: YlGnBu_9}\n\tYlGnBu_3 = []color.Color{color.RGBA{237, 248, 177, 255}, color.RGBA{127, 205, 187, 255}, color.RGBA{44, 127, 184, 255}}\n\tYlGnBu_4 = []color.Color{color.RGBA{255, 255, 204, 255}, color.RGBA{161, 218, 180, 255}, color.RGBA{65, 182, 196, 255}, color.RGBA{34, 94, 168, 255}}\n\tYlGnBu_5 = []color.Color{color.RGBA{255, 255, 204, 255}, color.RGBA{161, 218, 180, 255}, color.RGBA{65, 182, 196, 
255}, color.RGBA{44, 127, 184, 255}, color.RGBA{37, 52, 148, 255}}\n\tYlGnBu_6 = []color.Color{color.RGBA{255, 255, 204, 255}, color.RGBA{199, 233, 180, 255}, color.RGBA{127, 205, 187, 255}, color.RGBA{65, 182, 196, 255}, color.RGBA{44, 127, 184, 255}, color.RGBA{37, 52, 148, 255}}\n\tYlGnBu_7 = []color.Color{color.RGBA{255, 255, 204, 255}, color.RGBA{199, 233, 180, 255}, color.RGBA{127, 205, 187, 255}, color.RGBA{65, 182, 196, 255}, color.RGBA{29, 145, 192, 255}, color.RGBA{34, 94, 168, 255}, color.RGBA{12, 44, 132, 255}}\n\tYlGnBu_8 = []color.Color{color.RGBA{255, 255, 217, 255}, color.RGBA{237, 248, 177, 255}, color.RGBA{199, 233, 180, 255}, color.RGBA{127, 205, 187, 255}, color.RGBA{65, 182, 196, 255}, color.RGBA{29, 145, 192, 255}, color.RGBA{34, 94, 168, 255}, color.RGBA{12, 44, 132, 255}}\n\tYlGnBu_9 = []color.Color{color.RGBA{255, 255, 217, 255}, color.RGBA{237, 248, 177, 255}, color.RGBA{199, 233, 180, 255}, color.RGBA{127, 205, 187, 255}, color.RGBA{65, 182, 196, 255}, color.RGBA{29, 145, 192, 255}, color.RGBA{34, 94, 168, 255}, color.RGBA{37, 52, 148, 255}, color.RGBA{8, 29, 88, 255}}\n)\n\nvar (\n\t// YlOrBr is a sequential palette.\n\tYlOrBr = map[int][]color.Color{3: YlOrBr_3, 4: YlOrBr_4, 5: YlOrBr_5, 6: YlOrBr_6, 7: YlOrBr_7, 8: YlOrBr_8, 9: YlOrBr_9}\n\tYlOrBr_3 = []color.Color{color.RGBA{255, 247, 188, 255}, color.RGBA{254, 196, 79, 255}, color.RGBA{217, 95, 14, 255}}\n\tYlOrBr_4 = []color.Color{color.RGBA{255, 255, 212, 255}, color.RGBA{254, 217, 142, 255}, color.RGBA{254, 153, 41, 255}, color.RGBA{204, 76, 2, 255}}\n\tYlOrBr_5 = []color.Color{color.RGBA{255, 255, 212, 255}, color.RGBA{254, 217, 142, 255}, color.RGBA{254, 153, 41, 255}, color.RGBA{217, 95, 14, 255}, color.RGBA{153, 52, 4, 255}}\n\tYlOrBr_6 = []color.Color{color.RGBA{255, 255, 212, 255}, color.RGBA{254, 227, 145, 255}, color.RGBA{254, 196, 79, 255}, color.RGBA{254, 153, 41, 255}, color.RGBA{217, 95, 14, 255}, color.RGBA{153, 52, 4, 255}}\n\tYlOrBr_7 = []color.Color{color.RGBA{255, 
255, 212, 255}, color.RGBA{254, 227, 145, 255}, color.RGBA{254, 196, 79, 255}, color.RGBA{254, 153, 41, 255}, color.RGBA{236, 112, 20, 255}, color.RGBA{204, 76, 2, 255}, color.RGBA{140, 45, 4, 255}}\n\tYlOrBr_8 = []color.Color{color.RGBA{255, 255, 229, 255}, color.RGBA{255, 247, 188, 255}, color.RGBA{254, 227, 145, 255}, color.RGBA{254, 196, 79, 255}, color.RGBA{254, 153, 41, 255}, color.RGBA{236, 112, 20, 255}, color.RGBA{204, 76, 2, 255}, color.RGBA{140, 45, 4, 255}}\n\tYlOrBr_9 = []color.Color{color.RGBA{255, 255, 229, 255}, color.RGBA{255, 247, 188, 255}, color.RGBA{254, 227, 145, 255}, color.RGBA{254, 196, 79, 255}, color.RGBA{254, 153, 41, 255}, color.RGBA{236, 112, 20, 255}, color.RGBA{204, 76, 2, 255}, color.RGBA{153, 52, 4, 255}, color.RGBA{102, 37, 6, 255}}\n)\n\nvar (\n\t// YlOrRd is a sequential palette.\n\tYlOrRd = map[int][]color.Color{3: YlOrRd_3, 4: YlOrRd_4, 5: YlOrRd_5, 6: YlOrRd_6, 7: YlOrRd_7, 8: YlOrRd_8}\n\tYlOrRd_3 = []color.Color{color.RGBA{255, 237, 160, 255}, color.RGBA{254, 178, 76, 255}, color.RGBA{240, 59, 32, 255}}\n\tYlOrRd_4 = []color.Color{color.RGBA{255, 255, 178, 255}, color.RGBA{254, 204, 92, 255}, color.RGBA{253, 141, 60, 255}, color.RGBA{227, 26, 28, 255}}\n\tYlOrRd_5 = []color.Color{color.RGBA{255, 255, 178, 255}, color.RGBA{254, 204, 92, 255}, color.RGBA{253, 141, 60, 255}, color.RGBA{240, 59, 32, 255}, color.RGBA{189, 0, 38, 255}}\n\tYlOrRd_6 = []color.Color{color.RGBA{255, 255, 178, 255}, color.RGBA{254, 217, 118, 255}, color.RGBA{254, 178, 76, 255}, color.RGBA{253, 141, 60, 255}, color.RGBA{240, 59, 32, 255}, color.RGBA{189, 0, 38, 255}}\n\tYlOrRd_7 = []color.Color{color.RGBA{255, 255, 178, 255}, color.RGBA{254, 217, 118, 255}, color.RGBA{254, 178, 76, 255}, color.RGBA{253, 141, 60, 255}, color.RGBA{252, 78, 42, 255}, color.RGBA{227, 26, 28, 255}, color.RGBA{177, 0, 38, 255}}\n\tYlOrRd_8 = []color.Color{color.RGBA{255, 255, 204, 255}, color.RGBA{255, 237, 160, 255}, color.RGBA{254, 217, 118, 255}, color.RGBA{254, 178, 76, 
255}, color.RGBA{253, 141, 60, 255}, color.RGBA{252, 78, 42, 255}, color.RGBA{227, 26, 28, 255}, color.RGBA{177, 0, 38, 255}}\n)\n\n// ByName is a map indexing all palettes by string name.\nvar ByName = map[string]map[int][]color.Color{\"Accent\": Accent, \"Blues\": Blues, \"BrBG\": BrBG, \"BuGn\": BuGn, \"BuPu\": BuPu, \"Dark2\": Dark2, \"GnBu\": GnBu, \"Greens\": Greens, \"Greys\": Greys, \"OrRd\": OrRd, \"Oranges\": Oranges, \"PRGn\": PRGn, \"Paired\": Paired, \"Pastel1\": Pastel1, \"Pastel2\": Pastel2, \"PiYG\": PiYG, \"PuBu\": PuBu, \"PuBuGn\": PuBuGn, \"PuOr\": PuOr, \"PuRd\": PuRd, \"Purples\": Purples, \"RdBu\": RdBu, \"RdGy\": RdGy, \"RdPu\": RdPu, \"RdYlBu\": RdYlBu, \"RdYlGn\": RdYlGn, \"Reds\": Reds, \"Set1\": Set1, \"Set2\": Set2, \"Set3\": Set3, \"Spectral\": Spectral, \"YlGn\": YlGn, \"YlGnBu\": YlGnBu, \"YlOrBr\": YlOrBr, \"YlOrRd\": YlOrRd}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/palette/brewer/colorbrewer.json",
    "content": "{ \n\"Spectral\":  {\"3\": [\"rgb(252,141,89)\", \"rgb(255,255,191)\", \"rgb(153,213,148)\"], \"4\": [\"rgb(215,25,28)\", \"rgb(253,174,97)\", \"rgb(171,221,164)\", \"rgb(43,131,186)\"], \"5\": [\"rgb(215,25,28)\", \"rgb(253,174,97)\", \"rgb(255,255,191)\", \"rgb(171,221,164)\", \"rgb(43,131,186)\"], \"6\": [\"rgb(213,62,79)\", \"rgb(252,141,89)\", \"rgb(254,224,139)\", \"rgb(230,245,152)\", \"rgb(153,213,148)\", \"rgb(50,136,189)\"], \"7\": [\"rgb(213,62,79)\", \"rgb(252,141,89)\", \"rgb(254,224,139)\", \"rgb(255,255,191)\", \"rgb(230,245,152)\", \"rgb(153,213,148)\", \"rgb(50,136,189)\"], \"8\": [\"rgb(213,62,79)\", \"rgb(244,109,67)\", \"rgb(253,174,97)\", \"rgb(254,224,139)\", \"rgb(230,245,152)\", \"rgb(171,221,164)\", \"rgb(102,194,165)\", \"rgb(50,136,189)\"], \"9\": [\"rgb(213,62,79)\", \"rgb(244,109,67)\", \"rgb(253,174,97)\", \"rgb(254,224,139)\", \"rgb(255,255,191)\", \"rgb(230,245,152)\", \"rgb(171,221,164)\", \"rgb(102,194,165)\", \"rgb(50,136,189)\"], \"10\": [\"rgb(158,1,66)\", \"rgb(213,62,79)\", \"rgb(244,109,67)\", \"rgb(253,174,97)\", \"rgb(254,224,139)\", \"rgb(230,245,152)\", \"rgb(171,221,164)\", \"rgb(102,194,165)\", \"rgb(50,136,189)\", \"rgb(94,79,162)\"], \"11\": [\"rgb(158,1,66)\", \"rgb(213,62,79)\", \"rgb(244,109,67)\", \"rgb(253,174,97)\", \"rgb(254,224,139)\", \"rgb(255,255,191)\", \"rgb(230,245,152)\", \"rgb(171,221,164)\", \"rgb(102,194,165)\", \"rgb(50,136,189)\", \"rgb(94,79,162)\"], \"type\": \"div\"} ,\n\"RdYlGn\":  {\"3\": [\"rgb(252,141,89)\", \"rgb(255,255,191)\", \"rgb(145,207,96)\"], \"4\": [\"rgb(215,25,28)\", \"rgb(253,174,97)\", \"rgb(166,217,106)\", \"rgb(26,150,65)\"], \"5\": [\"rgb(215,25,28)\", \"rgb(253,174,97)\", \"rgb(255,255,191)\", \"rgb(166,217,106)\", \"rgb(26,150,65)\"], \"6\": [\"rgb(215,48,39)\", \"rgb(252,141,89)\", \"rgb(254,224,139)\", \"rgb(217,239,139)\", \"rgb(145,207,96)\", \"rgb(26,152,80)\"], \"7\": [\"rgb(215,48,39)\", \"rgb(252,141,89)\", \"rgb(254,224,139)\", 
\"rgb(255,255,191)\", \"rgb(217,239,139)\", \"rgb(145,207,96)\", \"rgb(26,152,80)\"], \"8\": [\"rgb(215,48,39)\", \"rgb(244,109,67)\", \"rgb(253,174,97)\", \"rgb(254,224,139)\", \"rgb(217,239,139)\", \"rgb(166,217,106)\", \"rgb(102,189,99)\", \"rgb(26,152,80)\"], \"9\": [\"rgb(215,48,39)\", \"rgb(244,109,67)\", \"rgb(253,174,97)\", \"rgb(254,224,139)\", \"rgb(255,255,191)\", \"rgb(217,239,139)\", \"rgb(166,217,106)\", \"rgb(102,189,99)\", \"rgb(26,152,80)\"], \"10\": [\"rgb(165,0,38)\", \"rgb(215,48,39)\", \"rgb(244,109,67)\", \"rgb(253,174,97)\", \"rgb(254,224,139)\", \"rgb(217,239,139)\", \"rgb(166,217,106)\", \"rgb(102,189,99)\", \"rgb(26,152,80)\", \"rgb(0,104,55)\"], \"11\": [\"rgb(165,0,38)\", \"rgb(215,48,39)\", \"rgb(244,109,67)\", \"rgb(253,174,97)\", \"rgb(254,224,139)\", \"rgb(255,255,191)\", \"rgb(217,239,139)\", \"rgb(166,217,106)\", \"rgb(102,189,99)\", \"rgb(26,152,80)\", \"rgb(0,104,55)\"], \"type\": \"div\"} ,\n\"RdBu\":  {\"3\": [\"rgb(239,138,98)\", \"rgb(247,247,247)\", \"rgb(103,169,207)\"], \"4\": [\"rgb(202,0,32)\", \"rgb(244,165,130)\", \"rgb(146,197,222)\", \"rgb(5,113,176)\"], \"5\": [\"rgb(202,0,32)\", \"rgb(244,165,130)\", \"rgb(247,247,247)\", \"rgb(146,197,222)\", \"rgb(5,113,176)\"], \"6\": [\"rgb(178,24,43)\", \"rgb(239,138,98)\", \"rgb(253,219,199)\", \"rgb(209,229,240)\", \"rgb(103,169,207)\", \"rgb(33,102,172)\"], \"7\": [\"rgb(178,24,43)\", \"rgb(239,138,98)\", \"rgb(253,219,199)\", \"rgb(247,247,247)\", \"rgb(209,229,240)\", \"rgb(103,169,207)\", \"rgb(33,102,172)\"], \"8\": [\"rgb(178,24,43)\", \"rgb(214,96,77)\", \"rgb(244,165,130)\", \"rgb(253,219,199)\", \"rgb(209,229,240)\", \"rgb(146,197,222)\", \"rgb(67,147,195)\", \"rgb(33,102,172)\"], \"9\": [\"rgb(178,24,43)\", \"rgb(214,96,77)\", \"rgb(244,165,130)\", \"rgb(253,219,199)\", \"rgb(247,247,247)\", \"rgb(209,229,240)\", \"rgb(146,197,222)\", \"rgb(67,147,195)\", \"rgb(33,102,172)\"], \"10\": [\"rgb(103,0,31)\", \"rgb(178,24,43)\", \"rgb(214,96,77)\", \"rgb(244,165,130)\", 
\"rgb(253,219,199)\", \"rgb(209,229,240)\", \"rgb(146,197,222)\", \"rgb(67,147,195)\", \"rgb(33,102,172)\", \"rgb(5,48,97)\"], \"11\": [\"rgb(103,0,31)\", \"rgb(178,24,43)\", \"rgb(214,96,77)\", \"rgb(244,165,130)\", \"rgb(253,219,199)\", \"rgb(247,247,247)\", \"rgb(209,229,240)\", \"rgb(146,197,222)\", \"rgb(67,147,195)\", \"rgb(33,102,172)\", \"rgb(5,48,97)\"], \"type\": \"div\"} ,\n\"PiYG\":  {\"3\": [\"rgb(233,163,201)\", \"rgb(247,247,247)\", \"rgb(161,215,106)\"], \"4\": [\"rgb(208,28,139)\", \"rgb(241,182,218)\", \"rgb(184,225,134)\", \"rgb(77,172,38)\"], \"5\": [\"rgb(208,28,139)\", \"rgb(241,182,218)\", \"rgb(247,247,247)\", \"rgb(184,225,134)\", \"rgb(77,172,38)\"], \"6\": [\"rgb(197,27,125)\", \"rgb(233,163,201)\", \"rgb(253,224,239)\", \"rgb(230,245,208)\", \"rgb(161,215,106)\", \"rgb(77,146,33)\"], \"7\": [\"rgb(197,27,125)\", \"rgb(233,163,201)\", \"rgb(253,224,239)\", \"rgb(247,247,247)\", \"rgb(230,245,208)\", \"rgb(161,215,106)\", \"rgb(77,146,33)\"], \"8\": [\"rgb(197,27,125)\", \"rgb(222,119,174)\", \"rgb(241,182,218)\", \"rgb(253,224,239)\", \"rgb(230,245,208)\", \"rgb(184,225,134)\", \"rgb(127,188,65)\", \"rgb(77,146,33)\"], \"9\": [\"rgb(197,27,125)\", \"rgb(222,119,174)\", \"rgb(241,182,218)\", \"rgb(253,224,239)\", \"rgb(247,247,247)\", \"rgb(230,245,208)\", \"rgb(184,225,134)\", \"rgb(127,188,65)\", \"rgb(77,146,33)\"], \"10\": [\"rgb(142,1,82)\", \"rgb(197,27,125)\", \"rgb(222,119,174)\", \"rgb(241,182,218)\", \"rgb(253,224,239)\", \"rgb(230,245,208)\", \"rgb(184,225,134)\", \"rgb(127,188,65)\", \"rgb(77,146,33)\", \"rgb(39,100,25)\"], \"11\": [\"rgb(142,1,82)\", \"rgb(197,27,125)\", \"rgb(222,119,174)\", \"rgb(241,182,218)\", \"rgb(253,224,239)\", \"rgb(247,247,247)\", \"rgb(230,245,208)\", \"rgb(184,225,134)\", \"rgb(127,188,65)\", \"rgb(77,146,33)\", \"rgb(39,100,25)\"], \"type\": \"div\"} ,\n\"PRGn\":  {\"3\": [\"rgb(175,141,195)\", \"rgb(247,247,247)\", \"rgb(127,191,123)\"], \"4\": [\"rgb(123,50,148)\", \"rgb(194,165,207)\", 
\"rgb(166,219,160)\", \"rgb(0,136,55)\"], \"5\": [\"rgb(123,50,148)\", \"rgb(194,165,207)\", \"rgb(247,247,247)\", \"rgb(166,219,160)\", \"rgb(0,136,55)\"], \"6\": [\"rgb(118,42,131)\", \"rgb(175,141,195)\", \"rgb(231,212,232)\", \"rgb(217,240,211)\", \"rgb(127,191,123)\", \"rgb(27,120,55)\"], \"7\": [\"rgb(118,42,131)\", \"rgb(175,141,195)\", \"rgb(231,212,232)\", \"rgb(247,247,247)\", \"rgb(217,240,211)\", \"rgb(127,191,123)\", \"rgb(27,120,55)\"], \"8\": [\"rgb(118,42,131)\", \"rgb(153,112,171)\", \"rgb(194,165,207)\", \"rgb(231,212,232)\", \"rgb(217,240,211)\", \"rgb(166,219,160)\", \"rgb(90,174,97)\", \"rgb(27,120,55)\"], \"9\": [\"rgb(118,42,131)\", \"rgb(153,112,171)\", \"rgb(194,165,207)\", \"rgb(231,212,232)\", \"rgb(247,247,247)\", \"rgb(217,240,211)\", \"rgb(166,219,160)\", \"rgb(90,174,97)\", \"rgb(27,120,55)\"], \"10\": [\"rgb(64,0,75)\", \"rgb(118,42,131)\", \"rgb(153,112,171)\", \"rgb(194,165,207)\", \"rgb(231,212,232)\", \"rgb(217,240,211)\", \"rgb(166,219,160)\", \"rgb(90,174,97)\", \"rgb(27,120,55)\", \"rgb(0,68,27)\"], \"11\": [\"rgb(64,0,75)\", \"rgb(118,42,131)\", \"rgb(153,112,171)\", \"rgb(194,165,207)\", \"rgb(231,212,232)\", \"rgb(247,247,247)\", \"rgb(217,240,211)\", \"rgb(166,219,160)\", \"rgb(90,174,97)\", \"rgb(27,120,55)\", \"rgb(0,68,27)\"], \"type\": \"div\"} ,\n\"RdYlBu\":  {\"3\": [\"rgb(252,141,89)\", \"rgb(255,255,191)\", \"rgb(145,191,219)\"], \"4\": [\"rgb(215,25,28)\", \"rgb(253,174,97)\", \"rgb(171,217,233)\", \"rgb(44,123,182)\"], \"5\": [\"rgb(215,25,28)\", \"rgb(253,174,97)\", \"rgb(255,255,191)\", \"rgb(171,217,233)\", \"rgb(44,123,182)\"], \"6\": [\"rgb(215,48,39)\", \"rgb(252,141,89)\", \"rgb(254,224,144)\", \"rgb(224,243,248)\", \"rgb(145,191,219)\", \"rgb(69,117,180)\"], \"7\": [\"rgb(215,48,39)\", \"rgb(252,141,89)\", \"rgb(254,224,144)\", \"rgb(255,255,191)\", \"rgb(224,243,248)\", \"rgb(145,191,219)\", \"rgb(69,117,180)\"], \"8\": [\"rgb(215,48,39)\", \"rgb(244,109,67)\", \"rgb(253,174,97)\", \"rgb(254,224,144)\", 
\"rgb(224,243,248)\", \"rgb(171,217,233)\", \"rgb(116,173,209)\", \"rgb(69,117,180)\"], \"9\": [\"rgb(215,48,39)\", \"rgb(244,109,67)\", \"rgb(253,174,97)\", \"rgb(254,224,144)\", \"rgb(255,255,191)\", \"rgb(224,243,248)\", \"rgb(171,217,233)\", \"rgb(116,173,209)\", \"rgb(69,117,180)\"], \"10\": [\"rgb(165,0,38)\", \"rgb(215,48,39)\", \"rgb(244,109,67)\", \"rgb(253,174,97)\", \"rgb(254,224,144)\", \"rgb(224,243,248)\", \"rgb(171,217,233)\", \"rgb(116,173,209)\", \"rgb(69,117,180)\", \"rgb(49,54,149)\"], \"11\": [\"rgb(165,0,38)\", \"rgb(215,48,39)\", \"rgb(244,109,67)\", \"rgb(253,174,97)\", \"rgb(254,224,144)\", \"rgb(255,255,191)\", \"rgb(224,243,248)\", \"rgb(171,217,233)\", \"rgb(116,173,209)\", \"rgb(69,117,180)\", \"rgb(49,54,149)\"], \"type\": \"div\"} ,\n\"BrBG\":  {\"3\": [\"rgb(216,179,101)\", \"rgb(245,245,245)\", \"rgb(90,180,172)\"], \"4\": [\"rgb(166,97,26)\", \"rgb(223,194,125)\", \"rgb(128,205,193)\", \"rgb(1,133,113)\"], \"5\": [\"rgb(166,97,26)\", \"rgb(223,194,125)\", \"rgb(245,245,245)\", \"rgb(128,205,193)\", \"rgb(1,133,113)\"], \"6\": [\"rgb(140,81,10)\", \"rgb(216,179,101)\", \"rgb(246,232,195)\", \"rgb(199,234,229)\", \"rgb(90,180,172)\", \"rgb(1,102,94)\"], \"7\": [\"rgb(140,81,10)\", \"rgb(216,179,101)\", \"rgb(246,232,195)\", \"rgb(245,245,245)\", \"rgb(199,234,229)\", \"rgb(90,180,172)\", \"rgb(1,102,94)\"], \"8\": [\"rgb(140,81,10)\", \"rgb(191,129,45)\", \"rgb(223,194,125)\", \"rgb(246,232,195)\", \"rgb(199,234,229)\", \"rgb(128,205,193)\", \"rgb(53,151,143)\", \"rgb(1,102,94)\"], \"9\": [\"rgb(140,81,10)\", \"rgb(191,129,45)\", \"rgb(223,194,125)\", \"rgb(246,232,195)\", \"rgb(245,245,245)\", \"rgb(199,234,229)\", \"rgb(128,205,193)\", \"rgb(53,151,143)\", \"rgb(1,102,94)\"], \"10\": [\"rgb(84,48,5)\", \"rgb(140,81,10)\", \"rgb(191,129,45)\", \"rgb(223,194,125)\", \"rgb(246,232,195)\", \"rgb(199,234,229)\", \"rgb(128,205,193)\", \"rgb(53,151,143)\", \"rgb(1,102,94)\", \"rgb(0,60,48)\"], \"11\": [\"rgb(84,48,5)\", \"rgb(140,81,10)\", 
\"rgb(191,129,45)\", \"rgb(223,194,125)\", \"rgb(246,232,195)\", \"rgb(245,245,245)\", \"rgb(199,234,229)\", \"rgb(128,205,193)\", \"rgb(53,151,143)\", \"rgb(1,102,94)\", \"rgb(0,60,48)\"], \"type\": \"div\"} ,\n\"RdGy\":  {\"3\": [\"rgb(239,138,98)\", \"rgb(255,255,255)\", \"rgb(153,153,153)\"], \"4\": [\"rgb(202,0,32)\", \"rgb(244,165,130)\", \"rgb(186,186,186)\", \"rgb(64,64,64)\"], \"5\": [\"rgb(202,0,32)\", \"rgb(244,165,130)\", \"rgb(255,255,255)\", \"rgb(186,186,186)\", \"rgb(64,64,64)\"], \"6\": [\"rgb(178,24,43)\", \"rgb(239,138,98)\", \"rgb(253,219,199)\", \"rgb(224,224,224)\", \"rgb(153,153,153)\", \"rgb(77,77,77)\"], \"7\": [\"rgb(178,24,43)\", \"rgb(239,138,98)\", \"rgb(253,219,199)\", \"rgb(255,255,255)\", \"rgb(224,224,224)\", \"rgb(153,153,153)\", \"rgb(77,77,77)\"], \"8\": [\"rgb(178,24,43)\", \"rgb(214,96,77)\", \"rgb(244,165,130)\", \"rgb(253,219,199)\", \"rgb(224,224,224)\", \"rgb(186,186,186)\", \"rgb(135,135,135)\", \"rgb(77,77,77)\"], \"9\": [\"rgb(178,24,43)\", \"rgb(214,96,77)\", \"rgb(244,165,130)\", \"rgb(253,219,199)\", \"rgb(255,255,255)\", \"rgb(224,224,224)\", \"rgb(186,186,186)\", \"rgb(135,135,135)\", \"rgb(77,77,77)\"], \"10\": [\"rgb(103,0,31)\", \"rgb(178,24,43)\", \"rgb(214,96,77)\", \"rgb(244,165,130)\", \"rgb(253,219,199)\", \"rgb(224,224,224)\", \"rgb(186,186,186)\", \"rgb(135,135,135)\", \"rgb(77,77,77)\", \"rgb(26,26,26)\"], \"11\": [\"rgb(103,0,31)\", \"rgb(178,24,43)\", \"rgb(214,96,77)\", \"rgb(244,165,130)\", \"rgb(253,219,199)\", \"rgb(255,255,255)\", \"rgb(224,224,224)\", \"rgb(186,186,186)\", \"rgb(135,135,135)\", \"rgb(77,77,77)\", \"rgb(26,26,26)\"], \"type\": \"div\"} ,\n\"PuOr\":  {\"3\": [\"rgb(241,163,64)\", \"rgb(247,247,247)\", \"rgb(153,142,195)\"], \"4\": [\"rgb(230,97,1)\", \"rgb(253,184,99)\", \"rgb(178,171,210)\", \"rgb(94,60,153)\"], \"5\": [\"rgb(230,97,1)\", \"rgb(253,184,99)\", \"rgb(247,247,247)\", \"rgb(178,171,210)\", \"rgb(94,60,153)\"], \"6\": [\"rgb(179,88,6)\", \"rgb(241,163,64)\", 
\"rgb(254,224,182)\", \"rgb(216,218,235)\", \"rgb(153,142,195)\", \"rgb(84,39,136)\"], \"7\": [\"rgb(179,88,6)\", \"rgb(241,163,64)\", \"rgb(254,224,182)\", \"rgb(247,247,247)\", \"rgb(216,218,235)\", \"rgb(153,142,195)\", \"rgb(84,39,136)\"], \"8\": [\"rgb(179,88,6)\", \"rgb(224,130,20)\", \"rgb(253,184,99)\", \"rgb(254,224,182)\", \"rgb(216,218,235)\", \"rgb(178,171,210)\", \"rgb(128,115,172)\", \"rgb(84,39,136)\"], \"9\": [\"rgb(179,88,6)\", \"rgb(224,130,20)\", \"rgb(253,184,99)\", \"rgb(254,224,182)\", \"rgb(247,247,247)\", \"rgb(216,218,235)\", \"rgb(178,171,210)\", \"rgb(128,115,172)\", \"rgb(84,39,136)\"], \"10\": [\"rgb(127,59,8)\", \"rgb(179,88,6)\", \"rgb(224,130,20)\", \"rgb(253,184,99)\", \"rgb(254,224,182)\", \"rgb(216,218,235)\", \"rgb(178,171,210)\", \"rgb(128,115,172)\", \"rgb(84,39,136)\", \"rgb(45,0,75)\"], \"11\": [\"rgb(127,59,8)\", \"rgb(179,88,6)\", \"rgb(224,130,20)\", \"rgb(253,184,99)\", \"rgb(254,224,182)\", \"rgb(247,247,247)\", \"rgb(216,218,235)\", \"rgb(178,171,210)\", \"rgb(128,115,172)\", \"rgb(84,39,136)\", \"rgb(45,0,75)\"], \"type\": \"div\"} ,\n\n\"Set2\":  {\"3\": [\"rgb(102,194,165)\", \"rgb(252,141,98)\", \"rgb(141,160,203)\"], \"4\": [\"rgb(102,194,165)\", \"rgb(252,141,98)\", \"rgb(141,160,203)\", \"rgb(231,138,195)\"], \"5\": [\"rgb(102,194,165)\", \"rgb(252,141,98)\", \"rgb(141,160,203)\", \"rgb(231,138,195)\", \"rgb(166,216,84)\"], \"6\": [\"rgb(102,194,165)\", \"rgb(252,141,98)\", \"rgb(141,160,203)\", \"rgb(231,138,195)\", \"rgb(166,216,84)\", \"rgb(255,217,47)\"], \"7\": [\"rgb(102,194,165)\", \"rgb(252,141,98)\", \"rgb(141,160,203)\", \"rgb(231,138,195)\", \"rgb(166,216,84)\", \"rgb(255,217,47)\", \"rgb(229,196,148)\"], \"8\": [\"rgb(102,194,165)\", \"rgb(252,141,98)\", \"rgb(141,160,203)\", \"rgb(231,138,195)\", \"rgb(166,216,84)\", \"rgb(255,217,47)\", \"rgb(229,196,148)\", \"rgb(179,179,179)\"], \"type\": \"qual\"} ,\n\"Accent\":  {\"3\": [\"rgb(127,201,127)\", \"rgb(190,174,212)\", \"rgb(253,192,134)\"], \"4\": 
[\"rgb(127,201,127)\", \"rgb(190,174,212)\", \"rgb(253,192,134)\", \"rgb(255,255,153)\"], \"5\": [\"rgb(127,201,127)\", \"rgb(190,174,212)\", \"rgb(253,192,134)\", \"rgb(255,255,153)\", \"rgb(56,108,176)\"], \"6\": [\"rgb(127,201,127)\", \"rgb(190,174,212)\", \"rgb(253,192,134)\", \"rgb(255,255,153)\", \"rgb(56,108,176)\", \"rgb(240,2,127)\"], \"7\": [\"rgb(127,201,127)\", \"rgb(190,174,212)\", \"rgb(253,192,134)\", \"rgb(255,255,153)\", \"rgb(56,108,176)\", \"rgb(240,2,127)\", \"rgb(191,91,23)\"], \"8\": [\"rgb(127,201,127)\", \"rgb(190,174,212)\", \"rgb(253,192,134)\", \"rgb(255,255,153)\", \"rgb(56,108,176)\", \"rgb(240,2,127)\", \"rgb(191,91,23)\", \"rgb(102,102,102)\"], \"type\": \"qual\"} ,\n\"Set1\":  {\"3\": [\"rgb(228,26,28)\", \"rgb(55,126,184)\", \"rgb(77,175,74)\"], \"4\": [\"rgb(228,26,28)\", \"rgb(55,126,184)\", \"rgb(77,175,74)\", \"rgb(152,78,163)\"], \"5\": [\"rgb(228,26,28)\", \"rgb(55,126,184)\", \"rgb(77,175,74)\", \"rgb(152,78,163)\", \"rgb(255,127,0)\"], \"6\": [\"rgb(228,26,28)\", \"rgb(55,126,184)\", \"rgb(77,175,74)\", \"rgb(152,78,163)\", \"rgb(255,127,0)\", \"rgb(255,255,51)\"], \"7\": [\"rgb(228,26,28)\", \"rgb(55,126,184)\", \"rgb(77,175,74)\", \"rgb(152,78,163)\", \"rgb(255,127,0)\", \"rgb(255,255,51)\", \"rgb(166,86,40)\"], \"8\": [\"rgb(228,26,28)\", \"rgb(55,126,184)\", \"rgb(77,175,74)\", \"rgb(152,78,163)\", \"rgb(255,127,0)\", \"rgb(255,255,51)\", \"rgb(166,86,40)\", \"rgb(247,129,191)\"], \"9\": [\"rgb(228,26,28)\", \"rgb(55,126,184)\", \"rgb(77,175,74)\", \"rgb(152,78,163)\", \"rgb(255,127,0)\", \"rgb(255,255,51)\", \"rgb(166,86,40)\", \"rgb(247,129,191)\", \"rgb(153,153,153)\"], \"type\": \"qual\"} ,\n\"Set3\":  {\"3\": [\"rgb(141,211,199)\", \"rgb(255,255,179)\", \"rgb(190,186,218)\"], \"4\": [\"rgb(141,211,199)\", \"rgb(255,255,179)\", \"rgb(190,186,218)\", \"rgb(251,128,114)\"], \"5\": [\"rgb(141,211,199)\", \"rgb(255,255,179)\", \"rgb(190,186,218)\", \"rgb(251,128,114)\", \"rgb(128,177,211)\"], \"6\": 
[\"rgb(141,211,199)\", \"rgb(255,255,179)\", \"rgb(190,186,218)\", \"rgb(251,128,114)\", \"rgb(128,177,211)\", \"rgb(253,180,98)\"], \"7\": [\"rgb(141,211,199)\", \"rgb(255,255,179)\", \"rgb(190,186,218)\", \"rgb(251,128,114)\", \"rgb(128,177,211)\", \"rgb(253,180,98)\", \"rgb(179,222,105)\"], \"8\": [\"rgb(141,211,199)\", \"rgb(255,255,179)\", \"rgb(190,186,218)\", \"rgb(251,128,114)\", \"rgb(128,177,211)\", \"rgb(253,180,98)\", \"rgb(179,222,105)\", \"rgb(252,205,229)\"], \"9\": [\"rgb(141,211,199)\", \"rgb(255,255,179)\", \"rgb(190,186,218)\", \"rgb(251,128,114)\", \"rgb(128,177,211)\", \"rgb(253,180,98)\", \"rgb(179,222,105)\", \"rgb(252,205,229)\", \"rgb(217,217,217)\"], \"10\": [\"rgb(141,211,199)\", \"rgb(255,255,179)\", \"rgb(190,186,218)\", \"rgb(251,128,114)\", \"rgb(128,177,211)\", \"rgb(253,180,98)\", \"rgb(179,222,105)\", \"rgb(252,205,229)\", \"rgb(217,217,217)\", \"rgb(188,128,189)\"], \"11\": [\"rgb(141,211,199)\", \"rgb(255,255,179)\", \"rgb(190,186,218)\", \"rgb(251,128,114)\", \"rgb(128,177,211)\", \"rgb(253,180,98)\", \"rgb(179,222,105)\", \"rgb(252,205,229)\", \"rgb(217,217,217)\", \"rgb(188,128,189)\", \"rgb(204,235,197)\"], \"12\": [\"rgb(141,211,199)\", \"rgb(255,255,179)\", \"rgb(190,186,218)\", \"rgb(251,128,114)\", \"rgb(128,177,211)\", \"rgb(253,180,98)\", \"rgb(179,222,105)\", \"rgb(252,205,229)\", \"rgb(217,217,217)\", \"rgb(188,128,189)\", \"rgb(204,235,197)\", \"rgb(255,237,111)\"], \"type\": \"qual\"} ,\n\"Dark2\":  {\"3\": [\"rgb(27,158,119)\", \"rgb(217,95,2)\", \"rgb(117,112,179)\"], \"4\": [\"rgb(27,158,119)\", \"rgb(217,95,2)\", \"rgb(117,112,179)\", \"rgb(231,41,138)\"], \"5\": [\"rgb(27,158,119)\", \"rgb(217,95,2)\", \"rgb(117,112,179)\", \"rgb(231,41,138)\", \"rgb(102,166,30)\"], \"6\": [\"rgb(27,158,119)\", \"rgb(217,95,2)\", \"rgb(117,112,179)\", \"rgb(231,41,138)\", \"rgb(102,166,30)\", \"rgb(230,171,2)\"], \"7\": [\"rgb(27,158,119)\", \"rgb(217,95,2)\", \"rgb(117,112,179)\", \"rgb(231,41,138)\", \"rgb(102,166,30)\", 
\"rgb(230,171,2)\", \"rgb(166,118,29)\"], \"8\": [\"rgb(27,158,119)\", \"rgb(217,95,2)\", \"rgb(117,112,179)\", \"rgb(231,41,138)\", \"rgb(102,166,30)\", \"rgb(230,171,2)\", \"rgb(166,118,29)\", \"rgb(102,102,102)\"], \"type\": \"qual\"} ,\n\"Paired\":  {\"3\": [\"rgb(166,206,227)\", \"rgb(31,120,180)\", \"rgb(178,223,138)\"], \"4\": [\"rgb(166,206,227)\", \"rgb(31,120,180)\", \"rgb(178,223,138)\", \"rgb(51,160,44)\"], \"5\": [\"rgb(166,206,227)\", \"rgb(31,120,180)\", \"rgb(178,223,138)\", \"rgb(51,160,44)\", \"rgb(251,154,153)\"], \"6\": [\"rgb(166,206,227)\", \"rgb(31,120,180)\", \"rgb(178,223,138)\", \"rgb(51,160,44)\", \"rgb(251,154,153)\", \"rgb(227,26,28)\"], \"7\": [\"rgb(166,206,227)\", \"rgb(31,120,180)\", \"rgb(178,223,138)\", \"rgb(51,160,44)\", \"rgb(251,154,153)\", \"rgb(227,26,28)\", \"rgb(253,191,111)\"], \"8\": [\"rgb(166,206,227)\", \"rgb(31,120,180)\", \"rgb(178,223,138)\", \"rgb(51,160,44)\", \"rgb(251,154,153)\", \"rgb(227,26,28)\", \"rgb(253,191,111)\", \"rgb(255,127,0)\"], \"9\": [\"rgb(166,206,227)\", \"rgb(31,120,180)\", \"rgb(178,223,138)\", \"rgb(51,160,44)\", \"rgb(251,154,153)\", \"rgb(227,26,28)\", \"rgb(253,191,111)\", \"rgb(255,127,0)\", \"rgb(202,178,214)\"], \"10\": [\"rgb(166,206,227)\", \"rgb(31,120,180)\", \"rgb(178,223,138)\", \"rgb(51,160,44)\", \"rgb(251,154,153)\", \"rgb(227,26,28)\", \"rgb(253,191,111)\", \"rgb(255,127,0)\", \"rgb(202,178,214)\", \"rgb(106,61,154)\"], \"11\": [\"rgb(166,206,227)\", \"rgb(31,120,180)\", \"rgb(178,223,138)\", \"rgb(51,160,44)\", \"rgb(251,154,153)\", \"rgb(227,26,28)\", \"rgb(253,191,111)\", \"rgb(255,127,0)\", \"rgb(202,178,214)\", \"rgb(106,61,154)\", \"rgb(255,255,153)\"], \"12\": [\"rgb(166,206,227)\", \"rgb(31,120,180)\", \"rgb(178,223,138)\", \"rgb(51,160,44)\", \"rgb(251,154,153)\", \"rgb(227,26,28)\", \"rgb(253,191,111)\", \"rgb(255,127,0)\", \"rgb(202,178,214)\", \"rgb(106,61,154)\", \"rgb(255,255,153)\", \"rgb(177,89,40)\"], \"type\": \"qual\"} ,\n\"Pastel2\":  {\"3\": 
[\"rgb(179,226,205)\", \"rgb(253,205,172)\", \"rgb(203,213,232)\"], \"4\": [\"rgb(179,226,205)\", \"rgb(253,205,172)\", \"rgb(203,213,232)\", \"rgb(244,202,228)\"], \"5\": [\"rgb(179,226,205)\", \"rgb(253,205,172)\", \"rgb(203,213,232)\", \"rgb(244,202,228)\", \"rgb(230,245,201)\"], \"6\": [\"rgb(179,226,205)\", \"rgb(253,205,172)\", \"rgb(203,213,232)\", \"rgb(244,202,228)\", \"rgb(230,245,201)\", \"rgb(255,242,174)\"], \"7\": [\"rgb(179,226,205)\", \"rgb(253,205,172)\", \"rgb(203,213,232)\", \"rgb(244,202,228)\", \"rgb(230,245,201)\", \"rgb(255,242,174)\", \"rgb(241,226,204)\"], \"8\": [\"rgb(179,226,205)\", \"rgb(253,205,172)\", \"rgb(203,213,232)\", \"rgb(244,202,228)\", \"rgb(230,245,201)\", \"rgb(255,242,174)\", \"rgb(241,226,204)\", \"rgb(204,204,204)\"], \"type\": \"qual\"} ,\n\"Pastel1\":  {\"3\": [\"rgb(251,180,174)\", \"rgb(179,205,227)\", \"rgb(204,235,197)\"], \"4\": [\"rgb(251,180,174)\", \"rgb(179,205,227)\", \"rgb(204,235,197)\", \"rgb(222,203,228)\"], \"5\": [\"rgb(251,180,174)\", \"rgb(179,205,227)\", \"rgb(204,235,197)\", \"rgb(222,203,228)\", \"rgb(254,217,166)\"], \"6\": [\"rgb(251,180,174)\", \"rgb(179,205,227)\", \"rgb(204,235,197)\", \"rgb(222,203,228)\", \"rgb(254,217,166)\", \"rgb(255,255,204)\"], \"7\": [\"rgb(251,180,174)\", \"rgb(179,205,227)\", \"rgb(204,235,197)\", \"rgb(222,203,228)\", \"rgb(254,217,166)\", \"rgb(255,255,204)\", \"rgb(229,216,189)\"], \"8\": [\"rgb(251,180,174)\", \"rgb(179,205,227)\", \"rgb(204,235,197)\", \"rgb(222,203,228)\", \"rgb(254,217,166)\", \"rgb(255,255,204)\", \"rgb(229,216,189)\", \"rgb(253,218,236)\"], \"9\": [\"rgb(251,180,174)\", \"rgb(179,205,227)\", \"rgb(204,235,197)\", \"rgb(222,203,228)\", \"rgb(254,217,166)\", \"rgb(255,255,204)\", \"rgb(229,216,189)\", \"rgb(253,218,236)\", \"rgb(242,242,242)\"], \"type\": \"qual\"} ,\n\n\"OrRd\":  {\"3\": [\"rgb(254,232,200)\", \"rgb(253,187,132)\", \"rgb(227,74,51)\"], \"4\": [\"rgb(254,240,217)\", \"rgb(253,204,138)\", \"rgb(252,141,89)\", 
\"rgb(215,48,31)\"], \"5\": [\"rgb(254,240,217)\", \"rgb(253,204,138)\", \"rgb(252,141,89)\", \"rgb(227,74,51)\", \"rgb(179,0,0)\"], \"6\": [\"rgb(254,240,217)\", \"rgb(253,212,158)\", \"rgb(253,187,132)\", \"rgb(252,141,89)\", \"rgb(227,74,51)\", \"rgb(179,0,0)\"], \"7\": [\"rgb(254,240,217)\", \"rgb(253,212,158)\", \"rgb(253,187,132)\", \"rgb(252,141,89)\", \"rgb(239,101,72)\", \"rgb(215,48,31)\", \"rgb(153,0,0)\"], \"8\": [\"rgb(255,247,236)\", \"rgb(254,232,200)\", \"rgb(253,212,158)\", \"rgb(253,187,132)\", \"rgb(252,141,89)\", \"rgb(239,101,72)\", \"rgb(215,48,31)\", \"rgb(153,0,0)\"], \"9\": [\"rgb(255,247,236)\", \"rgb(254,232,200)\", \"rgb(253,212,158)\", \"rgb(253,187,132)\", \"rgb(252,141,89)\", \"rgb(239,101,72)\", \"rgb(215,48,31)\", \"rgb(179,0,0)\", \"rgb(127,0,0)\"], \"type\": \"seq\"} ,\n\"PuBu\":  {\"3\": [\"rgb(236,231,242)\", \"rgb(166,189,219)\", \"rgb(43,140,190)\"], \"4\": [\"rgb(241,238,246)\", \"rgb(189,201,225)\", \"rgb(116,169,207)\", \"rgb(5,112,176)\"], \"5\": [\"rgb(241,238,246)\", \"rgb(189,201,225)\", \"rgb(116,169,207)\", \"rgb(43,140,190)\", \"rgb(4,90,141)\"], \"6\": [\"rgb(241,238,246)\", \"rgb(208,209,230)\", \"rgb(166,189,219)\", \"rgb(116,169,207)\", \"rgb(43,140,190)\", \"rgb(4,90,141)\"], \"7\": [\"rgb(241,238,246)\", \"rgb(208,209,230)\", \"rgb(166,189,219)\", \"rgb(116,169,207)\", \"rgb(54,144,192)\", \"rgb(5,112,176)\", \"rgb(3,78,123)\"], \"8\": [\"rgb(255,247,251)\", \"rgb(236,231,242)\", \"rgb(208,209,230)\", \"rgb(166,189,219)\", \"rgb(116,169,207)\", \"rgb(54,144,192)\", \"rgb(5,112,176)\", \"rgb(3,78,123)\"], \"9\": [\"rgb(255,247,251)\", \"rgb(236,231,242)\", \"rgb(208,209,230)\", \"rgb(166,189,219)\", \"rgb(116,169,207)\", \"rgb(54,144,192)\", \"rgb(5,112,176)\", \"rgb(4,90,141)\", \"rgb(2,56,88)\"], \"type\": \"seq\"} ,\n\"BuPu\":  {\"3\": [\"rgb(224,236,244)\", \"rgb(158,188,218)\", \"rgb(136,86,167)\"], \"4\": [\"rgb(237,248,251)\", \"rgb(179,205,227)\", \"rgb(140,150,198)\", \"rgb(136,65,157)\"], \"5\": 
[\"rgb(237,248,251)\", \"rgb(179,205,227)\", \"rgb(140,150,198)\", \"rgb(136,86,167)\", \"rgb(129,15,124)\"], \"6\": [\"rgb(237,248,251)\", \"rgb(191,211,230)\", \"rgb(158,188,218)\", \"rgb(140,150,198)\", \"rgb(136,86,167)\", \"rgb(129,15,124)\"], \"7\": [\"rgb(237,248,251)\", \"rgb(191,211,230)\", \"rgb(158,188,218)\", \"rgb(140,150,198)\", \"rgb(140,107,177)\", \"rgb(136,65,157)\", \"rgb(110,1,107)\"], \"8\": [\"rgb(247,252,253)\", \"rgb(224,236,244)\", \"rgb(191,211,230)\", \"rgb(158,188,218)\", \"rgb(140,150,198)\", \"rgb(140,107,177)\", \"rgb(136,65,157)\", \"rgb(110,1,107)\"], \"9\": [\"rgb(247,252,253)\", \"rgb(224,236,244)\", \"rgb(191,211,230)\", \"rgb(158,188,218)\", \"rgb(140,150,198)\", \"rgb(140,107,177)\", \"rgb(136,65,157)\", \"rgb(129,15,124)\", \"rgb(77,0,75)\"], \"type\": \"seq\"} ,\n\"Oranges\":  {\"3\": [\"rgb(254,230,206)\", \"rgb(253,174,107)\", \"rgb(230,85,13)\"], \"4\": [\"rgb(254,237,222)\", \"rgb(253,190,133)\", \"rgb(253,141,60)\", \"rgb(217,71,1)\"], \"5\": [\"rgb(254,237,222)\", \"rgb(253,190,133)\", \"rgb(253,141,60)\", \"rgb(230,85,13)\", \"rgb(166,54,3)\"], \"6\": [\"rgb(254,237,222)\", \"rgb(253,208,162)\", \"rgb(253,174,107)\", \"rgb(253,141,60)\", \"rgb(230,85,13)\", \"rgb(166,54,3)\"], \"7\": [\"rgb(254,237,222)\", \"rgb(253,208,162)\", \"rgb(253,174,107)\", \"rgb(253,141,60)\", \"rgb(241,105,19)\", \"rgb(217,72,1)\", \"rgb(140,45,4)\"], \"8\": [\"rgb(255,245,235)\", \"rgb(254,230,206)\", \"rgb(253,208,162)\", \"rgb(253,174,107)\", \"rgb(253,141,60)\", \"rgb(241,105,19)\", \"rgb(217,72,1)\", \"rgb(140,45,4)\"], \"9\": [\"rgb(255,245,235)\", \"rgb(254,230,206)\", \"rgb(253,208,162)\", \"rgb(253,174,107)\", \"rgb(253,141,60)\", \"rgb(241,105,19)\", \"rgb(217,72,1)\", \"rgb(166,54,3)\", \"rgb(127,39,4)\"], \"type\": \"seq\"} ,\n\"BuGn\":  {\"3\": [\"rgb(229,245,249)\", \"rgb(153,216,201)\", \"rgb(44,162,95)\"], \"4\": [\"rgb(237,248,251)\", \"rgb(178,226,226)\", \"rgb(102,194,164)\", \"rgb(35,139,69)\"], \"5\": 
[\"rgb(237,248,251)\", \"rgb(178,226,226)\", \"rgb(102,194,164)\", \"rgb(44,162,95)\", \"rgb(0,109,44)\"], \"6\": [\"rgb(237,248,251)\", \"rgb(204,236,230)\", \"rgb(153,216,201)\", \"rgb(102,194,164)\", \"rgb(44,162,95)\", \"rgb(0,109,44)\"], \"7\": [\"rgb(237,248,251)\", \"rgb(204,236,230)\", \"rgb(153,216,201)\", \"rgb(102,194,164)\", \"rgb(65,174,118)\", \"rgb(35,139,69)\", \"rgb(0,88,36)\"], \"8\": [\"rgb(247,252,253)\", \"rgb(229,245,249)\", \"rgb(204,236,230)\", \"rgb(153,216,201)\", \"rgb(102,194,164)\", \"rgb(65,174,118)\", \"rgb(35,139,69)\", \"rgb(0,88,36)\"], \"9\": [\"rgb(247,252,253)\", \"rgb(229,245,249)\", \"rgb(204,236,230)\", \"rgb(153,216,201)\", \"rgb(102,194,164)\", \"rgb(65,174,118)\", \"rgb(35,139,69)\", \"rgb(0,109,44)\", \"rgb(0,68,27)\"], \"type\": \"seq\"} ,\n\"YlOrBr\":  {\"3\": [\"rgb(255,247,188)\", \"rgb(254,196,79)\", \"rgb(217,95,14)\"], \"4\": [\"rgb(255,255,212)\", \"rgb(254,217,142)\", \"rgb(254,153,41)\", \"rgb(204,76,2)\"], \"5\": [\"rgb(255,255,212)\", \"rgb(254,217,142)\", \"rgb(254,153,41)\", \"rgb(217,95,14)\", \"rgb(153,52,4)\"], \"6\": [\"rgb(255,255,212)\", \"rgb(254,227,145)\", \"rgb(254,196,79)\", \"rgb(254,153,41)\", \"rgb(217,95,14)\", \"rgb(153,52,4)\"], \"7\": [\"rgb(255,255,212)\", \"rgb(254,227,145)\", \"rgb(254,196,79)\", \"rgb(254,153,41)\", \"rgb(236,112,20)\", \"rgb(204,76,2)\", \"rgb(140,45,4)\"], \"8\": [\"rgb(255,255,229)\", \"rgb(255,247,188)\", \"rgb(254,227,145)\", \"rgb(254,196,79)\", \"rgb(254,153,41)\", \"rgb(236,112,20)\", \"rgb(204,76,2)\", \"rgb(140,45,4)\"], \"9\": [\"rgb(255,255,229)\", \"rgb(255,247,188)\", \"rgb(254,227,145)\", \"rgb(254,196,79)\", \"rgb(254,153,41)\", \"rgb(236,112,20)\", \"rgb(204,76,2)\", \"rgb(153,52,4)\", \"rgb(102,37,6)\"], \"type\": \"seq\"} ,\n\"YlGn\":  {\"3\": [\"rgb(247,252,185)\", \"rgb(173,221,142)\", \"rgb(49,163,84)\"], \"4\": [\"rgb(255,255,204)\", \"rgb(194,230,153)\", \"rgb(120,198,121)\", \"rgb(35,132,67)\"], \"5\": [\"rgb(255,255,204)\", 
\"rgb(194,230,153)\", \"rgb(120,198,121)\", \"rgb(49,163,84)\", \"rgb(0,104,55)\"], \"6\": [\"rgb(255,255,204)\", \"rgb(217,240,163)\", \"rgb(173,221,142)\", \"rgb(120,198,121)\", \"rgb(49,163,84)\", \"rgb(0,104,55)\"], \"7\": [\"rgb(255,255,204)\", \"rgb(217,240,163)\", \"rgb(173,221,142)\", \"rgb(120,198,121)\", \"rgb(65,171,93)\", \"rgb(35,132,67)\", \"rgb(0,90,50)\"], \"8\": [\"rgb(255,255,229)\", \"rgb(247,252,185)\", \"rgb(217,240,163)\", \"rgb(173,221,142)\", \"rgb(120,198,121)\", \"rgb(65,171,93)\", \"rgb(35,132,67)\", \"rgb(0,90,50)\"], \"9\": [\"rgb(255,255,229)\", \"rgb(247,252,185)\", \"rgb(217,240,163)\", \"rgb(173,221,142)\", \"rgb(120,198,121)\", \"rgb(65,171,93)\", \"rgb(35,132,67)\", \"rgb(0,104,55)\", \"rgb(0,69,41)\"], \"type\": \"seq\"} ,\n\"Reds\":  {\"3\": [\"rgb(254,224,210)\", \"rgb(252,146,114)\", \"rgb(222,45,38)\"], \"4\": [\"rgb(254,229,217)\", \"rgb(252,174,145)\", \"rgb(251,106,74)\", \"rgb(203,24,29)\"], \"5\": [\"rgb(254,229,217)\", \"rgb(252,174,145)\", \"rgb(251,106,74)\", \"rgb(222,45,38)\", \"rgb(165,15,21)\"], \"6\": [\"rgb(254,229,217)\", \"rgb(252,187,161)\", \"rgb(252,146,114)\", \"rgb(251,106,74)\", \"rgb(222,45,38)\", \"rgb(165,15,21)\"], \"7\": [\"rgb(254,229,217)\", \"rgb(252,187,161)\", \"rgb(252,146,114)\", \"rgb(251,106,74)\", \"rgb(239,59,44)\", \"rgb(203,24,29)\", \"rgb(153,0,13)\"], \"8\": [\"rgb(255,245,240)\", \"rgb(254,224,210)\", \"rgb(252,187,161)\", \"rgb(252,146,114)\", \"rgb(251,106,74)\", \"rgb(239,59,44)\", \"rgb(203,24,29)\", \"rgb(153,0,13)\"], \"9\": [\"rgb(255,245,240)\", \"rgb(254,224,210)\", \"rgb(252,187,161)\", \"rgb(252,146,114)\", \"rgb(251,106,74)\", \"rgb(239,59,44)\", \"rgb(203,24,29)\", \"rgb(165,15,21)\", \"rgb(103,0,13)\"], \"type\": \"seq\"} ,\n\"RdPu\":  {\"3\": [\"rgb(253,224,221)\", \"rgb(250,159,181)\", \"rgb(197,27,138)\"], \"4\": [\"rgb(254,235,226)\", \"rgb(251,180,185)\", \"rgb(247,104,161)\", \"rgb(174,1,126)\"], \"5\": [\"rgb(254,235,226)\", \"rgb(251,180,185)\", 
\"rgb(247,104,161)\", \"rgb(197,27,138)\", \"rgb(122,1,119)\"], \"6\": [\"rgb(254,235,226)\", \"rgb(252,197,192)\", \"rgb(250,159,181)\", \"rgb(247,104,161)\", \"rgb(197,27,138)\", \"rgb(122,1,119)\"], \"7\": [\"rgb(254,235,226)\", \"rgb(252,197,192)\", \"rgb(250,159,181)\", \"rgb(247,104,161)\", \"rgb(221,52,151)\", \"rgb(174,1,126)\", \"rgb(122,1,119)\"], \"8\": [\"rgb(255,247,243)\", \"rgb(253,224,221)\", \"rgb(252,197,192)\", \"rgb(250,159,181)\", \"rgb(247,104,161)\", \"rgb(221,52,151)\", \"rgb(174,1,126)\", \"rgb(122,1,119)\"], \"9\": [\"rgb(255,247,243)\", \"rgb(253,224,221)\", \"rgb(252,197,192)\", \"rgb(250,159,181)\", \"rgb(247,104,161)\", \"rgb(221,52,151)\", \"rgb(174,1,126)\", \"rgb(122,1,119)\", \"rgb(73,0,106)\"], \"type\": \"seq\"} ,\n\"Greens\":  {\"3\": [\"rgb(229,245,224)\", \"rgb(161,217,155)\", \"rgb(49,163,84)\"], \"4\": [\"rgb(237,248,233)\", \"rgb(186,228,179)\", \"rgb(116,196,118)\", \"rgb(35,139,69)\"], \"5\": [\"rgb(237,248,233)\", \"rgb(186,228,179)\", \"rgb(116,196,118)\", \"rgb(49,163,84)\", \"rgb(0,109,44)\"], \"6\": [\"rgb(237,248,233)\", \"rgb(199,233,192)\", \"rgb(161,217,155)\", \"rgb(116,196,118)\", \"rgb(49,163,84)\", \"rgb(0,109,44)\"], \"7\": [\"rgb(237,248,233)\", \"rgb(199,233,192)\", \"rgb(161,217,155)\", \"rgb(116,196,118)\", \"rgb(65,171,93)\", \"rgb(35,139,69)\", \"rgb(0,90,50)\"], \"8\": [\"rgb(247,252,245)\", \"rgb(229,245,224)\", \"rgb(199,233,192)\", \"rgb(161,217,155)\", \"rgb(116,196,118)\", \"rgb(65,171,93)\", \"rgb(35,139,69)\", \"rgb(0,90,50)\"], \"9\": [\"rgb(247,252,245)\", \"rgb(229,245,224)\", \"rgb(199,233,192)\", \"rgb(161,217,155)\", \"rgb(116,196,118)\", \"rgb(65,171,93)\", \"rgb(35,139,69)\", \"rgb(0,109,44)\", \"rgb(0,68,27)\"], \"type\": \"seq\"} ,\n\"YlGnBu\":  {\"3\": [\"rgb(237,248,177)\", \"rgb(127,205,187)\", \"rgb(44,127,184)\"], \"4\": [\"rgb(255,255,204)\", \"rgb(161,218,180)\", \"rgb(65,182,196)\", \"rgb(34,94,168)\"], \"5\": [\"rgb(255,255,204)\", \"rgb(161,218,180)\", \"rgb(65,182,196)\", 
\"rgb(44,127,184)\", \"rgb(37,52,148)\"], \"6\": [\"rgb(255,255,204)\", \"rgb(199,233,180)\", \"rgb(127,205,187)\", \"rgb(65,182,196)\", \"rgb(44,127,184)\", \"rgb(37,52,148)\"], \"7\": [\"rgb(255,255,204)\", \"rgb(199,233,180)\", \"rgb(127,205,187)\", \"rgb(65,182,196)\", \"rgb(29,145,192)\", \"rgb(34,94,168)\", \"rgb(12,44,132)\"], \"8\": [\"rgb(255,255,217)\", \"rgb(237,248,177)\", \"rgb(199,233,180)\", \"rgb(127,205,187)\", \"rgb(65,182,196)\", \"rgb(29,145,192)\", \"rgb(34,94,168)\", \"rgb(12,44,132)\"], \"9\": [\"rgb(255,255,217)\", \"rgb(237,248,177)\", \"rgb(199,233,180)\", \"rgb(127,205,187)\", \"rgb(65,182,196)\", \"rgb(29,145,192)\", \"rgb(34,94,168)\", \"rgb(37,52,148)\", \"rgb(8,29,88)\"], \"type\": \"seq\"} ,\n\"Purples\":  {\"3\": [\"rgb(239,237,245)\", \"rgb(188,189,220)\", \"rgb(117,107,177)\"], \"4\": [\"rgb(242,240,247)\", \"rgb(203,201,226)\", \"rgb(158,154,200)\", \"rgb(106,81,163)\"], \"5\": [\"rgb(242,240,247)\", \"rgb(203,201,226)\", \"rgb(158,154,200)\", \"rgb(117,107,177)\", \"rgb(84,39,143)\"], \"6\": [\"rgb(242,240,247)\", \"rgb(218,218,235)\", \"rgb(188,189,220)\", \"rgb(158,154,200)\", \"rgb(117,107,177)\", \"rgb(84,39,143)\"], \"7\": [\"rgb(242,240,247)\", \"rgb(218,218,235)\", \"rgb(188,189,220)\", \"rgb(158,154,200)\", \"rgb(128,125,186)\", \"rgb(106,81,163)\", \"rgb(74,20,134)\"], \"8\": [\"rgb(252,251,253)\", \"rgb(239,237,245)\", \"rgb(218,218,235)\", \"rgb(188,189,220)\", \"rgb(158,154,200)\", \"rgb(128,125,186)\", \"rgb(106,81,163)\", \"rgb(74,20,134)\"], \"9\": [\"rgb(252,251,253)\", \"rgb(239,237,245)\", \"rgb(218,218,235)\", \"rgb(188,189,220)\", \"rgb(158,154,200)\", \"rgb(128,125,186)\", \"rgb(106,81,163)\", \"rgb(84,39,143)\", \"rgb(63,0,125)\"], \"type\": \"seq\"} ,\n\"GnBu\":  {\"3\": [\"rgb(224,243,219)\", \"rgb(168,221,181)\", \"rgb(67,162,202)\"], \"4\": [\"rgb(240,249,232)\", \"rgb(186,228,188)\", \"rgb(123,204,196)\", \"rgb(43,140,190)\"], \"5\": [\"rgb(240,249,232)\", \"rgb(186,228,188)\", \"rgb(123,204,196)\", 
\"rgb(67,162,202)\", \"rgb(8,104,172)\"], \"6\": [\"rgb(240,249,232)\", \"rgb(204,235,197)\", \"rgb(168,221,181)\", \"rgb(123,204,196)\", \"rgb(67,162,202)\", \"rgb(8,104,172)\"], \"7\": [\"rgb(240,249,232)\", \"rgb(204,235,197)\", \"rgb(168,221,181)\", \"rgb(123,204,196)\", \"rgb(78,179,211)\", \"rgb(43,140,190)\", \"rgb(8,88,158)\"], \"8\": [\"rgb(247,252,240)\", \"rgb(224,243,219)\", \"rgb(204,235,197)\", \"rgb(168,221,181)\", \"rgb(123,204,196)\", \"rgb(78,179,211)\", \"rgb(43,140,190)\", \"rgb(8,88,158)\"], \"9\": [\"rgb(247,252,240)\", \"rgb(224,243,219)\", \"rgb(204,235,197)\", \"rgb(168,221,181)\", \"rgb(123,204,196)\", \"rgb(78,179,211)\", \"rgb(43,140,190)\", \"rgb(8,104,172)\", \"rgb(8,64,129)\"], \"type\": \"seq\"} ,\n\"Greys\":  {\"3\": [\"rgb(240,240,240)\", \"rgb(189,189,189)\", \"rgb(99,99,99)\"], \"4\": [\"rgb(247,247,247)\", \"rgb(204,204,204)\", \"rgb(150,150,150)\", \"rgb(82,82,82)\"], \"5\": [\"rgb(247,247,247)\", \"rgb(204,204,204)\", \"rgb(150,150,150)\", \"rgb(99,99,99)\", \"rgb(37,37,37)\"], \"6\": [\"rgb(247,247,247)\", \"rgb(217,217,217)\", \"rgb(189,189,189)\", \"rgb(150,150,150)\", \"rgb(99,99,99)\", \"rgb(37,37,37)\"], \"7\": [\"rgb(247,247,247)\", \"rgb(217,217,217)\", \"rgb(189,189,189)\", \"rgb(150,150,150)\", \"rgb(115,115,115)\", \"rgb(82,82,82)\", \"rgb(37,37,37)\"], \"8\": [\"rgb(255,255,255)\", \"rgb(240,240,240)\", \"rgb(217,217,217)\", \"rgb(189,189,189)\", \"rgb(150,150,150)\", \"rgb(115,115,115)\", \"rgb(82,82,82)\", \"rgb(37,37,37)\"], \"9\": [\"rgb(255,255,255)\", \"rgb(240,240,240)\", \"rgb(217,217,217)\", \"rgb(189,189,189)\", \"rgb(150,150,150)\", \"rgb(115,115,115)\", \"rgb(82,82,82)\", \"rgb(37,37,37)\", \"rgb(0,0,0)\"], \"type\": \"seq\"} ,\n\"YlOrRd\":  {\"3\": [\"rgb(255,237,160)\", \"rgb(254,178,76)\", \"rgb(240,59,32)\"], \"4\": [\"rgb(255,255,178)\", \"rgb(254,204,92)\", \"rgb(253,141,60)\", \"rgb(227,26,28)\"], \"5\": [\"rgb(255,255,178)\", \"rgb(254,204,92)\", \"rgb(253,141,60)\", \"rgb(240,59,32)\", 
\"rgb(189,0,38)\"], \"6\": [\"rgb(255,255,178)\", \"rgb(254,217,118)\", \"rgb(254,178,76)\", \"rgb(253,141,60)\", \"rgb(240,59,32)\", \"rgb(189,0,38)\"], \"7\": [\"rgb(255,255,178)\", \"rgb(254,217,118)\", \"rgb(254,178,76)\", \"rgb(253,141,60)\", \"rgb(252,78,42)\", \"rgb(227,26,28)\", \"rgb(177,0,38)\"], \"8\": [\"rgb(255,255,204)\", \"rgb(255,237,160)\", \"rgb(254,217,118)\", \"rgb(254,178,76)\", \"rgb(253,141,60)\", \"rgb(252,78,42)\", \"rgb(227,26,28)\", \"rgb(177,0,38)\"], \"type\": \"seq\"} ,\n\"PuRd\":  {\"3\": [\"rgb(231,225,239)\", \"rgb(201,148,199)\", \"rgb(221,28,119)\"], \"4\": [\"rgb(241,238,246)\", \"rgb(215,181,216)\", \"rgb(223,101,176)\", \"rgb(206,18,86)\"], \"5\": [\"rgb(241,238,246)\", \"rgb(215,181,216)\", \"rgb(223,101,176)\", \"rgb(221,28,119)\", \"rgb(152,0,67)\"], \"6\": [\"rgb(241,238,246)\", \"rgb(212,185,218)\", \"rgb(201,148,199)\", \"rgb(223,101,176)\", \"rgb(221,28,119)\", \"rgb(152,0,67)\"], \"7\": [\"rgb(241,238,246)\", \"rgb(212,185,218)\", \"rgb(201,148,199)\", \"rgb(223,101,176)\", \"rgb(231,41,138)\", \"rgb(206,18,86)\", \"rgb(145,0,63)\"], \"8\": [\"rgb(247,244,249)\", \"rgb(231,225,239)\", \"rgb(212,185,218)\", \"rgb(201,148,199)\", \"rgb(223,101,176)\", \"rgb(231,41,138)\", \"rgb(206,18,86)\", \"rgb(145,0,63)\"], \"9\": [\"rgb(247,244,249)\", \"rgb(231,225,239)\", \"rgb(212,185,218)\", \"rgb(201,148,199)\", \"rgb(223,101,176)\", \"rgb(231,41,138)\", \"rgb(206,18,86)\", \"rgb(152,0,67)\", \"rgb(103,0,31)\"], \"type\": \"seq\"} ,\n\"Blues\":  {\"3\": [\"rgb(222,235,247)\", \"rgb(158,202,225)\", \"rgb(49,130,189)\"], \"4\": [\"rgb(239,243,255)\", \"rgb(189,215,231)\", \"rgb(107,174,214)\", \"rgb(33,113,181)\"], \"5\": [\"rgb(239,243,255)\", \"rgb(189,215,231)\", \"rgb(107,174,214)\", \"rgb(49,130,189)\", \"rgb(8,81,156)\"], \"6\": [\"rgb(239,243,255)\", \"rgb(198,219,239)\", \"rgb(158,202,225)\", \"rgb(107,174,214)\", \"rgb(49,130,189)\", \"rgb(8,81,156)\"], \"7\": [\"rgb(239,243,255)\", \"rgb(198,219,239)\", 
\"rgb(158,202,225)\", \"rgb(107,174,214)\", \"rgb(66,146,198)\", \"rgb(33,113,181)\", \"rgb(8,69,148)\"], \"8\": [\"rgb(247,251,255)\", \"rgb(222,235,247)\", \"rgb(198,219,239)\", \"rgb(158,202,225)\", \"rgb(107,174,214)\", \"rgb(66,146,198)\", \"rgb(33,113,181)\", \"rgb(8,69,148)\"], \"9\": [\"rgb(247,251,255)\", \"rgb(222,235,247)\", \"rgb(198,219,239)\", \"rgb(158,202,225)\", \"rgb(107,174,214)\", \"rgb(66,146,198)\", \"rgb(33,113,181)\", \"rgb(8,81,156)\", \"rgb(8,48,107)\"], \"type\": \"seq\"} ,\n\"PuBuGn\":  {\"3\": [\"rgb(236,226,240)\", \"rgb(166,189,219)\", \"rgb(28,144,153)\"], \"4\": [\"rgb(246,239,247)\", \"rgb(189,201,225)\", \"rgb(103,169,207)\", \"rgb(2,129,138)\"], \"5\": [\"rgb(246,239,247)\", \"rgb(189,201,225)\", \"rgb(103,169,207)\", \"rgb(28,144,153)\", \"rgb(1,108,89)\"], \"6\": [\"rgb(246,239,247)\", \"rgb(208,209,230)\", \"rgb(166,189,219)\", \"rgb(103,169,207)\", \"rgb(28,144,153)\", \"rgb(1,108,89)\"], \"7\": [\"rgb(246,239,247)\", \"rgb(208,209,230)\", \"rgb(166,189,219)\", \"rgb(103,169,207)\", \"rgb(54,144,192)\", \"rgb(2,129,138)\", \"rgb(1,100,80)\"], \"8\": [\"rgb(255,247,251)\", \"rgb(236,226,240)\", \"rgb(208,209,230)\", \"rgb(166,189,219)\", \"rgb(103,169,207)\", \"rgb(54,144,192)\", \"rgb(2,129,138)\", \"rgb(1,100,80)\"], \"9\": [\"rgb(255,247,251)\", \"rgb(236,226,240)\", \"rgb(208,209,230)\", \"rgb(166,189,219)\", \"rgb(103,169,207)\", \"rgb(54,144,192)\", \"rgb(2,129,138)\", \"rgb(1,108,89)\", \"rgb(1,70,54)\"], \"type\": \"seq\"} \n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/palette/brewer/genbrewer.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nfunc main() {\n\tlog.SetPrefix(\"genbrewer: \")\n\tlog.SetFlags(0)\n\n\tif len(os.Args) != 2 {\n\t\tfmt.Fprintf(os.Stderr, `usage: %s colorbrewer.json\n\nGenerates brewer.go from the colors in the named JSON file, which should be\nretrieved from http://colorbrewer2.org/export/colorbrewer.json.`, os.Args[0])\n\t\tos.Exit(2)\n\t}\n\n\tf, err := os.Open(os.Args[1])\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer f.Close()\n\tvar brewer map[string]map[string]interface{}\n\terr = json.NewDecoder(f).Decode(&brewer)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\to, err := os.Create(\"brewer.go\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\to.WriteString(`// Generated by genbrewer. DO NOT EDIT.\n// Please see license at http://colorbrewer.org/export/LICENSE.txt\n\npackage brewer\n\nimport \"image/color\"\n\n`)\n\n\tnameMap := []string{}\n\tfor _, name := range sortedKeys(brewer) {\n\t\trawVariants := brewer[name]\n\n\t\t// variantKeys are strings that are mostly numbers,\n\t\t// but also have some metadata. 
Extract just the\n\t\t// numbers and put them in order.\n\t\tvariants := []int{}\n\t\tfor variant := range rawVariants {\n\t\t\tif num, err := strconv.Atoi(variant); err == nil {\n\t\t\t\tvariants = append(variants, num)\n\t\t\t}\n\t\t}\n\t\tsort.Ints(variants)\n\n\t\tvariantMap := []string{}\n\t\tvar defs bytes.Buffer\n\t\tfor _, variant := range variants {\n\t\t\tcolors := rawVariants[strconv.Itoa(variant)].([]interface{})\n\t\t\tvname := fmt.Sprintf(\"%s_%d\", name, variant)\n\t\t\tfmt.Fprintf(&defs, \"\\t%s = []color.Color{\", vname)\n\t\t\tfor i, color := range colors {\n\t\t\t\tif i != 0 {\n\t\t\t\t\tfmt.Fprintf(&defs, \", \")\n\t\t\t\t}\n\t\t\t\tr, g, b := parse(color.(string))\n\t\t\t\tfmt.Fprintf(&defs, \"color.RGBA{%d, %d, %d, 255}\", r, g, b)\n\t\t\t}\n\t\t\tfmt.Fprintf(&defs, \"}\\n\")\n\n\t\t\tvariantMap = append(variantMap, fmt.Sprintf(\"%d: %s\", variant, vname))\n\t\t}\n\n\t\tfmt.Fprintf(o, \"var (\\n\")\n\n\t\ttyp := rawVariants[\"type\"].(string)\n\t\tfmt.Fprintf(o, \"\\t// %s is a %s palette.\\n\", name, niceType[typ])\n\t\tfmt.Fprintf(o, \"\\t%s = map[int][]color.Color{%s}\\n\", name, strings.Join(variantMap, \", \"))\n\t\tfmt.Fprintf(o, \"%s)\\n\\n\", defs.String())\n\n\t\tnameMap = append(nameMap, fmt.Sprintf(\"%q: %s\", name, name))\n\t}\n\n\tfmt.Fprintf(o, \"// ByName is a map indexing all palettes by string name.\\n\")\n\tfmt.Fprintf(o, \"var ByName = map[string]map[int][]color.Color{%s}\\n\", strings.Join(nameMap, \", \"))\n\n\tif err = o.Close(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nvar niceType = map[string]string{\n\t\"seq\":  \"sequential\",\n\t\"div\":  \"diverging\",\n\t\"qual\": \"qualitative\",\n}\n\nvar colorRe = regexp.MustCompile(`^rgb\\s*\\(\\s*([0-9]+)\\s*,\\s*([0-9]+)\\s*,\\s*([0-9]+)\\s*\\)\\s*$`)\n\nfunc parse(cssColor string) (r, g, b uint8) {\n\tm := colorRe.FindStringSubmatch(cssColor)\n\tif m == nil {\n\t\tlog.Fatalf(\"unknown color syntax: %q\", cssColor)\n\t}\n\tp := func(x string) uint8 {\n\t\tn, err := 
strconv.ParseUint(x, 10, 8)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\treturn uint8(n)\n\t}\n\treturn p(m[1]), p(m[2]), p(m[3])\n}\n\nfunc sortedKeys(m interface{}) []string {\n\tkeys := []string{}\n\tfor _, key := range reflect.ValueOf(m).MapKeys() {\n\t\tkeys = append(keys, key.String())\n\t}\n\tsort.Strings(keys)\n\treturn keys\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/palette/brewer/package.go",
    "content": "// Copyright (c) 2002 Cynthia Brewer, Mark Harrower, and The\n// Pennsylvania State University.\n// Please see license at http://colorbrewer.org/export/LICENSE.txt.\n\n// Package brewer provides color specifications and designs developed\n// by Cynthia Brewer (http://colorbrewer.org/).\n//\n// Please see license at http://colorbrewer.org/export/LICENSE.txt.\n//\n// This package provides three different types of color palettes.\n// Sequential palettes are for ordered data that progresses from low\n// to high. Diverging palettes are like sequential palettes, but have\n// a defined middle and two extremes. Finally, qualitative palettes\n// are for unordered or nominal data. See \"Brewer, Cynthia A. 1994.\n// Color use guidelines for mapping and visualization. Chapter 7 (pp.\n// 123–147) in Visualization in Modern Cartography\" for more details.\n//\n// All palettes provided by this package are discrete, but each comes\n// in several variants with different numbers of discrete levels.\n// These variants are named <palette>_<n> where n is the number of\n// levels.\n//\n// Each palette also provides a variable named <palette> that is a map\n// from the number of levels to the specific variants.\n//\n// Finally, the global ByName map from string name to palette.\npackage brewer\n\n//go:generate go run genbrewer.go colorbrewer.json\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/palette/makesrgbtab.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"math\"\n\t\"os\"\n)\n\nfunc sRGBToLinear(s float64) float64 {\n\tif s <= 0.0405 {\n\t\treturn s / 12.92\n\t} else {\n\t\treturn math.Pow(((s + 0.055) / 1.055), 2.4)\n\t}\n}\n\nfunc linearTosRGB(x float64) float64 {\n\tif x <= 0.0031308 {\n\t\treturn x * 12.92\n\t} else {\n\t\treturn 1.055*math.Pow(x, 1/2.4) - 0.055\n\t}\n}\n\nfunc main() {\n\tf, err := os.Create(\"srgbtab.go\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer f.Close()\n\n\t// Create the sRGB to linear table.\n\tfmt.Fprintf(f, `// Generated by makesrgbtab. DO NOT EDIT.\n\npackage palette\n\nvar sRGBToLinearTab = [256]uint16{\n`)\n\tfor i := 0; i < 256; i++ {\n\t\ts := float64(i) / 255\n\t\tl := sRGBToLinear(s)\n\t\tfmt.Fprintf(f, \"\\t%d,\\n\", uint16((1<<16-1)*l))\n\t}\n\tfmt.Fprintf(f, \"}\\n\\n\")\n\n\t// We could compute a complete table from uint16 linear RGB to\n\t// uint8 sRGB, but that table is fairly large and very\n\t// redundant. Instead, we try to find a smaller table where a\n\t// simple formula and lookup will get us within a reasonable\n\t// error bound.\n\t//\n\t// Specifically, given an error defined as\n\t//\n\t// err(l) = |table[(l + addend) >> shift] - linearTosRGB(l)|\n\t//\n\t// for l ∈ [0, 1<<16).\n\t//\n\t// we find addend, shift, and table that maximizes shift\n\t// (minimizing the table size) and minimizes the mean squared\n\t// error subject to err(l) < 1/256.\n\t//\n\t// There are fast ways to do this, but it doesn't matter. 
This\n\t// implementation strives for clarity.\n\t//\n\t// TODO: Consider using multiple tables with different shifts\n\t// to get better precision on the low range and better\n\t// compression on the high range.\n\t//\n\t// TODO: Maybe we also want to ensure round-tripping.\n\ttype entry struct {\n\t\tsrgbs []float64\n\t\tb     uint8\n\t}\n\tvar best struct {\n\t\tshift, addend int\n\t\ttable         []entry\n\t\terr           float64\n\t}\n\tbest.err = 1\n\tconst acceptError = 1 / 256.0\n\tfor shift := 5; shift >= 0; shift-- {\n\t\tbits := 16 - shift\n\tnextTable:\n\t\tfor addend := 0; addend < 1<<uint(shift); addend++ {\n\t\t\tfmt.Println(\"considering shift\", shift, \"addend\", addend)\n\n\t\t\ttable := make([]entry, 1<<uint(bits))\n\n\t\t\t// Compute all of the sRGB values that fall into each\n\t\t\t// table entry.\n\t\t\tfor l := 0; l < 1<<16; l++ {\n\t\t\t\tindex := (l + addend) >> uint(shift)\n\t\t\t\tfor index >= len(table) {\n\t\t\t\t\ttable = append(table, entry{})\n\t\t\t\t}\n\t\t\t\tsrgb := linearTosRGB(float64(l) / 65535)\n\t\t\t\ttable[index].srgbs = append(table[index].srgbs, srgb)\n\t\t\t}\n\n\t\t\ttableErr := 0.0\n\t\t\tfor i := range table {\n\t\t\t\tent := &table[i]\n\n\t\t\t\t// Find a uint8 value for this table\n\t\t\t\t// entry that minimizes the maximum\n\t\t\t\t// error to all sRGB values that fall\n\t\t\t\t// into this entry.\n\t\t\t\tmins := ent.srgbs[0]\n\t\t\t\tmaxs := ent.srgbs[len(ent.srgbs)-1]\n\t\t\t\tminb := int(mins*255) - 1\n\t\t\t\tmaxb := int(maxs*255) + 1\n\t\t\t\tentryErr := 1.0\n\t\t\t\tfor b := minb; b <= maxb; b++ {\n\t\t\t\t\tif b < 0 || b > 255 {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\ts := float64(b) / 255\n\t\t\t\t\terr := math.Max(math.Abs(s-mins),\n\t\t\t\t\t\tmath.Abs(s-maxs))\n\t\t\t\t\tif err < entryErr {\n\t\t\t\t\t\tentryErr = err\n\t\t\t\t\t\tent.b = uint8(b)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// If we couldn't find a good enough\n\t\t\t\t// table value, try another setting.\n\t\t\t\tif entryErr > 
acceptError {\n\t\t\t\t\tfmt.Println(\"entry error\", entryErr, \"> acceptable error\", acceptError)\n\t\t\t\t\tcontinue nextTable\n\t\t\t\t}\n\n\t\t\t\ttableErr += entryErr * entryErr\n\t\t\t}\n\t\t\ttableErr = tableErr / float64(len(table))\n\t\t\tfmt.Println(\"MSE is\", tableErr)\n\n\t\t\t// We found an acceptable table.\n\t\t\tif tableErr < best.err {\n\t\t\t\tbest.err = tableErr\n\t\t\t\tbest.shift, best.addend = shift, addend\n\t\t\t\tbest.table = table\n\t\t\t}\n\t\t}\n\n\t\t// If we found a table, that's the best shift.\n\t\tif best.table != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\tfmt.Println(\"best table is shift\", best.shift, \"addend\", best.addend)\n\n\t// Create the linear to sRGB table.\n\tfmt.Fprintf(f, `const linearTosRGBShift = %d\n\nconst linearTosRGBAddend = %d\n\nvar linearTosRGBTab = [%d]uint8{\n`, best.shift, best.addend, len(best.table))\n\tfor _, ent := range best.table {\n\t\tfmt.Fprintf(f, \"\\t%d,\\n\", ent.b)\n\t}\n\tfmt.Fprintf(f, \"}\\n\")\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/palette/palette.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package palette provides palettes and ways to define palettes.\npackage palette\n\nimport (\n\t\"image/color\"\n\t\"math\"\n\t\"sort\"\n)\n\n// TODO: Unify continuous and discrete palettes so functions can\n// operate on both? Perhaps treat a continuous like a discrete with a\n// large number of levels (and a \"type\" function indicating that it's\n// okay to blend between neighboring colors.)\n\n// A Continuous palette is a function from [0, 1] to colors. It may be\n// sequential, diverging, or circular.\ntype Continuous interface {\n\tMap(x float64) color.Color\n}\n\n// RGBGradient is a Continuous palette that interpolates between a\n// sequence of colors.\ntype RGBGradient struct {\n\t// Colors is the sequence of colors to interpolate between.\n\t// Interpolation assumes the colors are sRGB values.\n\tColors []color.RGBA\n\n\t// Stops is an optional sequence of stop positions. It may be\n\t// nil, in which case Colors are evenly spaced on the interval\n\t// [0, 1]. Otherwise, it must be a slice with the same length\n\t// as Colors and must be in ascending order.\n\tStops []float64\n}\n\nfunc (g RGBGradient) Map(x float64) color.Color {\n\tif g.Stops == nil {\n\t\tn := x * float64(len(g.Colors)-1)\n\t\tip, fr := math.Modf(n)\n\t\ti := int(ip)\n\t\tif i <= 0 {\n\t\t\treturn g.Colors[0]\n\t\t} else if i >= len(g.Colors)-1 {\n\t\t\treturn g.Colors[len(g.Colors)-1]\n\t\t}\n\t\ta, b := g.Colors[i], g.Colors[i+1]\n\t\treturn blendRGBA(a, b, fr)\n\t}\n\n\ti := sort.SearchFloat64s(g.Stops, x)\n\tif i == 0 {\n\t\treturn g.Colors[0]\n\t} else if i >= len(g.Colors)-1 {\n\t\treturn g.Colors[len(g.Colors)-1]\n\t}\n\tfr := (g.Stops[i] - x) / (g.Stops[i+1] - g.Stops[i])\n\ta, b := g.Colors[i], g.Colors[i+1]\n\treturn blendRGBA(a, b, fr)\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/palette/srgb.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage palette\n\n//go:generate go run makesrgbtab.go\n\n// sRGB8ToLinear converts 8-bit sRGB component x to a 16-bit linear\n// intensity.\nfunc sRGB8ToLinear(x uint8) uint16 {\n\treturn sRGBToLinearTab[x]\n}\n\n// linearTosRGB8 converts 16-bit linear intensity x to an 8-bit sRGB\n// component.\nfunc linearTosRGB8(x uint16) uint8 {\n\treturn linearTosRGBTab[(uint32(x)+linearTosRGBAddend)>>linearTosRGBShift]\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/palette/srgbtab.go",
    "content": "// Generated by makesrgbtab. DO NOT EDIT.\n\npackage palette\n\nvar sRGBToLinearTab = [256]uint16{\n\t0,\n\t19,\n\t39,\n\t59,\n\t79,\n\t99,\n\t119,\n\t139,\n\t159,\n\t179,\n\t198,\n\t219,\n\t240,\n\t263,\n\t287,\n\t313,\n\t339,\n\t367,\n\t396,\n\t426,\n\t458,\n\t491,\n\t525,\n\t561,\n\t598,\n\t637,\n\t676,\n\t718,\n\t761,\n\t805,\n\t850,\n\t897,\n\t946,\n\t996,\n\t1048,\n\t1101,\n\t1156,\n\t1212,\n\t1270,\n\t1329,\n\t1390,\n\t1453,\n\t1517,\n\t1583,\n\t1650,\n\t1719,\n\t1790,\n\t1862,\n\t1937,\n\t2012,\n\t2090,\n\t2169,\n\t2250,\n\t2333,\n\t2417,\n\t2503,\n\t2591,\n\t2681,\n\t2772,\n\t2866,\n\t2961,\n\t3058,\n\t3156,\n\t3257,\n\t3359,\n\t3464,\n\t3570,\n\t3678,\n\t3788,\n\t3900,\n\t4013,\n\t4129,\n\t4246,\n\t4366,\n\t4487,\n\t4611,\n\t4736,\n\t4863,\n\t4992,\n\t5124,\n\t5257,\n\t5392,\n\t5529,\n\t5668,\n\t5810,\n\t5953,\n\t6098,\n\t6245,\n\t6395,\n\t6546,\n\t6700,\n\t6856,\n\t7013,\n\t7173,\n\t7335,\n\t7499,\n\t7665,\n\t7833,\n\t8004,\n\t8176,\n\t8351,\n\t8528,\n\t8707,\n\t8888,\n\t9072,\n\t9257,\n\t9445,\n\t9635,\n\t9827,\n\t10022,\n\t10218,\n\t10417,\n\t10618,\n\t10821,\n\t11027,\n\t11235,\n\t11445,\n\t11657,\n\t11872,\n\t12089,\n\t12308,\n\t12530,\n\t12754,\n\t12980,\n\t13208,\n\t13439,\n\t13673,\n\t13908,\n\t14146,\n\t14386,\n\t14629,\n\t14874,\n\t15121,\n\t15371,\n\t15623,\n\t15877,\n\t16134,\n\t16394,\n\t16655,\n\t16920,\n\t17186,\n\t17455,\n\t17727,\n\t18000,\n\t18277,\n\t18556,\n\t18837,\n\t19121,\n\t19407,\n\t19696,\n\t19987,\n\t20281,\n\t20577,\n\t20875,\n\t21177,\n\t21480,\n\t21787,\n\t22096,\n\t22407,\n\t22721,\n\t23037,\n\t23356,\n\t23678,\n\t24002,\n\t24329,\n\t24658,\n\t24990,\n\t25324,\n\t25661,\n\t26001,\n\t26343,\n\t26688,\n\t27035,\n\t27386,\n\t27738,\n\t28094,\n\t28452,\n\t28812,\n\t29176,\n\t29542,\n\t29910,\n\t30282,\n\t30656,\n\t31032,\n\t31412,\n\t31794,\n\t32179,\n\t32566,\n\t32956,\n\t33349,\n\t33745,\n\t34143,\n\t34544,\n\t34948,\n\t35354,\n\t35764,\n\t36176,\n\t36590,\n\t37008,\n\t37428,\n\t37851,\n\t38277,\n\t38706,\n\t39
137,\n\t39571,\n\t40008,\n\t40448,\n\t40891,\n\t41336,\n\t41784,\n\t42235,\n\t42689,\n\t43146,\n\t43606,\n\t44068,\n\t44533,\n\t45001,\n\t45472,\n\t45946,\n\t46423,\n\t46902,\n\t47385,\n\t47870,\n\t48358,\n\t48850,\n\t49344,\n\t49840,\n\t50340,\n\t50843,\n\t51349,\n\t51857,\n\t52369,\n\t52883,\n\t53400,\n\t53921,\n\t54444,\n\t54970,\n\t55499,\n\t56031,\n\t56567,\n\t57105,\n\t57646,\n\t58190,\n\t58737,\n\t59286,\n\t59839,\n\t60395,\n\t60954,\n\t61516,\n\t62081,\n\t62649,\n\t63220,\n\t63794,\n\t64371,\n\t64951,\n\t65535,\n}\n\nconst linearTosRGBShift = 4\n\nconst linearTosRGBAddend = 0\n\nvar linearTosRGBTab = [4096]uint8{\n\t0,\n\t1,\n\t2,\n\t3,\n\t4,\n\t4,\n\t5,\n\t6,\n\t7,\n\t8,\n\t8,\n\t9,\n\t10,\n\t11,\n\t12,\n\t12,\n\t13,\n\t14,\n\t14,\n\t15,\n\t16,\n\t16,\n\t17,\n\t17,\n\t18,\n\t18,\n\t19,\n\t19,\n\t20,\n\t20,\n\t21,\n\t21,\n\t22,\n\t22,\n\t23,\n\t23,\n\t24,\n\t24,\n\t24,\n\t25,\n\t25,\n\t26,\n\t26,\n\t26,\n\t27,\n\t27,\n\t28,\n\t28,\n\t28,\n\t29,\n\t29,\n\t29,\n\t30,\n\t30,\n\t30,\n\t31,\n\t31,\n\t31,\n\t32,\n\t32,\n\t32,\n\t33,\n\t33,\n\t33,\n\t34,\n\t34,\n\t34,\n\t35,\n\t35,\n\t35,\n\t35,\n\t36,\n\t36,\n\t36,\n\t37,\n\t37,\n\t37,\n\t37,\n\t38,\n\t38,\n\t38,\n\t39,\n\t39,\n\t39,\n\t39,\n\t40,\n\t40,\n\t40,\n\t40,\n\t41,\n\t41,\n\t41,\n\t41,\n\t42,\n\t42,\n\t42,\n\t42,\n\t43,\n\t43,\n\t43,\n\t43,\n\t44,\n\t44,\n\t44,\n\t44,\n\t45,\n\t45,\n\t45,\n\t45,\n\t45,\n\t46,\n\t46,\n\t46,\n\t46,\n\t47,\n\t47,\n\t47,\n\t47,\n\t47,\n\t48,\n\t48,\n\t48,\n\t48,\n\t49,\n\t49,\n\t49,\n\t49,\n\t49,\n\t50,\n\t50,\n\t50,\n\t50,\n\t50,\n\t51,\n\t51,\n\t51,\n\t51,\n\t51,\n\t52,\n\t52,\n\t52,\n\t52,\n\t52,\n\t53,\n\t53,\n\t53,\n\t53,\n\t53,\n\t54,\n\t54,\n\t54,\n\t54,\n\t54,\n\t54,\n\t55,\n\t55,\n\t55,\n\t55,\n\t55,\n\t56,\n\t56,\n\t56,\n\t56,\n\t56,\n\t56,\n\t57,\n\t57,\n\t57,\n\t57,\n\t57,\n\t58,\n\t58,\n\t58,\n\t58,\n\t58,\n\t58,\n\t59,\n\t59,\n\t59,\n\t59,\n\t59,\n\t59,\n\t60,\n\t60,\n\t60,\n\t60,\n\t60,\n\t60,\n\t61,\n\t61,\n\t61,\n\t61,\n\t61,\n\t61,\n\t62,\n\t62,\n\t62,\n\t6
2,\n\t62,\n\t62,\n\t63,\n\t63,\n\t63,\n\t63,\n\t63,\n\t63,\n\t63,\n\t64,\n\t64,\n\t64,\n\t64,\n\t64,\n\t64,\n\t65,\n\t65,\n\t65,\n\t65,\n\t65,\n\t65,\n\t65,\n\t66,\n\t66,\n\t66,\n\t66,\n\t66,\n\t66,\n\t66,\n\t67,\n\t67,\n\t67,\n\t67,\n\t67,\n\t67,\n\t68,\n\t68,\n\t68,\n\t68,\n\t68,\n\t68,\n\t68,\n\t69,\n\t69,\n\t69,\n\t69,\n\t69,\n\t69,\n\t69,\n\t70,\n\t70,\n\t70,\n\t70,\n\t70,\n\t70,\n\t70,\n\t71,\n\t71,\n\t71,\n\t71,\n\t71,\n\t71,\n\t71,\n\t71,\n\t72,\n\t72,\n\t72,\n\t72,\n\t72,\n\t72,\n\t72,\n\t73,\n\t73,\n\t73,\n\t73,\n\t73,\n\t73,\n\t73,\n\t73,\n\t74,\n\t74,\n\t74,\n\t74,\n\t74,\n\t74,\n\t74,\n\t75,\n\t75,\n\t75,\n\t75,\n\t75,\n\t75,\n\t75,\n\t75,\n\t76,\n\t76,\n\t76,\n\t76,\n\t76,\n\t76,\n\t76,\n\t76,\n\t77,\n\t77,\n\t77,\n\t77,\n\t77,\n\t77,\n\t77,\n\t77,\n\t78,\n\t78,\n\t78,\n\t78,\n\t78,\n\t78,\n\t78,\n\t78,\n\t79,\n\t79,\n\t79,\n\t79,\n\t79,\n\t79,\n\t79,\n\t79,\n\t80,\n\t80,\n\t80,\n\t80,\n\t80,\n\t80,\n\t80,\n\t80,\n\t80,\n\t81,\n\t81,\n\t81,\n\t81,\n\t81,\n\t81,\n\t81,\n\t81,\n\t82,\n\t82,\n\t82,\n\t82,\n\t82,\n\t82,\n\t82,\n\t82,\n\t82,\n\t83,\n\t83,\n\t83,\n\t83,\n\t83,\n\t83,\n\t83,\n\t83,\n\t83,\n\t84,\n\t84,\n\t84,\n\t84,\n\t84,\n\t84,\n\t84,\n\t84,\n\t84,\n\t85,\n\t85,\n\t85,\n\t85,\n\t85,\n\t85,\n\t85,\n\t85,\n\t85,\n\t86,\n\t86,\n\t86,\n\t86,\n\t86,\n\t86,\n\t86,\n\t86,\n\t86,\n\t87,\n\t87,\n\t87,\n\t87,\n\t87,\n\t87,\n\t87,\n\t87,\n\t87,\n\t88,\n\t88,\n\t88,\n\t88,\n\t88,\n\t88,\n\t88,\n\t88,\n\t88,\n\t89,\n\t89,\n\t89,\n\t89,\n\t89,\n\t89,\n\t89,\n\t89,\n\t89,\n\t89,\n\t90,\n\t90,\n\t90,\n\t90,\n\t90,\n\t90,\n\t90,\n\t90,\n\t90,\n\t90,\n\t91,\n\t91,\n\t91,\n\t91,\n\t91,\n\t91,\n\t91,\n\t91,\n\t91,\n\t92,\n\t92,\n\t92,\n\t92,\n\t92,\n\t92,\n\t92,\n\t92,\n\t92,\n\t92,\n\t93,\n\t93,\n\t93,\n\t93,\n\t93,\n\t93,\n\t93,\n\t93,\n\t93,\n\t93,\n\t94,\n\t94,\n\t94,\n\t94,\n\t94,\n\t94,\n\t94,\n\t94,\n\t94,\n\t94,\n\t94,\n\t95,\n\t95,\n\t95,\n\t95,\n\t95,\n\t95,\n\t95,\n\t95,\n\t95,\n\t95,\n\t96,\n\t96,\n\t96,\n\t96,\n\t96,\n\t96,\n\t96,\n\t96,\n\t96,\n\
t96,\n\t97,\n\t97,\n\t97,\n\t97,\n\t97,\n\t97,\n\t97,\n\t97,\n\t97,\n\t97,\n\t97,\n\t98,\n\t98,\n\t98,\n\t98,\n\t98,\n\t98,\n\t98,\n\t98,\n\t98,\n\t98,\n\t98,\n\t99,\n\t99,\n\t99,\n\t99,\n\t99,\n\t99,\n\t99,\n\t99,\n\t99,\n\t99,\n\t99,\n\t100,\n\t100,\n\t100,\n\t100,\n\t100,\n\t100,\n\t100,\n\t100,\n\t100,\n\t100,\n\t100,\n\t101,\n\t101,\n\t101,\n\t101,\n\t101,\n\t101,\n\t101,\n\t101,\n\t101,\n\t101,\n\t101,\n\t102,\n\t102,\n\t102,\n\t102,\n\t102,\n\t102,\n\t102,\n\t102,\n\t102,\n\t102,\n\t102,\n\t103,\n\t103,\n\t103,\n\t103,\n\t103,\n\t103,\n\t103,\n\t103,\n\t103,\n\t103,\n\t103,\n\t104,\n\t104,\n\t104,\n\t104,\n\t104,\n\t104,\n\t104,\n\t104,\n\t104,\n\t104,\n\t104,\n\t104,\n\t105,\n\t105,\n\t105,\n\t105,\n\t105,\n\t105,\n\t105,\n\t105,\n\t105,\n\t105,\n\t105,\n\t106,\n\t106,\n\t106,\n\t106,\n\t106,\n\t106,\n\t106,\n\t106,\n\t106,\n\t106,\n\t106,\n\t106,\n\t107,\n\t107,\n\t107,\n\t107,\n\t107,\n\t107,\n\t107,\n\t107,\n\t107,\n\t107,\n\t107,\n\t107,\n\t108,\n\t108,\n\t108,\n\t108,\n\t108,\n\t108,\n\t108,\n\t108,\n\t108,\n\t108,\n\t108,\n\t108,\n\t109,\n\t109,\n\t109,\n\t109,\n\t109,\n\t109,\n\t109,\n\t109,\n\t109,\n\t109,\n\t109,\n\t109,\n\t109,\n\t110,\n\t110,\n\t110,\n\t110,\n\t110,\n\t110,\n\t110,\n\t110,\n\t110,\n\t110,\n\t110,\n\t110,\n\t111,\n\t111,\n\t111,\n\t111,\n\t111,\n\t111,\n\t111,\n\t111,\n\t111,\n\t111,\n\t111,\n\t111,\n\t112,\n\t112,\n\t112,\n\t112,\n\t112,\n\t112,\n\t112,\n\t112,\n\t112,\n\t112,\n\t112,\n\t112,\n\t112,\n\t113,\n\t113,\n\t113,\n\t113,\n\t113,\n\t113,\n\t113,\n\t113,\n\t113,\n\t113,\n\t113,\n\t113,\n\t113,\n\t114,\n\t114,\n\t114,\n\t114,\n\t114,\n\t114,\n\t114,\n\t114,\n\t114,\n\t114,\n\t114,\n\t114,\n\t114,\n\t115,\n\t115,\n\t115,\n\t115,\n\t115,\n\t115,\n\t115,\n\t115,\n\t115,\n\t115,\n\t115,\n\t115,\n\t115,\n\t116,\n\t116,\n\t116,\n\t116,\n\t116,\n\t116,\n\t116,\n\t116,\n\t116,\n\t116,\n\t116,\n\t116,\n\t116,\n\t117,\n\t117,\n\t117,\n\t117,\n\t117,\n\t117,\n\t117,\n\t117,\n\t117,\n\t117,\n\t117,\n\t117,\n\t117,\n\t118,\n\t118,\n\t1
18,\n\t118,\n\t118,\n\t118,\n\t118,\n\t118,\n\t118,\n\t118,\n\t118,\n\t118,\n\t118,\n\t118,\n\t119,\n\t119,\n\t119,\n\t119,\n\t119,\n\t119,\n\t119,\n\t119,\n\t119,\n\t119,\n\t119,\n\t119,\n\t119,\n\t120,\n\t120,\n\t120,\n\t120,\n\t120,\n\t120,\n\t120,\n\t120,\n\t120,\n\t120,\n\t120,\n\t120,\n\t120,\n\t120,\n\t121,\n\t121,\n\t121,\n\t121,\n\t121,\n\t121,\n\t121,\n\t121,\n\t121,\n\t121,\n\t121,\n\t121,\n\t121,\n\t121,\n\t122,\n\t122,\n\t122,\n\t122,\n\t122,\n\t122,\n\t122,\n\t122,\n\t122,\n\t122,\n\t122,\n\t122,\n\t122,\n\t122,\n\t123,\n\t123,\n\t123,\n\t123,\n\t123,\n\t123,\n\t123,\n\t123,\n\t123,\n\t123,\n\t123,\n\t123,\n\t123,\n\t123,\n\t124,\n\t124,\n\t124,\n\t124,\n\t124,\n\t124,\n\t124,\n\t124,\n\t124,\n\t124,\n\t124,\n\t124,\n\t124,\n\t124,\n\t124,\n\t125,\n\t125,\n\t125,\n\t125,\n\t125,\n\t125,\n\t125,\n\t125,\n\t125,\n\t125,\n\t125,\n\t125,\n\t125,\n\t125,\n\t126,\n\t126,\n\t126,\n\t126,\n\t126,\n\t126,\n\t126,\n\t126,\n\t126,\n\t126,\n\t126,\n\t126,\n\t126,\n\t126,\n\t126,\n\t127,\n\t127,\n\t127,\n\t127,\n\t127,\n\t127,\n\t127,\n\t127,\n\t127,\n\t127,\n\t127,\n\t127,\n\t127,\n\t127,\n\t127,\n\t128,\n\t128,\n\t128,\n\t128,\n\t128,\n\t128,\n\t128,\n\t128,\n\t128,\n\t128,\n\t128,\n\t128,\n\t128,\n\t128,\n\t128,\n\t129,\n\t129,\n\t129,\n\t129,\n\t129,\n\t129,\n\t129,\n\t129,\n\t129,\n\t129,\n\t129,\n\t129,\n\t129,\n\t129,\n\t129,\n\t130,\n\t130,\n\t130,\n\t130,\n\t130,\n\t130,\n\t130,\n\t130,\n\t130,\n\t130,\n\t130,\n\t130,\n\t130,\n\t130,\n\t130,\n\t131,\n\t131,\n\t131,\n\t131,\n\t131,\n\t131,\n\t131,\n\t131,\n\t131,\n\t131,\n\t131,\n\t131,\n\t131,\n\t131,\n\t131,\n\t132,\n\t132,\n\t132,\n\t132,\n\t132,\n\t132,\n\t132,\n\t132,\n\t132,\n\t132,\n\t132,\n\t132,\n\t132,\n\t132,\n\t132,\n\t132,\n\t133,\n\t133,\n\t133,\n\t133,\n\t133,\n\t133,\n\t133,\n\t133,\n\t133,\n\t133,\n\t133,\n\t133,\n\t133,\n\t133,\n\t133,\n\t133,\n\t134,\n\t134,\n\t134,\n\t134,\n\t134,\n\t134,\n\t134,\n\t134,\n\t134,\n\t134,\n\t134,\n\t134,\n\t134,\n\t134,\n\t134,\n\t135,\n\t135,\n\t135,\n\t1
35,\n\t135,\n\t135,\n\t135,\n\t135,\n\t135,\n\t135,\n\t135,\n\t135,\n\t135,\n\t135,\n\t135,\n\t135,\n\t136,\n\t136,\n\t136,\n\t136,\n\t136,\n\t136,\n\t136,\n\t136,\n\t136,\n\t136,\n\t136,\n\t136,\n\t136,\n\t136,\n\t136,\n\t136,\n\t136,\n\t137,\n\t137,\n\t137,\n\t137,\n\t137,\n\t137,\n\t137,\n\t137,\n\t137,\n\t137,\n\t137,\n\t137,\n\t137,\n\t137,\n\t137,\n\t137,\n\t138,\n\t138,\n\t138,\n\t138,\n\t138,\n\t138,\n\t138,\n\t138,\n\t138,\n\t138,\n\t138,\n\t138,\n\t138,\n\t138,\n\t138,\n\t138,\n\t139,\n\t139,\n\t139,\n\t139,\n\t139,\n\t139,\n\t139,\n\t139,\n\t139,\n\t139,\n\t139,\n\t139,\n\t139,\n\t139,\n\t139,\n\t139,\n\t139,\n\t140,\n\t140,\n\t140,\n\t140,\n\t140,\n\t140,\n\t140,\n\t140,\n\t140,\n\t140,\n\t140,\n\t140,\n\t140,\n\t140,\n\t140,\n\t140,\n\t140,\n\t141,\n\t141,\n\t141,\n\t141,\n\t141,\n\t141,\n\t141,\n\t141,\n\t141,\n\t141,\n\t141,\n\t141,\n\t141,\n\t141,\n\t141,\n\t141,\n\t142,\n\t142,\n\t142,\n\t142,\n\t142,\n\t142,\n\t142,\n\t142,\n\t142,\n\t142,\n\t142,\n\t142,\n\t142,\n\t142,\n\t142,\n\t142,\n\t142,\n\t142,\n\t143,\n\t143,\n\t143,\n\t143,\n\t143,\n\t143,\n\t143,\n\t143,\n\t143,\n\t143,\n\t143,\n\t143,\n\t143,\n\t143,\n\t143,\n\t143,\n\t143,\n\t144,\n\t144,\n\t144,\n\t144,\n\t144,\n\t144,\n\t144,\n\t144,\n\t144,\n\t144,\n\t144,\n\t144,\n\t144,\n\t144,\n\t144,\n\t144,\n\t144,\n\t145,\n\t145,\n\t145,\n\t145,\n\t145,\n\t145,\n\t145,\n\t145,\n\t145,\n\t145,\n\t145,\n\t145,\n\t145,\n\t145,\n\t145,\n\t145,\n\t145,\n\t145,\n\t146,\n\t146,\n\t146,\n\t146,\n\t146,\n\t146,\n\t146,\n\t146,\n\t146,\n\t146,\n\t146,\n\t146,\n\t146,\n\t146,\n\t146,\n\t146,\n\t146,\n\t147,\n\t147,\n\t147,\n\t147,\n\t147,\n\t147,\n\t147,\n\t147,\n\t147,\n\t147,\n\t147,\n\t147,\n\t147,\n\t147,\n\t147,\n\t147,\n\t147,\n\t147,\n\t148,\n\t148,\n\t148,\n\t148,\n\t148,\n\t148,\n\t148,\n\t148,\n\t148,\n\t148,\n\t148,\n\t148,\n\t148,\n\t148,\n\t148,\n\t148,\n\t148,\n\t148,\n\t149,\n\t149,\n\t149,\n\t149,\n\t149,\n\t149,\n\t149,\n\t149,\n\t149,\n\t149,\n\t149,\n\t149,\n\t149,\n\t149,\n\t149,\n\t1
49,\n\t149,\n\t149,\n\t150,\n\t150,\n\t150,\n\t150,\n\t150,\n\t150,\n\t150,\n\t150,\n\t150,\n\t150,\n\t150,\n\t150,\n\t150,\n\t150,\n\t150,\n\t150,\n\t150,\n\t150,\n\t151,\n\t151,\n\t151,\n\t151,\n\t151,\n\t151,\n\t151,\n\t151,\n\t151,\n\t151,\n\t151,\n\t151,\n\t151,\n\t151,\n\t151,\n\t151,\n\t151,\n\t151,\n\t151,\n\t152,\n\t152,\n\t152,\n\t152,\n\t152,\n\t152,\n\t152,\n\t152,\n\t152,\n\t152,\n\t152,\n\t152,\n\t152,\n\t152,\n\t152,\n\t152,\n\t152,\n\t152,\n\t153,\n\t153,\n\t153,\n\t153,\n\t153,\n\t153,\n\t153,\n\t153,\n\t153,\n\t153,\n\t153,\n\t153,\n\t153,\n\t153,\n\t153,\n\t153,\n\t153,\n\t153,\n\t153,\n\t154,\n\t154,\n\t154,\n\t154,\n\t154,\n\t154,\n\t154,\n\t154,\n\t154,\n\t154,\n\t154,\n\t154,\n\t154,\n\t154,\n\t154,\n\t154,\n\t154,\n\t154,\n\t154,\n\t155,\n\t155,\n\t155,\n\t155,\n\t155,\n\t155,\n\t155,\n\t155,\n\t155,\n\t155,\n\t155,\n\t155,\n\t155,\n\t155,\n\t155,\n\t155,\n\t155,\n\t155,\n\t155,\n\t156,\n\t156,\n\t156,\n\t156,\n\t156,\n\t156,\n\t156,\n\t156,\n\t156,\n\t156,\n\t156,\n\t156,\n\t156,\n\t156,\n\t156,\n\t156,\n\t156,\n\t156,\n\t156,\n\t157,\n\t157,\n\t157,\n\t157,\n\t157,\n\t157,\n\t157,\n\t157,\n\t157,\n\t157,\n\t157,\n\t157,\n\t157,\n\t157,\n\t157,\n\t157,\n\t157,\n\t157,\n\t157,\n\t157,\n\t158,\n\t158,\n\t158,\n\t158,\n\t158,\n\t158,\n\t158,\n\t158,\n\t158,\n\t158,\n\t158,\n\t158,\n\t158,\n\t158,\n\t158,\n\t158,\n\t158,\n\t158,\n\t158,\n\t159,\n\t159,\n\t159,\n\t159,\n\t159,\n\t159,\n\t159,\n\t159,\n\t159,\n\t159,\n\t159,\n\t159,\n\t159,\n\t159,\n\t159,\n\t159,\n\t159,\n\t159,\n\t159,\n\t159,\n\t160,\n\t160,\n\t160,\n\t160,\n\t160,\n\t160,\n\t160,\n\t160,\n\t160,\n\t160,\n\t160,\n\t160,\n\t160,\n\t160,\n\t160,\n\t160,\n\t160,\n\t160,\n\t160,\n\t160,\n\t161,\n\t161,\n\t161,\n\t161,\n\t161,\n\t161,\n\t161,\n\t161,\n\t161,\n\t161,\n\t161,\n\t161,\n\t161,\n\t161,\n\t161,\n\t161,\n\t161,\n\t161,\n\t161,\n\t161,\n\t162,\n\t162,\n\t162,\n\t162,\n\t162,\n\t162,\n\t162,\n\t162,\n\t162,\n\t162,\n\t162,\n\t162,\n\t162,\n\t162,\n\t162,\n\t162,\n\t162,\n\t1
62,\n\t162,\n\t162,\n\t163,\n\t163,\n\t163,\n\t163,\n\t163,\n\t163,\n\t163,\n\t163,\n\t163,\n\t163,\n\t163,\n\t163,\n\t163,\n\t163,\n\t163,\n\t163,\n\t163,\n\t163,\n\t163,\n\t163,\n\t164,\n\t164,\n\t164,\n\t164,\n\t164,\n\t164,\n\t164,\n\t164,\n\t164,\n\t164,\n\t164,\n\t164,\n\t164,\n\t164,\n\t164,\n\t164,\n\t164,\n\t164,\n\t164,\n\t164,\n\t164,\n\t165,\n\t165,\n\t165,\n\t165,\n\t165,\n\t165,\n\t165,\n\t165,\n\t165,\n\t165,\n\t165,\n\t165,\n\t165,\n\t165,\n\t165,\n\t165,\n\t165,\n\t165,\n\t165,\n\t165,\n\t165,\n\t166,\n\t166,\n\t166,\n\t166,\n\t166,\n\t166,\n\t166,\n\t166,\n\t166,\n\t166,\n\t166,\n\t166,\n\t166,\n\t166,\n\t166,\n\t166,\n\t166,\n\t166,\n\t166,\n\t166,\n\t167,\n\t167,\n\t167,\n\t167,\n\t167,\n\t167,\n\t167,\n\t167,\n\t167,\n\t167,\n\t167,\n\t167,\n\t167,\n\t167,\n\t167,\n\t167,\n\t167,\n\t167,\n\t167,\n\t167,\n\t167,\n\t168,\n\t168,\n\t168,\n\t168,\n\t168,\n\t168,\n\t168,\n\t168,\n\t168,\n\t168,\n\t168,\n\t168,\n\t168,\n\t168,\n\t168,\n\t168,\n\t168,\n\t168,\n\t168,\n\t168,\n\t168,\n\t169,\n\t169,\n\t169,\n\t169,\n\t169,\n\t169,\n\t169,\n\t169,\n\t169,\n\t169,\n\t169,\n\t169,\n\t169,\n\t169,\n\t169,\n\t169,\n\t169,\n\t169,\n\t169,\n\t169,\n\t169,\n\t169,\n\t170,\n\t170,\n\t170,\n\t170,\n\t170,\n\t170,\n\t170,\n\t170,\n\t170,\n\t170,\n\t170,\n\t170,\n\t170,\n\t170,\n\t170,\n\t170,\n\t170,\n\t170,\n\t170,\n\t170,\n\t170,\n\t171,\n\t171,\n\t171,\n\t171,\n\t171,\n\t171,\n\t171,\n\t171,\n\t171,\n\t171,\n\t171,\n\t171,\n\t171,\n\t171,\n\t171,\n\t171,\n\t171,\n\t171,\n\t171,\n\t171,\n\t171,\n\t171,\n\t172,\n\t172,\n\t172,\n\t172,\n\t172,\n\t172,\n\t172,\n\t172,\n\t172,\n\t172,\n\t172,\n\t172,\n\t172,\n\t172,\n\t172,\n\t172,\n\t172,\n\t172,\n\t172,\n\t172,\n\t172,\n\t172,\n\t173,\n\t173,\n\t173,\n\t173,\n\t173,\n\t173,\n\t173,\n\t173,\n\t173,\n\t173,\n\t173,\n\t173,\n\t173,\n\t173,\n\t173,\n\t173,\n\t173,\n\t173,\n\t173,\n\t173,\n\t173,\n\t173,\n\t174,\n\t174,\n\t174,\n\t174,\n\t174,\n\t174,\n\t174,\n\t174,\n\t174,\n\t174,\n\t174,\n\t174,\n\t174,\n\t174,\n\t1
74,\n\t174,\n\t174,\n\t174,\n\t174,\n\t174,\n\t174,\n\t174,\n\t175,\n\t175,\n\t175,\n\t175,\n\t175,\n\t175,\n\t175,\n\t175,\n\t175,\n\t175,\n\t175,\n\t175,\n\t175,\n\t175,\n\t175,\n\t175,\n\t175,\n\t175,\n\t175,\n\t175,\n\t175,\n\t175,\n\t176,\n\t176,\n\t176,\n\t176,\n\t176,\n\t176,\n\t176,\n\t176,\n\t176,\n\t176,\n\t176,\n\t176,\n\t176,\n\t176,\n\t176,\n\t176,\n\t176,\n\t176,\n\t176,\n\t176,\n\t176,\n\t176,\n\t176,\n\t177,\n\t177,\n\t177,\n\t177,\n\t177,\n\t177,\n\t177,\n\t177,\n\t177,\n\t177,\n\t177,\n\t177,\n\t177,\n\t177,\n\t177,\n\t177,\n\t177,\n\t177,\n\t177,\n\t177,\n\t177,\n\t177,\n\t178,\n\t178,\n\t178,\n\t178,\n\t178,\n\t178,\n\t178,\n\t178,\n\t178,\n\t178,\n\t178,\n\t178,\n\t178,\n\t178,\n\t178,\n\t178,\n\t178,\n\t178,\n\t178,\n\t178,\n\t178,\n\t178,\n\t178,\n\t179,\n\t179,\n\t179,\n\t179,\n\t179,\n\t179,\n\t179,\n\t179,\n\t179,\n\t179,\n\t179,\n\t179,\n\t179,\n\t179,\n\t179,\n\t179,\n\t179,\n\t179,\n\t179,\n\t179,\n\t179,\n\t179,\n\t179,\n\t180,\n\t180,\n\t180,\n\t180,\n\t180,\n\t180,\n\t180,\n\t180,\n\t180,\n\t180,\n\t180,\n\t180,\n\t180,\n\t180,\n\t180,\n\t180,\n\t180,\n\t180,\n\t180,\n\t180,\n\t180,\n\t180,\n\t180,\n\t181,\n\t181,\n\t181,\n\t181,\n\t181,\n\t181,\n\t181,\n\t181,\n\t181,\n\t181,\n\t181,\n\t181,\n\t181,\n\t181,\n\t181,\n\t181,\n\t181,\n\t181,\n\t181,\n\t181,\n\t181,\n\t181,\n\t181,\n\t182,\n\t182,\n\t182,\n\t182,\n\t182,\n\t182,\n\t182,\n\t182,\n\t182,\n\t182,\n\t182,\n\t182,\n\t182,\n\t182,\n\t182,\n\t182,\n\t182,\n\t182,\n\t182,\n\t182,\n\t182,\n\t182,\n\t182,\n\t182,\n\t183,\n\t183,\n\t183,\n\t183,\n\t183,\n\t183,\n\t183,\n\t183,\n\t183,\n\t183,\n\t183,\n\t183,\n\t183,\n\t183,\n\t183,\n\t183,\n\t183,\n\t183,\n\t183,\n\t183,\n\t183,\n\t183,\n\t183,\n\t184,\n\t184,\n\t184,\n\t184,\n\t184,\n\t184,\n\t184,\n\t184,\n\t184,\n\t184,\n\t184,\n\t184,\n\t184,\n\t184,\n\t184,\n\t184,\n\t184,\n\t184,\n\t184,\n\t184,\n\t184,\n\t184,\n\t184,\n\t184,\n\t185,\n\t185,\n\t185,\n\t185,\n\t185,\n\t185,\n\t185,\n\t185,\n\t185,\n\t185,\n\t185,\n\t185,\n\t1
85,\n\t185,\n\t185,\n\t185,\n\t185,\n\t185,\n\t185,\n\t185,\n\t185,\n\t185,\n\t185,\n\t185,\n\t186,\n\t186,\n\t186,\n\t186,\n\t186,\n\t186,\n\t186,\n\t186,\n\t186,\n\t186,\n\t186,\n\t186,\n\t186,\n\t186,\n\t186,\n\t186,\n\t186,\n\t186,\n\t186,\n\t186,\n\t186,\n\t186,\n\t186,\n\t186,\n\t187,\n\t187,\n\t187,\n\t187,\n\t187,\n\t187,\n\t187,\n\t187,\n\t187,\n\t187,\n\t187,\n\t187,\n\t187,\n\t187,\n\t187,\n\t187,\n\t187,\n\t187,\n\t187,\n\t187,\n\t187,\n\t187,\n\t187,\n\t187,\n\t187,\n\t188,\n\t188,\n\t188,\n\t188,\n\t188,\n\t188,\n\t188,\n\t188,\n\t188,\n\t188,\n\t188,\n\t188,\n\t188,\n\t188,\n\t188,\n\t188,\n\t188,\n\t188,\n\t188,\n\t188,\n\t188,\n\t188,\n\t188,\n\t188,\n\t189,\n\t189,\n\t189,\n\t189,\n\t189,\n\t189,\n\t189,\n\t189,\n\t189,\n\t189,\n\t189,\n\t189,\n\t189,\n\t189,\n\t189,\n\t189,\n\t189,\n\t189,\n\t189,\n\t189,\n\t189,\n\t189,\n\t189,\n\t189,\n\t189,\n\t190,\n\t190,\n\t190,\n\t190,\n\t190,\n\t190,\n\t190,\n\t190,\n\t190,\n\t190,\n\t190,\n\t190,\n\t190,\n\t190,\n\t190,\n\t190,\n\t190,\n\t190,\n\t190,\n\t190,\n\t190,\n\t190,\n\t190,\n\t190,\n\t190,\n\t191,\n\t191,\n\t191,\n\t191,\n\t191,\n\t191,\n\t191,\n\t191,\n\t191,\n\t191,\n\t191,\n\t191,\n\t191,\n\t191,\n\t191,\n\t191,\n\t191,\n\t191,\n\t191,\n\t191,\n\t191,\n\t191,\n\t191,\n\t191,\n\t191,\n\t192,\n\t192,\n\t192,\n\t192,\n\t192,\n\t192,\n\t192,\n\t192,\n\t192,\n\t192,\n\t192,\n\t192,\n\t192,\n\t192,\n\t192,\n\t192,\n\t192,\n\t192,\n\t192,\n\t192,\n\t192,\n\t192,\n\t192,\n\t192,\n\t192,\n\t193,\n\t193,\n\t193,\n\t193,\n\t193,\n\t193,\n\t193,\n\t193,\n\t193,\n\t193,\n\t193,\n\t193,\n\t193,\n\t193,\n\t193,\n\t193,\n\t193,\n\t193,\n\t193,\n\t193,\n\t193,\n\t193,\n\t193,\n\t193,\n\t193,\n\t194,\n\t194,\n\t194,\n\t194,\n\t194,\n\t194,\n\t194,\n\t194,\n\t194,\n\t194,\n\t194,\n\t194,\n\t194,\n\t194,\n\t194,\n\t194,\n\t194,\n\t194,\n\t194,\n\t194,\n\t194,\n\t194,\n\t194,\n\t194,\n\t194,\n\t195,\n\t195,\n\t195,\n\t195,\n\t195,\n\t195,\n\t195,\n\t195,\n\t195,\n\t195,\n\t195,\n\t195,\n\t195,\n\t195,\n\t195,\n\t1
95,\n\t195,\n\t195,\n\t195,\n\t195,\n\t195,\n\t195,\n\t195,\n\t195,\n\t195,\n\t195,\n\t196,\n\t196,\n\t196,\n\t196,\n\t196,\n\t196,\n\t196,\n\t196,\n\t196,\n\t196,\n\t196,\n\t196,\n\t196,\n\t196,\n\t196,\n\t196,\n\t196,\n\t196,\n\t196,\n\t196,\n\t196,\n\t196,\n\t196,\n\t196,\n\t196,\n\t196,\n\t197,\n\t197,\n\t197,\n\t197,\n\t197,\n\t197,\n\t197,\n\t197,\n\t197,\n\t197,\n\t197,\n\t197,\n\t197,\n\t197,\n\t197,\n\t197,\n\t197,\n\t197,\n\t197,\n\t197,\n\t197,\n\t197,\n\t197,\n\t197,\n\t197,\n\t197,\n\t198,\n\t198,\n\t198,\n\t198,\n\t198,\n\t198,\n\t198,\n\t198,\n\t198,\n\t198,\n\t198,\n\t198,\n\t198,\n\t198,\n\t198,\n\t198,\n\t198,\n\t198,\n\t198,\n\t198,\n\t198,\n\t198,\n\t198,\n\t198,\n\t198,\n\t198,\n\t199,\n\t199,\n\t199,\n\t199,\n\t199,\n\t199,\n\t199,\n\t199,\n\t199,\n\t199,\n\t199,\n\t199,\n\t199,\n\t199,\n\t199,\n\t199,\n\t199,\n\t199,\n\t199,\n\t199,\n\t199,\n\t199,\n\t199,\n\t199,\n\t199,\n\t199,\n\t199,\n\t200,\n\t200,\n\t200,\n\t200,\n\t200,\n\t200,\n\t200,\n\t200,\n\t200,\n\t200,\n\t200,\n\t200,\n\t200,\n\t200,\n\t200,\n\t200,\n\t200,\n\t200,\n\t200,\n\t200,\n\t200,\n\t200,\n\t200,\n\t200,\n\t200,\n\t200,\n\t201,\n\t201,\n\t201,\n\t201,\n\t201,\n\t201,\n\t201,\n\t201,\n\t201,\n\t201,\n\t201,\n\t201,\n\t201,\n\t201,\n\t201,\n\t201,\n\t201,\n\t201,\n\t201,\n\t201,\n\t201,\n\t201,\n\t201,\n\t201,\n\t201,\n\t201,\n\t201,\n\t202,\n\t202,\n\t202,\n\t202,\n\t202,\n\t202,\n\t202,\n\t202,\n\t202,\n\t202,\n\t202,\n\t202,\n\t202,\n\t202,\n\t202,\n\t202,\n\t202,\n\t202,\n\t202,\n\t202,\n\t202,\n\t202,\n\t202,\n\t202,\n\t202,\n\t202,\n\t202,\n\t203,\n\t203,\n\t203,\n\t203,\n\t203,\n\t203,\n\t203,\n\t203,\n\t203,\n\t203,\n\t203,\n\t203,\n\t203,\n\t203,\n\t203,\n\t203,\n\t203,\n\t203,\n\t203,\n\t203,\n\t203,\n\t203,\n\t203,\n\t203,\n\t203,\n\t203,\n\t203,\n\t204,\n\t204,\n\t204,\n\t204,\n\t204,\n\t204,\n\t204,\n\t204,\n\t204,\n\t204,\n\t204,\n\t204,\n\t204,\n\t204,\n\t204,\n\t204,\n\t204,\n\t204,\n\t204,\n\t204,\n\t204,\n\t204,\n\t204,\n\t204,\n\t204,\n\t204,\n\t204,\n\t2
05,\n\t205,\n\t205,\n\t205,\n\t205,\n\t205,\n\t205,\n\t205,\n\t205,\n\t205,\n\t205,\n\t205,\n\t205,\n\t205,\n\t205,\n\t205,\n\t205,\n\t205,\n\t205,\n\t205,\n\t205,\n\t205,\n\t205,\n\t205,\n\t205,\n\t205,\n\t205,\n\t206,\n\t206,\n\t206,\n\t206,\n\t206,\n\t206,\n\t206,\n\t206,\n\t206,\n\t206,\n\t206,\n\t206,\n\t206,\n\t206,\n\t206,\n\t206,\n\t206,\n\t206,\n\t206,\n\t206,\n\t206,\n\t206,\n\t206,\n\t206,\n\t206,\n\t206,\n\t206,\n\t206,\n\t207,\n\t207,\n\t207,\n\t207,\n\t207,\n\t207,\n\t207,\n\t207,\n\t207,\n\t207,\n\t207,\n\t207,\n\t207,\n\t207,\n\t207,\n\t207,\n\t207,\n\t207,\n\t207,\n\t207,\n\t207,\n\t207,\n\t207,\n\t207,\n\t207,\n\t207,\n\t207,\n\t207,\n\t208,\n\t208,\n\t208,\n\t208,\n\t208,\n\t208,\n\t208,\n\t208,\n\t208,\n\t208,\n\t208,\n\t208,\n\t208,\n\t208,\n\t208,\n\t208,\n\t208,\n\t208,\n\t208,\n\t208,\n\t208,\n\t208,\n\t208,\n\t208,\n\t208,\n\t208,\n\t208,\n\t208,\n\t209,\n\t209,\n\t209,\n\t209,\n\t209,\n\t209,\n\t209,\n\t209,\n\t209,\n\t209,\n\t209,\n\t209,\n\t209,\n\t209,\n\t209,\n\t209,\n\t209,\n\t209,\n\t209,\n\t209,\n\t209,\n\t209,\n\t209,\n\t209,\n\t209,\n\t209,\n\t209,\n\t209,\n\t210,\n\t210,\n\t210,\n\t210,\n\t210,\n\t210,\n\t210,\n\t210,\n\t210,\n\t210,\n\t210,\n\t210,\n\t210,\n\t210,\n\t210,\n\t210,\n\t210,\n\t210,\n\t210,\n\t210,\n\t210,\n\t210,\n\t210,\n\t210,\n\t210,\n\t210,\n\t210,\n\t210,\n\t211,\n\t211,\n\t211,\n\t211,\n\t211,\n\t211,\n\t211,\n\t211,\n\t211,\n\t211,\n\t211,\n\t211,\n\t211,\n\t211,\n\t211,\n\t211,\n\t211,\n\t211,\n\t211,\n\t211,\n\t211,\n\t211,\n\t211,\n\t211,\n\t211,\n\t211,\n\t211,\n\t211,\n\t212,\n\t212,\n\t212,\n\t212,\n\t212,\n\t212,\n\t212,\n\t212,\n\t212,\n\t212,\n\t212,\n\t212,\n\t212,\n\t212,\n\t212,\n\t212,\n\t212,\n\t212,\n\t212,\n\t212,\n\t212,\n\t212,\n\t212,\n\t212,\n\t212,\n\t212,\n\t212,\n\t212,\n\t212,\n\t213,\n\t213,\n\t213,\n\t213,\n\t213,\n\t213,\n\t213,\n\t213,\n\t213,\n\t213,\n\t213,\n\t213,\n\t213,\n\t213,\n\t213,\n\t213,\n\t213,\n\t213,\n\t213,\n\t213,\n\t213,\n\t213,\n\t213,\n\t213,\n\t213,\n\t213,\n\t2
13,\n\t213,\n\t213,\n\t214,\n\t214,\n\t214,\n\t214,\n\t214,\n\t214,\n\t214,\n\t214,\n\t214,\n\t214,\n\t214,\n\t214,\n\t214,\n\t214,\n\t214,\n\t214,\n\t214,\n\t214,\n\t214,\n\t214,\n\t214,\n\t214,\n\t214,\n\t214,\n\t214,\n\t214,\n\t214,\n\t214,\n\t214,\n\t215,\n\t215,\n\t215,\n\t215,\n\t215,\n\t215,\n\t215,\n\t215,\n\t215,\n\t215,\n\t215,\n\t215,\n\t215,\n\t215,\n\t215,\n\t215,\n\t215,\n\t215,\n\t215,\n\t215,\n\t215,\n\t215,\n\t215,\n\t215,\n\t215,\n\t215,\n\t215,\n\t215,\n\t215,\n\t216,\n\t216,\n\t216,\n\t216,\n\t216,\n\t216,\n\t216,\n\t216,\n\t216,\n\t216,\n\t216,\n\t216,\n\t216,\n\t216,\n\t216,\n\t216,\n\t216,\n\t216,\n\t216,\n\t216,\n\t216,\n\t216,\n\t216,\n\t216,\n\t216,\n\t216,\n\t216,\n\t216,\n\t216,\n\t217,\n\t217,\n\t217,\n\t217,\n\t217,\n\t217,\n\t217,\n\t217,\n\t217,\n\t217,\n\t217,\n\t217,\n\t217,\n\t217,\n\t217,\n\t217,\n\t217,\n\t217,\n\t217,\n\t217,\n\t217,\n\t217,\n\t217,\n\t217,\n\t217,\n\t217,\n\t217,\n\t217,\n\t217,\n\t217,\n\t218,\n\t218,\n\t218,\n\t218,\n\t218,\n\t218,\n\t218,\n\t218,\n\t218,\n\t218,\n\t218,\n\t218,\n\t218,\n\t218,\n\t218,\n\t218,\n\t218,\n\t218,\n\t218,\n\t218,\n\t218,\n\t218,\n\t218,\n\t218,\n\t218,\n\t218,\n\t218,\n\t218,\n\t218,\n\t218,\n\t219,\n\t219,\n\t219,\n\t219,\n\t219,\n\t219,\n\t219,\n\t219,\n\t219,\n\t219,\n\t219,\n\t219,\n\t219,\n\t219,\n\t219,\n\t219,\n\t219,\n\t219,\n\t219,\n\t219,\n\t219,\n\t219,\n\t219,\n\t219,\n\t219,\n\t219,\n\t219,\n\t219,\n\t219,\n\t220,\n\t220,\n\t220,\n\t220,\n\t220,\n\t220,\n\t220,\n\t220,\n\t220,\n\t220,\n\t220,\n\t220,\n\t220,\n\t220,\n\t220,\n\t220,\n\t220,\n\t220,\n\t220,\n\t220,\n\t220,\n\t220,\n\t220,\n\t220,\n\t220,\n\t220,\n\t220,\n\t220,\n\t220,\n\t220,\n\t220,\n\t221,\n\t221,\n\t221,\n\t221,\n\t221,\n\t221,\n\t221,\n\t221,\n\t221,\n\t221,\n\t221,\n\t221,\n\t221,\n\t221,\n\t221,\n\t221,\n\t221,\n\t221,\n\t221,\n\t221,\n\t221,\n\t221,\n\t221,\n\t221,\n\t221,\n\t221,\n\t221,\n\t221,\n\t221,\n\t221,\n\t222,\n\t222,\n\t222,\n\t222,\n\t222,\n\t222,\n\t222,\n\t222,\n\t222,\n\t222,\n\t2
22,\n\t222,\n\t222,\n\t222,\n\t222,\n\t222,\n\t222,\n\t222,\n\t222,\n\t222,\n\t222,\n\t222,\n\t222,\n\t222,\n\t222,\n\t222,\n\t222,\n\t222,\n\t222,\n\t222,\n\t223,\n\t223,\n\t223,\n\t223,\n\t223,\n\t223,\n\t223,\n\t223,\n\t223,\n\t223,\n\t223,\n\t223,\n\t223,\n\t223,\n\t223,\n\t223,\n\t223,\n\t223,\n\t223,\n\t223,\n\t223,\n\t223,\n\t223,\n\t223,\n\t223,\n\t223,\n\t223,\n\t223,\n\t223,\n\t223,\n\t223,\n\t224,\n\t224,\n\t224,\n\t224,\n\t224,\n\t224,\n\t224,\n\t224,\n\t224,\n\t224,\n\t224,\n\t224,\n\t224,\n\t224,\n\t224,\n\t224,\n\t224,\n\t224,\n\t224,\n\t224,\n\t224,\n\t224,\n\t224,\n\t224,\n\t224,\n\t224,\n\t224,\n\t224,\n\t224,\n\t224,\n\t224,\n\t225,\n\t225,\n\t225,\n\t225,\n\t225,\n\t225,\n\t225,\n\t225,\n\t225,\n\t225,\n\t225,\n\t225,\n\t225,\n\t225,\n\t225,\n\t225,\n\t225,\n\t225,\n\t225,\n\t225,\n\t225,\n\t225,\n\t225,\n\t225,\n\t225,\n\t225,\n\t225,\n\t225,\n\t225,\n\t225,\n\t225,\n\t226,\n\t226,\n\t226,\n\t226,\n\t226,\n\t226,\n\t226,\n\t226,\n\t226,\n\t226,\n\t226,\n\t226,\n\t226,\n\t226,\n\t226,\n\t226,\n\t226,\n\t226,\n\t226,\n\t226,\n\t226,\n\t226,\n\t226,\n\t226,\n\t226,\n\t226,\n\t226,\n\t226,\n\t226,\n\t226,\n\t226,\n\t227,\n\t227,\n\t227,\n\t227,\n\t227,\n\t227,\n\t227,\n\t227,\n\t227,\n\t227,\n\t227,\n\t227,\n\t227,\n\t227,\n\t227,\n\t227,\n\t227,\n\t227,\n\t227,\n\t227,\n\t227,\n\t227,\n\t227,\n\t227,\n\t227,\n\t227,\n\t227,\n\t227,\n\t227,\n\t227,\n\t227,\n\t228,\n\t228,\n\t228,\n\t228,\n\t228,\n\t228,\n\t228,\n\t228,\n\t228,\n\t228,\n\t228,\n\t228,\n\t228,\n\t228,\n\t228,\n\t228,\n\t228,\n\t228,\n\t228,\n\t228,\n\t228,\n\t228,\n\t228,\n\t228,\n\t228,\n\t228,\n\t228,\n\t228,\n\t228,\n\t228,\n\t228,\n\t228,\n\t229,\n\t229,\n\t229,\n\t229,\n\t229,\n\t229,\n\t229,\n\t229,\n\t229,\n\t229,\n\t229,\n\t229,\n\t229,\n\t229,\n\t229,\n\t229,\n\t229,\n\t229,\n\t229,\n\t229,\n\t229,\n\t229,\n\t229,\n\t229,\n\t229,\n\t229,\n\t229,\n\t229,\n\t229,\n\t229,\n\t229,\n\t230,\n\t230,\n\t230,\n\t230,\n\t230,\n\t230,\n\t230,\n\t230,\n\t230,\n\t230,\n\t230,\n\t230,\n\t2
30,\n\t230,\n\t230,\n\t230,\n\t230,\n\t230,\n\t230,\n\t230,\n\t230,\n\t230,\n\t230,\n\t230,\n\t230,\n\t230,\n\t230,\n\t230,\n\t230,\n\t230,\n\t230,\n\t230,\n\t231,\n\t231,\n\t231,\n\t231,\n\t231,\n\t231,\n\t231,\n\t231,\n\t231,\n\t231,\n\t231,\n\t231,\n\t231,\n\t231,\n\t231,\n\t231,\n\t231,\n\t231,\n\t231,\n\t231,\n\t231,\n\t231,\n\t231,\n\t231,\n\t231,\n\t231,\n\t231,\n\t231,\n\t231,\n\t231,\n\t231,\n\t231,\n\t232,\n\t232,\n\t232,\n\t232,\n\t232,\n\t232,\n\t232,\n\t232,\n\t232,\n\t232,\n\t232,\n\t232,\n\t232,\n\t232,\n\t232,\n\t232,\n\t232,\n\t232,\n\t232,\n\t232,\n\t232,\n\t232,\n\t232,\n\t232,\n\t232,\n\t232,\n\t232,\n\t232,\n\t232,\n\t232,\n\t232,\n\t232,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t233,\n\t234,\n\t234,\n\t234,\n\t234,\n\t234,\n\t234,\n\t234,\n\t234,\n\t234,\n\t234,\n\t234,\n\t234,\n\t234,\n\t234,\n\t234,\n\t234,\n\t234,\n\t234,\n\t234,\n\t234,\n\t234,\n\t234,\n\t234,\n\t234,\n\t234,\n\t234,\n\t234,\n\t234,\n\t234,\n\t234,\n\t234,\n\t234,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t235,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t236,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t237,\n\t238,\n\t238,\n\t2
38,\n\t238,\n\t238,\n\t238,\n\t238,\n\t238,\n\t238,\n\t238,\n\t238,\n\t238,\n\t238,\n\t238,\n\t238,\n\t238,\n\t238,\n\t238,\n\t238,\n\t238,\n\t238,\n\t238,\n\t238,\n\t238,\n\t238,\n\t238,\n\t238,\n\t238,\n\t238,\n\t238,\n\t238,\n\t238,\n\t238,\n\t238,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t239,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t240,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t241,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t242,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t243,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t244,\n\t245,\n\t245,\n\t245,\n\t245,\n\t245,\n\t245,\n\t245,\n\t245,\n\t245,\n\t245,\n\t245,\n\t245,\n\t245,\n\t245,\n\t2
45,\n\t245,\n\t245,\n\t245,\n\t245,\n\t245,\n\t245,\n\t245,\n\t245,\n\t245,\n\t245,\n\t245,\n\t245,\n\t245,\n\t245,\n\t245,\n\t245,\n\t245,\n\t245,\n\t245,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t246,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t247,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t248,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t249,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t250,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t251,\n\t252,\n\t252,\n\t252,\n\t252,\n\t252,\n\t252,\n\t252,\n\t252,\n\t252,\n\t252,\n\t252,\n\t252,\n\t252,\n\t252,\n\t252,\n\t252,\n\t252,\n\t252,\n\t2
52,\n\t252,\n\t252,\n\t252,\n\t252,\n\t252,\n\t252,\n\t252,\n\t252,\n\t252,\n\t252,\n\t252,\n\t252,\n\t252,\n\t252,\n\t252,\n\t252,\n\t252,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t253,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t254,\n\t255,\n\t255,\n\t255,\n\t255,\n\t255,\n\t255,\n\t255,\n\t255,\n\t255,\n\t255,\n\t255,\n\t255,\n\t255,\n\t255,\n\t255,\n\t255,\n\t255,\n\t255,\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/palette/viridis.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage palette\n\nimport \"image/color\"\n\n// Viridis is a continuous sequential color map that is perceptually\n// uniform, colorblind-friendly, and converts well to grayscale. It\n// goes from black to blue to green to yellow.\n//\n// Viridis was developed by Stéfan van der Walt, Nathaniel Smith, and\n// Eric Firing as the default colormap for Matplotlib 2.0. It is\n// available under a CC0 (no rights reserved) license.\nvar Viridis Continuous\n\nfunc init() {\n\tViridis = RGBGradient{\n\t\tColors: []color.RGBA{\n\t\t\t{68, 1, 84, 255},\n\t\t\t{68, 2, 86, 255},\n\t\t\t{69, 4, 87, 255},\n\t\t\t{69, 5, 89, 255},\n\t\t\t{70, 7, 90, 255},\n\t\t\t{70, 8, 92, 255},\n\t\t\t{70, 10, 93, 255},\n\t\t\t{70, 11, 94, 255},\n\t\t\t{71, 13, 96, 255},\n\t\t\t{71, 14, 97, 255},\n\t\t\t{71, 16, 99, 255},\n\t\t\t{71, 17, 100, 255},\n\t\t\t{71, 19, 101, 255},\n\t\t\t{72, 20, 103, 255},\n\t\t\t{72, 22, 104, 255},\n\t\t\t{72, 23, 105, 255},\n\t\t\t{72, 24, 106, 255},\n\t\t\t{72, 26, 108, 255},\n\t\t\t{72, 27, 109, 255},\n\t\t\t{72, 28, 110, 255},\n\t\t\t{72, 29, 111, 255},\n\t\t\t{72, 31, 112, 255},\n\t\t\t{72, 32, 113, 255},\n\t\t\t{72, 33, 115, 255},\n\t\t\t{72, 35, 116, 255},\n\t\t\t{72, 36, 117, 255},\n\t\t\t{72, 37, 118, 255},\n\t\t\t{72, 38, 119, 255},\n\t\t\t{72, 40, 120, 255},\n\t\t\t{72, 41, 121, 255},\n\t\t\t{71, 42, 122, 255},\n\t\t\t{71, 44, 122, 255},\n\t\t\t{71, 45, 123, 255},\n\t\t\t{71, 46, 124, 255},\n\t\t\t{71, 47, 125, 255},\n\t\t\t{70, 48, 126, 255},\n\t\t\t{70, 50, 126, 255},\n\t\t\t{70, 51, 127, 255},\n\t\t\t{70, 52, 128, 255},\n\t\t\t{69, 53, 129, 255},\n\t\t\t{69, 55, 129, 255},\n\t\t\t{69, 56, 130, 255},\n\t\t\t{68, 57, 131, 255},\n\t\t\t{68, 58, 131, 255},\n\t\t\t{68, 59, 132, 255},\n\t\t\t{67, 61, 132, 255},\n\t\t\t{67, 62, 133, 255},\n\t\t\t{66, 63, 133, 255},\n\t\t\t{66, 64, 
134, 255},\n\t\t\t{66, 65, 134, 255},\n\t\t\t{65, 66, 135, 255},\n\t\t\t{65, 68, 135, 255},\n\t\t\t{64, 69, 136, 255},\n\t\t\t{64, 70, 136, 255},\n\t\t\t{63, 71, 136, 255},\n\t\t\t{63, 72, 137, 255},\n\t\t\t{62, 73, 137, 255},\n\t\t\t{62, 74, 137, 255},\n\t\t\t{62, 76, 138, 255},\n\t\t\t{61, 77, 138, 255},\n\t\t\t{61, 78, 138, 255},\n\t\t\t{60, 79, 138, 255},\n\t\t\t{60, 80, 139, 255},\n\t\t\t{59, 81, 139, 255},\n\t\t\t{59, 82, 139, 255},\n\t\t\t{58, 83, 139, 255},\n\t\t\t{58, 84, 140, 255},\n\t\t\t{57, 85, 140, 255},\n\t\t\t{57, 86, 140, 255},\n\t\t\t{56, 88, 140, 255},\n\t\t\t{56, 89, 140, 255},\n\t\t\t{55, 90, 140, 255},\n\t\t\t{55, 91, 141, 255},\n\t\t\t{54, 92, 141, 255},\n\t\t\t{54, 93, 141, 255},\n\t\t\t{53, 94, 141, 255},\n\t\t\t{53, 95, 141, 255},\n\t\t\t{52, 96, 141, 255},\n\t\t\t{52, 97, 141, 255},\n\t\t\t{51, 98, 141, 255},\n\t\t\t{51, 99, 141, 255},\n\t\t\t{50, 100, 142, 255},\n\t\t\t{50, 101, 142, 255},\n\t\t\t{49, 102, 142, 255},\n\t\t\t{49, 103, 142, 255},\n\t\t\t{49, 104, 142, 255},\n\t\t\t{48, 105, 142, 255},\n\t\t\t{48, 106, 142, 255},\n\t\t\t{47, 107, 142, 255},\n\t\t\t{47, 108, 142, 255},\n\t\t\t{46, 109, 142, 255},\n\t\t\t{46, 110, 142, 255},\n\t\t\t{46, 111, 142, 255},\n\t\t\t{45, 112, 142, 255},\n\t\t\t{45, 113, 142, 255},\n\t\t\t{44, 113, 142, 255},\n\t\t\t{44, 114, 142, 255},\n\t\t\t{44, 115, 142, 255},\n\t\t\t{43, 116, 142, 255},\n\t\t\t{43, 117, 142, 255},\n\t\t\t{42, 118, 142, 255},\n\t\t\t{42, 119, 142, 255},\n\t\t\t{42, 120, 142, 255},\n\t\t\t{41, 121, 142, 255},\n\t\t\t{41, 122, 142, 255},\n\t\t\t{41, 123, 142, 255},\n\t\t\t{40, 124, 142, 255},\n\t\t\t{40, 125, 142, 255},\n\t\t\t{39, 126, 142, 255},\n\t\t\t{39, 127, 142, 255},\n\t\t\t{39, 128, 142, 255},\n\t\t\t{38, 129, 142, 255},\n\t\t\t{38, 130, 142, 255},\n\t\t\t{38, 130, 142, 255},\n\t\t\t{37, 131, 142, 255},\n\t\t\t{37, 132, 142, 255},\n\t\t\t{37, 133, 142, 255},\n\t\t\t{36, 134, 142, 255},\n\t\t\t{36, 135, 142, 255},\n\t\t\t{35, 136, 142, 255},\n\t\t\t{35, 137, 142, 
255},\n\t\t\t{35, 138, 141, 255},\n\t\t\t{34, 139, 141, 255},\n\t\t\t{34, 140, 141, 255},\n\t\t\t{34, 141, 141, 255},\n\t\t\t{33, 142, 141, 255},\n\t\t\t{33, 143, 141, 255},\n\t\t\t{33, 144, 141, 255},\n\t\t\t{33, 145, 140, 255},\n\t\t\t{32, 146, 140, 255},\n\t\t\t{32, 146, 140, 255},\n\t\t\t{32, 147, 140, 255},\n\t\t\t{31, 148, 140, 255},\n\t\t\t{31, 149, 139, 255},\n\t\t\t{31, 150, 139, 255},\n\t\t\t{31, 151, 139, 255},\n\t\t\t{31, 152, 139, 255},\n\t\t\t{31, 153, 138, 255},\n\t\t\t{31, 154, 138, 255},\n\t\t\t{30, 155, 138, 255},\n\t\t\t{30, 156, 137, 255},\n\t\t\t{30, 157, 137, 255},\n\t\t\t{31, 158, 137, 255},\n\t\t\t{31, 159, 136, 255},\n\t\t\t{31, 160, 136, 255},\n\t\t\t{31, 161, 136, 255},\n\t\t\t{31, 161, 135, 255},\n\t\t\t{31, 162, 135, 255},\n\t\t\t{32, 163, 134, 255},\n\t\t\t{32, 164, 134, 255},\n\t\t\t{33, 165, 133, 255},\n\t\t\t{33, 166, 133, 255},\n\t\t\t{34, 167, 133, 255},\n\t\t\t{34, 168, 132, 255},\n\t\t\t{35, 169, 131, 255},\n\t\t\t{36, 170, 131, 255},\n\t\t\t{37, 171, 130, 255},\n\t\t\t{37, 172, 130, 255},\n\t\t\t{38, 173, 129, 255},\n\t\t\t{39, 173, 129, 255},\n\t\t\t{40, 174, 128, 255},\n\t\t\t{41, 175, 127, 255},\n\t\t\t{42, 176, 127, 255},\n\t\t\t{44, 177, 126, 255},\n\t\t\t{45, 178, 125, 255},\n\t\t\t{46, 179, 124, 255},\n\t\t\t{47, 180, 124, 255},\n\t\t\t{49, 181, 123, 255},\n\t\t\t{50, 182, 122, 255},\n\t\t\t{52, 182, 121, 255},\n\t\t\t{53, 183, 121, 255},\n\t\t\t{55, 184, 120, 255},\n\t\t\t{56, 185, 119, 255},\n\t\t\t{58, 186, 118, 255},\n\t\t\t{59, 187, 117, 255},\n\t\t\t{61, 188, 116, 255},\n\t\t\t{63, 188, 115, 255},\n\t\t\t{64, 189, 114, 255},\n\t\t\t{66, 190, 113, 255},\n\t\t\t{68, 191, 112, 255},\n\t\t\t{70, 192, 111, 255},\n\t\t\t{72, 193, 110, 255},\n\t\t\t{74, 193, 109, 255},\n\t\t\t{76, 194, 108, 255},\n\t\t\t{78, 195, 107, 255},\n\t\t\t{80, 196, 106, 255},\n\t\t\t{82, 197, 105, 255},\n\t\t\t{84, 197, 104, 255},\n\t\t\t{86, 198, 103, 255},\n\t\t\t{88, 199, 101, 255},\n\t\t\t{90, 200, 100, 255},\n\t\t\t{92, 200, 99, 
255},\n\t\t\t{94, 201, 98, 255},\n\t\t\t{96, 202, 96, 255},\n\t\t\t{99, 203, 95, 255},\n\t\t\t{101, 203, 94, 255},\n\t\t\t{103, 204, 92, 255},\n\t\t\t{105, 205, 91, 255},\n\t\t\t{108, 205, 90, 255},\n\t\t\t{110, 206, 88, 255},\n\t\t\t{112, 207, 87, 255},\n\t\t\t{115, 208, 86, 255},\n\t\t\t{117, 208, 84, 255},\n\t\t\t{119, 209, 83, 255},\n\t\t\t{122, 209, 81, 255},\n\t\t\t{124, 210, 80, 255},\n\t\t\t{127, 211, 78, 255},\n\t\t\t{129, 211, 77, 255},\n\t\t\t{132, 212, 75, 255},\n\t\t\t{134, 213, 73, 255},\n\t\t\t{137, 213, 72, 255},\n\t\t\t{139, 214, 70, 255},\n\t\t\t{142, 214, 69, 255},\n\t\t\t{144, 215, 67, 255},\n\t\t\t{147, 215, 65, 255},\n\t\t\t{149, 216, 64, 255},\n\t\t\t{152, 216, 62, 255},\n\t\t\t{155, 217, 60, 255},\n\t\t\t{157, 217, 59, 255},\n\t\t\t{160, 218, 57, 255},\n\t\t\t{162, 218, 55, 255},\n\t\t\t{165, 219, 54, 255},\n\t\t\t{168, 219, 52, 255},\n\t\t\t{170, 220, 50, 255},\n\t\t\t{173, 220, 48, 255},\n\t\t\t{176, 221, 47, 255},\n\t\t\t{178, 221, 45, 255},\n\t\t\t{181, 222, 43, 255},\n\t\t\t{184, 222, 41, 255},\n\t\t\t{186, 222, 40, 255},\n\t\t\t{189, 223, 38, 255},\n\t\t\t{192, 223, 37, 255},\n\t\t\t{194, 223, 35, 255},\n\t\t\t{197, 224, 33, 255},\n\t\t\t{200, 224, 32, 255},\n\t\t\t{202, 225, 31, 255},\n\t\t\t{205, 225, 29, 255},\n\t\t\t{208, 225, 28, 255},\n\t\t\t{210, 226, 27, 255},\n\t\t\t{213, 226, 26, 255},\n\t\t\t{216, 226, 25, 255},\n\t\t\t{218, 227, 25, 255},\n\t\t\t{221, 227, 24, 255},\n\t\t\t{223, 227, 24, 255},\n\t\t\t{226, 228, 24, 255},\n\t\t\t{229, 228, 25, 255},\n\t\t\t{231, 228, 25, 255},\n\t\t\t{234, 229, 26, 255},\n\t\t\t{236, 229, 27, 255},\n\t\t\t{239, 229, 28, 255},\n\t\t\t{241, 229, 29, 255},\n\t\t\t{244, 230, 30, 255},\n\t\t\t{246, 230, 32, 255},\n\t\t\t{248, 230, 33, 255},\n\t\t\t{251, 231, 35, 255},\n\t\t\t{253, 231, 37, 255},\n\t\t},\n\t}\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/table/concat.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage table\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/aclements/go-gg/generic/slice\"\n)\n\n// Concat returns the concatenation of the rows in each matching group\n// across gs. All Groupings in gs must have the same set of columns\n// (though they need not be in the same order; the column order from\n// gs[0] will be used). The GroupIDs in the returned Grouping will be\n// the union of the GroupIDs in gs.\nfunc Concat(gs ...Grouping) Grouping {\n\tif len(gs) == 0 {\n\t\treturn new(Table)\n\t}\n\n\t// Check that all Groupings have the same set of columns. They\n\t// can be in different orders.\n\tcolSet := map[string]bool{}\n\tfor _, col := range gs[0].Columns() {\n\t\tcolSet[col] = true\n\t}\n\tfor i, g2 := range gs[1:] {\n\t\tdiff := len(g2.Columns()) != len(colSet)\n\t\tif !diff {\n\t\t\tfor _, col := range g2.Columns() {\n\t\t\t\tif !colSet[col] {\n\t\t\t\t\tdiff = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif diff {\n\t\t\tpanic(fmt.Sprintf(\"columns in Groupings 0 and %d differ: %q vs %q\", i+1, gs[0].Columns(), g2.Columns()))\n\t\t}\n\t}\n\n\t// Collect group IDs.\n\thaveGID := map[GroupID]bool{}\n\tgids := []GroupID{}\n\tfor _, g := range gs {\n\t\tfor _, gid := range g.Tables() {\n\t\t\tif haveGID[gid] {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\thaveGID[gid] = true\n\t\t\tgids = append(gids, gid)\n\t\t}\n\t}\n\n\t// Build output groups.\n\tvar ng GroupingBuilder\n\tfor _, gid := range gids {\n\t\t// Build output table.\n\t\tvar nt Builder\n\t\tvar cols []slice.T\n\t\tfor _, col := range gs[0].Columns() {\n\t\t\t// Is it constant?\n\t\t\tisConst := false\n\t\t\tvar cv interface{}\n\t\t\tfor _, g := range gs {\n\t\t\t\tt := g.Table(gid)\n\t\t\t\tif t == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif cv1, ok := t.Const(col); ok {\n\t\t\t\t\tif !isConst {\n\t\t\t\t\t\tisConst = 
true\n\t\t\t\t\t\tcv = cv1\n\t\t\t\t\t} else if cv != cv1 {\n\t\t\t\t\t\tisConst = false\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tisConst = false\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif isConst {\n\t\t\t\tnt.AddConst(col, cv)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Not a constant. Collect slices.\n\t\t\tfor _, g := range gs {\n\t\t\t\tt := g.Table(gid)\n\t\t\t\tif t == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tcols = append(cols, t.Column(col))\n\t\t\t}\n\t\t\tnt.Add(col, slice.Concat(cols...))\n\t\t\tcols = cols[:0]\n\t\t}\n\t\tng.Add(gid, nt.Done())\n\t}\n\treturn ng.Done()\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/table/filter.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage table\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"github.com/aclements/go-gg/generic/slice\"\n)\n\nvar boolType = reflect.TypeOf(false)\n\n// Filter filters g to only rows where pred returns true. pred must be\n// a function that returns bool and takes len(cols) arguments where\n// the type of col[i] is assignable to argument i.\n//\n// TODO: Create a faster batch variant where pred takes slices.\nfunc Filter(g Grouping, pred interface{}, cols ...string) Grouping {\n\t// TODO: Use generic.TypeError.\n\tpredv := reflect.ValueOf(pred)\n\tpredt := predv.Type()\n\tif predt.Kind() != reflect.Func || predt.NumIn() != len(cols) || predt.NumOut() != 1 || predt.Out(0) != boolType {\n\t\tpanic(\"predicate function must be func(col[0], col[1], ...) bool\")\n\t}\n\tif len(cols) == 0 {\n\t\treturn g\n\t}\n\tif len(g.Tables()) == 0 {\n\t\tpanic(fmt.Sprintf(\"unknown column %q\", cols[0]))\n\t}\n\t// Type check arguments.\n\tfor i, col := range cols {\n\t\tcolt := ColType(g, col)\n\t\tif !colt.Elem().AssignableTo(predt.In(i)) {\n\t\t\tpanic(fmt.Sprintf(\"column %d (type %s) is not assignable to predicate argument %d (type %s)\", i, colt.Elem(), i, predt.In(i)))\n\t\t}\n\t}\n\n\targs := make([]reflect.Value, len(cols))\n\tcolvs := make([]reflect.Value, len(cols))\n\tmatch := make([]int, 0)\n\treturn MapTables(g, func(_ GroupID, t *Table) *Table {\n\t\t// Get columns.\n\t\tfor i, col := range cols {\n\t\t\tcolvs[i] = reflect.ValueOf(t.MustColumn(col))\n\t\t}\n\n\t\t// Find the set of row indexes that satisfy pred.\n\t\tmatch = match[:0]\n\t\tfor r, len := 0, t.Len(); r < len; r++ {\n\t\t\tfor c, colv := range colvs {\n\t\t\t\targs[c] = colv.Index(r)\n\t\t\t}\n\t\t\tif predv.Call(args)[0].Bool() {\n\t\t\t\tmatch = append(match, r)\n\t\t\t}\n\t\t}\n\n\t\t// Create the new table.\n\t\tif len(match) 
== t.Len() {\n\t\t\treturn t\n\t\t}\n\t\tvar nt Builder\n\t\tfor _, col := range t.Columns() {\n\t\t\tnt.Add(col, slice.Select(t.Column(col), match))\n\t\t}\n\t\treturn nt.Done()\n\t})\n}\n\n// FilterEq filters g to only rows where the value in col equals val.\nfunc FilterEq(g Grouping, col string, val interface{}) Grouping {\n\tmatch := make([]int, 0)\n\treturn MapTables(g, func(_ GroupID, t *Table) *Table {\n\t\t// Find the set of row indexes that match val.\n\t\tseq := t.MustColumn(col)\n\t\tmatch = match[:0]\n\t\trv := reflect.ValueOf(seq)\n\t\tfor i, len := 0, rv.Len(); i < len; i++ {\n\t\t\tif rv.Index(i).Interface() == val {\n\t\t\t\tmatch = append(match, i)\n\t\t\t}\n\t\t}\n\n\t\tvar nt Builder\n\t\tfor _, col := range t.Columns() {\n\t\t\tnt.Add(col, slice.Select(t.Column(col), match))\n\t\t}\n\t\treturn nt.Done()\n\t})\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/table/group.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage table\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\n\t\"github.com/aclements/go-gg/generic/slice\"\n)\n\n// GroupID identifies a group. GroupIDs form a tree, rooted at\n// RootGroupID (which is also the zero GroupID).\ntype GroupID struct {\n\t*groupNode\n}\n\n// RootGroupID is the root of the GroupID tree.\nvar RootGroupID = GroupID{}\n\ntype groupNode struct {\n\tparent GroupID\n\tlabel  interface{}\n}\n\n// String returns the path to GroupID g in the form \"/l1/l2/l3\". If g\n// is RootGroupID, it returns \"/\". Each level in the group is formed\n// by formatting the label using fmt's \"%v\" verb. Note that this is\n// purely diagnostic; this string may not uniquely identify g.\nfunc (g GroupID) String() string {\n\tif g == RootGroupID {\n\t\treturn \"/\"\n\t}\n\tparts := []string{}\n\tfor p := g; p != RootGroupID; p = p.parent {\n\t\tpart := fmt.Sprintf(\"/%v\", p.label)\n\t\tparts = append(parts, part)\n\t}\n\tfor i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 {\n\t\tparts[i], parts[j] = parts[j], parts[i]\n\t}\n\treturn strings.Join(parts, \"\")\n}\n\n// Extend returns a new GroupID that is a child of GroupID g. The\n// returned GroupID will not be equal to any existing GroupID (even if\n// label is not unique among g's children). The label is primarily\n// diagnostic; the table package uses it only when printing tables,\n// but callers may store semantic information in group labels.\nfunc (g GroupID) Extend(label interface{}) GroupID {\n\treturn GroupID{&groupNode{g, label}}\n}\n\n// Parent returns the parent of g. 
The parent of RootGroupID is\n// RootGroupID.\nfunc (g GroupID) Parent() GroupID {\n\tif g == RootGroupID {\n\t\treturn RootGroupID\n\t}\n\treturn g.parent\n}\n\n// Label returns the label of g.\nfunc (g GroupID) Label() interface{} {\n\treturn g.label\n}\n\n// GroupBy sub-divides all groups such that all of the rows in each\n// group have equal values for all of the named columns. The relative\n// order of rows with equal values for the named columns is\n// maintained. Grouped-by columns become constant columns within each\n// group.\nfunc GroupBy(g Grouping, cols ...string) Grouping {\n\t// TODO: This would generate much less garbage if we grouped\n\t// all of cols in one pass.\n\t//\n\t// TODO: This constructs one slice per column per input group,\n\t// but it would be even better if it constructed just one\n\t// slice per column.\n\n\tif len(cols) == 0 {\n\t\treturn g\n\t}\n\n\tvar out GroupingBuilder\n\tfor _, gid := range g.Tables() {\n\t\tt := g.Table(gid)\n\n\t\tif cv, ok := t.Const(cols[0]); ok {\n\t\t\t// Grouping by a constant is trivial.\n\t\t\tsubgid := gid.Extend(cv)\n\t\t\tout.Add(subgid, t)\n\t\t\tcontinue\n\t\t}\n\n\t\tc := t.MustColumn(cols[0])\n\n\t\t// Create an index on c.\n\t\ttype subgroupInfo struct {\n\t\t\tkey  interface{}\n\t\t\trows []int\n\t\t}\n\t\tsubgroups := []subgroupInfo{}\n\t\tkeys := make(map[interface{}]int)\n\t\tseq := reflect.ValueOf(c)\n\t\tfor i := 0; i < seq.Len(); i++ {\n\t\t\tx := seq.Index(i).Interface()\n\t\t\tsg, ok := keys[x]\n\t\t\tif !ok {\n\t\t\t\tsg = len(subgroups)\n\t\t\t\tsubgroups = append(subgroups, subgroupInfo{x, []int{}})\n\t\t\t\tkeys[x] = sg\n\t\t\t}\n\t\t\tsubgroup := &subgroups[sg]\n\t\t\tsubgroup.rows = append(subgroup.rows, i)\n\t\t}\n\n\t\t// Count rows in each subgroup.\n\t\toffsets := make([]int, 1+len(subgroups))\n\t\tfor i := range subgroups {\n\t\t\toffsets[i+1] = offsets[i] + len(subgroups[i].rows)\n\t\t}\n\n\t\t// Split each column.\n\t\tbuilders := make([]Builder, len(subgroups))\n\t\tfor 
_, name := range t.Columns() {\n\t\t\tif name == cols[0] {\n\t\t\t\t// Promote the group-by column to a\n\t\t\t\t// constant.\n\t\t\t\tfor i := range subgroups {\n\t\t\t\t\tbuilders[i].AddConst(name, subgroups[i].key)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif cv, ok := t.Const(name); ok {\n\t\t\t\t// Keep constants constant.\n\t\t\t\tfor i := range builders {\n\t\t\t\t\tbuilders[i].AddConst(name, cv)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Create a slice for all of the values.\n\t\t\tcol := t.Column(name)\n\t\t\tncol := reflect.MakeSlice(reflect.TypeOf(col), t.Len(), t.Len())\n\n\t\t\t// Shuffle each subgroup into ncol.\n\t\t\tfor i := range subgroups {\n\t\t\t\tsubcol := ncol.Slice(offsets[i], offsets[i+1]).Interface()\n\t\t\t\tslice.SelectInto(subcol, col, subgroups[i].rows)\n\t\t\t\tbuilders[i].Add(name, subcol)\n\t\t\t}\n\t\t}\n\n\t\t// Add tables to output Grouping.\n\t\tfor i := range builders {\n\t\t\tsubgid := gid.Extend(subgroups[i].key)\n\t\t\tout.Add(subgid, builders[i].Done())\n\t\t}\n\t}\n\n\treturn GroupBy(out.Done(), cols[1:]...)\n}\n\n// Ungroup concatenates adjacent Tables in g that share a group parent\n// into a Table identified by the parent, undoing the effects of the\n// most recent GroupBy operation.\nfunc Ungroup(g Grouping) Grouping {\n\tgroups := g.Tables()\n\tif len(groups) == 0 || len(groups) == 1 && groups[0] == RootGroupID {\n\t\treturn g\n\t}\n\n\tvar out GroupingBuilder\n\trunGid := groups[0].Parent()\n\trunTabs := []*Table{}\n\tfor _, gid := range groups {\n\t\tif gid.Parent() != runGid {\n\t\t\t// Flush the run.\n\t\t\tout.Add(runGid, concatRows(runTabs...))\n\n\t\t\trunGid = gid.Parent()\n\t\t\trunTabs = runTabs[:0]\n\t\t}\n\t\trunTabs = append(runTabs, g.Table(gid))\n\t}\n\t// Flush the last run.\n\tout.Add(runGid, concatRows(runTabs...))\n\n\treturn out.Done()\n}\n\n// Flatten concatenates all of the groups in g into a single Table.\n// This is equivalent to repeatedly Ungrouping g.\nfunc Flatten(g Grouping) 
*Table {\n\tgroups := g.Tables()\n\tswitch len(groups) {\n\tcase 0:\n\t\treturn new(Table)\n\n\tcase 1:\n\t\treturn g.Table(groups[0])\n\t}\n\n\ttabs := make([]*Table, len(groups))\n\tfor i, gid := range groups {\n\t\ttabs[i] = g.Table(gid)\n\t}\n\n\treturn concatRows(tabs...)\n}\n\n// concatRows concatenates the rows of tabs into a single Table. All\n// Tables in tabs must all have the same column set.\nfunc concatRows(tabs ...*Table) *Table {\n\t// TODO: Consider making this public. It would have to check\n\t// the columns, and we would probably also want a concatCols.\n\n\tswitch len(tabs) {\n\tcase 0:\n\t\treturn new(Table)\n\n\tcase 1:\n\t\treturn tabs[0]\n\t}\n\n\t// Construct each column.\n\tvar out Builder\n\tseqs := make([]slice.T, len(tabs))\n\tfor _, col := range tabs[0].Columns() {\n\t\tseqs = seqs[:0]\n\t\tfor _, tab := range tabs {\n\t\t\tseqs = append(seqs, tab.Column(col))\n\t\t}\n\t\tout.Add(col, slice.Concat(seqs...))\n\t}\n\n\treturn out.Done()\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/table/head.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage table\n\nimport \"reflect\"\n\n// Head returns the first n rows in each Table of g.\nfunc Head(g Grouping, n int) Grouping {\n\treturn headTail(g, n, false)\n}\n\n// Tail returns the last n rows in each Table of g.\nfunc Tail(g Grouping, n int) Grouping {\n\treturn headTail(g, n, true)\n}\n\nfunc headTail(g Grouping, n int, tail bool) Grouping {\n\treturn MapTables(g, func(_ GroupID, t *Table) *Table {\n\t\tif t.Len() <= n {\n\t\t\treturn t\n\t\t}\n\n\t\tvar nt Builder\n\t\tfor _, col := range t.Columns() {\n\t\t\tif cv, ok := t.Const(col); ok {\n\t\t\t\tnt.AddConst(col, cv)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tcv := reflect.ValueOf(t.Column(col))\n\t\t\tif tail {\n\t\t\t\tcv = cv.Slice(t.Len()-n, t.Len())\n\t\t\t} else {\n\t\t\t\tcv = cv.Slice(0, n)\n\t\t\t}\n\t\t\tnt.Add(col, cv.Interface())\n\t\t}\n\t\treturn nt.Done()\n\t})\n}\n\n// HeadTables returns the first n tables in g.\nfunc HeadTables(g Grouping, n int) Grouping {\n\treturn headTailTables(g, n, false)\n}\n\n// TailTables returns the first n tables in g.\nfunc TailTables(g Grouping, n int) Grouping {\n\treturn headTailTables(g, n, true)\n}\n\nfunc headTailTables(g Grouping, n int, tail bool) Grouping {\n\ttables := g.Tables()\n\tif len(tables) <= n {\n\t\treturn g\n\t} else if tail {\n\t\ttables = tables[len(tables)-n:]\n\t} else {\n\t\ttables = tables[:n]\n\t}\n\n\tvar ng GroupingBuilder\n\tfor _, gid := range tables {\n\t\tng.Add(gid, g.Table(gid))\n\t}\n\treturn ng.Done()\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/table/join.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage table\n\nimport (\n\t\"reflect\"\n\n\t\"github.com/aclements/go-gg/generic/slice\"\n)\n\n// Join joins g1 and g2 on tables with identical group IDs where col1\n// in g1 equals col2 in g2. It maintains the group order of g1, except\n// that groups that aren't in g2 are removed, and maintains the row\n// order of g1, followed by the row order of g2.\n//\n// TODO: Support join on more than one column.\nfunc Join(g1 Grouping, col1 string, g2 Grouping, col2 string) Grouping {\n\tvar ng GroupingBuilder\n\tfor _, gid := range g1.Tables() {\n\t\tt1, t2 := g1.Table(gid), g2.Table(gid)\n\t\tif t2 == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t// TODO: Optimize for cases where col1 and/or col2 are\n\t\t// constant.\n\n\t\t// Index col2 in t2.\n\t\tridx := make(map[interface{}][]int)\n\t\trv := reflect.ValueOf(t2.MustColumn(col2))\n\t\tfor i, l := 0, rv.Len(); i < l; i++ {\n\t\t\tv := rv.Index(i).Interface()\n\t\t\tridx[v] = append(ridx[v], i)\n\t\t}\n\n\t\t// For each row in t1, find the matching rows in col2\n\t\t// and build up the row indexes for t1 and t2.\n\t\tidx1, idx2 := []int{}, []int{}\n\t\tlv := reflect.ValueOf(t1.MustColumn(col1))\n\t\tfor i, l := 0, lv.Len(); i < l; i++ {\n\t\t\tr := ridx[lv.Index(i).Interface()]\n\t\t\tfor range r {\n\t\t\t\tidx1 = append(idx1, i)\n\t\t\t}\n\t\t\tidx2 = append(idx2, r...)\n\t\t}\n\n\t\t// Build the joined table.\n\t\tvar nt Builder\n\t\tfor _, col := range t1.Columns() {\n\t\t\tif cv, ok := t1.Const(col); ok {\n\t\t\t\tnt.Add(col, cv)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tnt.Add(col, slice.Select(t1.Column(col), idx1))\n\t\t}\n\t\tfor _, col := range t2.Columns() {\n\t\t\t// Often the join column is the same in both\n\t\t\t// and we can skip it because we added it from\n\t\t\t// the first table.\n\t\t\tif col == col1 && col == col2 
{\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif cv, ok := t2.Const(col); ok {\n\t\t\t\tnt.Add(col, cv)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tnt.Add(col, slice.Select(t2.Column(col), idx2))\n\t\t}\n\n\t\tng.Add(gid, nt.Done())\n\t}\n\treturn ng.Done()\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/table/map.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage table\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"github.com/aclements/go-gg/generic\"\n)\n\n// MapTables applies f to each Table in g and returns a new Grouping\n// with the same group structure as g, but with the Tables returned by\n// f.\nfunc MapTables(g Grouping, f func(gid GroupID, table *Table) *Table) Grouping {\n\tvar out GroupingBuilder\n\tfor _, gid := range g.Tables() {\n\t\tout.Add(gid, f(gid, g.Table(gid)))\n\t}\n\treturn out.Done()\n}\n\n// MapCols applies f to a set of input columns to construct a set of\n// new output columns.\n//\n// For each Table in g, MapCols calls f(in[0], in[1], ..., out[0],\n// out[1], ...) where in[i] is column incols[i]. f should process the\n// values in the input column slices and fill output columns slices\n// out[j] accordingly. MapCols returns a new Grouping that adds each\n// outcols[j] bound to out[j].\n//\n// If all of the input columns are constant for a given table, MapCols\n// will call f with all slices of length 1. 
The input column slices\n// will contain the constant column values and MapCols will bind each\n// output column value out[i][0] as a constant.\nfunc MapCols(g Grouping, f interface{}, incols ...string) func(outcols ...string) Grouping {\n\treturn func(outcols ...string) Grouping {\n\t\tfv := reflect.ValueOf(f)\n\t\tif fv.Kind() != reflect.Func {\n\t\t\tpanic(&generic.TypeError{fv.Type(), nil, \"must be a function\"})\n\t\t}\n\t\tft := fv.Type()\n\t\tif ft.NumIn() != len(incols)+len(outcols) {\n\t\t\tpanic(&generic.TypeError{ft, nil, fmt.Sprintf(\"has the wrong number of arguments; expected %d\", len(incols)+len(outcols))})\n\t\t}\n\t\tif ft.NumOut() != 0 {\n\t\t\tpanic(&generic.TypeError{ft, nil, \"has the wrong number of results; expected 0\"})\n\t\t}\n\n\t\t// Create output column slices.\n\t\ttotalRows := 0\n\t\tfor _, gid := range g.Tables() {\n\t\t\tt := g.Table(gid)\n\t\tcolloop:\n\t\t\tfor _, incol := range incols {\n\t\t\t\tif _, ok := t.Const(incol); !ok {\n\t\t\t\t\ttotalRows += g.Table(gid).Len()\n\t\t\t\t\tbreak colloop\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tocols := make([]reflect.Value, len(outcols))\n\t\tfor i := range ocols {\n\t\t\tocols[i] = reflect.MakeSlice(ft.In(i+len(incols)), totalRows, totalRows)\n\t\t}\n\n\t\t// Apply f to each group.\n\t\tvar out GroupingBuilder\n\t\targs := make([]reflect.Value, len(incols)+len(outcols))\n\t\topos := 0\n\t\tfor _, gid := range g.Tables() {\n\t\t\tt := g.Table(gid)\n\n\t\t\t// Are all inputs are constants?\n\t\t\tallConst := true\n\t\t\tfor _, incol := range incols {\n\t\t\t\tif _, ok := t.Const(incol); !ok {\n\t\t\t\t\tallConst = false\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif allConst {\n\t\t\t\tfor i, incol := range incols {\n\t\t\t\t\tcv, _ := t.Const(incol)\n\t\t\t\t\targs[i] = reflect.MakeSlice(ColType(t, incol), 1, 1)\n\t\t\t\t\targs[i].Index(0).Set(reflect.ValueOf(cv))\n\t\t\t\t}\n\t\t\t\tfor i, ocol := range ocols {\n\t\t\t\t\targs[i+len(incols)] = reflect.MakeSlice(ocol.Type(), 1, 
1)\n\t\t\t\t}\n\n\t\t\t\tfv.Call(args)\n\n\t\t\t\ttb := NewBuilder(t)\n\t\t\t\tfor i, outcol := range outcols {\n\t\t\t\t\ttb.AddConst(outcol, args[i+len(incols)].Index(0).Interface())\n\t\t\t\t}\n\t\t\t\tout.Add(gid, tb.Done())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Prepare arguments.\n\t\t\tfor i, incol := range incols {\n\t\t\t\targs[i] = reflect.ValueOf(t.MustColumn(incol))\n\t\t\t}\n\t\t\tfor i, ocol := range ocols {\n\t\t\t\targs[i+len(incols)] = ocol.Slice(opos, opos+t.Len())\n\t\t\t}\n\t\t\topos += t.Len()\n\n\t\t\t// Call f.\n\t\t\tfv.Call(args)\n\n\t\t\t// Add output columns.\n\t\t\ttb := NewBuilder(t)\n\t\t\tfor i, outcol := range outcols {\n\t\t\t\ttb.Add(outcol, args[i+len(incols)].Interface())\n\t\t\t}\n\t\t\tout.Add(gid, tb.Done())\n\t\t}\n\t\treturn out.Done()\n\t}\n}\n\n// Rename returns g with column 'from' renamed to 'to'. The column\n// retains its position.\nfunc Rename(g Grouping, from, to string) Grouping {\n\treturn MapTables(g, func(_ GroupID, t *Table) *Table {\n\t\tt.MustColumn(from)\n\t\tvar nt Builder\n\t\tfor _, col := range t.Columns() {\n\t\t\tif col == to {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tncol := col\n\t\t\tif col == from {\n\t\t\t\tncol = to\n\t\t\t}\n\n\t\t\tif cv, ok := t.Const(col); ok {\n\t\t\t\tnt.AddConst(ncol, cv)\n\t\t\t} else {\n\t\t\t\tnt.Add(ncol, t.Column(col))\n\t\t\t}\n\t\t}\n\t\treturn nt.Done()\n\t})\n}\n\n// Remove returns g with column 'col' removed.\nfunc Remove(g Grouping, col string) Grouping {\n\treturn MapTables(g, func(_ GroupID, t *Table) *Table {\n\t\tt.MustColumn(col)\n\t\tvar nt Builder\n\t\tfor _, col2 := range t.Columns() {\n\t\t\tif col == col2 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif cv, ok := t.Const(col2); ok {\n\t\t\t\tnt.AddConst(col2, cv)\n\t\t\t} else {\n\t\t\t\tnt.Add(col2, t.Column(col2))\n\t\t\t}\n\t\t}\n\t\treturn nt.Done()\n\t})\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/table/new.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage table\n\nimport (\n\t\"reflect\"\n\t\"strconv\"\n\n\t\"github.com/aclements/go-gg/generic\"\n)\n\n// TableFromStructs converts a []T where T is a struct to a Table\n// where the columns of the table correspond to T's exported fields.\nfunc TableFromStructs(structs Slice) *Table {\n\ts := reflectSlice(structs)\n\tst := s.Type()\n\tif st.Elem().Kind() != reflect.Struct {\n\t\tpanic(&generic.TypeError{st, nil, \"is not a slice of struct\"})\n\t}\n\n\tvar t Builder\n\trows := s.Len()\n\tvar rec func(reflect.Type, []int)\n\trec = func(typ reflect.Type, index []int) {\n\t\tfor fn := 0; fn < typ.NumField(); fn++ {\n\t\t\tfield := typ.Field(fn)\n\t\t\tif field.PkgPath != \"\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\toldIndexLen := len(index)\n\t\t\tindex = append(index, field.Index...)\n\t\t\tif field.Anonymous {\n\t\t\t\trec(field.Type, index)\n\t\t\t} else {\n\t\t\t\tcol := reflect.MakeSlice(reflect.SliceOf(field.Type), rows, rows)\n\t\t\t\tfor i := 0; i < rows; i++ {\n\t\t\t\t\tcol.Index(i).Set(s.Index(i).FieldByIndex(index))\n\t\t\t\t}\n\t\t\t\tt.Add(field.Name, col.Interface())\n\t\t\t}\n\t\t\tindex = index[:oldIndexLen]\n\t\t}\n\t}\n\trec(st.Elem(), []int{})\n\treturn t.Done()\n}\n\n// TableFromStrings converts a [][]string to a Table. This is intended\n// for processing external data, such as from CSV files. 
If coerce is\n// true, TableFromStrings will convert columns to []int or []float\n// when every string in that column is accepted by strconv.ParseInt or\n// strconv.ParseFloat, respectively.\nfunc TableFromStrings(cols []string, rows [][]string, coerce bool) *Table {\n\tvar t Builder\n\tfor i, col := range cols {\n\t\tslice := make([]string, len(rows))\n\t\tfor j, row := range rows {\n\t\t\tslice[j] = row[i]\n\t\t}\n\n\t\tvar colData interface{} = slice\n\t\tswitch {\n\t\tcase coerce && len(slice) > 0:\n\t\t\t// Try []int.\n\t\t\tvar err error\n\t\t\tfor _, str := range slice {\n\t\t\t\t_, err = strconv.ParseInt(str, 10, 0)\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err == nil {\n\t\t\t\tnslice := make([]int, len(rows))\n\t\t\t\tfor i, str := range slice {\n\t\t\t\t\tv, _ := strconv.ParseInt(str, 10, 0)\n\t\t\t\t\tnslice[i] = int(v)\n\t\t\t\t}\n\t\t\t\tcolData = nslice\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\t// Try []float64. This must be done after\n\t\t\t// []int. It's also more expensive.\n\t\t\tfor _, str := range slice {\n\t\t\t\t_, err = strconv.ParseFloat(str, 64)\n\t\t\t\tif err != nil {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif err == nil {\n\t\t\t\tnslice := make([]float64, len(rows))\n\t\t\t\tfor i, str := range slice {\n\t\t\t\t\tnslice[i], _ = strconv.ParseFloat(str, 64)\n\t\t\t\t}\n\t\t\t\tcolData = nslice\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tt.Add(col, colData)\n\t}\n\treturn t.Done()\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/table/new_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage table\n\nimport (\n\t\"bytes\"\n\t\"encoding/csv\"\n\t\"testing\"\n)\n\nfunc ExampleTableFromStructs() {\n\ttype prez struct {\n\t\tName  string\n\t\tTerms int\n\t}\n\tdata := []prez{{\"Washington\", 2}, {\"Adams\", 1}, {\"Jefferson\", 2}}\n\tPrint(TableFromStructs(data))\n\t// Output:\n\t// Name        Terms\n\t// Washington      2\n\t// Adams           1\n\t// Jefferson       2\n}\n\nfunc TestTableFromStructs(t *testing.T) {\n\t// The example already tests basic functionality.\n\tshouldPanic(t, \"not a slice\", func() {\n\t\tTableFromStructs(42)\n\t})\n\tshouldPanic(t, \"not a slice of struct\", func() {\n\t\tTableFromStructs([]int{42})\n\t})\n}\n\nfunc TestTableFromStructsEmbedded(t *testing.T) {\n\ttype T struct {\n\t\tA int\n\t}\n\ttype U struct {\n\t\tT\n\t}\n\tdata := []U{{T{1}}}\n\ttab := TableFromStructs(data)\n\tif want := []string{\"A\"}; !de(want, tab.Columns()) {\n\t\tt.Errorf(\"columns should be %v; got %v\", want, tab.Columns())\n\t}\n}\n\nfunc TestTableFromStructsUnexported(t *testing.T) {\n\ttype T struct {\n\t\ta int\n\t\tA int\n\t}\n\tdata := []T{{1, 2}}\n\ttab := TableFromStructs(data)\n\tif want := []string{\"A\"}; !de(want, tab.Columns()) {\n\t\tt.Errorf(\"columns should be %v; got %v\", want, tab.Columns())\n\t}\n}\n\nfunc TestTableFromStructsEmbeddedUnexported(t *testing.T) {\n\ttype private struct {\n\t\tA int\n\t\tb int\n\t}\n\ttype U struct {\n\t\tprivate\n\t\tC int\n\t}\n\tdata := []U{{private{1, 2}, 3}}\n\ttab := TableFromStructs(data)\n\tif want := []string{\"A\", \"C\"}; !de(want, tab.Columns()) {\n\t\tt.Errorf(\"columns should be %v; got %v\", want, tab.Columns())\n\t}\n}\n\nfunc ExampleTableFromStrings() {\n\tconst csvData = `name,terms\nWashington,2\nAdams,1\nJefferson,2`\n\trows, _ := 
csv.NewReader(bytes.NewBufferString(csvData)).ReadAll()\n\tPrint(TableFromStrings(rows[0], rows[1:], true))\n\t// Output:\n\t// name        terms\n\t// Washington      2\n\t// Adams           1\n\t// Jefferson       2\n}\n\nfunc TestTableFromStrings(t *testing.T) {\n\tcsvData := `a,b,c\nA,1,1.0\nB,2,2.0\n`\n\trows, _ := csv.NewReader(bytes.NewBufferString(csvData)).ReadAll()\n\n\t// No coercion.\n\ttab := TableFromStrings(rows[0], rows[1:], false)\n\twant := new(Builder).\n\t\tAdd(\"a\", []string{\"A\", \"B\"}).\n\t\tAdd(\"b\", []string{\"1\", \"2\"}).\n\t\tAdd(\"c\", []string{\"1.0\", \"2.0\"}).\n\t\tDone()\n\tif !equal(want, tab) {\n\t\tt.Errorf(\"want:\\n%sgot:\\n%s\", groupString(want), groupString(tab))\n\t}\n\n\t// Coercion.\n\ttab = TableFromStrings(rows[0], rows[1:], true)\n\twant = new(Builder).\n\t\tAdd(\"a\", []string{\"A\", \"B\"}).\n\t\tAdd(\"b\", []int{1, 2}).\n\t\tAdd(\"c\", []float64{1, 2}).\n\t\tDone()\n\tif !equal(want, tab) {\n\t\tt.Errorf(\"want:\\n%sgot:\\n%s\", groupString(want), groupString(tab))\n\t}\n\n\t// Coercion inhibited by last row.\n\tcsvData += `C,x,x`\n\trows, _ = csv.NewReader(bytes.NewBufferString(csvData)).ReadAll()\n\n\ttab = TableFromStrings(rows[0], rows[1:], true)\n\twant = new(Builder).\n\t\tAdd(\"a\", []string{\"A\", \"B\", \"C\"}).\n\t\tAdd(\"b\", []string{\"1\", \"2\", \"x\"}).\n\t\tAdd(\"c\", []string{\"1.0\", \"2.0\", \"x\"}).\n\t\tDone()\n\tif !equal(want, tab) {\n\t\tt.Errorf(\"want:\\n%sgot:\\n%s\", groupString(want), groupString(tab))\n\t}\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/table/pivot.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage table\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"github.com/aclements/go-gg/generic\"\n)\n\n// Pivot converts rows of g into columns. label and value must name\n// columns in g, and the label column must have type []string. Pivot\n// returns a Grouping with a new column named after each distinct\n// value in the label column, where the values in that column\n// correspond to the values from the value column. All other columns\n// (besides label and value) are copied to the output. If, for a given\n// column in an output row, no input row has that column in the label\n// column, the output cell will have the zero value for its type.\nfunc Pivot(g Grouping, label, value string) Grouping {\n\t// Find all unique values of label. These are the new columns.\n\tlabels := []string{}\n\tlset := map[string]int{}\n\tfor _, gid := range g.Tables() {\n\t\tfor _, l := range g.Table(gid).MustColumn(label).([]string) {\n\t\t\tif _, ok := lset[l]; !ok {\n\t\t\t\tlset[l] = len(lset)\n\t\t\t\tlabels = append(labels, l)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Get all columns that are not label or value.\n\tgroupCols := []string{}\n\tfor _, col := range g.Columns() {\n\t\tif col != label && col != value {\n\t\t\tgroupCols = append(groupCols, col)\n\t\t}\n\t}\n\n\treturn MapTables(g, func(_ GroupID, t *Table) *Table {\n\t\tvar nt Builder\n\n\t\t// Group by all other columns. 
Each group in gg\n\t\t// becomes an output row.\n\t\tgg := GroupBy(t, groupCols...)\n\n\t\t// Copy grouped-by values.\n\t\tfor _, groupCol := range groupCols {\n\t\t\tcv := reflect.MakeSlice(reflect.TypeOf(t.Column(groupCol)), len(gg.Tables()), len(gg.Tables()))\n\t\t\tfor i, gid := range gg.Tables() {\n\t\t\t\tsub := gg.Table(gid)\n\t\t\t\tcv.Index(i).Set(reflect.ValueOf(sub.Column(groupCol)).Index(0))\n\t\t\t}\n\t\t\tnt.Add(groupCol, cv.Interface())\n\t\t}\n\n\t\t// Initialize new columns.\n\t\tnewCols := make([]reflect.Value, len(lset))\n\t\tvt := reflect.TypeOf(t.MustColumn(value))\n\t\tfor i := range newCols {\n\t\t\tnewCols[i] = reflect.MakeSlice(vt, len(gg.Tables()), len(gg.Tables()))\n\t\t}\n\n\t\t// Fill in new columns.\n\t\tfor i, gid := range gg.Tables() {\n\t\t\tsub := gg.Table(gid)\n\n\t\t\tvcol := reflect.ValueOf(sub.MustColumn(value))\n\t\t\tfor j, l := range sub.MustColumn(label).([]string) {\n\t\t\t\tval := vcol.Index(j)\n\t\t\t\tnewCols[lset[l]].Index(i).Set(val)\n\t\t\t}\n\t\t}\n\n\t\t// Add new columns to output table.\n\t\tfor i, newCol := range newCols {\n\t\t\tnt.Add(labels[i], newCol.Interface())\n\t\t}\n\n\t\treturn nt.Done()\n\t})\n}\n\n// Unpivot converts columns of g into rows. The returned Grouping\n// consists of the columns of g *not* listed in cols, plus two columns\n// named by the label and value arguments. For each input row in g,\n// the returned Grouping will have len(cols) output rows. The i'th\n// such output row corresponds to column cols[i] in the input row. The\n// label column will contain the name of the unpivoted column,\n// cols[i], and the value column will contain that column's value from\n// the input row. The values of all other columns in the input row\n// will be repeated across the output rows. 
All columns in cols must\n// have the same type.\nfunc Unpivot(g Grouping, label, value string, cols ...string) Grouping {\n\tif len(cols) == 0 {\n\t\tpanic(\"Unpivot requires at least 1 column\")\n\t}\n\n\tcolSet := map[string]bool{}\n\tfor _, col := range cols {\n\t\tcolSet[col] = true\n\t}\n\n\treturn MapTables(g, func(_ GroupID, t *Table) *Table {\n\t\tvar nt Builder\n\n\t\t// Repeat all other columns len(cols) times.\n\t\tntlen := t.Len() * len(cols)\n\t\tfor _, name := range t.Columns() {\n\t\t\tif colSet[name] || name == label || name == value {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tcol := reflect.ValueOf(t.Column(name))\n\t\t\tncol := reflect.MakeSlice(col.Type(), ntlen, ntlen)\n\t\t\tfor i, l := 0, col.Len(); i < l; i++ {\n\t\t\t\tv := col.Index(i)\n\t\t\t\tfor j := range cols {\n\t\t\t\t\tncol.Index(i*len(cols) + j).Set(v)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tnt.Add(name, ncol.Interface())\n\t\t}\n\n\t\t// Get input columns.\n\t\tvar vt reflect.Type\n\t\tcolvs := make([]reflect.Value, len(cols))\n\t\tfor i, col := range cols {\n\t\t\tcolvs[i] = reflect.ValueOf(t.MustColumn(col))\n\t\t\tif i == 0 {\n\t\t\t\tvt = colvs[i].Type()\n\t\t\t} else if vt != colvs[i].Type() {\n\t\t\t\tpanic(&generic.TypeError{vt, colvs[i].Type(), fmt.Sprintf(\"; cannot Unpivot columns %q and %q with different types\", cols[0], col)})\n\t\t\t}\n\t\t}\n\n\t\t// Create label and value columns.\n\t\tlcol := make([]string, 0, ntlen)\n\t\tvcol := reflect.MakeSlice(vt, ntlen, ntlen)\n\t\tfor i := 0; i < t.Len(); i++ {\n\t\t\tlcol = append(lcol, cols...)\n\t\t\tfor j, colv := range colvs {\n\t\t\t\tvcol.Index(i*len(cols) + j).Set(colv.Index(i))\n\t\t\t}\n\t\t}\n\t\tnt.Add(label, lcol).Add(value, vcol.Interface())\n\n\t\treturn nt.Done()\n\t})\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/table/pivot_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage table\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n)\n\nvar stateTemp = TableFromStrings(\n\t[]string{\"state\", \"high\", \"low\"},\n\t[][]string{\n\t\t{\"Alabama\", \"122\", \"-27\"},\n\t\t{\"Alaska\", \"100\", \"-80\"},\n\t}, true)\n\nfunc ExampleUnpivot() {\n\tfmt.Println(\"Original table\")\n\tPrint(stateTemp)\n\tfmt.Println()\n\tfmt.Println(\"Unpivoted table\")\n\tPrint(Unpivot(stateTemp, \"kind\", \"temperature\", \"high\", \"low\"))\n\t// Output:\n\t//\n\t// Original table\n\t// state    high  low\n\t// Alabama   122  -27\n\t// Alaska    100  -80\n\t//\n\t// Unpivoted table\n\t// state    kind  temperature\n\t// Alabama  high          122\n\t// Alabama  low           -27\n\t// Alaska   high          100\n\t// Alaska   low           -80\n}\n\nvar stateTempByKind = Unpivot(stateTemp, \"kind\", \"temperature\", \"high\", \"low\")\n\nfunc ExamplePivot() {\n\tfmt.Println(\"Original table\")\n\tPrint(stateTempByKind)\n\tfmt.Println()\n\tfmt.Println(\"Pivoted table\")\n\tPrint(Pivot(stateTempByKind, \"kind\", \"temperature\"))\n\t// Output:\n\t//\n\t// Original table\n\t// state    kind  temperature\n\t// Alabama  high          122\n\t// Alabama  low           -27\n\t// Alaska   high          100\n\t// Alaska   low           -80\n\t//\n\t// Pivoted table\n\t// state    high  low\n\t// Alabama   122  -27\n\t// Alaska    100  -80\n}\n\nfunc TestUnpivot(t *testing.T) {\n\ttab := new(Builder).Add(\"x\", []int{}).Add(\"y\", []float64{}).Done()\n\tshouldPanic(t, \"at least 1 column\", func() {\n\t\tUnpivot(tab, \"a\", \"b\")\n\t})\n\tshouldPanic(t, \"different types\", func() {\n\t\tUnpivot(tab, \"a\", \"b\", \"x\", \"y\")\n\t})\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/table/print.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage table\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"reflect\"\n\t\"strings\"\n)\n\n// TODO: Have a format struct with options for things like column\n// separator, and header separator. Provide some defaults ones for,\n// e.g., Markdown, CSV, TSV, and such. Make top-level Print and Fprint\n// call methods in some default format.\n\n// Print(...) is shorthand for Fprint(os.Stderr, ...).\nfunc Print(g Grouping, formats ...string) error {\n\treturn Fprint(os.Stdout, g, formats...)\n}\n\n// Fprint prints Grouping g to w. formats[i] specifies a fmt-style\n// format string for column i. If there are more columns than formats,\n// remaining columns are formatted with %v (in particular, formats may\n// be omitted entirely to use %v for all columns). Numeric columns are\n// right aligned; all other column types are left aligned.\nfunc Fprint(w io.Writer, g Grouping, formats ...string) error {\n\tif g.Columns() == nil {\n\t\treturn nil\n\t}\n\n\t// Convert each column to strings.\n\tss := make([][]string, len(g.Columns()))\n\trowFmts := make([]string, len(g.Columns()))\n\tfor i, col := range g.Columns() {\n\t\tformat := \"%v\"\n\t\tif i < len(formats) {\n\t\t\tformat = formats[i]\n\t\t}\n\n\t\t// Format column.\n\t\tvar valKind reflect.Kind\n\t\tss[i] = []string{col}\n\t\tfor _, gid := range g.Tables() {\n\t\t\tseq := reflect.ValueOf(g.Table(gid).Column(col))\n\t\t\tfor row := 0; row < seq.Len(); row++ {\n\t\t\t\tstr := fmt.Sprintf(format, seq.Index(row).Interface())\n\t\t\t\tss[i] = append(ss[i], str)\n\t\t\t}\n\n\t\t\tif valKind == reflect.Invalid {\n\t\t\t\tvalKind = seq.Type().Elem().Kind()\n\t\t\t}\n\t\t}\n\n\t\t// Find column width.\n\t\twidth := 0\n\t\tfor _, s := range ss[i] {\n\t\t\tif len(s) > width {\n\t\t\t\twidth = len(s)\n\t\t\t}\n\t\t}\n\n\t\t// If it's a numeric column, 
right align.\n\t\t//\n\t\t// TODO: Even better would be to decimal align, though\n\t\t// that may require some understanding of the format;\n\t\t// or we could only do it for the default format.\n\t\tswitch valKind {\n\t\tcase reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64:\n\t\t\twidth = -width\n\t\t}\n\n\t\tif i == len(g.Columns())-1 && width > 0 {\n\t\t\t// Don't pad the last column.\n\t\t\trowFmts[i] = \"%s\"\n\t\t} else {\n\t\t\trowFmts[i] = fmt.Sprintf(\"%%%ds\", -width)\n\t\t}\n\t}\n\n\t// Compute group headers.\n\tgroups := []GroupID{}\n\tgroupPos := []int{}\n\tlastPos := 1\n\tfor _, gid := range g.Tables() {\n\t\tgroups = append(groups, gid)\n\t\tgroupPos = append(groupPos, lastPos)\n\t\tlastPos += g.Table(gid).Len()\n\t}\n\tif len(groups) == 1 && groups[0] == RootGroupID {\n\t\tgroups, groupPos = nil, nil\n\t}\n\n\t// Print rows.\n\trowFmt := strings.Join(rowFmts, \"  \") + \"\\n\"\n\trowBuf := make([]interface{}, len(rowFmts))\n\tfor row := 0; row < len(ss[0]); row++ {\n\t\t// Print group headers. There may be more than one if\n\t\t// there are empty groups.\n\t\tfor len(groupPos) > 0 && row == groupPos[0] {\n\t\t\t_, err := fmt.Fprintf(w, \"-- %s\\n\", groups[0])\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tgroups, groupPos = groups[1:], groupPos[1:]\n\t\t}\n\n\t\tfor col := range rowBuf {\n\t\t\trowBuf[col] = ss[col][row]\n\t\t}\n\t\t_, err := fmt.Fprintf(w, rowFmt, rowBuf...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/table/print_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage table\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\t\"testing\"\n)\n\nfunc groupString(g Grouping) string {\n\tvar b bytes.Buffer\n\tFprint(&b, g, \"%#v\", \"%#v\", \"%#v\", \"%#v\")\n\treturn b.String()\n}\n\nfunc ExampleFprint() {\n\ttab := new(Builder).\n\t\tAdd(\"name\", []string{\"Washington\", \"Adams\", \"Jefferson\"}).\n\t\tAdd(\"terms\", []int{2, 1, 2}).\n\t\tDone()\n\tFprint(os.Stdout, tab)\n\t// Output:\n\t// name        terms\n\t// Washington      2\n\t// Adams           1\n\t// Jefferson       2\n}\n\nfunc ExampleFprint_Formats() {\n\ttab := new(Builder).\n\t\tAdd(\"name\", []string{\"Washington\", \"Adams\", \"Jefferson\"}).\n\t\tAdd(\"terms\", []int{2, 1, 2}).\n\t\tDone()\n\tFprint(os.Stdout, tab, \"President %s\", \"%#x\")\n\t// Output:\n\t// name                  terms\n\t// President Washington    0x2\n\t// President Adams         0x1\n\t// President Jefferson     0x2\n}\n\nfunc ExampleFprint_Groups() {\n\ttab := new(Builder).\n\t\tAdd(\"name\", []string{\"Washington\", \"Adams\", \"Jefferson\"}).\n\t\tAdd(\"terms\", []int{2, 1, 2}).\n\t\tAdd(\"state\", []string{\"Virginia\", \"Massachusetts\", \"Virginia\"}).\n\t\tDone()\n\tg := GroupBy(tab, \"state\")\n\tFprint(os.Stdout, g)\n\t// Output:\n\t// name        terms  state\n\t// -- /Virginia\n\t// Washington      2  Virginia\n\t// Jefferson       2  Virginia\n\t// -- /Massachusetts\n\t// Adams           1  Massachusetts\n}\n\nfunc TestFprintEmpty(t *testing.T) {\n\tvar b bytes.Buffer\n\tFprint(&b, new(Table))\n\tif b.String() != \"\" {\n\t\tt.Fatalf(\"want %q; got %q\", \"\", b.String())\n\t}\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/table/sort.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage table\n\nimport (\n\t\"sort\"\n\n\t\"github.com/aclements/go-gg/generic/slice\"\n)\n\n// SortBy sorts each group of g by the named columns. If a column's\n// type implements sort.Interface, rows will be sorted according to\n// that order. Otherwise, the values in the column must be naturally\n// ordered (their types must be orderable by the Go specification). If\n// neither is true, SortBy panics with a *generic.TypeError. If more\n// than one column is given, SortBy sorts by the tuple of the columns;\n// that is, if two values in the first column are equal, they are\n// sorted by the second column, and so on.\nfunc SortBy(g Grouping, cols ...string) Grouping {\n\t// Sort each group.\n\tsorters := make([]sort.Interface, len(cols))\n\treturn MapTables(g, func(_ GroupID, t *Table) *Table {\n\t\t// Create sorters for each column.\n\t\tsorters = sorters[:0]\n\t\tfor _, col := range cols {\n\t\t\tif _, ok := t.Const(col); ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tseq := t.MustColumn(col)\n\t\t\tsorter := slice.Sorter(seq)\n\t\t\tif sort.IsSorted(sorter) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsorters = append(sorters, sorter)\n\t\t}\n\n\t\tif len(sorters) == 0 {\n\t\t\t// Avoid shuffling everything by the identity\n\t\t\t// permutation.\n\t\t\treturn t\n\t\t}\n\n\t\t// Generate an initial permutation sequence.\n\t\tperm := make([]int, t.Len())\n\t\tfor i := range perm {\n\t\t\tperm[i] = i\n\t\t}\n\n\t\t// Sort the permutation sequence.\n\t\tsort.Stable(&permSort{perm, sorters})\n\n\t\t// Permute all columns.\n\t\tvar nt Builder\n\t\tfor _, name := range t.Columns() {\n\t\t\tif cv, ok := t.Const(name); ok {\n\t\t\t\tnt.AddConst(name, cv)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tseq := t.Column(name)\n\t\t\tseq = slice.Select(seq, perm)\n\t\t\tnt.Add(name, seq)\n\t\t}\n\t\treturn 
nt.Done()\n\t})\n}\n\ntype permSort struct {\n\tperm []int\n\tkeys []sort.Interface\n}\n\nfunc (s *permSort) Len() int {\n\treturn len(s.perm)\n}\n\nfunc (s *permSort) Less(i, j int) bool {\n\t// Since there's no way to ask about equality, we have to do\n\t// extra work for all of the keys except the last.\n\tfor _, key := range s.keys[:len(s.keys)-1] {\n\t\tif key.Less(s.perm[i], s.perm[j]) {\n\t\t\treturn true\n\t\t} else if key.Less(s.perm[j], s.perm[i]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn s.keys[len(s.keys)-1].Less(s.perm[i], s.perm[j])\n}\n\nfunc (s *permSort) Swap(i, j int) {\n\ts.perm[i], s.perm[j] = s.perm[j], s.perm[i]\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/table/table.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package table implements ordered, grouped two dimensional relations.\n//\n// There are two related abstractions: Table and Grouping.\n//\n// A Table is an ordered relation of rows and columns. Each column is\n// a Go slice and hence must be homogeneously typed, but different\n// columns may have different types. All columns in a Table have the\n// same number of rows.\n//\n// A Grouping generalizes a Table by grouping the Table's rows into\n// zero or more groups. A Table is itself a Grouping with zero or one\n// groups. Most operations take a Grouping and operate on each group\n// independently, though some operations sub-divide or combine groups.\n//\n// The structures of both Tables and Groupings are immutable. They are\n// constructed using a Builder or a GroupingBuilder, respectively, and\n// then \"frozen\" into their respective immutable data structures.\npackage table\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\n\t\"github.com/aclements/go-gg/generic\"\n\t\"github.com/aclements/go-gg/generic/slice\"\n)\n\n// TODO\n//\n// Rename Table to T?\n//\n// Make Table an interface? Then columns could be constructed lazily.\n//\n// Do all transformation functions as func(g Grouping) Grouping? That\n// could be a \"Transform\" type that has easy methods for chaining. In\n// a lot of cases, transformation functions could just return the\n// Transform returned by another function (like MapTables).\n//\n// Make an error type for \"unknown column\".\n\n// A Table is an immutable, ordered two dimensional relation. It\n// consists of a set of named columns. Each column is a sequence of\n// values of a consistent type or a constant value. All (non-constant)\n// columns have the same length.\n//\n// The zero value of Table is the \"empty table\": it has no rows and no\n// columns. 
Note that a Table may have one or more columns, but no\n// rows; such a Table is *not* considered empty.\n//\n// A Table is also a trivial Grouping. If a Table is empty, it has no\n// groups and hence the zero value of Table is also the \"empty group\".\n// Otherwise, it consists only of the root group, RootGroupID.\ntype Table struct {\n\tcols     map[string]Slice\n\tconsts   map[string]interface{}\n\tcolNames []string\n\tlen      int\n}\n\n// A Builder constructs a Table one column at a time.\n//\n// The zero value of a Builder represents an empty Table.\ntype Builder struct {\n\tt Table\n}\n\n// A Grouping is an immutable set of tables with identical sets of\n// columns, each identified by a distinct GroupID.\n//\n// Visually, a Grouping can be thought of as follows:\n//\n//\t   Col A  Col B  Col C\n//\t------ group /a ------\n//\t0   5.4    \"x\"     90\n//\t1   -.2    \"y\"     30\n//\t------ group /b ------\n//\t0   9.3    \"a\"     10\n//\n// Like a Table, a Grouping's structure is immutable. To construct a\n// Grouping, use a GroupingBuilder.\n//\n// Despite the fact that GroupIDs form a hierarchy, a Grouping ignores\n// this hierarchy and simply operates on a flat map of distinct\n// GroupIDs to Tables.\ntype Grouping interface {\n\t// Columns returns the names of the columns in this Grouping,\n\t// or nil if there are no Tables or the group consists solely\n\t// of empty Tables. 
All Tables in this Grouping have the same\n\t// set of columns.\n\tColumns() []string\n\n\t// Tables returns the group IDs of the tables in this\n\t// Grouping.\n\tTables() []GroupID\n\n\t// Table returns the Table in group gid, or nil if there is no\n\t// such Table.\n\tTable(gid GroupID) *Table\n}\n\n// A GroupingBuilder constructs a Grouping one table a time.\n//\n// The zero value of a GroupingBuilder represents an empty Grouping\n// with no tables and no columns.\ntype GroupingBuilder struct {\n\tg        groupedTable\n\tcolTypes []reflect.Type\n}\n\ntype groupedTable struct {\n\ttables   map[GroupID]*Table\n\tgroups   []GroupID\n\tcolNames []string\n}\n\n// A Slice is a Go slice value.\n//\n// This is primarily for documentation. There is no way to statically\n// enforce this in Go; however, functions that expect a Slice will\n// panic with a *generic.TypeError if passed a non-slice value.\ntype Slice interface{}\n\nfunc reflectSlice(s Slice) reflect.Value {\n\trv := reflect.ValueOf(s)\n\tif rv.Kind() != reflect.Slice {\n\t\tpanic(&generic.TypeError{rv.Type(), nil, \"is not a slice\"})\n\t}\n\treturn rv\n}\n\n// NewBuilder returns a new Builder. If t is non-nil, it populates the\n// new Builder with the columns from t.\nfunc NewBuilder(t *Table) *Builder {\n\tif t == nil {\n\t\treturn new(Builder)\n\t}\n\tb := Builder{Table{\n\t\tcols:     make(map[string]Slice),\n\t\tconsts:   make(map[string]interface{}),\n\t\tcolNames: append([]string(nil), t.Columns()...),\n\t\tlen:      t.len,\n\t}}\n\tfor k, v := range t.cols {\n\t\tb.t.cols[k] = v\n\t}\n\tfor k, v := range t.consts {\n\t\tb.t.consts[k] = v\n\t}\n\treturn &b\n}\n\n// Add adds a column to b, or removes the named column if data is nil.\n// If b already has a column with the given name, Add replaces it. 
If\n// data is non-nil, it must have the same length as any existing\n// columns or Add will panic.\nfunc (b *Builder) Add(name string, data Slice) *Builder {\n\tif data == nil {\n\t\t// Remove the column.\n\t\tif _, ok := b.t.cols[name]; !ok {\n\t\t\tif _, ok := b.t.consts[name]; !ok {\n\t\t\t\t// Nothing to remove.\n\t\t\t\treturn b\n\t\t\t}\n\t\t}\n\t\tdelete(b.t.cols, name)\n\t\tdelete(b.t.consts, name)\n\t\tfor i, n := range b.t.colNames {\n\t\t\tif n == name {\n\t\t\t\tcopy(b.t.colNames[i:], b.t.colNames[i+1:])\n\t\t\t\tb.t.colNames = b.t.colNames[:len(b.t.colNames)-1]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\treturn b\n\t}\n\n\t// Are we replacing an existing column?\n\t_, replace := b.t.cols[name]\n\tif !replace {\n\t\t_, replace = b.t.consts[name]\n\t}\n\n\t// Check the column and add it.\n\trv := reflectSlice(data)\n\tdataLen := rv.Len()\n\tif len(b.t.cols) == 0 || (replace && len(b.t.cols) == 1) {\n\t\tif b.t.cols == nil {\n\t\t\tb.t.cols = make(map[string]Slice)\n\t\t}\n\t\t// First non-constant column (possibly replacing the\n\t\t// only non-constant column).\n\t\tb.t.cols[name] = data\n\t\tb.t.len = dataLen\n\t} else if b.t.len != dataLen {\n\t\tpanic(fmt.Sprintf(\"cannot add column %q with %d elements to table with %d rows\", name, dataLen, b.t.len))\n\t} else {\n\t\tb.t.cols[name] = data\n\t}\n\n\tif replace {\n\t\t// Make sure it's not in constants.\n\t\tdelete(b.t.consts, name)\n\t} else {\n\t\tb.t.colNames = append(b.t.colNames, name)\n\t}\n\n\treturn b\n}\n\n// AddConst adds a constant column to b whose value is val. If b\n// already has a column with this name, AddConst replaces it.\n//\n// A constant column has the same value in every row of the Table. 
It\n// does not itself have an inherent length.\nfunc (b *Builder) AddConst(name string, val interface{}) *Builder {\n\t// Are we replacing an existing column?\n\t_, replace := b.t.cols[name]\n\tif !replace {\n\t\t_, replace = b.t.consts[name]\n\t}\n\n\tif b.t.consts == nil {\n\t\tb.t.consts = make(map[string]interface{})\n\t}\n\tb.t.consts[name] = val\n\n\tif replace {\n\t\t// Make sure it's not in cols.\n\t\tdelete(b.t.cols, name)\n\t} else {\n\t\tb.t.colNames = append(b.t.colNames, name)\n\t}\n\n\treturn b\n}\n\n// Has returns true if b has a column named \"name\".\nfunc (b *Builder) Has(name string) bool {\n\tif _, ok := b.t.cols[name]; ok {\n\t\treturn true\n\t}\n\tif _, ok := b.t.consts[name]; ok {\n\t\treturn true\n\t}\n\treturn false\n}\n\n// Done returns the constructed Table and resets b.\nfunc (b *Builder) Done() *Table {\n\tif len(b.t.colNames) == 0 {\n\t\treturn new(Table)\n\t}\n\tt := b.t\n\tb.t = Table{}\n\treturn &t\n}\n\n// Len returns the number of rows in Table t.\nfunc (t *Table) Len() int {\n\treturn t.len\n}\n\n// Columns returns the names of the columns in Table t, or nil if this\n// Table is empty.\nfunc (t *Table) Columns() []string {\n\treturn t.colNames\n}\n\n// Column returns the slice of data in column name of Table t, or nil\n// if there is no such column. 
If name is a constant column, this\n// returns a slice with the constant value repeated to the length of\n// the Table.\nfunc (t *Table) Column(name string) Slice {\n\tif c, ok := t.cols[name]; ok {\n\t\t// It's a regular column or a constant column with a\n\t\t// cached expansion.\n\t\treturn c\n\t}\n\n\tif cv, ok := t.consts[name]; ok {\n\t\t// Expand the constant column and cache the result.\n\t\texpanded := slice.Repeat(cv, t.len)\n\t\tt.cols[name] = expanded\n\t\treturn expanded\n\t}\n\n\treturn nil\n}\n\n// MustColumn is like Column, but panics if there is no such column.\nfunc (t *Table) MustColumn(name string) Slice {\n\tif c := t.Column(name); c != nil {\n\t\treturn c\n\t}\n\tpanic(fmt.Sprintf(\"unknown column %q\", name))\n}\n\n// Const returns the value of constant column name. If this column\n// does not exist or is not a constant column, Const returns nil,\n// false.\nfunc (t *Table) Const(name string) (val interface{}, ok bool) {\n\tcv, ok := t.consts[name]\n\treturn cv, ok\n}\n\n// isEmpty returns true if t is an empty Table, meaning it has no rows\n// or columns.\nfunc (t *Table) isEmpty() bool {\n\treturn t.colNames == nil\n}\n\n// Tables returns the groups IDs in this Table. If t is empty, there\n// are no group IDs. Otherwise, there is only RootGroupID.\nfunc (t *Table) Tables() []GroupID {\n\tif t.isEmpty() {\n\t\treturn []GroupID{}\n\t}\n\treturn []GroupID{RootGroupID}\n}\n\n// Table returns t if gid is RootGroupID and t is not empty; otherwise\n// it returns nil.\nfunc (t *Table) Table(gid GroupID) *Table {\n\tif gid == RootGroupID && !t.isEmpty() {\n\t\treturn t\n\t}\n\treturn nil\n}\n\n// NewGroupingBuilder returns a new GroupingBuilder. 
If g is non-nil,\n// it populates the new GroupingBuilder with the tables from g.\nfunc NewGroupingBuilder(g Grouping) *GroupingBuilder {\n\tif g == nil {\n\t\treturn new(GroupingBuilder)\n\t}\n\tb := GroupingBuilder{groupedTable{\n\t\ttables:   make(map[GroupID]*Table),\n\t\tgroups:   append([]GroupID(nil), g.Tables()...),\n\t\tcolNames: append([]string(nil), g.Columns()...),\n\t}, nil}\n\tfor _, gid := range g.Tables() {\n\t\tt := g.Table(gid)\n\t\tb.g.tables[gid] = t\n\t\tif b.colTypes == nil {\n\t\t\tb.colTypes = colTypes(t)\n\t\t}\n\t}\n\treturn &b\n}\n\nfunc colTypes(t *Table) []reflect.Type {\n\tcolTypes := make([]reflect.Type, len(t.colNames))\n\tfor i, col := range t.colNames {\n\t\tif c, ok := t.cols[col]; ok {\n\t\t\tcolTypes[i] = reflect.TypeOf(c).Elem()\n\t\t} else {\n\t\t\tcolTypes[i] = reflect.TypeOf(t.consts[col])\n\t\t}\n\t}\n\treturn colTypes\n}\n\n// Add adds a Table to b, or removes a table if t is nil. If t is the\n// empty Table, this is a no-op because the empty Table contains no\n// groups. If gid already exists, Add replaces it. Table t must have\n// the same columns as any existing Tables in this Grouping and they\n// must have identical types; otherwise, Add will panic.\n//\n// TODO This doesn't make it easy to combine two Groupings. It could\n// instead take a Grouping and reparent it.\nfunc (b *GroupingBuilder) Add(gid GroupID, t *Table) *GroupingBuilder {\n\tif t == nil {\n\t\tif _, ok := b.g.tables[gid]; !ok {\n\t\t\t// Nothing to remove.\n\t\t\treturn b\n\t\t}\n\t\tdelete(b.g.tables, gid)\n\t\tfor i, g2 := range b.g.groups {\n\t\t\tif g2 == gid {\n\t\t\t\tcopy(b.g.groups[i:], b.g.groups[i+1:])\n\t\t\t\tb.g.groups = b.g.groups[:len(b.g.groups)-1]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\treturn b\n\t}\n\n\tif t != nil && t.isEmpty() {\n\t\t// Adding an empty table has no effect.\n\t\treturn b\n\t}\n\n\tif len(b.g.groups) == 1 && b.g.groups[0] == gid {\n\t\t// We're replacing the only group. 
This is allowed to\n\t\t// change the shape of the Grouping.\n\t\tb.g.tables[gid] = t\n\t\tb.g.colNames = t.Columns()\n\t\tb.colTypes = colTypes(t)\n\t\treturn b\n\t} else if len(b.g.groups) == 0 {\n\t\tb.g.tables = map[GroupID]*Table{gid: t}\n\t\tb.g.groups = []GroupID{gid}\n\t\tb.g.colNames = t.Columns()\n\t\tb.colTypes = colTypes(t)\n\t\treturn b\n\t}\n\n\t// Check that t's column names match.\n\tmatches := true\n\tif len(t.colNames) != len(b.g.colNames) {\n\t\tmatches = false\n\t} else {\n\t\tfor i, n := range t.colNames {\n\t\t\tif b.g.colNames[i] != n {\n\t\t\t\tmatches = false\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif !matches {\n\t\tpanic(fmt.Sprintf(\"table columns %q do not match group columns %q\", t.colNames, b.g.colNames))\n\t}\n\n\t// Check that t's column types match.\n\tfor i, col := range b.g.colNames {\n\t\tt0 := b.colTypes[i]\n\t\tvar t1 reflect.Type\n\t\tif c, ok := t.cols[col]; ok {\n\t\t\tt1 = reflect.TypeOf(c).Elem()\n\t\t} else if cv, ok := t.consts[col]; ok {\n\t\t\tt1 = reflect.TypeOf(cv)\n\t\t}\n\t\tif t0 != t1 {\n\t\t\tpanic(&generic.TypeError{t0, t1, fmt.Sprintf(\"for column %q are not the same\", col)})\n\t\t}\n\t}\n\n\t// Add t.\n\tif _, ok := b.g.tables[gid]; !ok {\n\t\tb.g.groups = append(b.g.groups, gid)\n\t}\n\tb.g.tables[gid] = t\n\n\treturn b\n}\n\n// Done returns the constructed Grouping and resets b.\nfunc (b *GroupingBuilder) Done() Grouping {\n\tif len(b.g.groups) == 0 {\n\t\treturn new(groupedTable)\n\t}\n\tg := b.g\n\tb.g = groupedTable{}\n\treturn &g\n}\n\nfunc (g *groupedTable) Columns() []string {\n\treturn g.colNames\n}\n\nfunc (g *groupedTable) Tables() []GroupID {\n\treturn g.groups\n}\n\nfunc (g *groupedTable) Table(gid GroupID) *Table {\n\treturn g.tables[gid]\n}\n\n// ColType returns the type of column col in g. This will always be a\n// slice type, even if col is a constant column. 
ColType panics if col\n// is unknown.\n//\n// TODO: If I introduce a first-class representation for a grouped\n// column, this should probably be in that.\nfunc ColType(g Grouping, col string) reflect.Type {\n\ttables := g.Tables()\n\tif len(tables) == 0 {\n\t\tpanic(fmt.Sprintf(\"unknown column %q\", col))\n\t}\n\tt0 := g.Table(tables[0])\n\tif cv, ok := t0.Const(col); ok {\n\t\treturn reflect.SliceOf(reflect.TypeOf(cv))\n\t}\n\treturn reflect.TypeOf(t0.MustColumn(col))\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-gg/table/table_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage table\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"regexp\"\n\t\"testing\"\n)\n\nvar xgid = RootGroupID.Extend(\"xgid\")\nvar ygid = RootGroupID.Extend(\"ygid\")\n\nfunc isEmpty(g Grouping) bool {\n\tif t, _ := g.(*Table); t != nil && t.Len() != 0 {\n\t\treturn false\n\t}\n\treturn g.Columns() == nil && len(g.Tables()) == 0\n}\n\nfunc de(x, y interface{}) bool {\n\treturn reflect.DeepEqual(x, y)\n}\n\nfunc equal(g1, g2 Grouping) bool {\n\tif !de(g1.Columns(), g2.Columns()) ||\n\t\t!de(g1.Tables(), g2.Tables()) {\n\t\treturn false\n\t}\n\tfor _, gid := range g1.Tables() {\n\t\tfor _, col := range g1.Columns() {\n\t\t\tif !de(g1.Table(gid).Column(col), g2.Table(gid).Column(col)) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}\n\nfunc shouldPanic(t *testing.T, re string, f func()) {\n\tr := regexp.MustCompile(re)\n\tdefer func() {\n\t\terr := recover()\n\t\tif err == nil {\n\t\t\tt.Fatalf(\"want panic matching %q; got no panic\", re)\n\t\t} else if !r.MatchString(fmt.Sprintf(\"%s\", err)) {\n\t\t\tt.Fatalf(\"panic %q does not match %q\", err, re)\n\t\t}\n\t}()\n\tf()\n}\n\nfunc TestEmptyTable(t *testing.T) {\n\ttab := new(Table)\n\tif !isEmpty(tab) {\n\t\tt.Fatalf(\"Table{} is not empty\")\n\t}\n\tif v := tab.Len(); v != 0 {\n\t\tt.Fatalf(\"Table{}.Len() should be 0; got %v\", v)\n\t}\n\tif v := tab.Columns(); v != nil {\n\t\tt.Fatalf(\"Table{}.Columns() should be nil; got %v\", v)\n\t}\n\tif v := tab.Column(\"x\"); v != nil {\n\t\tt.Fatalf(\"Table{}.Column(\\\"x\\\") should be nil; got %v\", v)\n\t}\n\tshouldPanic(t, \"unknown column\", func() {\n\t\ttab.MustColumn(\"x\")\n\t})\n\tif v, w := tab.Tables(), []GroupID{}; !de(v, w) {\n\t\tt.Fatalf(\"Table{}.Tables should be %v; got %v\", w, v)\n\t}\n\tif v := tab.Table(RootGroupID); v != nil 
{\n\t\tt.Fatalf(\"Table{}.Table(RootGroupID) should be nil; got %v\", v)\n\t}\n\tif v := tab.Table(xgid); v != nil {\n\t\tt.Fatalf(\"Table{}.Table(xgid) should be nil; got %v\", v)\n\t}\n}\n\nfunc TestBuilder(t *testing.T) {\n\tnb := NewBuilder\n\n\tvar b Builder\n\tif !isEmpty(b.Done()) {\n\t\tt.Fatal(\"Empty builder is not empty\")\n\t}\n\tif !isEmpty(nb(nil).Done()) {\n\t\tt.Fatal(\"Empty builder is not empty\")\n\t}\n\tnb(nil).Add(\"x\", []int{}).Done()\n\tnb(nil).Add(\"x\", []int{1, 2, 3}).Done()\n\tshouldPanic(t, \"not a slice\", func() {\n\t\tnb(nil).Add(\"x\", 1)\n\t})\n\n\ttab0 := new(Builder).Add(\"x\", []int{}).Done()\n\tnb(tab0).Add(\"x\", []int{1}) // Can override only column.\n\tshouldPanic(t, \"column \\\"y\\\".* with 1 .* 0 rows\", func() {\n\t\tnb(tab0).Add(\"y\", []int{1})\n\t})\n\tnb(tab0).Add(\"y\", []int{})\n\tif v := nb(tab0).Add(\"x\", nil).Done(); !isEmpty(v) {\n\t\tt.Fatalf(\"tab.Add(\\\"x\\\", nil) should be the empty table; got %v\", v)\n\t}\n\tif v := nb(tab0).Add(\"y\", nil).Done(); !equal(v, tab0) {\n\t\tt.Fatalf(\"tab.Add(\\\"y\\\", nil) should be %v; got %v\", tab0, v)\n\t}\n}\n\nfunc TestTable0(t *testing.T) {\n\tcol := []int{}\n\ttab := new(Builder).Add(\"x\", col).Done()\n\tif isEmpty(tab) {\n\t\tt.Fatalf(\"tab is empty\")\n\t}\n\tif v := tab.Len(); v != 0 {\n\t\tt.Fatalf(\"tab.Len() should be 0; got %v\", v)\n\t}\n\tif v, w := tab.Columns(), []string{\"x\"}; !de(v, w) {\n\t\tt.Fatalf(\"tab.Columns() should be %v; got %v\", w, v)\n\t}\n\tif v := tab.Column(\"x\"); !de(v, col) {\n\t\tt.Fatalf(\"tab.Column(\\\"x\\\") should be %v; got %v\", col, v)\n\t}\n\tif v := tab.Column(\"y\"); v != nil {\n\t\tt.Fatalf(\"tab.Column(\\\"y\\\") should be nil; got %v\", v)\n\t}\n\tif v := tab.MustColumn(\"x\"); !de(v, col) {\n\t\tt.Fatalf(\"tab.MustColumn(\\\"x\\\") should be %v; got %v\", col, v)\n\t}\n\tshouldPanic(t, \"unknown column\", func() {\n\t\ttab.MustColumn(\"y\")\n\t})\n\tif v, w := tab.Tables(), []GroupID{RootGroupID}; !de(v, w) 
{\n\t\tt.Fatalf(\"tab.Tables() should be %v; got %v\", w, v)\n\t}\n\tif v := tab.Table(RootGroupID); v != tab {\n\t\tt.Fatalf(\"tab.Table(RootGroupID) should be %v; got %v\", tab, v)\n\t}\n\tif v := tab.Table(xgid); v != nil {\n\t\tt.Fatalf(\"tab.Table(xgid) should be nil; got %v\", v)\n\t}\n}\n\nfunc TestTable1(t *testing.T) {\n\tcol := []int{1}\n\ttab := new(Builder).Add(\"x\", col).Done()\n\tif isEmpty(tab) {\n\t\tt.Fatalf(\"tab is empty\")\n\t}\n\tif v := tab.Len(); v != 1 {\n\t\tt.Fatalf(\"tab.Len() should be 1; got %v\", v)\n\t}\n\tif v, w := tab.Columns(), []string{\"x\"}; !de(v, w) {\n\t\tt.Fatalf(\"tab.Columns() should be %v; got %v\", w, v)\n\t}\n\tif v := tab.Column(\"x\"); !de(v, col) {\n\t\tt.Fatalf(\"tab.Column(\\\"x\\\") should be %v; got %v\", col, v)\n\t}\n\tif v := tab.Column(\"y\"); v != nil {\n\t\tt.Fatalf(\"tab.Column(\\\"y\\\") should be nil; got %v\", v)\n\t}\n\tif v := tab.MustColumn(\"x\"); !de(v, col) {\n\t\tt.Fatalf(\"tab.MustColumn(\\\"x\\\") should be %v; got %v\", col, v)\n\t}\n\tshouldPanic(t, \"unknown column\", func() {\n\t\ttab.MustColumn(\"y\")\n\t})\n\tif v, w := tab.Tables(), []GroupID{RootGroupID}; !de(v, w) {\n\t\tt.Fatalf(\"tab.Tables() should be %v; got %v\", w, v)\n\t}\n\tif v := tab.Table(RootGroupID); v != tab {\n\t\tt.Fatalf(\"tab.Table(RootGroupID) should be %v; got %v\", tab, v)\n\t}\n\tif v := tab.Table(xgid); v != nil {\n\t\tt.Fatalf(\"tab.Table(xgid) should be nil; got %v\", v)\n\t}\n}\n\nfunc TestGroupingBuilder(t *testing.T) {\n\tetab := new(Table)\n\ttab0 := new(Builder).Add(\"x\", []int{}).Done()\n\ttab1 := new(Builder).Add(\"x\", []int{1}).Done()\n\ttabY := new(Builder).Add(\"y\", []int{}).Done()\n\ttabXY := new(Builder).Add(\"x\", []int{}).Add(\"y\", []int{}).Done()\n\n\tngb := NewGroupingBuilder\n\tif v := ngb(etab).Add(RootGroupID, etab).Done(); !isEmpty(v) {\n\t\tt.Fatalf(\"etab+etab should be empty; got %v\", v)\n\t}\n\tif v := ngb(etab).Add(RootGroupID, tab1).Done(); !equal(tab1, v) 
{\n\t\tt.Fatalf(\"etab+(RootGroupID, tab1) should be %v; got %v\", tab1, v)\n\t}\n\tif v := ngb(tab1).Add(RootGroupID, etab).Done(); !equal(tab1, v) {\n\t\tt.Fatalf(\"(RootGroupID, tab1)+etab should be %v; got %v\", tab1, v)\n\t}\n\n\tif v := ngb(tab0).Add(RootGroupID, tab0).Done(); !equal(tab0, v) {\n\t\tt.Fatalf(\"tab0+(RootGroupID, tab0) should be %v; got %v\", tab0, v)\n\t}\n\tif v := ngb(tab0).Add(RootGroupID, tab1).Done(); !equal(tab1, v) {\n\t\tt.Fatalf(\"tab0+(RootGroupID, tab1) should be %v; got %v\", tab0, v)\n\t}\n\tif v := ngb(tab0).Add(RootGroupID, tabY).Done(); !equal(tabY, v) {\n\t\tt.Fatalf(\"tab0+(RootGroupID, tabY) should be %v; got %v\", tab0, v)\n\t}\n\tshouldPanic(t, `table columns \\[\"y\"\\] do not match group columns \\[\"x\"\\]`, func() {\n\t\tngb(tab0).Add(xgid, tabY)\n\t})\n\tshouldPanic(t, `table columns \\[\"x\" \"y\"\\] do not match group columns \\[\"x\"\\]`, func() {\n\t\tngb(tab0).Add(xgid, tabXY)\n\t})\n\n\ttab01 := ngb(tab0).Add(xgid, tab1).Done()\n\tif v, w := tab01.Columns(), []string{\"x\"}; !de(v, w) {\n\t\tt.Fatalf(\"tab01.Columns() should be %v; got %v\", w, v)\n\t}\n\tif v, w := tab01.Tables(), []GroupID{RootGroupID, xgid}; !de(v, w) {\n\t\tt.Fatalf(\"tab01.Tables() should be %v; got %v\", w, v)\n\t}\n\tif v := tab01.Table(RootGroupID); v != tab0 {\n\t\tt.Fatalf(\"tab01.Table(RootGroupID) should be tab0; got %v\", v)\n\t}\n\tif v := tab01.Table(xgid); v != tab1 {\n\t\tt.Fatalf(\"tab01.Table(xgid) should be tab1; got %v\", v)\n\t}\n\tif v := tab01.Table(RootGroupID.Extend(\"ygid\")); v != nil {\n\t\tt.Fatalf(\"tab01.Table(ygid) should be nil; got %v\", v)\n\t}\n\tif v := ngb(tab01).Add(RootGroupID, new(Table)).Done(); !equal(tab01, v) {\n\t\tt.Fatalf(\"tab01+(RootGroupID, empty) should be tab01; got %v\", v)\n\t}\n\n\tif v := ngb(tab0).Add(RootGroupID, nil).Done(); !isEmpty(v) {\n\t\tt.Fatalf(\"tab0+(RootGroupID, nil) should be empty; got %v\", v)\n\t}\n\tif v := ngb(tab0).Add(xgid, nil).Done(); !equal(tab0, v) 
{\n\t\tt.Fatalf(\"tab0+(xgid, nil) should be tab0; got %v\", v)\n\t}\n\n\ttab0x := ngb(tab01).Add(xgid, nil).Done()\n\tif !equal(tab0x, tab0) {\n\t\tt.Fatalf(\"tab01+(xgid, nil) should be tab0; got %v\", tab0x)\n\t}\n\tif v := ngb(tab0x).Add(RootGroupID, nil).Done(); !isEmpty(v) {\n\t\tt.Fatalf(\"tab0x+(RootGroupID, nil) should be empty; got %v\", v)\n\t}\n\n\ttab2 := ngb(nil).Add(xgid, tab0).Add(ygid, tab1).Done()\n\tif want := []GroupID{xgid, ygid}; !de(want, tab2.Tables()) {\n\t\tt.Fatalf(\"tables should be %v; got %v\", want, tab2.Tables())\n\t}\n\n\tshouldPanic(t, `int and float64 for column \"x\"`, func() {\n\t\tngb(tab0).Add(xgid, new(Builder).Add(\"x\", []float64{}).Done())\n\t})\n}\n\nfunc TestColumnOrder(t *testing.T) {\n\t// Test that columns stay in order.\n\tcols := []string{\"a\", \"b\", \"c\", \"d\"}\n\tfor iter := 0; iter < 10; iter++ {\n\t\tvar b Builder\n\t\tfor _, col := range cols {\n\t\t\tb.Add(col, []int{})\n\t\t}\n\t\ttab := b.Done()\n\t\tif !de(cols, tab.Columns()) {\n\t\t\tt.Fatalf(\"want %v; got %v\", cols, tab.Columns())\n\t\t}\n\t}\n\n\t// Test that re-adding a column keeps it in place.\n\ttab := new(Builder).Add(\"a\", []int{}).Add(\"b\", []int{}).Add(\"a\", []int{}).Done()\n\tif want := []string{\"a\", \"b\"}; !de(want, tab.Columns()) {\n\t\tt.Fatalf(\"want %v; got %v\", want, tab.Columns())\n\t}\n}\n\nfunc TestGroupOrder(t *testing.T) {\n\t// Test that groups stay in order.\n\tgids := []GroupID{\n\t\tRootGroupID.Extend(\"a\"),\n\t\tRootGroupID.Extend(\"b\"),\n\t\tRootGroupID.Extend(\"c\"),\n\t\tRootGroupID.Extend(\"d\"),\n\t}\n\ttab := new(Builder).Add(\"col\", []int{}).Done()\n\tfor iter := 0; iter < 10; iter++ {\n\t\tvar b GroupingBuilder\n\t\tfor _, gid := range gids {\n\t\t\tb.Add(gid, tab)\n\t\t}\n\t\tg := b.Done()\n\t\tif !de(gids, g.Tables()) {\n\t\t\tt.Fatalf(\"want %v; got %v\", gids, g.Tables())\n\t\t}\n\t}\n\n\t// Test that re-adding a group keeps it in place.\n\tvar b GroupingBuilder\n\tg := b.Add(gids[0], 
tab).Add(gids[1], tab).Add(gids[0], tab).Done()\n\tif want := []GroupID{gids[0], gids[1]}; !de(want, g.Tables()) {\n\t\tt.Fatalf(\"want %v; got %v\", want, g.Tables())\n\t}\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/LICENSE",
    "content": "Copyright (c) 2015 The Go Authors. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n   * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n   * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n   * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/README.md",
    "content": "These packages provide more specialized math routines than are\navailable in the standard Go math package. go-moremath currently\nfocuses on statistical routines, with particular focus on high-quality\nimplementations and APIs for non-parametric methods.\n\nThe API is not stable.\n\nPlease see the [documentation](https://godoc.org/github.com/aclements/go-moremath).\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/cmd/dist/dist.go",
    "content": "// dist reads newline-separated numbers and describes their distribution.\n//\n// For example,\n//\n//  $ seq 1 20 | grep -v 1 | dist\n//  N 9  sum 64  mean 7.11111  gmean 5.78509  std dev 5.34894  variance 28.6111\n//\n//       min 2\n//     1%ile 2\n//     5%ile 2\n//    25%ile 3.66667\n//    median 6\n//    75%ile 8.33333\n//    95%ile 20\n//    99%ile 20\n//       max 20\n//\n//  ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣀⣠⠖⠒⠒⠒⠒⠒⠒⠒⠒⠒⠒⠒⠒⠒⠦⣀⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⡖ 0.1\n//  ⠀⠀⠀⠀⠀⠀⠀⢀⣠⠴⠊⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠉⠲⢤⣀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣀⣀⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⡇\n//  ⠠⠤⠤⠤⠤⠴⠒⠋⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠈⠑⠲⠤⠤⠤⠤⠤⠤⠤⠤⠤⠤⠤⠤⠤⠤⠤⠤⠤⠤⠴⠒⠋⠉⠉⠀⠀⠉⠉⠙⠒⠦⠤⠤⠤⠤⠄⠧ 0.0\n//  ⠈⠉⠉⠉⠉⠙⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠋⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠉⠋⠉⠉⠉⠉⠉⠉⠉⠉⠉⠁\n//       0                         10                         20\npackage main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/aclements/go-moremath/stats\"\n)\n\nfunc main() {\n\ts := readInput(os.Stdin)\n\tif len(s.Xs) == 0 {\n\t\tfmt.Fprintln(os.Stderr, \"no input\")\n\t\treturn\n\t}\n\ts.Sort()\n\n\tfmt.Printf(\"N %d  sum %.6g  mean %.6g\", len(s.Xs), s.Sum(), s.Mean())\n\tgmean := s.GeoMean()\n\tif !math.IsNaN(gmean) {\n\t\tfmt.Printf(\"  gmean %.6g\", gmean)\n\t}\n\tfmt.Printf(\"  std dev %.6g  variance %.6g\\n\", s.StdDev(), s.Variance())\n\tfmt.Println()\n\n\t// Quartiles and tails.\n\tlabels := map[int]string{0: \"min\", 50: \"median\", 100: \"max\"}\n\tfor _, p := range []int{0, 1, 5, 25, 50, 75, 95, 99, 100} {\n\t\tlabel, ok := labels[p]\n\t\tif !ok {\n\t\t\tlabel = fmt.Sprintf(\"%d%%ile\", p)\n\t\t}\n\t\tfmt.Printf(\"%8s %.6g\\n\", label, s.Quantile(float64(p)/100))\n\t}\n\tfmt.Println()\n\n\t// Kernel density estimate.\n\tkde := &stats.KDE{Sample: s}\n\tFprintPDF(os.Stdout, kde)\n}\n\nfunc readInput(r io.Reader) (sample stats.Sample) {\n\tscanner := bufio.NewScanner(r)\n\tfor scanner.Scan() {\n\t\tl := scanner.Text()\n\t\tl = strings.TrimSpace(l)\n\t\tif l == \"\" 
{\n\t\t\tcontinue\n\t\t}\n\t\tvalue, err := strconv.ParseFloat(l, 64)\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tsample.Xs = append(sample.Xs, value)\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\n\treturn\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/cmd/dist/plot.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"unicode/utf8\"\n\n\t\"github.com/aclements/go-moremath/scale\"\n\t\"github.com/aclements/go-moremath/stats\"\n\t\"github.com/aclements/go-moremath/vec\"\n)\n\nconst (\n\t// printSamples is the number of points on the X axis to\n\t// sample a function at for printing.\n\tprintSamples = 500\n\n\t// printWidth is the width of the plot area in dots.\n\tprintWidth = 70 * 2\n\t// printHeight is the height of the plot area in dots.\n\tprintHeight = 3 * 4\n\n\tprintXMargin = 1\n\tprintYMargin = 1\n)\n\n// FprintPDF prints a Unicode representation of the PDF of each\n// distribution in dists to w. Multiple distributions are printed\n// stacked vertically and on the same X axis (but possibly different Y\n// axes).\nfunc FprintPDF(w io.Writer, dists ...stats.Dist) error {\n\txscale, xs := commonScale(dists...)\n\tfor _, d := range dists {\n\t\tif err := fprintFn(w, d.PDF, xscale, xs); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn fprintScale(w, xscale)\n}\n\n// FprintCDF is equivalent to FprintPDF, but prints the CDF of each\n// distribution.\nfunc FprintCDF(w io.Writer, dists ...stats.Dist) error {\n\txscale, xs := commonScale(dists...)\n\tfor _, d := range dists {\n\t\tif err := fprintFn(w, d.CDF, xscale, xs); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn fprintScale(w, xscale)\n}\n\n// makeScale creates a linear scale from [x1, x2) to [y1, y2).\nfunc makeScale(x1, x2 float64, y1, y2 int) scale.QQ {\n\treturn scale.QQ{\n\t\tSrc:  &scale.Linear{Min: x1, Max: x2, Clamp: true},\n\t\tDest: &scale.Linear{Min: float64(y1), Max: float64(y2) - 1e-10},\n\t}\n}\n\nfunc commonScale(dist ...stats.Dist) (xscale scale.QQ, xs []float64) {\n\tvar l, h float64\n\tif len(dist) == 0 {\n\t\tl, h = -1, 1\n\t} else {\n\t\tl, h = 
dist[0].Bounds()\n\t\tfor _, d := range dist[1:] {\n\t\t\tdl, dh := d.Bounds()\n\t\t\tl, h = math.Min(l, dl), math.Max(h, dh)\n\t\t}\n\t}\n\txscale = makeScale(l, h, printXMargin, printWidth-printXMargin)\n\t//xscale.Src.Nice(10)\n\tsrc := xscale.Src.(*scale.Linear)\n\txs = vec.Linspace(src.Min, src.Max, printSamples)\n\treturn\n}\n\nfunc fprintScale(w io.Writer, sc scale.QQ) error {\n\timg := make([][]bool, printWidth)\n\tfor i := range img {\n\t\tif i < printXMargin || i >= printWidth-printXMargin {\n\t\t\timg[i] = make([]bool, 2)\n\t\t} else {\n\t\t\timg[i] = []bool{true, false}\n\t\t}\n\t}\n\tmajor, _ := sc.Src.Ticks(scale.TickOptions{Max: 3})\n\tlabels := make([]string, len(major))\n\tlpos := make([]int, len(major))\n\tfor i, tick := range major {\n\t\tx := int(sc.Map(tick))\n\t\timg[x][1] = true\n\t\t// TODO: It would be nice if the scale could format\n\t\t// these ticks in a consistent way.\n\t\tlabels[i] = fmt.Sprintf(\"%g\", tick)\n\t\twidth := len(labels[i])\n\t\tlpos[i] = minint(maxint(x/2-width/2, 0), (printWidth+1)/2-width)\n\t}\n\tif err := fprintImage(w, img, []string{\"\"}); err != nil {\n\t\treturn err\n\t}\n\tcurpos := 0\n\tfor i, label := range labels {\n\t\tgap := lpos[i] - curpos\n\t\tif i > 0 {\n\t\t\tgap = maxint(gap, 1)\n\t\t}\n\t\t_, err := fmt.Fprintf(w, \"%*s%s\", gap, \"\", label)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcurpos += gap + len(label)\n\t}\n\t_, err := fmt.Fprintf(w, \"\\n\")\n\treturn err\n}\n\nfunc fprintFn(w io.Writer, fn func(float64) float64, xscale scale.QQ, xs []float64) error {\n\tys := vec.Map(fn, xs)\n\n\tyl, yh := stats.Bounds(ys)\n\tif yl > 0 && yl-(yh-yl)*0.1 <= 0 {\n\t\tyl = 0\n\t}\n\tyscale := makeScale(yh, yl, printYMargin, printHeight-printYMargin)\n\n\t// Render the function to an image.\n\timg := make([][]bool, printWidth+2)\n\tfor i := range img {\n\t\timg[i] = make([]bool, printHeight)\n\t}\n\tfor i, x := range xs {\n\t\timg[int(xscale.Map(x))][int(yscale.Map(ys[i]))] = true\n\t}\n\n\t// Render 
Y axis.\n\typos := printWidth\n\tfor y := printYMargin; y < printHeight-printYMargin; y++ {\n\t\timg[ypos][y] = true\n\t}\n\timg[ypos+1][printYMargin] = true\n\timg[ypos+1][len(img[0])-1-printYMargin] = true\n\n\ttrail := make([]string, (printHeight+3)/4)\n\ttrail[0] = fmt.Sprintf(\" %4.3f\", yh)\n\ttrail[len(trail)-1] = fmt.Sprintf(\" %4.3f\", yl)\n\n\treturn fprintImage(w, img, trail)\n}\n\nfunc fprintImage(w io.Writer, img [][]bool, trail []string) error {\n\tvar x, y int\n\tbit := func(ox, oy int) byte {\n\t\tif x+ox < len(img) && y+oy < len(img[x+ox]) && img[x+ox][y+oy] {\n\t\t\treturn 1\n\t\t}\n\t\treturn 0\n\t}\n\n\tmaxTrail := len(trail[0])\n\tfor _, trail1 := range trail {\n\t\tmaxTrail = maxint(maxTrail, len(trail1))\n\t}\n\tbuf := make([]byte, 3*(len(img)+1)/2+maxTrail+1)\n\tfor y = 0; y < len(img[0]); y += 4 {\n\t\tbufpos := 0\n\t\tfor x = 0; x < len(img); x += 2 {\n\t\t\t// Grab the 2x4 cell of pixels and encode it\n\t\t\t// into a byte with the following bit layout:\n\t\t\t//  0 3\n\t\t\t//  1 4\n\t\t\t//  2 5\n\t\t\t//  6 7\n\t\t\tcell := bit(0, 0)<<0 | bit(1, 0)<<3\n\t\t\tcell |= bit(0, 1)<<1 | bit(1, 1)<<4\n\t\t\tcell |= bit(0, 2)<<2 | bit(1, 2)<<5\n\t\t\tcell |= bit(0, 3)<<6 | bit(1, 3)<<7\n\t\t\t// Translate cell into the Unicode Braille space.\n\t\t\tr := 0x2800 + rune(cell)\n\t\t\tbufpos += utf8.EncodeRune(buf[bufpos:], r)\n\t\t}\n\t\tbufpos += copy(buf[bufpos:], trail[y/4])\n\t\tbuf[bufpos] = '\\n'\n\t\tif _, err := w.Write(buf[:bufpos+1]); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// TODO: These should be exported by go-moremath.\n\nfunc maxint(a, b int) int {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}\n\nfunc minint(a, b int) int {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/fit/loess.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage fit\n\nimport (\n\t\"math\"\n\t\"sort\"\n)\n\n// LOESS computes the locally-weighted least squares polynomial\n// regression to the data (xs[i], ys[i]). 0 < span <= 1 is the\n// smoothing parameter, where smaller values fit the data more\n// tightly. Degree is typically 2 and span is typically between 0.5\n// and 0.75.\n//\n// The regression is \"local\" because the weights used for the\n// polynomial regression depend on the x at which the regression\n// function is evaluated. The weight of observation i is\n// W((x-xs[i])/d(x)) where d(x) is the distance from x to the\n// span*len(xs)'th closest point to x and W is the tricube weight\n// function W(u) = (1-|u|³)³ for |u| < 1, 0 otherwise. One consequence\n// of this is that only the span*len(xs) points closest to x affect\n// the regression at x, and that the effect of these points falls off\n// further from x.\n//\n// References\n//\n// Cleveland, William S., and Susan J. Devlin. 
\"Locally weighted\n// regression: an approach to regression analysis by local fitting.\"\n// Journal of the American Statistical Association 83.403 (1988):\n// 596-610.\n//\n// http://www.itl.nist.gov/div898/handbook/pmd/section1/dep/dep144.htm\nfunc LOESS(xs, ys []float64, degree int, span float64) func(x float64) float64 {\n\tif degree < 0 {\n\t\tpanic(\"degree must be non-negative\")\n\t}\n\tif span <= 0 {\n\t\tpanic(\"span must be positive\")\n\t}\n\n\t// q is the window width in data points.\n\tq := int(math.Ceil(span * float64(len(xs))))\n\tif q >= len(xs) {\n\t\tq = len(xs)\n\t}\n\n\t// Sort xs.\n\tif !sort.Float64sAreSorted(xs) {\n\t\txs = append([]float64(nil), xs...)\n\t\tys = append([]float64(nil), ys...)\n\t\tsort.Sort(&pairSlice{xs, ys})\n\t}\n\n\treturn func(x float64) float64 {\n\t\t// Find the q points closest to x.\n\t\tn := 0\n\t\tif len(xs) > q {\n\t\t\tn = sort.Search(len(xs)-q, func(i int) bool {\n\t\t\t\t// The cut-off between xs[i:i+q] and\n\t\t\t\t// xs[i+1:i+1+q] is avg(xs[i],\n\t\t\t\t// xs[i+q]).\n\t\t\t\treturn (xs[i] + xs[i+q]) >= x*2\n\t\t\t})\n\t\t}\n\t\tclosest := xs[n : n+q]\n\n\t\t// Compute the distance to the q'th farthest point.\n\t\t// This will be either the first or last point in\n\t\t// closest.\n\t\td := x - closest[0]\n\t\tif closest[q-1]-x > d {\n\t\t\td = closest[q-1] - x\n\t\t}\n\n\t\t// Compute the weights.\n\t\tweights := make([]float64, q)\n\t\tfor i, c := range closest {\n\t\t\t// u is the normalized distance from x to\n\t\t\t// closest[i].\n\t\t\tu := math.Abs(x-c) / d\n\t\t\t// Compute the tricube weight (1-|u|³)³ for\n\t\t\t// |u| < 1. 
We know 0 <= u <= 1, so we can\n\t\t\t// simplify this a bit.\n\t\t\ttmp := 1 - u*u*u\n\t\t\tweights[i] = tmp * tmp * tmp\n\t\t}\n\n\t\t// Compute the polynomial regression at x.\n\t\tpr := PolynomialRegression(closest, ys[n:n+q], weights, degree)\n\n\t\t// Evaluate the polynomial at x.\n\t\treturn pr.F(x)\n\t}\n}\n\ntype pairSlice struct {\n\txs, ys []float64\n}\n\nfunc (s *pairSlice) Len() int {\n\treturn len(s.xs)\n}\n\nfunc (s *pairSlice) Less(i, j int) bool {\n\treturn s.xs[i] < s.xs[j]\n}\n\nfunc (s *pairSlice) Swap(i, j int) {\n\ts.xs[i], s.xs[j] = s.xs[j], s.xs[i]\n\ts.ys[i], s.ys[j] = s.ys[j], s.ys[i]\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/fit/loess_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage fit\n\nimport (\n\t\"testing\"\n\n\t\"github.com/aclements/go-moremath/internal/mathtest\"\n)\n\nfunc TestLOESS_NIST(t *testing.T) {\n\t// LOWESS example from the NIST handbook.\n\txs := []float64{0.5578196,\n\t\t2.0217271,\n\t\t2.5773252,\n\t\t3.4140288,\n\t\t4.3014084,\n\t\t4.7448394,\n\t\t5.1073781,\n\t\t6.5411662,\n\t\t6.7216176,\n\t\t7.2600583,\n\t\t8.1335874,\n\t\t9.1224379,\n\t\t11.9296663,\n\t\t12.3797674,\n\t\t13.2728619,\n\t\t14.2767453,\n\t\t15.3731026,\n\t\t15.6476637,\n\t\t18.5605355,\n\t\t18.5866354,\n\t\t18.7572812,\n\t}\n\tys := []float64{18.63654,\n\t\t103.49646,\n\t\t150.35391,\n\t\t190.51031,\n\t\t208.70115,\n\t\t213.71135,\n\t\t228.49353,\n\t\t233.55387,\n\t\t234.55054,\n\t\t223.89225,\n\t\t227.68339,\n\t\t223.91982,\n\t\t168.01999,\n\t\t164.95750,\n\t\t152.61107,\n\t\t160.78742,\n\t\t168.55567,\n\t\t152.42658,\n\t\t221.70702,\n\t\t222.69040,\n\t\t243.18828,\n\t}\n\n\tdefer mathtest.SetAeqDigits(mathtest.SetAeqDigits(7))\n\tmathtest.WantFunc(t, \"LOESS\", LOESS(xs, ys, 1, 0.33),\n\t\tmap[float64]float64{\n\t\t\t0.5578196: 20.59302,\n\t\t\t2.0217271: 107.1603,\n\t\t\t2.5773252: 139.7674,\n\t\t\t3.4140288: 174.2630,\n\t\t\t4.301408:  207.2334,\n\t\t\t4.744839:  216.6616,\n\t\t\t5.107378:  220.5445,\n\t\t\t6.541166:  229.8607,\n\t\t\t6.721618:  229.8347,\n\t\t\t7.260058:  229.4301,\n\t\t\t8.133587:  226.6045,\n\t\t\t9.122438:  220.3904,\n\t\t\t11.929666: 172.3480,\n\t\t\t12.379767: 163.8417,\n\t\t\t13.272862: 161.8490,\n\t\t\t14.27675:  160.3351,\n\t\t\t15.37310:  160.1920,\n\t\t\t15.64766:  161.0556,\n\t\t\t18.56054:  227.3400,\n\t\t\t18.58664:  227.8985,\n\t\t\t18.75728:  231.5586,\n\t\t})\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/fit/lsquares.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage fit\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"strings\"\n\n\t\"github.com/gonum/matrix/mat64\"\n)\n\n// LinearLeastSquares computes the least squares fit for the function\n//\n//   f(x) = Β₀terms₀(x) + Β₁terms₁(x) + ...\n//\n// to the data (xs[i], ys[i]). It returns the parameters Β₀, Β₁, ...\n// that minimize the sum of the squares of the residuals of f:\n//\n//   ∑ (ys[i] - f(xs[i]))²\n//\n// If weights is non-nil, it is used to weight these residuals:\n//\n//   ∑ weights[i] × (ys[i] - f(xs[i]))²\n//\n// The function f is specified by one Go function for each linear\n// term. For efficiency, the Go function is vectorized: it will be\n// passed a slice of x values in xs and must fill the slice termOut\n// with the value of the term for each value in xs.\n//\n// Note that this is called a \"linear\" least squares fit because the\n// fitted function is linear in the computed parameters. The function\n// need not be linear in x.\nfunc LinearLeastSquares(xs, ys, weights []float64, terms ...func(xs, termOut []float64)) (params []float64) {\n\t// The optimal parameters are found by solving for Β̂ in the\n\t// \"normal equations\":\n\t//\n\t//    (𝐗ᵀ𝐖𝐗)Β̂ = 𝐗ᵀ𝐖𝐲\n\t//\n\t// where 𝐖 is a diagonal weight matrix (or the identity matrix\n\t// for the unweighted case).\n\n\t// TODO: Consider using orthogonal decomposition.\n\n\t// TODO: Consider providing a multidimensional version of\n\t// this.\n\n\tif len(xs) != len(ys) {\n\t\tpanic(\"len(xs) != len(ys)\")\n\t}\n\tif weights != nil && len(xs) != len(weights) {\n\t\tpanic(\"len(xs) != len(weights)\")\n\t}\n\n\t// Construct 𝐗ᵀ. 
This is the more convenient representation\n\t// for efficiently calling the term functions.\n\txTVals := make([]float64, len(terms)*len(xs))\n\tfor i, term := range terms {\n\t\tterm(xs, xTVals[i*len(xs):i*len(xs)+len(xs)])\n\t}\n\tXT := mat64.NewDense(len(terms), len(xs), xTVals)\n\tX := XT.T()\n\n\t// Construct 𝐗ᵀ𝐖.\n\tvar XTW *mat64.Dense\n\tif weights == nil {\n\t\t// 𝐖 is the identity matrix.\n\t\tXTW = XT\n\t} else {\n\t\t// Since 𝐖 is a diagonal matrix, we do this directly.\n\t\tXTW = mat64.DenseCopyOf(XT)\n\t\tWDiag := mat64.NewVector(len(weights), weights)\n\t\tfor row := 0; row < len(terms); row++ {\n\t\t\trowView := XTW.RowView(row)\n\t\t\trowView.MulElemVec(rowView, WDiag)\n\t\t}\n\t}\n\n\t// Construct 𝐲.\n\ty := mat64.NewVector(len(ys), ys)\n\n\t// Compute Β̂.\n\tlhs := mat64.NewDense(len(terms), len(terms), nil)\n\tlhs.Mul(XTW, X)\n\n\trhs := mat64.NewVector(len(terms), nil)\n\trhs.MulVec(XTW, y)\n\n\tBVals := make([]float64, len(terms))\n\tB := mat64.NewVector(len(terms), BVals)\n\tB.SolveVec(lhs, rhs)\n\treturn BVals\n}\n\n// PolynomialRegressionResult is the resulting polynomial from a\n// PolynomialRegression.\n//\n// TODO: Should this just be a least squares regression result? 
We\n// have the terms functions, so we can construct F, though it won't be\n// very efficient.\ntype PolynomialRegressionResult struct {\n\t// Coefficients is the coefficients of the fitted polynomial.\n\t// Coefficients[i] is the coefficient of the x^i term.\n\tCoefficients []float64\n\n\t// F evaluates the fitted polynomial at x.\n\tF func(x float64) float64\n}\n\nfunc (r PolynomialRegressionResult) String() string {\n\tvar terms []string\n\tfor pow, factor := range r.Coefficients {\n\t\tswitch {\n\t\tcase factor == 0:\n\t\t\tcontinue\n\t\tcase pow == 0:\n\t\t\tterms = append(terms, fmt.Sprintf(\"%v\", factor))\n\t\tcase pow == 1:\n\t\t\tterms = append(terms, fmt.Sprintf(\"%vx\", factor))\n\t\tdefault:\n\t\t\tterms = append(terms, fmt.Sprintf(\"%vx^%d\", factor, pow))\n\t\t}\n\t}\n\tif len(terms) == 0 {\n\t\treturn \"0\"\n\t}\n\treturn strings.Join(terms, \"+\")\n}\n\n// PolynomialRegression performs a least squares regression with a\n// polynomial of the given degree. If weights is non-nil, it is used\n// to weight the residuals.\nfunc PolynomialRegression(xs, ys, weights []float64, degree int) PolynomialRegressionResult {\n\tterms := make([]func(xs, termOut []float64), degree+1)\n\tterms[0] = func(xs, termsOut []float64) {\n\t\tfor i := range termsOut {\n\t\t\ttermsOut[i] = 1\n\t\t}\n\t}\n\tif degree >= 1 {\n\t\tterms[1] = func(xs, termOut []float64) {\n\t\t\tcopy(termOut, xs)\n\t\t}\n\t}\n\tif degree >= 2 {\n\t\tterms[2] = func(xs, termOut []float64) {\n\t\t\tfor i, x := range xs {\n\t\t\t\ttermOut[i] = x * x\n\t\t\t}\n\t\t}\n\t}\n\tfor d := 3; d < len(terms); d++ {\n\t\td := d\n\t\tterms[d] = func(xs, termOut []float64) {\n\t\t\tfor i, x := range xs {\n\t\t\t\ttermOut[i] = math.Pow(x, float64(d))\n\t\t\t}\n\t\t}\n\t}\n\n\tcoeffs := LinearLeastSquares(xs, ys, weights, terms...)\n\tf := func(x float64) float64 {\n\t\ty := coeffs[0]\n\t\txp := x\n\t\tfor _, c := range coeffs[1:] {\n\t\t\ty += xp * c\n\t\t\txp *= x\n\t\t}\n\t\treturn y\n\t}\n\treturn 
PolynomialRegressionResult{coeffs, f}\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/fit/package.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package fit provides functions for fitting models to data.\npackage fit\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/internal/mathtest/mathtest.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage mathtest\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"sort\"\n\t\"strings\"\n\t\"testing\"\n)\n\nvar (\n\taeqDigits int\n\taeqFactor float64\n)\n\nfunc SetAeqDigits(digits int) int {\n\told := aeqDigits\n\taeqDigits = digits\n\taeqFactor = 1 - math.Pow(10, float64(-digits+1))\n\treturn old\n}\n\nfunc init() {\n\tSetAeqDigits(8)\n}\n\n// Aeq returns true if expect and got are equal up to the current\n// number of aeq digits set by SetAeqDigits. By default, this is 8\n// significant figures (1 part in 100 million).\nfunc Aeq(expect, got float64) bool {\n\tif expect < 0 && got < 0 {\n\t\texpect, got = -expect, -got\n\t}\n\treturn expect*aeqFactor <= got && got*aeqFactor <= expect\n}\n\nfunc WantFunc(t *testing.T, name string, f func(float64) float64, vals map[float64]float64) {\n\txs := make([]float64, 0, len(vals))\n\tfor x := range vals {\n\t\txs = append(xs, x)\n\t}\n\tsort.Float64s(xs)\n\n\tfor _, x := range xs {\n\t\twant, got := vals[x], f(x)\n\t\tif math.IsNaN(want) && math.IsNaN(got) || Aeq(want, got) {\n\t\t\tcontinue\n\t\t}\n\t\tvar label string\n\t\tif strings.Contains(name, \"%v\") {\n\t\t\tlabel = fmt.Sprintf(name, x)\n\t\t} else {\n\t\t\tlabel = fmt.Sprintf(\"%s(%v)\", name, x)\n\t\t}\n\t\tt.Errorf(\"want %s=%v, got %v\", label, want, got)\n\t}\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/mathx/beta.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage mathx\n\nimport \"math\"\n\nfunc lgamma(x float64) float64 {\n\ty, _ := math.Lgamma(x)\n\treturn y\n}\n\n// Beta returns the value of the complete beta function B(a, b).\nfunc Beta(a, b float64) float64 {\n\t// B(x,y) = Γ(x)Γ(y) / Γ(x+y)\n\treturn math.Exp(lgamma(a) + lgamma(b) - lgamma(a+b))\n}\n\n// BetaInc returns the value of the regularized incomplete beta\n// function Iₓ(a, b).\n//\n// This is not to be confused with the \"incomplete beta function\",\n// which can be computed as BetaInc(x, a, b)*Beta(a, b).\n//\n// If x < 0 or x > 1, returns NaN.\nfunc BetaInc(x, a, b float64) float64 {\n\t// Based on Numerical Recipes in C, section 6.4. This uses the\n\t// continued fraction definition of I:\n\t//\n\t//  (xᵃ*(1-x)ᵇ)/(a*B(a,b)) * (1/(1+(d₁/(1+(d₂/(1+...))))))\n\t//\n\t// where B(a,b) is the beta function and\n\t//\n\t//  d_{2m+1} = -(a+m)(a+b+m)x/((a+2m)(a+2m+1))\n\t//  d_{2m}   = m(b-m)x/((a+2m-1)(a+2m))\n\tif x < 0 || x > 1 {\n\t\treturn math.NaN()\n\t}\n\tbt := 0.0\n\tif 0 < x && x < 1 {\n\t\t// Compute the coefficient before the continued\n\t\t// fraction.\n\t\tbt = math.Exp(lgamma(a+b) - lgamma(a) - lgamma(b) +\n\t\t\ta*math.Log(x) + b*math.Log(1-x))\n\t}\n\tif x < (a+1)/(a+b+2) {\n\t\t// Compute continued fraction directly.\n\t\treturn bt * betacf(x, a, b) / a\n\t} else {\n\t\t// Compute continued fraction after symmetry transform.\n\t\treturn 1 - bt*betacf(1-x, b, a)/b\n\t}\n}\n\n// betacf is the continued fraction component of the regularized\n// incomplete beta function Iₓ(a, b).\nfunc betacf(x, a, b float64) float64 {\n\tconst maxIterations = 200\n\tconst epsilon = 3e-14\n\n\traiseZero := func(z float64) float64 {\n\t\tif math.Abs(z) < math.SmallestNonzeroFloat64 {\n\t\t\treturn math.SmallestNonzeroFloat64\n\t\t}\n\t\treturn z\n\t}\n\n\tc := 1.0\n\td := 1 / 
raiseZero(1-(a+b)*x/(a+1))\n\th := d\n\tfor m := 1; m <= maxIterations; m++ {\n\t\tmf := float64(m)\n\n\t\t// Even step of the recurrence.\n\t\tnumer := mf * (b - mf) * x / ((a + 2*mf - 1) * (a + 2*mf))\n\t\td = 1 / raiseZero(1+numer*d)\n\t\tc = raiseZero(1 + numer/c)\n\t\th *= d * c\n\n\t\t// Odd step of the recurrence.\n\t\tnumer = -(a + mf) * (a + b + mf) * x / ((a + 2*mf) * (a + 2*mf + 1))\n\t\td = 1 / raiseZero(1+numer*d)\n\t\tc = raiseZero(1 + numer/c)\n\t\thfac := d * c\n\t\th *= hfac\n\n\t\tif math.Abs(hfac-1) < epsilon {\n\t\t\treturn h\n\t\t}\n\t}\n\tpanic(\"betainc: a or b too big; failed to converge\")\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/mathx/beta_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage mathx\n\nimport (\n\t\"testing\"\n\n\t. \"github.com/aclements/go-moremath/internal/mathtest\"\n)\n\nfunc TestBetaInc(t *testing.T) {\n\t// Example values from MATLAB betainc documentation.\n\tWantFunc(t, \"I_0.5(%v, 3)\",\n\t\tfunc(a float64) float64 { return BetaInc(0.5, a, 3) },\n\t\tmap[float64]float64{\n\t\t\t0:  1.00000000000000,\n\t\t\t1:  0.87500000000000,\n\t\t\t2:  0.68750000000000,\n\t\t\t3:  0.50000000000000,\n\t\t\t4:  0.34375000000000,\n\t\t\t5:  0.22656250000000,\n\t\t\t6:  0.14453125000000,\n\t\t\t7:  0.08984375000000,\n\t\t\t8:  0.05468750000000,\n\t\t\t9:  0.03271484375000,\n\t\t\t10: 0.01928710937500})\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/mathx/choose.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage mathx\n\nimport \"math\"\n\nconst smallFactLimit = 20 // 20! => 62 bits\nvar smallFact [smallFactLimit + 1]int64\n\nfunc init() {\n\tsmallFact[0] = 1\n\tfact := int64(1)\n\tfor n := int64(1); n <= smallFactLimit; n++ {\n\t\tfact *= n\n\t\tsmallFact[n] = fact\n\t}\n}\n\n// Choose returns the binomial coefficient of n and k.\nfunc Choose(n, k int) float64 {\n\tif k == 0 || k == n {\n\t\treturn 1\n\t}\n\tif k < 0 || n < k {\n\t\treturn 0\n\t}\n\tif n <= smallFactLimit { // Implies k <= smallFactLimit\n\t\t// It's faster to do several integer multiplications\n\t\t// than it is to do an extra integer division.\n\t\t// Remarkably, this is also faster than pre-computing\n\t\t// Pascal's triangle (presumably because this is very\n\t\t// cache efficient).\n\t\tnumer := int64(1)\n\t\tfor n1 := int64(n - (k - 1)); n1 <= int64(n); n1++ {\n\t\t\tnumer *= n1\n\t\t}\n\t\tdenom := smallFact[k]\n\t\treturn float64(numer / denom)\n\t}\n\n\treturn math.Exp(lchoose(n, k))\n}\n\n// Lchoose returns math.Log(Choose(n, k)).\nfunc Lchoose(n, k int) float64 {\n\tif k == 0 || k == n {\n\t\treturn 0\n\t}\n\tif k < 0 || n < k {\n\t\treturn math.NaN()\n\t}\n\treturn lchoose(n, k)\n}\n\nfunc lchoose(n, k int) float64 {\n\ta, _ := math.Lgamma(float64(n + 1))\n\tb, _ := math.Lgamma(float64(k + 1))\n\tc, _ := math.Lgamma(float64(n - k + 1))\n\treturn a - b - c\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/mathx/gamma.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage mathx\n\nimport \"math\"\n\n// GammaInc returns the value of the incomplete gamma function (also\n// known as the regularized gamma function):\n//\n//   P(a, x) = 1 / Γ(a) * ∫₀ˣ exp(-t) t**(a-1) dt\nfunc GammaInc(a, x float64) float64 {\n\t// Based on Numerical Recipes in C, section 6.2.\n\n\tif a <= 0 || x < 0 || math.IsNaN(a) || math.IsNaN(x) {\n\t\treturn math.NaN()\n\t}\n\n\tif x < a+1 {\n\t\t// Use the series representation, which converges more\n\t\t// rapidly in this range.\n\t\treturn gammaIncSeries(a, x)\n\t} else {\n\t\t// Use the continued fraction representation.\n\t\treturn 1 - gammaIncCF(a, x)\n\t}\n}\n\n// GammaIncComp returns the complement of the incomplete gamma\n// function 1 - GammaInc(a, x). This is more numerically stable for\n// values near 0.\nfunc GammaIncComp(a, x float64) float64 {\n\tif a <= 0 || x < 0 || math.IsNaN(a) || math.IsNaN(x) {\n\t\treturn math.NaN()\n\t}\n\n\tif x < a+1 {\n\t\treturn 1 - gammaIncSeries(a, x)\n\t} else {\n\t\treturn gammaIncCF(a, x)\n\t}\n}\n\nfunc gammaIncSeries(a, x float64) float64 {\n\tconst maxIterations = 200\n\tconst epsilon = 3e-14\n\n\tif x == 0 {\n\t\treturn 0\n\t}\n\n\tap := a\n\tdel := 1 / a\n\tsum := del\n\tfor n := 0; n < maxIterations; n++ {\n\t\tap++\n\t\tdel *= x / ap\n\t\tsum += del\n\t\tif math.Abs(del) < math.Abs(sum)*epsilon {\n\t\t\treturn sum * math.Exp(-x+a*math.Log(x)-lgamma(a))\n\t\t}\n\t}\n\tpanic(\"a too large; failed to converge\")\n}\n\nfunc gammaIncCF(a, x float64) float64 {\n\tconst maxIterations = 200\n\tconst epsilon = 3e-14\n\n\traiseZero := func(z float64) float64 {\n\t\tif math.Abs(z) < math.SmallestNonzeroFloat64 {\n\t\t\treturn math.SmallestNonzeroFloat64\n\t\t}\n\t\treturn z\n\t}\n\n\tb := x + 1 - a\n\tc := math.MaxFloat64\n\td := 1 / b\n\th := d\n\n\tfor i := 1; i <= maxIterations; i++ 
{\n\t\tan := -float64(i) * (float64(i) - a)\n\t\tb += 2\n\t\td = raiseZero(an*d + b)\n\t\tc = raiseZero(b + an/c)\n\t\td = 1 / d\n\t\tdel := d * c\n\t\th *= del\n\t\tif math.Abs(del-1) < epsilon {\n\t\t\treturn math.Exp(-x+a*math.Log(x)-lgamma(a)) * h\n\t\t}\n\t}\n\tpanic(\"a too large; failed to converge\")\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/mathx/gamma_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage mathx\n\nimport (\n\t\"testing\"\n\n\t. \"github.com/aclements/go-moremath/internal/mathtest\"\n)\n\nfunc TestGammaInc(t *testing.T) {\n\tWantFunc(t, \"GammaInc(1, %v)\",\n\t\tfunc(x float64) float64 { return GammaInc(1, x) },\n\t\tmap[float64]float64{\n\t\t\t0.1: 0.095162581964040441,\n\t\t\t0.2: 0.18126924692201815,\n\t\t\t0.3: 0.25918177931828207,\n\t\t\t0.4: 0.32967995396436056,\n\t\t\t0.5: 0.39346934028736652,\n\t\t\t0.6: 0.45118836390597361,\n\t\t\t0.7: 0.50341469620859047,\n\t\t\t0.8: 0.55067103588277833,\n\t\t\t0.9: 0.59343034025940089,\n\t\t\t1:   0.63212055882855778,\n\t\t\t2:   0.86466471676338730,\n\t\t\t3:   0.95021293163213605,\n\t\t\t4:   0.98168436111126578,\n\t\t\t5:   0.99326205300091452,\n\t\t\t6:   0.99752124782333362,\n\t\t\t7:   0.99908811803444553,\n\t\t\t8:   0.99966453737209748,\n\t\t\t9:   0.99987659019591335,\n\t\t\t10:  0.99995460007023750,\n\t\t})\n\tWantFunc(t, \"GammaInc(2, %v)\",\n\t\tfunc(x float64) float64 { return GammaInc(2, x) },\n\t\tmap[float64]float64{\n\t\t\t1:  0.26424111765711528,\n\t\t\t2:  0.59399415029016167,\n\t\t\t3:  0.80085172652854419,\n\t\t\t4:  0.90842180555632912,\n\t\t\t5:  0.95957231800548726,\n\t\t\t6:  0.98264873476333547,\n\t\t\t7:  0.99270494427556388,\n\t\t\t8:  0.99698083634887735,\n\t\t\t9:  0.99876590195913317,\n\t\t\t10: 0.99950060077261271,\n\t\t})\n\n\t// TODO: Test strange values.\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/mathx/package.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package mathx implements special functions not provided by the\n// standard math package.\npackage mathx // import \"github.com/aclements/go-moremath/mathx\"\n\nimport \"math\"\n\nvar nan = math.NaN()\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/mathx/sign.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage mathx\n\n// Sign returns the sign of x: -1 if x < 0, 0 if x == 0, 1 if x > 0.\n// If x is NaN, it returns NaN.\nfunc Sign(x float64) float64 {\n\tif x == 0 {\n\t\treturn 0\n\t} else if x < 0 {\n\t\treturn -1\n\t} else if x > 0 {\n\t\treturn 1\n\t}\n\treturn nan\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/scale/err.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage scale\n\n// RangeErr is an error that indicates some argument or value is out\n// of range.\ntype RangeErr string\n\nfunc (r RangeErr) Error() string {\n\treturn string(r)\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/scale/interface.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage scale\n\n// A Quantative scale is an invertible function from some continuous\n// input domain to an output range of [0, 1].\ntype Quantitative interface {\n\t// Map maps from a value x in the input domain to [0, 1]. If x\n\t// is outside the input domain and clamping is enabled, x will\n\t// first be clamped to the input domain.\n\tMap(x float64) float64\n\n\t// Unmap is the inverse of Map. That is, if x is in the input\n\t// domain or clamping is disabled, x = Unmap(Map(x)). If\n\t// clamping is enabled and y is outside [0,1], the results are\n\t// undefined.\n\tUnmap(y float64) float64\n\n\t// SetClamp sets the clamping mode of this scale.\n\tSetClamp(bool)\n\n\t// Ticks returns major and minor ticks that satisfy the\n\t// constraints given by o. These ticks will have \"nice\" values\n\t// within the input domain. Both arrays are sorted in\n\t// ascending order and minor includes ticks in major.\n\tTicks(o TickOptions) (major, minor []float64)\n\n\t// Nice expands the input domain of this scale to \"nice\"\n\t// values for covering the input domain satisfying the\n\t// constraints given by o. 
After calling Nice(o), the first\n\t// and last major ticks returned by Ticks(o) will equal the\n\t// lower and upper bounds of the input domain.\n\tNice(o TickOptions)\n\n\t// A Quantitative scale is also a Ticker.\n\tTicker\n}\n\n// A QQ maps from a source Quantitative scale to a destination\n// Quantitative scale.\ntype QQ struct {\n\tSrc, Dest Quantitative\n}\n\n// Map maps from a value x in the source scale's input domain to a\n// value y in the destination scale's input domain.\nfunc (q QQ) Map(x float64) float64 {\n\treturn q.Dest.Unmap(q.Src.Map(x))\n}\n\n// Unmap maps from a value y in the destination scale's input domain to\n// a value x in the source scale's input domain.\nfunc (q QQ) Unmap(x float64) float64 {\n\treturn q.Src.Unmap(q.Dest.Map(x))\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/scale/linear.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage scale\n\nimport (\n\t\"math\"\n\n\t\"github.com/aclements/go-moremath/vec\"\n)\n\ntype Linear struct {\n\t// Min and Max specify the lower and upper bounds of the input\n\t// domain. The input domain [Min, Max] will be linearly mapped\n\t// to the output range [0, 1].\n\tMin, Max float64\n\n\t// Base specifies a base for computing ticks. Ticks will be\n\t// placed at powers of Base; that is at n*Base^l for n ∈ ℤ and\n\t// some integer tick level l. As a special case, a base of 0\n\t// alternates between ticks at n*10^⌊l/2⌋ and ticks at\n\t// 5n*10^⌊l/2⌋.\n\tBase int\n\n\t// If Clamp is true, the input is clamped to [Min, Max].\n\tClamp bool\n}\n\n// *Linear is a Quantitative scale.\nvar _ Quantitative = &Linear{}\n\nfunc (s Linear) Map(x float64) float64 {\n\tif s.Min == s.Max {\n\t\treturn 0.5\n\t}\n\ty := (x - s.Min) / (s.Max - s.Min)\n\tif s.Clamp {\n\t\ty = clamp(y)\n\t}\n\treturn y\n}\n\nfunc (s Linear) Unmap(y float64) float64 {\n\treturn y*(s.Max-s.Min) + s.Min\n}\n\nfunc (s *Linear) SetClamp(clamp bool) {\n\ts.Clamp = clamp\n}\n\n// ebase sanity checks and returns the \"effective base\" of this scale.\n// If s.Base is 0, it returns 10. 
If s.Base is 1 or negative, it\n// panics.\nfunc (s Linear) ebase() int {\n\tif s.Base == 0 {\n\t\treturn 10\n\t} else if s.Base == 1 {\n\t\tpanic(\"scale.Linear cannot have a base of 1\")\n\t} else if s.Base < 0 {\n\t\tpanic(\"scale.Linear cannot have a negative base\")\n\t}\n\treturn s.Base\n}\n\n// In the default base, the tick levels are:\n//\n// Level -2 is a major tick at -0.1, 0, 0.1, etc.\n// Level -1 is a major tick at -1, -0.5, 0, 0.5, 1, etc.\n// Level 0 is a major tick at -1, 0, 1, etc.\n// Level 1 is a major tick at -10, -5, 0, 5, 10, etc.\n// Level 2 is a major tick at -10, 0, 10, etc.\n//\n// That is, level 0 is unit intervals, and we alternate between\n// interval *= 5 and interval *= 2. Combined, these give us interval\n// *= 10 at every other level.\n//\n// In non-default bases, level 0 is the same and we alternate between\n// interval *= 1 (for consistency) and interval *= base.\n\nfunc (s *Linear) guessLevel() int {\n\treturn 2 * int(math.Log(s.Max-s.Min)/math.Log(float64(s.ebase())))\n}\n\nfunc (s *Linear) spacingAtLevel(level int, roundOut bool) (firstN, lastN, spacing float64) {\n\t// Watch out! 
Integer division is round toward zero, but we\n\t// need round down, and modulus is signed.\n\texp, double := math.Floor(float64(level)/2), (level%2 == 1 || level%2 == -1)\n\tspacing = math.Pow(float64(s.ebase()), exp)\n\tif double && s.Base == 0 {\n\t\tspacing *= 5\n\t}\n\n\t// Add a tiny bit of slack to the floor and ceiling below so\n\t// that rounding errors don't significantly affect tick marks.\n\tslack := (s.Max - s.Min) * 1e-10\n\n\tif roundOut {\n\t\tfirstN = math.Floor((s.Min + slack) / spacing)\n\t\tlastN = math.Ceil((s.Max - slack) / spacing)\n\t} else {\n\t\tfirstN = math.Ceil((s.Min - slack) / spacing)\n\t\tlastN = math.Floor((s.Max + slack) / spacing)\n\t}\n\treturn\n}\n\n// CountTicks returns the number of ticks in [s.Min, s.Max] at the\n// given tick level.\nfunc (s Linear) CountTicks(level int) int {\n\treturn linearTicker{&s, false}.CountTicks(level)\n}\n\n// TicksAtLevel returns the tick locations in [s.Min, s.Max] as a\n// []float64 at the given tick level in ascending order.\nfunc (s Linear) TicksAtLevel(level int) interface{} {\n\treturn linearTicker{&s, false}.TicksAtLevel(level)\n}\n\ntype linearTicker struct {\n\ts        *Linear\n\troundOut bool\n}\n\nfunc (t linearTicker) CountTicks(level int) int {\n\tfirstN, lastN, _ := t.s.spacingAtLevel(level, t.roundOut)\n\treturn int(lastN - firstN + 1)\n}\n\nfunc (t linearTicker) TicksAtLevel(level int) interface{} {\n\tfirstN, lastN, spacing := t.s.spacingAtLevel(level, t.roundOut)\n\tn := int(lastN - firstN + 1)\n\treturn vec.Linspace(firstN*spacing, lastN*spacing, n)\n}\n\nfunc (s Linear) Ticks(o TickOptions) (major, minor []float64) {\n\tif o.Max <= 0 {\n\t\treturn nil, nil\n\t} else if s.Min == s.Max {\n\t\treturn []float64{s.Min}, []float64{s.Min}\n\t} else if s.Min > s.Max {\n\t\ts.Min, s.Max = s.Max, s.Min\n\t}\n\n\tlevel, ok := o.FindLevel(linearTicker{&s, false}, s.guessLevel())\n\tif !ok {\n\t\treturn nil, nil\n\t}\n\treturn s.TicksAtLevel(level).([]float64), s.TicksAtLevel(level - 
1).([]float64)\n}\n\nfunc (s *Linear) Nice(o TickOptions) {\n\tif s.Min == s.Max {\n\t\ts.Min -= 0.5\n\t\ts.Max += 0.5\n\t} else if s.Min > s.Max {\n\t\ts.Min, s.Max = s.Max, s.Min\n\t}\n\n\tlevel, ok := o.FindLevel(linearTicker{s, true}, s.guessLevel())\n\tif !ok {\n\t\treturn\n\t}\n\n\tfirstN, lastN, spacing := s.spacingAtLevel(level, true)\n\ts.Min = firstN * spacing\n\ts.Max = lastN * spacing\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/scale/linear_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage scale\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/aclements/go-moremath/internal/mathtest\"\n\t\"github.com/aclements/go-moremath/vec\"\n)\n\nfunc TestLinear(t *testing.T) {\n\tl := Linear{Min: -10, Max: 10}\n\tmathtest.WantFunc(t, fmt.Sprintf(\"%v.Map\", l), l.Map,\n\t\tmap[float64]float64{\n\t\t\t-20: -0.5,\n\t\t\t-10: 0,\n\t\t\t0:   0.5,\n\t\t\t10:  1,\n\t\t\t20:  1.5,\n\t\t})\n\tmathtest.WantFunc(t, fmt.Sprintf(\"%v.Unmap\", l), l.Unmap,\n\t\tmap[float64]float64{\n\t\t\t-0.5: -20,\n\t\t\t0:    -10,\n\t\t\t0.5:  0,\n\t\t\t1:    10,\n\t\t\t1.5:  20,\n\t\t})\n\n\tl.SetClamp(true)\n\tmathtest.WantFunc(t, fmt.Sprintf(\"%v.Map\", l), l.Map,\n\t\tmap[float64]float64{\n\t\t\t-20: 0,\n\t\t\t-10: 0,\n\t\t\t0:   0.5,\n\t\t\t10:  1,\n\t\t\t20:  1,\n\t\t})\n\tmathtest.WantFunc(t, fmt.Sprintf(\"%v.Unmap\", l), l.Unmap,\n\t\tmap[float64]float64{\n\t\t\t0:   -10,\n\t\t\t0.5: 0,\n\t\t\t1:   10,\n\t\t})\n\n\tl = Linear{Min: 5, Max: 5}\n\tmathtest.WantFunc(t, fmt.Sprintf(\"%v.Map\", l), l.Map,\n\t\tmap[float64]float64{\n\t\t\t-10: 0.5,\n\t\t\t0:   0.5,\n\t\t\t10:  0.5,\n\t\t})\n\tmathtest.WantFunc(t, fmt.Sprintf(\"%v.Unmap\", l), l.Unmap,\n\t\tmap[float64]float64{\n\t\t\t0:   5,\n\t\t\t0.5: 5,\n\t\t\t1:   5,\n\t\t})\n}\n\nfunc ticksEq(major, wmajor, minor, wminor []float64) bool {\n\t// TODO: It would be nice to have a deep Aeq. It could also\n\t// support checking predicates like LE(5) or IsNaN within\n\t// structures, which could be used in WantFunc. 
Heck, deep Aeq\n\t// could subsume WantFunc where the left side is a function\n\t// and the right side is a map from arguments to results, but\n\t// maybe it would be harder to produce a good error message.\n\tif len(major) != len(wmajor) || len(minor) != len(wminor) {\n\t\treturn false\n\t}\n\tfor i, v := range major {\n\t\tif !mathtest.Aeq(wmajor[i], v) {\n\t\t\treturn false\n\t\t}\n\t}\n\tfor i, v := range minor {\n\t\tif !mathtest.Aeq(wminor[i], v) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc TestLinearTicks(t *testing.T) {\n\tm := func(m int) TickOptions {\n\t\treturn TickOptions{Max: m}\n\t}\n\n\tl := Linear{Min: 0, Max: 100}\n\tmajor, minor := l.Ticks(m(5))\n\twmajor, wminor := vec.Linspace(0, 100, 3), vec.Linspace(0, 100, 11)\n\tif !ticksEq(major, wmajor, minor, wminor) {\n\t\tt.Errorf(\"%v.Ticks(5) = %v, %v; want %v, %v\", l, major, minor, wmajor, wminor)\n\t}\n\n\tmajor, minor = l.Ticks(m(2))\n\twmajor, wminor = vec.Linspace(0, 100, 2), vec.Linspace(0, 100, 3)\n\tif !ticksEq(major, wmajor, minor, wminor) {\n\t\tt.Errorf(\"%v.Ticks(2) = %v, %v; want %v, %v\", l, major, minor, wmajor, wminor)\n\t}\n\n\tl.Nice(m(2))\n\tmajor, minor = l.Ticks(m(2))\n\tif !ticksEq(major, wmajor, minor, wminor) {\n\t\tt.Errorf(\"%v.Ticks(2) = %v, %v; want %v, %v\", l, major, minor, wmajor, wminor)\n\t}\n\n\tl = Linear{Min: 15.4, Max: 16.6}\n\tmajor, minor = l.Ticks(m(5))\n\twmajor, wminor = vec.Linspace(15.5, 16.5, 3), vec.Linspace(15.4, 16.6, 13)\n\tif !ticksEq(major, wmajor, minor, wminor) {\n\t\tt.Errorf(\"%v.Ticks(5) = %v, %v; want %v, %v\", l, major, minor, wmajor, wminor)\n\t}\n\n\tl.Nice(m(5))\n\tmajor, minor = l.Ticks(m(5))\n\twmajor, wminor = vec.Linspace(15, 17, 5), vec.Linspace(15, 17, 21)\n\tif !ticksEq(major, wmajor, minor, wminor) {\n\t\tt.Errorf(\"%v.Ticks(5) = %v, %v; want %v, %v\", l, major, minor, wmajor, wminor)\n\t}\n\n\t// Test negative tick levels.\n\tl = Linear{Min: 9.9989, Max: 10}\n\tmajor, minor = l.Ticks(m(2))\n\twmajor, wminor = 
vec.Linspace(9.999, 10, 2), vec.Linspace(9.999, 10, 3)\n\tif !ticksEq(major, wmajor, minor, wminor) {\n\t\tt.Errorf(\"%v.Ticks(2) = %v, %v; want %v, %v\", l, major, minor, wmajor, wminor)\n\t}\n\n\tl.Nice(m(2))\n\tmajor, minor = l.Ticks(m(2))\n\twmajor, wminor = vec.Linspace(9.995, 10, 2), vec.Linspace(9.995, 10, 6)\n\tif !ticksEq(major, wmajor, minor, wminor) {\n\t\tt.Errorf(\"%v.Ticks(2) = %v, %v; want %v, %v\", l, major, minor, wmajor, wminor)\n\t}\n\n\t// Test non-default bases.\n\tl = Linear{Min: 2, Max: 9, Base: 2}\n\tmajor, minor = l.Ticks(m(5))\n\twmajor, wminor = vec.Linspace(2, 8, 4), vec.Linspace(2, 9, 8)\n\tif !ticksEq(major, wmajor, minor, wminor) {\n\t\tt.Errorf(\"%v.Ticks(5) = %v, %v; want %v, %v\", l, major, minor, wmajor, wminor)\n\t}\n\n\tl.Nice(m(5))\n\tmajor, minor = l.Ticks(m(5))\n\twmajor, wminor = vec.Linspace(2, 10, 5), vec.Linspace(2, 10, 9)\n\tif !ticksEq(major, wmajor, minor, wminor) {\n\t\tt.Errorf(\"%v.Ticks(5) = %v, %v; want %v, %v\", l, major, minor, wmajor, wminor)\n\t}\n\n\t// Test Min==Max.\n\tl = Linear{Min: 2, Max: 2}\n\tmajor, minor = l.Ticks(m(5))\n\twmajor, wminor = []float64{2}, []float64{2}\n\tif !ticksEq(major, wmajor, minor, wminor) {\n\t\tt.Errorf(\"%v.Ticks(5) = %v, %v; want %v, %v\", l, major, minor, wmajor, wminor)\n\t}\n\n\tl.Nice(m(5))\n\tmajor, minor = l.Ticks(m(5))\n\twmajor, wminor = vec.Linspace(1.5, 2.5, 3), vec.Linspace(1.5, 2.5, 11)\n\tif !ticksEq(major, wmajor, minor, wminor) {\n\t\tt.Errorf(\"%v.Ticks(5) = %v, %v; want %v, %v\", l, major, minor, wmajor, wminor)\n\t}\n\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/scale/log.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage scale\n\nimport \"math\"\n\ntype Log struct {\n\tprivate struct{}\n\n\t// Min and Max specify the lower and upper bounds of the input\n\t// domain. The input range [Min, Max] will be mapped to the\n\t// output range [0, 1]. The range [Min, Max] must not include\n\t// 0.\n\tMin, Max float64\n\n\t// Base specifies the base of the logarithm for computing\n\t// ticks. Ticks will be placed at Base^((2^l)*n) for tick\n\t// level l ∈ ℕ and n ∈ ℤ. Typically l is 0, in which case this\n\t// is simply Base^n.\n\tBase int\n\n\t// If Clamp is true, the input is clamped to [Min, Max].\n\tClamp bool\n\n\t// TODO: Let the user specify the minor ticks. Default to [1,\n\t// .. 9], but [1, 3] and [1, 2, 5] are common.\n}\n\n// *Log is a Quantitative scale.\nvar _ Quantitative = &Log{}\n\n// NewLog constructs a Log scale. 
If the arguments are out of range,\n// it returns a RangeErr.\nfunc NewLog(min, max float64, base int) (Log, error) {\n\tif min > max {\n\t\tmin, max = max, min\n\t}\n\n\tif base <= 1 {\n\t\treturn Log{}, RangeErr(\"Log scale base must be 2 or more\")\n\t}\n\tif min <= 0 && max >= 0 {\n\t\treturn Log{}, RangeErr(\"Log scale range cannot include 0\")\n\t}\n\n\treturn Log{Min: min, Max: max, Base: base}, nil\n}\n\nfunc (s *Log) ebounds() (bool, float64, float64) {\n\tif s.Min < 0 {\n\t\treturn true, -s.Max, -s.Min\n\t}\n\treturn false, s.Min, s.Max\n}\n\nfunc (s Log) Map(x float64) float64 {\n\tneg, min, max := s.ebounds()\n\tif neg {\n\t\tx = -x\n\t}\n\tif x <= 0 {\n\t\treturn math.NaN()\n\t}\n\tif min == max {\n\t\treturn 0.5\n\t}\n\n\tlogMin, logMax := math.Log(min), math.Log(max)\n\ty := (math.Log(x) - logMin) / (logMax - logMin)\n\tif neg {\n\t\ty = 1 - y\n\t}\n\tif s.Clamp {\n\t\ty = clamp(y)\n\t}\n\treturn y\n}\n\nfunc (s Log) Unmap(y float64) float64 {\n\tneg, min, max := s.ebounds()\n\tif neg {\n\t\ty = 1 - y\n\t}\n\tlogMin, logMax := math.Log(min), math.Log(max)\n\tx := math.Exp(y*(logMax-logMin) + logMin)\n\tif neg {\n\t\tx = -x\n\t}\n\treturn x\n}\n\nfunc (s *Log) SetClamp(clamp bool) {\n\ts.Clamp = clamp\n}\n\n// The tick levels are:\n//\n// Level 0 is a major tick at Base^n (1, 10, 100, ...)\n// Level 1 is a major tick at Base^(2*n) (1, 100, 10000, ...)\n// Level 2 is a major tick at Base^(4*n) (1, 10000, 100000000, ...)\n//\n// That is, each level eliminates every other tick. 
Levels below 0 are\n// not defined.\n\nfunc logb(x float64, b float64) float64 {\n\treturn math.Log(x) / math.Log(b)\n}\n\nfunc (s *Log) spacingAtLevel(level int, roundOut bool) (firstN, lastN, ebase float64) {\n\t_, min, max := s.ebounds()\n\n\t// Compute the effective base at this level.\n\tebase = math.Pow(float64(s.Base), math.Pow(2, float64(level)))\n\tlmin, lmax := logb(min, ebase), logb(max, ebase)\n\n\t// Add a tiny bit of slack to the floor and ceiling so that\n\t// rounding errors don't significantly affect tick marks.\n\tslack := (lmax - lmin) * 1e-10\n\n\tif roundOut {\n\t\tfirstN = math.Floor(lmin + slack)\n\t\tlastN = math.Ceil(lmax - slack)\n\t} else {\n\t\tfirstN = math.Ceil(lmin - slack)\n\t\tlastN = math.Floor(lmax + slack)\n\t}\n\n\treturn\n}\n\nfunc (s *Log) CountTicks(level int) int {\n\treturn logTicker{s, false}.CountTicks(level)\n}\n\nfunc (s *Log) TicksAtLevel(level int) interface{} {\n\treturn logTicker{s, false}.TicksAtLevel(level)\n}\n\ntype logTicker struct {\n\ts        *Log\n\troundOut bool\n}\n\nfunc (t logTicker) CountTicks(level int) int {\n\tif level < 0 {\n\t\tconst maxInt = int(^uint(0) >> 1)\n\t\treturn maxInt\n\t}\n\n\tfirstN, lastN, _ := t.s.spacingAtLevel(level, t.roundOut)\n\treturn int(lastN - firstN + 1)\n}\n\nfunc (t logTicker) TicksAtLevel(level int) interface{} {\n\tneg, min, max := t.s.ebounds()\n\tticks := []float64{}\n\n\tif level < 0 {\n\t\t// Minor ticks for level 0. 
Get the major\n\t\t// ticks, but round out so we can fill in\n\t\t// minor ticks outside of the major ticks.\n\t\tfirstN, lastN, _ := t.s.spacingAtLevel(0, true)\n\t\tfor n := firstN; n <= lastN; n++ {\n\t\t\ttick := math.Pow(float64(t.s.Base), n)\n\t\t\tstep := tick\n\t\t\tfor i := 0; i < t.s.Base-1; i++ {\n\t\t\t\tif min <= tick && tick <= max {\n\t\t\t\t\tticks = append(ticks, tick)\n\t\t\t\t}\n\t\t\t\ttick += step\n\t\t\t}\n\t\t}\n\t} else {\n\t\tfirstN, lastN, base := t.s.spacingAtLevel(level, t.roundOut)\n\t\tfor n := firstN; n <= lastN; n++ {\n\t\t\tticks = append(ticks, math.Pow(base, n))\n\t\t}\n\t}\n\n\tif neg {\n\t\t// Negate and reverse order of ticks.\n\t\tfor i := 0; i < (len(ticks)+1)/2; i++ {\n\t\t\tj := len(ticks) - i - 1\n\t\t\tticks[i], ticks[j] = -ticks[j], -ticks[i]\n\t\t}\n\t}\n\n\treturn ticks\n}\n\nfunc (s Log) Ticks(o TickOptions) (major, minor []float64) {\n\tif o.Max <= 0 {\n\t\treturn nil, nil\n\t} else if s.Min == s.Max {\n\t\treturn []float64{s.Min}, []float64{s.Max}\n\t}\n\tt := logTicker{&s, false}\n\n\tlevel, ok := o.FindLevel(t, 0)\n\tif !ok {\n\t\treturn nil, nil\n\t}\n\treturn t.TicksAtLevel(level).([]float64), t.TicksAtLevel(level - 1).([]float64)\n}\n\nfunc (s *Log) Nice(o TickOptions) {\n\tif s.Min == s.Max {\n\t\treturn\n\t}\n\tneg, _, _ := s.ebounds()\n\tt := logTicker{s, true}\n\n\tlevel, ok := o.FindLevel(t, 0)\n\tif !ok {\n\t\treturn\n\t}\n\tfirstN, lastN, base := s.spacingAtLevel(level, true)\n\ts.Min = math.Pow(base, firstN)\n\ts.Max = math.Pow(base, lastN)\n\tif neg {\n\t\ts.Min, s.Max = -s.Max, -s.Min\n\t}\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/scale/log_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage scale\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"testing\"\n\n\t\"github.com/aclements/go-moremath/internal/mathtest\"\n\t\"github.com/aclements/go-moremath/vec\"\n)\n\nfunc TestLog(t *testing.T) {\n\tl, err := NewLog(0, 10, 10)\n\tif _, ok := err.(RangeErr); !ok {\n\t\tt.Errorf(\"want RangeErr; got %v\", err)\n\t}\n\tl, err = NewLog(-10, 0, 10)\n\tif _, ok := err.(RangeErr); !ok {\n\t\tt.Errorf(\"want RangeErr; got %v\", err)\n\t}\n\tl, err = NewLog(-10, 10, 10)\n\tif _, ok := err.(RangeErr); !ok {\n\t\tt.Errorf(\"want RangeErr; got %v\", err)\n\t}\n\tl, err = NewLog(10, 20, 0)\n\tif _, ok := err.(RangeErr); !ok {\n\t\tt.Errorf(\"want RangeErr; got %v\", err)\n\t}\n\n\tl, _ = NewLog(1, 10, 10)\n\tmathtest.WantFunc(t, fmt.Sprintf(\"%v.Map\", l), l.Map,\n\t\tmap[float64]float64{\n\t\t\t-1:                math.NaN(),\n\t\t\t0:                 math.NaN(),\n\t\t\t0.1:               -1,\n\t\t\t1:                 0,\n\t\t\tmath.Pow(10, 0.5): 0.5,\n\t\t\t10:                1,\n\t\t\t100:               2,\n\t\t})\n\tmathtest.WantFunc(t, fmt.Sprintf(\"%v.Unmap\", l), l.Unmap,\n\t\tmap[float64]float64{\n\t\t\t-1:  0.1,\n\t\t\t0:   1,\n\t\t\t0.5: math.Pow(10, 0.5),\n\t\t\t1:   10,\n\t\t\t2:   100,\n\t\t})\n\n\tl.SetClamp(true)\n\tmathtest.WantFunc(t, fmt.Sprintf(\"%v.Map\", l), l.Map,\n\t\tmap[float64]float64{\n\t\t\t-1:                math.NaN(),\n\t\t\t0:                 math.NaN(),\n\t\t\t0.1:               0,\n\t\t\t1:                 0,\n\t\t\tmath.Pow(10, 0.5): 0.5,\n\t\t\t10:                1,\n\t\t\t100:               1,\n\t\t})\n\tmathtest.WantFunc(t, fmt.Sprintf(\"%v.Unmap\", l), l.Unmap,\n\t\tmap[float64]float64{\n\t\t\t0:   1,\n\t\t\t0.5: math.Pow(10, 0.5),\n\t\t\t1:   10,\n\t\t})\n\n\tl, _ = NewLog(-1, -10, 10)\n\tmathtest.WantFunc(t, fmt.Sprintf(\"%v.Map\", l), 
l.Map,\n\t\tmap[float64]float64{\n\t\t\t1:                  math.NaN(),\n\t\t\t0:                  math.NaN(),\n\t\t\t-0.1:               2,\n\t\t\t-1:                 1,\n\t\t\t-math.Pow(10, 0.5): 0.5,\n\t\t\t-10:                0,\n\t\t\t-100:               -1,\n\t\t})\n\tmathtest.WantFunc(t, fmt.Sprintf(\"%v.Unmap\", l), l.Unmap,\n\t\tmap[float64]float64{\n\t\t\t2:   -0.1,\n\t\t\t1:   -1,\n\t\t\t0.5: -math.Pow(10, 0.5),\n\t\t\t0:   -10,\n\t\t\t-1:  -100,\n\t\t})\n\n\tl, _ = NewLog(5, 5, 10)\n\tmathtest.WantFunc(t, fmt.Sprintf(\"%v.Map\", l), l.Map,\n\t\tmap[float64]float64{\n\t\t\t-1: math.NaN(),\n\t\t\t0:  math.NaN(),\n\t\t\t1:  0.5,\n\t\t\t10: 0.5,\n\t\t})\n\tmathtest.WantFunc(t, fmt.Sprintf(\"%v.Unmap\", l), l.Unmap,\n\t\tmap[float64]float64{\n\t\t\t0:   5,\n\t\t\t0.5: 5,\n\t\t\t1:   5,\n\t\t})\n}\n\nfunc TestLogTicks(t *testing.T) {\n\tm := func(m int) TickOptions {\n\t\treturn TickOptions{Max: m}\n\t}\n\n\t// Test the obvious.\n\tl, _ := NewLog(1, 10, 10)\n\tmajor, minor := l.Ticks(m(5))\n\twmajor, wminor := vec.Logspace(0, 1, 2, 10), vec.Linspace(1, 10, 10)\n\tif !ticksEq(major, wmajor, minor, wminor) {\n\t\tt.Errorf(\"%v.Ticks(5) = %v, %v; want %v, %v\", l, major, minor, wmajor, wminor)\n\t}\n\n\t// Test two orders of magnitude.\n\tl, _ = NewLog(1, 100, 10)\n\tmajor, minor = l.Ticks(m(5))\n\twmajor, wminor = vec.Logspace(0, 2, 3, 10), vec.Concat(vec.Linspace(1, 9, 9), vec.Linspace(10, 100, 10))\n\tif !ticksEq(major, wmajor, minor, wminor) {\n\t\tt.Errorf(\"%v.Ticks(5) = %v, %v; want %v, %v\", l, major, minor, wmajor, wminor)\n\t}\n\n\t// Test many orders of magnitude (higher tick levels).\n\tl, _ = NewLog(1, 1e8, 10)\n\tmajor, minor = l.Ticks(m(5))\n\twmajor, wminor = vec.Logspace(0, 4, 5, 100), vec.Logspace(0, 8, 9, 10)\n\tif !ticksEq(major, wmajor, minor, wminor) {\n\t\tt.Errorf(\"%v.Ticks(5) = %v, %v; want %v, %v\", l, major, minor, wmajor, wminor)\n\t}\n\n\tmajor, minor = l.Ticks(m(4))\n\twmajor, wminor = vec.Logspace(0, 2, 3, 10000), vec.Logspace(0, 
4, 5, 100)\n\tif !ticksEq(major, wmajor, minor, wminor) {\n\t\tt.Errorf(\"%v.Ticks(5) = %v, %v; want %v, %v\", l, major, minor, wmajor, wminor)\n\t}\n\n\t// Test minor ticks outside major ticks.\n\tl, _ = NewLog(0.91, 200, 10)\n\tmajor, minor = l.Ticks(m(5))\n\twmajor, wminor = vec.Logspace(0, 2, 3, 10), vec.Concat(vec.Linspace(1, 9, 9), vec.Linspace(10, 100, 10), []float64{200})\n\tif !ticksEq(major, wmajor, minor, wminor) {\n\t\tt.Errorf(\"%v.Ticks(5) = %v, %v; want %v, %v\", l, major, minor, wmajor, wminor)\n\t}\n\n\t// Test nicing.\n\tl.Nice(m(5))\n\tmajor, minor = l.Ticks(m(5))\n\twmajor, wminor = vec.Logspace(-1, 3, 5, 10), vec.Concat(vec.Linspace(0.1, 0.9, 9), vec.Linspace(1, 9, 9), vec.Linspace(10, 90, 9), vec.Linspace(100, 1000, 10))\n\tif !ticksEq(major, wmajor, minor, wminor) {\n\t\tt.Errorf(\"%v.Ticks(5) = %v, %v; want %v, %v\", l, major, minor, wmajor, wminor)\n\t}\n\n\t// Test negative ticks.\n\tneg := vec.Vectorize(func(x float64) float64 { return -x })\n\tl, _ = NewLog(-1, -100, 10)\n\tmajor, minor = l.Ticks(m(5))\n\twmajor, wminor = neg(vec.Logspace(2, 0, 3, 10)), neg(vec.Concat(vec.Linspace(100, 10, 10), vec.Linspace(9, 1, 9)))\n\tif !ticksEq(major, wmajor, minor, wminor) {\n\t\tt.Errorf(\"%v.Ticks(5) = %v, %v; want %v, %v\", l, major, minor, wmajor, wminor)\n\t}\n\n\tmajor, minor = l.Ticks(m(2))\n\twmajor, wminor = neg(vec.Logspace(1, 0, 2, 100)), neg(vec.Logspace(2, 0, 3, 10))\n\tif !ticksEq(major, wmajor, minor, wminor) {\n\t\tt.Errorf(\"%v.Ticks(5) = %v, %v; want %v, %v\", l, major, minor, wmajor, wminor)\n\t}\n\n\tl.Nice(m(5))\n\tmajor, minor = l.Ticks(m(5))\n\twmajor, wminor = neg(vec.Logspace(2, 0, 3, 10)), neg(vec.Concat(vec.Linspace(100, 10, 10), vec.Linspace(9, 1, 9)))\n\tif !ticksEq(major, wmajor, minor, wminor) {\n\t\tt.Errorf(\"%v.Ticks(5) = %v, %v; want %v, %v\", l, major, minor, wmajor, wminor)\n\t}\n\n\t// Test Min==Max.\n\tl, _ = NewLog(5, 5, 10)\n\tmajor, minor = l.Ticks(m(5))\n\twmajor, wminor = []float64{5}, []float64{5}\n\tif 
!ticksEq(major, wmajor, minor, wminor) {\n\t\tt.Errorf(\"%v.Ticks(5) = %v, %v; want %v, %v\", l, major, minor, wmajor, wminor)\n\t}\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/scale/package.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package scale provides abstractions for scales that map from one\n// domain to another and provide methods for indicating human-readable\n// intervals in the input domain. The most common type of scale is a\n// quantitative scale, such as a linear or log scale, which is\n// captured by the Quantitative interface.\npackage scale\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/scale/ticks.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage scale\n\n// TickOptions specifies constraints for constructing scale ticks.\n//\n// A Ticks method will return the ticks at the lowest level (largest\n// number of ticks) that satisfies all of the constraints. The exact\n// meaning of the tick level differs between scale types, but for all\n// scales higher tick levels result in ticks that are further apart\n// (fewer ticks in a given interval). In general, the minor ticks are\n// the ticks from one level below the major ticks.\ntype TickOptions struct {\n\t// Max is the maximum number of major ticks to return.\n\tMax int\n\n\t// MinLevel and MaxLevel are the minimum and maximum tick\n\t// levels to accept, respectively. If they are both 0, there is\n\t// no limit on acceptable tick levels.\n\tMinLevel, MaxLevel int\n}\n\n// A Ticker computes tick marks for a scale. The \"level\" of the ticks\n// controls how many ticks there are and how closely they are spaced.\n// Higher levels have fewer ticks, while lower levels have more ticks.\n// For example, on a numerical scale, one could have ticks at every\n// n*(10^level).\ntype Ticker interface {\n\t// CountTicks returns the number of ticks at level in this\n\t// scale's input range. This is equivalent to\n\t// len(TicksAtLevel(level)), but should be much more\n\t// efficient. CountTicks is a weakly monotonically decreasing\n\t// function of level.\n\tCountTicks(level int) int\n\n\t// TicksAtLevel returns a slice of \"nice\" tick values in\n\t// increasing order at level in this scale's input range.\n\t// Typically, TicksAtLevel(l+1) is a subset of\n\t// TicksAtLevel(l). 
That is, higher levels remove ticks from\n\t// lower levels.\n\tTicksAtLevel(level int) interface{}\n}\n\n// FindLevel returns the lowest level that satisfies the constraints\n// given by o:\n//\n// * ticker.CountTicks(level) <= o.Max\n//\n// * o.MinLevel <= level <= o.MaxLevel (if MinLevel and MaxLevel != 0).\n//\n// If the constraints cannot be satisfied, it returns 0, false.\n//\n// guess is the level to start the optimization at.\nfunc (o *TickOptions) FindLevel(ticker Ticker, guess int) (int, bool) {\n\tminLevel, maxLevel := o.MinLevel, o.MaxLevel\n\tif minLevel == 0 && maxLevel == 0 {\n\t\tminLevel, maxLevel = -1000, 1000\n\t} else if minLevel > maxLevel {\n\t\treturn 0, false\n\t}\n\tif o.Max < 1 {\n\t\treturn 0, false\n\t}\n\n\t// Start with the initial guess.\n\tl := guess\n\tif l < minLevel {\n\t\tl = minLevel\n\t} else if l > maxLevel {\n\t\tl = maxLevel\n\t}\n\n\t// Optimize count against o.Max.\n\tif ticker.CountTicks(l) <= o.Max {\n\t\t// We're satisfying the o.Max and min/maxLevel\n\t\t// constraints. count is monotonically decreasing, so\n\t\t// decrease level to increase the count until we\n\t\t// violate either o.Max or minLevel.\n\t\tfor l--; l >= minLevel && ticker.CountTicks(l) <= o.Max; l-- {\n\t\t}\n\t\t// We went one too far.\n\t\tl++\n\t} else {\n\t\t// We're over o.Max. Increase level to decrease the\n\t\t// count until we go below o.Max. This may cause us to\n\t\t// violate maxLevel.\n\t\tfor l++; l <= maxLevel && ticker.CountTicks(l) > o.Max; l++ {\n\t\t}\n\t\tif l > maxLevel {\n\t\t\t// We can't satisfy both o.Max and maxLevel.\n\t\t\treturn 0, false\n\t\t}\n\t}\n\n\t// At this point l is the lowest value that satisfies the\n\t// o.Max, minLevel, and maxLevel constraints.\n\n\treturn l, true\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/scale/ticks_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage scale\n\nimport \"testing\"\n\ntype testTicker struct{}\n\nfunc (testTicker) CountTicks(level int) int {\n\tc := 10 - level\n\tif c < 1 {\n\t\tc = 1\n\t}\n\treturn c\n}\n\nfunc (t testTicker) TicksAtLevel(level int) interface{} {\n\tm := make([]float64, t.CountTicks(level))\n\tfor i := 0; i < len(m); i++ {\n\t\tm[i] = float64(i)\n\t}\n\treturn m\n}\n\nfunc TestTicks(t *testing.T) {\n\tcheck := func(o TickOptions, want int) {\n\t\twantL, wantOK := want, true\n\t\tif want == -999 {\n\t\t\twantL, wantOK = 0, false\n\t\t}\n\t\tfor _, guess := range []int{0, -50, 50} {\n\t\t\tl, ok := o.FindLevel(testTicker{}, guess)\n\t\t\tif l != wantL || ok != wantOK {\n\t\t\t\tt.Errorf(\"%+v.FindLevel with guess %v returned %v, %v; wanted %v, %v\", o, guess, l, ok, wantL, wantOK)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Argument sanity checking.\n\tcheck(TickOptions{}, -999)\n\tcheck(TickOptions{MinLevel: 10, MaxLevel: 9}, -999)\n\n\t// Just max constraint.\n\tcheck(TickOptions{Max: 1}, 9)\n\tcheck(TickOptions{Max: 6}, 4)\n\tcheck(TickOptions{Max: 20}, -10)\n\n\t// Max and level constraints.\n\tcheck(TickOptions{Max: 1, MaxLevel: 9}, 9)\n\tcheck(TickOptions{Max: 1, MaxLevel: 8}, -999)\n\tcheck(TickOptions{Max: 1, MinLevel: 9, MaxLevel: 1000}, 9)\n\tcheck(TickOptions{Max: 1, MinLevel: 10, MaxLevel: 1000}, 10)\n\n\tcheck(TickOptions{Max: 6, MaxLevel: 9}, 4)\n\tcheck(TickOptions{Max: 6, MaxLevel: 3}, -999)\n\tcheck(TickOptions{Max: 6, MinLevel: 10, MaxLevel: 11}, 10)\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/scale/util.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage scale\n\n// clamp clamps x to the range [0, 1].\nfunc clamp(x float64) float64 {\n\tif x < 0 {\n\t\treturn 0\n\t}\n\tif x > 1 {\n\t\treturn 1\n\t}\n\treturn x\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/alg.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage stats\n\n// Miscellaneous helper algorithms\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/aclements/go-moremath/mathx\"\n)\n\nfunc maxint(a, b int) int {\n\tif a > b {\n\t\treturn a\n\t}\n\treturn b\n}\n\nfunc minint(a, b int) int {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}\n\nfunc sumint(xs []int) int {\n\tsum := 0\n\tfor _, x := range xs {\n\t\tsum += x\n\t}\n\treturn sum\n}\n\n// bisect returns an x in [low, high] such that |f(x)| <= tolerance\n// using the bisection method.\n//\n// f(low) and f(high) must have opposite signs.\n//\n// If f does not have a root in this interval (e.g., it is\n// discontiguous), this returns the X of the apparent discontinuity\n// and false.\nfunc bisect(f func(float64) float64, low, high, tolerance float64) (float64, bool) {\n\tflow, fhigh := f(low), f(high)\n\tif -tolerance <= flow && flow <= tolerance {\n\t\treturn low, true\n\t}\n\tif -tolerance <= fhigh && fhigh <= tolerance {\n\t\treturn high, true\n\t}\n\tif mathx.Sign(flow) == mathx.Sign(fhigh) {\n\t\tpanic(fmt.Sprintf(\"root of f is not bracketed by [low, high]; f(%g)=%g f(%g)=%g\", low, flow, high, fhigh))\n\t}\n\tfor {\n\t\tmid := (high + low) / 2\n\t\tfmid := f(mid)\n\t\tif -tolerance <= fmid && fmid <= tolerance {\n\t\t\treturn mid, true\n\t\t}\n\t\tif mid == high || mid == low {\n\t\t\treturn mid, false\n\t\t}\n\t\tif mathx.Sign(fmid) == mathx.Sign(flow) {\n\t\t\tlow = mid\n\t\t\tflow = fmid\n\t\t} else {\n\t\t\thigh = mid\n\t\t\tfhigh = fmid\n\t\t}\n\t}\n}\n\n// bisectBool implements the bisection method on a boolean function.\n// It returns x1, x2 ∈ [low, high], x1 < x2 such that f(x1) != f(x2)\n// and x2 - x1 <= xtol.\n//\n// If f(low) == f(high), it panics.\nfunc bisectBool(f func(float64) bool, low, high, xtol float64) (x1, x2 float64) {\n\tflow, fhigh := f(low), f(high)\n\tif 
flow == fhigh {\n\t\tpanic(fmt.Sprintf(\"root of f is not bracketed by [low, high]; f(%g)=%v f(%g)=%v\", low, flow, high, fhigh))\n\t}\n\tfor {\n\t\tif high-low <= xtol {\n\t\t\treturn low, high\n\t\t}\n\t\tmid := (high + low) / 2\n\t\tif mid == high || mid == low {\n\t\t\treturn low, high\n\t\t}\n\t\tfmid := f(mid)\n\t\tif fmid == flow {\n\t\t\tlow = mid\n\t\t\tflow = fmid\n\t\t} else {\n\t\t\thigh = mid\n\t\t\tfhigh = fmid\n\t\t}\n\t}\n}\n\n// series returns the sum of the series f(0), f(1), ...\n//\n// This implementation is fast, but subject to round-off error.\nfunc series(f func(float64) float64) float64 {\n\ty, yp := 0.0, 1.0\n\tfor n := 0.0; y != yp; n++ {\n\t\typ = y\n\t\ty += f(n)\n\t}\n\treturn y\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/deltadist.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage stats\n\n// DeltaDist is the Dirac delta function, centered at T, with total\n// area 1.\n//\n// The CDF of the Dirac delta function is the Heaviside step function,\n// centered at T. Specifically, f(T) == 1.\ntype DeltaDist struct {\n\tT float64\n}\n\nfunc (d DeltaDist) PDF(x float64) float64 {\n\tif x == d.T {\n\t\treturn inf\n\t}\n\treturn 0\n}\n\nfunc (d DeltaDist) pdfEach(xs []float64) []float64 {\n\tres := make([]float64, len(xs))\n\tfor i, x := range xs {\n\t\tif x == d.T {\n\t\t\tres[i] = inf\n\t\t}\n\t}\n\treturn res\n}\n\nfunc (d DeltaDist) CDF(x float64) float64 {\n\tif x >= d.T {\n\t\treturn 1\n\t}\n\treturn 0\n}\n\nfunc (d DeltaDist) cdfEach(xs []float64) []float64 {\n\tres := make([]float64, len(xs))\n\tfor i, x := range xs {\n\t\tres[i] = d.CDF(x)\n\t}\n\treturn res\n}\n\nfunc (d DeltaDist) InvCDF(y float64) float64 {\n\tif y < 0 || y > 1 {\n\t\treturn nan\n\t}\n\treturn d.T\n}\n\nfunc (d DeltaDist) Bounds() (float64, float64) {\n\treturn d.T - 1, d.T + 1\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/dist.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage stats\n\nimport \"math/rand\"\n\n// A DistCommon is a statistical distribution. DistCommon is a base\n// interface provided by both continuous and discrete distributions.\ntype DistCommon interface {\n\t// CDF returns the cumulative probability Pr[X <= x].\n\t//\n\t// For continuous distributions, the CDF is the integral of\n\t// the PDF from -inf to x.\n\t//\n\t// For discrete distributions, the CDF is the sum of the PMF\n\t// at all defined points from -inf to x, inclusive. Note that\n\t// the CDF of a discrete distribution is defined for the whole\n\t// real line (unlike the PMF) but has discontinuities where\n\t// the PMF is non-zero.\n\t//\n\t// The CDF is a monotonically increasing function and has a\n\t// domain of all real numbers. If the distribution has bounded\n\t// support, it has a range of [0, 1]; otherwise it has a range\n\t// of (0, 1). Finally, CDF(-inf)==0 and CDF(inf)==1.\n\tCDF(x float64) float64\n\n\t// Bounds returns reasonable bounds for this distribution's\n\t// PDF/PMF and CDF. The total weight outside of these bounds\n\t// should be approximately 0.\n\t//\n\t// For a discrete distribution, both bounds are integer\n\t// multiples of Step().\n\t//\n\t// If this distribution has finite support, it returns exact\n\t// bounds l, h such that CDF(l')=0 for all l' < l and\n\t// CDF(h')=1 for all h' >= h.\n\tBounds() (float64, float64)\n}\n\n// A Dist is a continuous statistical distribution.\ntype Dist interface {\n\tDistCommon\n\n\t// PDF returns the value of the probability density function\n\t// of this distribution at x.\n\tPDF(x float64) float64\n}\n\n// A DiscreteDist is a discrete statistical distribution.\n//\n// Most discrete distributions are defined only at integral values of\n// the random variable. 
However, some are defined at other intervals,\n// so this interface takes a float64 value for the random variable.\n// The probability mass function rounds down to the nearest defined\n// point. Note that float64 values can exactly represent integer\n// values between ±2**53, so this generally shouldn't be an issue for\n// integer-valued distributions (likewise, for half-integer-valued\n// distributions, float64 can exactly represent all values between\n// ±2**52).\ntype DiscreteDist interface {\n\tDistCommon\n\n\t// PMF returns the value of the probability mass function\n\t// Pr[X = x'], where x' is x rounded down to the nearest\n\t// defined point on the distribution.\n\t//\n\t// Note for implementers: for integer-valued distributions,\n\t// round x using int(math.Floor(x)). Do not use int(x), since\n\t// that truncates toward zero (unless all x <= 0 are handled\n\t// the same).\n\tPMF(x float64) float64\n\n\t// Step returns s, where the distribution is defined for sℕ.\n\tStep() float64\n}\n\n// TODO: Add a Support method for finite support distributions? Or\n// maybe just another return value from Bounds indicating that the\n// bounds are exact?\n\n// TODO: Plot method to return a pre-configured Plot object with\n// reasonable bounds and an integral function? Have to distinguish\n// PDF/CDF/InvCDF. Three methods? Argument?\n//\n// Doesn't have to be a method of Dist. Could be just a function that\n// takes a Dist and uses Bounds.\n\n// InvCDF returns the inverse CDF function of the given distribution\n// (also known as the quantile function or the percent point\n// function). This is a function f such that f(dist.CDF(x)) == x. If\n// dist.CDF is only weakly monotonic (that is, there are intervals\n// over which it is constant) and y > 0, f returns the smallest x that\n// satisfies this condition. In general, the inverse CDF is not\n// well-defined for y==0, but for convenience if y==0, f returns the\n// largest x that satisfies this condition. 
For distributions with\n// infinite support both the largest and smallest x are -Inf; however,\n// for distributions with finite support, this is the lower bound of\n// the support.\n//\n// If y < 0 or y > 1, f returns NaN.\n//\n// If dist implements InvCDF(float64) float64, this returns that\n// method. Otherwise, it returns a function that uses a generic\n// numerical method to construct the inverse CDF at y by finding x\n// such that dist.CDF(x) == y. This may have poor precision around\n// points of discontinuity, including f(0) and f(1).\nfunc InvCDF(dist DistCommon) func(y float64) (x float64) {\n\ttype invCDF interface {\n\t\tInvCDF(float64) float64\n\t}\n\tif dist, ok := dist.(invCDF); ok {\n\t\treturn dist.InvCDF\n\t}\n\n\t// Otherwise, use a numerical algorithm.\n\t//\n\t// TODO: For discrete distributions, use the step size to\n\t// inform this computation.\n\treturn func(y float64) (x float64) {\n\t\tconst almostInf = 1e100\n\t\tconst xtol = 1e-16\n\n\t\tif y < 0 || y > 1 {\n\t\t\treturn nan\n\t\t} else if y == 0 {\n\t\t\tl, _ := dist.Bounds()\n\t\t\tif dist.CDF(l) == 0 {\n\t\t\t\t// Finite support\n\t\t\t\treturn l\n\t\t\t} else {\n\t\t\t\t// Infinite support\n\t\t\t\treturn -inf\n\t\t\t}\n\t\t} else if y == 1 {\n\t\t\t_, h := dist.Bounds()\n\t\t\tif dist.CDF(h) == 1 {\n\t\t\t\t// Finite support\n\t\t\t\treturn h\n\t\t\t} else {\n\t\t\t\t// Infinite support\n\t\t\t\treturn inf\n\t\t\t}\n\t\t}\n\n\t\t// Find loX, hiX for which cdf(loX) < y <= cdf(hiX).\n\t\tvar loX, loY, hiX, hiY float64\n\t\tx1, y1 := 0.0, dist.CDF(0)\n\t\txdelta := 1.0\n\t\tif y1 < y {\n\t\t\thiX, hiY = x1, y1\n\t\t\tfor hiY < y && hiX != inf {\n\t\t\t\tloX, loY, hiX = hiX, hiY, hiX+xdelta\n\t\t\t\thiY = dist.CDF(hiX)\n\t\t\t\txdelta *= 2\n\t\t\t}\n\t\t} else {\n\t\t\tloX, loY = x1, y1\n\t\t\tfor y <= loY && loX != -inf {\n\t\t\t\thiX, hiY, loX = loX, loY, loX-xdelta\n\t\t\t\tloY = dist.CDF(loX)\n\t\t\t\txdelta *= 2\n\t\t\t}\n\t\t}\n\t\tif loX == -inf {\n\t\t\treturn loX\n\t\t} else 
if hiX == inf {\n\t\t\treturn hiX\n\t\t}\n\n\t\t// Use bisection on the interval to find the smallest\n\t\t// x at which cdf(x) <= y.\n\t\t_, x = bisectBool(func(x float64) bool {\n\t\t\treturn dist.CDF(x) < y\n\t\t}, loX, hiX, xtol)\n\t\treturn\n\t}\n}\n\n// Rand returns a random number generator that draws from the given\n// distribution. The returned generator takes an optional source of\n// randomness; if this is nil, it uses the default global source.\n//\n// If dist implements Rand(*rand.Rand) float64, Rand returns that\n// method. Otherwise, it returns a generic generator based on dist's\n// inverse CDF (which may in turn use an efficient implementation or a\n// generic numerical implementation; see InvCDF).\nfunc Rand(dist DistCommon) func(*rand.Rand) float64 {\n\ttype distRand interface {\n\t\tRand(*rand.Rand) float64\n\t}\n\tif dist, ok := dist.(distRand); ok {\n\t\treturn dist.Rand\n\t}\n\n\t// Otherwise, use a generic algorithm.\n\tinv := InvCDF(dist)\n\treturn func(r *rand.Rand) float64 {\n\t\tvar y float64\n\t\tfor y == 0 {\n\t\t\tif r == nil {\n\t\t\t\ty = rand.Float64()\n\t\t\t} else {\n\t\t\t\ty = r.Float64()\n\t\t\t}\n\t\t}\n\t\treturn inv(y)\n\t}\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/dist_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage stats\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n)\n\ntype funnyCDF struct {\n\tleft float64\n}\n\nfunc (f funnyCDF) CDF(x float64) float64 {\n\tswitch {\n\tcase x < f.left:\n\t\treturn 0\n\tcase x < f.left+1:\n\t\treturn (x - f.left) / 2\n\tcase x < f.left+2:\n\t\treturn 0.5\n\tcase x < f.left+3:\n\t\treturn (x-f.left-2)/2 + 0.5\n\tdefault:\n\t\treturn 1\n\t}\n}\n\nfunc (f funnyCDF) Bounds() (float64, float64) {\n\treturn f.left, f.left + 3\n}\n\nfunc TestInvCDF(t *testing.T) {\n\tfor _, f := range []funnyCDF{funnyCDF{1}, funnyCDF{-1.5}, funnyCDF{-4}} {\n\t\ttestFunc(t, fmt.Sprintf(\"InvCDF(funnyCDF%+v)\", f), InvCDF(f),\n\t\t\tmap[float64]float64{\n\t\t\t\t-0.1: nan,\n\t\t\t\t0:    f.left,\n\t\t\t\t0.25: f.left + 0.5,\n\t\t\t\t0.5:  f.left + 1,\n\t\t\t\t0.75: f.left + 2.5,\n\t\t\t\t1:    f.left + 3,\n\t\t\t\t1.1:  nan,\n\t\t\t})\n\t}\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/hist.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage stats\n\nimport \"math\"\n\n// TODO: Implement histograms on top of scales.\n\ntype Histogram interface {\n\t// Add adds a sample with value x to histogram h.\n\tAdd(x float64)\n\n\t// Counts returns the number of samples less than the lowest\n\t// bin, a slice of the number of samples in each bin,\n\t// and the number of samples greater than the highest bin.\n\tCounts() (under uint, counts []uint, over uint)\n\n\t// BinToValue returns the value that would appear at the given\n\t// bin index.\n\t//\n\t// For integral values of bin, BinToValue returns the lower\n\t// bound of bin.  That is, a sample value x will be in bin if\n\t// bin is integral and\n\t//\n\t//    BinToValue(bin) <= x < BinToValue(bin + 1)\n\t//\n\t// For non-integral values of bin, BinToValue interpolates\n\t// between the lower and upper bounds of math.Floor(bin).\n\t//\n\t// BinToValue is undefined if bin > 1 + the number of bins.\n\tBinToValue(bin float64) float64\n}\n\n// HistogramQuantile returns the x such that n*q samples in hist are\n// <= x, assuming values are distributed within each bin according to\n// hist's distribution.\n//\n// If the q'th sample falls below the lowest bin or above the highest\n// bin, returns NaN.\nfunc HistogramQuantile(hist Histogram, q float64) float64 {\n\tunder, counts, over := hist.Counts()\n\ttotal := under + over\n\tfor _, count := range counts {\n\t\ttotal += count\n\t}\n\n\tgoal := uint(float64(total) * q)\n\tif goal <= under || goal > total-over {\n\t\treturn math.NaN()\n\t}\n\tfor bin, count := range counts {\n\t\tif count > goal {\n\t\t\treturn hist.BinToValue(float64(bin) + float64(goal)/float64(count))\n\t\t}\n\t\tgoal -= count\n\t}\n\tpanic(\"goal count not reached\")\n}\n\n// HistogramIQR returns the interquartile range of the samples in\n// hist.\nfunc 
HistogramIQR(hist Histogram) float64 {\n\treturn HistogramQuantile(hist, 0.75) - HistogramQuantile(hist, 0.25)\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/hypergdist.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage stats\n\nimport (\n\t\"math\"\n\n\t\"github.com/aclements/go-moremath/mathx\"\n)\n\n// HypergeometicDist is a hypergeometric distribution.\ntype HypergeometicDist struct {\n\t// N is the size of the population. N >= 0.\n\tN int\n\n\t// K is the number of successes in the population. 0 <= K <= N.\n\tK int\n\n\t// Draws is the number of draws from the population. This is\n\t// usually written \"n\", but is called Draws here because of\n\t// limitations on Go identifier naming. 0 <= Draws <= N.\n\tDraws int\n}\n\n// PMF is the probability of getting exactly int(k) successes in\n// d.Draws draws without replacement from a population of size d.N that\n// contains exactly d.K successes.\nfunc (d HypergeometicDist) PMF(k float64) float64 {\n\tki := int(math.Floor(k))\n\tl, h := d.bounds()\n\tif ki < l || ki > h {\n\t\treturn 0\n\t}\n\treturn d.pmf(ki)\n}\n\nfunc (d HypergeometicDist) pmf(k int) float64 {\n\treturn math.Exp(mathx.Lchoose(d.K, k) + mathx.Lchoose(d.N-d.K, d.Draws-k) - mathx.Lchoose(d.N, d.Draws))\n}\n\n// CDF is the probability of getting int(k) or fewer successes in\n// d.Draws draws without replacement from a population of size d.N that\n// contains exactly d.K successes.\nfunc (d HypergeometicDist) CDF(k float64) float64 {\n\t// Based on Klotz, A Computational Approach to Statistics.\n\tki := int(math.Floor(k))\n\tl, h := d.bounds()\n\tif ki < l {\n\t\treturn 0\n\t} else if ki >= h {\n\t\treturn 1\n\t}\n\t// Use symmetry to compute the smaller sum.\n\tflip := false\n\tif ki > (d.Draws+1)/(d.N+1)*(d.K+1) {\n\t\tflip = true\n\t\tki = d.K - ki - 1\n\t\td.Draws = d.N - d.Draws\n\t}\n\tp := d.pmf(ki) * d.sum(ki)\n\tif flip {\n\t\tp = 1 - p\n\t}\n\treturn p\n}\n\nfunc (d HypergeometicDist) sum(k int) float64 {\n\tconst epsilon = 1e-14\n\tsum, ak := 1.0, 1.0\n\tL := maxint(0, 
d.Draws+d.K-d.N)\n\tfor dk := 1; dk <= k-L && ak/sum > epsilon; dk++ {\n\t\tak *= float64(1+k-dk) / float64(d.Draws-k+dk)\n\t\tak *= float64(d.N-d.K-d.Draws+k+1-dk) / float64(d.K-k+dk)\n\t\tsum += ak\n\t}\n\treturn sum\n}\n\nfunc (d HypergeometicDist) bounds() (int, int) {\n\treturn maxint(0, d.Draws+d.K-d.N), minint(d.Draws, d.K)\n}\n\nfunc (d HypergeometicDist) Bounds() (float64, float64) {\n\tl, h := d.bounds()\n\treturn float64(l), float64(h)\n}\n\nfunc (d HypergeometicDist) Step() float64 {\n\treturn 1\n}\n\nfunc (d HypergeometicDist) Mean() float64 {\n\treturn float64(d.Draws*d.K) / float64(d.N)\n}\n\nfunc (d HypergeometicDist) Variance() float64 {\n\treturn float64(d.Draws*d.K*(d.N-d.K)*(d.N-d.Draws)) /\n\t\tfloat64(d.N*d.N*(d.N-1))\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/hypergdist_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage stats\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n)\n\nfunc TestHypergeometricDist(t *testing.T) {\n\tdist1 := HypergeometicDist{N: 50, K: 5, Draws: 10}\n\ttestFunc(t, fmt.Sprintf(\"%+v.PMF\", dist1), dist1.PMF,\n\t\tmap[float64]float64{\n\t\t\t-0.1: 0,\n\t\t\t4:    0.003964583058,\n\t\t\t4.9:  0.003964583058, // Test rounding\n\t\t\t5:    0.000118937492,\n\t\t\t5.9:  0.000118937492,\n\t\t\t6:    0,\n\t\t})\n\ttestDiscreteCDF(t, fmt.Sprintf(\"%+v.CDF\", dist1), dist1)\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/kde.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage stats\n\nimport (\n\t\"fmt\"\n\t\"math\"\n)\n\n// A KDE is a distribution that estimates the underlying distribution\n// of a Sample using kernel density estimation.\n//\n// Kernel density estimation is a method for constructing an estimate\n// ƒ̂(x) of a unknown distribution ƒ(x) given a sample from that\n// distribution. Unlike many techniques, kernel density estimation is\n// non-parametric: in general, it doesn't assume any particular true\n// distribution (note, however, that the resulting distribution\n// depends deeply on the selected bandwidth, and many bandwidth\n// estimation techniques assume normal reference rules).\n//\n// A kernel density estimate is similar to a histogram, except that it\n// is a smooth probability estimate and does not require choosing a\n// bin size and discretizing the data.\n//\n// Sample is the only required field. All others have reasonable\n// defaults.\ntype KDE struct {\n\t// Sample is the data sample underlying this KDE.\n\tSample Sample\n\n\t// Kernel is the kernel to use for the KDE.\n\tKernel KDEKernel\n\n\t// Bandwidth is the bandwidth to use for the KDE.\n\t//\n\t// If this is zero, the bandwidth is computed from the\n\t// provided data using a default bandwidth estimator\n\t// (currently BandwidthScott).\n\tBandwidth float64\n\n\t// BoundaryMethod is the boundary correction method to use for\n\t// the KDE. The default value is BoundaryReflect; however, the\n\t// default bounds are effectively +/-inf, which is equivalent\n\t// to performing no boundary correction.\n\tBoundaryMethod KDEBoundaryMethod\n\n\t// [BoundaryMin, BoundaryMax) specify a bounded support for\n\t// the KDE. 
If both are 0 (their default values), they are\n\t// treated as +/-inf.\n\t//\n\t// To specify a half-bounded support, set Min to math.Inf(-1)\n\t// or Max to math.Inf(1).\n\tBoundaryMin float64\n\tBoundaryMax float64\n}\n\n// BandwidthSilverman is a bandwidth estimator implementing\n// Silverman's Rule of Thumb. It's fast, but not very robust to\n// outliers as it assumes data is approximately normal.\n//\n// Silverman, B. W. (1986) Density Estimation.\nfunc BandwidthSilverman(data interface {\n\tStdDev() float64\n\tWeight() float64\n}) float64 {\n\treturn 1.06 * data.StdDev() * math.Pow(data.Weight(), -1.0/5)\n}\n\n// BandwidthScott is a bandwidth estimator implementing Scott's Rule.\n// This is generally robust to outliers: it chooses the minimum\n// between the sample's standard deviation and a robust estimator of\n// a Gaussian distribution's standard deviation.\n//\n// Scott, D. W. (1992) Multivariate Density Estimation: Theory,\n// Practice, and Visualization.\nfunc BandwidthScott(data interface {\n\tStdDev() float64\n\tWeight() float64\n\tQuantile(float64) float64\n}) float64 {\n\tiqr := data.Quantile(0.75) - data.Quantile(0.25)\n\thScale := 1.06 * math.Pow(data.Weight(), -1.0/5)\n\tstdDev := data.StdDev()\n\tif stdDev < iqr/1.349 {\n\t\t// Use Silverman's Rule of Thumb\n\t\treturn hScale * stdDev\n\t} else {\n\t\t// Use IQR/1.349 as a robust estimator of the standard\n\t\t// deviation of a Gaussian distribution.\n\t\treturn hScale * (iqr / 1.349)\n\t}\n}\n\n// TODO(austin) Implement bandwidth estimator from Botev, Grotowski,\n// Kroese. (2010) Kernel Density Estimation via Diffusion.\n\n// KDEKernel represents a kernel to use for a KDE.\ntype KDEKernel int\n\n//go:generate stringer -type=KDEKernel\n\nconst (\n\t// An EpanechnikovKernel is a smooth kernel with bounded\n\t// support. As a result, the KDE will also have bounded\n\t// support. 
It is \"optimal\" in the sense that it minimizes the\n\t// asymptotic mean integrated squared error (AMISE).\n\tEpanechnikovKernel KDEKernel = iota\n\n\t// A GaussianKernel is a Gaussian (normal) kernel.\n\tGaussianKernel\n\n\t// A DeltaKernel is a Dirac delta function. The PDF of such a\n\t// KDE is not well-defined, but the CDF will represent each\n\t// sample as an instantaneous increase. This kernel ignores\n\t// bandwidth and never requires boundary correction.\n\tDeltaKernel\n)\n\n// KDEBoundaryMethod represents a boundary correction method for\n// constructing a KDE with bounded support.\ntype KDEBoundaryMethod int\n\n//go:generate stringer -type=KDEBoundaryMethod\n\nconst (\n\t// BoundaryReflect reflects the density estimate at the\n\t// boundaries.  For example, for a KDE with support [0, inf),\n\t// this is equivalent to ƒ̂ᵣ(x)=ƒ̂(x)+ƒ̂(-x) for x>=0.  This is a\n\t// simple and fast technique, but enforces that ƒ̂ᵣ'(0)=0, so\n\t// it may not be applicable to all distributions.\n\tBoundaryReflect KDEBoundaryMethod = iota\n)\n\ntype kdeKernel interface {\n\tpdfEach(xs []float64) []float64\n\tcdfEach(xs []float64) []float64\n}\n\nfunc (k *KDE) prepare() (kdeKernel, bool) {\n\t// Compute bandwidth.\n\tif k.Bandwidth == 0 {\n\t\tk.Bandwidth = BandwidthScott(k.Sample)\n\t}\n\n\t// Construct kernel.\n\tkernel := kdeKernel(nil)\n\tswitch k.Kernel {\n\tdefault:\n\t\tpanic(fmt.Sprint(\"unknown kernel\", k))\n\tcase EpanechnikovKernel:\n\t\tkernel = epanechnikovKernel{k.Bandwidth}\n\tcase GaussianKernel:\n\t\tkernel = NormalDist{0, k.Bandwidth}\n\tcase DeltaKernel:\n\t\tkernel = DeltaDist{0}\n\t}\n\n\t// Use boundary correction?\n\tbc := k.BoundaryMin != 0 || k.BoundaryMax != 0\n\n\treturn kernel, bc\n}\n\n// TODO: For KDEs of histograms, make histograms able to create a\n// weighted Sample and simply require the caller to provide a\n// good bandwidth from a StreamStats.\n\n// normalizedXs returns x - kde.Sample.Xs. 
Evaluating kernels shifted\n// by kde.Sample.Xs all at x is equivalent to evaluating one unshifted\n// kernel at x - kde.Sample.Xs.\nfunc (kde *KDE) normalizedXs(x float64) []float64 {\n\ttxs := make([]float64, len(kde.Sample.Xs))\n\tfor i, xi := range kde.Sample.Xs {\n\t\ttxs[i] = x - xi\n\t}\n\treturn txs\n}\n\nfunc (kde *KDE) PDF(x float64) float64 {\n\tkernel, bc := kde.prepare()\n\n\t// Apply boundary\n\tif bc && (x < kde.BoundaryMin || x >= kde.BoundaryMax) {\n\t\treturn 0\n\t}\n\n\ty := func(x float64) float64 {\n\t\t// Shift kernel to each of kde.xs and evaluate at x\n\t\tys := kernel.pdfEach(kde.normalizedXs(x))\n\n\t\t// Kernel samples are weighted according to the weights of xs\n\t\twys := Sample{Xs: ys, Weights: kde.Sample.Weights}\n\n\t\treturn wys.Sum() / wys.Weight()\n\t}\n\tif !bc {\n\t\treturn y(x)\n\t}\n\tswitch kde.BoundaryMethod {\n\tdefault:\n\t\tpanic(\"unknown boundary correction method\")\n\tcase BoundaryReflect:\n\t\tif math.IsInf(kde.BoundaryMax, 1) {\n\t\t\treturn y(x) + y(2*kde.BoundaryMin-x)\n\t\t} else if math.IsInf(kde.BoundaryMin, -1) {\n\t\t\treturn y(x) + y(2*kde.BoundaryMax-x)\n\t\t} else {\n\t\t\td := 2 * (kde.BoundaryMax - kde.BoundaryMin)\n\t\t\tw := 2 * (x - kde.BoundaryMin)\n\t\t\treturn series(func(n float64) float64 {\n\t\t\t\t// Points >= x\n\t\t\t\treturn y(x+n*d) + y(x+n*d-w)\n\t\t\t}) + series(func(n float64) float64 {\n\t\t\t\t// Points < x\n\t\t\t\treturn y(x-(n+1)*d+w) + y(x-(n+1)*d)\n\t\t\t})\n\t\t}\n\t}\n}\n\nfunc (kde *KDE) CDF(x float64) float64 {\n\tkernel, bc := kde.prepare()\n\n\t// Apply boundary\n\tif bc {\n\t\tif x < kde.BoundaryMin {\n\t\t\treturn 0\n\t\t} else if x >= kde.BoundaryMax {\n\t\t\treturn 1\n\t\t}\n\t}\n\n\ty := func(x float64) float64 {\n\t\t// Shift kernel integral to each of cdf.xs and evaluate at x\n\t\tys := kernel.cdfEach(kde.normalizedXs(x))\n\n\t\t// Kernel samples are weighted according to the weights of xs\n\t\twys := Sample{Xs: ys, Weights: kde.Sample.Weights}\n\n\t\treturn wys.Sum() 
/ wys.Weight()\n\t}\n\tif !bc {\n\t\treturn y(x)\n\t}\n\tswitch kde.BoundaryMethod {\n\tdefault:\n\t\tpanic(\"unknown boundary correction method\")\n\tcase BoundaryReflect:\n\t\tif math.IsInf(kde.BoundaryMax, 1) {\n\t\t\treturn y(x) - y(2*kde.BoundaryMin-x)\n\t\t} else if math.IsInf(kde.BoundaryMin, -1) {\n\t\t\treturn y(x) + (1 - y(2*kde.BoundaryMax-x))\n\t\t} else {\n\t\t\td := 2 * (kde.BoundaryMax - kde.BoundaryMin)\n\t\t\tw := 2 * (x - kde.BoundaryMin)\n\t\t\treturn series(func(n float64) float64 {\n\t\t\t\t// Windows >= x-w\n\t\t\t\treturn y(x+n*d) - y(x+n*d-w)\n\t\t\t}) + series(func(n float64) float64 {\n\t\t\t\t// Windows < x-w\n\t\t\t\treturn y(x-(n+1)*d) - y(x-(n+1)*d-w)\n\t\t\t})\n\t\t}\n\t}\n}\n\nfunc (kde *KDE) Bounds() (low float64, high float64) {\n\t_, bc := kde.prepare()\n\n\t// TODO(austin) If this KDE came from a histogram, we'd better\n\t// not sample at a significantly higher rate than the\n\t// histogram.  Maybe we want to just return the bounds of the\n\t// histogram?\n\n\t// TODO(austin) It would be nice if this could be instructed\n\t// to include all original data points, even if they are in\n\t// the tail.  Probably that should just be up to the caller to\n\t// pass an axis derived from the bounds of the original data.\n\n\t// Use the lowest and highest samples as starting points\n\tlowX, highX := kde.Sample.Bounds()\n\tif lowX == highX {\n\t\tlowX -= 1\n\t\thighX += 1\n\t}\n\n\t// Find the end points that contain 99% of the CDF's weight.\n\t// Since bisect requires that the root be bracketed, start by\n\t// expanding our range if necessary.  
TODO(austin) This can\n\t// definitely be done faster.\n\tconst (\n\t\tlowY      = 0.005\n\t\thighY     = 0.995\n\t\ttolerance = 0.001\n\t)\n\tfor kde.CDF(lowX) > lowY {\n\t\tlowX -= highX - lowX\n\t}\n\tfor kde.CDF(highX) < highY {\n\t\thighX += highX - lowX\n\t}\n\t// Explicitly accept discontinuities, since we may be using a\n\t// discontiguous kernel.\n\tlow, _ = bisect(func(x float64) float64 { return kde.CDF(x) - lowY }, lowX, highX, tolerance)\n\thigh, _ = bisect(func(x float64) float64 { return kde.CDF(x) - highY }, lowX, highX, tolerance)\n\n\t// Expand width by 20% to give some margins\n\twidth := high - low\n\tlow, high = low-0.1*width, high+0.1*width\n\n\t// Limit to bounds\n\tif bc {\n\t\tlow = math.Max(low, kde.BoundaryMin)\n\t\thigh = math.Min(high, kde.BoundaryMax)\n\t}\n\n\treturn\n}\n\ntype epanechnikovKernel struct {\n\th float64\n}\n\nfunc (d epanechnikovKernel) pdfEach(xs []float64) []float64 {\n\tys := make([]float64, len(xs))\n\ta := 0.75 / d.h\n\tinvhh := 1 / (d.h * d.h)\n\tfor i, x := range xs {\n\t\tif -d.h < x && x < d.h {\n\t\t\tys[i] = a * (1 - x*x*invhh)\n\t\t}\n\t}\n\treturn ys\n}\n\nfunc (d epanechnikovKernel) cdfEach(xs []float64) []float64 {\n\tys := make([]float64, len(xs))\n\tinvh := 1 / d.h\n\tfor i, x := range xs {\n\t\tif x > d.h {\n\t\t\tys[i] = 1\n\t\t} else if x > -d.h {\n\t\t\tu := x * invh\n\t\t\tys[i] = 0.25 * (2 + 3*u - u*u*u)\n\t\t}\n\t}\n\treturn ys\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/kde_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage stats\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n)\n\nfunc TestKDEOneSample(t *testing.T) {\n\tx := float64(5)\n\n\t// Unweighted, fixed bandwidth\n\tkde := KDE{\n\t\tSample:    Sample{Xs: []float64{x}},\n\t\tKernel:    GaussianKernel,\n\t\tBandwidth: 1,\n\t}\n\tif e, g := StdNormal.PDF(0), kde.PDF(x); !aeq(e, g) {\n\t\tt.Errorf(\"bad PDF value at sample: expected %g, got %g\", e, g)\n\t}\n\tif e, g := 0.0, kde.PDF(-10000); !aeq(e, g) {\n\t\tt.Errorf(\"bad PDF value at low tail: expected %g, got %g\", e, g)\n\t}\n\tif e, g := 0.0, kde.PDF(10000); !aeq(e, g) {\n\t\tt.Errorf(\"bad PDF value at high tail: expected %g, got %g\", e, g)\n\t}\n\n\tif e, g := 0.5, kde.CDF(x); !aeq(e, g) {\n\t\tt.Errorf(\"bad CDF value at sample: expected %g, got %g\", e, g)\n\t}\n\tif e, g := 0.0, kde.CDF(-10000); !aeq(e, g) {\n\t\tt.Errorf(\"bad CDF value at low tail: expected %g, got %g\", e, g)\n\t}\n\tif e, g := 1.0, kde.CDF(10000); !aeq(e, g) {\n\t\tt.Errorf(\"bad CDF value at high tail: expected %g, got %g\", e, g)\n\t}\n\n\tlow, high := kde.Bounds()\n\tif e, g := x-2, low; e < g {\n\t\tt.Errorf(\"bad low bound: expected %g, got %g\", e, g)\n\t}\n\tif e, g := x+2, high; e > g {\n\t\tt.Errorf(\"bad high bound: expected %g, got %g\", e, g)\n\t}\n\n\tkde = KDE{\n\t\tSample:    Sample{Xs: []float64{x}},\n\t\tKernel:    EpanechnikovKernel,\n\t\tBandwidth: 2,\n\t}\n\ttestFunc(t, fmt.Sprintf(\"%+v.PDF\", kde), kde.PDF, map[float64]float64{\n\t\tx - 2: 0,\n\t\tx - 1: 0.5625 / 2,\n\t\tx:     0.75 / 2,\n\t\tx + 1: 0.5625 / 2,\n\t\tx + 2: 0,\n\t})\n\ttestFunc(t, fmt.Sprintf(\"%+v.CDF\", kde), kde.CDF, map[float64]float64{\n\t\tx - 2: 0,\n\t\tx - 1: 0.15625,\n\t\tx:     0.5,\n\t\tx + 1: 0.84375,\n\t\tx + 2: 1,\n\t})\n}\n\nfunc TestKDETwoSamples(t *testing.T) {\n\tkde := KDE{\n\t\tSample:    Sample{Xs: []float64{1, 
3}},\n\t\tKernel:    GaussianKernel,\n\t\tBandwidth: 2,\n\t}\n\ttestFunc(t, \"PDF\", kde.PDF, map[float64]float64{\n\t\t0: 0.120395730,\n\t\t1: 0.160228251,\n\t\t2: 0.176032663,\n\t\t3: 0.160228251,\n\t\t4: 0.120395730})\n\n\ttestFunc(t, \"CDF\", kde.CDF, map[float64]float64{\n\t\t0: 0.187672369,\n\t\t1: 0.329327626,\n\t\t2: 0.5,\n\t\t3: 0.670672373,\n\t\t4: 0.812327630})\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/kdeboundarymethod_string.go",
    "content": "// generated by stringer -type=KDEBoundaryMethod; DO NOT EDIT\n\npackage stats\n\nimport \"fmt\"\n\nconst _KDEBoundaryMethod_name = \"BoundaryReflect\"\n\nvar _KDEBoundaryMethod_index = [...]uint8{0, 15}\n\nfunc (i KDEBoundaryMethod) String() string {\n\tif i < 0 || i+1 >= KDEBoundaryMethod(len(_KDEBoundaryMethod_index)) {\n\t\treturn fmt.Sprintf(\"KDEBoundaryMethod(%d)\", i)\n\t}\n\treturn _KDEBoundaryMethod_name[_KDEBoundaryMethod_index[i]:_KDEBoundaryMethod_index[i+1]]\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/kdekernel_string.go",
    "content": "// generated by stringer -type=KDEKernel; DO NOT EDIT\n\npackage stats\n\nimport \"fmt\"\n\nconst _KDEKernel_name = \"GaussianKernelDeltaKernel\"\n\nvar _KDEKernel_index = [...]uint8{0, 14, 25}\n\nfunc (i KDEKernel) String() string {\n\tif i < 0 || i+1 >= KDEKernel(len(_KDEKernel_index)) {\n\t\treturn fmt.Sprintf(\"KDEKernel(%d)\", i)\n\t}\n\treturn _KDEKernel_name[_KDEKernel_index[i]:_KDEKernel_index[i+1]]\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/linearhist.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage stats\n\n// LinearHist is a Histogram with uniformly-sized bins.\ntype LinearHist struct {\n\tmin, max, delta float64\n\tlow, high       uint\n\tbins            []uint\n}\n\n// NewLinearHist returns an empty histogram with nbins uniformly-sized\n// bins spanning [min, max].\nfunc NewLinearHist(min, max float64, nbins int) *LinearHist {\n\tdelta := float64(nbins) / (max - min)\n\treturn &LinearHist{min, max, delta, 0, 0, make([]uint, nbins)}\n}\n\nfunc (h *LinearHist) bin(x float64) int {\n\treturn int(h.delta * (x - h.min))\n}\n\nfunc (h *LinearHist) Add(x float64) {\n\tbin := h.bin(x)\n\tif bin < 0 {\n\t\th.low++\n\t} else if bin >= len(h.bins) {\n\t\th.high++\n\t} else {\n\t\th.bins[bin]++\n\t}\n}\n\nfunc (h *LinearHist) Counts() (uint, []uint, uint) {\n\treturn h.low, h.bins, h.high\n}\n\nfunc (h *LinearHist) BinToValue(bin float64) float64 {\n\treturn h.min + bin*h.delta\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/locationhypothesis_string.go",
    "content": "// generated by stringer -type LocationHypothesis; DO NOT EDIT\n\npackage stats\n\nimport \"fmt\"\n\nconst _LocationHypothesis_name = \"LocationLessLocationDiffersLocationGreater\"\n\nvar _LocationHypothesis_index = [...]uint8{0, 12, 27, 42}\n\nfunc (i LocationHypothesis) String() string {\n\ti -= -1\n\tif i < 0 || i+1 >= LocationHypothesis(len(_LocationHypothesis_index)) {\n\t\treturn fmt.Sprintf(\"LocationHypothesis(%d)\", i+-1)\n\t}\n\treturn _LocationHypothesis_name[_LocationHypothesis_index[i]:_LocationHypothesis_index[i+1]]\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/loghist.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage stats\n\nimport \"math\"\n\n// LogHist is a Histogram with logarithmically-spaced bins.\ntype LogHist struct {\n\tb         int\n\tm         float64\n\tmOverLogb float64\n\tlow, high uint\n\tbins      []uint\n}\n\n// NewLogHist returns an empty logarithmic histogram with bins for\n// integral values of m * log_b(x) up to x = max.\nfunc NewLogHist(b int, m float64, max float64) *LogHist {\n\t// TODO(austin) Minimum value as well?  If the samples are\n\t// actually integral, having fractional bin boundaries can\n\t// mess up smoothing.\n\tmOverLogb := m / math.Log(float64(b))\n\tnbins := int(math.Ceil(mOverLogb * math.Log(max)))\n\treturn &LogHist{b: b, m: m, mOverLogb: mOverLogb, low: 0, high: 0, bins: make([]uint, nbins)}\n}\n\nfunc (h *LogHist) bin(x float64) int {\n\treturn int(h.mOverLogb * math.Log(x))\n}\n\nfunc (h *LogHist) Add(x float64) {\n\tbin := h.bin(x)\n\tif bin < 0 {\n\t\th.low++\n\t} else if bin >= len(h.bins) {\n\t\th.high++\n\t} else {\n\t\th.bins[bin]++\n\t}\n}\n\nfunc (h *LogHist) Counts() (uint, []uint, uint) {\n\treturn h.low, h.bins, h.high\n}\n\nfunc (h *LogHist) BinToValue(bin float64) float64 {\n\treturn math.Pow(float64(h.b), bin/h.m)\n}\n\nfunc (h *LogHist) At(x float64) float64 {\n\tbin := h.bin(x)\n\tif bin < 0 || bin >= len(h.bins) {\n\t\treturn 0\n\t}\n\treturn float64(h.bins[bin])\n}\n\nfunc (h *LogHist) Bounds() (float64, float64) {\n\t// XXX Plot will plot this on a linear axis.  
Maybe this\n\t// should be able to return the natural axis?\n\t// Maybe then we could also give it the bins for the tics.\n\tlowbin := 0\n\tif h.low == 0 {\n\t\tfor bin, count := range h.bins {\n\t\t\tif count > 0 {\n\t\t\t\tlowbin = bin\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\thighbin := len(h.bins)\n\tif h.high == 0 {\n\t\tfor bin := range h.bins {\n\t\t\tif h.bins[len(h.bins)-bin-1] > 0 {\n\t\t\t\thighbin = len(h.bins) - bin\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn h.BinToValue(float64(lowbin)), h.BinToValue(float64(highbin))\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/normaldist.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage stats\n\nimport (\n\t\"math\"\n\t\"math/rand\"\n)\n\n// NormalDist is a normal (Gaussian) distribution with mean Mu and\n// standard deviation Sigma.\ntype NormalDist struct {\n\tMu, Sigma float64\n}\n\n// StdNormal is the standard normal distribution (Mu = 0, Sigma = 1)\nvar StdNormal = NormalDist{0, 1}\n\n// 1/sqrt(2 * pi)\nconst invSqrt2Pi = 0.39894228040143267793994605993438186847585863116493465766592583\n\nfunc (n NormalDist) PDF(x float64) float64 {\n\tz := x - n.Mu\n\treturn math.Exp(-z*z/(2*n.Sigma*n.Sigma)) * invSqrt2Pi / n.Sigma\n}\n\nfunc (n NormalDist) pdfEach(xs []float64) []float64 {\n\tres := make([]float64, len(xs))\n\tif n.Mu == 0 && n.Sigma == 1 {\n\t\t// Standard normal fast path\n\t\tfor i, x := range xs {\n\t\t\tres[i] = math.Exp(-x*x/2) * invSqrt2Pi\n\t\t}\n\t} else {\n\t\ta := -1 / (2 * n.Sigma * n.Sigma)\n\t\tb := invSqrt2Pi / n.Sigma\n\t\tfor i, x := range xs {\n\t\t\tz := x - n.Mu\n\t\t\tres[i] = math.Exp(z*z*a) * b\n\t\t}\n\t}\n\treturn res\n}\n\nfunc (n NormalDist) CDF(x float64) float64 {\n\treturn math.Erfc(-(x-n.Mu)/(n.Sigma*math.Sqrt2)) / 2\n}\n\nfunc (n NormalDist) cdfEach(xs []float64) []float64 {\n\tres := make([]float64, len(xs))\n\ta := 1 / (n.Sigma * math.Sqrt2)\n\tfor i, x := range xs {\n\t\tres[i] = math.Erfc(-(x-n.Mu)*a) / 2\n\t}\n\treturn res\n}\n\nfunc (n NormalDist) InvCDF(p float64) (x float64) {\n\t// This is based on Peter John Acklam's inverse normal CDF\n\t// algorithm: http://home.online.no/~pjacklam/notes/invnorm/\n\tconst (\n\t\ta1 = -3.969683028665376e+01\n\t\ta2 = 2.209460984245205e+02\n\t\ta3 = -2.759285104469687e+02\n\t\ta4 = 1.383577518672690e+02\n\t\ta5 = -3.066479806614716e+01\n\t\ta6 = 2.506628277459239e+00\n\n\t\tb1 = -5.447609879822406e+01\n\t\tb2 = 1.615858368580409e+02\n\t\tb3 = -1.556989798598866e+02\n\t\tb4 = 
6.680131188771972e+01\n\t\tb5 = -1.328068155288572e+01\n\n\t\tc1 = -7.784894002430293e-03\n\t\tc2 = -3.223964580411365e-01\n\t\tc3 = -2.400758277161838e+00\n\t\tc4 = -2.549732539343734e+00\n\t\tc5 = 4.374664141464968e+00\n\t\tc6 = 2.938163982698783e+00\n\n\t\td1 = 7.784695709041462e-03\n\t\td2 = 3.224671290700398e-01\n\t\td3 = 2.445134137142996e+00\n\t\td4 = 3.754408661907416e+00\n\n\t\tplow  = 0.02425\n\t\tphigh = 1 - plow\n\t)\n\n\tif p < 0 || p > 1 {\n\t\treturn nan\n\t} else if p == 0 {\n\t\treturn -inf\n\t} else if p == 1 {\n\t\treturn inf\n\t}\n\n\tif p < plow {\n\t\t// Rational approximation for lower region.\n\t\tq := math.Sqrt(-2 * math.Log(p))\n\t\tx = (((((c1*q+c2)*q+c3)*q+c4)*q+c5)*q + c6) /\n\t\t\t((((d1*q+d2)*q+d3)*q+d4)*q + 1)\n\t} else if phigh < p {\n\t\t// Rational approximation for upper region.\n\t\tq := math.Sqrt(-2 * math.Log(1-p))\n\t\tx = -(((((c1*q+c2)*q+c3)*q+c4)*q+c5)*q + c6) /\n\t\t\t((((d1*q+d2)*q+d3)*q+d4)*q + 1)\n\t} else {\n\t\t// Rational approximation for central region.\n\t\tq := p - 0.5\n\t\tr := q * q\n\t\tx = (((((a1*r+a2)*r+a3)*r+a4)*r+a5)*r + a6) * q /\n\t\t\t(((((b1*r+b2)*r+b3)*r+b4)*r+b5)*r + 1)\n\t}\n\n\t// Refine approximation.\n\te := 0.5*math.Erfc(-x/math.Sqrt2) - p\n\tu := e * math.Sqrt(2*math.Pi) * math.Exp(x*x/2)\n\tx = x - u/(1+x*u/2)\n\n\t// Adjust from standard normal.\n\treturn x*n.Sigma + n.Mu\n}\n\nfunc (n NormalDist) Rand(r *rand.Rand) float64 {\n\tvar x float64\n\tif r == nil {\n\t\tx = rand.NormFloat64()\n\t} else {\n\t\tx = r.NormFloat64()\n\t}\n\treturn x*n.Sigma + n.Mu\n}\n\nfunc (n NormalDist) Bounds() (float64, float64) {\n\tconst stddevs = 3\n\treturn n.Mu - stddevs*n.Sigma, n.Mu + stddevs*n.Sigma\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/normaldist_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage stats\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"testing\"\n)\n\nfunc TestNormalDist(t *testing.T) {\n\td := StdNormal\n\n\ttestFunc(t, fmt.Sprintf(\"%+v.PDF\", d), d.PDF, map[float64]float64{\n\t\t-10000: 0, // approx\n\t\t-1:     1 / math.Sqrt(2*math.Pi) * math.Exp(-0.5),\n\t\t0:      1 / math.Sqrt(2*math.Pi),\n\t\t1:      1 / math.Sqrt(2*math.Pi) * math.Exp(-0.5),\n\t\t10000:  0, // approx\n\t})\n\n\ttestFunc(t, fmt.Sprintf(\"%+v.CDF\", d), d.CDF, map[float64]float64{\n\t\t-10000: 0, // approx\n\t\t0:      0.5,\n\t\t10000:  1, // approx\n\t})\n\n\td2 := NormalDist{Mu: 2, Sigma: 5}\n\ttestInvCDF(t, d, false)\n\ttestInvCDF(t, d2, false)\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/package.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package stats implements several statistical distributions,\n// hypothesis tests, and functions for descriptive statistics.\n//\n// Currently stats is fairly small, but for what it does implement, it\n// focuses on high quality, fast implementations with good, idiomatic\n// Go APIs.\npackage stats // import \"github.com/aclements/go-moremath/stats\"\n\nimport (\n\t\"errors\"\n\t\"math\"\n)\n\nvar inf = math.Inf(1)\nvar nan = math.NaN()\n\n// TODO: Put all errors in the same place and maybe unify them.\n\nvar (\n\tErrSamplesEqual = errors.New(\"all samples are equal\")\n)\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/sample.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage stats\n\nimport (\n\t\"math\"\n\t\"sort\"\n\n\t\"github.com/aclements/go-moremath/vec\"\n)\n\n// Sample is a collection of possibly weighted data points.\ntype Sample struct {\n\t// Xs is the slice of sample values.\n\tXs []float64\n\n\t// Weights[i] is the weight of sample Xs[i].  If Weights is\n\t// nil, all Xs have weight 1.  Weights must have the same\n\t// length of Xs and all values must be non-negative.\n\tWeights []float64\n\n\t// Sorted indicates that Xs is sorted in ascending order.\n\tSorted bool\n}\n\n// Bounds returns the minimum and maximum values of xs.\nfunc Bounds(xs []float64) (min float64, max float64) {\n\tif len(xs) == 0 {\n\t\treturn math.NaN(), math.NaN()\n\t}\n\tmin, max = xs[0], xs[0]\n\tfor _, x := range xs {\n\t\tif x < min {\n\t\t\tmin = x\n\t\t}\n\t\tif x > max {\n\t\t\tmax = x\n\t\t}\n\t}\n\treturn\n}\n\n// Bounds returns the minimum and maximum values of the Sample.\n//\n// If the Sample is weighted, this ignores samples with zero weight.\n//\n// This is constant time if s.Sorted and there are no zero-weighted\n// values.\nfunc (s Sample) Bounds() (min float64, max float64) {\n\tif len(s.Xs) == 0 || (!s.Sorted && s.Weights == nil) {\n\t\treturn Bounds(s.Xs)\n\t}\n\n\tif s.Sorted {\n\t\tif s.Weights == nil {\n\t\t\treturn s.Xs[0], s.Xs[len(s.Xs)-1]\n\t\t}\n\t\tmin, max = math.NaN(), math.NaN()\n\t\tfor i, w := range s.Weights {\n\t\t\tif w != 0 {\n\t\t\t\tmin = s.Xs[i]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif math.IsNaN(min) {\n\t\t\treturn\n\t\t}\n\t\tfor i := range s.Weights {\n\t\t\tif s.Weights[len(s.Weights)-i-1] != 0 {\n\t\t\t\tmax = s.Xs[len(s.Weights)-i-1]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t} else {\n\t\tmin, max = math.Inf(1), math.Inf(-1)\n\t\tfor i, x := range s.Xs {\n\t\t\tw := s.Weights[i]\n\t\t\tif x < min && w != 0 {\n\t\t\t\tmin = 
x\n\t\t\t}\n\t\t\tif x > max && w != 0 {\n\t\t\t\tmax = x\n\t\t\t}\n\t\t}\n\t\tif math.IsInf(min, 0) {\n\t\t\tmin, max = math.NaN(), math.NaN()\n\t\t}\n\t}\n\treturn\n}\n\n// Sum returns the (possibly weighted) sum of the Sample.\nfunc (s Sample) Sum() float64 {\n\tif s.Weights == nil {\n\t\treturn vec.Sum(s.Xs)\n\t}\n\tsum := 0.0\n\tfor i, x := range s.Xs {\n\t\tsum += x * s.Weights[i]\n\t}\n\treturn sum\n}\n\n// Weight returns the total weight of the Sasmple.\nfunc (s Sample) Weight() float64 {\n\tif s.Weights == nil {\n\t\treturn float64(len(s.Xs))\n\t}\n\treturn vec.Sum(s.Weights)\n}\n\n// Mean returns the arithmetic mean of xs.\nfunc Mean(xs []float64) float64 {\n\tif len(xs) == 0 {\n\t\treturn math.NaN()\n\t}\n\tm := 0.0\n\tfor i, x := range xs {\n\t\tm += (x - m) / float64(i+1)\n\t}\n\treturn m\n}\n\n// Mean returns the arithmetic mean of the Sample.\nfunc (s Sample) Mean() float64 {\n\tif len(s.Xs) == 0 || s.Weights == nil {\n\t\treturn Mean(s.Xs)\n\t}\n\n\tm, wsum := 0.0, 0.0\n\tfor i, x := range s.Xs {\n\t\t// Use weighted incremental mean:\n\t\t//   m_i = (1 - w_i/wsum_i) * m_(i-1) + (w_i/wsum_i) * x_i\n\t\t//       = m_(i-1) + (x_i - m_(i-1)) * (w_i/wsum_i)\n\t\tw := s.Weights[i]\n\t\twsum += w\n\t\tm += (x - m) * w / wsum\n\t}\n\treturn m\n}\n\n// GeoMean returns the geometric mean of xs. xs must be positive.\nfunc GeoMean(xs []float64) float64 {\n\tif len(xs) == 0 {\n\t\treturn math.NaN()\n\t}\n\tm := 0.0\n\tfor i, x := range xs {\n\t\tif x <= 0 {\n\t\t\treturn math.NaN()\n\t\t}\n\t\tlx := math.Log(x)\n\t\tm += (lx - m) / float64(i+1)\n\t}\n\treturn math.Exp(m)\n}\n\n// GeoMean returns the geometric mean of the Sample. 
All samples\n// values must be positive.\nfunc (s Sample) GeoMean() float64 {\n\tif len(s.Xs) == 0 || s.Weights == nil {\n\t\treturn GeoMean(s.Xs)\n\t}\n\n\tm, wsum := 0.0, 0.0\n\tfor i, x := range s.Xs {\n\t\tw := s.Weights[i]\n\t\twsum += w\n\t\tlx := math.Log(x)\n\t\tm += (lx - m) * w / wsum\n\t}\n\treturn math.Exp(m)\n}\n\n// Variance returns the sample variance of xs.\nfunc Variance(xs []float64) float64 {\n\tif len(xs) == 0 {\n\t\treturn math.NaN()\n\t} else if len(xs) <= 1 {\n\t\treturn 0\n\t}\n\n\t// Based on Wikipedia's presentation of Welford 1962\n\t// (http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm).\n\t// This is more numerically stable than the standard two-pass\n\t// formula and not prone to massive cancellation.\n\tmean, M2 := 0.0, 0.0\n\tfor n, x := range xs {\n\t\tdelta := x - mean\n\t\tmean += delta / float64(n+1)\n\t\tM2 += delta * (x - mean)\n\t}\n\treturn M2 / float64(len(xs)-1)\n}\n\nfunc (s Sample) Variance() float64 {\n\tif len(s.Xs) == 0 || s.Weights == nil {\n\t\treturn Variance(s.Xs)\n\t}\n\t// TODO(austin)\n\tpanic(\"Weighted Variance not implemented\")\n}\n\n// StdDev returns the sample standard deviation of xs.\nfunc StdDev(xs []float64) float64 {\n\treturn math.Sqrt(Variance(xs))\n}\n\n// StdDev returns the sample standard deviation of the Sample.\nfunc (s Sample) StdDev() float64 {\n\tif len(s.Xs) == 0 || s.Weights == nil {\n\t\treturn StdDev(s.Xs)\n\t}\n\t// TODO(austin)\n\tpanic(\"Weighted StdDev not implemented\")\n}\n\n// Quantile returns the sample value X at which q*weight of the sample\n// is <= X. This uses interpolation method R8 from Hyndman and Fan\n// (1996).\n//\n// q will be capped to the range [0, 1]. If len(xs) == 0 or all\n// weights are 0, returns NaN.\n//\n// Quantile(0.5) is the median. Quantile(0.25) and Quantile(0.75) are\n// the first and third quartiles, respectively. 
Quantile(P/100) is the\n// P'th percentile.\n//\n// This is constant time if s.Sorted and s.Weights == nil.\nfunc (s Sample) Quantile(q float64) float64 {\n\tif len(s.Xs) == 0 {\n\t\treturn math.NaN()\n\t} else if q <= 0 {\n\t\tmin, _ := s.Bounds()\n\t\treturn min\n\t} else if q >= 1 {\n\t\t_, max := s.Bounds()\n\t\treturn max\n\t}\n\n\tif !s.Sorted {\n\t\t// TODO(austin) Use select algorithm instead\n\t\ts = *s.Copy().Sort()\n\t}\n\n\tif s.Weights == nil {\n\t\tN := float64(len(s.Xs))\n\t\t//n := q * (N + 1) // R6\n\t\tn := 1/3.0 + q*(N+1/3.0) // R8\n\t\tkf, frac := math.Modf(n)\n\t\tk := int(kf)\n\t\tif k <= 0 {\n\t\t\treturn s.Xs[0]\n\t\t} else if k >= len(s.Xs) {\n\t\t\treturn s.Xs[len(s.Xs)-1]\n\t\t}\n\t\treturn s.Xs[k-1] + frac*(s.Xs[k]-s.Xs[k-1])\n\t} else {\n\t\t// TODO(austin): Implement interpolation\n\n\t\ttarget := s.Weight() * q\n\n\t\t// TODO(austin) If we had cumulative weights, we could\n\t\t// do this in log time.\n\t\tfor i, weight := range s.Weights {\n\t\t\ttarget -= weight\n\t\t\tif target < 0 {\n\t\t\t\treturn s.Xs[i]\n\t\t\t}\n\t\t}\n\t\treturn s.Xs[len(s.Xs)-1]\n\t}\n}\n\n// IQR returns the interquartile range of the Sample.\n//\n// This is constant time if s.Sorted and s.Weights == nil.\nfunc (s Sample) IQR() float64 {\n\tif !s.Sorted {\n\t\ts = *s.Copy().Sort()\n\t}\n\treturn s.Quantile(0.75) - s.Quantile(0.25)\n}\n\ntype sampleSorter struct {\n\txs      []float64\n\tweights []float64\n}\n\nfunc (p *sampleSorter) Len() int {\n\treturn len(p.xs)\n}\n\nfunc (p *sampleSorter) Less(i, j int) bool {\n\treturn p.xs[i] < p.xs[j]\n}\n\nfunc (p *sampleSorter) Swap(i, j int) {\n\tp.xs[i], p.xs[j] = p.xs[j], p.xs[i]\n\tp.weights[i], p.weights[j] = p.weights[j], p.weights[i]\n}\n\n// Sort sorts the samples in place in s and returns s.\n//\n// A sorted sample improves the performance of some algorithms.\nfunc (s *Sample) Sort() *Sample {\n\tif s.Sorted || sort.Float64sAreSorted(s.Xs) {\n\t\t// All set\n\t} else if s.Weights == nil 
{\n\t\tsort.Float64s(s.Xs)\n\t} else {\n\t\tsort.Sort(&sampleSorter{s.Xs, s.Weights})\n\t}\n\ts.Sorted = true\n\treturn s\n}\n\n// Copy returns a copy of the Sample.\n//\n// The returned Sample shares no data with the original, so they can\n// be modified (for example, sorted) independently.\nfunc (s Sample) Copy() *Sample {\n\txs := make([]float64, len(s.Xs))\n\tcopy(xs, s.Xs)\n\n\tweights := []float64(nil)\n\tif s.Weights != nil {\n\t\tweights = make([]float64, len(s.Weights))\n\t\tcopy(weights, s.Weights)\n\t}\n\n\treturn &Sample{xs, weights, s.Sorted}\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/sample_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage stats\n\nimport \"testing\"\n\nfunc TestSampleQuantile(t *testing.T) {\n\ts := Sample{Xs: []float64{15, 20, 35, 40, 50}}\n\ttestFunc(t, \"Quantile\", s.Quantile, map[float64]float64{\n\t\t-1:  15,\n\t\t0:   15,\n\t\t.05: 15,\n\t\t.30: 19.666666666666666,\n\t\t.40: 27,\n\t\t.95: 50,\n\t\t1:   50,\n\t\t2:   50,\n\t})\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/stream.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage stats\n\nimport (\n\t\"fmt\"\n\t\"math\"\n)\n\n// TODO(austin) Unify more with Sample interface\n\n// StreamStats tracks basic statistics for a stream of data in O(1)\n// space.\n//\n// StreamStats should be initialized to its zero value.\ntype StreamStats struct {\n\tCount           uint\n\tTotal, Min, Max float64\n\n\t// Numerically stable online mean\n\tmean          float64\n\tmeanOfSquares float64\n\n\t// Online variance\n\tvM2 float64\n}\n\n// Add updates s's statistics with sample value x.\nfunc (s *StreamStats) Add(x float64) {\n\ts.Total += x\n\tif s.Count == 0 {\n\t\ts.Min, s.Max = x, x\n\t} else {\n\t\tif x < s.Min {\n\t\t\ts.Min = x\n\t\t}\n\t\tif x > s.Max {\n\t\t\ts.Max = x\n\t\t}\n\t}\n\ts.Count++\n\n\t// Update online mean, mean of squares, and variance.  Online\n\t// variance based on Wikipedia's presentation (\"Algorithms for\n\t// calculating variance\") of Knuth's formulation of Welford\n\t// 1962.\n\tdelta := x - s.mean\n\ts.mean += delta / float64(s.Count)\n\ts.meanOfSquares += (x*x - s.meanOfSquares) / float64(s.Count)\n\ts.vM2 += delta * (x - s.mean)\n}\n\nfunc (s *StreamStats) Weight() float64 {\n\treturn float64(s.Count)\n}\n\nfunc (s *StreamStats) Mean() float64 {\n\treturn s.mean\n}\n\nfunc (s *StreamStats) Variance() float64 {\n\treturn s.vM2 / float64(s.Count-1)\n}\n\nfunc (s *StreamStats) StdDev() float64 {\n\treturn math.Sqrt(s.Variance())\n}\n\nfunc (s *StreamStats) RMS() float64 {\n\treturn math.Sqrt(s.meanOfSquares)\n}\n\n// Combine updates s's statistics as if all samples added to o were\n// added to s.\nfunc (s *StreamStats) Combine(o *StreamStats) {\n\tcount := s.Count + o.Count\n\n\t// Compute combined online variance statistics\n\tdelta := o.mean - s.mean\n\tmean := s.mean + delta*float64(o.Count)/float64(count)\n\tvM2 := s.vM2 + o.vM2 + 
delta*delta*float64(s.Count)*float64(o.Count)/float64(count)\n\n\ts.Count = count\n\ts.Total += o.Total\n\tif o.Min < s.Min {\n\t\ts.Min = o.Min\n\t}\n\tif o.Max > s.Max {\n\t\ts.Max = o.Max\n\t}\n\ts.mean = mean\n\ts.meanOfSquares += (o.meanOfSquares - s.meanOfSquares) * float64(o.Count) / float64(count)\n\ts.vM2 = vM2\n}\n\nfunc (s *StreamStats) String() string {\n\treturn fmt.Sprintf(\"Count=%d Total=%g Min=%g Mean=%g RMS=%g Max=%g StdDev=%g\", s.Count, s.Total, s.Min, s.Mean(), s.RMS(), s.Max, s.StdDev())\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/tdist.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage stats\n\nimport (\n\t\"math\"\n\n\t\"github.com/aclements/go-moremath/mathx\"\n)\n\n// A TDist is a Student's t-distribution with V degrees of freedom.\ntype TDist struct {\n\tV float64\n}\n\nfunc lgamma(x float64) float64 {\n\ty, _ := math.Lgamma(x)\n\treturn y\n}\n\nfunc (t TDist) PDF(x float64) float64 {\n\treturn math.Exp(lgamma((t.V+1)/2)-lgamma(t.V/2)) /\n\t\tmath.Sqrt(t.V*math.Pi) * math.Pow(1+(x*x)/t.V, -(t.V+1)/2)\n}\n\nfunc (t TDist) CDF(x float64) float64 {\n\tif x == 0 {\n\t\treturn 0.5\n\t} else if x > 0 {\n\t\treturn 1 - 0.5*mathx.BetaInc(t.V/(t.V+x*x), t.V/2, 0.5)\n\t} else if x < 0 {\n\t\treturn 1 - t.CDF(-x)\n\t} else {\n\t\treturn math.NaN()\n\t}\n}\n\nfunc (t TDist) Bounds() (float64, float64) {\n\treturn -4, 4\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/tdist_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage stats\n\nimport \"testing\"\n\nfunc TestT(t *testing.T) {\n\ttestFunc(t, \"PDF(%v|v=1)\", TDist{1}.PDF, map[float64]float64{\n\t\t-10: 0.0031515830315226806,\n\t\t-9:  0.0038818278802901312,\n\t\t-8:  0.0048970751720583188,\n\t\t-7:  0.0063661977236758151,\n\t\t-6:  0.0086029698968592104,\n\t\t-5:  0.012242687930145799,\n\t\t-4:  0.018724110951987692,\n\t\t-3:  0.031830988618379075,\n\t\t-2:  0.063661977236758149,\n\t\t-1:  0.15915494309189537,\n\t\t0:   0.31830988618379075,\n\t\t1:   0.15915494309189537,\n\t\t2:   0.063661977236758149,\n\t\t3:   0.031830988618379075,\n\t\t4:   0.018724110951987692,\n\t\t5:   0.012242687930145799,\n\t\t6:   0.0086029698968592104,\n\t\t7:   0.0063661977236758151,\n\t\t8:   0.0048970751720583188,\n\t\t9:   0.0038818278802901312})\n\ttestFunc(t, \"PDF(%v|v=5)\", TDist{5}.PDF, map[float64]float64{\n\t\t-10: 4.0989816415343313e-05,\n\t\t-9:  7.4601664362590413e-05,\n\t\t-8:  0.00014444303269563934,\n\t\t-7:  0.00030134402928803911,\n\t\t-6:  0.00068848154013743002,\n\t\t-5:  0.0017574383788078445,\n\t\t-4:  0.0051237270519179133,\n\t\t-3:  0.017292578800222964,\n\t\t-2:  0.065090310326216455,\n\t\t-1:  0.21967979735098059,\n\t\t0:   0.3796066898224944,\n\t\t1:   0.21967979735098059,\n\t\t2:   0.065090310326216455,\n\t\t3:   0.017292578800222964,\n\t\t4:   0.0051237270519179133,\n\t\t5:   0.0017574383788078445,\n\t\t6:   0.00068848154013743002,\n\t\t7:   0.00030134402928803911,\n\t\t8:   0.00014444303269563934,\n\t\t9:   7.4601664362590413e-05})\n\n\ttestFunc(t, \"CDF(%v|v=1)\", TDist{1}.CDF, map[float64]float64{\n\t\t-10: 0.03172551743055356,\n\t\t-9:  0.035223287477277272,\n\t\t-8:  0.039583424160565539,\n\t\t-7:  0.045167235300866547,\n\t\t-6:  0.052568456711253424,\n\t\t-5:  0.06283295818900117,\n\t\t-4:  0.077979130377369324,\n\t\t-3:  
0.10241638234956672,\n\t\t-2:  0.14758361765043321,\n\t\t-1:  0.24999999999999978,\n\t\t0:   0.5,\n\t\t1:   0.75000000000000022,\n\t\t2:   0.85241638234956674,\n\t\t3:   0.89758361765043326,\n\t\t4:   0.92202086962263075,\n\t\t5:   0.93716704181099886,\n\t\t6:   0.94743154328874657,\n\t\t7:   0.95483276469913347,\n\t\t8:   0.96041657583943452,\n\t\t9:   0.96477671252272279})\n\ttestFunc(t, \"CDF(%v|v=5)\", TDist{5}.CDF, map[float64]float64{\n\t\t-10: 8.5473787871481787e-05,\n\t\t-9:  0.00014133998712194845,\n\t\t-8:  0.00024645333028622187,\n\t\t-7:  0.00045837375719920225,\n\t\t-6:  0.00092306914479700695,\n\t\t-5:  0.0020523579900266612,\n\t\t-4:  0.0051617077404157259,\n\t\t-3:  0.015049623948731284,\n\t\t-2:  0.05096973941492914,\n\t\t-1:  0.18160873382456127,\n\t\t0:   0.5,\n\t\t1:   0.81839126617543867,\n\t\t2:   0.9490302605850709,\n\t\t3:   0.98495037605126878,\n\t\t4:   0.99483829225958431,\n\t\t5:   0.99794764200997332,\n\t\t6:   0.99907693085520299,\n\t\t7:   0.99954162624280074,\n\t\t8:   0.99975354666971372,\n\t\t9:   0.9998586600128780})\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/ttest.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage stats\n\nimport (\n\t\"errors\"\n\t\"math\"\n)\n\n// A TTestResult is the result of a t-test.\ntype TTestResult struct {\n\t// N1 and N2 are the sizes of the input samples. For a\n\t// one-sample t-test, N2 is 0.\n\tN1, N2 int\n\n\t// T is the value of the t-statistic for this t-test.\n\tT float64\n\n\t// DoF is the degrees of freedom for this t-test.\n\tDoF float64\n\n\t// AltHypothesis specifies the alternative hypothesis tested\n\t// by this test against the null hypothesis that there is no\n\t// difference in the means of the samples.\n\tAltHypothesis LocationHypothesis\n\n\t// P is p-value for this t-test for the given null hypothesis.\n\tP float64\n}\n\nfunc newTTestResult(n1, n2 int, t, dof float64, alt LocationHypothesis) *TTestResult {\n\tdist := TDist{dof}\n\tvar p float64\n\tswitch alt {\n\tcase LocationDiffers:\n\t\tp = 2 * (1 - dist.CDF(math.Abs(t)))\n\tcase LocationLess:\n\t\tp = dist.CDF(t)\n\tcase LocationGreater:\n\t\tp = 1 - dist.CDF(t)\n\t}\n\treturn &TTestResult{N1: n1, N2: n2, T: t, DoF: dof, AltHypothesis: alt, P: p}\n}\n\n// A TTestSample is a sample that can be used for a one or two sample\n// t-test.\ntype TTestSample interface {\n\tWeight() float64\n\tMean() float64\n\tVariance() float64\n}\n\nvar (\n\tErrSampleSize        = errors.New(\"sample is too small\")\n\tErrZeroVariance      = errors.New(\"sample has zero variance\")\n\tErrMismatchedSamples = errors.New(\"samples have different lengths\")\n)\n\n// TwoSampleTTest performs a two-sample (unpaired) Student's t-test on\n// samples x1 and x2. This is a test of the null hypothesis that x1\n// and x2 are drawn from populations with equal means. 
It assumes x1\n// and x2 are independent samples, that the distributions have equal\n// variance, and that the populations are normally distributed.\nfunc TwoSampleTTest(x1, x2 TTestSample, alt LocationHypothesis) (*TTestResult, error) {\n\tn1, n2 := x1.Weight(), x2.Weight()\n\tif n1 == 0 || n2 == 0 {\n\t\treturn nil, ErrSampleSize\n\t}\n\tv1, v2 := x1.Variance(), x2.Variance()\n\tif v1 == 0 && v2 == 0 {\n\t\treturn nil, ErrZeroVariance\n\t}\n\n\tdof := n1 + n2 - 2\n\tv12 := ((n1-1)*v1 + (n2-1)*v2) / dof\n\tt := (x1.Mean() - x2.Mean()) / math.Sqrt(v12*(1/n1+1/n2))\n\treturn newTTestResult(int(n1), int(n2), t, dof, alt), nil\n}\n\n// TwoSampleWelchTTest performs a two-sample (unpaired) Welch's t-test\n// on samples x1 and x2. This is like TwoSampleTTest, but does not\n// assume the distributions have equal variance.\nfunc TwoSampleWelchTTest(x1, x2 TTestSample, alt LocationHypothesis) (*TTestResult, error) {\n\tn1, n2 := x1.Weight(), x2.Weight()\n\tif n1 <= 1 || n2 <= 1 {\n\t\t// TODO: Can we still do this with n == 1?\n\t\treturn nil, ErrSampleSize\n\t}\n\tv1, v2 := x1.Variance(), x2.Variance()\n\tif v1 == 0 && v2 == 0 {\n\t\treturn nil, ErrZeroVariance\n\t}\n\n\tdof := math.Pow(v1/n1+v2/n2, 2) /\n\t\t(math.Pow(v1/n1, 2)/(n1-1) + math.Pow(v2/n2, 2)/(n2-1))\n\ts := math.Sqrt(v1/n1 + v2/n2)\n\tt := (x1.Mean() - x2.Mean()) / s\n\treturn newTTestResult(int(n1), int(n2), t, dof, alt), nil\n}\n\n// PairedTTest performs a two-sample paired t-test on samples x1 and\n// x2. If μ0 is non-zero, this tests if the average of the difference\n// is significantly different from μ0. 
If x1 and x2 are identical,\n// this returns nil.\nfunc PairedTTest(x1, x2 []float64, μ0 float64, alt LocationHypothesis) (*TTestResult, error) {\n\tif len(x1) != len(x2) {\n\t\treturn nil, ErrMismatchedSamples\n\t}\n\tif len(x1) <= 1 {\n\t\t// TODO: Can we still do this with n == 1?\n\t\treturn nil, ErrSampleSize\n\t}\n\n\tdof := float64(len(x1) - 1)\n\n\tdiff := make([]float64, len(x1))\n\tfor i := range x1 {\n\t\tdiff[i] = x1[i] - x2[i]\n\t}\n\tsd := StdDev(diff)\n\tif sd == 0 {\n\t\t// TODO: Can we still do the test?\n\t\treturn nil, ErrZeroVariance\n\t}\n\tt := (Mean(diff) - μ0) * math.Sqrt(float64(len(x1))) / sd\n\treturn newTTestResult(len(x1), len(x2), t, dof, alt), nil\n}\n\n// OneSampleTTest performs a one-sample t-test on sample x. This tests\n// the null hypothesis that the population mean is equal to μ0. This\n// assumes the distribution of the population of sample means is\n// normal.\nfunc OneSampleTTest(x TTestSample, μ0 float64, alt LocationHypothesis) (*TTestResult, error) {\n\tn, v := x.Weight(), x.Variance()\n\tif n == 0 {\n\t\treturn nil, ErrSampleSize\n\t}\n\tif v == 0 {\n\t\t// TODO: Can we still do the test?\n\t\treturn nil, ErrZeroVariance\n\t}\n\tdof := n - 1\n\tt := (x.Mean() - μ0) * math.Sqrt(n) / math.Sqrt(v)\n\treturn newTTestResult(int(n), 0, t, dof, alt), nil\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/ttest_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage stats\n\nimport \"testing\"\n\nfunc TestTTest(t *testing.T) {\n\ts1 := Sample{Xs: []float64{2, 1, 3, 4}}\n\ts2 := Sample{Xs: []float64{6, 5, 7, 9}}\n\n\tcheck := func(want, got *TTestResult) {\n\t\tif want.N1 != got.N1 || want.N2 != got.N2 ||\n\t\t\t!aeq(want.T, got.T) || !aeq(want.DoF, got.DoF) ||\n\t\t\twant.AltHypothesis != got.AltHypothesis ||\n\t\t\t!aeq(want.P, got.P) {\n\t\t\tt.Errorf(\"want %+v, got %+v\", want, got)\n\t\t}\n\t}\n\tcheck3 := func(test func(alt LocationHypothesis) (*TTestResult, error), n1, n2 int, t, dof float64, pless, pdiff, pgreater float64) {\n\t\twant := &TTestResult{N1: n1, N2: n2, T: t, DoF: dof}\n\n\t\twant.AltHypothesis = LocationLess\n\t\twant.P = pless\n\t\tgot, _ := test(want.AltHypothesis)\n\t\tcheck(want, got)\n\n\t\twant.AltHypothesis = LocationDiffers\n\t\twant.P = pdiff\n\t\tgot, _ = test(want.AltHypothesis)\n\t\tcheck(want, got)\n\n\t\twant.AltHypothesis = LocationGreater\n\t\twant.P = pgreater\n\t\tgot, _ = test(want.AltHypothesis)\n\t\tcheck(want, got)\n\t}\n\n\tcheck3(func(alt LocationHypothesis) (*TTestResult, error) {\n\t\treturn TwoSampleTTest(s1, s1, alt)\n\t}, 4, 4, 0, 6,\n\t\t0.5, 1, 0.5)\n\tcheck3(func(alt LocationHypothesis) (*TTestResult, error) {\n\t\treturn TwoSampleWelchTTest(s1, s1, alt)\n\t}, 4, 4, 0, 6,\n\t\t0.5, 1, 0.5)\n\n\tcheck3(func(alt LocationHypothesis) (*TTestResult, error) {\n\t\treturn TwoSampleTTest(s1, s2, alt)\n\t}, 4, 4, -3.9703446152237674, 6,\n\t\t0.0036820296121056195, 0.0073640592242113214, 0.9963179703878944)\n\tcheck3(func(alt LocationHypothesis) (*TTestResult, error) {\n\t\treturn TwoSampleWelchTTest(s1, s2, alt)\n\t}, 4, 4, -3.9703446152237674, 5.584615384615385,\n\t\t0.004256431565689112, 0.0085128631313781695, 0.9957435684343109)\n\n\tcheck3(func(alt LocationHypothesis) (*TTestResult, error) 
{\n\t\treturn PairedTTest(s1.Xs, s2.Xs, 0, alt)\n\t}, 4, 4, -17, 3,\n\t\t0.0002216717691559955, 0.00044334353831207749, 0.999778328230844)\n\n\tcheck3(func(alt LocationHypothesis) (*TTestResult, error) {\n\t\treturn OneSampleTTest(s1, 0, alt)\n\t}, 4, 0, 3.872983346207417, 3,\n\t\t0.9847668541689145, 0.030466291662170977, 0.015233145831085482)\n\tcheck3(func(alt LocationHypothesis) (*TTestResult, error) {\n\t\treturn OneSampleTTest(s1, 2.5, alt)\n\t}, 4, 0, 0, 3,\n\t\t0.5, 1, 0.5)\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/udist.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage stats\n\nimport (\n\t\"math\"\n\n\t\"github.com/aclements/go-moremath/mathx\"\n)\n\n// A UDist is the discrete probability distribution of the\n// Mann-Whitney U statistic for a pair of samples of sizes N1 and N2.\n//\n// The details of computing this distribution with no ties can be\n// found in Mann, Henry B.; Whitney, Donald R. (1947). \"On a Test of\n// Whether one of Two Random Variables is Stochastically Larger than\n// the Other\". Annals of Mathematical Statistics 18 (1): 50–60.\n// Computing this distribution in the presence of ties is described in\n// Klotz, J. H. (1966). \"The Wilcoxon, Ties, and the Computer\".\n// Journal of the American Statistical Association 61 (315): 772-787\n// and Cheung, Ying Kuen; Klotz, Jerome H. (1997). \"The Mann Whitney\n// Wilcoxon Distribution Using Linked Lists\". Statistica Sinica 7:\n// 805-813 (the former paper contains details that are glossed over in\n// the latter paper but has mathematical typesetting issues, so it's\n// easiest to get the context from the former paper and the details\n// from the latter).\ntype UDist struct {\n\tN1, N2 int\n\n\t// T is the count of the number of ties at each rank in the\n\t// input distributions. T may be nil, in which case it is\n\t// assumed there are no ties (which is equivalent to an M+N\n\t// slice of 1s). It must be the case that Sum(T) == M+N.\n\tT []int\n}\n\n// hasTies returns true if d has any tied samples.\nfunc (d UDist) hasTies() bool {\n\tfor _, t := range d.T {\n\t\tif t > 1 {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// p returns the p_{d.N1,d.N2} function defined by Mann, Whitney 1947\n// for values of U from 0 up to and including the U argument.\n//\n// This algorithm runs in Θ(N1*N2*U) = O(N1²N2²) time and is quite\n// fast for small values of N1 and N2. 
However, it does not handle ties.\nfunc (d UDist) p(U int) []float64 {\n\t// This is a dynamic programming implementation of the\n\t// recursive recurrence definition given by Mann and Whitney:\n\t//\n\t//   p_{n,m}(U) = (n * p_{n-1,m}(U-m) + m * p_{n,m-1}(U)) / (n+m)\n\t//   p_{n,m}(U) = 0                           if U < 0\n\t//   p_{0,m}(U) = p{n,0}(U) = 1 / nCr(m+n, n) if U = 0\n\t//                          = 0               if U > 0\n\t//\n\t// (Note that there is a typo in the original paper. The first\n\t// recursive application of p should be for U-m, not U-M.)\n\t//\n\t// Since p{n,m} only depends on p{n-1,m} and p{n,m-1}, we only\n\t// need to store one \"plane\" of the three dimensional space at\n\t// a time.\n\t//\n\t// Furthermore, p_{n,m} = p_{m,n}, so we only construct values\n\t// for n <= m and obtain the rest through symmetry.\n\t//\n\t// We organize the computed values of p as followed:\n\t//\n\t//       n →   N\n\t//     m *\n\t//     ↓ * *\n\t//       * * *\n\t//       * * * *\n\t//       * * * *\n\t//     M * * * *\n\t//\n\t// where each * is a slice indexed by U. The code below\n\t// computes these left-to-right, top-to-bottom, so it only\n\t// stores one row of this matrix at a time. Furthermore,\n\t// computing an element in a given U slice only depends on the\n\t// same and smaller values of U, so we can overwrite the U\n\t// slice we're computing in place as long as we start with the\n\t// largest value of U. Finally, even though the recurrence\n\t// depends on (n,m) above the diagonal and we use symmetry to\n\t// mirror those across the diagonal to (m,n), the mirrored\n\t// indexes are always available in the current row, so this\n\t// mirroring does not interfere with our ability to recycle\n\t// state.\n\n\tN, M := d.N1, d.N2\n\tif N > M {\n\t\tN, M = M, N\n\t}\n\n\tmemo := make([][]float64, N+1)\n\tfor n := range memo {\n\t\tmemo[n] = make([]float64, U+1)\n\t}\n\n\tfor m := 0; m <= M; m++ {\n\t\t// Compute p_{0,m}. 
This is zero except for U=0.\n\t\tmemo[0][0] = 1\n\n\t\t// Compute the remainder of this row.\n\t\tnlim := N\n\t\tif m < nlim {\n\t\t\tnlim = m\n\t\t}\n\t\tfor n := 1; n <= nlim; n++ {\n\t\t\tlp := memo[n-1] // p_{n-1,m}\n\t\t\tvar rp []float64\n\t\t\tif n <= m-1 {\n\t\t\t\trp = memo[n] // p_{n,m-1}\n\t\t\t} else {\n\t\t\t\trp = memo[m-1] // p{m-1,n} and m==n\n\t\t\t}\n\n\t\t\t// For a given n,m, U is at most n*m.\n\t\t\t//\n\t\t\t// TODO: Actually, it's at most ⌈n*m/2⌉, but\n\t\t\t// then we need to use more complex symmetries\n\t\t\t// in the inner loop below.\n\t\t\tulim := n * m\n\t\t\tif U < ulim {\n\t\t\t\tulim = U\n\t\t\t}\n\n\t\t\tout := memo[n] // p_{n,m}\n\t\t\tnplusm := float64(n + m)\n\t\t\tfor U1 := ulim; U1 >= 0; U1-- {\n\t\t\t\tl := 0.0\n\t\t\t\tif U1-m >= 0 {\n\t\t\t\t\tl = float64(n) * lp[U1-m]\n\t\t\t\t}\n\t\t\t\tr := float64(m) * rp[U1]\n\t\t\t\tout[U1] = (l + r) / nplusm\n\t\t\t}\n\t\t}\n\t}\n\treturn memo[N]\n}\n\ntype ukey struct {\n\tn1   int // size of first sample\n\ttwoU int // 2*U statistic for this permutation\n}\n\n// This computes the cumulative counts of the Mann-Whitney U\n// distribution in the presence of ties using the computation from\n// Cheung, Ying Kuen; Klotz, Jerome H. (1997). \"The Mann Whitney\n// Wilcoxon Distribution Using Linked Lists\". Statistica Sinica 7:\n// 805-813, with much guidance from appendix L of Klotz, A\n// Computational Approach to Statistics.\n//\n// makeUmemo constructs a table memo[K][ukey{n1, 2*U}], where K is the\n// number of ranks (up to len(t)), n1 is the size of the first sample\n// (up to the n1 argument), and U is the U statistic (up to the\n// argument twoU/2). 
The value of an entry in the memo table is the\n// number of permutations of a sample of size n1 in a ranking with tie\n// vector t[:K] having a U statistic <= U.\nfunc makeUmemo(twoU, n1 int, t []int) []map[ukey]float64 {\n\t// Another candidate for a fast implementation is van de Wiel,\n\t// \"The split-up algorithm: a fast symbolic method for\n\t// computing p-values of distribution-free statistics\". This\n\t// is what's used by R's coin package. It's a comparatively\n\t// recent publication, so it's presumably faster (or perhaps\n\t// just more general) than previous techniques, but I can't\n\t// get my hands on the paper.\n\t//\n\t// TODO: ~40% of this function's time is spent in mapassign on\n\t// the assignment lines in the two loops and another ~20% in\n\t// map access and iteration. Improving map behavior or\n\t// replacing the maps altogether with some other constant-time\n\t// structure could double performance.\n\t//\n\t// TODO: The worst case for this function is when there are\n\t// few ties. Yet the best case overall is when there are *no*\n\t// ties. Can we get the best of both worlds? Use the fast\n\t// algorithm for the most part when there are few ties and mix\n\t// in the general algorithm just where we need it? That's\n\t// certainly possible for sub-problems where t[:k] has no\n\t// ties, but that doesn't help if t[0] has a tie but nothing\n\t// else does. Is it possible to rearrange the ranks without\n\t// messing up our computation of the U statistic for\n\t// sub-problems?\n\n\tK := len(t)\n\n\t// Compute a coefficients. The a slice is indexed by k (a[0]\n\t// is unused).\n\ta := make([]int, K+1)\n\ta[1] = t[0]\n\tfor k := 2; k <= K; k++ {\n\t\ta[k] = a[k-1] + t[k-2] + t[k-1]\n\t}\n\n\t// Create the memo table for the counts function, A. 
The A\n\t// slice is indexed by k (A[0] is unused).\n\t//\n\t// In \"The Mann Whitney Distribution Using Linked Lists\", they\n\t// use linked lists (*gasp*) for this, but within each K it's\n\t// really just a memoization table, so it's faster to use a\n\t// map. The outer structure is a slice indexed by k because we\n\t// need to find all memo entries with certain values of k.\n\t//\n\t// TODO: The n1 and twoU values in the ukeys follow strict\n\t// patterns. For each K value, the n1 values are every integer\n\t// between two bounds. For each (K, n1) value, the twoU values\n\t// are every integer multiple of a certain base between two\n\t// bounds. It might be worth turning these into directly\n\t// indexible slices.\n\tA := make([]map[ukey]float64, K+1)\n\tA[K] = map[ukey]float64{ukey{n1: n1, twoU: twoU}: 0}\n\n\t// Compute memo table (k, n1, twoU) triples from high K values\n\t// to low K values. This drives the recurrence relation\n\t// downward to figure out all of the needed argument triples.\n\t//\n\t// TODO: Is it possible to generate this table bottom-up? If\n\t// so, this could be a pure dynamic programming algorithm and\n\t// we could discard the K dimension. 
We could at least store\n\t// the inputs in a more compact representation that replaces\n\t// the twoU dimension with an interval and a step size (as\n\t// suggested by Cheung, Klotz, not that they make it at all\n\t// clear *why* they're suggesting this).\n\ttsum := sumint(t) // always ∑ t[0:k]\n\tfor k := K - 1; k >= 2; k-- {\n\t\ttsum -= t[k]\n\t\tA[k] = make(map[ukey]float64)\n\n\t\t// Construct A[k] from A[k+1].\n\t\tfor A_kplus1 := range A[k+1] {\n\t\t\trkLow := maxint(0, A_kplus1.n1-tsum)\n\t\t\trkHigh := minint(A_kplus1.n1, t[k])\n\t\t\tfor rk := rkLow; rk <= rkHigh; rk++ {\n\t\t\t\ttwoU_k := A_kplus1.twoU - rk*(a[k+1]-2*A_kplus1.n1+rk)\n\t\t\t\tn1_k := A_kplus1.n1 - rk\n\t\t\t\tif twoUmin(n1_k, t[:k], a) <= twoU_k && twoU_k <= twoUmax(n1_k, t[:k], a) {\n\t\t\t\t\tkey := ukey{n1: n1_k, twoU: twoU_k}\n\t\t\t\t\tA[k][key] = 0\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Fill counts in memo table from low K values to high K\n\t// values. This unwinds the recurrence relation.\n\n\t// Start with K==2 base case.\n\t//\n\t// TODO: Later computations depend on these, but these don't\n\t// depend on anything (including each other), so if K==2, we\n\t// can skip the memo table altogether.\n\tif K < 2 {\n\t\tpanic(\"K < 2\")\n\t}\n\tN_2 := t[0] + t[1]\n\tfor A_2i := range A[2] {\n\t\tAsum := 0.0\n\t\tr2Low := maxint(0, A_2i.n1-t[0])\n\t\tr2High := (A_2i.twoU - A_2i.n1*(t[0]-A_2i.n1)) / N_2\n\t\tfor r2 := r2Low; r2 <= r2High; r2++ {\n\t\t\tAsum += mathx.Choose(t[0], A_2i.n1-r2) *\n\t\t\t\tmathx.Choose(t[1], r2)\n\t\t}\n\t\tA[2][A_2i] = Asum\n\t}\n\n\t// Derive counts for the rest of the memo table.\n\ttsum = t[0] // always ∑ t[0:k-1]\n\tfor k := 3; k <= K; k++ {\n\t\ttsum += t[k-2]\n\n\t\t// Compute A[k] counts from A[k-1] counts.\n\t\tfor A_ki := range A[k] {\n\t\t\tAsum := 0.0\n\t\t\trkLow := maxint(0, A_ki.n1-tsum)\n\t\t\trkHigh := minint(A_ki.n1, t[k-1])\n\t\t\tfor rk := rkLow; rk <= rkHigh; rk++ {\n\t\t\t\ttwoU_kminus1 := A_ki.twoU - 
rk*(a[k]-2*A_ki.n1+rk)\n\t\t\t\tn1_kminus1 := A_ki.n1 - rk\n\t\t\t\tx, ok := A[k-1][ukey{n1: n1_kminus1, twoU: twoU_kminus1}]\n\t\t\t\tif !ok && twoUmax(n1_kminus1, t[:k-1], a) < twoU_kminus1 {\n\t\t\t\t\tx = mathx.Choose(tsum, n1_kminus1)\n\t\t\t\t}\n\t\t\t\tAsum += x * mathx.Choose(t[k-1], rk)\n\t\t\t}\n\t\t\tA[k][A_ki] = Asum\n\t\t}\n\t}\n\n\treturn A\n}\n\nfunc twoUmin(n1 int, t, a []int) int {\n\tK := len(t)\n\ttwoU := -n1 * n1\n\tn1_k := n1\n\tfor k := 1; k <= K; k++ {\n\t\ttwoU_k := minint(n1_k, t[k-1])\n\t\ttwoU += twoU_k * a[k]\n\t\tn1_k -= twoU_k\n\t}\n\treturn twoU\n}\n\nfunc twoUmax(n1 int, t, a []int) int {\n\tK := len(t)\n\ttwoU := -n1 * n1\n\tn1_k := n1\n\tfor k := K; k > 0; k-- {\n\t\ttwoU_k := minint(n1_k, t[k-1])\n\t\ttwoU += twoU_k * a[k]\n\t\tn1_k -= twoU_k\n\t}\n\treturn twoU\n}\n\nfunc (d UDist) PMF(U float64) float64 {\n\tif U < 0 || U >= 0.5+float64(d.N1*d.N2) {\n\t\treturn 0\n\t}\n\n\tif d.hasTies() {\n\t\t// makeUmemo computes the CDF directly. Take its\n\t\t// difference to get the PMF.\n\t\tp1, ok1 := makeUmemo(int(2*U)-1, d.N1, d.T)[len(d.T)][ukey{d.N1, int(2*U) - 1}]\n\t\tp2, ok2 := makeUmemo(int(2*U), d.N1, d.T)[len(d.T)][ukey{d.N1, int(2 * U)}]\n\t\tif !ok1 || !ok2 {\n\t\t\tpanic(\"makeUmemo did not return expected memoization table\")\n\t\t}\n\t\treturn (p2 - p1) / mathx.Choose(d.N1+d.N2, d.N1)\n\t}\n\n\t// There are no ties. Use the fast algorithm. U must be integral.\n\tUi := int(math.Floor(U))\n\t// TODO: Use symmetry to minimize U\n\treturn d.p(Ui)[Ui]\n}\n\nfunc (d UDist) CDF(U float64) float64 {\n\tif U < 0 {\n\t\treturn 0\n\t} else if U >= float64(d.N1*d.N2) {\n\t\treturn 1\n\t}\n\n\tif d.hasTies() {\n\t\t// TODO: Minimize U?\n\t\tp, ok := makeUmemo(int(2*U), d.N1, d.T)[len(d.T)][ukey{d.N1, int(2 * U)}]\n\t\tif !ok {\n\t\t\tpanic(\"makeUmemo did not return expected memoization table\")\n\t\t}\n\t\treturn p / mathx.Choose(d.N1+d.N2, d.N1)\n\t}\n\n\t// There are no ties. Use the fast algorithm. 
U must be integral.\n\tUi := int(math.Floor(U))\n\t// The distribution is symmetric around U = m * n / 2. Sum up\n\t// whichever tail is smaller.\n\tflip := Ui >= (d.N1*d.N2+1)/2\n\tif flip {\n\t\tUi = d.N1*d.N2 - Ui - 1\n\t}\n\tpdfs := d.p(Ui)\n\tp := 0.0\n\tfor _, pdf := range pdfs[:Ui+1] {\n\t\tp += pdf\n\t}\n\tif flip {\n\t\tp = 1 - p\n\t}\n\treturn p\n}\n\nfunc (d UDist) Step() float64 {\n\treturn 0.5\n}\n\nfunc (d UDist) Bounds() (float64, float64) {\n\t// TODO: More precise bounds when there are ties.\n\treturn 0, float64(d.N1 * d.N2)\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/udist_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage stats\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"testing\"\n\n\t\"github.com/aclements/go-moremath/mathx\"\n)\n\nfunc aeqTable(a, b [][]float64) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := range a {\n\t\tif len(a[i]) != len(b[i]) {\n\t\t\treturn false\n\t\t}\n\t\tfor j := range a[i] {\n\t\t\t// \"%f\" precision\n\t\t\tif math.Abs(a[i][j]-b[i][j]) >= 0.000001 {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t}\n\treturn true\n}\n\n// U distribution for N=3 up to U=5.\nvar udist3 = [][]float64{\n\t//    m=1         2         3\n\t{0.250000, 0.100000, 0.050000}, // U=0\n\t{0.500000, 0.200000, 0.100000}, // U=1\n\t{0.750000, 0.400000, 0.200000}, // U=2\n\t{1.000000, 0.600000, 0.350000}, // U=3\n\t{1.000000, 0.800000, 0.500000}, // U=4\n\t{1.000000, 0.900000, 0.650000}, // U=5\n}\n\n// U distribution for N=5 up to U=5.\nvar udist5 = [][]float64{\n\t//    m=1         2         3         4         5\n\t{0.166667, 0.047619, 0.017857, 0.007937, 0.003968}, // U=0\n\t{0.333333, 0.095238, 0.035714, 0.015873, 0.007937}, // U=1\n\t{0.500000, 0.190476, 0.071429, 0.031746, 0.015873}, // U=2\n\t{0.666667, 0.285714, 0.125000, 0.055556, 0.027778}, // U=3\n\t{0.833333, 0.428571, 0.196429, 0.095238, 0.047619}, // U=4\n\t{1.000000, 0.571429, 0.285714, 0.142857, 0.075397}, // U=5\n}\n\nfunc TestUDist(t *testing.T) {\n\tmakeTable := func(n int) [][]float64 {\n\t\tout := make([][]float64, 6)\n\t\tfor U := 0; U < 6; U++ {\n\t\t\tout[U] = make([]float64, n)\n\t\t\tfor m := 1; m <= n; m++ {\n\t\t\t\tout[U][m-1] = UDist{N1: m, N2: n}.CDF(float64(U))\n\t\t\t}\n\t\t}\n\t\treturn out\n\t}\n\tfmtTable := func(a [][]float64) string {\n\t\tout := fmt.Sprintf(\"%8s\", \"m=\")\n\t\tfor m := 1; m <= len(a[0]); m++ {\n\t\t\tout += fmt.Sprintf(\"%9d\", m)\n\t\t}\n\t\tout += \"\\n\"\n\n\t\tfor U, row := 
range a {\n\t\t\tout += fmt.Sprintf(\"U=%-6d\", U)\n\t\t\tfor m := 1; m <= len(a[0]); m++ {\n\t\t\t\tout += fmt.Sprintf(\" %f\", row[m-1])\n\t\t\t}\n\t\t\tout += \"\\n\"\n\t\t}\n\t\treturn out\n\t}\n\n\t// Compare against tables given in Mann, Whitney (1947).\n\tgot3 := makeTable(3)\n\tif !aeqTable(got3, udist3) {\n\t\tt.Errorf(\"For n=3, want:\\n%sgot:\\n%s\", fmtTable(udist3), fmtTable(got3))\n\t}\n\n\tgot5 := makeTable(5)\n\tif !aeqTable(got5, udist5) {\n\t\tt.Errorf(\"For n=5, want:\\n%sgot:\\n%s\", fmtTable(udist5), fmtTable(got5))\n\t}\n}\n\nfunc BenchmarkUDist(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\t// R uses the exact distribution up to N=50.\n\t\t// N*M/2=1250 is the hardest point to get the CDF for.\n\t\tUDist{N1: 50, N2: 50}.CDF(1250)\n\t}\n}\n\nfunc TestUDistTies(t *testing.T) {\n\tmakeTable := func(m, N int, t []int, minx, maxx float64) [][]float64 {\n\t\tout := [][]float64{}\n\t\tdist := UDist{N1: m, N2: N - m, T: t}\n\t\tfor x := minx; x <= maxx; x += 0.5 {\n\t\t\t// Convert x from uQt' to uQv'.\n\t\t\tU := x - float64(m*m)/2\n\t\t\tP := dist.CDF(U)\n\t\t\tif len(out) == 0 || !aeq(out[len(out)-1][1], P) {\n\t\t\t\tout = append(out, []float64{x, P})\n\t\t\t}\n\t\t}\n\t\treturn out\n\t}\n\tfmtTable := func(table [][]float64) string {\n\t\tout := \"\"\n\t\tfor _, row := range table {\n\t\t\tout += fmt.Sprintf(\"%5.1f %f\\n\", row[0], row[1])\n\t\t}\n\t\treturn out\n\t}\n\n\t// Compare against Table 1 from Klotz (1966).\n\tgot := makeTable(5, 10, []int{1, 1, 2, 1, 1, 2, 1, 1}, 12.5, 19.5)\n\twant := [][]float64{\n\t\t{12.5, 0.003968}, {13.5, 0.007937},\n\t\t{15.0, 0.023810}, {16.5, 0.047619},\n\t\t{17.5, 0.071429}, {18.0, 0.087302},\n\t\t{19.0, 0.134921}, {19.5, 0.138889},\n\t}\n\tif !aeqTable(got, want) {\n\t\tt.Errorf(\"Want:\\n%sgot:\\n%s\", fmtTable(want), fmtTable(got))\n\t}\n\n\tgot = makeTable(10, 21, []int{6, 5, 4, 3, 2, 1}, 52, 87)\n\twant = [][]float64{\n\t\t{52.0, 0.000014}, {56.5, 0.000128},\n\t\t{57.5, 0.000145}, {60.0, 
0.000230},\n\t\t{61.0, 0.000400}, {62.0, 0.000740},\n\t\t{62.5, 0.000797}, {64.0, 0.000825},\n\t\t{64.5, 0.001165}, {65.5, 0.001477},\n\t\t{66.5, 0.002498}, {67.0, 0.002725},\n\t\t{67.5, 0.002895}, {68.0, 0.003150},\n\t\t{68.5, 0.003263}, {69.0, 0.003518},\n\t\t{69.5, 0.003603}, {70.0, 0.005648},\n\t\t{70.5, 0.005818}, {71.0, 0.006626},\n\t\t{71.5, 0.006796}, {72.0, 0.008157},\n\t\t{72.5, 0.009688}, {73.0, 0.009801},\n\t\t{73.5, 0.010430}, {74.0, 0.011111},\n\t\t{74.5, 0.014230}, {75.0, 0.014612},\n\t\t{75.5, 0.017249}, {76.0, 0.018307},\n\t\t{76.5, 0.020178}, {77.0, 0.022270},\n\t\t{77.5, 0.023189}, {78.0, 0.026931},\n\t\t{78.5, 0.028207}, {79.0, 0.029979},\n\t\t{79.5, 0.030931}, {80.0, 0.038969},\n\t\t{80.5, 0.043063}, {81.0, 0.044262},\n\t\t{81.5, 0.046389}, {82.0, 0.049581},\n\t\t{82.5, 0.056300}, {83.0, 0.058027},\n\t\t{83.5, 0.063669}, {84.0, 0.067454},\n\t\t{84.5, 0.074122}, {85.0, 0.077425},\n\t\t{85.5, 0.083498}, {86.0, 0.094079},\n\t\t{86.5, 0.096693}, {87.0, 0.101132},\n\t}\n\tif !aeqTable(got, want) {\n\t\tt.Errorf(\"Want:\\n%sgot:\\n%s\", fmtTable(want), fmtTable(got))\n\t}\n\n\tgot = makeTable(8, 16, []int{2, 2, 2, 2, 2, 2, 2, 2}, 32, 54)\n\twant = [][]float64{\n\t\t{32.0, 0.000078}, {34.0, 0.000389},\n\t\t{36.0, 0.001088}, {38.0, 0.002642},\n\t\t{40.0, 0.005905}, {42.0, 0.011500},\n\t\t{44.0, 0.021057}, {46.0, 0.035664},\n\t\t{48.0, 0.057187}, {50.0, 0.086713},\n\t\t{52.0, 0.126263}, {54.0, 0.175369},\n\t}\n\tif !aeqTable(got, want) {\n\t\tt.Errorf(\"Want:\\n%sgot:\\n%s\", fmtTable(want), fmtTable(got))\n\t}\n\n\t// Check remaining tables from Klotz against the reference\n\t// implementation.\n\tcheckRef := func(n1 int, tie []int) {\n\t\twantPMF1, wantCDF1 := udistRef(n1, tie)\n\n\t\tdist := UDist{N1: n1, N2: sumint(tie) - n1, T: tie}\n\t\tgotPMF, wantPMF := [][]float64{}, [][]float64{}\n\t\tgotCDF, wantCDF := [][]float64{}, [][]float64{}\n\t\tN := sumint(tie)\n\t\tfor U := 0.0; U <= float64(n1*(N-n1)); U += 0.5 {\n\t\t\tgotPMF = append(gotPMF, 
[]float64{U, dist.PMF(U)})\n\t\t\tgotCDF = append(gotCDF, []float64{U, dist.CDF(U)})\n\t\t\twantPMF = append(wantPMF, []float64{U, wantPMF1[int(U*2)]})\n\t\t\twantCDF = append(wantCDF, []float64{U, wantCDF1[int(U*2)]})\n\t\t}\n\t\tif !aeqTable(wantPMF, gotPMF) {\n\t\t\tt.Errorf(\"For PMF of n1=%v, t=%v, want:\\n%sgot:\\n%s\", n1, tie, fmtTable(wantPMF), fmtTable(gotPMF))\n\t\t}\n\t\tif !aeqTable(wantCDF, gotCDF) {\n\t\t\tt.Errorf(\"For CDF of n1=%v, t=%v, want:\\n%sgot:\\n%s\", n1, tie, fmtTable(wantCDF), fmtTable(gotCDF))\n\t\t}\n\t}\n\tcheckRef(5, []int{1, 1, 2, 1, 1, 2, 1, 1})\n\tcheckRef(5, []int{1, 1, 2, 1, 1, 1, 2, 1})\n\tcheckRef(5, []int{1, 3, 1, 2, 1, 1, 1})\n\tcheckRef(8, []int{1, 2, 1, 1, 1, 1, 2, 2, 1, 2})\n\tcheckRef(12, []int{3, 3, 4, 3, 4, 5})\n\tcheckRef(10, []int{1, 2, 3, 4, 5, 6})\n}\n\nfunc BenchmarkUDistTies(b *testing.B) {\n\t// Worst case: just one tie.\n\tn := 20\n\tt := make([]int, 2*n-1)\n\tfor i := range t {\n\t\tt[i] = 1\n\t}\n\tt[0] = 2\n\n\tfor i := 0; i < b.N; i++ {\n\t\tUDist{N1: n, N2: n, T: t}.CDF(float64(n*n) / 2)\n\t}\n}\n\nfunc XTestPrintUmemo(t *testing.T) {\n\t// Reproduce table from Cheung, Klotz.\n\tties := []int{4, 5, 3, 4, 6}\n\tprintUmemo(makeUmemo(80, 10, ties), ties)\n}\n\n// udistRef computes the PMF and CDF of the U distribution for two\n// samples of sizes n1 and sum(t)-n1 with tie vector t. The returned\n// pmf and cdf are indexed by 2*U.\n//\n// This uses the \"graphical method\" of Klotz (1966). It is very slow\n// (Θ(∏ (t[i]+1)) = Ω(2^|t|)), but very correct, and hence useful as a\n// reference for testing faster implementations.\nfunc udistRef(n1 int, t []int) (pmf, cdf []float64) {\n\t// Enumerate all u vectors for which 0 <= u_i <= t_i. 
Count\n\t// the number of permutations of two samples of sizes n1 and\n\t// sum(t)-n1 with tie vector t and accumulate these counts by\n\t// their U statistics in count[2*U].\n\tcounts := make([]int, 1+2*n1*(sumint(t)-n1))\n\n\tu := make([]int, len(t))\n\tu[0] = -1 // Get enumeration started.\nenumu:\n\tfor {\n\t\t// Compute the next u vector.\n\t\tu[0]++\n\t\tfor i := 0; i < len(u) && u[i] > t[i]; i++ {\n\t\t\tif i == len(u)-1 {\n\t\t\t\t// All u vectors have been enumerated.\n\t\t\t\tbreak enumu\n\t\t\t}\n\t\t\t// Carry.\n\t\t\tu[i+1]++\n\t\t\tu[i] = 0\n\t\t}\n\n\t\t// Is this a legal u vector?\n\t\tif sumint(u) != n1 {\n\t\t\t// Klotz (1966) has a method for directly\n\t\t\t// enumerating legal u vectors, but the point\n\t\t\t// of this is to be correct, not fast.\n\t\t\tcontinue\n\t\t}\n\n\t\t// Compute 2*U statistic for this u vector.\n\t\ttwoU, vsum := 0, 0\n\t\tfor i, u_i := range u {\n\t\t\tv_i := t[i] - u_i\n\t\t\t// U = U + vsum*u_i + u_i*v_i/2\n\t\t\ttwoU += 2*vsum*u_i + u_i*v_i\n\t\t\tvsum += v_i\n\t\t}\n\n\t\t// Compute Π choose(t_i, u_i). 
This is the number of\n\t\t// ways of permuting the input sample under u.\n\t\tprod := 1\n\t\tfor i, u_i := range u {\n\t\t\tprod *= int(mathx.Choose(t[i], u_i) + 0.5)\n\t\t}\n\n\t\t// Accumulate the permutations on this u path.\n\t\tcounts[twoU] += prod\n\n\t\tif false {\n\t\t\t// Print a table in the form of Klotz's\n\t\t\t// \"direct enumeration\" example.\n\t\t\t//\n\t\t\t// Convert 2U = 2UQV' to UQt' used in Klotz\n\t\t\t// examples.\n\t\t\tUQt := float64(twoU)/2 + float64(n1*n1)/2\n\t\t\tfmt.Printf(\"%+v %f %-2d\\n\", u, UQt, prod)\n\t\t}\n\t}\n\n\t// Convert counts into probabilities for PMF and CDF.\n\tpmf = make([]float64, len(counts))\n\tcdf = make([]float64, len(counts))\n\ttotal := int(mathx.Choose(sumint(t), n1) + 0.5)\n\tfor i, count := range counts {\n\t\tpmf[i] = float64(count) / float64(total)\n\t\tif i > 0 {\n\t\t\tcdf[i] = cdf[i-1]\n\t\t}\n\t\tcdf[i] += pmf[i]\n\t}\n\treturn\n}\n\n// printUmemo prints the output of makeUmemo for debugging.\nfunc printUmemo(A []map[ukey]float64, t []int) {\n\tfmt.Printf(\"K\\tn1\\t2*U\\tpr\\n\")\n\tfor K := len(A) - 1; K >= 0; K-- {\n\t\tfor i, pr := range A[K] {\n\t\t\t_, ref := udistRef(i.n1, t[:K])\n\t\t\tfmt.Printf(\"%v\\t%v\\t%v\\t%v\\t%v\\n\", K, i.n1, i.twoU, pr, ref[i.twoU])\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/utest.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage stats\n\nimport (\n\t\"math\"\n\t\"sort\"\n\n\t\"github.com/aclements/go-moremath/mathx\"\n)\n\n// A LocationHypothesis specifies the alternative hypothesis of a\n// location test such as a t-test or a Mann-Whitney U-test. The\n// default (zero) value is to test against the alternative hypothesis\n// that they differ.\ntype LocationHypothesis int\n\n//go:generate stringer -type LocationHypothesis\n\nconst (\n\t// LocationLess specifies the alternative hypothesis that the\n\t// location of the first sample is less than the second. This\n\t// is a one-tailed test.\n\tLocationLess LocationHypothesis = -1\n\n\t// LocationDiffers specifies the alternative hypothesis that\n\t// the locations of the two samples are not equal. This is a\n\t// two-tailed test.\n\tLocationDiffers LocationHypothesis = 0\n\n\t// LocationGreater specifies the alternative hypothesis that\n\t// the location of the first sample is greater than the\n\t// second. This is a one-tailed test.\n\tLocationGreater LocationHypothesis = 1\n)\n\n// A MannWhitneyUTestResult is the result of a Mann-Whitney U-test.\ntype MannWhitneyUTestResult struct {\n\t// N1 and N2 are the sizes of the input samples.\n\tN1, N2 int\n\n\t// U is the value of the Mann-Whitney U statistic for this\n\t// test, generalized by counting ties as 0.5.\n\t//\n\t// Given the Cartesian product of the two samples, this is the\n\t// number of pairs in which the value from the first sample is\n\t// greater than the value of the second, plus 0.5 times the\n\t// number of pairs where the values from the two samples are\n\t// equal. Hence, U is always an integer multiple of 0.5 (it is\n\t// a whole integer if there are no ties) in the range [0, N1*N2].\n\t//\n\t// U statistics always come in pairs, depending on which\n\t// sample is \"first\". 
The mirror U for the other sample can be\n\t// calculated as N1*N2 - U.\n\t//\n\t// There are many equivalent statistics with slightly\n\t// different definitions. The Wilcoxon (1945) W statistic\n\t// (generalized for ties) is U + (N1(N1+1))/2. It is also\n\t// common to use 2U to eliminate the half steps and Smid\n\t// (1956) uses N1*N2 - 2U to additionally center the\n\t// distribution.\n\tU float64\n\n\t// AltHypothesis specifies the alternative hypothesis tested\n\t// by this test against the null hypothesis that there is no\n\t// difference in the locations of the samples.\n\tAltHypothesis LocationHypothesis\n\n\t// P is the p-value of the Mann-Whitney test for the given\n\t// null hypothesis.\n\tP float64\n}\n\n// MannWhitneyExactLimit gives the largest sample size for which the\n// exact U distribution will be used for the Mann-Whitney U-test.\n//\n// Using the exact distribution is necessary for small sample sizes\n// because the distribution is highly irregular. However, computing\n// the distribution for large sample sizes is both computationally\n// expensive and unnecessary because it quickly approaches a normal\n// approximation. Computing the distribution for two 50 value samples\n// takes a few milliseconds on a 2014 laptop.\nvar MannWhitneyExactLimit = 50\n\n// MannWhitneyTiesExactLimit gives the largest sample size for which\n// the exact U distribution will be used for the Mann-Whitney U-test\n// in the presence of ties.\n//\n// Computing this distribution is more expensive than computing the\n// distribution without ties, so this is set lower. 
Computing this\n// distribution for two 25 value samples takes about ten milliseconds\n// on a 2014 laptop.\nvar MannWhitneyTiesExactLimit = 25\n\n// MannWhitneyUTest performs a Mann-Whitney U-test [1,2] of the null\n// hypothesis that two samples come from the same population against\n// the alternative hypothesis that one sample tends to have larger or\n// smaller values than the other.\n//\n// This is similar to a t-test, but unlike the t-test, the\n// Mann-Whitney U-test is non-parametric (it does not assume a normal\n// distribution). It has very slightly lower efficiency than the\n// t-test on normal distributions.\n//\n// Computing the exact U distribution is expensive for large sample\n// sizes, so this uses a normal approximation for sample sizes larger\n// than MannWhitneyExactLimit if there are no ties or\n// MannWhitneyTiesExactLimit if there are ties. This normal\n// approximation uses both the tie correction and the continuity\n// correction.\n//\n// This can fail with ErrSampleSize if either sample is empty or\n// ErrSamplesEqual if all sample values are equal.\n//\n// This is also known as a Mann-Whitney-Wilcoxon test and is\n// equivalent to the Wilcoxon rank-sum test, though the Wilcoxon\n// rank-sum test differs in nomenclature.\n//\n// [1] Mann, Henry B.; Whitney, Donald R. (1947). \"On a Test of\n// Whether one of Two Random Variables is Stochastically Larger than\n// the Other\". Annals of Mathematical Statistics 18 (1): 50–60.\n//\n// [2] Klotz, J. H. (1966). 
\"The Wilcoxon, Ties, and the Computer\".\n// Journal of the American Statistical Association 61 (315): 772-787.\nfunc MannWhitneyUTest(x1, x2 []float64, alt LocationHypothesis) (*MannWhitneyUTestResult, error) {\n\tn1, n2 := len(x1), len(x2)\n\tif n1 == 0 || n2 == 0 {\n\t\treturn nil, ErrSampleSize\n\t}\n\n\t// Compute the U statistic and tie vector T.\n\tx1 = append([]float64(nil), x1...)\n\tx2 = append([]float64(nil), x2...)\n\tsort.Float64s(x1)\n\tsort.Float64s(x2)\n\tmerged, labels := labeledMerge(x1, x2)\n\n\tR1 := 0.0\n\tT, hasTies := []int{}, false\n\tfor i := 0; i < len(merged); {\n\t\trank1, nx1, v1 := i+1, 0, merged[i]\n\t\t// Consume samples that tie this sample (including itself).\n\t\tfor ; i < len(merged) && merged[i] == v1; i++ {\n\t\t\tif labels[i] == 1 {\n\t\t\t\tnx1++\n\t\t\t}\n\t\t}\n\t\t// Assign all tied samples the average rank of the\n\t\t// samples, where merged[0] has rank 1.\n\t\tif nx1 != 0 {\n\t\t\trank := float64(i+rank1) / 2\n\t\t\tR1 += rank * float64(nx1)\n\t\t}\n\t\tT = append(T, i-rank1+1)\n\t\tif i > rank1 {\n\t\t\thasTies = true\n\t\t}\n\t}\n\tU1 := R1 - float64(n1*(n1+1))/2\n\n\t// Compute the smaller of U1 and U2\n\tU2 := float64(n1*n2) - U1\n\tUsmall := math.Min(U1, U2)\n\n\tvar p float64\n\tif !hasTies && n1 <= MannWhitneyExactLimit && n2 <= MannWhitneyExactLimit ||\n\t\thasTies && n1 <= MannWhitneyTiesExactLimit && n2 <= MannWhitneyTiesExactLimit {\n\t\t// Use exact U distribution. U1 will be an integer.\n\t\tif len(T) == 1 {\n\t\t\t// All values are equal. Test is meaningless.\n\t\t\treturn nil, ErrSamplesEqual\n\t\t}\n\n\t\tdist := UDist{N1: n1, N2: n2, T: T}\n\t\tswitch alt {\n\t\tcase LocationDiffers:\n\t\t\tif U1 == U2 {\n\t\t\t\t// The distribution is symmetric about\n\t\t\t\t// Usmall. Since the distribution is\n\t\t\t\t// discrete, the CDF is discontinuous\n\t\t\t\t// and if simply double CDF(Usmall),\n\t\t\t\t// we'll double count the\n\t\t\t\t// (non-infinitesimal) probability\n\t\t\t\t// mass at Usmall. 
What we want is\n\t\t\t\t// just the integral of the whole CDF,\n\t\t\t\t// which is 1.\n\t\t\t\tp = 1\n\t\t\t} else {\n\t\t\t\tp = dist.CDF(Usmall) * 2\n\t\t\t}\n\n\t\tcase LocationLess:\n\t\t\tp = dist.CDF(U1)\n\n\t\tcase LocationGreater:\n\t\t\tp = 1 - dist.CDF(U1-1)\n\t\t}\n\t} else {\n\t\t// Use normal approximation (with tie and continuity\n\t\t// correction).\n\t\tt := tieCorrection(T)\n\t\tN := float64(n1 + n2)\n\t\tμ_U := float64(n1*n2) / 2\n\t\tσ_U := math.Sqrt(float64(n1*n2) * ((N + 1) - t/(N*(N-1))) / 12)\n\t\tif σ_U == 0 {\n\t\t\treturn nil, ErrSamplesEqual\n\t\t}\n\t\tnumer := U1 - μ_U\n\t\t// Perform continuity correction.\n\t\tswitch alt {\n\t\tcase LocationDiffers:\n\t\t\tnumer -= mathx.Sign(numer) * 0.5\n\t\tcase LocationLess:\n\t\t\tnumer += 0.5\n\t\tcase LocationGreater:\n\t\t\tnumer -= 0.5\n\t\t}\n\t\tz := numer / σ_U\n\t\tswitch alt {\n\t\tcase LocationDiffers:\n\t\t\tp = 2 * math.Min(StdNormal.CDF(z), 1-StdNormal.CDF(z))\n\t\tcase LocationLess:\n\t\t\tp = StdNormal.CDF(z)\n\t\tcase LocationGreater:\n\t\t\tp = 1 - StdNormal.CDF(z)\n\t\t}\n\t}\n\n\treturn &MannWhitneyUTestResult{N1: n1, N2: n2, U: U1,\n\t\tAltHypothesis: alt, P: p}, nil\n}\n\n// labeledMerge merges sorted lists x1 and x2 into sorted list merged.\n// labels[i] is 1 or 2 depending on whether merged[i] is a value from\n// x1 or x2, respectively.\nfunc labeledMerge(x1, x2 []float64) (merged []float64, labels []byte) {\n\tmerged = make([]float64, len(x1)+len(x2))\n\tlabels = make([]byte, len(x1)+len(x2))\n\n\ti, j, o := 0, 0, 0\n\tfor i < len(x1) && j < len(x2) {\n\t\tif x1[i] < x2[j] {\n\t\t\tmerged[o] = x1[i]\n\t\t\tlabels[o] = 1\n\t\t\ti++\n\t\t} else {\n\t\t\tmerged[o] = x2[j]\n\t\t\tlabels[o] = 2\n\t\t\tj++\n\t\t}\n\t\to++\n\t}\n\tfor ; i < len(x1); i++ {\n\t\tmerged[o] = x1[i]\n\t\tlabels[o] = 1\n\t\to++\n\t}\n\tfor ; j < len(x2); j++ {\n\t\tmerged[o] = x2[j]\n\t\tlabels[o] = 2\n\t\to++\n\t}\n\treturn\n}\n\n// tieCorrection computes the tie correction factor Σ_j (t_j³ - 
t_j)\n// where t_j is the number of ties in the j'th rank.\nfunc tieCorrection(ties []int) float64 {\n\tt := 0\n\tfor _, tie := range ties {\n\t\tt += tie*tie*tie - tie\n\t}\n\treturn float64(t)\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/utest_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage stats\n\nimport \"testing\"\n\nfunc TestMannWhitneyUTest(t *testing.T) {\n\tcheck := func(want, got *MannWhitneyUTestResult) {\n\t\tif want.N1 != got.N1 || want.N2 != got.N2 ||\n\t\t\t!aeq(want.U, got.U) ||\n\t\t\twant.AltHypothesis != got.AltHypothesis ||\n\t\t\t!aeq(want.P, got.P) {\n\t\t\tt.Errorf(\"want %+v, got %+v\", want, got)\n\t\t}\n\t}\n\tcheck3 := func(x1, x2 []float64, U float64, pless, pdiff, pgreater float64) {\n\t\twant := &MannWhitneyUTestResult{N1: len(x1), N2: len(x2), U: U}\n\n\t\twant.AltHypothesis = LocationLess\n\t\twant.P = pless\n\t\tgot, _ := MannWhitneyUTest(x1, x2, want.AltHypothesis)\n\t\tcheck(want, got)\n\n\t\twant.AltHypothesis = LocationDiffers\n\t\twant.P = pdiff\n\t\tgot, _ = MannWhitneyUTest(x1, x2, want.AltHypothesis)\n\t\tcheck(want, got)\n\n\t\twant.AltHypothesis = LocationGreater\n\t\twant.P = pgreater\n\t\tgot, _ = MannWhitneyUTest(x1, x2, want.AltHypothesis)\n\t\tcheck(want, got)\n\t}\n\n\ts1 := []float64{2, 1, 3, 5}\n\ts2 := []float64{12, 11, 13, 15}\n\ts3 := []float64{0, 4, 6, 7} // Interleaved with s1, but no ties\n\ts4 := []float64{2, 2, 2, 2}\n\ts5 := []float64{1, 1, 1, 1, 1}\n\n\t// Small sample, no ties\n\tcheck3(s1, s2, 0, 0.014285714285714289, 0.028571428571428577, 1)\n\tcheck3(s2, s1, 16, 1, 0.028571428571428577, 0.014285714285714289)\n\tcheck3(s1, s3, 5, 0.24285714285714288, 0.485714285714285770, 0.8285714285714285)\n\n\t// Small sample, ties\n\t// TODO: Check these against some other implementation.\n\tcheck3(s1, s1, 8, 0.6285714285714286, 1, 0.6285714285714286)\n\tcheck3(s1, s4, 10, 0.8571428571428571, 0.7142857142857143, 0.3571428571428571)\n\tcheck3(s1, s5, 17.5, 1, 0, 0.04761904761904767)\n\n\tr, err := MannWhitneyUTest(s4, s4, LocationDiffers)\n\tif err != ErrSamplesEqual {\n\t\tt.Errorf(\"want ErrSamplesEqual, got %+v, 
%+v\", r, err)\n\t}\n\n\t// Large samples.\n\tl1 := make([]float64, 500)\n\tfor i := range l1 {\n\t\tl1[i] = float64(i * 2)\n\t}\n\tl2 := make([]float64, 600)\n\tfor i := range l2 {\n\t\tl2[i] = float64(i*2 - 41)\n\t}\n\tl3 := append([]float64{}, l2...)\n\tfor i := 0; i < 30; i++ {\n\t\tl3[i] = l1[i]\n\t}\n\t// For comparing with R's wilcox.test:\n\t// l1 <- seq(0, 499)*2\n\t// l2 <- seq(0,599)*2-41\n\t// l3 <- l2; for (i in 1:30) { l3[i] = l1[i] }\n\n\tcheck3(l1, l2, 135250, 0.0024667680407086112, 0.0049335360814172224, 0.9975346930458906)\n\tcheck3(l1, l1, 125000, 0.5000436801680628, 1, 0.5000436801680628)\n\tcheck3(l1, l3, 134845, 0.0019351907119808942, 0.0038703814239617884, 0.9980659818257166)\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/stats/util_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage stats\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/aclements/go-moremath/internal/mathtest\"\n\t\"github.com/aclements/go-moremath/vec\"\n)\n\nvar aeq = mathtest.Aeq\nvar testFunc = mathtest.WantFunc\n\nfunc testDiscreteCDF(t *testing.T, name string, dist DiscreteDist) {\n\t// Build the expected CDF out of the PMF.\n\tl, h := dist.Bounds()\n\ts := dist.Step()\n\twant := map[float64]float64{l - 0.1: 0, h: 1}\n\tsum := 0.0\n\tfor x := l; x < h; x += s {\n\t\tsum += dist.PMF(x)\n\t\twant[x] = sum\n\t\twant[x+s/2] = sum\n\t}\n\n\ttestFunc(t, name, dist.CDF, want)\n}\n\nfunc testInvCDF(t *testing.T, dist Dist, bounded bool) {\n\tinv := InvCDF(dist)\n\tname := fmt.Sprintf(\"InvCDF(%+v)\", dist)\n\tcdfName := fmt.Sprintf(\"CDF(%+v)\", dist)\n\n\t// Test bounds.\n\tvals := map[float64]float64{-0.01: nan, 1.01: nan}\n\tif !bounded {\n\t\tvals[0] = -inf\n\t\tvals[1] = inf\n\t}\n\ttestFunc(t, name, inv, vals)\n\n\tif bounded {\n\t\tlo, hi := inv(0), inv(1)\n\t\tvals := map[float64]float64{\n\t\t\tlo - 0.01: 0, lo: 0,\n\t\t\thi: 1, hi + 0.01: 1,\n\t\t}\n\t\ttestFunc(t, cdfName, dist.CDF, vals)\n\t\tif got := dist.CDF(lo + 0.01); !(got > 0) {\n\t\t\tt.Errorf(\"%s(0)=%v, but %s(%v)=0\", name, lo, cdfName, lo+0.01)\n\t\t}\n\t\tif got := dist.CDF(hi - 0.01); !(got < 1) {\n\t\t\tt.Errorf(\"%s(1)=%v, but %s(%v)=1\", name, hi, cdfName, hi-0.01)\n\t\t}\n\t}\n\n\t// Test points between.\n\tvals = map[float64]float64{}\n\tfor _, p := range vec.Linspace(0, 1, 11) {\n\t\tif p == 0 || p == 1 {\n\t\t\tcontinue\n\t\t}\n\t\tx := inv(p)\n\t\tvals[x] = x\n\t}\n\ttestFunc(t, fmt.Sprintf(\"InvCDF(CDF(%+v))\", dist),\n\t\tfunc(x float64) float64 {\n\t\t\treturn inv(dist.CDF(x))\n\t\t},\n\t\tvals)\n}\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/vec/package.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package vec provides functions for float64 vectors.\npackage vec // import \"github.com/aclements/go-moremath/vec\"\n"
  },
  {
    "path": "benchplot/vendor/github.com/aclements/go-moremath/vec/vec.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage vec\n\nimport \"math\"\n\n// Vectorize returns a function g(xs) that applies f to each x in xs.\n//\n// f may be evaluated in parallel and in any order.\nfunc Vectorize(f func(float64) float64) func(xs []float64) []float64 {\n\treturn func(xs []float64) []float64 {\n\t\treturn Map(f, xs)\n\t}\n}\n\n// Map returns f(x) for each x in xs.\n//\n// f may be evaluated in parallel and in any order.\nfunc Map(f func(float64) float64, xs []float64) []float64 {\n\t// TODO(austin) Parallelize\n\tres := make([]float64, len(xs))\n\tfor i, x := range xs {\n\t\tres[i] = f(x)\n\t}\n\treturn res\n}\n\n// Linspace returns num values spaced evenly between lo and hi,\n// inclusive. If num is 1, this returns an array consisting of lo.\nfunc Linspace(lo, hi float64, num int) []float64 {\n\tres := make([]float64, num)\n\tif num == 1 {\n\t\tres[0] = lo\n\t\treturn res\n\t}\n\tfor i := 0; i < num; i++ {\n\t\tres[i] = lo + float64(i)*(hi-lo)/float64(num-1)\n\t}\n\treturn res\n}\n\n// Logspace returns num values spaced evenly on a logarithmic scale\n// between base**lo and base**hi, inclusive.\nfunc Logspace(lo, hi float64, num int, base float64) []float64 {\n\tres := Linspace(lo, hi, num)\n\tfor i, x := range res {\n\t\tres[i] = math.Pow(base, x)\n\t}\n\treturn res\n}\n\n// Sum returns the sum of xs.\nfunc Sum(xs []float64) float64 {\n\tsum := 0.0\n\tfor _, x := range xs {\n\t\tsum += x\n\t}\n\treturn sum\n}\n\n// Concat returns the concatenation of its arguments. It does not\n// modify its inputs.\nfunc Concat(xss ...[]float64) []float64 {\n\ttotal := 0\n\tfor _, xs := range xss {\n\t\ttotal += len(xs)\n\t}\n\tout := make([]float64, total)\n\tpos := 0\n\tfor _, xs := range xss {\n\t\tpos += copy(out[pos:], xs)\n\t}\n\treturn out\n}\n"
  },
  {
    "path": "benchplot/vendor/update",
    "content": "#!/bin/sh\n\nset -e\n\nmv github.com github.com.old || true\n\nmkdir -p github.com/aclements/go-gg\ngit -C $GOPATH/src/github.com/aclements/go-gg archive HEAD | tar xC github.com/aclements/go-gg\n\nmkdir -p github.com/aclements/go-moremath\ngit -C $GOPATH/src/github.com/aclements/go-moremath archive HEAD | tar xC github.com/aclements/go-moremath\n"
  },
  {
    "path": "benchscripts/bench-many",
    "content": "#!/bin/zsh\n\nset -e\n\nif [[ $# != 3 ]]; then\n    echo \"usage: bench-many rev-file out-dir iterations\" >&2\n    exit 1\nfi\n\nrevFile=$(realpath $1)\noutDir=$2\niterations=$3\nmkdir -p $outDir/by-rev\nmkdir -p $outDir/by-date\noutDir=$(realpath $outDir)\nGOROOT=$(go env GOROOT)\nGOTOOLDIR=$(go env GOTOOLDIR)\n\n# Build benchmarks\ncat $revFile | while read rev; do\n    cd $GOROOT\n    rev=$(git rev-parse $rev)\n    revOut=$outDir/by-rev/$rev\n    if [[ -x $revOut/go1.test && -x $revOut/6g && -x $revOut/xbench ]]; then\n        continue\n    fi\n\n    echo \"Building $rev\"\n    git checkout -q $rev > $outDir/log\n    (cd $GOROOT/src && ./make.bash) >> $outDir/log 2>&1\n\n    if [[ ! -x $revOut/6g ]]; then\n        cp $GOTOOLDIR/6g $revOut/6g\n    fi\n\n    if [[ ! -x $revOut/go1.test ]]; then\n        cd $GOROOT/test/bench/go1\n        go test -c >> $outDir/log\n        mkdir -p $revOut\n        mv go1.test $revOut/go1.test\n    fi\n\n    if [[ ! -x $revOut/xbench ]]; then\n        go build -o $revOut/xbench golang.org/x/benchmarks/bench\n    fi\ndone\n\n# Make date symlinks\ncat $revFile | while read rev; do\n    rev=$(git rev-parse $rev)\n    date=$(git log -n1 --format='%cI' $rev | sed 's/+00:00$//')\n    ln -snf ../by-rev/$rev $outDir/by-date/$date\ndone\n\n# Run benchmarks\nfor i in {1..$iterations}; do\n    cat $revFile | while read rev; do\n        cd $GOROOT\n        rev=$(git rev-parse $rev)\n        cd $outDir/by-rev/$rev\n        logName=go1.out.$(printf %03d $i)\n        if [[ -f $logName ]]; then\n            continue\n        fi\n        echo \"$rev ($i)\"\n        ./go1.test -test.bench . > go1.out.tmp\n        mv go1.out.tmp $logName\n    done\ndone\n\n# TODO: Run 6g \"benchmark\" (with gctrace=1)\n# TODO: Run x/bench benchmarks (with gctrace=1)\n"
  },
  {
    "path": "benchscripts/benchstat2",
    "content": "#!/usr/bin/python3\n\nimport os\nimport sys\nimport tempfile\nimport subprocess\nimport argparse\nimport re\n\ndef expandHash(commits, h):\n    x = None\n    for c in commits:\n        if c.startswith(h):\n            if x != None:\n                raise ValueError(\"ambiguous commit hash \" + h)\n            x = c\n    return x\n\ndef main():\n    parser = argparse.ArgumentParser(description=\"disentangle benchmark output\")\n    parser.add_argument(\"-C\", metavar=\"gitdir\", help=\"git repo for resolving commit hashes\", default=os.path.expanduser(\"~/go.dev\"))\n    parser.add_argument(\"-o\", metavar=\"base\", help=\"write output to base-commit.log instead of invoking benchstat\")\n    parser.add_argument(\"-benchsave\", action=\"store_true\", help=\"invoke benchsave instead of benchstat\")\n    parser.add_argument(\"-geomean\", action=\"store_true\", help=\"pass -geomean to benchstat\")\n    parser.add_argument(\"-delta-test\", help=\"pass -delta-test to benchstat\")\n    parser.add_argument(\"logs\", nargs=\"+\", help=\"input benchmark log files\")\n    parser.add_argument(\"commits\", nargs=\"*\", help=\"commits to show\")\n    args = parser.parse_args()\n\n    benchstat = args.o == None\n    if benchstat:\n        tmpdir = tempfile.TemporaryDirectory()\n        args.o = os.path.join(tmpdir.name, \"out\")\n\n    # Separate logs and commits arguments\n    for i, arg in enumerate(args.logs):\n        if re.fullmatch(\"[0-9a-fA-F]{5,}\", arg):\n            args.commits = args.logs[i:]\n            args.logs = args.logs[:i]\n            break\n        if arg == \"--\":\n            args.commits = args.logs[i+1:]\n            args.logs = args.logs[:i]\n            break\n\n    # Process input files into output files\n    fmap = {}\n    logCommits = set()\n    for inp in args.logs:\n        parseInput(inp, args.o, fmap, logCommits)\n    for f, name in fmap.values():\n        f.close()\n\n    # Get commit order\n    listArgs = [list(logCommits)]\n  
  if args.commits:\n        # We want to accept revision list arguments, but keep things\n        # in argument order if there's more than one argument. This\n        # means we have to call rev-list separately for each argument.\n        listArgs = [[\"--no-walk\", c] for c in args.commits]\n    commits = []\n    for listArg in listArgs:\n        commits += subprocess.check_output([\"git\", \"-C\", args.C, \"rev-list\", \"--topo-order\", \"--reverse\"] + listArg, universal_newlines=True).splitlines()\n    order = {cid: i for i, cid in enumerate(commits)}\n\n    # Get names in commit order.\n    if args.commits:\n        names = [args.o + \"-\" + expandHash(commits, h)[:10] + \".log\" for h in commits]\n    else:\n        names = [fmap[cid][1]\n                 for cid in sorted(fmap.keys(), key=lambda cid: order[cid])]\n\n    if benchstat:\n        # Invoke benchstat/benchsave\n        try:\n            os.chdir(os.path.dirname(args.o))\n            if args.benchsave:\n                benchargs = [\"benchsave\"]\n            else:\n                benchargs = [\"benchstat\"]\n                if args.geomean:\n                    benchargs.append(\"-geomean\")\n                if args.delta_test:\n                    benchargs.extend([\"-delta-test\", args.delta_test])\n            subprocess.check_call(benchargs + list(map(os.path.basename, names)),\n                                  stdout=sys.stdout, stderr=sys.stderr)\n        finally:\n            # Allow deletion of temporary directory.\n            os.chdir(\"/\")\n    else:\n        print(\" \".join(names))\n\ndef parseInput(path, outbase, fmap, logCommits):\n    infile = open(path)\n    outfile = None\n\n    f = None\n    for l in infile:\n        if l.startswith(\"commit: \"):\n            chash = l.split()[1].strip()\n            logCommits.add(chash)\n            f, name = fmap.get(chash, (None, None))\n            if f is None:\n                name = outbase + \"-\" + chash[:10] + \".log\"\n           
     f = open(name, \"w\")\n                fmap[chash] = (f, name)\n        elif f:\n            f.write(l)\n\nmain()\n"
  },
  {
    "path": "benchscripts/plot-time",
    "content": "#!/usr/bin/env python3\n# -*- python -*-\n\n# Copyright 2015 The Go Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style\n# license that can be found in the LICENSE file.\n\nimport argparse\nimport os\nimport datetime\n\nimport numpy as np\nimport scipy.stats as stats\nimport pandas as pd\n\nimport matplotlib as mpl\nmpl.use('GTK3Cairo')\nmpl.rc('figure', facecolor='1')\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport seaborn as sns\n\n# TODO: Take a rev-list and work from that instead. Then there can be\n# benchmarks mixed in from other branches, and it will be much easier\n# to specify a baseline.\n\ndef main():\n    argp = argparse.ArgumentParser(\n        description='''Plot benchmarks over time.''')\n    argp.add_argument('--baseline', action='append', type=argparse.FileType(),\n                      help='''Benchmark output files to use as a baseline.''')\n    argp.add_argument('benchout', nargs='+', type=argparse.FileType(),\n                      help='''Benchmark output files to read. 
Must be in\n                      directories named YYYY-MM-DDTHH:MM:SS.''')\n    args = argp.parse_args()\n\n    baseline = None\n    baselineNames = set()\n    if args.baseline:\n        baseline = pd.DataFrame()\n        for benchout in args.baseline:\n            b1 = parseBenchmarks(benchout)\n            b1['date'] = dateOf(benchout.name)\n            baseline = baseline.append(b1, ignore_index=True)\n            baselineNames.add(benchout.name)\n\n    print(args.baseline, baselineNames)\n    benchmarks = pd.DataFrame()\n    for benchout in args.benchout:\n        if benchout.name in baselineNames:\n            continue\n        if '2015-02-18' in benchout.name:\n            continue            # XXX\n        if '2015-05-05T10:38:48-04:00' in benchout.name:\n            # XXX Off-master commit\n            continue\n        b1 = parseBenchmarks(benchout)\n        b1['date'] = dateOf(benchout.name)\n        benchmarks = benchmarks.append(b1, ignore_index=True)\n\n    benchMeans = getBenchMeans(benchmarks)\n    gmeanByDate = gmeanBenchmarks(benchMeans)\n\n    for i, (gmean1, gmean2) in enumerate(zip(gmeanByDate['ops/sec'], gmeanByDate['ops/sec'][1:])):\n        delta = (gmean2 - gmean1) / gmean1\n        if abs(delta) > 0.05:\n            date = gmeanByDate['date'][i+1]\n            print(\"%s %s %+g%%\" % (shaOfDate(date), date, delta*100))\n\n    if baseline is not None:\n        # Normalize each date geomean to baseline\n        #\n        # XXX Should this just add a line?\n        baselineGmean = stats.gmean(getBenchMeans(baseline)['ops/sec'])\n        gmeanByDate['normalized ops/sec'] = gmeanByDate['ops/sec'] / baselineGmean\n        plotCol = 'normalized ops/sec'\n    else:\n        plotCol = 'ops/sec'\n\n#     # Normalize each benchmark to latest result.\n#     latestBenchMeans = benchMeans.sort('date', ascending=False).groupby('name').\\\n#                        head(1).reset_index(drop=True)\n#     latestBenchMeans = latestBenchMeans.drop('date', 
1).set_index('name')\n#     print(latestBenchMeans)\n# #    normBenchMeans = benchMeans.drop('date').groupby('name').\n\n    fig, ax = plt.subplots(1, 1, dpi=120)\n\n    ax.set_title('go1 benchmarks relative to Go 1.4') # XXX\n    ax.plot(gmeanByDate['date'].astype(datetime.datetime), gmeanByDate[plotCol])\n    ax.xaxis.set_major_formatter(mdates.DateFormatter('%b %e'))\n    ax.format_xdata = mdates.DateFormatter('%Y-%m-%dT%H:%M:%S')\n    ax.set_ylim(bottom=0)\n    ax.set_ylabel('Geomean performance (%s)' % plotCol)\n    #fig.autofmt_xdate()\n    fig.tight_layout()\n    plt.show()\n\ndef getBenchMeans(benchmarks):\n    # Compute ops/sec\n    benchmarks['ops/sec'] = 1e9 / benchmarks['ns/op']\n\n    # Compute the mean for each benchmark.\n    # TODO: Discard outliers.\n    return benchmarks.groupby(['date', 'name']).mean().reset_index()\n\ndef gmeanBenchmarks(benchMeans):\n    # For each date, compute the geometric mean across benchmarks.\n    return benchMeans.groupby('date')['ops/sec'].agg(stats.gmean).reset_index()\n\ndef parseBenchmarks(fp):\n    results = []\n    for line in fp:\n        f = line.split()\n        if len(f) < 4:\n            continue\n        name = f[0]\n        ns = None\n        for i, field in enumerate(f):\n            if field == 'ns/op':\n                ns = float(f[i-1])\n        if ns is None:\n            continue\n\n        results.append((name, ns))\n\n    # UGH. This doesn't work if results == []\n    return pd.DataFrame(results,\n                        columns=('name', 'ns/op'))\n\ndef dateOf(path):\n    p = os.path.basename(os.path.dirname(path))\n    if p.endswith('-04:00'):\n        p = p[:-len('-04:00')]  # XXX\n    return datetime.datetime.strptime(p, '%Y-%m-%dT%H:%M:%S')\n\ndef shaOfDate(date):\n    d = date.strftime('%Y-%m-%dT%H:%M:%S')\n    return os.path.basename(os.readlink('history/by-date/' + d))\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "benchscripts/plot-time-2",
    "content": "#!/usr/bin/env python3\n# -*- python -*-\n\n# Copyright 2015 The Go Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style\n# license that can be found in the LICENSE file.\n\nimport argparse\nimport os\nimport datetime\n\nimport numpy as np\nimport scipy.stats as stats\nimport pandas as pd\n\nimport matplotlib as mpl\nmpl.use('GTK3Cairo')\nmpl.rc('figure', facecolor='1')\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# TODO: Take a rev-list and work from that instead. Then there can be\n# benchmarks mixed in from other branches, and it will be much easier\n# to specify a baseline.\n\ndef main():\n    argp = argparse.ArgumentParser(\n        description='''Plot benchmarks over time.''')\n    argp.add_argument('-C', help='''Git directory''')\n    argp.add_argument('--baseline', metavar='COMMIT',\n                      help='''Normalize results to COMMIT results''')\n    argp.add_argument('--history', metavar='DIR', default='history',\n                      help='''Directory of benchmark results (default: %(default)s)''')\n    argp.add_argument('revisions', nargs='*', default=['HEAD'],\n                      help='''Revision range to show''')\n    args = argp.parse_args()\n\n    global gitDir\n    gitDir = argp.C\n\n    revisions = gitRevList(*argp.revisions)\n    baselineRev = None\n    if argp.baseline:\n        baselineRev = gitRevParse(argp.baseline)\n\n    # XXX HERE. 
I should probably just rewrite this mess in Go.\n\n\n    baseline = None\n    baselineNames = set()\n    if args.baseline:\n        baseline = pd.DataFrame()\n        for benchout in args.baseline:\n            b1 = parseBenchmarks(benchout)\n            b1['date'] = dateOf(benchout.name)\n            baseline = baseline.append(b1, ignore_index=True)\n            baselineNames.add(benchout.name)\n\n    benchmarks = pd.DataFrame()\n    for benchout in args.benchout:\n        if benchout.name in baselineNames:\n            continue\n        b1 = parseBenchmarks(benchout)\n        b1['date'] = dateOf(benchout.name)\n        benchmarks = benchmarks.append(b1, ignore_index=True)\n\n    benchMeans = getBenchMeans(benchmarks)\n    gmeanByDate = gmeanBenchmarks(benchMeans)\n\n    if baseline is not None:\n        # Normalize each date geomean to baseline\n        #\n        # XXX Should this just add a line?\n        baselineGmean = stats.gmean(getBenchMeans(baseline)['ops/sec'])\n        gmeanByDate['normalized ops/sec'] = gmeanByDate['ops/sec'] / baselineGmean\n        plotCol = 'normalized ops/sec'\n    else:\n        plotCol = 'ops/sec'\n\n#     # Normalize each benchmark to latest result.\n#     latestBenchMeans = benchMeans.sort('date', ascending=False).groupby('name').\\\n#                        head(1).reset_index(drop=True)\n#     latestBenchMeans = latestBenchMeans.drop('date', 1).set_index('name')\n#     print(latestBenchMeans)\n# #    normBenchMeans = benchMeans.drop('date').groupby('name').\n\n    fig, ax = plt.subplots(1, 1, dpi=120)\n\n    #ax.set_title('go1 benchmarks relative to Go 1.4') # XXX\n    ax.plot(gmeanByDate['date'].astype(datetime.datetime), gmeanByDate[plotCol])\n    ax.set_ylim(bottom=0)\n    ax.set_ylabel('Geomean performance (%s)' % plotCol)\n    fig.tight_layout()\n    plt.show()\n\nclass Rev(collections.namedtuple('commit date')): pass\n\ndef gitRevList(*args):\n    revs = []\n    for line in subprocess.check_call(\n            ['git', 
'rev-list', '--format=format:%H %ct'] + args,\n            stdout=subprocess.PIPE, stdin=subprocess.DEVNULL).splitlines():\n        if line.startswith('commit '):\n            continue\n        commit, date = line.split()\n        pdate = datetime.datetime.fromtimestamp(int(date))\n        revs.append(Rev(commit, pdate))\n    return revs\n\ndef getBenchMeans(benchmarks):\n    # Compute ops/sec\n    benchmarks['ops/sec'] = 1e9 / benchmarks['ns/op']\n\n    # Compute the mean for each benchmark.\n    # TODO: Discard outliers.\n    return benchmarks.groupby(['date', 'name']).mean().reset_index()\n\ndef gmeanBenchmarks(benchMeans):\n    # For each date, compute the geometric mean across benchmarks.\n    return benchMeans.groupby('date')['ops/sec'].agg(stats.gmean).reset_index()\n\ndef parseBenchmarks(fp):\n    results = []\n    for line in fp:\n        f = line.split()\n        if len(f) < 4:\n            continue\n        name = f[0]\n        ns = None\n        for i, field in enumerate(f):\n            if field == 'ns/op':\n                ns = float(f[i-1])\n        if ns is None:\n            continue\n\n        results.append((name, ns))\n\n    return pd.DataFrame(results,\n                        columns=('name', 'ns/op'))\n\ndef dateOf(path):\n    p = os.path.basename(os.path.dirname(path))\n    return datetime.datetime.strptime(p, '%Y-%m-%dT%H:%M:%S')\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "buildstats/alg.go",
    "content": "// Copyright 2022 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nfunc FilterInPlace[T any](xs []T, keep func(x T) bool) []T {\n\tj := 0\n\tfor i := range xs {\n\t\tif keep(xs[i]) {\n\t\t\tif i != j {\n\t\t\t\txs[i] = xs[j]\n\t\t\t}\n\t\t\tj++\n\t\t}\n\t}\n\treturn xs[:j]\n}\n"
  },
  {
    "path": "buildstats/go.mod",
    "content": "module github.com/aclements/go-misc/buildstats\n\ngo 1.18\n\nrequire golang.org/x/exp v0.0.0-20220613132600-b0d781184e0d // indirect\n"
  },
  {
    "path": "buildstats/go.sum",
    "content": "golang.org/x/exp v0.0.0-20220613132600-b0d781184e0d h1:vtUKgx8dahOomfFzLREU8nSv25YHnTgLBn4rDnWZdU0=\ngolang.org/x/exp v0.0.0-20220613132600-b0d781184e0d/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=\n"
  },
  {
    "path": "buildstats/main.go",
    "content": "// Copyright 2022 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding/base64\"\n\t\"flag\"\n\t\"fmt\"\n\t\"html\"\n\t\"image\"\n\t\"image/color\"\n\t\"image/png\"\n\t\"log\"\n\t\"sort\"\n)\n\nvar since timeFlag\n\ntype result int8\n\nconst (\n\tresNone result = iota\n\tresOK\n\tresFail\n)\n\nfunc resultFromString(s string) result {\n\tswitch s {\n\tcase \"\":\n\t\treturn resNone\n\tcase \"ok\":\n\t\treturn resOK\n\t}\n\treturn resFail\n}\n\n// grid is a 2D collection of results indexed by a string label and a\n// revision.\ntype grid struct {\n\tresults map[gridKey]result\n\tlabels  map[string]sum\n\trevs    []*rev\n}\n\ntype gridKey struct {\n\tlabel string\n\trev   *rev\n}\n\nfunc newGrid(revs []*rev) *grid {\n\treturn &grid{\n\t\tresults: make(map[gridKey]result),\n\t\tlabels:  make(map[string]sum),\n\t\trevs:    revs,\n\t}\n}\n\nfunc (g *grid) add(label string, rev *rev, result result) {\n\tk := gridKey{label, rev}\n\tif _, ok := g.results[k]; ok {\n\t\tlog.Fatalf(\"duplicate key: (%s, %s)\", label, rev)\n\t}\n\tg.results[k] = result\n\tsum := g.labels[label]\n\tsum.add(result)\n\tg.labels[label] = sum\n\t// TODO: Cross-sum on revs, too?\n}\n\n// sortedLabels returns the labels of this grid, sorted from highest\n// to lowest failure rat.e\nfunc (g *grid) sortedLabels() []string {\n\tkeys := make([]string, 0, len(g.labels))\n\tfor k := range g.labels {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Slice(keys, func(i, j int) bool {\n\t\treturn !g.labels[keys[i]].less(g.labels[keys[j]])\n\t})\n\treturn keys\n}\n\n// labelResults returns a slice of results for the given label,\n// indexed by rev ID.\nfunc (g *grid) labelResults(label string) []result {\n\tresults := make([]result, len(g.revs))\n\tfor i, rev := range g.revs {\n\t\tresults[i] = g.results[gridKey{label, rev}]\n\t}\n\treturn results\n}\n\ntype sum 
struct {\n\tfails int\n\ttotal int\n}\n\nfunc (s *sum) add(r result) {\n\tif r == resNone {\n\t\treturn\n\t}\n\ts.total++\n\tif r == resFail {\n\t\ts.fails++\n\t}\n}\n\nfunc (s sum) failureRate() float64 {\n\tif s.total == 0 {\n\t\treturn 1\n\t}\n\treturn float64(s.fails) / float64(s.total)\n}\n\nfunc (s sum) less(s2 sum) bool {\n\tif f1, f2 := s.failureRate(), s2.failureRate(); f1 != f2 {\n\t\treturn f1 < f2\n\t}\n\treturn s.total < s2.total\n}\n\nfunc rangeBuildResults(rev *rev, cb func(builder string, res result)) {\n\tfor i, builder := range rev.Builders {\n\t\tcb(builder, resultFromString(rev.Results[i]))\n\t}\n}\n\nfunc main() {\n\tflag.Var(&since, \"since\", \"list only failures on revisions since this date, as an RFC-3339 date or date-time\")\n\tflag.Parse()\n\n\trevs := getRevs(since.Time)\n\trevs = FilterInPlace(revs, func(r *rev) bool { return r.Repo == \"go\" })\n\n\tg := newGrid(revs)\n\tfor _, rev := range revs {\n\t\trangeBuildResults(rev, func(label string, res result) {\n\t\t\tg.add(label, rev, res)\n\t\t})\n\t}\n\n\tfmt.Printf(\"<!DOCTYPE html>\\n\")\n\tfmt.Printf(\"<html><body>\\n\")\n\tfmt.Printf(\"<table>\\n\")\n\tfmt.Printf(`<tr><td>builder</td><td>failures</td><td>%s</td><td align=\"right\">%s</td></tr>`, revs[0].date.Format(rfc3339Date), revs[len(revs)-1].date.Format(rfc3339Date))\n\n\tlabels := g.sortedLabels()\n\tfor _, label := range labels {\n\t\tresults := g.labelResults(label)\n\t\tsum := g.labels[label]\n\t\tfmt.Printf(`<tr><td>%s</td><td>%6.2f%% (%d/%d)</td><td colspan=\"2\"><img src=\"%s\" /></td></tr>`, html.EscapeString(label), 100*sum.failureRate(), sum.fails, sum.total, pngURI(makeResults(results)))\n\t}\n\n\tfmt.Printf(\"</table>\\n\")\n\tfmt.Printf(\"</body></html>\\n\")\n}\n\nfunc makeResults(results []result) image.Image {\n\t// TODO: Hilbert curve?\n\n\tvar (\n\t\tcolorNone = color.NRGBA{200, 200, 200, 255}\n\t\tcolorOK   = color.NRGBA{220, 255, 220, 255}\n\t\tcolorFail = color.NRGBA{200, 50, 50, 255}\n\t)\n\n\tconst px = 3 
// Size in pixels of a result\n\tconst h = 6  // Height in results\n\tw := (len(results) + h - 1) / h\n\timg := image.NewNRGBA(image.Rect(0, 0, w*px, h*px))\n\tfor i, r := range results {\n\t\tc := color.NRGBA{255, 255, 255, 0}\n\t\tswitch r {\n\t\tcase resNone:\n\t\t\tc = colorNone\n\t\tcase resOK:\n\t\t\tc = colorOK\n\t\tcase resFail:\n\t\t\tc = colorFail\n\t\t}\n\n\t\tfor dx := 0; dx < px; dx++ {\n\t\t\tfor dy := 0; dy < px; dy++ {\n\t\t\t\timg.SetNRGBA(i/h*px+dx, (i%h)*px+dy, c)\n\t\t\t}\n\t\t}\n\t}\n\treturn img\n}\n\nfunc pngURI(img image.Image) []byte {\n\tvar buf bytes.Buffer\n\tfmt.Fprintf(&buf, \"data:image/png;base64,\")\n\tenc := base64.NewEncoder(base64.StdEncoding, &buf)\n\tif err := png.Encode(enc, img); err != nil {\n\t\tlog.Fatalf(\"encoding png: %s\", err)\n\t}\n\tenc.Close()\n\treturn buf.Bytes()\n}\n"
  },
  {
    "path": "buildstats/rev.go",
    "content": "// Copyright 2022 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"time\"\n)\n\ntype rev struct {\n\tpath string\n\tdate time.Time\n\n\trevMeta\n}\n\nvar pathDateRe = regexp.MustCompile(`^(\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2})-[0-9a-f]+$`)\n\nfunc getRevs(since time.Time) []*rev {\n\tcacheDir, err := os.UserCacheDir()\n\tif err != nil {\n\t\tlog.Fatal(\"getting cache directory: \", err)\n\t}\n\trevDir := filepath.Join(cacheDir, \"fetchlogs\", \"rev\")\n\tdirs, err := os.ReadDir(revDir)\n\tif err != nil {\n\t\tlog.Fatalf(\"reading rev directory %s: %s\", revDir, err)\n\t}\n\n\t// Filter the paths down without additional I/O.\n\tvar revs []*rev\n\tfor _, dir := range dirs {\n\t\tif !dir.IsDir() {\n\t\t\tcontinue\n\t\t}\n\t\tname := dir.Name()\n\t\tm := pathDateRe.FindStringSubmatch(name)\n\t\tif m == nil {\n\t\t\tcontinue\n\t\t}\n\t\tt, err := time.Parse(rfc3339DateTime, m[1])\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif t.Before(since) {\n\t\t\tcontinue\n\t\t}\n\n\t\tpath := filepath.Join(revDir, dir.Name())\n\t\trevs = append(revs, &rev{\n\t\t\tpath: path,\n\t\t\tdate: t,\n\t\t})\n\t}\n\n\t// Load revision metadata.\n\tfor i, rev := range revs {\n\t\tfmt.Fprintf(os.Stderr, \"\\rLoading rev %d/%d...\", i+1, len(revs))\n\t\trev.revMeta = readMeta(rev.path)\n\t}\n\tfmt.Fprintf(os.Stderr, \"\\n\")\n\n\treturn revs\n}\n\nfunc (r *rev) String() string {\n\treturn r.path\n}\n\ntype revMeta struct {\n\tRepo     string   `json:\"repo\"`\n\tBuilders []string `json:\"\"`\n\tResults  []string `json:\"results\"`\n}\n\nfunc readMeta(revPath string) revMeta {\n\tvar meta revMeta\n\n\tpath := filepath.Join(revPath, \".rev.json\")\n\tb, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif err = 
json.Unmarshal(b, &meta); err != nil {\n\t\tlog.Fatalf(\"decoding %s: %s\", path, err)\n\t}\n\n\tpath = filepath.Join(revPath, \".builders.json\")\n\tb, err = ioutil.ReadFile(path)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif err = json.Unmarshal(b, &meta.Builders); err != nil {\n\t\tlog.Fatalf(\"decoding %s: %s\", path, err)\n\t}\n\n\treturn meta\n}\n\nfunc (r *rev) getLogPath(builder string) (string, error) {\n\tp := filepath.Join(r.path, builder)\n\ttarget, err := os.Readlink(p)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error getting log path: %w\", err)\n\t}\n\treturn filepath.Clean(filepath.Join(p, target)), nil\n}\n\nfunc (r *rev) readLog(builder string) ([]byte, error) {\n\tpath, err := r.getLogPath(builder)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ioutil.ReadFile(path)\n}\n"
  },
  {
    "path": "buildstats/timeflag.go",
    "content": "// Copyright 2021 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"time\"\n)\n\nconst (\n\trfc3339Date     = \"2006-01-02\"\n\trfc3339DateTime = \"2006-01-02T15:04:05\"\n)\n\n// A timeFlag is a flag.Getter that parses a time.Time\n// from either an RFC-3339 date or an RFC-3339 date and time.\n//\n// Fractional seconds and explicit time zones are not allowed.\ntype timeFlag struct {\n\tTime time.Time\n}\n\nvar _ = flag.Getter((*timeFlag)(nil))\n\nfunc (tf *timeFlag) Set(s string) error {\n\tif s == \"\" {\n\t\ttf.Time = time.Time{}\n\t\treturn nil\n\t}\n\n\tt, err := time.Parse(rfc3339Date, s)\n\tif err != nil {\n\t\tt, err = time.Parse(rfc3339DateTime, s)\n\t}\n\tif err == nil {\n\t\ttf.Time = t\n\t}\n\treturn err\n}\n\nfunc (tf *timeFlag) String() string {\n\tif tf.Time.IsZero() {\n\t\treturn \"\"\n\t}\n\tif tf.Time.Hour() == 0 && tf.Time.Minute() == 0 && tf.Time.Second() == 0 {\n\t\treturn tf.Time.Format(rfc3339Date)\n\t}\n\treturn tf.Time.Format(rfc3339DateTime)\n}\n\nfunc (tf *timeFlag) Get() interface{} {\n\treturn tf.Time\n}\n"
  },
  {
    "path": "cl-fetch/main.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// cl-fetch fetches and tags CLs from Gerrit.\npackage main\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os/exec\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"golang.org/x/build/gerrit\"\n)\n\nvar (\n\tflagOutgoing = flag.Bool(\"outgoing\", false, \"fetch outgoing CLs\")\n\tflagIncoming = flag.Bool(\"incoming\", false, \"fetch incoming CLs\")\n\tflagQuery    = flag.String(\"q\", \"\", \"fetch CLs matching `query`\")\n\tflagVerbose  = flag.Bool(\"v\", false, \"verbose output\")\n\tflagDry      = flag.Bool(\"dry-run\", false, \"print but do not execute commands\")\n)\n\nvar clRe = regexp.MustCompile(\"^[0-9]+$|^I[0-9a-f]{40}$\")\n\ntype Tag struct {\n\ttag    string\n\tcommit *gerrit.CommitInfo\n}\n\nfunc main() {\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage: %s [flags] [CLs...]\\n\", os.Args[0])\n\t\tflag.PrintDefaults()\n\t}\n\tflag.Parse()\n\n\tqueryParts := []string{}\n\tif *flagOutgoing {\n\t\tqueryParts = append(queryParts, \"is:open owner:self\")\n\t}\n\tif *flagIncoming {\n\t\tqueryParts = append(queryParts, \"is:open reviewer:self -owner:self\")\n\t}\n\tif *flagQuery != \"\" {\n\t\tqueryParts = append(queryParts, *flagQuery)\n\t}\n\tfor _, arg := range flag.Args() {\n\t\tif !clRe.MatchString(arg) {\n\t\t\tfmt.Fprintf(os.Stderr, \"CL must be a CL number or Change-Id\\n\")\n\t\t\tos.Exit(2)\n\t\t}\n\t\tqueryParts = append(queryParts, \"change:\"+arg)\n\t}\n\tif len(queryParts) == 0 {\n\t\tfmt.Fprintf(os.Stderr, \"must specify something to fetch\\n\")\n\t\tos.Exit(2)\n\t}\n\tquery := \"(\" + strings.Join(queryParts, \") OR (\") + \")\"\n\n\tif *flagVerbose {\n\t\tlog.Printf(\"query: %s\", query)\n\t}\n\n\t// Get the origin so we don't pull CLs for other repositories\n\t// in to this one.\n\torigin := gitOutput(\"config\", 
\"remote.origin.url\")\n\n\t// Get the existing CL tags.\n\thaveTags := map[string]bool{}\n\tfor _, tag := range strings.Split(gitOutput(\"tag\"), \"\\n\") {\n\t\thaveTags[tag] = true\n\t}\n\n\tc := gerrit.NewClient(\"https://go-review.googlesource.com\", gerrit.GitCookiesAuth())\n\n\tcls, err := c.QueryChanges(context.Background(), query, gerrit.QueryChangesOpt{\n\t\tFields: []string{\"CURRENT_REVISION\", \"CURRENT_COMMIT\"},\n\t})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tif *flagVerbose {\n\t\tv, _ := json.MarshalIndent(cls, \"\", \"  \")\n\t\tlog.Printf(\"Query response:\\n%s\\n\", v)\n\t}\n\n\t// Collect git fetch and tag commands.\n\tfetchCmd := []string{\"fetch\", \"--\", origin}\n\ttags := make(map[string]*Tag)\n\thashOrder := []string{}\n\tfor _, cl := range cls {\n\t\tfor commitID, rev := range cl.Revisions {\n\t\t\ttag := fmt.Sprintf(\"cl/%d/%d\", cl.ChangeNumber, rev.PatchSetNumber)\n\t\t\tif !haveTags[tag] {\n\t\t\t\tany := false\n\t\t\t\tfor _, fetch := range rev.Fetch {\n\t\t\t\t\tif fetch.URL == origin {\n\t\t\t\t\t\tfetchCmd = append(fetchCmd, fetch.Ref)\n\t\t\t\t\t\tany = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif !any {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ttags[commitID] = &Tag{\n\t\t\t\ttag:    tag,\n\t\t\t\tcommit: rev.Commit,\n\t\t\t}\n\n\t\t\thashOrder = append(hashOrder, commitID)\n\t\t}\n\t}\n\n\t// Execute git fetch and tag commands.\n\tif len(fetchCmd) != 3 {\n\t\tgit(fetchCmd...)\n\t\tfmt.Println()\n\t}\n\tfor commitID, tag := range tags {\n\t\tif !haveTags[tag.tag] {\n\t\t\tgit(\"tag\", tag.tag, commitID)\n\t\t}\n\t}\n\tif *flagDry {\n\t\t// Separate command from printed tags.\n\t\tfmt.Println()\n\t}\n\n\t// Print tags.\n\tleafs := make(map[string]bool)\n\tfor commitID, _ := range tags {\n\t\tleafs[commitID] = true\n\t}\n\tfor _, tag := range tags {\n\t\tfor _, parent := range tag.commit.Parents {\n\t\t\tleafs[parent.CommitID] = false\n\t\t}\n\t}\n\n\tprinted := make(map[string]bool)\n\tneedBlank := 
false\n\tfor i := range hashOrder {\n\t\tcommitID := hashOrder[len(hashOrder)-i-1]\n\t\tif !leafs[commitID] {\n\t\t\tcontinue\n\t\t}\n\t\tif needBlank {\n\t\t\tfmt.Println()\n\t\t}\n\t\tneedBlank = printChain(tags, commitID, printed)\n\t}\n}\n\nfunc git(args ...string) {\n\tif *flagDry {\n\t\tfmt.Printf(\"git %s\\n\", strings.Join(args, \" \"))\n\t\treturn\n\t}\n\n\tcmd := exec.Command(\"git\", args...)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tif err := cmd.Run(); err != nil {\n\t\tlog.Fatalf(\"git %s failed: %s\", strings.Join(args, \" \"), err)\n\t}\n}\n\nfunc gitOutput(args ...string) string {\n\tif *flagDry {\n\t\tfmt.Printf(\"git %s\\n\", strings.Join(args, \" \"))\n\t}\n\n\tcmd := exec.Command(\"git\", args...)\n\tcmd.Stderr = os.Stderr\n\tout, err := cmd.Output()\n\tif err != nil {\n\t\tlog.Fatalf(\"git %s failed: %s\", strings.Join(args, \" \"), err)\n\t}\n\treturn strings.TrimRight(string(out), \"\\n\")\n}\n\nfunc printChain(tags map[string]*Tag, commitID string, printed map[string]bool) bool {\n\tif printed[commitID] {\n\t\treturn false\n\t}\n\tprinted[commitID] = true\n\n\ttag := tags[commitID]\n\tfor _, parent := range tag.commit.Parents {\n\t\tif tags[parent.CommitID] != nil {\n\t\t\tprintChain(tags, parent.CommitID, printed)\n\t\t}\n\t}\n\tfmt.Printf(\"%s %s\\n\", tag.tag, tag.commit.Subject)\n\treturn true\n}\n"
  },
  {
    "path": "dashquery/compile.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage dashquery\n\nimport (\n\t\"fmt\"\n\t\"go/ast\"\n\t\"go/constant\"\n\t\"go/parser\"\n\t\"go/token\"\n\t\"time\"\n)\n\ntype compiler struct {\n\tnames map[string]queryNode\n}\n\nfunc newCompiler(names map[string]queryNode) *compiler {\n\treturn &compiler{names}\n}\n\nfunc (c *compiler) compile(expr string) (boolNode, error) {\n\tfset := token.NewFileSet()\n\tast, err := parser.ParseExprFrom(fset, \"\", expr, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Translate AST into nested closures and type-check.\n\tvar fn boolNode\n\tfunc() {\n\t\tdefer func() {\n\t\t\terr2 := recover()\n\t\t\tif err2, ok := err2.(*compileError); ok {\n\t\t\t\terr = err2\n\t\t\t} else if err2 != nil {\n\t\t\t\tpanic(err2)\n\t\t\t}\n\t\t}()\n\t\tfn = c.bool(ast, c.expr(ast))\n\t}()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn fn, nil\n}\n\n// bad panics with a compileError for the given message.\nfunc (c *compiler) bad(ast ast.Node, format string, a ...interface{}) {\n\t// TODO: Report position information from ast.\n\tpanic(&compileError{format, a})\n}\n\ntype compileError struct {\n\tformat string\n\ta      []interface{}\n}\n\nfunc (e *compileError) Error() string {\n\treturn fmt.Sprintf(e.format, e.a...)\n}\n\ntype queryNode interface {\n\t// typ returns the type of this node's result as a string.\n\ttyp() string\n\t// cfunc returns an evaluation function that wraps its result\n\t// in constant.Value.\n\tcfunc() func(pathInfo) constant.Value\n}\n\ntype (\n\tboolNode   func(pi pathInfo) bool\n\tnumberNode func(pi pathInfo) constant.Value\n\tstringNode func(pi pathInfo) string\n\ttimeNode   func(pi pathInfo) time.Time\n)\n\nfunc (boolNode) typ() string   { return \"bool\" }\nfunc (numberNode) typ() string { return \"number\" }\nfunc (stringNode) typ() string { return \"string\" }\nfunc 
(timeNode) typ() string   { return \"time\" }\n\nfunc (n boolNode) cfunc() func(pathInfo) constant.Value {\n\treturn func(pi pathInfo) constant.Value {\n\t\treturn constant.MakeBool(n(pi))\n\t}\n}\nfunc (n numberNode) cfunc() func(pathInfo) constant.Value {\n\treturn (func(pathInfo) constant.Value)(n)\n}\nfunc (n stringNode) cfunc() func(pathInfo) constant.Value {\n\treturn func(pi pathInfo) constant.Value {\n\t\treturn constant.MakeString(n(pi))\n\t}\n}\nfunc (n timeNode) cfunc() func(pathInfo) constant.Value {\n\t// Should never happen.\n\tpanic(\"timeNode.cfunc\")\n}\n\n// bool returns n as a boolNode or panics with a type error.\nfunc (c *compiler) bool(ast ast.Expr, n queryNode) boolNode {\n\tfn, ok := n.(boolNode)\n\tif !ok {\n\t\tc.bad(ast, \"want bool, but %s has type %s\", ast, n.typ())\n\t}\n\treturn fn\n}\n\n// number returns n as a numberNode or panics with a type error.\nfunc (c *compiler) number(ast ast.Expr, n queryNode) numberNode {\n\tfn, ok := n.(numberNode)\n\tif !ok {\n\t\tc.bad(ast, \"want number, but %s has type %s\", ast, n.typ())\n\t}\n\treturn fn\n}\n\n// oneOf requires that x's type be one of typs.\nfunc (c *compiler) oneOf(ast ast.Expr, x queryNode, typs ...string) {\n\thave := x.typ()\n\tfor _, typ := range typs {\n\t\tif have == typ {\n\t\t\treturn\n\t\t}\n\t}\n\twant := \"\"\n\tswitch len(typs) {\n\tcase 1:\n\t\twant = typs[0]\n\tcase 2:\n\t\twant = typs[0] + \" or \" + typs[1]\n\tdefault:\n\t\tfor i, typ := range typs {\n\t\t\tif i == len(typs)-1 {\n\t\t\t\twant += \", or\"\n\t\t\t} else if i > 0 {\n\t\t\t\twant += \", \"\n\t\t\t}\n\t\t\twant += typ\n\t\t}\n\t}\n\tc.bad(ast, \"want %s, but %s has type %s\", want, ast, x.typ())\n}\n\n// sameType requires that x and y have the same type and that both are\n// one of typs.\nfunc (c *compiler) sameType(ast *ast.BinaryExpr, x, y queryNode, typs ...string) {\n\tc.oneOf(ast.X, x, typs...)\n\tc.oneOf(ast.Y, y, typs...)\n\tif x.typ() != y.typ() {\n\t\tc.bad(ast, \"operands of %s must have same 
type, not %s and %s\", ast, x.typ(), y.typ())\n\t}\n}\n\n// expr type-checks and compiles expr to a queryNode.\nfunc (c *compiler) expr(expr ast.Expr) queryNode {\n\tswitch expr := expr.(type) {\n\tcase *ast.BasicLit:\n\t\tv := constant.MakeFromLiteral(expr.Value, expr.Kind, 0)\n\t\tswitch expr.Kind {\n\t\tcase token.INT, token.FLOAT:\n\t\t\treturn numberNode(func(pi pathInfo) constant.Value {\n\t\t\t\treturn v\n\t\t\t})\n\t\tcase token.STRING:\n\t\t\tstr := constant.StringVal(v)\n\t\t\treturn stringNode(func(pi pathInfo) string {\n\t\t\t\treturn str\n\t\t\t})\n\t\t}\n\n\tcase *ast.BinaryExpr:\n\t\tx, y := c.expr(expr.X), c.expr(expr.Y)\n\t\tswitch expr.Op {\n\t\tcase token.ADD:\n\t\t\tc.sameType(expr, x, y, \"number\", \"string\")\n\t\t\tswitch x := x.(type) {\n\t\t\tcase numberNode:\n\t\t\t\ty := y.(numberNode)\n\t\t\t\treturn numberNode(func(pi pathInfo) constant.Value {\n\t\t\t\t\treturn constant.BinaryOp(x(pi), expr.Op, y(pi))\n\t\t\t\t})\n\t\t\tcase stringNode:\n\t\t\t\ty := y.(stringNode)\n\t\t\t\treturn stringNode(func(pi pathInfo) string {\n\t\t\t\t\treturn x(pi) + y(pi)\n\t\t\t\t})\n\t\t\t}\n\t\tcase token.SUB, token.MUL, token.QUO, token.REM:\n\t\t\tx, y := c.number(expr.X, x), c.number(expr.Y, y)\n\t\t\treturn numberNode(func(pi pathInfo) constant.Value {\n\t\t\t\treturn constant.BinaryOp(x(pi), expr.Op, y(pi))\n\t\t\t})\n\n\t\t\t// TODO: AND, OR, XOR, SHL, SHR, AND_NOT\n\n\t\tcase token.LAND:\n\t\t\tx, y := c.bool(expr.X, x), c.bool(expr.Y, y)\n\t\t\treturn boolNode(func(pi pathInfo) bool {\n\t\t\t\treturn x(pi) && y(pi)\n\t\t\t})\n\t\tcase token.LOR:\n\t\t\tx, y := c.bool(expr.X, x), c.bool(expr.Y, y)\n\t\t\treturn boolNode(func(pi pathInfo) bool {\n\t\t\t\treturn x(pi) || y(pi)\n\t\t\t})\n\n\t\tcase token.LSS, token.GTR, token.LEQ, token.GEQ:\n\t\t\tc.sameType(expr, x, y, \"number\", \"string\", \"time\")\n\t\t\tfallthrough\n\t\tcase token.EQL, token.NEQ:\n\t\t\tc.sameType(expr, x, y, \"bool\", \"number\", \"string\", \"time\")\n\t\t\tif x, ok := 
x.(timeNode); ok {\n\t\t\t\ty := y.(timeNode)\n\t\t\t\treturn boolNode(func(pi pathInfo) bool {\n\t\t\t\t\txv := constant.MakeInt64(x(pi).UnixNano())\n\t\t\t\t\tyv := constant.MakeInt64(y(pi).UnixNano())\n\t\t\t\t\treturn constant.Compare(xv, expr.Op, yv)\n\t\t\t\t})\n\t\t\t}\n\t\t\tx, y := x.cfunc(), y.cfunc()\n\t\t\treturn boolNode(func(pi pathInfo) bool {\n\t\t\t\treturn constant.Compare(x(pi), expr.Op, y(pi))\n\t\t\t})\n\t\t}\n\n\tcase *ast.CallExpr:\n\t\t// TODO: This is awful. Have a real function node.\n\t\tid, ok := expr.Fun.(*ast.Ident)\n\t\tif !ok {\n\t\t\tc.bad(expr, \"bad call %s\", expr)\n\t\t}\n\t\tswitch id.Name {\n\t\tcase \"date\":\n\t\t\t// TODO: Parse date argument. Would be nice if\n\t\t\t// we could constant-fold.\n\t\t}\n\t\tc.bad(expr, \"undefined: %s\", id.Name)\n\n\tcase *ast.Ident:\n\t\tif node, ok := c.names[expr.Name]; ok {\n\t\t\treturn node\n\t\t}\n\t\tc.bad(expr, \"undefined: %s\", expr.Name)\n\n\t\t// TODO: IndexExpr? SliceExpr?\n\n\tcase *ast.ParenExpr:\n\t\treturn c.expr(expr.X)\n\n\tcase *ast.UnaryExpr:\n\t\tx := c.expr(expr.X)\n\t\tswitch expr.Op {\n\t\tcase token.ADD, token.SUB:\n\t\t\tx := c.number(expr.X, x)\n\t\t\treturn numberNode(func(pi pathInfo) constant.Value {\n\t\t\t\treturn constant.UnaryOp(expr.Op, x(pi), 0)\n\t\t\t})\n\t\t\t// TODO: XOR\n\t\tcase token.NOT:\n\t\t\tx := c.bool(expr.X, x)\n\t\t\treturn boolNode(func(pi pathInfo) bool {\n\t\t\t\treturn !x(pi)\n\t\t\t})\n\t\t}\n\t}\n\n\tc.bad(expr, \"unsupported expression %s\", expr)\n\treturn nil\n}\n"
  },
  {
    "path": "dashquery/compile_test.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage dashquery\n\nimport \"testing\"\n\nfunc TestEval(t *testing.T) {\n\ttry := func(expr string, want bool) {\n\t\tt.Helper()\n\t\tq, err := Compile(expr)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: unexpected compile error %s\", expr, err)\n\t\t\treturn\n\t\t}\n\t\tif have := q.fn(pathInfo{}); have != want {\n\t\t\tt.Errorf(\"%s: want %v, have %v\", expr, want, have)\n\t\t}\n\t}\n\n\ttry(`true`, true)\n\ttry(`false`, false)\n\ttry(`1 == 1`, true)\n\ttry(`1 == 2`, false)\n\ttry(`\"a\" == \"a\"`, true)\n\ttry(`\"a\" == \"b\"`, false)\n\ttry(`1+1 == 2`, true)\n\ttry(`\"a\"+\"b\" == \"ab\"`, true)\n\ttry(`1-1 == 0`, true)\n\ttry(`1==1 && 2==2`, true)\n\ttry(`1==1 && 2==3`, false)\n\ttry(`1==1 || 2==2`, true)\n\ttry(`1==2 || 1==2`, false)\n\ttry(`1 < 2`, true)\n\ttry(`1 > 2`, false)\n\ttry(`(1==1) == (2==2)`, true)\n\ttry(`(1==1) == (1==2)`, false)\n\ttry(`-1 == 1-2`, true)\n\ttry(`+1 == 0+1`, true)\n\ttry(`!(1==1) == (1==2)`, true)\n}\n"
  },
  {
    "path": "dashquery/main.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage dashquery\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"go/constant\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org/x/build/types\"\n\t\"golang.org/x/sync/errgroup\"\n)\n\nfunc Compile(expr string) (*Query, error) {\n\tc := newCompiler(builtins)\n\tfn, err := c.compile(expr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Query{fn}, nil\n}\n\nfunc constNum(v int64) numberNode {\n\tcv := constant.MakeInt64(v)\n\treturn numberNode(func(pi pathInfo) constant.Value {\n\t\treturn cv\n\t})\n}\n\nvar startTimeCache time.Time\n\nfunc startTime() time.Time {\n\tif startTimeCache.IsZero() {\n\t\tstartTimeCache = time.Now()\n\t}\n\treturn startTimeCache\n}\n\nvar builtins = map[string]queryNode{\n\t\"true\": boolNode(func(pi pathInfo) bool {\n\t\treturn true\n\t}),\n\t\"false\": boolNode(func(pi pathInfo) bool {\n\t\treturn false\n\t}),\n\n\t\"second\":  constNum(int64(time.Second)),\n\t\"seconds\": constNum(int64(time.Second)),\n\t\"minute\":  constNum(int64(time.Minute)),\n\t\"minutes\": constNum(int64(time.Minute)),\n\t\"hour\":    constNum(int64(time.Hour)),\n\t\"hours\":   constNum(int64(time.Hour)),\n\t\"day\":     constNum(int64(24 * time.Hour)),\n\t\"days\":    constNum(int64(24 * time.Hour)),\n\n\t\"age\": numberNode(func(pi pathInfo) constant.Value {\n\t\tdate, err := time.Parse(time.RFC3339, pi.buildRev().Date)\n\t\tif err != nil {\n\t\t\treturn constant.MakeInt64(0)\n\t\t}\n\t\treturn constant.MakeInt64(int64(startTime().Sub(date)))\n\t}),\n\n\t\"builder\": stringNode(func(pi pathInfo) string {\n\t\treturn pi.builder\n\t}),\n\t\"os\": stringNode(func(pi pathInfo) string {\n\t\ti := strings.IndexByte(pi.builder, '-')\n\t\tif i < 0 {\n\t\t\treturn \"?\"\n\t\t}\n\t\treturn 
pi.builder[:i]\n\t}),\n\t\"arch\": stringNode(func(pi pathInfo) string {\n\t\ti := strings.IndexByte(pi.builder, '-') + 1\n\t\tif i <= 0 {\n\t\t\treturn \"?\"\n\t\t}\n\t\ti2 := strings.IndexByte(pi.builder[i:], '-')\n\t\tif i2 < 0 {\n\t\t\treturn pi.builder[i:]\n\t\t}\n\t\treturn pi.builder[i : i+i2]\n\t}),\n\n\t// TODO: From .rev.json: repo, revision, date, branch, author\n\t// TODO: File content matching\n}\n\ntype Query struct {\n\tfn boolNode\n}\n\ntype pathInfo struct {\n\tbuilder       string\n\trevPath       string\n\tbuildRevCache *types.BuildRevision\n}\n\nfunc (pi *pathInfo) buildRev() *types.BuildRevision {\n\tif pi.buildRevCache == nil {\n\t\tpi.buildRevCache = new(types.BuildRevision)\n\t\tdata, err := ioutil.ReadFile(filepath.Join(pi.revPath, \".rev.json\"))\n\t\tif err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t} else {\n\t\t\terr = json.Unmarshal(data, pi.buildRevCache)\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t\t}\n\t\t}\n\t}\n\treturn pi.buildRevCache\n}\n\nfunc RevDir() string {\n\treturn filepath.Join(xdgCacheDir(), \"fetchlogs\", \"rev\")\n}\n\n// revs returns revision paths in reverse chronological order.\nfunc revs() ([]string, error) {\n\trevDir := RevDir()\n\tfis, err := ioutil.ReadDir(revDir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar paths []string\n\tfor _, fi := range fis {\n\t\tif !fi.IsDir() {\n\t\t\tcontinue\n\t\t}\n\t\tpaths = append(paths, filepath.Join(revDir, fi.Name()))\n\t}\n\tsort.Sort(sort.Reverse(sort.StringSlice(paths)))\n\treturn paths, nil\n}\n\n// AllPaths finds all dashboard log paths matching q and passes them\n// to fn. 
Paths are returned in descending order by date (most recent\n// first).\nfunc (q *Query) AllPaths(fn func(string) error) error {\n\trevs, err := revs()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttype task struct {\n\t\tpi    pathInfo\n\t\treply chan bool\n\t}\n\tnworkers := 2 * runtime.GOMAXPROCS(-1)\n\ttasks := make(chan task)\n\treplies := make(chan task, nworkers)\n\tg, ctx := errgroup.WithContext(context.Background())\n\n\t// Feeder.\n\tg.Go(func() error {\n\t\tdefer close(tasks)\n\t\tdefer close(replies)\n\n\t\tvar pi pathInfo\n\t\tfor _, rev := range revs {\n\t\t\tpi.revPath = rev\n\n\t\t\tlogs, err := ioutil.ReadDir(rev)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfor _, log := range logs {\n\t\t\t\tif log.IsDir() || strings.HasPrefix(log.Name(), \".\") {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tpi.builder = log.Name()\n\n\t\t\t\ttask := task{pi, make(chan bool)}\n\t\t\t\tselect {\n\t\t\t\tcase tasks <- task:\n\t\t\t\t\treplies <- task\n\t\t\t\tcase <-ctx.Done():\n\t\t\t\t\treturn ctx.Err()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\n\t// Workers.\n\tfor i := 0; i < nworkers; i++ {\n\t\tg.Go(func() error {\n\t\t\tfor {\n\t\t\t\ttask, ok := <-tasks\n\t\t\t\tif !ok {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\ttask.reply <- q.fn(task.pi)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\n\t// Aggregator.\n\tg.Go(func() error {\n\t\tfor reply := range replies {\n\t\t\tif <-reply.reply {\n\t\t\t\tpi := reply.pi\n\t\t\t\terr := fn(filepath.Join(pi.revPath, pi.builder))\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn g.Wait()\n}\n"
  },
  {
    "path": "dashquery/xdg.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage dashquery\n\nimport (\n\t\"os\"\n\t\"os/user\"\n\t\"path/filepath\"\n\t\"runtime\"\n)\n\n// xdgCacheDir returns the XDG Base Directory Specification cache\n// directory.\nfunc xdgCacheDir() string {\n\tcache := os.Getenv(\"XDG_CACHE_HOME\")\n\tif cache == \"\" {\n\t\thome := os.Getenv(\"HOME\")\n\t\tif home == \"\" {\n\t\t\tu, err := user.Current()\n\t\t\tif err != nil {\n\t\t\t\thome = u.HomeDir\n\t\t\t}\n\t\t}\n\t\t// Not XDG but standard for OS X.\n\t\tif runtime.GOOS == \"darwin\" {\n\t\t\treturn filepath.Join(home, \"Library/Caches\")\n\t\t}\n\t\tcache = filepath.Join(home, \".cache\")\n\t}\n\treturn cache\n}\n\n// xdgCreateDir creates a directory and its parents in accordance with\n// the XDG Base Directory Specification.\nfunc xdgCreateDir(path string) error {\n\treturn os.MkdirAll(path, 0700)\n}\n"
  },
  {
    "path": "findflakes/adtest.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"math\"\n\t\"sort\"\n)\n\nvar (\n\tErrSampleSize = errors.New(\"sample is too small\")\n)\n\ntype SampleValueError struct {\n\tValue  int\n\tDetail string\n}\n\nfunc (e *SampleValueError) Error() string {\n\treturn e.Detail\n}\n\ntype AndersonDarlingTestResult struct {\n\t// A2 is the Anderson-Darling test statistic, A², for the\n\t// goodness of fit of the sample to the probability\n\t// distribution.\n\tA2 float64\n\n\t// P is the p-value for this test. A small value of P\n\t// indicates a significant difference between the sample and\n\t// the distribution.\n\tP float64\n}\n\n// AndersonDarlingTest performs an Anderson-Darling goodness-of-fit\n// test for whether a sample comes from a population with a specified\n// distribution. It tests the null hypothesis that sample follows dist\n// against the alternate hypothesis that sample does not follow dist.\n//\n// Note that this uses a Monte Carlo method (parametric bootstrap) to\n// estimate the distribution of the test statistic and hence the exact\n// P value may vary slightly between calls with the same sample and\n// distribution.\nfunc AndersonDarlingTest(sample []int, dist *GeometricDist) (*AndersonDarlingTestResult, error) {\n\tif len(sample) == 0 {\n\t\treturn nil, ErrSampleSize\n\t}\n\n\tif !sort.IntsAreSorted(sample) {\n\t\tsample = append([]int(nil), sample...)\n\t\tsort.Ints(sample)\n\t}\n\n\tA2, err := andersonDarling(sample, dist)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Use parametric bootstrap to estimate the distribution of\n\t// A².\n\tconst resamples = 1000\n\tnsample := make([]int, len(sample))\n\tngreater := 0\n\tfor i := 0; i < resamples; i++ {\n\t\tfor j := range nsample {\n\t\t\tnsample[j] = dist.Rand()\n\t\t}\n\t\tsort.Ints(nsample)\n\t\tnA2, err := 
andersonDarling(nsample, dist)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif nA2 >= A2 {\n\t\t\tngreater++\n\t\t}\n\t}\n\tp := float64(ngreater) / resamples\n\n\treturn &AndersonDarlingTestResult{A2, p}, nil\n}\n\n// andersonDarling returns the Anderson-Darling test statistic, A²,\n// for the goodness of fit of sample to dist.\n//\n// sample must be sorted.\nfunc andersonDarling(sample []int, dist *GeometricDist) (float64, error) {\n\tsum := 0.0\n\t// TODO: Rearrange terms so we don't have to compute each\n\t// sample's CDF twice.\n\tfor i, y1 := range sample {\n\t\ty2 := sample[len(sample)-i-1]\n\t\tcdf1, sf2 := dist.CDF(y1), dist.SF(y2)\n\t\tif cdf1 == 0 {\n\t\t\treturn 0, &SampleValueError{\n\t\t\t\tValue:  y1,\n\t\t\t\tDetail: fmt.Sprintf(\"sample %d lies outside support of expected distribution %v\", y1, dist),\n\t\t\t}\n\t\t}\n\t\tif sf2 == 0 {\n\t\t\treturn 0, &SampleValueError{\n\t\t\t\tValue:  y2,\n\t\t\t\tDetail: fmt.Sprintf(\"sample %d lies outside support of expected distribution %v\", y2, dist),\n\t\t\t}\n\t\t}\n\t\t// Weight is 2i-1 for 1-indexed i, i.e. 2i+1 for Go's 0-indexed i.\n\t\tsum += float64(2*i+1) * (math.Log(cdf1) + math.Log(sf2))\n\t}\n\treturn -float64(len(sample)) - sum/float64(len(sample)), nil\n}\n"
  },
  {
    "path": "findflakes/flaketest.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n)\n\ntype FlakeTestResult struct {\n\tAll []FlakeRegion\n}\n\ntype FlakeRegion struct {\n\t// Times gives the times of all failures in this region, in\n\t// increasing order.\n\t//\n\t// TODO: Remove some of the redundant fields?\n\tTimes []int\n\n\t// First and Last are the indexes of the first and last\n\t// failures in this flaky region. These are equivalent to\n\t// Times[0] and Times[len(Times)-1], respectively.\n\tFirst, Last int\n\n\t// Failures is the number of failures in the region. This is\n\t// equivalent to len(Times).\n\tFailures int\n\n\t// FailureProbability is the fraction of builds in this region\n\t// that failed.\n\tFailureProbability float64\n\n\t// GoodnessOfFit is the goodness of fit test for this region\n\t// against the maximum likelihood estimate geometric\n\t// distribution for these failures. This is primarily for\n\t// debugging.\n\tGoodnessOfFit *AndersonDarlingTestResult\n}\n\n// FlakeTest finds ranges of commits over which the failure\n// probability of a test is fairly consistent. The failures argument\n// gives the indexes of commits with failing tests.\n//\n// This works by assuming flaky tests are a Bernoulli process. That\n// is, they fail with some probability and each failure is independent\n// of other failures. Using this assumption, it subdivides the failure\n// events to find subranges where the distribution of times between\n// failures is very similar to a geometric distribution (determined\n// using an Anderson-Darling goodness-of-fit test).\nfunc FlakeTest(failures []int) *FlakeTestResult {\n\tresult := &FlakeTestResult{}\n\tresult.subdivide(failures)\n\treturn result\n}\n\n// subdivide adds events to the flake test result if it has a strongly\n// geometric interarrival distribution. 
Otherwise, it recursively\n// subdivides events on the longest gap.\n//\n// events must be strictly monotonically increasing.\nfunc (r *FlakeTestResult) subdivide(events []int) {\n\tif len(events) == 1 {\n\t\t// Isolated failure.\n\t\tregion := FlakeRegion{events, events[0], events[0], 1, 1, nil}\n\t\tr.All = append(r.All, region)\n\t\treturn\n\t}\n\n\tmle, ad := interarrivalAnalysis(events)\n\tif ad == nil || ad.P >= 0.05 {\n\t\t// We failed to reject the null hypothesis that this\n\t\t// isn't geometrically distributed. That's about as\n\t\t// close as we're going to get to calling it\n\t\t// geometrically distributed.\n\t\tregion := FlakeRegion{events, events[0], events[len(events)-1], len(events), mle.P, ad}\n\t\tr.All = append(r.All, region)\n\t\treturn\n\t}\n\n\t// We reject the null hypothesis and accept the alternate\n\t// hypothesis that this range of events is not a Bernoulli\n\t// process. Subdivide on the longest gap, which is the least\n\t// likely event in this range.\n\tlongestIndex, longestVal := 0, events[1]-events[0]\n\tfor i := 0; i < len(events)-1; i++ {\n\t\tval := events[i+1] - events[i]\n\t\tif val > longestVal {\n\t\t\tlongestIndex, longestVal = i, val\n\t\t}\n\t}\n\n\t//fmt.Fprintln(os.Stderr, \"subdividing\", events[:longestIndex+1], events[longestIndex+1:], mle.P, ad.P)\n\n\t// Find the more recent ranges first.\n\tr.subdivide(events[longestIndex+1:])\n\tr.subdivide(events[:longestIndex+1])\n}\n\n// interarrivalAnalysis returns the maximum likelihood estimated\n// distribution for the times between events and the Anderson-Darling\n// test for how closely the data matches this distribution. 
ad will be\n// nil if there is no time between any of the events.\n//\n// events must be strictly monotonically increasing.\nfunc interarrivalAnalysis(events []int) (mle *GeometricDist, ad *AndersonDarlingTestResult) {\n\tinterarrivalTimes := make([]int, len(events)-1)\n\tsum := 0\n\tfor i := 0; i < len(events)-1; i++ {\n\t\tdelta := events[i+1] - events[i] - 1\n\t\tinterarrivalTimes[i] = delta\n\t\tsum += delta\n\t}\n\n\t// Compute maximum likelihood estimate of geometric\n\t// distribution underlying interarrivalTimes.\n\tmle = &GeometricDist{P: float64(len(interarrivalTimes)) / float64(len(interarrivalTimes)+sum)}\n\tif mle.P == 1 {\n\t\t// This happens if there are no gaps between events.\n\t\t// In this case Anderson-Darling is undefined because\n\t\t// the CDF is 1.\n\t\treturn\n\t}\n\n\t// Compute Anderson-Darling goodness-of-fit for the observed\n\t// distribution against the theoretical distribution.\n\tvar err error\n\tad, err = AndersonDarlingTest(interarrivalTimes, mle)\n\tif err != nil {\n\t\tlog.Fatal(\"Anderson-Darling test failed: \", err)\n\t}\n\n\treturn\n}\n\nfunc (r *FlakeTestResult) Dump(w io.Writer) {\n\tfor i := range r.All {\n\t\treg := &r.All[len(r.All)-i-1]\n\t\tgof := 0.0\n\t\tif reg.GoodnessOfFit != nil {\n\t\t\tgof = reg.GoodnessOfFit.P\n\t\t}\n\n\t\tfmt.Fprintln(w, reg.First, 0, 0)\n\t\tfmt.Fprintln(w, reg.First, reg.FailureProbability, gof)\n\t\tfmt.Fprintln(w, reg.Last, reg.FailureProbability, gof)\n\t\tfmt.Fprintln(w, reg.Last, 0, 0)\n\t}\n}\n\n// StillHappening returns the probability that the flake is still\n// happening as of time t.\nfunc (r *FlakeRegion) StillHappening(t int) float64 {\n\tif t < r.First {\n\t\treturn 0\n\t}\n\tdist := GeometricDist{P: r.FailureProbability, Start: r.Last + 1}\n\treturn 1 - dist.CDF(t)\n}\n\n// Bounds returns the time at which the probability that the failure\n// started rises above p and the time at which the probability that\n// the failure stopped falls below p. 
Note that this has no idea of\n// the \"current\" time, so stop may be \"in the future.\"\nfunc (r *FlakeRegion) Bounds(p float64) (start, stop int) {\n\tdist := GeometricDist{P: r.FailureProbability}\n\tdelta := dist.InvCDF(1 - p)\n\treturn r.First - delta, r.Last + delta\n}\n\n// StartedAtOrBefore returns the probability that the failure start at\n// or before time t.\nfunc (r *FlakeRegion) StartedAtOrBefore(t int) float64 {\n\tif t > r.First {\n\t\treturn 1\n\t}\n\tdist := GeometricDist{P: r.FailureProbability}\n\treturn 1 - dist.CDF(r.First-t-1)\n}\n\nfunc (r *FlakeRegion) StartedAt(t int) float64 {\n\tdist := GeometricDist{P: r.FailureProbability}\n\treturn dist.PMF(r.First - t)\n}\n\n// Culprit gives the probability P that the event at time T was\n// responsible for a failure.\ntype Culprit struct {\n\tP float64\n\tT int\n}\n\n// Culprits returns the possible culprits for this failure up to a\n// cumulative probability of cumProb or at most limit events. Culprits\n// are returned in reverse time order (from most likely culprit to\n// least likely).\nfunc (r *FlakeRegion) Culprits(cumProb float64, limit int) []Culprit {\n\tculprits := []Culprit{}\n\n\ttotal := 0.0\n\tfor t := r.First; t >= 0 && t > r.First-limit; t-- {\n\t\tp := r.StartedAt(t)\n\t\tculprits = append(culprits, Culprit{P: p, T: t})\n\t\ttotal += p\n\t\tif total > cumProb {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn culprits\n}\n"
  },
  {
    "path": "findflakes/geodist.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"math\"\n\t\"math/rand\"\n)\n\n// GeometricDist is a geometric distribution with success probability\n// P.\ntype GeometricDist struct {\n\tP float64\n\n\t// Start is the start of the distribution's support. There are\n\t// two conventional definitions of the geometric distribution:\n\t//\n\t// For Start=0, the distribution gives the number of failures\n\t// before the first success in a Bernoulli process with\n\t// success probability P.\n\t//\n\t// For Start=1, the distribution gives the number of trials\n\t// needed to get one success. This is often called the\n\t// \"shifted geometric distribution.\"\n\t//\n\t// Other values of Start are allowed, but have no conventional\n\t// meaning.\n\tStart int\n}\n\nfunc (d *GeometricDist) PMF(k int) float64 {\n\tif k < d.Start {\n\t\treturn 0\n\t}\n\treturn math.Pow(1-d.P, float64(k-d.Start)) * d.P\n}\n\nfunc (d *GeometricDist) CDF(k int) float64 {\n\tif k < d.Start {\n\t\treturn 0\n\t}\n\treturn 1 - math.Pow(1-d.P, float64(k-d.Start+1))\n}\n\nfunc (d *GeometricDist) SF(k int) float64 {\n\tif k < d.Start {\n\t\treturn 1\n\t}\n\treturn math.Pow(1-d.P, float64(k-d.Start+1))\n}\n\nfunc (d *GeometricDist) InvCDF(y float64) int {\n\treturn int(math.Ceil(math.Log(1-y)/math.Log(1-d.P)-1)) + d.Start\n}\n\nfunc (d *GeometricDist) Rand() int {\n\tu := 1 - rand.Float64()\n\treturn int(math.Log(u)/math.Log(1-d.P)) + d.Start\n}\n"
  },
  {
    "path": "findflakes/html.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"html/template\"\n\t\"io\"\n\t\"log\"\n\t\"reflect\"\n)\n\n// TODO: OS/Arch counts\n\nconst htmlReport = `\n<html>\n  <head>\n    <meta charset=\"utf-8\" />\n    <title>Top test failures</title>\n    <style>\nbody {\n  font-family: sans-serif;\n  color: #222;\n}\na {\n  text-decoration: none;\n}\ntable {\n  border-spacing: 0;\n  border-collapse: collapse;\n}\ntable#failures {\n  width: 100%;\n  max-width: 100%;\n}\ntable>caption {\n  padding-top: 8px;\n  padding-bottom: 8px;\n  color: #777;\n  text-align: left;\n}\ntable>tbody>tr>td, table>tbody>tr>th, table>thead>tr>th {\n  padding: 8px;\n  vertical-align: top;\n  line-height: 1.4;\n}\ntable.lined>tbody>tr:not(.expand)>td, table.lined>tbody>tr:not(.expand)>th {\n  border-top: 1px solid #ddd;\n}\ntable.lined>thead>tr>th {\n  vertical-align: bottom;\n  border-bottom: 2px solid #ddd;\n  border-top: 0px;\n}\nth {\n  text-align: left;\n}\ntable#failures>tbody>tr:not(.expand) {\n  cursor: pointer;\n}\ntable#failures>tbody>tr:not(.expand):hover {\n  color: #337ab7;\n}\ntd.pct, th.pct {\n  text-align: right;\n}\ntd.plus {\n  color: #337ab7;\n}\ntr.expand {\n  display: none;\n}\ntable>tbody>tr.expand>td {\n  padding-top: 0px;\n}\na.hash {\n  font-family: monospace;\n  font-size: 120%;\n}\n.toggleRow {\n  display: none;\n}\n    </style>\n    <script src=\"https://ajax.googleapis.com/ajax/libs/jquery/1.8.2/jquery.min.js\"></script>\n  </head>\n  <body>\n    <table id=\"failures\" class=\"lined\">\n      <caption>Test failures as of {{(lastRev .).Date.Format \"02 Jan 15:04 2006\"}}, sorted by chance the failure is still happening. 
Click row for details and culprits.</caption>\n      <thead>\n        <tr><th></th><th class=\"pct\">P(current)</th><th class=\"pct\">P(failure)</th><th style=\"width:100%\">Failure</th></tr>\n      </thead>\n      {{range $i, $class := .}}\n      {{$failuresByT := groupByT .Failures}}\n      <tr><td class=\"plus\">+</td><td class=\"pct\">{{pct .Current}}</td><td class=\"pct\">{{pct .Latest.FailureProbability}}</td><td>{{.Class.String}}</td></tr>\n      <tr class=\"expand\"><td></td><td colspan=\"3\">\n        <table>\n          <tr><th>Chance failure is still happening</th><td>{{pct .Current}}</td></tr>\n          {{with .Latest}}\n          <tr><th>Failure probability</th><td>{{pct .FailureProbability}} ({{.Failures}} of {{numCommits .}} commits)</td></tr>\n          {{if eq (numCommits .) 1}}\n          <tr><th>Observed</th><td>{{template \"observation\" (index $failuresByT .First)}}</td></tr>\n          {{else}}\n          <tr><th>First observed</th><td>{{template \"observation\" (index $failuresByT .First)}}</td></tr>\n          {{if ge (numCommits .) 
2}}\n          <tr><th></th><td><a href=\"#\" class=\"toggleRows\">show other observations</a></td></tr>\n          {{range $_, $t := (slice .Times 1 -1)}}\n          <tr class=\"toggleRow\"><th></th><td>{{template \"observation\" (index $failuresByT $t)}}</td></tr>\n          {{end}}\n          {{end}}\n          <tr><th>Last observed</th><td>{{template \"observation\" (index $failuresByT .Last)}}</td></tr>\n          <tr><th>Likely culprits</th>\n\t    <td style=\"padding:0px\">\n\t      <table>\n\t\t{{range (.Culprits 0.9 10)}}\n\t\t<tr><td class=\"pct\">{{pct .P}}</td><td>{{template \"revSubject\" (index $class.Revs .T)}}</td></tr>\n\t\t{{end}}\n\t      </table>\n\t    </td>\n          </tr>\n          {{end}}{{/* numCommits == 1*/}}\n          {{end}}{{/* with .Latest */}}\n          {{with (slice .Test.All 1 (len .Test.All))}}\n            <tr><th>{{len .}} past failure(s)</th><td><a href=\"#\" class=\"toggleRows\">show</a></td></tr>\n            {{range .}}\n              <tr class=\"toggleRow\"><th></th><td>{{template \"observation\" (index $failuresByT .First)}} to {{template \"observation\" (index $failuresByT .Last)}}; {{pct .FailureProbability}} failure probability</td></tr>\n            {{end}}\n          {{else}}\n            <tr><th>No known past failures</th></tr>\n          {{end}}\n        </table>\n      </td></tr>\n      {{end}}\n    </table>\n    <script>\n$(\"#failures\").click(function(ev) {\n    var target = $(ev.target);\n    if (target.closest(\"table\").filter(\"#failures\").length === 0)\n      return;\n\n    ev.stopPropagation();\n    var tr = target.closest(\"tr\");\n\n    if (!tr.hasClass(\"expand\")) {\n        tr.next().toggle();\n    }\n});\n$(\"a.toggleRows\").click(function(ev) {\n    ev.stopPropagation();\n    $(ev.target).closest(\"tr\").nextUntil(\":not(.toggleRow)\").toggle();\n    var text = $(ev.target).text();\n    text = text.replace(/show|hide/, function(x) { return x === \"show\" ? 
\"hide\" : \"show\"; });\n    $(ev.target).text(text);\n    return false;\n});\n    </script>\n  </body>\n</html>\n\n{{/* observation expands a []*failure in to an observation line. */}}\n{{define \"observation\"}}\n{{$first := (index . 0)}}\n{{template \"revDate\" $first.Rev}} ({{$first.CommitsAgo}} commits ago) on{{range .}} <a href=\"{{.Build.LogURL}}\">{{.Build.Builder}}</a>{{end}}\n{{end}}\n{{/* revLink expands a *Revision to a link to that commit. */}}\n{{define \"revLink\"}}\n<a href=\"https://github.com/golang/go/commit/{{.Revision}}\" class=\"hash\">{{printf \"%.7s\" .Revision}}</a>\n{{end}}\n{{/* revDate expands a *Revision to the commit's hash and date. */}}\n{{define \"revDate\"}}\n{{template \"revLink\" .}} {{.Date.Format \"02 Jan 15:04 2006\"}}\n{{end}}\n{{/* revSubject expands a *Revision to the commit's hash and subject. */}}\n{{define \"revSubject\"}}\n{{template \"revLink\" .}} {{.Subject}}\n{{end}}\n`\n\nvar htmlFuncs = template.FuncMap(map[string]interface{}{\n\t\"pct\": pct,\n\t\"lastRev\": func(classes []*failureClass) *Revision {\n\t\t// TODO: Ugh. 
It's lame that the same Revs is in every\n\t\t// failureClass.\n\t\trevs := classes[0].Revs\n\t\treturn revs[len(revs)-1]\n\t},\n\t\"numCommits\": func(r FlakeRegion) int {\n\t\treturn r.Last - r.First + 1\n\t},\n\t\"groupByT\": func(failures []*failure) map[int][]*failure {\n\t\tout := make(map[int][]*failure)\n\t\tif len(failures) == 0 {\n\t\t\treturn out\n\t\t}\n\t\tlastI, lastT := 0, failures[0].T\n\t\tfor i := 1; i < len(failures); i++ {\n\t\t\tif failures[i].T != lastT {\n\t\t\t\tout[lastT] = failures[lastI:i]\n\t\t\t\tlastI, lastT = i, failures[i].T\n\t\t\t}\n\t\t}\n\t\tout[lastT] = failures[lastI:]\n\t\treturn out\n\t},\n\t\"slice\": func(v interface{}, start, end int) interface{} {\n\t\tval := reflect.ValueOf(v)\n\t\tif start < 0 {\n\t\t\tstart = val.Len() + start\n\t\t}\n\t\tif end < 0 {\n\t\t\tend = val.Len() + end\n\t\t}\n\t\treturn val.Slice(start, end).Interface()\n\t},\n})\n\nvar htmlTemplate = template.Must(template.New(\"report\").Funcs(htmlFuncs).Parse(htmlReport))\n\nfunc printHTMLReport(w io.Writer, classes []*failureClass) {\n\terr := htmlTemplate.Execute(w, classes)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n"
  },
  {
    "path": "findflakes/logs.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org/x/build/types\"\n)\n\ntype Revision struct {\n\ttypes.BuildRevision\n\tDate time.Time\n\n\tBuilds []*Build\n\n\tpath string\n}\n\nfunc (r *Revision) String() string {\n\t// Use time format from dashboard, plus year.\n\treturn fmt.Sprintf(\"%s %s\", r.Revision[:7], r.Date.Format(\"02 Jan 15:04 2006\"))\n}\n\nfunc (r *Revision) Subject() string {\n\tsubject := r.Desc\n\tif i := strings.Index(subject, \"\\n\"); i >= 0 {\n\t\tsubject = subject[:i]\n\t}\n\treturn subject\n}\n\nfunc (r *Revision) OneLine() string {\n\treturn fmt.Sprintf(\"%s %s\", r.Revision[:7], r.Subject())\n}\n\ntype Build struct {\n\tRevision *Revision\n\tBuilder  string\n\tStatus   BuildStatus\n\tLogURL   string\n}\n\ntype BuildStatus int\n\nconst (\n\tBuildOK BuildStatus = iota\n\tBuildRunning\n\tBuildFailed\n)\n\nfunc (b *Build) LogPath() string {\n\treturn filepath.Join(b.Revision.path, b.Builder)\n}\n\nfunc (b *Build) ReadLog() ([]byte, error) {\n\treturn ioutil.ReadFile(b.LogPath())\n}\n\n// LoadRevisions loads all saved build revisions from revDir, which\n// must be the \"rev\" directory written by fetchlogs. 
The returned\n// revisions are ordered from oldest to newest.\nfunc LoadRevisions(revDir string) ([]*Revision, error) {\n\trevFiles, err := ioutil.ReadDir(revDir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trevs := []*Revision{}\n\tfor _, revFile := range revFiles {\n\t\tif !revFile.IsDir() {\n\t\t\tcontinue\n\t\t}\n\n\t\trev := &Revision{path: filepath.Join(revDir, revFile.Name())}\n\n\t\t// Load revision metadata.\n\t\tvar builders []string\n\t\terr1 := readJSONFile(filepath.Join(rev.path, \".rev.json\"), &rev.BuildRevision)\n\t\terr2 := readJSONFile(filepath.Join(rev.path, \".builders.json\"), &builders)\n\t\tif os.IsNotExist(err1) || os.IsNotExist(err2) {\n\t\t\tcontinue\n\t\t} else if err1 != nil {\n\t\t\treturn nil, err1\n\t\t} else if err2 != nil {\n\t\t\treturn nil, err2\n\t\t}\n\n\t\trev.Date, err = time.Parse(time.RFC3339, rev.BuildRevision.Date)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\trev.Builds = make([]*Build, len(builders))\n\t\tfor i, builder := range builders {\n\t\t\tvar status BuildStatus\n\t\t\tvar logURL string\n\t\t\ts := rev.Results[i]\n\t\t\tswitch s {\n\t\t\tcase \"ok\":\n\t\t\t\tstatus = BuildOK\n\t\t\tcase \"\":\n\t\t\t\tstatus = BuildRunning\n\t\t\tdefault:\n\t\t\t\tstatus = BuildFailed\n\t\t\t\tlogURL = s\n\t\t\t}\n\t\t\trev.Builds[i] = &Build{\n\t\t\t\tRevision: rev,\n\t\t\t\tBuilder:  builder,\n\t\t\t\tStatus:   status,\n\t\t\t\tLogURL:   logURL,\n\t\t\t}\n\t\t}\n\n\t\trevs = append(revs, rev)\n\t}\n\n\treturn revs, nil\n}\n\nfunc readJSONFile(path string, v interface{}) error {\n\tr, err := os.Open(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer r.Close()\n\n\treturn json.NewDecoder(r).Decode(&v)\n}\n"
  },
  {
    "path": "findflakes/main.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"log\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"runtime/pprof\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com/aclements/go-misc/internal/loganal\"\n)\n\nvar (\n\tflagRevDir = flag.String(\"dir\", defaultRevDir(), \"search logs under `directory`\")\n\tflagBranch = flag.String(\"branch\", \"master\", \"analyze commits to `branch`\")\n\tflagHTML   = flag.Bool(\"html\", false, \"print an HTML report\")\n\tflagLimit  = flag.Int(\"limit\", 0, \"process only most recent `N` revisions\")\n\n\t// TODO: Is this really just a separate mode? Should we have\n\t// subcommands?\n\tflagGrep  = flag.String(\"grep\", \"\", \"show analysis for logs matching `regexp`\")\n\tflagPaths = flag.Bool(\"paths\", false, \"read dir-relative paths of logs with failures from stdin (useful with greplogs -l)\")\n)\n\nfunc defaultRevDir() string {\n\treturn filepath.Join(xdgCacheDir(), \"fetchlogs\", \"rev\")\n}\n\n// TODO: Tool you can point at a failure log to annotate each failure\n// in the log with links to past instances of that failure. This just\n// uses log analysis.\n\n// TODO: If we were careful about merges, we could potentially use\n// information from other branches to add additional samples between\n// merge points.\n\n// TODO: Consider each build a separate event, rather than each\n// revision. It doesn't matter what \"order\" they're in, though we\n// should randomize it for each revision. 
History subdivision should\n// only happen on revision boundaries.\n//\n// OTOH, this makes deterministic failures on specific\n// OSs/architectures looks like non-deterministic failures.\n//\n// This would also mean it's more important to identify builds in\n// which a test wasn't even executed (e.g., because an earlier test\n// failed) so we don't count those as \"successes\". OTOH, it may be\n// sufficient to consider a test executed unless we see a failure in\n// that test or that build didn't happen (e.g., a gap in the history).\n//\n// This would also help with fixing the problem where hard build\n// failures are considered successes of all tests.\n//\n// This would also be more sound when builders are added at some point\n// in the history. If the probability of a failure is really constant\n// per build, adding a builder will increase the probability of seeing\n// the failure at the commit level. Of course, if it's conditional on\n// OS or architecture or builder, this will make it look *less* likely\n// at the build level.\n//\n// Along these lines, the culprit analysis should have the property\n// that do more runs around possible culprit commits should improve\n// the fidelity of the culprit results.\n\n// TODO: Support pointing this at a set of stress test failures (along\n// with the count of total runs, I guess) and having it classify and\n// report failures. In this case there's no order or commit sequence\n// involved, so there's no time series analysis or\n// first/last/culprits, but the classification and failure probability\n// are still useful.\n//\n// It also makes sense to point this at a stress test of a sequence of\n// commits, in which case the culprit analysis is still useful. 
This\n// probably integrates well with the previous TODO of considering each\n// build a separate event, and it's closely related to the problem of\n// new builders being added.\n\nfunc main() {\n\tvar cpuprofile = flag.String(\"cpuprofile\", \"\", \"write cpu profile to file\")\n\n\tflag.Parse()\n\tif flag.NArg() > 0 {\n\t\tflag.Usage()\n\t\tos.Exit(2)\n\t}\n\n\tif *cpuprofile != \"\" {\n\t\tf, err := os.Create(*cpuprofile)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tpprof.StartCPUProfile(f)\n\t\tdefer pprof.StopCPUProfile()\n\t}\n\n\tallRevs, err := LoadRevisions(*flagRevDir)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Filter to revisions on this branch.\n\trevs := []*Revision{}\n\tfor _, rev := range allRevs {\n\t\tif rev.Branch == *flagBranch {\n\t\t\trevs = append(revs, rev)\n\t\t}\n\t}\n\tif len(revs) == 0 {\n\t\tlog.Fatal(\"no revisions found\")\n\t}\n\n\t// Limit to most recent N revisions.\n\tif *flagLimit > 0 && len(revs) > *flagLimit {\n\t\trevs = revs[len(revs)-*flagLimit:]\n\t}\n\n\tif *flagGrep != \"\" {\n\t\t// Grep mode.\n\t\tre, err := regexp.Compile(*flagGrep)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tfailures := grepFailures(revs, re)\n\t\tif len(failures) == 0 {\n\t\t\treturn\n\t\t}\n\t\tfc := newFailureClass(revs, failures)\n\t\tprintTextFlakeReport(os.Stdout, fc)\n\t\treturn\n\t}\n\n\tif *flagPaths {\n\t\t// Paths mode.\n\t\tpaths, err := readPaths(os.Stdin)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tfailures := pathFailures(revs, paths)\n\t\tif len(failures) == 0 {\n\t\t\treturn\n\t\t}\n\t\tfc := newFailureClass(revs, failures)\n\t\tprintTextFlakeReport(os.Stdout, fc)\n\t\treturn\n\t}\n\n\t// Extract failures from logs.\n\tfailures := extractFailures(revs)\n\n\t// Classify failures.\n\tlfailures := make([]*loganal.Failure, len(failures))\n\tfor i, f := range failures {\n\t\tlfailures[i] = f.Failure\n\t}\n\tfailureClasses := loganal.Classify(lfailures)\n\n\t// Gather failures from each class and 
perform flakiness\n\t// tests.\n\tclasses := []*failureClass{}\n\tfor class, indexes := range failureClasses {\n\t\tclassFailures := []*failure{}\n\t\tfor _, fi := range indexes {\n\t\t\tclassFailures = append(classFailures, failures[fi])\n\t\t}\n\t\tfc := newFailureClass(revs, classFailures)\n\t\tfc.Class = class\n\n\t\t// Trim failure classes below thresholds. We leave out\n\t\t// classes with extremely low failure probabilities\n\t\t// because the chance that these are still happening\n\t\t// takes a long time to decay and there's almost\n\t\t// nothing we can do for culprit analysis.\n\t\tif fc.Current < 0.05 || fc.Latest.FailureProbability < 0.01 {\n\t\t\tcontinue\n\t\t}\n\n\t\tclasses = append(classes, fc)\n\t}\n\n\t// Sort failure classes by likelihood that failure is still\n\t// happening.\n\tsort.Sort(sort.Reverse(currentSorter(classes)))\n\n\tif *flagHTML {\n\t\tprintHTMLReport(os.Stdout, classes)\n\t} else {\n\t\tprintTextReport(os.Stdout, classes)\n\t}\n}\n\nfunc processFailureLogs(revs []*Revision, process func(build *Build, data []byte) []*failure) []*failure {\n\t// Create log processing tasks.\n\ttype Task struct {\n\t\tt     int\n\t\tbuild *Build\n\t\tres   []*failure\n\t}\n\ttasks := []Task{}\n\tfor t, rev := range revs {\n\t\tfor _, build := range rev.Builds {\n\t\t\tif build.Status != BuildFailed {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\ttasks = append(tasks, Task{t, build, nil})\n\t\t}\n\t}\n\n\t// Run failure processing.\n\ttodo := make(chan int)\n\tgo func() {\n\t\tfor i := range tasks {\n\t\t\ttodo <- i\n\t\t}\n\t\tclose(todo)\n\t}()\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < 4*runtime.GOMAXPROCS(-1); i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tfor i := range todo {\n\t\t\t\ttask := tasks[i]\n\n\t\t\t\tdata, err := task.build.ReadLog()\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\n\t\t\t\tfailures := process(task.build, data)\n\n\t\t\t\t// Fill build-related fields.\n\t\t\t\tfor _, failure := range failures 
{\n\t\t\t\t\tfailure.T = task.t\n\t\t\t\t\tfailure.CommitsAgo = len(revs) - task.t - 1\n\t\t\t\t\tfailure.Rev = revs[task.t]\n\t\t\t\t\tfailure.Build = task.build\n\t\t\t\t}\n\t\t\t\ttasks[i].res = failures\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t}\n\twg.Wait()\n\n\t// Gather results.\n\tfailures := []*failure{}\n\tfor _, task := range tasks {\n\t\tfailures = append(failures, task.res...)\n\t}\n\treturn failures\n}\n\nfunc extractFailures(revs []*Revision) []*failure {\n\treturn processFailureLogs(revs, func(build *Build, data []byte) []*failure {\n\t\t// TODO: OS/Arch\n\t\tlfailures, err := loganal.Extract(string(data), \"\", \"\")\n\t\tif err != nil {\n\t\t\tlog.Printf(\"%s: %v\\n\", build.LogPath(), err)\n\t\t\treturn nil\n\t\t}\n\t\tif len(lfailures) == 0 {\n\t\t\treturn nil\n\t\t}\n\n\t\tfailures := make([]*failure, 0, len(lfailures))\n\t\tfor _, lf := range lfailures {\n\t\t\t// Ignore build failures.\n\t\t\t//\n\t\t\t// TODO: This has the effect of counting these\n\t\t\t// as successes for all tests. In the best\n\t\t\t// case, this cuts down on the number of\n\t\t\t// samples per revision. 
If we have an\n\t\t\t// across-the-board build failure, it will\n\t\t\t// drive down the failure rates of all\n\t\t\t// failures and may even effect timeline\n\t\t\t// subdivision.\n\t\t\tif strings.Contains(lf.Message, \"build failed\") {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfailures = append(failures, &failure{\n\t\t\t\tFailure: lf,\n\t\t\t})\n\t\t}\n\t\treturn failures\n\t})\n}\n\nfunc grepFailures(revs []*Revision, re *regexp.Regexp) []*failure {\n\treturn processFailureLogs(revs, func(build *Build, data []byte) []*failure {\n\t\tif !re.Match(data) {\n\t\t\treturn nil\n\t\t}\n\t\treturn []*failure{new(failure)}\n\t})\n}\n\ntype failure struct {\n\t*loganal.Failure\n\n\tT          int\n\tCommitsAgo int\n\tRev        *Revision\n\tBuild      *Build\n}\n\ntype failureClass struct {\n\t// Class gives the common features of this failure class.\n\tClass loganal.Failure\n\n\t// Revs is the sequence of all revisions indexed by time (both\n\t// success and failure).\n\tRevs []*Revision\n\n\t// Failures is a slice of all failures, by order of increasing\n\t// time T. 
Note that there may be more than one failure at the\n\t// same time T.\n\tFailures []*failure\n\n\t// Test is the results of the flake test for this failure\n\t// class.\n\tTest *FlakeTestResult\n\n\t// Latest is the latest flake region (Test.All[0]).\n\tLatest *FlakeRegion\n\n\t// Current is the probability that this failure is still\n\t// happening.\n\tCurrent float64\n}\n\nfunc newFailureClass(revs []*Revision, failures []*failure) *failureClass {\n\tfc := failureClass{\n\t\tRevs:     revs,\n\t\tFailures: failures,\n\t}\n\ttimes := []int{}\n\tfor i, f := range failures {\n\t\tt := f.T\n\t\tif i == 0 || times[len(times)-1] != t {\n\t\t\ttimes = append(times, t)\n\t\t}\n\t}\n\tfc.Test = FlakeTest(times)\n\tfc.Latest = &fc.Test.All[0]\n\tfc.Current = fc.Latest.StillHappening(len(revs) - 1)\n\treturn &fc\n}\n\ntype currentSorter []*failureClass\n\nfunc (s currentSorter) Len() int {\n\treturn len(s)\n}\n\nfunc (s currentSorter) Less(i, j int) bool {\n\tif s[i].Current != s[j].Current {\n\t\treturn s[i].Current < s[j].Current\n\t}\n\tif s[i].Latest.FailureProbability != s[j].Latest.FailureProbability {\n\t\treturn s[i].Latest.FailureProbability < s[j].Latest.FailureProbability\n\t}\n\treturn s[i].Class.String() < s[j].Class.String()\n}\n\nfunc (s currentSorter) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n}\n"
  },
  {
    "path": "findflakes/paths.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"io\"\n\t\"path/filepath\"\n)\n\nfunc readPaths(r io.Reader) ([]string, error) {\n\tout := []string{}\n\tscanner := bufio.NewScanner(r)\n\tfor scanner.Scan() {\n\t\tout = append(out, filepath.Join(*flagRevDir, scanner.Text()))\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc pathFailures(revs []*Revision, paths []string) []*failure {\n\tpathSet := make(map[string]bool, len(paths))\n\tfor _, path := range paths {\n\t\tpathSet[path] = true\n\t}\n\n\tfailures := []*failure{}\n\tfor t, rev := range revs {\n\t\tfor _, build := range rev.Builds {\n\t\t\tpath := build.LogPath()\n\t\t\tif pathSet[path] {\n\t\t\t\t// TODO: Fill OS/Arch.\n\t\t\t\tfailures = append(failures, &failure{\n\t\t\t\t\tT:          t,\n\t\t\t\t\tCommitsAgo: len(revs) - t - 1,\n\t\t\t\t\tRev:        rev,\n\t\t\t\t\tBuild:      build,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\treturn failures\n}\n"
  },
  {
    "path": "findflakes/text.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n)\n\nfunc round(x float64) int {\n\treturn int(x + 0.5)\n}\n\nfunc pct(x float64) string {\n\tp := 100 * x\n\tif p >= 9.5 {\n\t\treturn fmt.Sprintf(\"%.0f%%\", p)\n\t} else if p > 0.95 {\n\t\treturn fmt.Sprintf(\"%.1f%%\", p)\n\t} else {\n\t\treturn fmt.Sprintf(\"%.2f%%\", p)\n\t}\n}\n\nfunc printTextReport(w io.Writer, classes []*failureClass) {\n\tfor _, fc := range classes {\n\t\tfmt.Fprintf(w, \"%s\\n\", fc.Class)\n\t\tprintTextFlakeReport(w, fc)\n\t\tfmt.Fprintln(w)\n\t}\n}\n\nfunc printTextFlakeReport(w io.Writer, fc *failureClass) {\n\t// TODO: Report deterministic failures better.\n\t//\n\t// TODO: Report observed OSs/Arches\n\n\tfmt.Fprintf(w, \"First observed %s (%d commits ago)\\n\", fc.Revs[fc.Latest.First], len(fc.Revs)-fc.Latest.First-1)\n\tfmt.Fprintf(w, \"Last observed  %s (%d commits ago)\\n\", fc.Revs[fc.Latest.Last], len(fc.Revs)-fc.Latest.Last-1)\n\tif fc.Latest.First == fc.Latest.Last {\n\t\tfmt.Fprintf(w, \"Isolated failure\\n\")\n\t} else {\n\t\tfmt.Fprintf(w, \"%s chance failure is still happening\\n\", pct(fc.Current))\n\t\tfmt.Fprintf(w, \"%s failure probability (%d of %d commits)\\n\", pct(fc.Latest.FailureProbability), fc.Latest.Failures, fc.Latest.Last-fc.Latest.First+1)\n\t\tfmt.Fprintf(w, \"Likely culprits:\\n\")\n\t\tfor _, c := range fc.Latest.Culprits(0.9, 10) {\n\t\t\tfmt.Fprintf(w, \"  %3d%% %s\\n\", round(100*c.P), fc.Revs[c.T].OneLine())\n\t\t}\n\t}\n\n\tif len(fc.Test.All) > 1 {\n\t\tfmt.Fprintf(w, \"Past failures:\\n\")\n\t\tfor _, reg := range fc.Test.All[1:] {\n\t\t\tif reg.First == reg.Last {\n\t\t\t\trev := fc.Revs[reg.First]\n\t\t\t\tfmt.Fprintf(w, \"  %s (isolated failure)\\n\", rev)\n\t\t\t} else {\n\t\t\t\tfmt.Fprintf(w, \"  %s to %s\\n\", fc.Revs[reg.First], 
fc.Revs[reg.Last])\n\t\t\t\tfmt.Fprintf(w, \"    %s failure probability (%d of %d commits)\\n\", pct(reg.FailureProbability), reg.Failures, reg.Last-reg.First+1)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tfmt.Fprintf(w, \"No known past failures\\n\")\n\t}\n}\n"
  },
  {
    "path": "findflakes/xdg.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"os\"\n\t\"os/user\"\n\t\"path/filepath\"\n\t\"runtime\"\n)\n\n// xdgCacheDir returns the XDG Base Directory Specification cache\n// directory.\nfunc xdgCacheDir() string {\n\tcache := os.Getenv(\"XDG_CACHE_HOME\")\n\tif cache == \"\" {\n\t\thome := os.Getenv(\"HOME\")\n\t\tif home == \"\" {\n\t\t\tu, err := user.Current()\n\t\t\tif err == nil {\n\t\t\t\thome = u.HomeDir\n\t\t\t}\n\t\t}\n\t\t// Not XDG but standard for OS X.\n\t\tif runtime.GOOS == \"darwin\" {\n\t\t\treturn filepath.Join(home, \"Library/Caches\")\n\t\t}\n\t\tcache = filepath.Join(home, \".cache\")\n\t}\n\treturn cache\n}\n\n// xdgCreateDir creates a directory and its parents in accordance with\n// the XDG Base Directory Specification.\nfunc xdgCreateDir(path string) error {\n\treturn os.MkdirAll(path, 0700)\n}\n"
  },
  {
    "path": "findtypes/main.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Command findtypes compares checkmarks failures with the types in a\n// binary to find likely matches.\n//\n// findtypes deduces the likely pointer/scalar map from the output of\n// a checkmarks failure and compares it against the pointer/scalar\n// maps of all types in a binary. The output is a scored and ranked\n// list of the most closely matching types, along with their\n// pointer/scalar maps.\npackage main\n\nimport (\n\t\"bufio\"\n\t\"debug/dwarf\"\n\t\"debug/elf\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"math/big\"\n\t\"os\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strconv\"\n)\n\nconst ptrSize = 8 // TODO: Get from DWARF.\n\nfunc main() {\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage: %s failure binary\\n\", os.Args[0])\n\t}\n\tflag.Parse()\n\tif flag.NArg() != 2 {\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\tfailPath, binPath := flag.Arg(0), flag.Arg(1)\n\n\t// Parse greyobject failure.\n\tfailFile, err := os.Open(failPath)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfailure := parseGreyobject(failFile)\n\tfailFile.Close()\n\tif failure.words == nil {\n\t\tlog.Fatalf(\"failed to parse failure message in %s\", failPath)\n\t}\n\tfmt.Print(\"failure:\")\n\tfor i, known := range failure.words {\n\t\tif i%32 == 0 {\n\t\t\tfmt.Printf(\"\\n\\t\")\n\t\t} else if i%16 == 0 {\n\t\t\tfmt.Printf(\" \")\n\t\t}\n\t\tswitch known {\n\t\tcase 0:\n\t\t\tfmt.Print(\"S\")\n\t\tcase 1:\n\t\t\tfmt.Print(\"P\")\n\t\tcase 2:\n\t\t\tfmt.Print(\"?\")\n\t\t}\n\t}\n\tfmt.Println()\n\n\t// Parse binary.\n\tf, err := elf.Open(binPath)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer f.Close()\n\td, err := f.DWARF()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Find all of the types.\n\ttype comparison struct {\n\t\tti    *typeInfo\n\t\tscore float64\n\t}\n\tvar results 
[]comparison\n\tr := d.Reader()\n\tfor {\n\t\tent, err := r.Next()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif ent == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tif ent.Tag != dwarf.TagTypedef {\n\t\t\tcontinue\n\t\t}\n\n\t\tname, ok := ent.Val(dwarf.AttrName).(string)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tbase, ok := ent.Val(dwarf.AttrType).(dwarf.Offset)\n\t\tif !ok {\n\t\t\tlog.Printf(\"type %s has unknown underlying type\", name)\n\t\t\tcontinue\n\t\t}\n\n\t\ttyp, err := d.Type(base)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tti := &typeInfo{name: name, words: int(typ.Size()+ptrSize-1) / ptrSize}\n\t\tti.processType(typ, 0)\n\t\tif ti.incomplete {\n\t\t\tlog.Printf(\"ignoring incomplete type %s\", ti.name)\n\t\t\tcontinue\n\t\t}\n\n\t\tscore := failure.compare(ti)\n\t\tresults = append(results, comparison{ti, score})\n\t}\n\n\t// Print results.\n\tsort.Slice(results, func(i, j int) bool {\n\t\treturn results[i].score < results[j].score\n\t})\n\tif len(results) > 10 {\n\t\tresults = results[len(results)-10:]\n\t}\n\tfor _, c := range results {\n\t\tfmt.Print(c.score, \" \", c.ti.name)\n\t\tfailure.printCompare(c.ti)\n\t}\n}\n\ntype typeInfo struct {\n\tname       string\n\tptr        big.Int\n\twords      int\n\tincomplete bool\n}\n\nfunc (t *typeInfo) processType(typ dwarf.Type, offset int) {\n\tswitch typ := typ.(type) {\n\tcase *dwarf.ArrayType:\n\t\tif typ.Count < 0 || typ.StrideBitSize > 0 {\n\t\t\tt.incomplete = true\n\t\t\treturn\n\t\t}\n\t\tfor i := 0; i < int(typ.Count); i++ {\n\t\t\t// TODO: Alignment?\n\t\t\tt.processType(typ.Type, offset+i*int(typ.Type.Size()))\n\t\t}\n\n\tcase *dwarf.StructType:\n\t\tif typ.Kind == \"union\" {\n\t\t\tt.incomplete = true\n\t\t\tlog.Printf(\"encountered union\")\n\t\t\treturn\n\t\t}\n\t\tif typ.Incomplete {\n\t\t\tt.incomplete = true\n\t\t\treturn\n\t\t}\n\t\tfor _, f := range typ.Field {\n\t\t\tif f.BitSize != 0 {\n\t\t\t\tt.incomplete = true\n\t\t\t\tlog.Printf(\"encountered bit 
field\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tt.processType(f.Type, offset+int(f.ByteOffset))\n\t\t}\n\n\tcase *dwarf.BoolType, *dwarf.CharType, *dwarf.ComplexType,\n\t\t*dwarf.EnumType, *dwarf.FloatType, *dwarf.IntType,\n\t\t*dwarf.UcharType, *dwarf.UintType:\n\t\t// Nothing\n\n\tcase *dwarf.PtrType:\n\t\tif typ.Size() != ptrSize {\n\t\t\tlog.Fatalf(\"funny PtrSize size: %d\", typ.Size())\n\t\t}\n\t\tif offset%ptrSize != 0 {\n\t\t\tlog.Fatal(\"unaligned pointer\")\n\t\t}\n\t\tt.ptr.SetBit(&t.ptr, offset/ptrSize, 1)\n\n\tcase *dwarf.FuncType:\n\t\t// Size is -1.\n\t\tif offset%ptrSize != 0 {\n\t\t\tlog.Fatal(\"unaligned pointer\")\n\t\t}\n\t\tt.ptr.SetBit(&t.ptr, offset/ptrSize, 1)\n\n\tcase *dwarf.QualType:\n\t\tt.processType(typ.Type, offset)\n\n\tcase *dwarf.TypedefType:\n\t\tt.processType(typ.Type, offset)\n\n\tcase *dwarf.UnspecifiedType:\n\t\tt.incomplete = true\n\t\tlog.Printf(\"encountered UnspecifiedType\")\n\n\tcase *dwarf.VoidType:\n\t\tt.incomplete = true\n\t\tlog.Printf(\"encountered VoidType\")\n\t}\n}\n\ntype greyobjectFailure struct {\n\twords []int // 0 scalar, 1 pointer, 2 unknown\n}\n\nvar (\n\tspanRe = regexp.MustCompile(`base=.* s\\.elemsize=(\\d+)`)\n\tbaseRe = regexp.MustCompile(`\\*\\(base\\+(\\d+)\\) = (0x[0-9a-f]+)( <==)?$`)\n)\n\nfunc parseGreyobject(r io.Reader) *greyobjectFailure {\n\tvar failure greyobjectFailure\n\tscanner := bufio.NewScanner(r)\n\tfor scanner.Scan() {\n\t\tl := scanner.Text()\n\n\t\tsubs := spanRe.FindStringSubmatch(l)\n\t\tif subs != nil {\n\t\t\telemsize, _ := strconv.Atoi(subs[1])\n\t\t\tfailure.words = make([]int, elemsize/ptrSize)\n\t\t\tfor i := range failure.words {\n\t\t\t\tfailure.words[i] = 2\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tsubs = baseRe.FindStringSubmatch(l)\n\t\tif subs == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\toffset, _ := strconv.ParseInt(subs[1], 0, 64)\n\t\tval, _ := strconv.ParseInt(subs[2], 0, 64)\n\n\t\t// TODO: This only recognizes heap pointers. 
Maybe\n\t\t// look at the binary to figure out reasonable global\n\t\t// pointers?\n\t\tknown := 2\n\t\tif val>>32 == 0xc4 {\n\t\t\tknown = 1\n\t\t} else if val != 0 {\n\t\t\tknown = 0\n\t\t}\n\n\t\tfailure.words[offset/ptrSize] = known\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\tlog.Fatal(\"reading greyobject output:\", err)\n\t}\n\treturn &failure\n}\n\nfunc (f *greyobjectFailure) compare(ti *typeInfo) float64 {\n\tscore, denom := 0.0, 0.0\n\tfor i, known := range f.words {\n\t\tif known == 2 {\n\t\t\tcontinue\n\t\t}\n\t\tdenom++\n\t\tif ti.words < i {\n\t\t\tscore -= 1\n\t\t} else if int(ti.ptr.Bit(i)) == known {\n\t\t\tscore += 1\n\t\t} else {\n\t\t\tscore -= 1\n\t\t}\n\t}\n\tif ti.words > len(f.words) {\n\t\tscore -= float64(ti.words - len(f.words))\n\t}\n\treturn score / denom\n}\n\nfunc (f *greyobjectFailure) printCompare(ti *typeInfo) {\n\tl := ti.words\n\tif len(f.words) > l {\n\t\tl = len(f.words)\n\t}\n\tfor i := 0; i < l; i++ {\n\t\tif i%32 == 0 {\n\t\t\tfmt.Printf(\"\\n\\t\")\n\t\t} else if i%16 == 0 {\n\t\t\tfmt.Printf(\" \")\n\t\t}\n\n\t\thave := int(ti.ptr.Bit(i))\n\n\t\tvar want int\n\t\tif i < len(f.words) {\n\t\t\twant = f.words[i]\n\t\t} else {\n\t\t\twant = 1 - have\n\t\t}\n\n\t\tswitch {\n\t\tcase want == 2:\n\t\t\tfmt.Print(\"?\")\n\t\tcase have == want:\n\t\t\tif have == 0 {\n\t\t\t\tfmt.Print(\"S\")\n\t\t\t} else {\n\t\t\t\tfmt.Print(\"P\")\n\t\t\t}\n\t\tcase have != want:\n\t\t\tif have == 0 {\n\t\t\t\tfmt.Print(\"s\")\n\t\t\t} else {\n\t\t\t\tfmt.Print(\"p\")\n\t\t\t}\n\t\t}\n\t}\n\tfmt.Println()\n}\n"
  },
  {
    "path": "foreachplatform/go.mod",
    "content": "module github.com/aclements/go-misc/foreachplatform\n\ngo 1.21\n"
  },
  {
    "path": "foreachplatform/main.go",
    "content": "// Copyright 2023 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"cmp\"\n\t\"encoding/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os/exec\"\n\t\"slices\"\n\t\"strings\"\n)\n\ntype Platform struct {\n\tGOOS   string\n\tGOARCH string\n\tSetCgo bool\n\tCgo    bool\n\tRace   bool\n}\n\nfunc main() {\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"usage: foreachplatform [-list | command]\\n\\n\")\n\t\tfmt.Fprintf(os.Stderr, \"Run command with every Go platform environment.\\n\\n\")\n\t\tfmt.Fprintf(os.Stderr, \"Examples:\\n\\n\")\n\t\tfmt.Fprintf(os.Stderr, \"Check that the runtime builds in all configurations:\\n\")\n\t\tfmt.Fprintf(os.Stderr, \"\\tforeachplatform go test -c runtime\\n\")\n\t}\n\tflagList := flag.Bool(\"list\", false, \"list platforms instead of running a command\")\n\tflag.Parse()\n\tsubcmd := flag.Args()\n\tif *flagList && len(subcmd) > 0 {\n\t\tfmt.Fprintf(os.Stderr, \"cannot use both -list and command\\n\")\n\t\tos.Exit(2)\n\t}\n\tif !*flagList && len(subcmd) == 0 {\n\t\tflag.Usage()\n\t\tos.Exit(2)\n\t}\n\n\tplats := getPlatforms()\n\tif *flagList {\n\t\tfor _, plat := range plats {\n\t\t\tfmt.Println(plat)\n\t\t}\n\t\treturn\n\t}\n\n\t// TODO: Check if there are any source files *not* covered by plats.\n\n\t// TODO: Run platforms in parallel.\n\n\tfailed := false\n\tfor _, plat := range plats {\n\t\tfmt.Fprintf(os.Stderr, \"# %s\\n\", plat.String())\n\t\tvar buf strings.Builder\n\t\tcmd := exec.Command(subcmd[0], subcmd[1:]...)\n\t\tcmd.Stdout = &buf\n\t\tcmd.Stderr = &buf\n\t\tcmd.Env = append(cmd.Environ(), plat.Env()...)\n\t\terr := cmd.Run()\n\t\tif err != nil {\n\t\t\tif plat.FailOK(buf.String()) {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"# (ignoring expected failure)\\n\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfmt.Fprintf(os.Stderr, \"%s\", buf.String())\n\t\t\tfmt.Fprintln(os.Stderr, 
err)\n\t\t\tfailed = true\n\t\t}\n\t}\n\tif failed {\n\t\tos.Exit(1)\n\t}\n}\n\nfunc (p Platform) String() string {\n\tvar b strings.Builder\n\tfmt.Fprintf(&b, \"GOOS=%-9s GOARCH=%s\", p.GOOS, p.GOARCH)\n\tif p.SetCgo {\n\t\tfmt.Fprintf(&b, \" CGO_ENABLED=%-5v\", p.Cgo)\n\t}\n\tif p.Race {\n\t\tfmt.Fprintf(&b, \" GOFLAGS=-race\")\n\t}\n\treturn b.String()\n}\n\nfunc (p Platform) Env() []string {\n\tenv := []string{\"GOOS=\" + p.GOOS, \"GOARCH=\" + p.GOARCH}\n\tif p.SetCgo {\n\t\tif p.Cgo {\n\t\t\tenv = append(env, \"CGO_ENABLED=1\")\n\t\t} else {\n\t\t\tenv = append(env, \"CGO_ENABLED=0\")\n\t\t}\n\t}\n\tif p.Race {\n\t\tenv = append(env, \"GOFLAGS=-race\")\n\t}\n\treturn env\n}\n\nfunc (p Platform) FailOK(msg string) bool {\n\tif p.GOOS == \"android\" || p.GOOS == \"ios\" {\n\t\tif strings.Contains(msg, \"loadinternal: cannot find runtime/cgo\\n\") {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc goTool[T any](subcmd ...string) T {\n\tcmd := exec.Command(\"go\", subcmd...)\n\tdata, err := cmd.Output()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tvar out T\n\tif err := json.Unmarshal(data, &out); err != nil {\n\t\tlog.Fatalf(\"go %s: error parsing output: %s\", strings.Join(subcmd, \" \"), err)\n\t}\n\treturn out\n}\n\nfunc getPlatforms() []Platform {\n\tvar plats []Platform\n\n\tenv := goTool[map[string]string](\"env\", \"-json\")\n\n\t// Add the host GOOS/GOARCH, with different combinations of Cgo and Race.\n\thost := Platform{\n\t\tGOOS:   env[\"GOOS\"],\n\t\tGOARCH: env[\"GOARCH\"],\n\t}\n\tcgos := []bool{false}\n\tvar setCgo bool\n\tswitch env[\"CGO_ENABLED\"] {\n\tcase \"0\":\n\t\tsetCgo = true\n\tcase \"1\":\n\t\tcgos = []bool{true, false}\n\t\tsetCgo = true\n\t}\n\tfor _, race := range []bool{false, true} {\n\t\thost.Race = race\n\t\tfor _, cgo := range cgos {\n\t\t\thost.Cgo = cgo\n\t\t\thost.SetCgo = setCgo\n\t\t\tif race && !cgo {\n\t\t\t\t// cgo requires race.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tplats = append(plats, 
host)\n\t\t}\n\t}\n\n\t// Add the other platforms.\n\ttype distPlatform struct {\n\t\tGOOS       string\n\t\tGOARCH     string\n\t\tFirstClass bool\n\t}\n\tdistList := goTool[[]distPlatform](\"tool\", \"dist\", \"list\", \"-json\")\n\tslices.SortFunc(distList, func(a, b distPlatform) int {\n\t\t// Sort first-class ports first, then our host OS, then alphabetically\n\t\t// by GOOS, then GOARCH.\n\t\treturn or(\n\t\t\ttrueFalse(a.FirstClass, b.FirstClass),\n\t\t\ttrueFalse(a.GOOS == host.GOOS, b.GOOS == host.GOOS),\n\t\t\tcmp.Compare(a.GOOS, b.GOOS),\n\t\t\tcmp.Compare(a.GOARCH, b.GOARCH))\n\t})\n\tfor _, distPlatform := range distList {\n\t\tif distPlatform.GOOS == host.GOOS && distPlatform.GOARCH == host.GOARCH {\n\t\t\tcontinue\n\t\t}\n\t\t// In general we can't build cgo on any non-host platform, so we\n\t\t// ignore that dimension.\n\t\t//\n\t\t// TODO: In some cases we can.\n\t\tplats = append(plats, Platform{\n\t\t\tGOOS:   distPlatform.GOOS,\n\t\t\tGOARCH: distPlatform.GOARCH,\n\t\t})\n\t}\n\n\treturn plats\n}\n\nfunc trueFalse(a, b bool) int {\n\tif a == b {\n\t\treturn 0\n\t}\n\tif a {\n\t\treturn -1\n\t}\n\treturn 1\n}\n\nfunc or[T comparable](vals ...T) T {\n\tvar zero T\n\tfor _, val := range vals {\n\t\tif val != zero {\n\t\t\treturn val\n\t\t}\n\t}\n\treturn zero\n}\n"
  },
  {
    "path": "gc-S/go.mod",
    "content": "module github.com/aclements/go-misc/gc-S\n\ngo 1.20\n"
  },
  {
    "path": "gc-S/main.go",
    "content": "// Copyright 2024 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// gc-S reads the output of compile -S to find a symbol and symbols it\n// references.\npackage main\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"text/tabwriter\"\n)\n\nfunc main() {\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage: <compile -S output> | %s regexp\\n\", os.Args[0])\n\t\tflag.PrintDefaults()\n\t}\n\n\tflag.Parse()\n\tif flag.NArg() != 1 {\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\tregexp, err := regexp.Compile(os.Args[1])\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"regexp error: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tsymCh := parseSyms(os.Stdin)\n\n\t// Collect all symbols. For matching symbols, print them immediately and add\n\t// them as roots to the trace.\n\tsyms := make(map[string]Sym)\n\tq := []string{}\n\tprinted := make(map[string]bool) // false = added, not printed\n\tfor sym := range symCh {\n\t\tif regexp.MatchString(sym.name) {\n\t\t\tsym.Print(os.Stdout)\n\t\t\tprinted[sym.name] = true\n\t\t\tq = append(q, sym.name)\n\t\t}\n\t\tsyms[sym.name] = sym\n\t}\n\n\t// Trace referenced symbols.\n\tfor len(q) > 0 {\n\t\tif sym, ok := syms[q[0]]; ok {\n\t\t\tif !printed[q[0]] {\n\t\t\t\tprinted[q[0]] = true\n\t\t\t\tsym.Print(os.Stdout)\n\t\t\t}\n\t\t\tfor _, ref := range sym.Refs() {\n\t\t\t\tif _, ok := printed[ref]; ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tprinted[ref] = false\n\t\t\t\tq = append(q, ref)\n\t\t\t}\n\t\t}\n\t\tq = q[1:]\n\t}\n}\n\ntype Sym struct {\n\tname string\n\tdata string\n}\n\nfunc parseSyms(r io.Reader) <-chan Sym {\n\tch := make(chan Sym)\n\tgo func() {\n\t\tdefer close(ch)\n\n\t\tscanner := bufio.NewScanner(r)\n\t\tvar accum bytes.Buffer\n\t\tvar name string\n\t\tflush := func() {\n\t\t\tif name != \"\" {\n\t\t\t\tch <- Sym{name, 
accum.String()}\n\t\t\t\tname = \"\"\n\t\t\t\taccum.Reset()\n\t\t\t}\n\t\t}\n\t\tfor scanner.Scan() {\n\t\t\tl := scanner.Text()\n\t\t\tswitch {\n\t\t\tcase strings.HasPrefix(l, \"#\"):\n\t\t\t\t// Ignore\n\t\t\tdefault:\n\t\t\t\tflush()\n\t\t\t\tname, _, _ = strings.Cut(l, \" \")\n\t\t\t\tfallthrough\n\t\t\tcase len(l) == 0 || l[0] == '\\t':\n\t\t\t\taccum.WriteString(l)\n\t\t\t\taccum.WriteByte('\\n')\n\t\t\t}\n\t\t}\n\t\tflush()\n\t\tif err := scanner.Err(); err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, \"reading standard input:\", err)\n\t\t}\n\t}()\n\treturn ch\n}\n\nvar printPathRe = regexp.MustCompile(`(?m)^\\t0x[0-9a-f]+ [0-9]+ \\(([^)]+)\\)`)\n\nfunc (s Sym) Print(w io.Writer) {\n\t// Simplify paths and align tabs.\n\ttw := tabwriter.NewWriter(w, 1, 4, 1, ' ', tabwriter.TabIndent)\n\tprev := 0\n\tfor _, idx := range printPathRe.FindAllStringSubmatchIndex(s.data, -1) {\n\t\ta, b := idx[2], idx[3]\n\t\tpath := s.data[a:b]\n\t\tif path == \"<unknown line number>\" {\n\t\t\tpath = \"???\"\n\t\t} else if filepath.IsAbs(path) {\n\t\t\tpath = \"…/\" + filepath.Base(path)\n\t\t}\n\t\tfmt.Fprintf(tw, \"%s%s\", s.data[prev:a], path)\n\t\tprev = b\n\t}\n\tfmt.Fprintf(tw, \"%s\", s.data[prev:])\n\ttw.Flush()\n}\n\nvar refRe = regexp.MustCompile(`\\b[^\\s]+\\(SB\\)`)\n\nfunc (s Sym) Refs() []string {\n\trefs := refRe.FindAllString(s.data, -1)\n\tfor i, ref := range refs {\n\t\trefs[i] = ref[:len(ref)-len(\"(SB)\")]\n\t}\n\treturn refs\n}\n"
  },
  {
    "path": "gcdense/test.py",
    "content": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport math\nimport random\nimport collections\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass Graph:\n    def __init__(self, nnodes):\n        self.nnodes = nnodes\n        self.out = [set() for i in range(nnodes)]\n\n    def pageOf(self, node):\n        return self.address[node] // ADDRS_PER_PAGE\n\n    def bucketOf(self, node):\n        return self.address[node] // ADDRS_PER_BUCKET\n\nADDRS_PER_PAGE = 10\nPAGES_PER_BUCKET = 10\nADDRS_PER_BUCKET = ADDRS_PER_PAGE * PAGES_PER_BUCKET\n\ndef addressGraph(g, density=0.7):\n    \"\"\"Assign an allocation address to each node.\"\"\"\n    addresses = list(range(int(math.ceil(g.nnodes / density))))\n    random.shuffle(addresses)\n    g.address = addresses[:g.nnodes]\n\n#TLB_ENTRIES = 64 + 1024     # Haswell\nTLB_ENTRIES = 64\n\nclass TLB:\n    def __init__(self):\n        self.cache = collections.OrderedDict()\n        self.misses = 0\n\n    def touch(self, obj):\n        page = obj // ADDRS_PER_PAGE\n        if page in self.cache:\n            self.cache.move_to_end(page)\n            return\n        self.misses += 1\n        if len(self.cache) >= TLB_ENTRIES:\n            # Evict.\n            self.cache.popitem(last=False)\n        self.cache[page] = True\n\ndef genERGraph(n, p):\n    \"\"\"Generate an Erdős-Rényi random graph of n nodes.\"\"\"\n\n    g = Graph(n)\n    for i in range(n):\n        # for j in range(n):\n        #     if random.random() < p:\n        #         g.out[i].add(j)\n        # Approximate binomial distribution.\n        nout = int(0.5 + random.gauss(n * p, n * p * (1 - p)))\n        out = g.out[i]\n        while len(out) < nout:\n            out.add(random.randrange(n))\n    return g\n\ndef genDeBruijn(degree, power):\n    n = degree ** power\n    g = Graph(n)\n    for i in range(n):\n        nextnode = i * degree % n\n        for digit in range(degree):\n            g.out[i].add(nextnode + digit)\n    return g\n\ndef 
costLinear(n):\n    return n\n\ndef costSqrt(n):\n    return n**0.5\n\ndef costAffine10(n):\n    return 10 + n\n\ncosts = [\n    (\"linear\", costLinear),\n    #(\"sqrt\", costSqrt),\n    # Minimizing affine cost just means minimizing step count\n    #(\"affine10\", costAffine10),\n]\n\ndef argmax(iterable):\n    return \n\ndef pickFullest(buckets):\n    return max(enumerate(buckets), key=lambda x: len(x[1]))[0]\n\ndef pickEmptiest(buckets):\n    minidx = None\n    for i, b in enumerate(buckets):\n        if b and (minidx is None or len(b) < len(buckets[minidx])):\n            minidx = i\n    return minidx\n\ndef pickRandom(buckets):\n    nonempty = [i for i, b in enumerate(buckets) if b]\n    return random.choice(nonempty)\n\ndef pickFirst(buckets):\n    for i, b in enumerate(buckets):\n        if b:\n            return i\n\ndef pickQuantile(quantile):\n    def pick(buckets):\n        nonempty = [i for i, b in enumerate(buckets) if b]\n        nonempty.sort(key=lambda i: len(buckets[i]))\n        return nonempty[int(math.floor((len(nonempty) - 1) * quantile))]\n    return pick\n\ndef pickAlternate10(buckets):\n    fullest = pickFullest(buckets)\n    emptiest = pickEmptiest(buckets)\n    if len(buckets[fullest]) >= 10 * len(buckets[emptiest]):\n        return fullest\n    return emptiest\n\npicks = [\n    (\"fullest\", pickFullest),\n    (\"Q3\", pickQuantile(0.75)),\n    (\"median\", pickQuantile(0.5)),\n    (\"Q1\", pickQuantile(0.25)),\n    (\"emptiest\", pickEmptiest),\n    (\"random\", pickRandom),\n    (\"first\", pickFirst),\n    #(\"alternate10\", pickAlternate10), # Not very interesting.\n]\n\nREPROCESS = True\n\ndef run(g, nroots, pick, cost):\n    visited = [False] * g.nnodes\n    buckets = [[] for i in range(g.nnodes)]\n    tlb = TLB()\n\n    # Queue roots\n    for node in range(nroots):\n        buckets[g.bucketOf(node)].append(node)\n\n    # Process\n    scanCost, steps, capacity = 0, 0, []\n    while any(buckets):\n        bidx = pick(buckets)\n\n    
    # Fetch and clear bucket, since we may add more pointers while\n        # processing this bucket.\n        nodes = buckets[bidx]\n        buckets[bidx] = []\n\n        # Process bucket\n        for node in nodes:\n            # Assume an edge queuing model\n            tlb.touch(-g.address[node]/32 - 1)\n            if visited[node]:\n                continue\n            visited[node] = True\n\n            tlb.touch(g.address[node])\n            for nextnode in g.out[node]:\n                nextbucket = g.bucketOf(nextnode)\n                if REPROCESS and nextbucket == bidx:\n                    nodes.append(nextnode)\n                else:\n                    buckets[nextbucket].append(nextnode)\n\n        scanCost += cost(len(nodes))\n        steps += 1\n        capacity.append(len(nodes))\n\n    meanCapacity = sum(capacity) / len(capacity)\n    return scanCost, steps, meanCapacity, capacity, tlb.misses\n\ndef runGlobalQueue(g, nroots):\n    \"\"\"Simulate the regular global work queue algorithm.\"\"\"\n    visited = [False] * g.nnodes\n    queue = collections.deque(range(nroots))\n    tlb = TLB()\n\n    while len(queue):\n        obj = queue.pop()\n        #page = g.pageOf(obj)\n\n        # Assume the mark bits cover 32X less than the objects.\n        tlb.touch(-g.address[obj]/32 - 1)\n        if visited[obj]:\n            continue\n        visited[obj] = True\n\n        # Scan the object.\n        tlb.touch(g.address[obj])\n        for nextnode in g.out[obj]:\n            queue.appendleft(nextnode)\n\n    # steps is number of buckets, but misses is number of pages. 
Scale\n    # misses so they're comparable.\n    return tlb.misses // PAGES_PER_BUCKET, tlb.misses\n\ndef ecdf(data):\n    yvals = (np.arange(len(data)) + 1) / len(data)\n    plt.plot(np.sort(data), yvals, drawstyle='steps')\n\ndef main():\n    NROOTS = 10\n\n    graph = genERGraph(2000, 0.01)\n    addressGraph(graph)\n\n    globalMisses, _ = runGlobalQueue(graph, NROOTS)\n    print(\"%s\\t\\t\\t\\t%s\" % (\"global\", globalMisses))\n\n    for (costName, cost) in costs:\n        for (pickName, pick) in picks:\n            scanCost, steps, meanCapacity, capacity, _ = run(graph, NROOTS, pick, cost)\n            print(\"%s\\t%-10s\\t%g\\t%s\\t%g\" % (costName, pickName, scanCost, steps, meanCapacity))\n            ecdf(capacity)\n    plt.show()\n\ndef curve():\n    NROOTS = 10\n\n    for nodes in range(1000, 10000+1000, 1000):\n        graph = genERGraph(nodes, 0.001)\n    # for power in range(8, 15):\n    #     graph = genDeBruijn(2, power)\n    # for power in range(4, 9):\n    #     graph = genDeBruijn(4, power)\n        addressGraph(graph)\n\n        heapsize = sum(len(o) for o in graph.out)\n\n        _, misses = runGlobalQueue(graph, NROOTS)\n        print(\"%d,%d,global\" % (heapsize, misses))\n        _, _, _, _, misses = run(graph, NROOTS, pickFullest, costLinear)\n        print(\"%d,%d,sharded\" % (heapsize, misses))\n\nmain()\n#curve()\n"
  },
  {
    "path": "git-p/gerrit.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"reflect\"\n\t\"strings\"\n\t\"time\"\n)\n\n// GerritChangeInfo is the JSON struct returned by a Gerrit CL query.\ntype GerritChangeInfo struct {\n\tID                     string\n\tProject                string\n\tBranch                 string\n\tChangeId               string `json:\"change_id\"`\n\tSubject                string\n\tStatus                 string\n\tCreated                string\n\tUpdated                string\n\tMergeable              bool\n\tSubmittable            bool // Requires SUBMITTABLE\n\tInsertions             int\n\tDeletions              int\n\tUnresolvedCommentCount int `json:\"unresolved_comment_count\"`\n\tNumber                 int `json:\"_number\"`\n\tOwner                  *GerritAccount\n\tLabels                 map[string]*GerritLabel    ``                        // Requires LABELS or DETAILED_LABELS\n\tCurrentRevision        string                     `json:\"current_revision\"` // Requires CURRENT_REVISION or ALL_REVISIONS\n\tRevisions              map[string]*GerritRevision ``                        // Requires CURRENT_REVISION or ALL_REVISIONS\n\tMessages               []*GerritChangeMessageInfo ``                        // Requires MESSAGES\n}\n\n// GerritChangeMessageInfo is the JSON struct for a Gerrit ChangeMessageInfo.\ntype GerritChangeMessageInfo struct {\n\tAuthor   *GerritAccount\n\tMessage  string\n\tPatchSet int `json:\"_revision_number\"`\n\tTag      string\n}\n\n// GerritLabel is the JSON struct for a Gerrit LabelInfo.\ntype GerritLabel struct {\n\tOptional bool\n\tBlocking bool\n\tApproved *GerritAccount\n\tRejected *GerritAccount\n\tAll      []*GerritApproval\n}\n\n// GerritAccount is the JSON struct for a 
Gerrit AccountInfo.\ntype GerritAccount struct {\n\tID       int    `json:\"_account_id\"` // Requires DETAILED_ACCOUNTS\n\tName     string // Requires DETAILED_ACCOUNTS\n\tEmail    string // Requires DETAILED_ACCOUNTS\n\tUsername string // Requires DETAILED_ACCOUNTS\n}\n\n// GerritApproval is the JSON struct for a Gerrit ApprovalInfo.\ntype GerritApproval struct {\n\tGerritAccount\n\tValue int\n\tDate  string\n}\n\n// GerritRevision is the JSON struct for a Gerrit RevisionInfo.\ntype GerritRevision struct {\n\tNumber int `json:\"_number\"`\n\tRef    string\n}\n\ntype Gerrit struct {\n\turl     string\n\tproject string\n\treq     chan<- *GerritChanges\n}\n\nfunc NewGerrit(gerritUrl string) (*Gerrit, error) {\n\turl, err := url.Parse(gerritUrl)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse origin URL %q: %w\", gerritUrl, err)\n\t}\n\tsuf := \".googlesource.com\"\n\tif !strings.HasSuffix(url.Host, suf) {\n\t\treturn nil, fmt.Errorf(\"origin URL %q host does not end in %q (not Gerrit?)\", url, suf)\n\t}\n\tif url.Scheme != \"https\" {\n\t\treturn nil, fmt.Errorf(\"origin URL %q must be https\", url)\n\t}\n\t// Remove trailing slash from the origin, if any.\n\turl.Path = strings.TrimRight(url.Path, \"/\")\n\t// The path is now the project name (with a leading /).\n\tif url.Path == \"\" || strings.Contains(url.Path[1:], \"/\") {\n\t\treturn nil, fmt.Errorf(\"origin URL %q path must be a single non-empty project name\", url)\n\t}\n\tproject := url.Path[1:]\n\t// Drop the project from the URL\n\turl.Path = \"\"\n\t// The API host adds \"-review\".\n\ti := len(url.Host) - len(suf)\n\turl.Host = url.Host[:i] + \"-review\" + url.Host[i:]\n\n\tch := make(chan *GerritChanges, 10)\n\tg := &Gerrit{url.String(), project, ch}\n\tgo func() {\n\t\tdone := false\n\t\tfor !done {\n\t\t\t// Pull queries off the channel in batches of\n\t\t\t// up to 10 (Gerrit's limit). 
Wait a tiny\n\t\t\t// amount of time to get a batch.\n\t\t\tvar batch []*GerritChanges\n\t\t\ttimeout := time.After(1 * time.Millisecond)\n\t\tloop:\n\t\t\tfor len(batch) < 10 {\n\t\t\t\tselect {\n\t\t\t\tcase req, ok := <-ch:\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tdone = true\n\t\t\t\t\t\tbreak loop\n\t\t\t\t\t}\n\t\t\t\t\tbatch = append(batch, req)\n\t\t\t\tcase <-timeout:\n\t\t\t\t\tbreak loop\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif len(batch) > 0 {\n\t\t\t\tg.queryChanges(batch)\n\t\t\t}\n\t\t}\n\t}()\n\treturn g, nil\n}\n\ntype GerritChanges struct {\n\tquery   string\n\toptions []string\n\n\tresult []*GerritChangeInfo\n\terr    error\n\tdone   chan struct{}\n}\n\nfunc (req *GerritChanges) Wait() ([]*GerritChangeInfo, error) {\n\t<-req.done\n\treturn req.result, req.err\n}\n\nfunc (g *Gerrit) QueryChanges(query string, options ...string) *GerritChanges {\n\treq := &GerritChanges{query: query, options: options, done: make(chan struct{})}\n\tg.req <- req\n\treturn req\n}\n\nfunc (g *Gerrit) queryChanges(queries []*GerritChanges) {\n\t// Split up queries by consistent options.\n\tsubs := make([][]*GerritChanges, 1)\n\toptions := queries[0].options\n\tfor _, q := range queries {\n\t\tif !reflect.DeepEqual(q.options, options) {\n\t\t\tsubs = append(subs, nil)\n\t\t\toptions = q.options\n\t\t}\n\t\tsubs[len(subs)-1] = append(subs[len(subs)-1], q)\n\t}\n\tfor _, subQueries := range subs {\n\t\tg.queryChanges1(subQueries, subQueries[0].options)\n\t}\n}\n\nfunc (g *Gerrit) queryChanges1(queries []*GerritChanges, options []string) {\n\tfailAll := func(err error) {\n\t\tfor _, q := range queries {\n\t\t\tq.err = err\n\t\t\tclose(q.done)\n\t\t}\n\t}\n\n\t// Construct query.\n\tvar queryParams []string\n\tfor _, q := range queries {\n\t\tqueryParams = append(queryParams, \"q=\"+url.QueryEscape(q.query))\n\t}\n\tfor _, opt := range options {\n\t\tqueryParams = append(queryParams, \"o=\"+opt)\n\t}\n\tqueryUrl := g.url + \"/changes/?\" + strings.Join(queryParams, \"&\")\n\n\t// Get 
results.\n\tresp, err := http.Get(queryUrl)\n\tif err != nil {\n\t\tfailAll(err)\n\t\treturn\n\t}\n\tbody, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\tfailAll(err)\n\t\treturn\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\tfailAll(fmt.Errorf(\"%s: %s\", queryUrl, resp.Status))\n\t\treturn\n\t}\n\ti := bytes.IndexByte(body, '\\n')\n\tif i < 0 {\n\t\tfailAll(fmt.Errorf(\"%s: malformed json response\", queryUrl))\n\t\treturn\n\t}\n\tbody = body[i:]\n\tvar target interface{}\n\tvar changes [][]*GerritChangeInfo\n\tif len(queries) == 1 {\n\t\tchanges = make([][]*GerritChangeInfo, 1)\n\t\ttarget = &changes[0]\n\t} else {\n\t\ttarget = &changes\n\t}\n\tif err := json.Unmarshal(body, target); err != nil {\n\t\tfailAll(fmt.Errorf(\"%s: malformed json response\", queryUrl))\n\t\treturn\n\t}\n\tif debugGerrit {\n\t\tr, _ := json.MarshalIndent(target, \"\", \"    \")\n\t\tlog.Printf(\"GET %s =>\\n%s\", queryUrl, r)\n\t}\n\tif len(changes) != len(queries) {\n\t\tfailAll(fmt.Errorf(\"%s: made %d queries, but got %d responses\", queryUrl, len(queries), len(changes)))\n\t\treturn\n\t}\n\n\t// Complete requests.\n\tfor i, q := range queries {\n\t\tq.result = changes[i]\n\t\tclose(q.done)\n\t}\n}\n"
  },
  {
    "path": "git-p/git.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os/exec\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n// git runs git with args and returns its output.\nfunc git(args ...string) string {\n\tcmd := exec.Command(\"git\", args...)\n\tout, err := cmd.Output()\n\tif err != nil {\n\t\tif err, ok := err.(*exec.ExitError); ok {\n\t\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", string(err.Stderr))\n\t\t}\n\t\tlog.Fatalf(\"git %s failed: %s\", shellEscapeList(args), err)\n\t}\n\treturn strings.TrimSuffix(string(out), \"\\n\")\n}\n\n// tryGit runs git with args and returns its output and a non-nil\n// error if the command exits with a non-zero status.\nfunc tryGit(args ...string) (string, error) {\n\tcmd := exec.Command(\"git\", args...)\n\tout, err := cmd.CombinedOutput()\n\tif _, ok := err.(*exec.ExitError); err != nil && !ok {\n\t\tlog.Fatalf(\"git %s failed: %s\", shellEscapeList(args), err)\n\t}\n\treturn strings.TrimSuffix(string(out), \"\\n\"), err\n}\n\nfunc lines(s string) []string {\n\tlines := strings.Split(s, \"\\n\")\n\tif len(lines) > 0 && lines[len(lines)-1] == \"\" {\n\t\tlines = lines[:len(lines)-1]\n\t}\n\treturn lines\n}\n\n// upstreamOf returns the full upstream ref name of the given ref, or\n// \"\".\nfunc upstreamOf(ref string) string {\n\t// This fails with code 128 and \"fatal: no upstream configured\n\t// for branch 'xxx'\" if there's no upstream. 
It also fails\n\t// with 128 and \"fatal: HEAD does not point to a branch\" if\n\t// ref is not a branch or a symbolic ref to a branch.\n\t//\n\t// The @{u} syntax requires a branchname, not a refname, so\n\t// strip the ref to a branch name.\n\tref = strings.TrimPrefix(ref, \"refs/heads/\")\n\tout, err := tryGit(\"rev-parse\", \"--symbolic-full-name\", ref+\"@{u}\")\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn out\n}\n\n// gitPatchID returns the git patch ID of commit, which is effectively\n// a hash of that commit's diff. See man git-patch-id for details.\nfunc gitPatchID(commit string) (string, error) {\n\tvar err error\n\t// Run git diff-tree -p $commit -- | git patch-id --stable.\n\tdiffTree := exec.Command(\"git\", \"diff-tree\", \"-p\", commit, \"--\")\n\tpatchID := exec.Command(\"git\", \"patch-id\", \"--stable\")\n\tr, w, err := os.Pipe()\n\tif err != nil {\n\t\tlog.Fatal(\"failed to create pipe: \", err)\n\t}\n\tpatchID.Stdin, diffTree.Stdout = r, w\n\tif err := diffTree.Start(); err != nil {\n\t\tlog.Fatalf(\"failed to start %s: %s\", shellEscapeList(diffTree.Args), err)\n\t}\n\tw.Close()\n\tout, err := patchID.Output()\n\tr.Close()\n\tif err != nil {\n\t\tif err, ok := err.(*exec.ExitError); ok {\n\t\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", string(err.Stderr))\n\t\t}\n\t\tlog.Fatalf(\"%s failed: %s\", shellEscapeList(patchID.Args), err)\n\t}\n\tif diffTree.Wait() != nil {\n\t\treturn \"\", fmt.Errorf(\"bad revision %q\", commit)\n\t}\n\tfs := bytes.Fields(out)\n\tif len(fs) != 2 {\n\t\tlog.Fatalf(\"unexpected output from %s: %s\", shellEscapeList(patchID.Args), out)\n\t}\n\treturn string(fs[0]), nil\n}\n\n// gitCommitMessage returns the commit message for commit.\nfunc gitCommitMessage(commit string) (string, error) {\n\t// Get the commit object.\n\tobj, err := tryGit(\"cat-file\", \"commit\", commit)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"bad revision %q\", commit)\n\t}\n\t// Extract the commit message.\n\tif i := strings.Index(obj, 
\"\\n\\n\"); i >= 0 {\n\t\treturn obj[i+2:], nil\n\t}\n\treturn \"\", nil\n}\n\nvar gerritFields = map[string]bool{\"Reviewed-on\": true, \"Run-TryBot\": true, \"TryBot-Result\": true, \"Reviewed-by\": true}\n\n// canonGerritMessage strips Gerrit-added fields from a commit message.\nfunc canonGerritMessage(msg string) string {\n\tmsg = strings.TrimRight(msg, \"\\n\")\n\tfor {\n\t\t// Consume fields from the end of the message.\n\t\ti := strings.LastIndex(msg, \"\\n\")\n\t\tif i < 0 {\n\t\t\tbreak\n\t\t}\n\t\tsep := i + strings.Index(msg[i:], \": \")\n\t\tif sep < i {\n\t\t\tbreak\n\t\t}\n\t\tif !gerritFields[msg[i+1:sep]] {\n\t\t\tbreak\n\t\t}\n\t\tmsg = msg[:i]\n\t}\n\treturn msg + \"\\n\"\n}\n\n// changeIds returns the full Gerrit change IDs of each commit. The\n// change ID will be \"\" if missing.\nfunc changeIds(project, forBranch string, commits []string) []string {\n\tif i := strings.LastIndexByte(forBranch, '/'); i >= 0 {\n\t\tforBranch = forBranch[i+1:]\n\t}\n\n\t// Construct input.\n\tvar input bytes.Buffer\n\tfor _, c := range commits {\n\t\tfmt.Fprintf(&input, \"%s\\n\", c)\n\t}\n\n\t// Run batch cat-file command.\n\targs := []string{\"cat-file\", \"--batch\", \"--buffer\"}\n\tcmd := exec.Command(\"git\", args...)\n\tcmd.Stdin = &input\n\tout, err := cmd.Output()\n\tif err != nil {\n\t\tif err, ok := err.(*exec.ExitError); ok {\n\t\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", string(err.Stderr))\n\t\t}\n\t\tlog.Fatalf(\"git %s failed: %s\", shellEscapeList(args), err)\n\t}\n\n\t// Parse output.\n\tcids := make([]string, len(commits))\n\tfor i, commit := range commits {\n\t\t// Get \"<sha1> SP <type> SP <size> LF\" line.\n\t\tnl := bytes.IndexByte(out, '\\n')\n\t\tif nl < 0 {\n\t\t\tlog.Fatal(\"malformed git cat-file output\")\n\t\t}\n\t\tfs := strings.Fields(string(out[:nl]))\n\t\tout = out[nl+1:]\n\t\tif len(fs) < 2 || fs[0] != commit {\n\t\t\tlog.Fatal(\"malformed git cat-file output\")\n\t\t}\n\t\tif fs[1] == \"missing\" {\n\t\t\tcontinue\n\t\t}\n\t\tif 
fs[1] != \"commit\" {\n\t\t\tlog.Fatalf(\"unexpected object type %q for %s\", fs[1], fs[0])\n\t\t}\n\n\t\t// Get commit object.\n\t\tsize, _ := strconv.Atoi(fs[2])\n\t\tif len(out) <= size || out[size] != '\\n' {\n\t\t\tlog.Fatal(\"git cat-file out of sync\")\n\t\t}\n\t\tvar obj []byte\n\t\tobj, out = out[:size], out[size+1:]\n\n\t\t// Find the Change-Id in the commit.\n\t\tfor _, line := range bytes.Split(obj, []byte(\"\\n\")) {\n\t\t\tif bytes.HasPrefix(line, []byte(\"Change-Id: \")) {\n\t\t\t\tlfs := bytes.Fields(line)\n\t\t\t\tif len(lfs) == 2 {\n\t\t\t\t\tcid := string(lfs[1])\n\t\t\t\t\tcids[i] = project + \"~\" + forBranch + \"~\" + cid\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn cids\n}\n"
  },
  {
    "path": "git-p/go.mod",
    "content": "module github.com/aclements/go-misc/git-p\n\ngo 1.17\n\nrequire golang.org/x/term v0.16.0\n\nrequire golang.org/x/sys v0.16.0 // indirect\n"
  },
  {
    "path": "git-p/go.sum",
    "content": "golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=\ngolang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=\ngolang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE=\ngolang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY=\n"
  },
  {
    "path": "git-p/main.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Command git-p prints the status of pending commits on all branches.\n//\n// git-p summarizes the status of each commit on every branch,\n// starting with HEAD and then the most recently committed-to branch.\n//\n// git-p shows the Gerrit status of each commit and performs several\n// status checks:\n//\n// * It checks if there are any local changes that haven't been mailed\n// (and is sensitive to rebases, so it won't complain if the diff\n// hasn't changed).\n//\n// * It checks if there are any rejections (-1 or -2), or if the CL is\n// marked \"Do not submit\".\n//\n// * It checks if there are any comments on the latest version of the\n// CL, which may indicate it needs changes even if it is submittable.\n//\n// * It checks if the trybots are sad or weren't run.\n//\n// The output is color-coded by status: green indicates a CL is\n// submittable and has no warnings, yellow indicates a CL has\n// warnings, and red indicates a CL has been rejected. Submitted CLs\n// are greyed out.\n//\n// git-p uses the git pager if one is configured.\n//\n// Currently git-p only supports the main Go repository.\n//\n// Example output\n//\n//\t$ git-p gc-free-wbufs-v3\n//\tgc-free-wbufs-v3 for master\n//\t  Not mailed c1e17d722f fixup! 
runtime: allocate GC workbufs from manually-…\n//\t  Pending    326537d00c runtime: free workbufs during… [golang.org/cl/38582]\n//\t    Local commit message differs\n//\t    1 comment on latest PS from Rick Hudson\n//\t    TryBots failed on linux-386, windows-386-gce, nacl-386, linux-arm\n//\t  Ready      b3b8fef6cb runtime: allocate GC workbufs… [golang.org/cl/38581]\n//\t    1 comment on latest PS from Rick Hudson\n//\t    TryBots failed on windows-386-gce, linux-386, nacl-386, linux-arm\n//\t  Ready      5fc11e7173 runtime: eliminate write barr… [golang.org/cl/38580]\n//\t  Ready      b5c7f08ccb runtime: rename gcBits -> gcB… [golang.org/cl/38579]\n//\t  Pending    d9dd54b571 runtime: eliminate write barr… [golang.org/cl/38578]\n//\t    2 comments on latest PS from Rick Hudson, Austin Clements\n//\t    TryBots failed on linux-amd64\n//\t  Pending    b70f9f7dc2 runtime: don't count manually… [golang.org/cl/38577]\n//\t    1 comment on latest PS from Rick Hudson\n//\t  Ready      1eae861947 runtime: generalize {alloc,fr… [golang.org/cl/38576]\n//\t  Ready      670d05695f runtime: rename mspan.stackfr… [golang.org/cl/38575]\n//\t  Ready      3e531adf5f runtime: rename _MSpanStack -… [golang.org/cl/38574]\n//\t    1 comment on latest PS from Rick Hudson\n//\t  Ready      3e3125c7e5 runtime: initialize more fiel… [golang.org/cl/38573]\n//\t    2 comments on latest PS from Rick Hudson, Austin Clements\n//\t  Submitted  302daf57f6 runtime: improve systemstack-… [golang.org/cl/38572]\n//\t    Local commit message differs\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"unicode/utf8\"\n)\n\nconst debugGerrit = false\n\nfunc main() {\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage: %s [flags] [branches...]\\n\\n\", os.Args[0])\n\t\tfmt.Fprintf(os.Stderr, \"With no arguments, list the current branch.\\n\\n\")\n\t\tflag.PrintDefaults()\n\t}\n\tdefIgnore, _ := tryGit(\"config\", 
\"p.ignore\")\n\tflagIgnore := flag.String(\"ignore\", defIgnore, \"ignore branches matching shell `pattern` [git config p.ignore]\")\n\tflagLocal := flag.Bool(\"l\", false, \"local state only; don't query Gerrit\")\n\tflagAll := flag.Bool(\"a\", false, \"list all branches from newest to oldest\")\n\tflag.Parse()\n\tbranches := flag.Args()\n\tignores := strings.Fields(*flagIgnore)\n\n\tif *flagAll {\n\t\tif len(branches) != 0 {\n\t\t\tfmt.Fprintf(os.Stderr, \"cannot use both -a and branches\\n\")\n\t\t\tos.Exit(1)\n\t\t}\n\t} else {\n\t\tif len(branches) == 0 {\n\t\t\tbranches = []string{\"HEAD\"}\n\t\t}\n\t}\n\n\t// Check the branch names.\n\tfor _, b := range branches {\n\t\tif out, err := tryGit(\"rev-parse\", b, \"--\"); err != nil {\n\t\t\tfmt.Printf(\"%s\\n\", out)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\t// Check ignore patterns.\n\tfor _, ig := range ignores {\n\t\tif _, err := filepath.Match(ig, \"\"); err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"bad ignore pattern %q: %s\", ig, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\tif !setupPager() {\n\t\t// We're in a dumb terminal. 
Turn off control codes.\n\t\tstyle = nil\n\t}\n\n\t// Find the Gerrit host name.\n\tremote := \"origin\"\n\tgerritUrl := git(\"config\", \"remote.\"+remote+\".url\")\n\n\t// Get commits that are available from the Gerrit remote.\n\tupstreams := lines(git(\"for-each-ref\", \"--format\", \"%(objectname)\", \"refs/remotes/\"+remote+\"/\"))\n\tif len(upstreams) == 0 {\n\t\tlog.Fatalf(\"no refs for remote %s\", remote)\n\t}\n\n\tvar gerrit *Gerrit\n\tif !*flagLocal {\n\t\tvar err error\n\t\tgerrit, err = NewGerrit(gerritUrl)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\t// Pass a token through each showBranch so we can pipeline\n\t// fetching branch information, while displaying it in order.\n\ttoken := make(chan struct{}, 1)\n\ttoken <- struct{}{}\n\t// But if the output of showBranch is blocked (e.g., by\n\t// back-pressure from a pager), don't start new showBranches.\n\t// This avoids making lots of ultimately ignored requests to\n\t// Gerrit.\n\tlimit := make(chan struct{}, 3)\n\n\tvar head string\n\tif len(branches) == 0 {\n\t\t// Resolve HEAD and show it first regardless of age.\n\t\thead, _ = tryGit(\"symbolic-ref\", \"HEAD\")\n\t\tif head != \"\" {\n\t\t\ttoken = showBranch(gerrit, head, \"HEAD\", remote, upstreams, token, limit)\n\t\t}\n\n\t\t// Get all local branches, sorted by most recent commit date.\n\t\tbranches = lines(git(\"for-each-ref\", \"--format\", \"%(refname)\", \"--sort\", \"-committerdate\", \"refs/heads/\"))\n\t\tif len(ignores) > 0 {\n\t\t\tnBranches := []string{}\n\t\tbranchLoop:\n\t\t\tfor _, b := range branches {\n\t\t\t\tfor _, ig := range ignores {\n\t\t\t\t\tif m, _ := filepath.Match(ig, b); m {\n\t\t\t\t\t\tcontinue branchLoop\n\t\t\t\t\t}\n\t\t\t\t\tif m, _ := filepath.Match(\"refs/heads/\"+ig, b); m {\n\t\t\t\t\t\tcontinue branchLoop\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tnBranches = append(nBranches, b)\n\t\t\t}\n\t\t\tbranches = nBranches\n\t\t}\n\t}\n\n\t// Show all branches.\n\tfor _, branch := range branches {\n\t\tif 
branch == head {\n\t\t\tcontinue\n\t\t}\n\t\ttoken = showBranch(gerrit, branch, \"\", remote, upstreams, token, limit)\n\t}\n\n\t<-token\n}\n\nfunc showBranch(gerrit *Gerrit, branch, extra string, remote string, upstreams []string, token, limit chan struct{}) chan struct{} {\n\t// Don't start too many showBranches.\n\tlimit <- struct{}{}\n\n\t// Get the Gerrit upstream name so we can construct full\n\t// Change-IDs.\n\tvar haveUpstream bool\n\tupstream := upstreamOf(branch)\n\tif upstream == \"\" {\n\t\tupstream = \"refs/remotes/\" + remote + \"/master\"\n\t} else {\n\t\thaveUpstream = true\n\t}\n\n\t// Get commits from the branch to any upstream.\n\t//\n\t// TODO: This can be quite slow (50–100 ms). git is clearly\n\t// reasonably clever about this, but it has to expand the\n\t// exclusion list and can't share work across all of these\n\t// branches. Maybe this should fully expand the exclusion set\n\t// just once, do limited rev-lists, and cut them off at the\n\t// exclusion set.\n\targs := []string{\"rev-list\", branch}\n\tfor _, u := range upstreams {\n\t\targs = append(args, \"^\"+u)\n\t}\n\targs = append(args, \"--\")\n\tcommits := lines(git(args...))\n\n\t// Get Change-Ids from these commits.\n\tcids := changeIds(gerrit.project, upstream, commits)\n\n\t// Fetch information on all of these changes.\n\t//\n\t// We need DETAILED_LABELS to get numeric values of labels.\n\tchanges := make([]*GerritChanges, len(cids))\n\tif gerrit != nil {\n\t\tfor i, cid := range cids {\n\t\t\t// TODO: Would this be simpler with a single big OR query?\n\t\t\tif cid != \"\" {\n\t\t\t\tchanges[i] = gerrit.QueryChanges(\"change:\"+cid, printChangeOptions...)\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(changes) == 0 {\n\t\t<-limit\n\t\treturn token\n\t}\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\t<-token\n\t\t// Print changes.\n\t\tfmt.Printf(\"%s%s%s\", style[\"branch\"], strings.TrimPrefix(branch, \"refs/heads/\"), style[\"reset\"])\n\t\tif extra != \"\" {\n\t\t\tfmt.Printf(\" 
(%s%s%s)\", style[\"symbolic-ref\"], extra, style[\"reset\"])\n\t\t}\n\t\tif haveUpstream {\n\t\t\tfmt.Printf(\" for %s\", strings.TrimPrefix(upstream, \"refs/remotes/\"+remote+\"/\"))\n\t\t}\n\t\tfmt.Printf(\"\\n\")\n\t\tfor i, change := range changes {\n\t\t\tprintChange(commits[i], change, gerrit == nil)\n\t\t}\n\t\tfmt.Println()\n\t\t<-limit\n\t\tdone <- struct{}{}\n\t}()\n\treturn done\n}\n\nvar labelMsg = regexp.MustCompile(`^Patch Set [0-9]+: [-a-zA-Z]+\\+[0-9]$`)\nvar trybotFailures = regexp.MustCompile(`(?m)^Failed on ([^:]+):`)\n\nfunc changeStatus(commit string, info *GerritChangeInfo) (status string, warnings []string) {\n\t// TODO: Show attention information?\n\n\t// Check for warnings on current PS. (Requires\n\t// CURRENT_REVISION or ALL_REVISIONS option.)\n\tcurPatchSet := info.Revisions[info.CurrentRevision].Number\n\t// Are there unmailed changes?\n\tif info.CurrentRevision != commit {\n\t\t// How serious are the differences with the mailed changes?\n\t\tpid1, err1 := gitPatchID(info.CurrentRevision)\n\t\tpid2, err2 := gitPatchID(commit)\n\t\tif !(err1 == nil && err2 == nil && pid1 == pid2) {\n\t\t\t// The patches are different.\n\t\t\twarnings = append(warnings, \"Local commit differs from mailed commit\")\n\t\t} else {\n\t\t\tmsg1, err1 := gitCommitMessage(info.CurrentRevision)\n\t\t\tmsg2, err2 := gitCommitMessage(commit)\n\t\t\tif !(err1 == nil && err2 == nil && canonGerritMessage(msg1) == canonGerritMessage(msg2)) {\n\t\t\t\t// Patches are the same, but the\n\t\t\t\t// commit message has changed.\n\t\t\t\twarnings = append(warnings, \"Local commit message differs\")\n\t\t\t}\n\t\t}\n\t}\n\t// Are there rejections?\n\trejected := false\n\tfor labelName, label := range info.Labels {\n\t\tif !label.Optional && label.Rejected != nil {\n\t\t\tif labelName == \"Do-Not-Submit\" {\n\t\t\t\twarnings = append(warnings, \"Marked \\\"Do not submit\\\"\")\n\t\t\t} else {\n\t\t\t\twarnings = append(warnings, fmt.Sprintf(\"Rejected by %s\", 
label.Rejected.Name))\n\t\t\t\trejected = true\n\t\t\t}\n\t\t}\n\t}\n\t// Are there unresolved comments?\n\t//\n\t// TODO: Don't count the unresolved comment from a running trybot run.\n\t// Unfortunately, to see whether a comment is resolved or not, we have to\n\t// request all of the comments using the /changes/{change-id}/comments\n\t// endpoint. We can't just get them in the ChangeInfo.\n\t//\n\t// TODO: If an unresolved comment is resolved by an unpublished draft, count\n\t// that separately.\n\tif info.UnresolvedCommentCount > 0 {\n\t\tmsg := fmt.Sprintf(\"%d unresolved comment thread\", info.UnresolvedCommentCount)\n\t\tif info.UnresolvedCommentCount > 1 {\n\t\t\tmsg += \"s\"\n\t\t}\n\t\twarnings = append(warnings, msg)\n\t}\n\t// Are there comments on the latest PS? (Requires\n\t// MESSAGES option.)\n\tnComments := 0\n\tcommentUsers, commentUsersSet := []string{}, map[string]bool{}\n\tfor _, msg := range info.Messages {\n\t\tif msg.PatchSet != curPatchSet {\n\t\t\tcontinue\n\t\t}\n\t\t// Ignore automated comments, including TryBot comments.\n\t\tif strings.HasPrefix(msg.Tag, \"autogenerated:\") {\n\t\t\tcontinue\n\t\t}\n\t\t// Ignore label-only messages (ugh, why aren't these\n\t\t// better marked?)\n\t\tif labelMsg.MatchString(msg.Message) {\n\t\t\tcontinue\n\t\t}\n\t\t// Some messages have no author?\n\t\tif msg.Author == nil {\n\t\t\tcontinue\n\t\t}\n\t\tnComments++\n\t\t// Requires DETAILED_ACCOUNTS\n\t\tif !commentUsersSet[msg.Author.Name] {\n\t\t\tcommentUsersSet[msg.Author.Name] = true\n\t\t\tcommentUsers = append(commentUsers, msg.Author.Name)\n\t\t}\n\t}\n\tif nComments > 0 {\n\t\tmsg := \"1 comment\"\n\t\tif nComments > 1 {\n\t\t\tmsg = fmt.Sprintf(\"%d comments\", nComments)\n\t\t}\n\t\tmsg += \" on latest PS from \" + strings.Join(commentUsers, \", \")\n\t\twarnings = append(warnings, msg)\n\t}\n\t// Check trybot status. 
(Requires LABELS option.)\n\tif tbr := info.Labels[\"LUCI-TryBot-Result\"]; tbr != nil && tbr.Rejected != nil {\n\t\t// TODO: Use checks API to see what failed?\n\t\twarnings = append(warnings, \"TryBots failed\")\n\t} else if tbr == nil || tbr.Approved == nil {\n\t\t// TryBots haven't run. If it's submitted, we don't care.\n\t\tif info.Status != \"MERGED\" {\n\t\t\t// Are they running?\n\t\t\tmsg := \"TryBots not run\"\n\t\t\tif cq := info.Labels[\"Commit-Queue\"]; cq != nil {\n\t\t\t\tfor _, vote := range cq.All {\n\t\t\t\t\tif vote.Value == 1 {\n\t\t\t\t\t\tmsg = \"TryBots running\"\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\twarnings = append(warnings, msg)\n\t\t}\n\t}\n\n\tswitch info.Status {\n\tdefault:\n\t\tstatus = fmt.Sprintf(\"Unknown status %q\", info.Status)\n\tcase \"MERGED\":\n\t\tstatus = \"Submitted\"\n\tcase \"ABANDONED\":\n\t\tstatus = \"Abandoned\"\n\tcase \"DRAFT\":\n\t\tstatus = \"Draft\"\n\tcase \"NEW\":\n\t\t// Submittable? (Requires SUBMITTABLE option.)\n\t\tstatus = \"Pending\"\n\t\tif rejected {\n\t\t\tstatus = \"Rejected\"\n\t\t} else if info.Submittable {\n\t\t\tstatus = \"Ready\"\n\t\t}\n\t}\n\n\treturn status, warnings\n}\n\nvar printChangeOptions = []string{\"SUBMITTABLE\", \"LABELS\", \"CURRENT_REVISION\", \"MESSAGES\", \"DETAILED_ACCOUNTS\"}\n\n// printChange prints a summary of change's status and warnings.\n//\n// change must be retrieved with options printChangeOptions.\nfunc printChange(commit string, change *GerritChanges, local bool) {\n\tlogMsg := git(\"log\", \"-n1\", \"--oneline\", commit)\n\n\tstatus, warnings, link := \"Not mailed\", []string(nil), \"\"\n\tif change != nil {\n\t\tresults, err := change.Wait()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif len(results) > 1 {\n\t\t\tlog.Fatalf(\"multiple changes found for commit %s\", commit)\n\t\t}\n\t\tif len(results) == 1 {\n\t\t\tstatus, warnings = changeStatus(commit, results[0])\n\t\t\t//link = fmt.Sprintf(\"[%s/c/%d]\", gerritUrl, 
results[0].Number)\n\t\t\tlink = fmt.Sprintf(\" [go.dev/cl/%d]\", results[0].Number)\n\t\t}\n\t} else if local {\n\t\tstatus = \"\"\n\t}\n\n\tvar control, eControl string\n\tif len(warnings) != 0 {\n\t\tif c, ok := style[status+\" warn\"]; ok {\n\t\t\tcontrol = c\n\t\t}\n\t}\n\tif control == \"\" {\n\t\tif c, ok := style[status]; ok {\n\t\t\tcontrol = c\n\t\t}\n\t}\n\tif control != \"\" {\n\t\teControl = style[\"reset\"]\n\t}\n\n\thdr := logMsg\n\tif status != \"\" {\n\t\thdr = fmt.Sprintf(\"%-10s %s\", status, logMsg)\n\t}\n\thdrMax := 80 - len(link) - 2\n\tif utf8.RuneCountInString(hdr) > hdrMax {\n\t\thdr = fmt.Sprintf(\"%*.*s…\", hdrMax-1, hdrMax-1, hdr)\n\t}\n\tfmt.Printf(\"  %s%-*s%s%s\\n\", control, hdrMax, hdr, eControl, link)\n\tfor _, w := range warnings {\n\t\tfmt.Printf(\"    %s\\n\", w)\n\t}\n}\n"
  },
  {
    "path": "git-p/pager.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os/exec\"\n\t\"syscall\"\n\n\t\"golang.org/x/term\"\n)\n\n// setupPager restarts this process under the git pager. If the\n// process is already under a pager or doesn't want a pager, it\n// returns.\n//\n// setupPager returns true if this process is running in a smart\n// terminal (which includes running under a pager started by\n// setupPager).\nfunc setupPager() (inTerm bool) {\n\t// This is roughly based on pager.c:setup_pager in git, but\n\t// this starts ourselves as a subprocess instead of the pager.\n\t// Doing it this way around means we don't have to babysit the\n\t// pager: signals/panics kill us like normal and leave the\n\t// pager running and the shell waiting on the pager.\n\n\tif os.Getenv(\"GIT_P_PAGER_IN_USE\") != \"\" {\n\t\treturn true\n\t}\n\tif !term.IsTerminal(1) {\n\t\treturn false\n\t}\n\tswitch os.Getenv(\"TERM\") {\n\tcase \"\", \"dumb\":\n\t\treturn false\n\t}\n\n\tpagerCmd := git(\"var\", \"GIT_PAGER\")\n\tif pagerCmd == \"\" {\n\t\treturn true\n\t}\n\n\t// Start ourselves as a subprocess.\n\tme, err := os.Executable()\n\tif err != nil {\n\t\treturn true\n\t}\n\tos.Setenv(\"GIT_P_PAGER_IN_USE\", \"true\")\n\tcmd := exec.Command(me, os.Args[1:]...)\n\tr, w, err := os.Pipe()\n\tif err != nil {\n\t\treturn true\n\t}\n\tcmd.Stdin = nil\n\tcmd.Stdout = w\n\tcmd.Stderr = w\n\tcmd.Start()\n\n\t// Replace this process with the pager.\n\tw.Close()\n\tsyscall.Dup2(int(r.Fd()), 0)\n\tr.Close()\n\t// We need -R at least to interpret color codes.\n\t// Add -F so single-screen output doesn't invoke paging.\n\tos.Setenv(\"LESS\", \"-FR \"+os.Getenv(\"LESS\"))\n\tif os.Getenv(\"LV\") == \"\" {\n\t\tos.Setenv(\"LV\", \"-c\")\n\t}\n\terr = syscall.Exec(\"/bin/sh\", []string{\"sh\", \"-c\", pagerCmd}, os.Environ())\n\n\t// 
Didn't work, but there's not much we can do now. Try cat.\n\tsyscall.Exec(\"/bin/cat\", []string{\"cat\"}, os.Environ())\n\n\t// Still didn't work. Bail.\n\tpanic(fmt.Sprintf(\"failed to start pager: %s\", err))\n}\n"
  },
  {
    "path": "git-p/shell.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport \"strings\"\n\n// shellEscape escapes a single shell token.\nfunc shellEscape(x string) string {\n\tif len(x) == 0 {\n\t\treturn \"''\"\n\t}\n\tfor _, r := range x {\n\t\tif 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || '0' <= r && r <= '9' || strings.ContainsRune(\"@%_-+:,./\", r) {\n\t\t\tcontinue\n\t\t}\n\t\t// Unsafe character.\n\t\treturn \"'\" + strings.Replace(x, \"'\", \"'\\\"'\\\"'\", -1) + \"'\"\n\t}\n\treturn x\n}\n\n// shellEscapeList escapes a list of shell tokens.\nfunc shellEscapeList(xs []string) string {\n\tout := make([]string, len(xs))\n\tfor i, x := range xs {\n\t\tout[i] = shellEscape(x)\n\t}\n\treturn strings.Join(out, \" \")\n}\n"
  },
  {
    "path": "git-p/style.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nvar style = map[string]string{\n\t\"reset\": \"\\x1b[0m\",\n\n\t\"branch\":       \"\\x1b[1;32m\", // Bright green\n\t\"symbolic-ref\": \"\\x1b[1;36m\", // Bright cyan\n\n\t// CL status styles\n\n\t\"Not mailed\": \"\\x1b[35m\", // Magenta\n\n\t\"Pending warn\":  \"\\x1b[33m\",   // Yellow\n\t\"Ready warn\":    \"\\x1b[33m\",   // Yellow\n\t\"Rejected warn\": \"\\x1b[1;31m\", // Bright red\n\n\t\"Ready\": \"\\x1b[32m\", // Green\n\n\t\"Submitted\": \"\\x1b[37m\",   // Gray\n\t\"Abandoned\": \"\\x1b[9;37m\", // Gray, strike-through\n\t\"Draft\":     \"\\x1b[37m\",   // Gray\n}\n"
  },
  {
    "path": "go-weave/amb/det.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage amb\n\nimport \"fmt\"\n\n// StrategyDFS explores the ambiguous value space in depth-first order\n// up to MaxDepth. It is deterministic and (given enough time) will\n// explore the entire space.\ntype StrategyDFS struct {\n\t// MaxDepth specifies the maximum depth of the tree. If this\n\t// is 0, it defaults to DefaultMaxDepth.\n\tMaxDepth int\n\n\tbranchWidths []int\n\tcurPath      []int\n\tstep         int\n}\n\nfunc (s *StrategyDFS) Reset() {\n\ts.branchWidths = nil\n\ts.curPath = nil\n\ts.step = 0\n}\n\nfunc (s *StrategyDFS) maxDepth() int {\n\tif s.MaxDepth == 0 {\n\t\treturn DefaultMaxDepth\n\t}\n\treturn s.MaxDepth\n}\n\nfunc (s *StrategyDFS) Amb(n int) (int, bool) {\n\tif s.step < len(s.curPath) {\n\t\t// We're in replay mode.\n\t\tif n != s.branchWidths[s.step] {\n\t\t\tpanic(&ErrNondeterminism{fmt.Sprintf(\"Amb(%d) during replay, but previous call was Amb(%d)\", n, s.branchWidths[s.step])})\n\t\t}\n\t\tres := s.curPath[s.step]\n\t\ts.step++\n\t\treturn res, true\n\t}\n\n\tif len(s.curPath) == s.maxDepth() {\n\t\treturn 0, false\n\t}\n\n\t// We're in exploration mode.\n\ts.branchWidths = append(s.branchWidths, n)\n\ts.curPath = append(s.curPath, 0)\n\ts.step++\n\treturn 0, true\n}\n\nfunc (s *StrategyDFS) Next() bool {\n\ts.step = 0\n\n\t// Construct the next path prefix to explore.\n\tfor i := len(s.curPath) - 1; i >= 0; i-- {\n\t\ts.curPath[i]++\n\t\tif s.curPath[i] < s.branchWidths[i] {\n\t\t\tbreak\n\t\t}\n\t\ts.curPath = s.curPath[:len(s.curPath)-1]\n\t}\n\ts.branchWidths = s.branchWidths[:len(s.curPath)]\n\tif len(s.branchWidths) == 0 {\n\t\t// We're out of paths.\n\t\treturn false\n\t}\n\treturn true\n}\n\n// ErrNondeterminism is the error used by deterministic strategies to\n// indicate that the strategy detected that the application behaved\n// 
non-deterministically.\ntype ErrNondeterminism struct {\n\tDetail string\n}\n\nfunc (e *ErrNondeterminism) Error() string {\n\treturn \"non-determinism detected: \" + e.Detail\n}\n"
  },
  {
    "path": "go-weave/amb/progress.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage amb\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n)\n\nvar count int64\n\nvar progress struct {\n\tprintLock sync.Mutex\n\tstop      chan struct{}\n\tdone      chan struct{}\n}\n\nconst resetLine = \"\\r\\x1b[2K\"\n\nfunc startProgress() {\n\tprogress.stop = make(chan struct{})\n\tprogress.done = make(chan struct{})\n\n\tgo func() {\n\t\t// Redirect process stdout and stderr.\n\t\t//\n\t\t// Alternatively, we could dup our pipes over stdout\n\t\t// and stderr, but then we're in the way of any\n\t\t// runtime debug output.\n\t\torigStdout, origStderr := os.Stdout, os.Stderr\n\t\tnewStdoutR, newStdoutW, err := os.Pipe()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to create stdout self-pipe: %v\", err)\n\t\t}\n\t\tnewStderrR, newStderrW, err := os.Pipe()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to create stderr self-pipe: %v\", err)\n\t\t}\n\n\t\tdefer func() {\n\t\t\tos.Stdout, os.Stderr = origStdout, origStderr\n\t\t\t// Stop the feeder. 
It will close the write sides.\n\t\t\tnewStdoutR.Close()\n\t\t\tnewStderrR.Close()\n\t\t}()\n\t\tos.Stdout, os.Stderr = newStdoutW, newStderrW\n\t\tgo pipeFeeder(newStdoutR, origStdout, origStdout)\n\t\tgo pipeFeeder(newStderrR, origStderr, origStderr)\n\n\t\treport := func(final bool) {\n\t\t\tprogress.printLock.Lock()\n\t\t\tfmt.Fprintf(origStderr, \"%s%d done\", resetLine, atomic.LoadInt64(&count))\n\t\t\tif final {\n\t\t\t\tfmt.Fprintf(origStderr, \"\\n\")\n\t\t\t}\n\t\t\tprogress.printLock.Unlock()\n\t\t}\n\t\tticker := time.NewTicker(100 * time.Millisecond)\n\tloop:\n\t\tfor {\n\t\t\treport(false)\n\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\tcase <-progress.stop:\n\t\t\t\treport(true)\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t}\n\t\tticker.Stop()\n\t\tclose(progress.done)\n\t}()\n}\n\nfunc pipeFeeder(r, w, pstream *os.File) {\n\tvar buf [256]byte\n\tbol := true\n\tfor {\n\t\tn, err := r.Read(buf[:])\n\t\tif n == 0 {\n\t\t\tbreak\n\t\t}\n\t\tif bol {\n\t\t\tbol = false\n\t\t\t// Stop progress printing.\n\t\t\tprogress.printLock.Lock()\n\t\t\t// Clear the progress line.\n\t\t\tpstream.WriteString(resetLine)\n\t\t}\n\t\t// Print this message.\n\t\tif n, err = w.Write(buf[:n]); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif bytes.HasSuffix(buf[:n], []byte(\"\\n\")) {\n\t\t\t// Resume progress printing.\n\t\t\tprogress.printLock.Unlock()\n\t\t\tbol = true\n\t\t}\n\t}\n\tw.Close()\n}\n\nfunc stopProgress() {\n\tclose(progress.stop)\n\t<-progress.done\n}\n"
  },
  {
    "path": "go-weave/amb/rand.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage amb\n\nimport \"math/rand\"\n\n// StrategyRandom explores the ambiguous value space randomly. It\n// makes no attempt to avoid repeatedly visiting the same point, nor\n// does it know when it has explored the entire space.\ntype StrategyRandom struct {\n\t// MaxDepth specifies the maximum depth of the tree. If this\n\t// is 0, it defaults to DefaultMaxDepth.\n\tMaxDepth int\n\n\t// MaxPaths specifies the maximum number of paths to explore.\n\t// If this is 0, the number of paths is unbounded.\n\tMaxPaths int\n\n\tstep, paths int\n}\n\nfunc (s *StrategyRandom) Reset() {\n\ts.step = 0\n\ts.paths = 0\n}\n\nfunc (s *StrategyRandom) maxDepth() int {\n\tif s.MaxDepth == 0 {\n\t\treturn DefaultMaxDepth\n\t}\n\treturn s.MaxDepth\n}\n\nfunc (s *StrategyRandom) Amb(n int) (int, bool) {\n\tif s.step == s.maxDepth() {\n\t\treturn 0, false\n\t}\n\ts.step++\n\treturn rand.Intn(n), true\n}\n\nfunc (s *StrategyRandom) Next() bool {\n\ts.step = 0\n\ts.paths++\n\treturn s.MaxPaths == 0 || s.paths < s.MaxPaths\n}\n"
  },
  {
    "path": "go-weave/amb/run.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage amb\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n)\n\n// A Strategy describes how to explore a space of ambiguous values.\n// Such a space can be viewed as a tree, where a call to Amb\n// introduces a node with fan-out n and a call to Next terminates a\n// path.\ntype Strategy interface {\n\t// Amb returns an \"ambiguous\" value in the range [0, n). If\n\t// the current path cannot be continued (for example, it's\n\t// reached a maximum depth), it returns 0, false.\n\t//\n\t// The first call to Amb after constructing a Strategy or\n\t// calling Next always starts at the root of the tree.\n\t//\n\t// Amb may panic with ErrNondeterminism if it detects that the\n\t// application is behaving non-deterministically (for example,\n\t// when replaying a previously explored path, the value of n\n\t// is different from when Amb was called during a previous\n\t// exploration of this path). This is best-effort and some\n\t// strategies may not be able to detect this.\n\tAmb(n int) (int, bool)\n\n\t// Next terminates the current path. If there are no more\n\t// paths to explore, Next returns false. 
A Strategy is not\n\t// required to ever return false (for example, a randomized\n\t// strategy may not know that it's explored the entire space).\n\tNext() bool\n\n\t// Reset resets the state of this Strategy to the point where\n\t// no paths have been explored.\n\tReset()\n}\n\n// DefaultMaxDepth is the default maximum tree depth if it is\n// unspecified.\nvar DefaultMaxDepth = 100\n\n// Scheduler uses a Strategy to execute a function repeatedly at\n// different points in a space of ambiguous values.\ntype Scheduler struct {\n\t// Strategy specifies the strategy for exploring the execution\n\t// space.\n\tStrategy Strategy\n\n\tactive bool\n}\n\nvar curStrategy Strategy\nvar curPanic interface{}\n\n// Run calls root repeatedly at different points in the ambiguous\n// value space.\nfunc (s *Scheduler) Run(root func()) {\n\tif s.active {\n\t\tpanic(\"nested Run call\")\n\t}\n\ts.active = true\n\tdefer func() { s.active = false }()\n\n\tcount = 0\n\tstartProgress()\n\tdefer stopProgress()\n\n\ts.Strategy.Reset()\n\tfor {\n\t\ts.run1(root)\n\n\t\tif !s.Strategy.Next() {\n\t\t\tbreak\n\t\t}\n\t\tcount++\n\t}\n}\n\nfunc (s *Scheduler) run1(root func()) {\n\tdefer func() {\n\t\terr := recover()\n\t\tif err != nil {\n\t\t\t// TODO: Report path.\n\t\t\tfmt.Println(\"failure:\", err)\n\t\t\tvar buf []byte\n\t\t\tfor i := 1 << 10; i < 1<<20; i *= 2 {\n\t\t\t\tbuf = make([]byte, i)\n\t\t\t\tbuf = buf[:runtime.Stack(buf, false)]\n\t\t\t\tif len(buf) < i {\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tos.Stdout.Write(buf)\n\t\t}\n\t}()\n\troot()\n}\n\n// Amb returns a value in the range [0, n).\n//\n// Amb may panic with PathTerminated to indicate an execution path is\n// being forcibly terminated by the Strategy. 
If Amb is called on a\n// goroutine other than the goroutine that called Run, the goroutine\n// is responsible for recovering PathTerminated panics and forwarding\n// the panic to the goroutine that called Run.\nfunc (s *Scheduler) Amb(n int) int {\n\tx, ok := s.Strategy.Amb(n)\n\tif !ok {\n\t\tpanic(PathTerminated)\n\t}\n\treturn x\n}\n\n// PathTerminated is panicked by Scheduler.Amb to indicate that Run\n// should continue to the next path.\nvar PathTerminated = errors.New(\"path terminated\")\n"
  },
  {
    "path": "go-weave/models/cl20858.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// cl20858 is a model for checking the proposed scheduler change in CL\n// 20858.\npackage main\n\nimport (\n\t\"github.com/aclements/go-misc/go-weave/amb\"\n\t\"github.com/aclements/go-misc/go-weave/weave\"\n)\n\nvar sched = weave.Scheduler{Strategy: &amb.StrategyDFS{}}\n\nfunc mainOld() {\n\tsched.Run(func() {\n\t\trunnext, runqhead, runqtail = 0, 0, 0\n\n\t\trunqput(1)\n\t\tsched.Go(func() {\n\t\t\trunqput(2)\n\t\t\trunqget()\n\t\t})\n\t\tif runqempty() {\n\t\t\tpanic(\"runqempty\")\n\t\t}\n\t})\n}\n\nfunc main() {\n\tstates := 0\n\tsched.Run(func() {\n\t\tstates++\n\t\trunnext, runqhead, runqtail = 0, 0, 0\n\n\t\tsched.Go(func() {\n\t\t\tfor i := 0; i < 5; i++ {\n\t\t\t\tif sched.Amb(2) == 0 {\n\t\t\t\t\trunqget()\n\t\t\t\t} else {\n\t\t\t\t\trunqput(1)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t\tvar empty, nonempty, checks int\n\t\tv := runqempty()\n\t\tif empty == 0 && v == true {\n\t\t\tpanic(\"spurious runqempty()\")\n\t\t}\n\t\tif nonempty == checks && v == false {\n\t\t\tpanic(\"spurious !runqempty()\")\n\t\t}\n\t})\n\tprintln(states, \"states\")\n}\n\nvar runnext int\nvar runqhead, runqtail int\nvar runq [256]int\n\nfunc runqput(g int) {\n\told := runnext\n\trunnext = g\n\tsched.Sched()\n\tif old == 0 {\n\t\treturn\n\t}\n\n\th := runqhead\n\tsched.Sched()\n\tt := runqtail\n\tsched.Sched()\n\tif t-h < len(runq) {\n\t\trunq[t%len(runq)] = g\n\t\tsched.Sched()\n\t\trunqtail = t + 1\n\t\tsched.Sched()\n\t\treturn\n\t}\n\tpanic(\"runq full\")\n}\n\nfunc runqget() int {\n\tnext := runnext\n\tif next != 0 {\n\t\trunnext = 0\n\t\tsched.Sched()\n\t\treturn next\n\t}\n\n\tfor {\n\t\th := runqhead\n\t\tsched.Sched()\n\t\tt := runqtail\n\t\tsched.Sched()\n\t\tif t == h {\n\t\t\treturn 0\n\t\t}\n\t\tg := runq[h%len(runq)]\n\t\tsched.Sched()\n\t\tif runqhead == h {\n\t\t\trunqhead = 
h + 1\n\t\t\tsched.Sched()\n\t\t\treturn g\n\t\t}\n\t}\n}\n\nfunc runqemptyOld() bool {\n\th := runqhead\n\tsched.Sched()\n\tt := runqtail\n\tsched.Sched()\n\tn := runnext\n\tsched.Sched()\n\treturn h == t && n == 0\n}\n\nfunc runqempty() bool {\n\tfor {\n\t\th := runqhead\n\t\tsched.Sched()\n\t\tt := runqtail\n\t\tsched.Sched()\n\t\tn := runnext\n\t\tsched.Sched()\n\t\tt2 := runqtail\n\t\tsched.Sched()\n\t\tif t == t2 {\n\t\t\treturn h == t && n == 0\n\t\t}\n\t}\n}\n\nfunc runqemptyTest() bool {\n\tfor {\n\t\th := runqhead\n\t\tsched.Sched()\n\t\tn := runnext\n\t\tsched.Sched()\n\t\tt := runqtail\n\t\tsched.Sched()\n\t\treturn h == t && n == 0\n\t}\n}\n"
  },
  {
    "path": "go-weave/models/issue16083.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// issue16083 is a model for finding how mark can complete when stack\n// scans are still in progress.\n//\n// XXX Move gcMarkRootCheck to after forEachP to force final workers\n// out?\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/aclements/go-misc/go-weave/amb\"\n\t\"github.com/aclements/go-misc/go-weave/weave\"\n)\n\nvar sched = weave.Scheduler{Strategy: &amb.StrategyRandom{}}\n\ntype State struct {\n\tworkers      weave.AtomicInt32\n\tmarkrootNext weave.AtomicInt32\n\tmarkrootJobs int32\n\tscanned      [2]weave.AtomicInt32\n\n\tmarkDoneSema weave.Mutex\n\n\tdone bool\n}\n\nfunc main() {\n\tsched.Run(func() {\n\t\tvar s State\n\t\ts.markrootJobs = int32(len(s.scanned))\n\n\t\tfor i := 0; i < 4; i++ {\n\t\t\tsched.Go(s.worker)\n\t\t}\n\t})\n}\n\nfunc (s *State) worker() {\n\t// This has a liveness problem, so limit it to 4 iterations.\n\tfor i := 0; i < 3 && !s.done; i++ {\n\t\tsched.Trace(\"s.workers++\")\n\t\tn := s.workers.Add(+1)\n\t\t// XXX This trace appears in the wrong place since Add\n\t\t// did a Sched after the modification. Perhaps we\n\t\t// should pre-Sched? 
Or I could put this in an atomic block.\n\t\tsched.Tracef(\" => %d\", n)\n\n\t\ts.gcDrain()\n\n\t\tsched.Trace(\"s.workers--\")\n\t\tn = s.workers.Add(-1)\n\t\tsched.Tracef(\" => %d\", n)\n\n\t\tif n == 0 {\n\t\t\tsched.Tracef(\"s.workers == 0\")\n\t\t\tif !s.gcMarkWorkAvailable() {\n\t\t\t\tsched.Tracef(\"!gcMarkWorkAvailable()\")\n\t\t\t\ts.gcMarkDone()\n\t\t\t}\n\t\t}\n\t}\n\tsched.Trace(\"exit\")\n}\n\nfunc (s *State) gcDrain() {\n\tjob := s.markrootNext.Add(1) - 1\n\tif job < s.markrootJobs {\n\t\tsched.Tracef(\"scanning %d\", job)\n\t\ts.scanned[job].Store(1)\n\t}\n}\n\nfunc (s *State) gcMarkWorkAvailable() bool {\n\treturn s.markrootNext.Load() < s.markrootJobs\n}\n\nfunc (s *State) gcMarkDone() {\n\ts.markDoneSema.Lock()\n\tdefer s.markDoneSema.Unlock()\n\n\tif !(s.workers.Load() == 0 && !s.gcMarkWorkAvailable()) {\n\t\tsched.Tracef(\"gcMarkDone retry\")\n\t\treturn\n\t}\n\n\ts.gcMarkRootCheck()\n\n\ts.done = true\n}\n\nfunc (s *State) gcMarkRootCheck() {\n\tfor i := range s.scanned {\n\t\tif s.scanned[i].Load() == 0 {\n\t\t\tpanic(fmt.Sprintf(\"missed %d\", i))\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "go-weave/models/markterm.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// markterm is a model for finding when mark termination can start\n// before all work is drained in Go 1.7. This model is expected to\n// fail.\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/aclements/go-misc/go-weave/amb\"\n\t\"github.com/aclements/go-misc/go-weave/weave\"\n)\n\nvar sched = weave.Scheduler{Strategy: &amb.StrategyRandom{}}\n\ntype State struct {\n\tworkers int\n\tgrey    int\n\tdone    bool\n}\n\nfunc main() {\n\tsched.Run(func() {\n\t\tvar s State\n\t\ts.grey = 3\n\n\t\tfor i := 0; i < 2; i++ {\n\t\t\tsched.Go(s.worker)\n\t\t}\n\t})\n}\n\nfunc (s *State) worker() {\n\t// TODO: This has a liveness problem: if a worker takes the\n\t// last work and then doesn't get scheduled again, the other\n\t// worker will spin. Curiously, duplicate state detection\n\t// would cut off that path, which means if we see a duplicate\n\t// with an earlier state in the same path (versus a different\n\t// path), it's a livelock and perhaps should be reported.\n\tfor {\n\t\ts.workers++\n\t\tsched.Tracef(\"s.workers++ (%d)\", s.workers)\n\t\tsched.Sched()\n\n\t\ts.check()\n\n\t\tswitch {\n\t\tcase s.grey <= 0:\n\t\t\t// Do nothing\n\t\tcase s.grey == 1:\n\t\t\t// Pull one pointer, put none.\n\t\t\ts.grey--\n\t\t\tsched.Tracef(\"s.grey-- (%d)\", s.grey)\n\t\t\tsched.Sched()\n\t\tdefault:\n\t\t\t// Remove two pointers, then add one to simulate\n\t\t\t// pulling a buffer off full and then putting one back\n\t\t\t// one full.\n\t\t\ts.grey -= 2\n\t\t\tsched.Tracef(\"s.grey -= 2 (%d)\", s.grey)\n\t\t\tsched.Sched()\n\t\t\ts.grey++\n\t\t\tsched.Tracef(\"s.grey++ (%d)\", s.grey)\n\t\t\ts.check()\n\t\t\tsched.Sched()\n\t\t}\n\n\t\tvar grey int\n\t\tif true { // Read full list (\"grey\") before dec(&workers)\n\t\t\tgrey = s.grey\n\t\t\tsched.Tracef(\"grey := s.grey (%d)\", 
s.grey)\n\t\t\tsched.Sched()\n\t\t}\n\n\t\ts.workers--\n\t\tn := s.workers\n\t\tsched.Tracef(\"s.workers-- (%d)\", s.workers)\n\t\tsched.Sched()\n\n\t\tif false {\n\t\t\tgrey = s.grey\n\t\t\tsched.Tracef(\"grey := s.grey (%d)\", s.grey)\n\t\t\tsched.Sched()\n\t\t}\n\n\t\tif n == 0 && grey == 0 {\n\t\t\ts.done = true\n\t\t\tsched.Trace(\"s.done = true\")\n\t\t\tsched.Sched()\n\t\t\ts.check()\n\t\t\tbreak\n\t\t}\n\t}\n\tsched.Trace(\"exit\")\n}\n\nfunc (s *State) check() {\n\tif s.done && s.grey > 0 {\n\t\tpanic(fmt.Sprintf(\"done, but grey==%d\", s.grey))\n\t}\n}\n"
  },
  {
    "path": "go-weave/models/maxtree.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// maxtree is a model for a concurrent max-tree.\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/aclements/go-misc/go-weave/amb\"\n\t\"github.com/aclements/go-misc/go-weave/weave\"\n)\n\nvar sched = weave.Scheduler{Strategy: &amb.StrategyRandom{}}\n\n// DFS doesn't work because there are some infinite schedules from CAS\n// retries.\n//\n//var sched = weave.Scheduler{Strategy: &amb.StrategyDFS{}}\n\nconst Depth = 3\nconst Degree = 2\n\ntype Node struct {\n\tName string\n\n\tParent   *Node\n\tPSlot    int\n\tChildren [Degree]*Node\n\n\tLock weave.Mutex\n\tVals [Degree + 1]int\n}\n\ntype State struct {\n\tRoot *Node\n}\n\nfunc main() {\n\tvar s State\n\tleaves := s.Init()\n\tsched.Run(func() {\n\t\tsched.Trace(\"resetting\")\n\t\ts.Reset()\n\t\tsched.Trace(\"reset\")\n\n\t\tfor times := 0; times < 2; times++ {\n\t\t\tvar wg weave.WaitGroup\n\t\t\tfor i := 0; i < 2; i++ {\n\t\t\t\ti := i\n\t\t\t\twg.Add(1)\n\t\t\t\tsched.Go(func() {\n\t\t\t\t\ts.worker(leaves[i])\n\t\t\t\t\twg.Done()\n\t\t\t\t})\n\t\t\t}\n\n\t\t\tsched.Trace(\"waiting\")\n\t\t\twg.Wait()\n\t\t}\n\n\t\tsched.Trace(\"checking\")\n\t\ts.Root.Check()\n\t\t//fmt.Println(s.Root.Vals)\n\t})\n}\n\nfunc (s *State) Init() (leaves []*Node) {\n\tvar rec func(d int, name string) *Node\n\trec = func(d int, name string) *Node {\n\t\tn := &Node{}\n\t\tn.Name = name\n\t\tif d == 1 {\n\t\t\tleaves = append(leaves, n)\n\t\t\treturn n\n\t\t}\n\t\tfor i := range n.Children {\n\t\t\tchild := rec(d-1, fmt.Sprintf(\"%s/%d\", name, i))\n\t\t\tn.Children[i] = child\n\t\t\tchild.Parent = n\n\t\t\tchild.PSlot = i\n\t\t}\n\t\treturn n\n\t}\n\ts.Root = rec(Depth, \"root\")\n\treturn\n}\n\nfunc (s *State) Reset() {\n\ts.Root.Reset()\n}\n\nfunc (n *Node) Reset() {\n\tif n == nil {\n\t\treturn\n\t}\n\tn.Vals = [Degree + 
1]int{}\n\tfor _, c := range n.Children {\n\t\tc.Reset()\n\t}\n}\n\nfunc (n *Node) Check() int {\n\tif n == nil {\n\t\treturn 0\n\t}\n\n\tfor i, c := range n.Children {\n\t\tcmax := c.Check()\n\t\tif n.Vals[i] != cmax {\n\t\t\tpanic(fmt.Sprintf(\"child max %d != parent slot %d\", cmax, n.Vals[i]))\n\t\t}\n\t}\n\n\treturn n.maxNoSched()\n}\n\nfunc (s *State) worker(node *Node) {\n\t// Pick a node.\n\t// var pick func(n *Node) *Node\n\t// pick = func(n *Node) *Node {\n\t// \tif n.Children[0] == nil {\n\t// \t\treturn n\n\t// \t}\n\t// \tidx := sched.Amb(len(n.Children) + 1)\n\t// \tif idx == 0 {\n\t// \t\treturn n\n\t// \t}\n\t// \treturn pick(n.Children[idx-1])\n\t// }\n\t// node := pick(s.Root)\n\t// sched.Trace(\"picked\")\n\t//\n\t// Not necessary when workers are given different nodes.\n\t// node.Lock.Lock()\n\t// defer node.Lock.Unlock()\n\n\t// Set node's value to 0, 1, or 2 so we can both raise and\n\t// lower the max.\n\tnode.Update(sched.Amb(3))\n\tsched.Trace(\"updated\")\n}\n\nfunc (n *Node) Update(val int) {\n\tnewMax, changed := n.set(Degree, val)\n\tif !changed {\n\t\treturn\n\t}\n\n\tfor n.Parent != nil {\n\tretry:\n\t\tpMax, pChanged := n.Parent.set(n.PSlot, newMax)\n\t\tif checkMax := n.max(); newMax != checkMax {\n\t\t\tsched.Tracef(\"retrying newMax=%d checkMax=%d\", newMax, checkMax)\n\t\t\tnewMax = checkMax\n\t\t\tgoto retry\n\t\t}\n\n\t\tif !pChanged {\n\t\t\tbreak\n\t\t}\n\n\t\tn, newMax = n.Parent, pMax\n\t}\n}\n\nfunc (n *Node) set(slot, val int) (newMax int, changed bool) {\n\tsched.Tracef(\"%s[%d] = %d\", n.Name, slot, val)\n\toldMax := n.maxNoSched()\n\tn.Vals[slot] = val\n\tnewMax = n.maxNoSched()\n\tsched.Sched()\n\n\treturn newMax, newMax != oldMax\n}\n\nfunc (n *Node) max() int {\n\tmax := n.maxNoSched()\n\tsched.Sched()\n\treturn max\n}\n\nfunc (n *Node) maxNoSched() int {\n\tm := 0\n\tfor _, v := range n.Vals {\n\t\tif v > m {\n\t\t\tm = v\n\t\t}\n\t}\n\treturn m\n}\n"
  },
  {
    "path": "go-weave/models/rescan.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// rescan is a model of two concurrent stack re-scanning approaches:\n// transitive mark write barriers, and scan restarting.\n//\n// This model is somewhat limited. The mutator is uninteresting and it\n// doesn't model concurrent write barriers (or the mark quiescence\n// necessary with concurrent write barriers). This model formed the\n// basis for the yuasa model, which is much more complete.\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/aclements/go-misc/go-weave/amb\"\n\t\"github.com/aclements/go-misc/go-weave/weave\"\n)\n\n// writeMarks indicates that the write barrier should transitively\n// mark objects before publishing them.\nconst writeMarks = true\n\n// writeRestarts indicates that the write barrier should reset the\n// stack scan.\nconst writeRestarts = false\n\n// ptr is a memory pointer, as an index into mem. 0 is the nil\n// pointer.\ntype ptr int\n\n// obj is an object in memory. An object in the \"heap\" region of\n// memory must not point to an object in the \"stack\" region of memory.\ntype obj struct {\n\tl, r ptr\n}\n\n// mem is the memory, including both the heap and stacks. mem[0] is\n// unused (it's the nil slot). mem[stackBase:stackBase+numThreads] are\n// the stacks. mem[globalRoot:] is the heap. 
mme[globalRoot] is the\n// global root.\nvar mem []obj\n\nvar marked []bool\n\nconst numThreads = 2\n\nconst stackBase ptr = 1\nconst globalRoot ptr = stackBase + ptr(numThreads)\n\nvar scanClock int\nvar world weave.RWMutex\n\nconst verbose = false\n\nvar sched = weave.Scheduler{Strategy: &amb.StrategyRandom{}}\n\nfunc main() {\n\tsched.Run(func() {\n\t\tif verbose {\n\t\t\tprint(\"start:\")\n\t\t}\n\t\t// Create an ambiguous memory.\n\t\t//\n\t\t// TODO: Tons of these are isomorphic.\n\t\tmem = make([]obj, 6)\n\t\tfor i := 1; i < len(mem); i++ {\n\t\t\tmem[i].l = ambHeapPointer()\n\t\t\tif ptr(i) >= globalRoot {\n\t\t\t\t// For stacks we only use l.\n\t\t\t\tmem[i].r = ambHeapPointer()\n\t\t\t}\n\t\t}\n\t\tmarked = make([]bool, len(mem))\n\t\tif verbose {\n\t\t\tprintMem(mem, marked)\n\t\t}\n\t\tscanClock = 0\n\t\tworld = weave.RWMutex{} // Belt and suspenders.\n\n\t\t// Mark the global root.\n\t\tmark(globalRoot, marked, \"globalRoot\")\n\n\t\t// Start mutators.\n\t\tfor i := 0; i < numThreads; i++ {\n\t\t\ti := i\n\t\t\tsched.Go(func() { mutator(i) })\n\t\t}\n\n\t\t// Re-scan stacks.\n\t\tfor scanClock < numThreads {\n\t\t\tif verbose {\n\t\t\t\tprintln(\"scan\", scanClock)\n\t\t\t}\n\t\t\tscanClock++\n\t\t\tmark(mem[stackBase+ptr(scanClock-1)].l, marked, \"scan\")\n\t\t}\n\n\t\t// Wait for write barriers to complete.\n\t\tworld.Lock()\n\t\tdefer world.Unlock()\n\n\t\t// Check that everything is marked.\n\t\tif verbose {\n\t\t\tprintMem(mem, marked)\n\t\t}\n\t\tcheckmark(globalRoot)\n\t\tfor i := 0; i < numThreads; i++ {\n\t\t\tcheckmark(mem[stackBase+ptr(i)].l)\n\t\t}\n\t})\n}\n\n// ambHeapPointer returns nil or an ambiguous heap pointer.\nfunc ambHeapPointer() ptr {\n\tx := sched.Amb(len(mem) - int(globalRoot) + 1)\n\tif x == 0 {\n\t\treturn 0\n\t}\n\treturn ptr(x-1) + globalRoot\n}\n\n// ambReachableHeapPointer returns an ambiguous reachable heap\n// pointer. 
Note that the object may not be marked.\nfunc ambReachableHeapPointer() ptr {\n\treachable := make([]bool, len(mem))\n\tmark(globalRoot, reachable, \"\")\n\n\tnreachable := 0\n\tfor _, m := range reachable[globalRoot:] {\n\t\tif m {\n\t\t\tnreachable++\n\t\t}\n\t}\n\tx := sched.Amb(nreachable)\n\tfor i, m := range reachable[globalRoot:] {\n\t\tif m {\n\t\t\tif x == 0 {\n\t\t\t\treturn globalRoot + ptr(i)\n\t\t\t}\n\t\t\tx--\n\t\t}\n\t}\n\tpanic(\"not reached\")\n}\n\nfunc wbarrier(slot, val ptr) {\n\t// TODO: Check that GC is still running?\n\n\t// TODO: Need to mark val regardless (but doesn't have to be\n\t// transitive).\n\n\tif val != 0 {\n\t\tif writeMarks {\n\t\t\tfunc() {\n\t\t\t\t// Block STW termination while marking.\n\t\t\t\tworld.RLock()\n\t\t\t\tdefer world.RUnlock()\n\t\t\t\t// TODO: In reality, concurrent marks\n\t\t\t\t// can collide with each other, so we\n\t\t\t\t// need mark quiescence. This doesn't\n\t\t\t\t// model that.\n\t\t\t\tmark(mem[val].l, marked, \"barrier\")\n\t\t\t}()\n\t\t}\n\t\tif writeRestarts {\n\t\t\tif !marked[val] {\n\t\t\t\tscanClock = 0\n\t\t\t}\n\t\t}\n\t}\n\tmem[slot].l = mem[val].l\n\tsched.Sched()\n}\n\nfunc mutator(id int) {\n\tsptr := stackBase + ptr(id)\n\n\t// TODO: nil pointer writes?\n\n\t// Publish our stack pointer to some live heap object.\n\tobj := ambReachableHeapPointer()\n\t//mem[obj].l = mem[sptr].l\n\tif verbose {\n\t\tprint(obj, \".l = \", mem[sptr].l, \"\\n\")\n\t}\n\twbarrier(obj, sptr)\n\tif verbose {\n\t\tprint(obj, \".l = \", mem[sptr].l, \" done\\n\")\n\t}\n\n\t// Read a pointer from the heap. 
No write barrier since this\n\t// is a stack write.\n\tobj = ambReachableHeapPointer()\n\tmem[sptr].l = mem[obj].l\n\tsched.Sched()\n}\n\nfunc mark(p ptr, marked []bool, name string) {\n\tif p == 0 || marked[p] {\n\t\treturn\n\t}\n\tmarked[p] = true\n\tif name != \"\" {\n\t\tif verbose {\n\t\t\tprintln(name, \"marked\", p)\n\t\t}\n\t}\n\tmark(mem[p].l, marked, name)\n\tif name != \"\" {\n\t\tsched.Sched()\n\t}\n\tmark(mem[p].r, marked, name)\n\tif name != \"\" {\n\t\tsched.Sched()\n\t}\n}\n\nfunc checkmark(p ptr) {\n\tcheckmarked := make([]bool, len(mem))\n\tvar mark1 func(p ptr)\n\tmark1 = func(p ptr) {\n\t\tif p == 0 {\n\t\t\treturn\n\t\t}\n\t\tif !marked[p] {\n\t\t\tpanic(fmt.Sprintf(\"object not marked: %d\", p))\n\t\t}\n\t\tif checkmarked[p] {\n\t\t\treturn\n\t\t}\n\t\tcheckmarked[p] = true\n\t\tmark1(mem[p].l)\n\t\tmark1(mem[p].r)\n\t}\n\tmark1(p)\n}\n\nfunc printMem(mem []obj, marked []bool) {\n\tfor i := 1; i < len(mem); i++ {\n\t\tif marked[i] {\n\t\t\tprint(\"*\")\n\t\t} else {\n\t\t\tprint(\" \")\n\t\t}\n\t\tprint(i, \"->\", mem[i].l, \",\", mem[i].r, \" \")\n\t}\n\tprintln()\n}\n"
  },
  {
    "path": "go-weave/models/rwmutex.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// rwmutex is a model of runtime.rwmutex.\npackage main\n\nimport (\n\t\"github.com/aclements/go-misc/go-weave/amb\"\n\t\"github.com/aclements/go-misc/go-weave/weave\"\n)\n\n//var sched = weave.Scheduler{Strategy: &amb.StrategyRandom{}}\nvar sched = weave.Scheduler{Strategy: &amb.StrategyDFS{}}\n\nconst verbose = false\n\nfunc main() {\n\tsched.Run(func() {\n\t\tif verbose {\n\t\t\tprint(\"start:\")\n\t\t}\n\t\tvar rw rwmutex\n\t\tfor i := 0; i < 2; i++ {\n\t\t\tsched.Go(func() {\n\t\t\t\trw.lock()\n\t\t\t\trw.unlock()\n\t\t\t})\n\t\t}\n\t\tfor i := 0; i < 2; i++ {\n\t\t\tsched.Go(func() {\n\t\t\t\trw.rlock()\n\t\t\t\trw.runlock()\n\t\t\t})\n\t\t}\n\t})\n}\n\nfunc atomicXadd(x *uint32, delta int32) uint32 {\n\t*x += uint32(delta)\n\tr := *x\n\tsched.Sched()\n\treturn r\n}\n\nfunc atomicLoad(x *uint32) uint32 {\n\tr := *x\n\tsched.Sched()\n\treturn r\n}\n\nfunc lock(m *weave.Mutex) {\n\tm.Lock()\n}\n\nfunc unlock(m *weave.Mutex) {\n\tm.Unlock()\n}\n\ntype m struct {\n\tschedlink muintptr\n\tpark      weave.Semaphore\n}\n\ntype g struct {\n\tm *m\n}\n\nvar curG = weave.NewTLS()\n\nfunc notesleep(s *weave.Semaphore) {\n\ts.Acquire(1)\n}\n\nfunc notewakeup(s *weave.Semaphore) {\n\ts.Release(1)\n}\n\nfunc noteclear(s *weave.Semaphore) {\n}\n\nfunc getg() *g {\n\tgp, ok := curG.Get().(*g)\n\tif !ok {\n\t\tgp = &g{&m{}}\n\t\tcurG.Set(gp)\n\t}\n\treturn gp\n}\n\ntype rwmutex struct {\n\trLock      weave.Mutex // protects readers, readerPass, writer\n\treaders    muintptr    // list of pending readers\n\treaderPass uint32      // number of readers to skip readers list\n\n\twLock  weave.Mutex // serializes writers\n\twriter muintptr    // pending writer waiting for completing readers\n\n\treaderCount uint32 // number of pending readers\n\treaderWait  uint32 // number of 
departing readers\n\n\t// Self-checking\n\tcheckReaders uint32\n\tcheckWriters uint32\n}\n\ntype muintptr struct {\n\tmp *m\n}\n\nfunc (mp *muintptr) set(x *m) {\n\tmp.mp = x\n}\n\nfunc (mp *muintptr) ptr() *m {\n\treturn mp.mp\n}\n\nfunc systemstack(x func()) {\n\tx()\n}\n\nfunc throw(x string) {\n\tpanic(x)\n}\n\nconst rwmutexMaxReaders = 1 << 30\n\n// rlock locks rw for reading.\nfunc (rw *rwmutex) rlock() {\n\tsched.Tracef(\"rw.readerCount (%d) += 1\", rw.readerCount)\n\tif int32(atomicXadd(&rw.readerCount, 1)) < 0 {\n\t\t// A writer is pending. Park on the reader queue.\n\t\tsched.Trace(\"writer pending\")\n\t\tsystemstack(func() {\n\t\t\tlock(&rw.rLock)\n\t\t\t// Writer may have released while we were\n\t\t\t// getting the lock.\n\t\t\tsched.Trace(\"got rLock\")\n\t\t\tif rw.readerPass > 0 {\n\t\t\t\t// Writer finished.\n\t\t\t\trw.readerPass -= 1\n\t\t\t\tunlock(&rw.rLock)\n\t\t\t} else {\n\t\t\t\t// Queue this reader to be woken by\n\t\t\t\t// the writer.\n\t\t\t\tm := getg().m\n\t\t\t\tm.schedlink = rw.readers\n\t\t\t\trw.readers.set(m)\n\t\t\t\tsched.Trace(\"reader queued\")\n\t\t\t\tunlock(&rw.rLock)\n\t\t\t\tnotesleep(&m.park)\n\t\t\t\tnoteclear(&m.park)\n\t\t\t}\n\t\t})\n\t}\n\n\t// Self-check\n\tif rw.checkWriters != 0 {\n\t\tpanic(\"rlock with writers\")\n\t}\n\trw.checkReaders++\n}\n\n// runlock undoes a single rlock call on rw.\nfunc (rw *rwmutex) runlock() {\n\tif rw.checkReaders <= 0 {\n\t\tpanic(\"runlock with no readers\")\n\t}\n\tif rw.checkWriters != 0 {\n\t\tpanic(\"runlock with writers\")\n\t}\n\trw.checkReaders--\n\n\tsched.Tracef(\"rw.readerCount (%d) -= 1\", rw.readerCount)\n\tif r := int32(atomicXadd(&rw.readerCount, -1)); r < 0 {\n\t\tsched.Tracef(\"r = %d\", r)\n\t\tif r+1 == 0 || r+1 == -rwmutexMaxReaders {\n\t\t\tthrow(\"runlock of unlocked rwmutex\")\n\t\t}\n\t\t// A writer is pending.\n\t\tsched.Tracef(\"rw.readerWait (%d) -= 1\", rw.readerWait)\n\t\tif atomicXadd(&rw.readerWait, -1) == 0 {\n\t\t\t// The last reader unblocks the 
writer.\n\t\t\tsched.Trace(\"last reader\")\n\t\t\tlock(&rw.rLock)\n\t\t\tw := rw.writer.ptr()\n\t\t\tif w != nil {\n\t\t\t\tsched.Trace(\"wake writer\")\n\t\t\t\tnotewakeup(&w.park)\n\t\t\t}\n\t\t\tunlock(&rw.rLock)\n\t\t}\n\t}\n}\n\n// lock locks rw for writing.\nfunc (rw *rwmutex) lock() {\n\t// Resolve competition with other writers.\n\tlock(&rw.wLock)\n\tsched.Trace(\"got wLock\")\n\tm := getg().m\n\t// Announce that there is a pending writer.\n\tsched.Tracef(\"rw.readerCount (%d) -= rwmutexMaxReaders\", rw.readerCount)\n\tr := int32(atomicXadd(&rw.readerCount, -rwmutexMaxReaders)) + rwmutexMaxReaders\n\t// Wait for any active readers to complete.\n\tlock(&rw.rLock) // NEW\n\tif r != 0 {\n\t\tsched.Tracef(\"rw.readerWait (%d) += %d\", rw.readerWait, r)\n\t}\n\tif r != 0 && atomicXadd(&rw.readerWait, r) != 0 {\n\t\tsched.Trace(\"waiting for readers\")\n\t\t// Wait for reader to wake us up.\n\t\tsystemstack(func() {\n\t\t\trw.writer.set(m)\n\t\t\tunlock(&rw.rLock) // NEW\n\t\t\tnotesleep(&m.park)\n\t\t\tnoteclear(&m.park)\n\t\t})\n\t} else {\n\t\tsched.Trace(\"no readers\")\n\t\tunlock(&rw.rLock) // NEW\n\t}\n\n\t// Self-check\n\tif rw.checkReaders != 0 {\n\t\tpanic(\"lock with readers\")\n\t}\n\tif rw.checkWriters != 0 {\n\t\tpanic(\"lock with writers\")\n\t}\n\trw.checkWriters++\n}\n\n// unlock unlocks rw for writing.\nfunc (rw *rwmutex) unlock() {\n\t// Self-check\n\tif rw.checkReaders != 0 {\n\t\tpanic(\"unlock with readers\")\n\t}\n\tif rw.checkWriters != 1 {\n\t\tpanic(\"unlock with wrong writers\")\n\t}\n\trw.checkWriters--\n\n\t// Announce to readers that there is no active writer.\n\tsched.Tracef(\"rw.readerCount (%d) += rwmutexMaxReaders\", rw.readerCount)\n\tr := int32(atomicXadd(&rw.readerCount, rwmutexMaxReaders))\n\tif r >= rwmutexMaxReaders {\n\t\tthrow(\"unlock of unlocked rwmutex\")\n\t}\n\t// Unblock blocked readers.\n\tlock(&rw.rLock)\n\tfor rw.readers.ptr() != nil {\n\t\tsched.Tracef(\"wake reader\")\n\t\treader := 
rw.readers.ptr()\n\t\trw.readers = reader.schedlink\n\t\treader.schedlink.set(nil)\n\t\tnotewakeup(&reader.park)\n\t\tr -= 1\n\t}\n\t// If r > 0, there are pending readers that aren't on the\n\t// queue. Tell them to skip waiting.\n\trw.readerPass += uint32(r)\n\tunlock(&rw.rLock)\n\t// Allow other writers to proceed.\n\tsched.Tracef(\"release wLock\")\n\tunlock(&rw.wLock)\n}\n"
  },
  {
    "path": "go-weave/models/yuasa.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build ignore\n\n// yuasa is a model of several variants of Yuasa-style deletion\n// barriers intended to eliminate stack re-scanning.\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\n\t\"github.com/aclements/go-misc/go-weave/amb\"\n\t\"github.com/aclements/go-misc/go-weave/weave\"\n)\n\ntype barrierType int\n\nconst (\n\t// yuasaBarrier is a Yuasa-style deletion barrier. It requires\n\t// stackBeforeHeap, but does not require rescanStacks.\n\tyuasaBarrier barrierType = iota\n\n\t// dijkstraYuasaBarrier is a combined Dijkstra-style insertion\n\t// barrier and Yuasa-style deletion barrier. It does not\n\t// require stackBeforeHeap or rescanStacks.\n\tdijkstraYuasaBarrier\n\n\t// conditionalDijkstraYuasaBarrier is like\n\t// dijkstraYuasaBarrier before all stacks are blacked, and\n\t// like yuasaBarrier after stacks have been blacked. It does\n\t// not require stackBeforeHeap or rescanStacks.\n\tconditionalDijkstraYuasaBarrier\n\n\t// dijkstraBarrier is a Dijkstra-style insertion barrier. It\n\t// does not require stackBeforeHeap, but it does require\n\t// rescanStacks.\n\tdijkstraBarrier\n)\n\n// barrier indicates the type of write barrier to use.\nconst barrier = conditionalDijkstraYuasaBarrier\n\n// stackBeforeHeap indicates that all stacks must be blackened before\n// any heap objects are blackened.\nconst stackBeforeHeap = false\n\n// rescanStacks indicates that stacks must be re-scanned during STW\n// mark termination.\nconst rescanStacks = false\n\n// ptr is a memory pointer, as an index into mem. 0 is the nil\n// pointer.\ntype ptr int\n\n// obj is an object in memory. An object in the \"global\" or \"heap\"\n// region of memory must not point to an object in the \"stack\" region\n// of memory.\ntype obj [2]ptr\n\n// mem is the memory, including both the heap and stacks. 
mem[0] is\n// unused (it's the nil slot)\n//\n// mem[stackBase+i] for i < numThreads is the stack for thread i.\n//\n// mem[globalRoot] is the global root.\n//\n// mem[heapBase:] is the heap.\nvar mem []obj\n\n// marked is the set of mark bits. marked[i] corresponds to mem[i].\nvar marked []bool\n\n// work is the work list. This is the set of grey objects.\nvar work []ptr\n\nconst numThreads = 2\n\nconst stackBase ptr = 1\nconst globalRoot ptr = stackBase + numThreads\nconst heapBase ptr = globalRoot + 1\nconst heapCount = 3\n\nvar world weave.RWMutex\nvar stackLocks [numThreads]weave.Mutex\n\n// rootCount is the number of unscanned roots.\nvar rootCount int\n\nconst verbose = false\n\nvar sched = weave.Scheduler{Strategy: &amb.StrategyRandom{}}\n\nfunc main() {\n\tsched.Run(func() {\n\t\tif verbose {\n\t\t\tprint(\"start:\")\n\t\t}\n\t\t// Create an ambiguous memory.\n\t\t//\n\t\t// TODO: Tons of these are isomorphic.\n\t\tmem = make([]obj, heapBase+heapCount)\n\t\tfor i := 1; i < len(mem); i++ {\n\t\t\tmem[i] = obj{ambHeapPointer(), ambHeapPointer()}\n\t\t}\n\t\tmarked = make([]bool, len(mem))\n\t\tif verbose {\n\t\t\tprintln(stringMem(mem, marked))\n\t\t}\n\t\tsched.Tracef(\"memory: %s\", stringMem(mem, marked))\n\t\tworld = weave.RWMutex{} // Belt and suspenders.\n\t\tfor i := range stackLocks {\n\t\t\tstackLocks[i] = weave.Mutex{}\n\t\t}\n\t\trootCount = numThreads + 1\n\n\t\t// Start mutators.\n\t\tfor i := 0; i < numThreads; i++ {\n\t\t\ti := i\n\t\t\tsched.Go(func() { mutator(i) })\n\t\t}\n\n\t\tif stackBeforeHeap {\n\t\t\tsched.Trace(\"scanning stacks\")\n\t\t\t// Scan stacks and global roots. Complete this\n\t\t\t// before allowing any blackening of the heap.\n\t\t\tfor i := stackBase; i < stackBase+numThreads; i++ {\n\t\t\t\tscan(i)\n\t\t\t\tmarked[i] = true\n\t\t\t}\n\t\t\tscan(globalRoot)\n\t\t\tmarked[globalRoot] = true\n\t\t\tsched.Trace(\"done scanning stacks\")\n\t\t} else {\n\t\t\t// Grey stacks and global roots. 
Drain will\n\t\t\t// scan them.\n\t\t\tfor i := stackBase; i < stackBase+numThreads; i++ {\n\t\t\t\tshade(i)\n\t\t\t}\n\t\t\tshade(globalRoot)\n\t\t}\n\n\t\t// Blacken heap.\n\t\tdrain()\n\n\t\t// Wait for write barriers to complete.\n\t\tworld.Lock()\n\t\tdefer world.Unlock()\n\n\t\tif rescanStacks {\n\t\t\tsched.Trace(\"rescanning stacks\")\n\t\t\t// Rescan stacks. (The write barrier applies\n\t\t\t// to globals, so we don't need to rescan\n\t\t\t// globalRoot.)\n\t\t\tfor i := stackBase; i < stackBase+numThreads; i++ {\n\t\t\t\tmarked[i] = false\n\t\t\t\tshade(i)\n\t\t\t}\n\t\t\tdrain()\n\t\t\tsched.Trace(\"done rescanning stacks\")\n\t\t}\n\n\t\t// Check that everything is marked.\n\t\tif verbose {\n\t\t\tprintln(stringMem(mem, marked))\n\t\t}\n\t\tsched.Tracef(\"memory: %s\", stringMem(mem, marked))\n\t\tcheckmark()\n\t})\n}\n\ntype pointerSet int\n\nconst (\n\t// pointerNil indicates that ambPointer can return a nil\n\t// pointer.\n\tpointerNil pointerSet = 1 << iota\n\n\t// pointerStack indicates that ambPointer can return a pointer\n\t// to the stack.\n\tpointerStack\n\n\t// pointerReachable indicates that ambPointer can return a\n\t// pointer to a reachable heap or global object.\n\tpointerReachable\n\n\t// pointerHeap indicates that ambPointer can return a pointer\n\t// to any global or heap object.\n\tpointerHeap\n)\n\n// ambPointer returns an ambiguous pointer from the union of the\n// specified sets. 
If ps&(pointerStack|pointerReachable) != 0, tid\n// must specify the thread ID of the stack.\nfunc ambPointer(ps pointerSet, tid int) ptr {\n\tif ps&pointerReachable == 0 {\n\t\t// Easy/fast case.\n\t\tcount := 0\n\t\tif ps&pointerNil != 0 {\n\t\t\tcount++\n\t\t}\n\t\tif ps&pointerStack != 0 {\n\t\t\tcount++\n\t\t}\n\t\tif ps&pointerHeap != 0 {\n\t\t\tcount += 1 + heapCount\n\t\t}\n\t\tx := sched.Amb(count)\n\t\tif ps&pointerNil != 0 {\n\t\t\tif x == 0 {\n\t\t\t\treturn 0\n\t\t\t}\n\t\t\tx--\n\t\t}\n\t\tif ps&pointerStack != 0 {\n\t\t\tif x == 0 {\n\t\t\t\treturn stackBase + ptr(tid)\n\t\t\t}\n\t\t\tx--\n\t\t}\n\t\tif x == 0 {\n\t\t\treturn globalRoot\n\t\t}\n\t\treturn heapBase + ptr(x-1)\n\t}\n\n\t// Tricky case. Create a mask of the pointers we're interested in.\n\tmarked := make([]bool, len(mem))\n\tmark(globalRoot, marked)\n\tmark(stackBase+ptr(tid), marked)\n\tif ps&pointerNil != 0 {\n\t\tmarked[0] = true\n\t}\n\tif ps&pointerStack == 0 {\n\t\tmarked[stackBase+ptr(tid)] = false\n\t}\n\n\t// Select a marked pointer.\n\tnmarked := 0\n\tfor _, m := range marked {\n\t\tif m {\n\t\t\tnmarked++\n\t\t}\n\t}\n\tx := sched.Amb(nmarked)\n\tfor i, m := range marked {\n\t\tif m {\n\t\t\tif x == 0 {\n\t\t\t\treturn ptr(i)\n\t\t\t}\n\t\t\tx--\n\t\t}\n\t}\n\tpanic(\"not reachable\")\n}\n\n// ambHeapPointer returns nil or an ambiguous heap or global pointer.\nfunc ambHeapPointer() ptr {\n\treturn ambPointer(pointerNil|pointerHeap, -1)\n}\n\n// scan scans obj, shading objects that obj re\nfunc scan(obj ptr) {\n\tsched.Tracef(\"scan(%v)\", obj)\n\tif stackBase <= obj && obj < stackBase+numThreads {\n\t\tstackLocks[obj-stackBase].Lock()\n\t\tdefer stackLocks[obj-stackBase].Unlock()\n\t}\n\tfor i := range mem[obj] {\n\t\tp := mem[obj][i]\n\t\tsched.Sched()\n\t\tshade(p)\n\t}\n\tif stackBase <= obj && obj < stackBase+numThreads || obj == globalRoot {\n\t\trootCount--\n\t\tsched.Tracef(\"roots remaining = %d\", rootCount)\n\t}\n}\n\n// shade makes obj grey if it is white.\nfunc 
shade(obj ptr) {\n\tif obj != 0 && !marked[obj] {\n\t\tsched.Tracef(\"shade(%v)\", obj)\n\t\tmarked[obj] = true\n\t\twork = append(work, obj)\n\t}\n}\n\n// drain scans objects in the work queue until the queue is empty.\nfunc drain() {\n\tfor len(work) > 0 {\n\t\t// Pick an arbitrary object to scan.\n\t\twhich := sched.Amb(len(work))\n\t\tp := work[which]\n\t\tcopy(work[which:], work[which+1:])\n\t\twork = work[:len(work)-1]\n\n\t\tscan(p)\n\t}\n}\n\n// writePointer implements obj[slot] = val.\nfunc writePointer(obj ptr, slot int, val ptr) {\n\t// TODO: Check that GC is still running?\n\n\t// Synchronize with STW. This blocks STW from happening while\n\t// we're in the barrier and blocks this goroutine if we're\n\t// already in STW.\n\tworld.RLock()\n\tdefer world.RUnlock()\n\n\tif obj == 0 {\n\t\tpanic(\"nil pointer write\")\n\t}\n\n\tif stackBase <= obj && obj < stackBase+numThreads {\n\t\tmem[obj][slot] = val\n\t\tsched.Tracef(\"stack write %v[%d] = %v\", obj, slot, val)\n\t\tsched.Sched()\n\t\treturn\n\t}\n\n\tsched.Tracef(\"start %v[%d] = %v\", obj, slot, val)\n\n\tswitch barrier {\n\tcase yuasaBarrier:\n\t\told := mem[obj][slot]\n\t\tsched.Sched()\n\t\tshade(old)\n\n\tcase dijkstraYuasaBarrier:\n\t\told := mem[obj][slot]\n\t\tsched.Sched()\n\t\tshade(old)\n\t\tshade(val)\n\n\tcase conditionalDijkstraYuasaBarrier:\n\t\told := mem[obj][slot]\n\t\tsched.Sched()\n\t\tshade(old)\n\t\tif rootCount > 0 {\n\t\t\tshade(val)\n\t\t}\n\n\tcase dijkstraBarrier:\n\t\tshade(val)\n\t}\n\n\tmem[obj][slot] = val\n\tsched.Tracef(\"done %v[%d] = %v\", obj, slot, val)\n\tsched.Sched()\n}\n\n// mutator is a single mutator goroutine running on stack stackBase+tid.\n// It shuffles pointers between the heap and stack.\nfunc mutator(tid int) {\n\tstackptr := stackBase + ptr(tid)\n\n\tfor i := 0; i < 2; i++ {\n\t\t// Take the stack lock to indicate that we're not at a\n\t\t// safe point. 
There's no safe point between reading\n\t\t// src and writing pointer since in the model we can't\n\t\t// communicate the pointer we're looking at to the GC.\n\t\t//\n\t\t// Somewhat surprisingly, it's actually necessary to\n\t\t// model this. Otherwise stack writes that race with\n\t\t// the stack scan can hide pointers.\n\t\tstackLocks[tid].Lock()\n\n\t\t// Write a nil, global, or heap pointer to the stack, global,\n\t\t// or heap, or a stack pointer to the stack.\n\t\tsrc := ambPointer(pointerNil|pointerStack|pointerReachable, tid)\n\t\tsched.Sched()\n\t\tvar dst ptr\n\t\tif src == stackptr {\n\t\t\t// Stack pointers can only be written to the stack.\n\t\t\tdst = stackptr\n\t\t} else {\n\t\t\t// Non-stack pointers can be written to stack, global,\n\t\t\t// or heap.\n\t\t\tdst = ambPointer(pointerStack|pointerReachable, tid)\n\t\t}\n\t\twritePointer(dst, sched.Amb(2), src)\n\n\t\t// We're at a safe point again.\n\t\tstackLocks[tid].Unlock()\n\t}\n}\n\n// mark sets marked[i] for every object i reachable from p (including\n// p itself). 
This is NOT preemptible.\nfunc mark(p ptr, marked []bool) {\n\tif p == 0 || marked[p] {\n\t\treturn\n\t}\n\tmarked[p] = true\n\tfor i := range mem[p] {\n\t\tmark(mem[p][i], marked)\n\t}\n}\n\n// checkmark checks that all objects reachable from the roots are\n// marked.\nfunc checkmark() {\n\tcheckmarked := make([]bool, len(mem))\n\tfor i := stackBase; i < stackBase+numThreads; i++ {\n\t\tmark(i, checkmarked)\n\t}\n\tmark(globalRoot, checkmarked)\n\n\tfor i := range marked {\n\t\tif checkmarked[i] && !marked[i] {\n\t\t\tpanic(fmt.Sprintf(\"object not marked: %v\", i))\n\t\t}\n\t}\n}\n\n// stringMem stringifies a memory with marks.\nfunc stringMem(mem []obj, marked []bool) string {\n\tvar buf bytes.Buffer\n\tfor i := 1; i < len(mem); i++ {\n\t\tif marked[i] {\n\t\t\tbuf.WriteString(\"*\")\n\t\t} else {\n\t\t\tbuf.WriteString(\" \")\n\t\t}\n\t\tfmt.Fprint(&buf, i, \"->\", mem[i][0], \",\", mem[i][1], \" \")\n\t}\n\treturn buf.String()\n}\n"
  },
  {
    "path": "go-weave/weave/atomic.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage weave\n\ntype AtomicInt32 struct {\n\tv int32\n}\n\nfunc (a *AtomicInt32) Add(delta int32) (new int32) {\n\ta.v += delta\n\tnew = a.v\n\tglobalSched.Sched()\n\treturn new\n}\n\nfunc (a *AtomicInt32) CompareAndSwap(old, new int32) (swapped bool) {\n\tswapped = a.v == old\n\tif swapped {\n\t\ta.v = new\n\t}\n\tglobalSched.Sched()\n\treturn\n}\n\nfunc (a *AtomicInt32) Load() int32 {\n\tv := a.v\n\tglobalSched.Sched()\n\treturn v\n}\n\nfunc (a *AtomicInt32) Store(val int32) {\n\ta.v = val\n\tglobalSched.Sched()\n}\n\nfunc (a *AtomicInt32) Swap(new int32) (old int32) {\n\told, a.v = a.v, new\n\tglobalSched.Sched()\n\treturn\n}\n"
  },
  {
    "path": "go-weave/weave/mutex.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage weave\n\nimport \"fmt\"\n\ntype Mutex struct {\n\tlocked  bool\n\twaiters []*thread\n}\n\nfunc (m *Mutex) Lock() {\n\tif !m.locked {\n\t\tm.locked = true\n\t\treturn\n\t}\n\tthis := globalSched.curThread\n\tm.waiters = append(m.waiters, this)\n\tthis.block(m.reset)\n}\n\nfunc (m *Mutex) Unlock() {\n\tif !m.locked {\n\t\tpanic(\"attempt to Unlock unlocked Mutex\")\n\t}\n\tif len(m.waiters) == 0 {\n\t\tm.locked = false\n\t} else {\n\t\t// Pick an arbitrary thread to wake up.\n\t\tnext := globalSched.Amb(len(m.waiters))\n\t\tt := m.waiters[next]\n\t\tm.waiters[next] = m.waiters[len(m.waiters)-1]\n\t\tm.waiters = m.waiters[:len(m.waiters)-1]\n\t\tt.unblock()\n\t}\n\tglobalSched.Sched()\n}\n\nfunc (m *Mutex) reset() {\n\t*m = Mutex{}\n}\n\ntype RWMutex struct {\n\tr, w             int\n\treaders, writers []*thread\n}\n\nfunc (rw *RWMutex) Lock() {\n\tif rw.r == 0 && rw.w == 0 {\n\t\trw.w++\n\t\treturn\n\t}\n\tthis := globalSched.curThread\n\trw.writers = append(rw.writers, this)\n\tthis.block(rw.reset)\n}\n\nfunc (rw *RWMutex) RLock() {\n\tif rw.w == 0 {\n\t\trw.r++\n\t\treturn\n\t}\n\tthis := globalSched.curThread\n\trw.readers = append(rw.readers, this)\n\tthis.block(rw.reset)\n}\n\nfunc (rw *RWMutex) reset() {\n\t*rw = RWMutex{}\n}\n\nfunc (rw *RWMutex) Unlock() {\n\trw.w--\n\trw.release()\n}\n\nfunc (rw *RWMutex) RUnlock() {\n\trw.r--\n\trw.release()\n}\n\nfunc (rw *RWMutex) release() {\n\tif rw.w != 0 {\n\t\tpanic(fmt.Sprintf(\"bad RWMutex writer count: %d\", rw.w))\n\t}\n\tif len(rw.readers) > 0 {\n\t\t// Wake all readers.\n\t\trw.r += len(rw.readers)\n\t\tfor _, t := range rw.readers {\n\t\t\tt.unblock()\n\t\t}\n\t\trw.readers = rw.readers[:0]\n\t} else if rw.r == 0 && len(rw.writers) > 0 {\n\t\t// Wake one writer.\n\t\trw.w++\n\t\tnext := globalSched.Amb(len(rw.writers))\n\t\tt 
:= rw.writers[next]\n\t\trw.writers[next] = rw.writers[len(rw.writers)-1]\n\t\trw.writers = rw.writers[:len(rw.writers)-1]\n\t\tt.unblock()\n\t}\n\tglobalSched.Sched()\n}\n"
  },
  {
    "path": "go-weave/weave/sema.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage weave\n\n// Semaphore is a FIFO counted semaphore.\ntype Semaphore struct {\n\tavail   int\n\twait    *semwait\n\twaitEnd *semwait\n}\n\ntype semwait struct {\n\tn    int\n\tthr  *thread\n\tnext *semwait\n}\n\nfunc (s *Semaphore) Acquire(n int) {\n\tif s.avail >= n {\n\t\ts.avail -= n\n\t\treturn\n\t}\n\tthis := globalSched.curThread\n\tw := &semwait{n, this, nil}\n\tif s.waitEnd != nil {\n\t\ts.waitEnd.next = w\n\t} else {\n\t\ts.wait = w\n\t}\n\ts.waitEnd = w\n\tthis.block(s.reset)\n}\n\nfunc (s *Semaphore) Release(n int) {\n\ts.avail += n\n\tany := false\n\tfor s.wait != nil && s.avail >= s.wait.n {\n\t\tany = true\n\t\tw := s.wait\n\t\ts.wait = w.next\n\t\tif s.wait == nil {\n\t\t\ts.waitEnd = nil\n\t\t}\n\t\ts.avail -= w.n\n\t\tw.thr.unblock()\n\t}\n\tif any {\n\t\tglobalSched.Sched()\n\t}\n}\n\nfunc (s *Semaphore) reset() {\n\t*s = Semaphore{}\n}\n"
  },
  {
    "path": "go-weave/weave/tls.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage weave\n\ntype TLS struct {\n\t_ byte\n}\n\nfunc NewTLS() *TLS {\n\treturn &TLS{}\n}\n\nfunc (v *TLS) Get() interface{} {\n\treturn globalSched.curThread.tls[v]\n}\n\nfunc (v *TLS) Set(val interface{}) {\n\tm := globalSched.curThread.tls\n\tif m == nil {\n\t\tm = make(map[*TLS]interface{})\n\t\tglobalSched.curThread.tls = m\n\t}\n\tm[v] = val\n}\n"
  },
  {
    "path": "go-weave/weave/trace.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage weave\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n)\n\ntype traceEntry struct {\n\ttid int\n\tmsg string\n}\n\nfunc (s *Scheduler) Trace(msg string) {\n\ts.trace = append(s.trace, traceEntry{s.curThread.id, msg})\n}\n\nfunc (s *Scheduler) Tracef(msg string, args ...interface{}) {\n\ts.trace = append(s.trace, traceEntry{s.curThread.id, fmt.Sprintf(msg, args...)})\n}\n\ntype errorWithTrace struct {\n\terr   interface{}\n\ttrace []traceEntry\n}\n\nfunc (e errorWithTrace) Error() string {\n\tif len(e.trace) == 0 {\n\t\treturn fmt.Sprint(e.err)\n\t}\n\n\tvar buf bytes.Buffer\n\tfmt.Fprintf(&buf, \"%v\\n\", e.err)\n\tfmt.Fprintf(&buf, \"trace:\")\n\tfor _, ent := range e.trace {\n\t\tfmt.Fprintf(&buf, \"\\n  T%d %s\", ent.tid, ent.msg)\n\t}\n\treturn buf.String()\n}\n"
  },
  {
    "path": "go-weave/weave/waitgroup.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage weave\n\ntype WaitGroup struct {\n\tn       int\n\twaiters []*thread\n}\n\nfunc (g *WaitGroup) Add(delta int) {\n\tg.n += delta\n\tif g.n == 0 {\n\t\twaiters := g.waiters\n\t\tg.waiters = nil\n\t\tfor _, t := range waiters {\n\t\t\tt.unblock()\n\t\t}\n\t}\n}\n\nfunc (g *WaitGroup) Done() {\n\tg.Add(-1)\n}\n\nfunc (g *WaitGroup) Wait() {\n\tif g.n == 0 {\n\t\tglobalSched.Sched()\n\t\treturn\n\t}\n\tthis := globalSched.curThread\n\tg.waiters = append(g.waiters, this)\n\tthis.block(g.reset)\n}\n\nfunc (g *WaitGroup) reset() {\n\t*g = WaitGroup{}\n}\n"
  },
  {
    "path": "go-weave/weave/weave.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage weave\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com/aclements/go-misc/go-weave/amb\"\n)\n\n// TODO: Implement simple partial order reduction. If the next actions\n// on T1 and T2 commute, then we know that [T1,T2,...] and [T2,T1,...]\n// are equivalent (however, we can't just cut off T2, since we still\n// need [T2,T2,...]).\n\n// TODO: Implement a PCT scheduler (https://www.microsoft.com/en-us/research/publication/a-randomized-scheduler-with-probabilistic-guarantees-of-finding-bugs/)\n\ntype Scheduler struct {\n\tStrategy amb.Strategy\n\n\tas amb.Scheduler\n\n\tnextid    int\n\trunnable  []*thread\n\tblocked   []*thread\n\tcurThread *thread\n\tgoErr     interface{}\n\n\t// wakeSched wakes the scheduler to select the next thread to\n\t// run. The waking thread must immediately block on\n\t// thread.wake or exit.\n\twakeSched chan void\n\n\ttrace []traceEntry\n}\n\nvar globalSched *Scheduler\n\ntype void struct{}\n\ntype thread struct {\n\tsched   *Scheduler\n\tid      int\n\tindex   int // Index in Scheduler.runnable or .blocked\n\tblocked bool\n\n\ttls map[*TLS]interface{}\n\n\twake chan void // Send void{} to wake this thread\n}\n\nfunc (t *thread) String() string {\n\treturn fmt.Sprintf(\"T%d\", t.id)\n}\n\nconst debug = false\n\nfunc (s *Scheduler) newThread() *thread {\n\tthr := &thread{s, s.nextid, -1, false, nil, make(chan void)}\n\ts.nextid++\n\tif thr.id != -1 {\n\t\tthr.index = len(s.runnable)\n\t\ts.runnable = append(s.runnable, thr)\n\t}\n\treturn thr\n}\n\nfunc (s *Scheduler) Run(main func()) {\n\tif globalSched != nil {\n\t\tpanic(\"only one weave.Scheduler can be active at a time\")\n\t}\n\tglobalSched = s\n\tdefer func() { globalSched = nil }()\n\n\ts.as = amb.Scheduler{Strategy: s.Strategy}\n\n\ts.as.Run(func() {\n\t\t// Initialize state.\n\t\ts.nextid = 
0\n\t\ts.runnable = s.runnable[:0]\n\t\ts.blocked = s.blocked[:0]\n\t\ts.curThread = nil\n\t\ts.goErr = nil\n\t\ts.wakeSched = make(chan void)\n\t\ts.trace = nil\n\t\ts.goNoSched(main)\n\t\ts.scheduler()\n\t\tif s.goErr != nil {\n\t\t\tpanic(errorWithTrace{s.goErr, s.trace})\n\t\t}\n\t\tif len(s.blocked) != 0 {\n\t\t\tpanic(errorWithTrace{fmt.Sprintf(\"threads asleep: %s\", s.blocked), s.trace})\n\t\t}\n\t\tif debug {\n\t\t\tfmt.Println(\"run done\")\n\t\t}\n\t})\n}\n\nfunc (s *Scheduler) goNoSched(f func()) {\n\tthr := s.newThread()\n\tgo func() {\n\t\tdefer func() {\n\t\t\tgoErr := recover()\n\n\t\t\tif debug {\n\t\t\t\tif goErr == threadAbort {\n\t\t\t\t\tfmt.Printf(\"%v aborted\\n\", thr)\n\t\t\t\t} else if goErr != nil {\n\t\t\t\t\tfmt.Printf(\"%v panicked: %v\\n\", thr, goErr)\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Printf(\"%v exiting normally\\n\", thr)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Remove this thread from runnable.\n\t\t\ts.runnable[thr.index] = s.runnable[len(s.runnable)-1]\n\t\t\ts.runnable[thr.index].index = thr.index\n\t\t\ts.runnable = s.runnable[:len(s.runnable)-1]\n\n\t\t\t// If this is a thread abort, notify the\n\t\t\t// scheduler that we're done aborting and\n\t\t\t// exit.\n\t\t\tif goErr == threadAbort {\n\t\t\t\ts.wakeSched <- void{}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// If we're panicking, report the error so the\n\t\t\t// scheduler can shut down this execution.\n\t\t\t//\n\t\t\t// TODO: Capture the stack trace.\n\t\t\tif goErr != nil {\n\t\t\t\tif s.goErr == nil {\n\t\t\t\t\ts.goErr = goErr\n\t\t\t\t}\n\t\t\t\ts.wakeSched <- void{}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\t// Otherwise, this is a regular thread exit.\n\t\t\tclose(thr.wake)\n\t\t\ts.wakeSched <- void{}\n\t\t}()\n\t\tif debug {\n\t\t\tfmt.Printf(\"%v started\\n\", thr)\n\t\t}\n\t\tthr.desched()\n\t\tf()\n\t}()\n}\n\nfunc (s *Scheduler) Go(f func()) {\n\ts.goNoSched(f)\n\ts.Sched()\n}\n\nvar threadAbort = errors.New(\"thread aborted because of panic in another thread\")\n\n// scheduler runs 
on the top-level thread and coordinates which thread\n// to execute next.\nfunc (s *Scheduler) scheduler() {\n\tfor len(s.runnable) > 0 {\n\t\t// Pick a thread to run. If we're aborting, we just\n\t\t// pick runnable[0], since it's not useful to explore\n\t\t// this, and we might be aborting because amb\n\t\t// terminated this path anyway.\n\t\tvar tid int\n\t\tif s.goErr == nil {\n\t\t\t// Amb may panic with PathTerminated.\n\t\t\tfunc() {\n\t\t\t\tdefer func() {\n\t\t\t\t\terr := recover()\n\t\t\t\t\tif err == amb.PathTerminated {\n\t\t\t\t\t\ts.goErr = err\n\t\t\t\t\t} else if err != nil {\n\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t\ttid = s.as.Amb(len(s.runnable))\n\t\t\t}()\n\t\t}\n\t\ts.curThread = s.runnable[tid]\n\n\t\tif debug {\n\t\t\tfmt.Printf(\"scheduling %v from %v\\n\", s.curThread, s.runnable)\n\t\t}\n\n\t\t// Switch to that thread.\n\t\ts.curThread.wake <- void{}\n\n\t\t// Wait for thread to deschedule.\n\t\t<-s.wakeSched\n\t\tif s.goErr != nil {\n\t\t\t// This state will signal all threads to exit,\n\t\t\t// but we have to wake blocked threads so they\n\t\t\t// can exit, too.\n\t\t\ts.runnable = append(s.runnable, s.blocked...)\n\t\t\ts.blocked = nil\n\t\t}\n\t}\n}\n\nfunc (s *Scheduler) Sched() {\n\tthis := s.curThread\n\ts.wakeSched <- void{}\n\tthis.desched()\n}\n\nfunc (t *thread) desched() {\n\t<-t.wake\n\tif t.sched.goErr != nil {\n\t\t// We're shutting down this execution.\n\t\tpanic(threadAbort)\n\t}\n}\n\nfunc (s *Scheduler) Amb(n int) int {\n\treturn s.as.Amb(n)\n}\n\nfunc (t *thread) block(abortf func()) {\n\tif t.blocked {\n\t\tpanic(\"thread blocked multiple times\")\n\t}\n\tt.blocked = true\n\n\ts := t.sched\n\ts.runnable[t.index] = s.runnable[len(s.runnable)-1]\n\ts.runnable[t.index].index = t.index\n\ts.runnable = s.runnable[:len(s.runnable)-1]\n\n\tt.index = len(s.blocked)\n\ts.blocked = append(s.blocked, t)\n\n\tif abortf != nil {\n\t\tdefer func() {\n\t\t\tif abortf != nil 
{\n\t\t\t\tabortf()\n\t\t\t}\n\t\t}()\n\t}\n\tt.sched.Sched()\n\tabortf = nil\n}\n\nfunc (t *thread) unblock() {\n\tif !t.blocked {\n\t\tpanic(\"thread unblocked while not blocked\")\n\t}\n\tt.blocked = false\n\n\ts := t.sched\n\ts.blocked[t.index] = s.blocked[len(s.blocked)-1]\n\ts.blocked[t.index].index = t.index\n\ts.blocked = s.blocked[:len(s.blocked)-1]\n\n\tt.index = len(s.runnable)\n\ts.runnable = append(s.runnable, t)\n}\n"
  },
  {
    "path": "go.mod",
    "content": "module github.com/aclements/go-misc\n\ngo 1.17\n\nrequire (\n\tgithub.com/aclements/go-gg v0.0.0-20170323211221-abd1f791f5ee\n\tgithub.com/aclements/go-moremath v0.0.0-20210112150236-f10218a38794\n\tgolang.org/x/build v0.0.0-20210804225706-d1bc548deb19\n\tgolang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97\n\tgolang.org/x/image v0.0.0-20210628002857-a66eb6448b8d\n\tgolang.org/x/tools v0.1.5\n)\n\nrequire (\n\tgithub.com/ajstarks/svgo v0.0.0-20210406150507-75cfd577ce75 // indirect\n\tgithub.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac // indirect\n\tgithub.com/gonum/floats v0.0.0-20181209220543-c233463c7e82 // indirect\n\tgithub.com/gonum/internal v0.0.0-20181124074243-f884aa714029 // indirect\n\tgithub.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9 // indirect\n\tgithub.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9 // indirect\n\tgolang.org/x/mod v0.4.2 // indirect\n\tgolang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c // indirect\n\tgolang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect\n\tgolang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect\n)\n"
  },
  {
    "path": "go.sum",
    "content": "cloud.google.com/go v0.0.0-20170206221025-ce650573d812/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=\ncloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=\ncloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=\ncloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=\ncloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=\ncloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=\ncloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=\ncloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=\ncloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=\ncloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=\ncloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=\ncloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=\ncloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=\ncloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=\ncloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=\ncloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=\ncloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=\ncloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=\ncloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=\ncloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=\ncloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=\ncloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=\ncloud.google.com/go/bigquery v1.8.0/go.mod 
h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=\ncloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=\ncloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=\ncloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=\ncloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=\ncloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=\ncloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=\ncloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=\ncloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=\ncloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=\ncloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=\ncloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=\ncontrib.go.opencensus.io/exporter/prometheus v0.3.0/go.mod h1:rpCPVQKhiyH8oomWgm34ZmgIdZa8OVYO5WAIygPbBBE=\ncontrib.go.opencensus.io/exporter/stackdriver v0.13.5/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc=\ndmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=\ngithub.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=\ngithub.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=\ngithub.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20190129172621-c8b1d7a94ddf/go.mod h1:aJ4qN3TfrelA6NZ6AXsXRfmEVaYin3EDbSPJrKS8OXo=\ngithub.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=\ngithub.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=\ngithub.com/Shopify/sarama 
v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=\ngithub.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=\ngithub.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=\ngithub.com/aclements/go-gg v0.0.0-20170118225347-6dbb4e4fefb0/go.mod h1:55qNq4vcpkIuHowELi5C8e+1yUHtoLoOUR9QU5j7Tes=\ngithub.com/aclements/go-gg v0.0.0-20170323211221-abd1f791f5ee h1:KJgh99JlYRhfgHtb7XyhAZSJMdfkjVmo3PP7XO1/HO8=\ngithub.com/aclements/go-gg v0.0.0-20170323211221-abd1f791f5ee/go.mod h1:55qNq4vcpkIuHowELi5C8e+1yUHtoLoOUR9QU5j7Tes=\ngithub.com/aclements/go-moremath v0.0.0-20161014184102-0ff62e0875ff/go.mod h1:idZL3yvz4kzx1dsBOAC+oYv6L92P1oFEhUXUB1A/lwQ=\ngithub.com/aclements/go-moremath v0.0.0-20210112150236-f10218a38794 h1:xlwdaKcTNVW4PtpQb8aKA4Pjy0CdJHEqvFbAnvR5m2g=\ngithub.com/aclements/go-moremath v0.0.0-20210112150236-f10218a38794/go.mod h1:7e+I0LQFUI9AXWxOfsQROs9xPhoJtbsyWcjJqDd4KPY=\ngithub.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=\ngithub.com/ajstarks/svgo v0.0.0-20210406150507-75cfd577ce75 h1:tuK1xIp+jrEEF0l3xXab78w89ilYr0Am170KdSml2xc=\ngithub.com/ajstarks/svgo v0.0.0-20210406150507-75cfd577ce75/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=\ngithub.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=\ngithub.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=\ngithub.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=\ngithub.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=\ngithub.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=\ngithub.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod 
h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=\ngithub.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=\ngithub.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=\ngithub.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=\ngithub.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=\ngithub.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=\ngithub.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=\ngithub.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=\ngithub.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=\ngithub.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=\ngithub.com/aws/aws-sdk-go v1.30.15/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=\ngithub.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=\ngithub.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=\ngithub.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=\ngithub.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=\ngithub.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=\ngithub.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=\ngithub.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=\ngithub.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=\ngithub.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=\ngithub.com/cespare/xxhash/v2 
v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=\ngithub.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=\ngithub.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=\ngithub.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=\ngithub.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=\ngithub.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=\ngithub.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=\ngithub.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=\ngithub.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=\ngithub.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=\ngithub.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=\ngithub.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=\ngithub.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=\ngithub.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=\ngithub.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=\ngithub.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=\ngithub.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod 
h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=\ngithub.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=\ngithub.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=\ngithub.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=\ngithub.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=\ngithub.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=\ngithub.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=\ngithub.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=\ngithub.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=\ngithub.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=\ngithub.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=\ngithub.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=\ngithub.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=\ngithub.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=\ngithub.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=\ngithub.com/gliderlabs/ssh v0.3.3/go.mod h1:ZSS+CUoKHDrqVakTfTWUlKSr9MtMFkC4UvtQKD7O914=\ngithub.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=\ngithub.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=\ngithub.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=\ngithub.com/go-kit/kit v0.8.0/go.mod 
h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=\ngithub.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=\ngithub.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=\ngithub.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=\ngithub.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=\ngithub.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=\ngithub.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=\ngithub.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=\ngithub.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=\ngithub.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=\ngithub.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=\ngithub.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=\ngithub.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=\ngithub.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=\ngithub.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=\ngithub.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=\ngithub.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=\ngithub.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=\ngithub.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=\ngithub.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=\ngithub.com/golang/mock v1.2.0/go.mod 
h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=\ngithub.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=\ngithub.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=\ngithub.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=\ngithub.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=\ngithub.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=\ngithub.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=\ngithub.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=\ngithub.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=\ngithub.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=\ngithub.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=\ngithub.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=\ngithub.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=\ngithub.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=\ngithub.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=\ngithub.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=\ngithub.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=\ngithub.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=\ngithub.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac 
h1:Q0Jsdxl5jbxouNs1TQYt0gxesYMU4VXRbsTlgDloZ50=\ngithub.com/gonum/blas v0.0.0-20181208220705-f22b278b28ac/go.mod h1:P32wAyui1PQ58Oce/KYkOqQv8cVw1zAapXOl+dRFGbc=\ngithub.com/gonum/floats v0.0.0-20181209220543-c233463c7e82 h1:EvokxLQsaaQjcWVWSV38221VAK7qc2zhaO17bKys/18=\ngithub.com/gonum/floats v0.0.0-20181209220543-c233463c7e82/go.mod h1:PxC8OnwL11+aosOB5+iEPoV3picfs8tUpkVd0pDo+Kg=\ngithub.com/gonum/internal v0.0.0-20181124074243-f884aa714029 h1:8jtTdc+Nfj9AR+0soOeia9UZSvYBvETVHZrugUowJ7M=\ngithub.com/gonum/internal v0.0.0-20181124074243-f884aa714029/go.mod h1:Pu4dmpkhSyOzRwuXkOgAvijx4o+4YMUJJo9OvPYMkks=\ngithub.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9 h1:7qnwS9+oeSiOIsiUMajT+0R7HR6hw5NegnKPmn/94oI=\ngithub.com/gonum/lapack v0.0.0-20181123203213-e4cdc5a0bff9/go.mod h1:XA3DeT6rxh2EAE789SSiSJNqxPaC0aE9J8NTOI0Jo/A=\ngithub.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9 h1:V2IgdyerlBa/MxaEFRbV5juy/C3MGdj4ePi+g6ePIp4=\ngithub.com/gonum/matrix v0.0.0-20181209220409-c518dec07be9/go.mod h1:0EXg4mc1CNP0HCqCz+K4ts155PXIlUywf0wqN+GfPZw=\ngithub.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=\ngithub.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=\ngithub.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=\ngithub.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=\ngithub.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=\ngithub.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp 
v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=\ngithub.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=\ngithub.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=\ngithub.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=\ngithub.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=\ngithub.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=\ngithub.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=\ngithub.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=\ngithub.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=\ngithub.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=\ngithub.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=\ngithub.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=\ngithub.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=\ngithub.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/googleapis/gax-go v0.0.0-20161107002406-da06d194a00e/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=\ngithub.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=\ngithub.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=\ngithub.com/googleapis/google-cloud-go-testing 
v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=\ngithub.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=\ngithub.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=\ngithub.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=\ngithub.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=\ngithub.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=\ngithub.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=\ngithub.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=\ngithub.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=\ngithub.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=\ngithub.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=\ngithub.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=\ngithub.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=\ngithub.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=\ngithub.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=\ngithub.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=\ngithub.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=\ngithub.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=\ngithub.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=\ngithub.com/hashicorp/go-syslog v1.0.0/go.mod 
h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=\ngithub.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=\ngithub.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=\ngithub.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=\ngithub.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=\ngithub.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=\ngithub.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=\ngithub.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=\ngithub.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=\ngithub.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=\ngithub.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=\ngithub.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=\ngithub.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=\ngithub.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=\ngithub.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=\ngithub.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=\ngithub.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=\ngithub.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=\ngithub.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=\ngithub.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=\ngithub.com/jonboulle/clockwork v0.1.0/go.mod 
h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=\ngithub.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=\ngithub.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=\ngithub.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=\ngithub.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=\ngithub.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=\ngithub.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=\ngithub.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=\ngithub.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=\ngithub.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=\ngithub.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=\ngithub.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=\ngithub.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=\ngithub.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=\ngithub.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=\ngithub.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=\ngithub.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=\ngithub.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=\ngithub.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=\ngithub.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=\ngithub.com/kr/text v0.1.0/go.mod 
h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=\ngithub.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=\ngithub.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=\ngithub.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=\ngithub.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=\ngithub.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=\ngithub.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=\ngithub.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=\ngithub.com/mattn/go-sqlite3 v1.14.5/go.mod h1:WVKg1VTActs4Qso6iwGbiFih2UIHo0ENGwNd0Lj+XmI=\ngithub.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=\ngithub.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=\ngithub.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=\ngithub.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=\ngithub.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=\ngithub.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=\ngithub.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=\ngithub.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=\ngithub.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=\ngithub.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=\ngithub.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=\ngithub.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=\ngithub.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=\ngithub.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=\ngithub.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=\ngithub.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=\ngithub.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=\ngithub.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=\ngithub.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=\ngithub.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=\ngithub.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=\ngithub.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=\ngithub.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=\ngithub.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=\ngithub.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=\ngithub.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=\ngithub.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=\ngithub.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=\ngithub.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=\ngithub.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=\ngithub.com/opentracing/basictracer-go v1.0.0/go.mod 
h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=\ngithub.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=\ngithub.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=\ngithub.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=\ngithub.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=\ngithub.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=\ngithub.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=\ngithub.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=\ngithub.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=\ngithub.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=\ngithub.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=\ngithub.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=\ngithub.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=\ngithub.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=\ngithub.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=\ngithub.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod 
h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=\ngithub.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=\ngithub.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=\ngithub.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4=\ngithub.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=\ngithub.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU=\ngithub.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=\ngithub.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=\ngithub.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=\ngithub.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=\ngithub.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=\ngithub.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=\ngithub.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=\ngithub.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=\ngithub.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=\ngithub.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=\ngithub.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=\ngithub.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=\ngithub.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=\ngithub.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod 
h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=\ngithub.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=\ngithub.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=\ngithub.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=\ngithub.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=\ngithub.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=\ngithub.com/prometheus/statsd_exporter v0.20.0/go.mod h1:YL3FWCG8JBBtaUSxAg4Gz2ZYu22bS84XM89ZQXXTWmQ=\ngithub.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=\ngithub.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=\ngithub.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=\ngithub.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=\ngithub.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=\ngithub.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=\ngithub.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=\ngithub.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=\ngithub.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=\ngithub.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=\ngithub.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=\ngithub.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=\ngithub.com/smartystreets/goconvey v1.6.4/go.mod 
h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=\ngithub.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=\ngithub.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=\ngithub.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=\ngithub.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=\ngithub.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=\ngithub.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=\ngithub.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=\ngithub.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=\ngithub.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=\ngithub.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=\ngithub.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=\ngithub.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=\ngithub.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=\ngithub.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=\ngithub.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=\ngithub.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=\ngithub.com/yuin/goldmark v1.1.25/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngithub.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngithub.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=\ngithub.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=\ngo.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=\ngo.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=\ngo.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=\ngo.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=\ngo.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=\ngo.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=\ngo.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=\ngo.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=\ngo.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=\ngo.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=\ngo.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=\ngo.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=\ngo.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=\ngo.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=\ngo.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=\ngo.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=\ngo.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=\ngo4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=\ngolang.org/x/build v0.0.0-20210804225706-d1bc548deb19 h1:H/IPWd+24RJiQXliZOeWRWhwSquQafqeKPYPKhIzalU=\ngolang.org/x/build 
v0.0.0-20210804225706-d1bc548deb19/go.mod h1:FNsqWAHwz0DFmHs9P2xSbeswghADCVF+VcfD2eJLRjI=\ngolang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=\ngolang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=\ngolang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=\ngolang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=\ngolang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=\ngolang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97 h1:/UOmuWzQfxxo9UtlXMwuQU8CMgg1eZXqTRwkSQJWKOI=\ngolang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=\ngolang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=\ngolang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=\ngolang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=\ngolang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=\ngolang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=\ngolang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=\ngolang.org/x/exp 
v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=\ngolang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=\ngolang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=\ngolang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=\ngolang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=\ngolang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=\ngolang.org/x/image v0.0.0-20210628002857-a66eb6448b8d h1:RNPAfi2nHY7C2srAV8A49jpsYr0ADedCk1wq6fTMTvs=\ngolang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM=\ngolang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=\ngolang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=\ngolang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=\ngolang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=\ngolang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=\ngolang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=\ngolang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod 
h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=\ngolang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=\ngolang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=\ngolang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=\ngolang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=\ngolang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=\ngolang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=\ngolang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net 
v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=\ngolang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=\ngolang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=\ngolang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=\ngolang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=\ngolang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=\ngolang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=\ngolang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=\ngolang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=\ngolang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=\ngolang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=\ngolang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=\ngolang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=\ngolang.org/x/oauth2 v0.0.0-20170207211851-4464e7848382/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=\ngolang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=\ngolang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=\ngolang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=\ngolang.org/x/perf v0.0.0-20210220033136-40a54f11e909/go.mod 
h1:KRSrLY7jerMEa0Ih7gBheQ3FYDiSx6liMnniX1o3j2g=\ngolang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=\ngolang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=\ngolang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=\ngolang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=\ngolang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=\ngolang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=\ngolang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=\ngolang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=\ngolang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=\ngolang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=\ngolang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod 
h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=\ngolang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=\ngolang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=\ngolang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=\ngolang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=\ngolang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=\ngolang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=\ngolang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=\ngolang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=\ngolang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=\ngolang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA=\ngolang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=\ngolang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=\ngolang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngoogle.golang.org/api v0.0.0-20170206182103-3d017632ea10/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=\ngoogle.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=\ngoogle.golang.org/api v0.4.0/go.mod 
h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=\ngoogle.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=\ngoogle.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=\ngoogle.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=\ngoogle.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=\ngoogle.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=\ngoogle.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=\ngoogle.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=\ngoogle.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=\ngoogle.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=\ngoogle.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=\ngoogle.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=\ngoogle.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=\ngoogle.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=\ngoogle.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=\ngoogle.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=\ngoogle.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=\ngoogle.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=\ngoogle.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=\ngoogle.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=\ngoogle.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=\ngoogle.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=\ngoogle.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=\ngoogle.golang.org/appengine 
v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=\ngoogle.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=\ngoogle.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=\ngoogle.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=\ngoogle.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=\ngoogle.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=\ngoogle.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=\ngoogle.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=\ngoogle.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=\ngoogle.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=\ngoogle.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=\ngoogle.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=\ngoogle.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=\ngoogle.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=\ngoogle.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=\ngoogle.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=\ngoogle.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=\ngoogle.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod 
h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=\ngoogle.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=\ngoogle.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=\ngoogle.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=\ngoogle.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=\ngoogle.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=\ngoogle.golang.org/grpc v0.0.0-20170208002647-2a6bf6142e96/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=\ngoogle.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=\ngoogle.golang.org/grpc v1.19.0/go.mod 
h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=\ngoogle.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=\ngoogle.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=\ngoogle.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=\ngoogle.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=\ngoogle.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=\ngoogle.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=\ngoogle.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=\ngoogle.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=\ngoogle.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=\ngoogle.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=\ngoogle.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=\ngoogle.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=\ngoogle.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=\ngoogle.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=\ngoogle.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=\ngoogle.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=\ngoogle.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=\ngoogle.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=\ngoogle.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=\ngoogle.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=\ngoogle.golang.org/protobuf v1.21.0/go.mod 
h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=\ngoogle.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\ngoogle.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\ngoogle.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\ngoogle.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=\ngoogle.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=\ngopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=\ngopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=\ngopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=\ngopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=\ngopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=\ngopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=\ngopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=\ngopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=\ngopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=\ngopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.4/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngrpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=\nhonnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=\nhonnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=\nhonnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=\nrsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=\nrsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=\nrsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=\nsigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=\nsourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=\n"
  },
  {
    "path": "goi/main.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"go/scanner\"\n\t\"go/token\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"plugin\"\n\t\"strings\"\n)\n\nfunc main() {\n\tf := os.Stdin\n\tfor {\n\t\tsrc, err := readLine(f)\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfmt.Fprintf(os.Stderr, \"error reading %s: %s\\n\", f, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\t_ = src\n\n\t\tsrc = transform(src)\n\n\t\tso := compile(src)\n\t\tif so == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\trun(so)\n\n\t\t// TODO: Declare global exported functions to access\n\t\t// all unexported variables and fields. How do I get\n\t\t// at types?\n\t}\n\n\tif tempDir != \"\" {\n\t\t// TODO\n\t}\n}\n\nvar index int\n\nfunc readLine(r io.Reader) (string, error) {\n\t// TODO: Continuation lines.\n\tfmt.Printf(\"> \")\n\tr2 := bufio.NewReader(r)\n\treturn r2.ReadString('\\n')\n}\n\nvar imports []string\n\nfunc transform(src string) string {\n\t// TODO: Detect top-level var/type/func/import versus\n\t// statement versus expression.\n\n\tfs := token.NewFileSet()\n\tvar s scanner.Scanner\n\t// XXX error handler argument?\n\ts.Init(fs.AddFile(\"<stdin>\", 1, len(src)), []byte(src), nil, 0)\n\n\t// Split into top-level statements.\n\ttype stmt struct {\n\t\ttoks []token.Token\n\t}\n\n\t_, tok, _ := s.Scan()\n\tif tok == token.IMPORT {\n\t\t// XXX Check that it's only imports. 
Or split statements?\n\t\t//src = \"package main; \" + src\n\n\t\t// XXX Import _ the package to make sure we can import\n\t\t// it (and to get inits)\n\t\timports = append(imports, src)\n\t\treturn \"package main; func Main() { }\"\n\t}\n\n\t// TODO: Figure out the right subset of current imports for\n\t// this src.\n\n\t// TODO: For expressions, print the result and make it\n\t// available in a convenience variable.\n\n\t// XXX Docs don't say anything about importing \"C\". Is that\n\t// necessary?\n\tsrc = fmt.Sprintf(`package main\n%s\nfunc Main() {\n\t%s\n}`, strings.Join(imports, \"\\n\"), src)\n\t//import \\\"fmt\\\"; func Main() {\" + src + \"}\"\n\treturn src\n}\n\nvar tempDir string\n\nfunc compile(src string) string {\n\tif tempDir == \"\" {\n\t\tvar err error\n\t\ttempDir, err = ioutil.TempDir(\"\", \"goi-\")\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to create temporary directory: %s\", err)\n\t\t}\n\t}\n\n\t// XXX Clean up after loading so.\n\n\tpkg := fmt.Sprintf(\"x%d\", index)\n\tindex++\n\n\tgopath := os.Getenv(\"GOPATH\")\n\tif gopath != \"\" {\n\t\tgopath = tempDir + string(filepath.ListSeparator) + gopath\n\t} else {\n\t\tgopath = tempDir\n\t}\n\n\tbase := filepath.Join(tempDir, \"src\", pkg)\n\tif err := os.MkdirAll(base, 0700); err != nil {\n\t\tlog.Fatalf(\"failed to create temporary directory: %s\", err)\n\t}\n\tpath := filepath.Join(base, \"x.go\")\n\tif err := ioutil.WriteFile(path, []byte(src), 0600); err != nil {\n\t\tlog.Fatalf(\"error writing temporary source: %s\", err)\n\t}\n\tso := filepath.Join(base, \"x.so\")\n\t// TODO: Make sure the runtime is available in plugin mode or\n\t// else this takes a long time.\n\t//\n\t// Assuming dependent packages are installed, we spend most of\n\t// the time in the linker (and most of that time in the host\n\t// linker). 
-w disables DWARF and -s disables the symbol table\n\t// (XXX is that safe?).\n\tcmd := exec.Command(\"go\", \"build\", \"-buildmode\", \"plugin\", \"-i\", \"-o\", so, \"-ldflags=-w -s\", pkg)\n\tcmd.Env = append([]string{\"GOPATH=\" + gopath}, os.Environ()...)\n\t// TODO: Translate errors.\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\terr := cmd.Run()\n\tif err != nil {\n\t\t// TODO: Distinguish compile errors from exec error.\n\t\tso = \"\"\n\t}\n\treturn so\n}\n\nfunc run(so string) {\n\tp, err := plugin.Open(so)\n\tif err != nil {\n\t\tlog.Fatalf(\"error loading compiled code: %s\", err)\n\t}\n\tsym, err := p.Lookup(\"Main\")\n\tif err != nil {\n\t\tlog.Fatalf(\"no Main in compiled code: %s\", err)\n\t}\n\tmain, ok := sym.(func())\n\tif !ok {\n\t\tlog.Fatal(\"Main has wrong type\")\n\t}\n\tmain()\n}\n"
  },
  {
    "path": "gover/cache.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"io/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n)\n\nvar hashNameRe = regexp.MustCompile(`^[0-9a-f]{7,40}(\\+[0-9a-f]{1,10})?$`)\nvar fullHashRe = regexp.MustCompile(\"^[0-9a-f]{40}$\")\nvar hashPlusRe = regexp.MustCompile(`^[0-9a-f]{40}(\\+[0-9a-f]{10})?$`)\n\n// resolveName returns the path to the root of the named build and\n// whether or not that path exists. It will log an error and exit if\n// name is ambiguous. If the path does not exist, the returned path is\n// where this build should be saved.\nfunc resolveName(name string) (path string, ok bool) {\n\t// If the name exactly matches a saved version, return it.\n\tsavePath := filepath.Join(*verDir, name)\n\tst, err := os.Stat(savePath)\n\tif err == nil && st.IsDir() {\n\t\treturn savePath, true\n\t}\n\n\t// Otherwise, try to resolve it as an unambiguous hash prefix.\n\tif hashNameRe.MatchString(name) {\n\t\tnameParts := strings.SplitN(name, \"+\", 2)\n\t\tbuilds, err := listBuilds(0)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tvar fullName string\n\t\tfor _, b := range builds {\n\t\t\tif !strings.HasPrefix(b.commitHash, nameParts[0]) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif (len(nameParts) == 1) != (b.deltaHash == \"\") {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif len(nameParts) > 1 && !strings.HasPrefix(b.deltaHash, nameParts[1]) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// We found a match.\n\t\t\tif fullName != \"\" {\n\t\t\t\tlog.Fatalf(\"ambiguous name `%s`\", name)\n\t\t\t}\n\t\t\tfullName = b.fullName()\n\t\t}\n\t\tif fullName != \"\" {\n\t\t\treturn filepath.Join(*verDir, fullName), true\n\t\t}\n\t}\n\n\treturn savePath, false\n}\n\ntype buildInfo struct {\n\tpath       string\n\tcommitHash string\n\tdeltaHash  string\n\tnames     
 []string\n\tcommit     *commit\n}\n\nfunc (i buildInfo) fullName() string {\n\tif i.deltaHash == \"\" {\n\t\treturn i.commitHash\n\t}\n\treturn i.commitHash + \"+\" + i.deltaHash\n}\n\nfunc (i buildInfo) shortName() string {\n\t// TODO: Print more than 7 characters if necessary.\n\tif i.deltaHash == \"\" {\n\t\treturn i.commitHash[:7]\n\t}\n\treturn i.commitHash[:7] + \"+\" + i.deltaHash\n}\n\ntype listFlags int\n\nconst (\n\tlistNames listFlags = 1 << iota\n\tlistCommit\n)\n\nfunc listBuilds(flags listFlags) ([]*buildInfo, error) {\n\tfiles, err := ioutil.ReadDir(*verDir)\n\tif os.IsNotExist(err) {\n\t\treturn nil, nil\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Collect the saved builds.\n\tbuilds := []*buildInfo{}\n\tvar baseMap map[string]*buildInfo\n\tif flags&listNames != 0 {\n\t\tbaseMap = make(map[string]*buildInfo)\n\t}\n\tfor _, file := range files {\n\t\tif !file.IsDir() || !hashPlusRe.MatchString(file.Name()) {\n\t\t\tcontinue\n\t\t}\n\t\tnameParts := strings.SplitN(file.Name(), \"+\", 2)\n\t\tinfo := &buildInfo{path: filepath.Join(*verDir, file.Name()), commitHash: nameParts[0]}\n\t\tif len(nameParts) > 1 {\n\t\t\tinfo.deltaHash = nameParts[1]\n\t\t}\n\n\t\tbuilds = append(builds, info)\n\t\tif baseMap != nil {\n\t\t\tbaseMap[file.Name()] = info\n\t\t}\n\n\t\tif flags&listCommit != 0 {\n\t\t\tcommit, err := ioutil.ReadFile(filepath.Join(*verDir, file.Name(), \"commit\"))\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t} else {\n\t\t\t\tinfo.commit = parseCommit(commit)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Collect the names for each build.\n\tif flags&listNames != 0 {\n\t\tfor _, file := range files {\n\t\t\tif file.Mode()&os.ModeType == os.ModeSymlink {\n\t\t\t\tbase, err := os.Readlink(filepath.Join(*verDir, file.Name()))\n\t\t\t\tif err != nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif info, ok := baseMap[base]; ok {\n\t\t\t\t\tinfo.names = append(info.names, file.Name())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn builds, 
nil\n}\n\ntype commit struct {\n\tauthorDate time.Time\n\ttopLine    string\n}\n\nfunc parseCommit(obj []byte) *commit {\n\tout := &commit{}\n\tlines := strings.Split(string(obj), \"\\n\")\n\tfor i, line := range lines {\n\t\tif strings.HasPrefix(line, \"author \") {\n\t\t\tfs := strings.Fields(line)\n\t\t\tsecs, err := strconv.ParseInt(fs[len(fs)-2], 10, 64)\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatalf(\"malformed author in commit: %s\", err)\n\t\t\t}\n\t\t\tout.authorDate = time.Unix(secs, 0)\n\t\t}\n\t\tif len(line) == 0 {\n\t\t\tout.topLine = lines[i+1]\n\t\t\tbreak\n\t\t}\n\t}\n\treturn out\n}\n"
  },
  {
    "path": "gover/gover.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Command gover manages saved Go build trees.\n//\n// gover saves builds of the Go source tree and runs commands using\n// these saved Go versions. For example,\n//\n//     cd $GOROOT\n//     git checkout go1.5.1\n//     gover build 1.5.1\n//\n// will checkout Go 1.5.1, build the source tree, and save it under\n// the name \"1.5.1\", as well as its commit hash (f2e4c8b). You can\n// then later run commands with Go 1.5.1. For example, the following\n// will run \"go install\" using Go 1.5.1:\n//\n//     gover 1.5.1 install\n//\n//\n// Usage\n//\n//     gover [flags] save [name]\n//\n// Save current build under it's commit hash and, optionally, as\n// \"name\".\n//\n//     gover [flags] build [name]\n//\n// Like \"save\", but first run make.bash in the current tree.\n//\n//     gover [flags] <name> <args>...\n//\n// Run \"go <args>...\" using saved build <name>. <name> may be an\n// unambiguous commit hash or an explicit build name.\n//\n//     gover [flags] with <name> <command>...\n//\n// Run <command> with PATH and GOROOT for build <name>.\n//\n//     gover [flags] env <name>\n//\n// Print the environment for running commands in build <name>. This is\n// printed as shell code appropriate for eval.\n//\n//     gover [flags] list\n//\n// List saved builds.\n//\n//     gover [flags] gc\n//\n// Clean the deduplication cache. 
This is useful after removing saved\n// builds to free up space.\n//\n//\n// Recipies\n//\n// To build and save all versions of Go:\n//\n//     git clone https://go.googlesource.com/go && cd go\n//     for tag in $(git tag | grep '^go[0-9.]*$'); do\n//       git checkout $tag && git clean -df && gover build ${tag##go}\n//     done\npackage main\n\n// TODO: Should untagged saved commits be treated like a cache and\n// deleted automatically?\n\nimport (\n\t\"bytes\"\n\t\"crypto/sha1\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"os/exec\"\n\t\"os/user\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"sort\"\n\t\"strings\"\n\t\"syscall\"\n)\n\n// TODO: Consider also accepting a path for name, which could let this\n// replace rego.\n\n// TODO: Half of these global flags only apply to save and build.\n\n// TODO: The hash and diff hash aren't everything. Environment\n// variables like GOEXPERIMENT also affect the build, but right now\n// goenv save will complain that the hash already exists.\n\nvar (\n\tverbose    = flag.Bool(\"v\", false, \"print commands being run\")\n\tverDir     = flag.String(\"dir\", defaultVerDir(), \"`directory` of saved Go roots\")\n\tnoDedup    = flag.Bool(\"no-dedup\", false, \"disable deduplication of saved trees\")\n\tgorootFlag = flag.String(\"C\", defaultGoroot(), \"use `dir` as the root of the Go tree for save and build\")\n)\n\nvar binTools = []string{\"go\", \"godoc\", \"gofmt\"}\n\nfunc defaultVerDir() string {\n\tcache := os.Getenv(\"XDG_CACHE_HOME\")\n\tif cache == \"\" {\n\t\thome := os.Getenv(\"HOME\")\n\t\tif home == \"\" {\n\t\t\tu, err := user.Current()\n\t\t\tif err != nil {\n\t\t\t\thome = u.HomeDir\n\t\t\t}\n\t\t}\n\t\tcache = filepath.Join(home, \".cache\")\n\t}\n\treturn filepath.Join(cache, \"gover\")\n}\n\nfunc defaultGoroot() string {\n\tc := exec.Command(\"git\", \"rev-parse\", \"--show-cdup\")\n\toutput, err := c.Output()\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\tgoroot := 
strings.TrimSpace(string(output))\n\tif goroot == \"\" {\n\t\t// The empty string is --show-cdup's helpful way of\n\t\t// saying \"the current directory\".\n\t\tgoroot = \".\"\n\t}\n\tif !isGoroot(goroot) {\n\t\treturn \"\"\n\t}\n\treturn goroot\n}\n\n// isGoroot returns true if path is the root of a Go tree. It is\n// somewhat heuristic.\nfunc isGoroot(path string) bool {\n\tst, err := os.Stat(filepath.Join(path, \"src\", \"cmd\", \"go\"))\n\treturn err == nil && st.IsDir()\n}\n\nfunc main() {\n\tlog.SetFlags(0)\n\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage:\\n\")\n\t\tfmt.Fprintf(os.Stderr, \"  %s [flags] save [name] - save Go build tree\\n\", os.Args[0])\n\t\tfmt.Fprintf(os.Stderr, \"  %s [flags] build [name] - build and save current tree\\n\", os.Args[0])\n\t\tfmt.Fprintf(os.Stderr, \"  %s [flags] <name> <args>... - run go <args> using build <name>\\n\", os.Args[0])\n\t\tfmt.Fprintf(os.Stderr, \"  %s [flags] with <name> <command>... - run <command> using build <name>\\n\", os.Args[0])\n\t\tfmt.Fprintf(os.Stderr, \"  %s [flags] env <name> - print the environment for build <name> as shell code\\n\", os.Args[0])\n\t\tfmt.Fprintf(os.Stderr, \"  %s [flags] list - list saved builds\\n\", os.Args[0])\n\t\tfmt.Fprintf(os.Stderr, \"  %s [flags] gc [-rm-unlabeled] - clean the deduplication cache\", os.Args[0])\n\t\tfmt.Fprintf(os.Stderr, \"\\n\\n\")\n\t\tfmt.Fprintf(os.Stderr, \"<name> may be an unambiguous commit hash or a string name.\\n\\n\")\n\t\tfmt.Fprintf(os.Stderr, \"Flags:\\n\")\n\t\tflag.PrintDefaults()\n\t}\n\n\tflag.Parse()\n\tif flag.NArg() < 1 {\n\t\tflag.Usage()\n\t\tos.Exit(2)\n\t}\n\n\t// Make gorootFlag absolute.\n\tif *gorootFlag != \"\" {\n\t\tabs, err := filepath.Abs(*gorootFlag)\n\t\tif err != nil {\n\t\t\t*gorootFlag = abs\n\t\t}\n\t}\n\n\tswitch flag.Arg(0) {\n\tcase \"save\", \"build\":\n\t\t// TODO: Annoying: if gover save has already saved a\n\t\t// commit by its hash, you can't then \"gover save x\"\n\t\t// to name it. 
You have to \"gover build x\", but you're\n\t\t// not building at all.\n\n\t\tif flag.NArg() > 2 {\n\t\t\tflag.Usage()\n\t\t\tos.Exit(2)\n\t\t}\n\t\thash, diff := getHash()\n\t\tname := \"\"\n\t\tif flag.NArg() >= 2 {\n\t\t\tname = flag.Arg(1)\n\t\t\tif name == hash {\n\t\t\t\tname = \"\"\n\t\t\t}\n\t\t}\n\n\t\t// Validate paths.\n\t\tsavePath, hashExists := resolveName(hash)\n\n\t\tnamePath, nameExists, nameRight := \"\", false, true\n\t\tif name != \"\" && name != hash {\n\t\t\tnamePath, nameExists = resolveName(name)\n\t\t\tif nameExists {\n\t\t\t\tst1, _ := os.Stat(savePath)\n\t\t\t\tst2, _ := os.Stat(namePath)\n\t\t\t\tnameRight = os.SameFile(st1, st2)\n\t\t\t}\n\t\t}\n\n\t\tif flag.Arg(0) == \"build\" {\n\t\t\tif hashExists {\n\t\t\t\tif !nameRight {\n\t\t\t\t\tlog.Fatalf(\"name `%s' exists and refers to another build\", name)\n\t\t\t\t}\n\t\t\t\tmsg := fmt.Sprintf(\"saved build `%s' already exists\", hash)\n\t\t\t\tif namePath != \"\" && !nameExists {\n\t\t\t\t\tdoLink(hash, namePath)\n\t\t\t\t\tmsg += fmt.Sprintf(\"; added name `%s'\", name)\n\t\t\t\t}\n\t\t\t\tfmt.Fprintln(os.Stderr, msg)\n\t\t\t\tos.Exit(0)\n\t\t\t}\n\n\t\t\tdoBuild()\n\t\t} else {\n\t\t\tif hashExists {\n\t\t\t\tlog.Fatalf(\"saved build `%s' already exists\", hash)\n\t\t\t}\n\t\t\tif nameExists {\n\t\t\t\tlog.Fatalf(\"saved build `%s' already exists\", name)\n\t\t\t}\n\t\t}\n\t\tdoSave(hash, diff)\n\t\tif namePath != \"\" {\n\t\t\tdoLink(hash, namePath)\n\t\t}\n\t\tif name == \"\" {\n\t\t\tfmt.Fprintf(os.Stderr, \"saved build as `%s'\\n\", hash)\n\t\t} else {\n\t\t\tfmt.Fprintf(os.Stderr, \"saved build as `%s' and `%s'\\n\", hash, name)\n\t\t}\n\n\tcase \"list\":\n\t\tif flag.NArg() > 1 {\n\t\t\tflag.Usage()\n\t\t\tos.Exit(2)\n\t\t}\n\t\tdoList()\n\n\tcase \"with\":\n\t\tif flag.NArg() < 3 {\n\t\t\tflag.Usage()\n\t\t\tos.Exit(2)\n\t\t}\n\t\tdoWith(flag.Arg(1), flag.Args()[2:])\n\n\tcase \"env\":\n\t\tif flag.NArg() != 2 
{\n\t\t\tflag.Usage()\n\t\t\tos.Exit(2)\n\t\t}\n\t\tdoEnv(flag.Arg(1))\n\n\tcase \"gc\":\n\t\tif flag.NArg() == 2 && flag.Arg(1) == \"-rm-unlabeled\" {\n\t\t\tdoRemoveUnlabeled()\n\t\t} else if flag.NArg() > 1 {\n\t\t\tflag.Usage()\n\t\t\tos.Exit(2)\n\t\t}\n\t\tdoGC()\n\n\tdefault:\n\t\tif flag.NArg() < 2 {\n\t\t\tflag.Usage()\n\t\t\tos.Exit(2)\n\t\t}\n\t\tif _, ok := resolveName(flag.Arg(0)); !ok {\n\t\t\tlog.Fatalf(\"unknown name or subcommand `%s'\", flag.Arg(0))\n\t\t}\n\t\tdoWith(flag.Arg(0), append([]string{\"go\"}, flag.Args()[1:]...))\n\t}\n}\n\nfunc goroot() string {\n\tif *gorootFlag == \"\" {\n\t\tlog.Fatal(\"not a git repository\")\n\t}\n\treturn *gorootFlag\n}\n\nfunc gitCmd(cmd string, args ...string) string {\n\targs = append([]string{\"-C\", goroot(), cmd}, args...)\n\tc := exec.Command(\"git\", args...)\n\tc.Stderr = os.Stderr\n\toutput, err := c.Output()\n\tif err != nil {\n\t\tlog.Fatalf(\"error executing git %s: %s\", strings.Join(args, \" \"), err)\n\t}\n\treturn string(output)\n}\n\nfunc getHash() (string, []byte) {\n\trev := strings.TrimSpace(string(gitCmd(\"rev-parse\", \"HEAD\")))\n\n\tdiff := []byte(gitCmd(\"diff\", \"HEAD\"))\n\n\tif len(bytes.TrimSpace(diff)) > 0 {\n\t\tdiffHash := fmt.Sprintf(\"%x\", sha1.Sum(diff))\n\t\treturn rev + \"+\" + diffHash[:10], diff\n\t}\n\treturn rev, nil\n}\n\nfunc doBuild() {\n\tc := exec.Command(\"./make.bash\")\n\tc.Dir = filepath.Join(goroot(), \"src\")\n\tc.Stdout = os.Stdout\n\tc.Stderr = os.Stderr\n\tif err := c.Run(); err != nil {\n\t\tlog.Fatalf(\"error executing make.bash: %s\", err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc doSave(hash string, diff []byte) {\n\t// Create a minimal GOROOT at $GOROOT/gover/hash.\n\tsavePath, _ := resolveName(hash)\n\tgoos, goarch := runtime.GOOS, runtime.GOARCH\n\tif x := os.Getenv(\"GOOS\"); x != \"\" {\n\t\tgoos = x\n\t}\n\tif x := os.Getenv(\"GOARCH\"); x != \"\" {\n\t\tgoarch = x\n\t}\n\tosArch := goos + \"_\" + goarch\n\n\tgoroot := goroot()\n\tfor _, binTool := range 
binTools {\n\t\tsrc := filepath.Join(goroot, \"bin\", binTool)\n\t\tif _, err := os.Stat(src); err == nil {\n\t\t\tcp(src, filepath.Join(savePath, \"bin\", binTool))\n\t\t}\n\t}\n\tcpR(filepath.Join(goroot, \"pkg\", osArch), filepath.Join(savePath, \"pkg\", osArch))\n\tcpR(filepath.Join(goroot, \"pkg\", \"tool\", osArch), filepath.Join(savePath, \"pkg\", \"tool\", osArch))\n\tcpR(filepath.Join(goroot, \"pkg\", \"include\"), filepath.Join(savePath, \"pkg\", \"include\"))\n\t// TODO: Use \"go list\" and save only the stuff depended on? Or\n\t// maybe just save the types of files go list can return, plus\n\t// \"testdata\" directories?\n\tcpR(filepath.Join(goroot, \"src\"), filepath.Join(savePath, \"src\"))\n\t// Copy tracer static resources.\n\tcpR(filepath.Join(goroot, \"misc\", \"trace\"), filepath.Join(savePath, \"misc\", \"trace\"))\n\n\tif diff != nil {\n\t\tif err := ioutil.WriteFile(filepath.Join(savePath, \"diff\"), diff, 0666); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\t// Save commit object.\n\tcommit := gitCmd(\"cat-file\", \"commit\", \"HEAD\")\n\tif err := ioutil.WriteFile(filepath.Join(savePath, \"commit\"), []byte(commit), 0666); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc doLink(hash, namePath string) {\n\terr := os.Symlink(hash, namePath)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\ntype buildInfoSorter []*buildInfo\n\nfunc (s buildInfoSorter) Len() int {\n\treturn len(s)\n}\n\nfunc (s buildInfoSorter) Less(i, j int) bool {\n\treturn s[i].commit.authorDate.Before(s[j].commit.authorDate)\n}\n\nfunc (s buildInfoSorter) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n}\n\nfunc doList() {\n\tbuilds, err := listBuilds(listNames | listCommit)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tsort.Sort(buildInfoSorter(builds))\n\n\tfor _, info := range builds {\n\t\tfmt.Print(info.shortName())\n\t\tif !info.commit.authorDate.IsZero() {\n\t\t\tfmt.Printf(\" %s\", info.commit.authorDate.Local().Format(\"2006-01-02T15:04:05\"))\n\t\t}\n\t\tif 
len(info.names) > 0 {\n\t\t\tfmt.Printf(\" %s\", info.names)\n\t\t}\n\t\tif info.commit.topLine != \"\" {\n\t\t\tfmt.Printf(\" %s\", info.commit.topLine)\n\t\t}\n\t\tfmt.Println()\n\t}\n}\n\nfunc doWith(name string, cmd []string) {\n\tsavePath, ok := resolveName(name)\n\tif !ok {\n\t\tlog.Fatalf(\"unknown name `%s'\", name)\n\t}\n\tgoroot, path := getEnv(savePath)\n\n\t// exec.Command looks up the command in this process' PATH.\n\t// Unfortunately, this is a rather complex process and there's\n\t// no way to provide a different PATH, so set the process'\n\t// PATH.\n\tos.Setenv(\"PATH\", path)\n\tc := exec.Command(cmd[0], cmd[1:]...)\n\n\t// Build the rest of the command environment.\n\tfor _, env := range os.Environ() {\n\t\tif strings.HasPrefix(env, \"GOROOT=\") {\n\t\t\tcontinue\n\t\t}\n\t\tc.Env = append(c.Env, env)\n\t}\n\tc.Env = append(c.Env, \"GOROOT=\"+goroot)\n\n\t// Run command.\n\tc.Stdin, c.Stdout, c.Stderr = os.Stdin, os.Stdout, os.Stderr\n\tif err := c.Run(); err != nil {\n\t\tfmt.Printf(\"command failed: %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n\nfunc doEnv(name string) {\n\tsavePath, ok := resolveName(name)\n\tif !ok {\n\t\tlog.Fatalf(\"unknown name `%s'\", name)\n\t}\n\n\tgoroot, path := getEnv(savePath)\n\tfmt.Printf(\"PATH=%s;\\n\", shellEscape(path))\n\tfmt.Printf(\"GOROOT=%s;\\n\", shellEscape(goroot))\n\tfmt.Printf(\"export GOROOT;\\n\")\n}\n\n// getEnv returns the GOROOT and PATH for the Go tree rooted at savePath.\nfunc getEnv(savePath string) (goroot, path string) {\n\tp := []string{filepath.Join(savePath, \"bin\")}\n\t// Strip existing Go tree from PATH.\n\tfor _, dir := range filepath.SplitList(os.Getenv(\"PATH\")) {\n\t\tif isGoroot(filepath.Join(dir, \"..\")) {\n\t\t\tcontinue\n\t\t}\n\t\tp = append(p, dir)\n\t}\n\n\treturn savePath, strings.Join(p, string(filepath.ListSeparator))\n}\n\nfunc doRemoveUnlabeled() {\n\tbuilds, err := listBuilds(listNames)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\trms := 0\n\tfor _, build := range 
builds {\n\t\tif len(build.names) != 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif err := os.RemoveAll(build.path); err != nil {\n\t\t\t// Not fatal.\n\t\t\tlog.Println(err)\n\t\t} else {\n\t\t\trms++\n\t\t}\n\t}\n\tfmt.Printf(\"removed %d unlabeled saved build(s)\\n\", rms)\n}\n\nvar goodDedupPath = regexp.MustCompile(\"/[0-9a-f]{2}/[0-9a-f]{38}$\")\n\nfunc doGC() {\n\tremoved, space := 0, int64(0)\n\tfilepath.Walk(filepath.Join(*verDir, \"_dedup\"), func(path string, info os.FileInfo, err error) error {\n\t\tif info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tst, ok := info.Sys().(*syscall.Stat_t)\n\t\tif !ok || st.Nlink != 1 {\n\t\t\treturn nil\n\t\t}\n\t\tif !goodDedupPath.MatchString(path) {\n\t\t\t// Be paranoid about removing files.\n\t\t\tlog.Printf(\"unexpected file in dedup cache: %s\\n\", path)\n\t\t\treturn nil\n\t\t}\n\t\tif err := os.Remove(path); err != nil {\n\t\t\tlog.Printf(\"failed to remove %s: %v\", path, err)\n\t\t} else {\n\t\t\tspace += info.Size()\n\t\t\tremoved++\n\t\t}\n\t\treturn nil\n\t})\n\tfmt.Printf(\"removed %d MB in %d unused file(s)\\n\", space>>20, removed)\n}\n\nfunc cp(src, dst string) {\n\tdata, err := ioutil.ReadFile(src)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\twriteFile, xdst := true, dst\n\tif !*noDedup {\n\t\thash := fmt.Sprintf(\"%x\", sha1.Sum(data))\n\t\txdst = filepath.Join(*verDir, \"_dedup\", hash[:2], hash[2:])\n\t\tif _, err := os.Stat(xdst); err == nil {\n\t\t\twriteFile = false\n\t\t}\n\t}\n\tif writeFile {\n\t\tif *verbose {\n\t\t\tfmt.Printf(\"cp %s %s\\n\", src, xdst)\n\t\t}\n\t\tst, err := os.Stat(src)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif err := os.MkdirAll(filepath.Dir(xdst), 0777); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif err := ioutil.WriteFile(xdst, data, st.Mode()); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif err := os.Chtimes(xdst, st.ModTime(), st.ModTime()); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\tif dst != xdst {\n\t\tif *verbose {\n\t\t\tfmt.Printf(\"ln %s 
%s\\n\", xdst, dst)\n\t\t}\n\t\tif err := os.MkdirAll(filepath.Dir(dst), 0777); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif err := os.Link(xdst, dst); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n}\n\nfunc cpR(src, dst string) {\n\tfilepath.Walk(src, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil || info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tbase := filepath.Base(path)\n\t\tif base == \"core\" || strings.HasSuffix(base, \".test\") {\n\t\t\treturn nil\n\t\t}\n\n\t\tcp(path, dst+path[len(src):])\n\t\treturn nil\n\t})\n}\n"
  },
  {
    "path": "gover/shutil.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport \"strings\"\n\nfunc shellEscape(x string) string {\n\tif len(x) == 0 {\n\t\treturn \"''\"\n\t}\n\tfor _, r := range x {\n\t\tif 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || '0' <= r && r <= '9' || strings.ContainsRune(\"@%_-+:,./\", r) {\n\t\t\tcontinue\n\t\t}\n\t\t// Unsafe character.\n\t\treturn \"'\" + strings.Replace(x, \"'\", \"'\\\"'\\\"'\", -1) + \"'\"\n\t}\n\treturn x\n\n}\n"
  },
  {
    "path": "greplogs/main.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Command greplogs is deprecated.\n//\n// Please see golang.org/x/build/cmd/greplogs.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n)\n\nfunc main() {\n\tfmt.Fprintf(os.Stderr, \"This copy of greplogs is deprecated. Please update your greplogs using:\\n\")\n\tfmt.Fprintf(os.Stderr, \"\\tgo install golang.org/x/build/cmd/greplogs@latest\\n\")\n\tos.Exit(2)\n}\n"
  },
  {
    "path": "internal/loganal/classify.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage loganal\n\nimport (\n\t\"regexp\"\n\t\"strings\"\n)\n\nvar (\n\tcanonMsg = regexp.MustCompile(`[0-9]+`)\n\n\t// numberWords matches words that consist of both letters and\n\t// digits. Since this is meant to canonicalize numeric fields\n\t// of error messages, we accept any Unicode letter, but only\n\t// digits 0-9. We match the whole word to catch things like\n\t// hexadecimal and temporary file names.\n\tnumberWords = regexp.MustCompile(`\\pL*[0-9][\\pL0-9]*`)\n)\n\nfunc (f *Failure) canonicalMessage() string {\n\t// Do we need to do anything to the message?\n\tfor _, c := range f.Message {\n\t\tif '0' <= c && c <= '9' {\n\t\t\tgoto rewrite\n\t\t}\n\t}\n\treturn f.Message\n\nrewrite:\n\t// Canonicalize any \"word\" of the message containing numbers.\n\t//\n\t// TODO: \"Escape\" any existing … to make this safe as a key\n\t// for later use with canonicalFields (direct use is\n\t// unimportant).\n\treturn numberWords.ReplaceAllString(f.Message, \"…\")\n}\n\nfunc (f *Failure) canonicalFields() []string {\n\tfields := []string{}\n\tmsg := f.Message\n\tfor len(msg) > 0 {\n\t\tnext := numberWords.FindStringIndex(msg)\n\t\tif next == nil {\n\t\t\tfields = append(fields, msg)\n\t\t\tbreak\n\t\t}\n\t\tif next[0] > 0 {\n\t\t\tfields = append(fields, msg[:next[0]])\n\t\t}\n\t\tfields = append(fields, msg[next[0]:next[1]])\n\t\tmsg = msg[next[1]:]\n\t}\n\treturn fields\n}\n\n// Classify groups a set of failures in to canonicalized failure\n// classes. The returned map maps from each failure class to the\n// indexes of the input failures in that class. 
Each input failure\n// will be in exactly one failure class.\nfunc Classify(fs []*Failure) map[Failure][]int {\n\t// Map maximally canonicalized failures to input indexes.\n\tcanon := map[Failure][]int{}\n\tfor i, f := range fs {\n\t\t// TODO: Match up nearby line numbers?\n\t\tkey := Failure{\n\t\t\tPackage:  f.Package,\n\t\t\tTest:     f.Test,\n\t\t\tMessage:  f.canonicalMessage(),\n\t\t\tFunction: f.Function,\n\t\t\tFile:     f.File,\n\t\t}\n\n\t\tcanon[key] = append(canon[key], i)\n\t}\n\n\t// De-canonicalize fields that all of the failures in a class\n\t// have a common.\n\tout := make(map[Failure][]int, len(canon))\n\tfor key, class := range canon {\n\t\tif len(class) == 1 {\n\t\t\tout[key] = class\n\t\t\tcontinue\n\t\t}\n\n\t\t// Does the message need de-canonicalization?\n\t\tif key.Message != fs[class[0]].Message {\n\t\t\tfields := fs[class[0]].canonicalFields()\n\t\t\tfor _, fi := range class[1:] {\n\t\t\t\tnfields := fs[fi].canonicalFields()\n\t\t\t\tfor i, field := range fields {\n\t\t\t\t\tif field != nfields[i] {\n\t\t\t\t\t\tfields[i] = \"…\"\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tkey.Message = strings.Join(fields, \"\")\n\t\t}\n\n\t\t// De-canonicalize Line, OS, and Arch.\n\t\tline, os, arch := fs[class[0]].Line, fs[class[0]].OS, fs[class[0]].Arch\n\t\tfor _, fi := range class[1:] {\n\t\t\tif fs[fi].Line != line {\n\t\t\t\tline = 0\n\t\t\t}\n\t\t\tif fs[fi].OS != os {\n\t\t\t\tos = \"\"\n\t\t\t}\n\t\t\tif fs[fi].Arch != arch {\n\t\t\t\tarch = \"\"\n\t\t\t}\n\t\t}\n\t\tkey.Line, key.OS, key.Arch = line, os, arch\n\n\t\tout[key] = class\n\t}\n\n\treturn out\n}\n"
  },
  {
    "path": "internal/loganal/doc.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package loganal contains functions for analyzing build and test\n// logs produced by all.bash.\npackage loganal\n"
  },
  {
    "path": "internal/loganal/failure.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage loganal\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n)\n\n// Failure records a failure extracted from an all.bash log.\ntype Failure struct {\n\t// Package is the Go package of this failure. In the case of a\n\t// testing.T failure, this will be the package of the test.\n\tPackage string\n\n\t// Test identifies the failed test function. If this is not a\n\t// testing.T failure, this will be \"\".\n\tTest string\n\n\t// Message is the summarized failure message. This will be one\n\t// line of text.\n\tMessage string\n\n\t// FullMessage is a substring of the log that captures the\n\t// entire failure message. It may be many lines long.\n\tFullMessage string\n\n\t// Function is the fully qualified name of the function where\n\t// this failure happened, if known. This helps distinguish\n\t// between generic errors like \"out of bounds\" and is more\n\t// stable for matching errors than file/line.\n\tFunction string\n\n\t// File is the source file where this failure happened, if\n\t// known.\n\tFile string\n\n\t// Line is the source line where this failure happened, if\n\t// known.\n\tLine int\n\n\t// OS and Arch are the GOOS and GOARCH of this failure.\n\tOS, Arch string\n}\n\nfunc (f Failure) String() string {\n\ts := f.Package\n\tif f.Test != \"\" {\n\t\ts += \".\" + f.Test\n\t}\n\tif f.Function != \"\" || f.File != \"\" {\n\t\tif s != \"\" {\n\t\t\ts += \" \"\n\t\t}\n\t\tif f.Function != \"\" {\n\t\t\ts += \"at \" + f.Function\n\t\t} else {\n\t\t\ts += \"at \" + f.File\n\t\t\tif f.Line != 0 {\n\t\t\t\ts += fmt.Sprintf(\":%d\", f.Line)\n\t\t\t}\n\t\t}\n\t}\n\tif s != \"\" {\n\t\ts += \": \"\n\t}\n\ts += f.Message\n\treturn s\n}\n\nvar (\n\tlinesStar = `(?:.*\\n)*?`\n\tlinesPlus = `(?:.*\\n)+?`\n\n\t// failPkg matches the FAIL line for a 
package.\n\t//\n\t// In case of failure the Android wrapper prints \"exitcode=1\" without a newline,\n\t// so for logs prior to the fix for https://golang.org/issue/49317 we need to\n\t// strip that from the beginning of the line.\n\tfailPkg = `(?m:^(?:exitcode=1)?FAIL[ \\t]+(\\S+))`\n\n\t// logTruncated matches the \"log truncated\" line injected by the coordinator.\n\tlogTruncated = `(?:\\n\\.\\.\\. log truncated \\.\\.\\.)`\n\n\tendOfTest = `(?:` + failPkg + `|` + logTruncated + `)`\n\n\tcanonLine = regexp.MustCompile(`\\r+\\n`)\n\n\t// testingHeader matches the beginning of the go test std\n\t// section. On Plan 9 there used to be just one #.\n\ttestingHeader = regexp.MustCompile(`^#+ Testing packages`)\n\n\t// sectionHeader matches the header of each testing section\n\t// printed by go tool dist test.\n\tsectionHeader = regexp.MustCompile(`^##### (.*)`)\n\n\t// testingFailed matches a testing.T failure. This may be a\n\t// T.Error or a recovered panic. There was a time when the\n\t// test name included GOMAXPROCS (like how benchmark names\n\t// do), so we strip that out.\n\ttestingFailed = regexp.MustCompile(`^--- FAIL: ([^-\\s]+).*\\n(` + linesStar + `)` + endOfTest)\n\n\t// testingError matches the file name and message of the last\n\t// T.Error in a testingFailed log.\n\ttestingError = regexp.MustCompile(`(?:.*\\n)*\\t([^:]+):([0-9]+): (.*)\\n`)\n\n\t// testingPanic matches a recovered panic in a testingFailed\n\t// log.\n\ttestingPanic = regexp.MustCompile(`panic: (.*?)(?: \\[recovered\\])`)\n\n\t// gotestFailed matches a $GOROOT/test failure.\n\tgotestFailed = regexp.MustCompile(`^# go run run\\.go.*\\n(` + linesPlus + `)` + endOfTest)\n\n\t// buildFailed matches build failures from the testing package.\n\tbuildFailed = regexp.MustCompile(`^` + failPkg + `\\s+\\[build failed\\]`)\n\n\t// timeoutPanic1 matches a test timeout detected by the testing package.\n\ttimeoutPanic1 = regexp.MustCompile(`^panic: test timed out after .*\\n(` + linesStar + `)` + 
endOfTest)\n\n\t// timeoutPanic2 matches a test timeout detected by go test.\n\ttimeoutPanic2 = regexp.MustCompile(`^\\*\\*\\* Test killed.*ran too long\\n` + endOfTest)\n\n\t// coordinatorTimeout matches a test timeout detected by the\n\t// coordinator, for both non-sharded and sharded tests.\n\tcoordinatorTimeout = regexp.MustCompile(`(?m)^Build complete.*Result: error: timed out|^Test \"[^\"]+\" ran over [0-9a-z]+ limit`)\n\n\t// tbEntry is a regexp string that matches a single\n\t// function/line number entry in a traceback. Group 1 matches\n\t// the fully qualified function name. Groups 2 and 3 match the\n\t// file name and line number.\n\t// Most entries have trailing stack metadata for each frame,\n\t// but inlined calls, lacking a frame, may omit that metadata.\n\ttbEntry = `(\\S+)\\(.*\\)\\n\\t(.*):([0-9]+)(?: .*)?\\n`\n\n\t// runtimeFailed matches a runtime throw or testing package\n\t// panic. Matching the panic is fairly loose because in some\n\t// cases a \"fatal error:\" can be preceded by a \"panic:\" if\n\t// we've started the panic and then realize we can't (e.g.,\n\t// sigpanic). 
Also gather up the \"runtime:\" prints preceding a\n\t// throw.\n\truntimeFailed        = regexp.MustCompile(`^(?:runtime:.*\\n)*.*(?:panic: |fatal error: )(.*)`)\n\truntimeLiterals      = []string{\"runtime:\", \"panic:\", \"fatal error:\"}\n\truntimeFailedTrailer = regexp.MustCompile(`^(?:exit status.*\\n)?(?:\\*\\*\\* Test killed.*\\n)?` + endOfTest + `?`)\n\n\t// apiCheckerFailed matches an API checker failure.\n\tapiCheckerFailed = regexp.MustCompile(`^Error running API checker: (.*)`)\n\n\t// goodLine matches known-good lines so we can ignore them\n\t// before doing more aggressive/fuzzy failure extraction.\n\tgoodLine = regexp.MustCompile(`^#|^ok\\s|^\\?\\s|^Benchmark|^PASS|^=== |^--- `)\n\n\t// testingUnknownFailed matches the last line of some unknown\n\t// failure detected by the testing package.\n\ttestingUnknownFailed = regexp.MustCompile(`^` + endOfTest)\n\n\t// miscFailed matches the log.Fatalf in go tool dist test when\n\t// a test fails. We use this as a last resort, mostly to pick\n\t// up failures in sections that don't use the testing package.\n\tmiscFailed = regexp.MustCompile(`^.*Failed: (?:exit status|test failed)`)\n)\n\n// An extractCache speeds up failure extraction from multiple logs by\n// caching known lines. It is *not* thread-safe, so we track it in a\n// sync.Pool.\ntype extractCache struct {\n\tboringLines map[string]bool\n}\n\nvar extractCachePool sync.Pool\n\nfunc init() {\n\textractCachePool.New = func() interface{} {\n\t\treturn &extractCache{make(map[string]bool)}\n\t}\n}\n\n// Extract parses the failures from all.bash log m.\nfunc Extract(m string, os, arch string) ([]*Failure, error) {\n\tfs := []*Failure{}\n\ttestingStarted := false\n\tsection := \"\"\n\tsectionHeaderFailures := 0 // # failures at section start\n\tunknown := []string{}\n\tcache := extractCachePool.Get().(*extractCache)\n\tdefer extractCachePool.Put(cache)\n\n\t// Canonicalize line endings. 
Note that some logs have a mix\n\t// of line endings and some somehow have multiple \\r's.\n\tm = canonLine.ReplaceAllString(m, \"\\n\")\n\n\tvar s []string\n\tmatcher := newMatcher(m)\n\tconsume := func(r *regexp.Regexp) bool {\n\t\tmatched := matcher.consume(r)\n\t\ts = matcher.groups\n\t\tif matched && !strings.HasSuffix(s[0], \"\\n\") {\n\t\t\t// Consume the rest of the line.\n\t\t\tmatcher.line()\n\t\t}\n\t\treturn matched\n\t}\n\tfirstBadLine := func() string {\n\t\tfor _, u := range unknown {\n\t\t\tif len(u) > 0 {\n\t\t\t\treturn u\n\t\t\t}\n\t\t}\n\t\treturn \"\"\n\t}\n\n\tfor !matcher.done() {\n\t\t// Check for a cached result.\n\t\tline, nextLinePos := matcher.peekLine()\n\t\tisGoodLine, cached := cache.boringLines[line]\n\n\t\t// Process the line.\n\t\tisKnown := true\n\t\tswitch {\n\t\tcase cached:\n\t\t\tmatcher.pos = nextLinePos\n\t\t\tif !isGoodLine {\n\t\t\t\t// This line is known to not match any\n\t\t\t\t// regexps. Follow the default case.\n\t\t\t\tisKnown = false\n\t\t\t\tunknown = append(unknown, line)\n\t\t\t}\n\n\t\tcase consume(testingHeader):\n\t\t\ttestingStarted = true\n\n\t\tcase consume(sectionHeader):\n\t\t\tsection = s[1]\n\t\t\tsectionHeaderFailures = len(fs)\n\n\t\tcase consume(testingFailed):\n\t\t\tf := &Failure{\n\t\t\t\tTest:        s[1],\n\t\t\t\tPackage:     s[3],\n\t\t\t\tFullMessage: s[0],\n\t\t\t\tMessage:     \"unknown testing.T failure\",\n\t\t\t}\n\n\t\t\t// TODO: Can have multiple errors per FAIL:\n\t\t\t// ../fetchlogs/rev/2015-03-24T19:51:21-41f9c43/linux-arm64-canonical\n\n\t\t\tsError := testingError.FindStringSubmatch(s[2])\n\t\t\tsPanic := testingPanic.FindStringSubmatch(s[2])\n\t\t\tif sError != nil {\n\t\t\t\tf.File, f.Line, f.Message = sError[1], atoi(sError[2]), sError[3]\n\t\t\t} else if sPanic != nil {\n\t\t\t\tf.Function, f.File, f.Line = panicWhere(s[2])\n\t\t\t\tf.Message = sPanic[1]\n\t\t\t}\n\n\t\t\tfs = append(fs, f)\n\n\t\tcase consume(gotestFailed):\n\t\t\tfs = append(fs, &Failure{\n\t\t\t\tPackage: 
    \"test/\" + s[2],\n\t\t\t\tFullMessage: s[0],\n\t\t\t\tMessage:     firstLine(s[1]),\n\t\t\t})\n\n\t\tcase consume(buildFailed):\n\t\t\t// This may have an accompanying compiler\n\t\t\t// crash, but it's interleaved with other \"ok\"\n\t\t\t// lines, so it's hard to find.\n\t\t\tfs = append(fs, &Failure{\n\t\t\t\tFullMessage: s[0],\n\t\t\t\tMessage:     \"build failed\",\n\t\t\t\tPackage:     s[1],\n\t\t\t})\n\n\t\tcase consume(timeoutPanic1):\n\t\t\tfs = append(fs, &Failure{\n\t\t\t\tTest:        testFromTraceback(s[1]),\n\t\t\t\tFullMessage: s[0],\n\t\t\t\tMessage:     \"test timed out\",\n\t\t\t\tPackage:     s[2],\n\t\t\t})\n\n\t\tcase consume(timeoutPanic2):\n\t\t\ttb := strings.Join(unknown, \"\\n\")\n\t\t\tfs = append(fs, &Failure{\n\t\t\t\tTest:        testFromTraceback(tb),\n\t\t\t\tFullMessage: tb + \"\\n\" + s[0],\n\t\t\t\tMessage:     \"test timed out\",\n\t\t\t\tPackage:     s[1],\n\t\t\t})\n\n\t\tcase matcher.lineHasLiteral(runtimeLiterals...) && consume(runtimeFailed):\n\t\t\tstart := matcher.matchPos\n\t\t\tmsg := s[1]\n\t\t\tpkg := \"testing\"\n\t\t\tif strings.Contains(s[0], \"fatal error:\") {\n\t\t\t\tpkg = \"runtime\"\n\t\t\t}\n\t\t\ttraceback := consumeTraceback(matcher)\n\t\t\tmatcher.consume(runtimeFailedTrailer)\n\t\t\tfn, file, line := panicWhere(traceback)\n\t\t\tfs = append(fs, &Failure{\n\t\t\t\tPackage:     pkg,\n\t\t\t\tFullMessage: matcher.str[start:matcher.pos],\n\t\t\t\tMessage:     msg,\n\t\t\t\tFunction:    fn,\n\t\t\t\tFile:        file,\n\t\t\t\tLine:        line,\n\t\t\t})\n\n\t\tcase consume(apiCheckerFailed):\n\t\t\tfs = append(fs, &Failure{\n\t\t\t\tPackage:     \"API checker\",\n\t\t\t\tFullMessage: s[0],\n\t\t\t\tMessage:     s[1],\n\t\t\t})\n\n\t\tcase consume(goodLine):\n\t\t\t// Ignore. 
Just cache and clear unknown.\n\t\t\tcache.boringLines[line] = true\n\n\t\tcase consume(testingUnknownFailed):\n\t\t\tfs = append(fs, &Failure{\n\t\t\t\tPackage:     s[1],\n\t\t\t\tFullMessage: s[0],\n\t\t\t\tMessage:     \"unknown failure: \" + firstBadLine(),\n\t\t\t})\n\n\t\tcase len(fs) == sectionHeaderFailures && consume(miscFailed):\n\t\t\tfs = append(fs, &Failure{\n\t\t\t\tPackage:     section,\n\t\t\t\tFullMessage: s[0],\n\t\t\t\tMessage:     \"unknown failure: \" + firstBadLine(),\n\t\t\t})\n\n\t\tdefault:\n\t\t\tisKnown = false\n\t\t\tunknown = append(unknown, line)\n\t\t\tcache.boringLines[line] = false\n\t\t\tmatcher.pos = nextLinePos\n\t\t}\n\n\t\t// Clear unknown lines on any known line.\n\t\tif isKnown {\n\t\t\tunknown = unknown[:0]\n\t\t}\n\t}\n\n\t// TODO: FullMessages for these.\n\tif len(fs) == 0 && strings.Contains(m, \"no space left on device\") {\n\t\tfs = append(fs, &Failure{\n\t\t\tMessage: \"build failed (no space left on device)\",\n\t\t})\n\t}\n\tif len(fs) == 0 && coordinatorTimeout.MatchString(m) {\n\t\t// all.bash was killed by coordinator.\n\t\tfs = append(fs, &Failure{\n\t\t\tMessage: \"build failed (timed out)\",\n\t\t})\n\t}\n\tif len(fs) == 0 && strings.Contains(m, \"Failed to schedule\") {\n\t\t// Test sharding failed.\n\t\tfs = append(fs, &Failure{\n\t\t\tMessage: \"build failed (failed to schedule)\",\n\t\t})\n\t}\n\tif len(fs) == 0 && strings.Contains(m, \"nosplit stack overflow\") {\n\t\tfs = append(fs, &Failure{\n\t\t\tMessage: \"build failed (nosplit stack overflow)\",\n\t\t})\n\t}\n\n\t// If the same (message, where) shows up in more than five\n\t// packages, it's probably a systemic issue, so collapse it\n\t// down to one failure with no package.\n\ttype dedup struct {\n\t\tpackages map[string]bool\n\t\tkept     bool\n\t}\n\tmsgDedup := map[Failure]*dedup{}\n\tfailureMap := map[*Failure]*dedup{}\n\tmaxCount := 0\n\tfor _, f := range fs {\n\t\tkey := Failure{\n\t\t\tMessage:  f.canonicalMessage(),\n\t\t\tFunction: 
f.Function,\n\t\t\tFile:     f.File,\n\t\t\tLine:     f.Line,\n\t\t}\n\n\t\td := msgDedup[key]\n\t\tif d == nil {\n\t\t\td = &dedup{packages: map[string]bool{}}\n\t\t\tmsgDedup[key] = d\n\t\t}\n\t\td.packages[f.Package] = true\n\t\tif len(d.packages) > maxCount {\n\t\t\tmaxCount = len(d.packages)\n\t\t}\n\t\tfailureMap[f] = d\n\t}\n\tif maxCount >= 5 {\n\t\tfsn := []*Failure{}\n\t\tfor _, f := range fs {\n\t\t\td := failureMap[f]\n\t\t\tif len(d.packages) < 5 {\n\t\t\t\tfsn = append(fsn, f)\n\t\t\t} else if !d.kept {\n\t\t\t\td.kept = true\n\t\t\t\tf.Test, f.Package = \"\", \"\"\n\t\t\t\tfsn = append(fsn, f)\n\t\t\t}\n\t\t}\n\t\tfs = fsn\n\t}\n\n\t// Check if we even got as far as testing. Note that there was\n\t// a period when we didn't print the \"testing\" header, so as\n\t// long as we found failures, we don't care if we found the\n\t// header.\n\tif !testingStarted && len(fs) == 0 {\n\t\tfs = append(fs, &Failure{\n\t\t\tMessage: \"toolchain build failed\",\n\t\t})\n\t}\n\n\tfor _, f := range fs {\n\t\tf.OS, f.Arch = os, arch\n\n\t\t// Clean up package. 
For misc/cgo tests, this will be\n\t\t// something like\n\t\t// _/tmp/buildlet-scatch825855615/go/misc/cgo/test.\n\t\tif strings.HasPrefix(f.Package, \"_/tmp/\") {\n\t\t\tf.Package = strings.SplitN(f.Package, \"/\", 4)[3]\n\t\t}\n\n\t\t// Trim trailing newlines from FullMessage.\n\t\tf.FullMessage = strings.TrimRight(f.FullMessage, \"\\n\")\n\t}\n\treturn fs, nil\n}\n\nfunc atoi(s string) int {\n\tv, err := strconv.Atoi(s)\n\tif err != nil {\n\t\tpanic(\"expected number, got \" + s)\n\t}\n\treturn v\n}\n\n// firstLine returns the first line from s, not including the line\n// terminator.\nfunc firstLine(s string) string {\n\tif i := strings.Index(s, \"\\n\"); i >= 0 {\n\t\treturn s[:i]\n\t}\n\treturn s\n}\n\nvar (\n\ttracebackStart = regexp.MustCompile(`^(goroutine [0-9]+.*:|runtime stack:)\\n`)\n\ttracebackEntry = regexp.MustCompile(`^` + tbEntry)\n)\n\n// consumeTraceback consumes a traceback from m.\nfunc consumeTraceback(m *matcher) string {\n\t// Find the beginning of the traceback.\n\tfor !m.done() && !m.peek(tracebackStart) {\n\t\tm.line()\n\t}\n\n\tstart := m.pos\nloop:\n\tfor !m.done() {\n\t\tswitch {\n\t\tcase m.hasPrefix(\"\\n\") || m.hasPrefix(\"\\t\") ||\n\t\t\tm.hasPrefix(\"goroutine \") || m.hasPrefix(\"runtime stack:\") ||\n\t\t\tm.hasPrefix(\"created by \"):\n\t\t\tm.line()\n\n\t\tcase m.consume(tracebackEntry):\n\t\t\t// Do nothing.\n\n\t\tdefault:\n\t\t\tbreak loop\n\t\t}\n\t}\n\treturn m.str[start:m.pos]\n}\n\nvar (\n\t// testFromTracebackRe matches a traceback entry from a\n\t// function named Test* in a file named *_test.go. 
It ignores\n\t// \"created by\" lines.\n\ttestFromTracebackRe = regexp.MustCompile(`\\.(Test[^(\\n]+)\\(.*\\n.*_test\\.go`)\n\n\tpanicWhereRe = regexp.MustCompile(`(?m:^)` + tbEntry)\n)\n\n// testFromTraceback attempts to return the test name from a\n// traceback.\nfunc testFromTraceback(tb string) string {\n\ts := testFromTracebackRe.FindStringSubmatch(tb)\n\tif s == nil {\n\t\treturn \"\"\n\t}\n\treturn s[1]\n}\n\n// panicWhere attempts to return the fully qualified name, source\n// file, and line number of the panicking function in traceback tb.\nfunc panicWhere(tb string) (fn string, file string, line int) {\n\tm := matcher{str: tb}\n\tfor m.consume(panicWhereRe) {\n\t\tfn := m.groups[1]\n\n\t\t// Ignore functions involved in panic handling.\n\t\tif strings.HasPrefix(fn, \"runtime.panic\") || fn == \"runtime.throw\" || fn == \"runtime.sigpanic\" {\n\t\t\tcontinue\n\t\t}\n\t\treturn fn, m.groups[2], atoi(m.groups[3])\n\t}\n\treturn \"\", \"\", 0\n}\n"
  },
  {
    "path": "internal/loganal/matcher.go",
    "content": "// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage loganal\n\nimport (\n\t\"regexp\"\n\t\"strings\"\n)\n\n// A matcher implements incrementally consuming a string using\n// regexps.\ntype matcher struct {\n\tstr    string // string being matched\n\tpos    int\n\tgroups []string // match groups\n\n\t// matchPos is the byte position of the beginning of the\n\t// match in str.\n\tmatchPos int\n\n\t// literals maps from literal strings to the index of the\n\t// next occurrence of that string.\n\tliterals map[string]int\n}\n\nfunc newMatcher(str string) *matcher {\n\treturn &matcher{str: str, literals: map[string]int{}}\n}\n\nfunc (m *matcher) done() bool {\n\treturn m.pos >= len(m.str)\n}\n\n// consume searches for r in the remaining text. If found, it consumes\n// up to the end of the match, fills m.groups with the matched groups,\n// and returns true.\nfunc (m *matcher) consume(r *regexp.Regexp) bool {\n\tidx := r.FindStringSubmatchIndex(m.str[m.pos:])\n\tif idx == nil {\n\t\tm.groups = m.groups[:0]\n\t\treturn false\n\t}\n\tif len(idx)/2 <= cap(m.groups) {\n\t\tm.groups = m.groups[:len(idx)/2]\n\t} else {\n\t\tm.groups = make([]string, len(idx)/2, len(idx))\n\t}\n\tfor i := range m.groups {\n\t\tif idx[i*2] >= 0 {\n\t\t\tm.groups[i] = m.str[m.pos+idx[i*2] : m.pos+idx[i*2+1]]\n\t\t} else {\n\t\t\tm.groups[i] = \"\"\n\t\t}\n\t}\n\tm.matchPos = m.pos + idx[0]\n\tm.pos += idx[1]\n\treturn true\n}\n\n// peek returns whether r matches the remaining text.\nfunc (m *matcher) peek(r *regexp.Regexp) bool {\n\treturn r.MatchString(m.str[m.pos:])\n}\n\n// lineHasLiteral returns whether any of literals is found before the\n// end of the current line.\nfunc (m *matcher) lineHasLiteral(literals ...string) bool {\n\t// Find the position of the next literal.\n\tnextLiteral := len(m.str)\n\tfor _, literal := range literals {\n\t\tnext, ok := 
m.literals[literal]\n\n\t\tif !ok || next < m.pos {\n\t\t\t// Update the literal position.\n\t\t\ti := strings.Index(m.str[m.pos:], literal)\n\t\t\tif i < 0 {\n\t\t\t\tnext = len(m.str)\n\t\t\t} else {\n\t\t\t\tnext = m.pos + i\n\t\t\t}\n\t\t\tm.literals[literal] = next\n\t\t}\n\n\t\tif next < nextLiteral {\n\t\t\tnextLiteral = next\n\t\t}\n\t}\n\t// If the next literal comes after this line, this line\n\t// doesn't have any of literals.\n\tif nextLiteral != len(m.str) {\n\t\teol := strings.Index(m.str[m.pos:], \"\\n\")\n\t\tif eol >= 0 && eol+m.pos < nextLiteral {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// hasPrefix returns whether the remaining text begins with s.\nfunc (m *matcher) hasPrefix(s string) bool {\n\treturn strings.HasPrefix(m.str[m.pos:], s)\n}\n\n// line consumes and returns the remainder of the current line, not\n// including the line terminator.\nfunc (m *matcher) line() string {\n\tif i := strings.Index(m.str[m.pos:], \"\\n\"); i >= 0 {\n\t\tline := m.str[m.pos : m.pos+i]\n\t\tm.pos += i + 1\n\t\treturn line\n\t} else {\n\t\tline := m.str[m.pos:]\n\t\tm.pos = len(m.str)\n\t\treturn line\n\t}\n}\n\n// peekLine returns the remainder of the current line, not including\n// the line terminator, and the position of the beginning of the next\n// line.\nfunc (m *matcher) peekLine() (string, int) {\n\tif i := strings.Index(m.str[m.pos:], \"\\n\"); i >= 0 {\n\t\treturn m.str[m.pos : m.pos+i], m.pos + i + 1\n\t} else {\n\t\treturn m.str[m.pos:], len(m.str)\n\t}\n}\n"
  },
  {
    "path": "minutes3/README.md",
    "content": "# Generate Google Sheets token\n\nGo to https://console.developers.google.com/.\n\nCreate a new GCP project. I called mine `proposal-minutes`.\n\nConfigure the OAuth consent screen: Go to APIs & Services > OAuth consent\nscreen. Select \"Internal\" and click \"Create\". Enter an app name. I called it\n`proposal-minutes`. Fill in other required fields, though most can be left\nblank. Click \"Save and continue\". You don't need to add any scopes. Click \"Save\nand continue\".\n\nEnable Google Sheets: Go to APIs & Services > Enabled APIs and Services. Click\n\"Enable APIs and Services\". Search for the \"Google Sheets API\" and enable it.\n\nCreate OAuth credentials: Go to APIs & Services > Credentials. Click Create\nCredentials > OAuth client ID. Select \"Desktop app\", give it a name (I used\n`proposal-minutes`, again), and click Create. On the next screen, click\n\"Download JSON\" and save this file as `~/.config/proposal-minutes/gdoc.json`.\n\nEnable write scope for spreadsheets: Go to APIs & Services > OAuth consent\nscreen > Data Access and click \"Add or remove scopes\". Add the\n`https://www.googleapis.com/auth/spreadsheets` scope, either by finding it in\nthe table of known scopes or by manually entering it. Click \"Update\". Finally,\nclick \"Save\" on the Data Access page. (Note: If you already had a cached OAuth\ntoken, you'll have to delete it.)\n\n# Generate GitHub token\n\nGo to GitHub, then Account Settings > Developer Options > Personal Access\nTokens > Fine-grained Tokens.\n\nClick \"Generate new token\"\n\nThe name of the token can be anything. I used `proposal-minutes`. 
The expiration\ncan be set up to one year out.\n\nSet the \"Resource owner\" to \"golang\", then select \"Only select repositories\" and\nadd the \"golang/go\" repository.\n\nUnder \"Repository permissions\", set \"Issues\" to \"Read and write\".\n\nUnder \"Organization permissions\", set \"Projects\" to \"Read and write\".\n\nSave the token.\n\nCopy the access token and save it to `~/.config/proposal-minutes/github.tok`.\n"
  },
  {
    "path": "minutes3/gdoc.go",
    "content": "// Copyright 2023 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io/fs\"\n\t\"log\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org/x/oauth2\"\n\t\"golang.org/x/oauth2/google\"\n\t\"google.golang.org/api/option\"\n\t\"google.golang.org/api/sheets/v4\"\n)\n\nfunc getOAuthConfig(scopes []string) *oauth2.Config {\n\t// Read the \"client\" (application) config.\n\tdata, err := os.ReadFile(getConfig(\"gdoc.json\"))\n\tif err != nil {\n\t\tif errors.Is(err, fs.ErrNotExist) {\n\t\t\tlog.Println(\"Please follow the instructions in README.md to create a GCP OAuth client ID.\")\n\t\t}\n\t\tlog.Fatal(err)\n\t}\n\tconfig, err := google.ConfigFromJSON(data, scopes...)\n\tif err != nil {\n\t\tlog.Fatalf(\"unable to parse client secret file to config: %v\", err)\n\t}\n\treturn config\n}\n\ntype Doc struct {\n\tDate   time.Time\n\tWho    []string\n\tIssues []*Issue\n\n\tsrv     *sheets.Service\n\tdocID   string\n\tsheet   *sheets.Sheet\n\twhoCell coord\n\tdateRow rowIndex\n\tcols    colMap\n}\n\ntype Issue struct {\n\tNumber  int\n\tTitle   string\n\tDetails string\n\tMinutes string\n\tComment string\n\tNotes   string\n\n\t// IssueLink is the URL of the GitHub comment linked in \"current status\",\n\t// or \"\" if none.\n\tIssueLink string\n\n\trow rowIndex\n}\n\nvar (\n\tdebugJSON = flag.String(\"debugjson\", \"\", \"json debug mode (save, load)\")\n)\n\nfunc parseDoc(docID string) *Doc {\n\tconst (\n\t\tsheetName = \"Proposals\"\n\t\tfields    = \"sheets.properties,sheets.data.rowData.values(effectiveValue,hyperlink)\"\n\t)\n\n\td := new(Doc)\n\n\tvar spreadsheet *sheets.Spreadsheet\n\tif *debugJSON == \"load\" {\n\t\tspreadsheet = new(sheets.Spreadsheet)\n\t\tdata, err := os.ReadFile(\"debug.json\")\n\t\tif err != nil 
{\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif err := json.Unmarshal(data, spreadsheet); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t} else {\n\t\tscopes := []string{\n\t\t\t//\"https://www.googleapis.com/auth/spreadsheets.readonly\",\n\n\t\t\t// Request write access to update status columns.\n\t\t\t//\n\t\t\t// There's no way to limit this to just one doc! >:(\n\t\t\t\"https://www.googleapis.com/auth/spreadsheets\",\n\t\t}\n\t\tconfig := getOAuthConfig(scopes)\n\t\tclient := makeOAuthClient(getCacheDir(), config)\n\t\tsrv, err := sheets.NewService(context.Background(), option.WithHTTPClient(client))\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Unable to retrieve Docs client: %v\", err)\n\t\t}\n\t\td.srv = srv\n\n\t\tspreadsheet, err = srv.Spreadsheets.Get(docID).Ranges(\"'\" + sheetName + \"'\").Fields(fields).Do()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Unable to retrieve data from document: %v\", err)\n\t\t}\n\t\td.docID = docID\n\n\t\tif *debugJSON == \"save\" {\n\t\t\tjs, _ := json.MarshalIndent(spreadsheet, \"\", \"\\t\")\n\t\t\tjs = append(js, '\\n')\n\t\t\tos.WriteFile(\"debug.json\", js, 0666)\n\t\t\tos.Exit(0)\n\t\t}\n\t}\n\n\tvar sheet *sheets.Sheet\n\tfor _, s := range spreadsheet.Sheets {\n\t\tif s.Properties.Title == sheetName {\n\t\t\tsheet = s\n\t\t\tbreak\n\t\t}\n\t}\n\tif sheet == nil {\n\t\tlog.Fatalf(\"did not find %s sheet\", sheetName)\n\t}\n\td.sheet = sheet\n\n\tvar metaCols, cols colMap\n\theaderRow := rowIndex(-1)\n\tblank := 0\n\tmeta := true\n\tfor _, data := range sheet.Data {\n\t\tfor r, row := range data.RowData {\n\t\t\tr := rowIndex(r)\n\t\t\t// On the first row, figure out the meta columns.\n\t\t\tif metaCols == nil {\n\t\t\t\tvar nonempty []colIndex\n\t\t\t\tfor i, val := range row.Values {\n\t\t\t\t\tif val.EffectiveValue != nil {\n\t\t\t\t\t\tnonempty = append(nonempty, colIndex(i))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif len(nonempty) != 2 {\n\t\t\t\t\tlog.Fatalf(\"on first spreadsheet row, expected two non-empty cells, got %d\", 
len(nonempty))\n\t\t\t\t}\n\t\t\t\tmetaCols = colMap{\"0\": 0, \"key\": nonempty[0], \"value\": nonempty[1]}\n\t\t\t}\n\t\t\t// Should we switch to the body?\n\t\t\tif meta && metaCols.getString(row, \"0\") == \"Issue\" {\n\t\t\t\tmeta = false\n\t\t\t\tcols = newColMap(row)\n\t\t\t\theaderRow = r\n\t\t\t\td.cols = cols\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Process metadata cells\n\t\t\tif meta {\n\t\t\t\tswitch key := metaCols.getString(row, \"key\"); key {\n\t\t\t\tcase \"Who:\":\n\t\t\t\t\td.whoCell = coord{d.sheet, metaCols.col(\"value\"), r}\n\t\t\t\t\tval := metaCols.getString(row, \"value\")\n\t\t\t\t\tif val == \"\" {\n\t\t\t\t\t\tlog.Printf(\"%s: who list not updated\", d.whoCell)\n\t\t\t\t\t} else {\n\t\t\t\t\t\td.Who = regexp.MustCompile(`[,\\s]+`).Split(val, -1)\n\t\t\t\t\t}\n\t\t\t\tcase \"\":\n\t\t\t\t\t// ignore\n\t\t\t\tdefault:\n\t\t\t\t\tlog.Printf(\"%s: unknown meta key %q\", coord{sheet, metaCols.col(\"key\"), r}, key)\n\t\t\t\t\tfailure = true\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Process second header row.\n\t\t\tif r == headerRow+1 {\n\t\t\t\tcell := coord{sheet, cols.col(\"New status\"), r}\n\t\t\t\tval := cols.getEV(row, \"New status\")\n\t\t\t\tif val != nil && val.StringValue != nil && *val.StringValue == \"<DATE>\" {\n\t\t\t\t\tlog.Printf(\"%s: date not updated\", cell)\n\t\t\t\t\tfailure = true\n\t\t\t\t} else {\n\t\t\t\t\tdate, ok := parseSpreadsheetDate(val)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tlog.Printf(\"%s: bad date %q\", cell, cols.getString(row, \"New status\"))\n\t\t\t\t\t\tfailure = true\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\td.Date = date\n\t\t\t\t}\n\t\t\t\td.dateRow = rowIndex(r)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Process body\n\t\t\tcells := cols.getterString(row)\n\n\t\t\tvar issue Issue\n\t\t\tissue.Minutes = cells(\"New status\")\n\t\t\tissue.Title = cells(\"Title\")\n\t\t\tissue.Details = cells(\"Proposal Details\")\n\t\t\tnum := cells(\"Issue\")\n\t\t\tif num == \"\" && issue == (Issue{}) 
{\n\t\t\t\tblank++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif blank > 10 {\n\t\t\t\tlog.Printf(\"found stray non-empty row %d\", r+1)\n\t\t\t\tfailure = true\n\t\t\t}\n\t\t\tn, err := strconv.Atoi(num)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"%s: bad issue number %q\", coord{sheet, cols.col(\"Issue\"), r}, num)\n\t\t\t\tfailure = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tissue.Number = n\n\n\t\t\tissue.row = r\n\n\t\t\t// Get the current issue comment link, if any.\n\t\t\tissue.IssueLink = row.Values[cols.col(\"Issue\")].Hyperlink\n\n\t\t\td.Issues = append(d.Issues, &issue)\n\t\t}\n\t}\n\n\tif d.Date.IsZero() {\n\t\tlog.Printf(\"spreadsheet Date: missing\")\n\t\tfailure = true\n\t} else if time.Since(d.Date) > 5*24*time.Hour || -time.Since(d.Date) > 24*time.Hour {\n\t\tlog.Printf(\"spreadsheet Date: too old\")\n\t\tfailure = true\n\t}\n\n\treturn d\n}\n\nfunc (di *Issue) URL() string {\n\treturn \"https://go.dev/issue/\" + fmt.Sprint(di.Number)\n}\n\ntype action struct {\n\tcol     string // New proposal project column\n\treason  string // Optional reason for declining; key in [updateMsgs] for detailed issue post\n\tisCheck bool   // Perform \"check\" post to issue\n\tminutes string // Short summary to post to minutes issue\n}\n\nvar sheetActions = map[string]action{\n\t// Active states\n\t\"add\": {\n\t\tcol:     \"Active\",\n\t\tminutes: \"added to minutes\",\n\t},\n\t\"skip\": {\n\t\tcol:     \"Active\",\n\t\tminutes: \"discussion ongoing\",\n\t},\n\t\"discuss\": {\n\t\tcol:     \"Active\",\n\t\tminutes: \"discussion ongoing\",\n\t},\n\t\"comment\": {\n\t\tcol:     \"Active\",\n\t\tminutes: \"commented\",\n\t},\n\t\"check\": {\n\t\tcol:     \"Active\",\n\t\tisCheck: true,\n\t\tminutes: \"commented\",\n\t},\n\n\t// Hold states\n\t\"hold\": {\n\t\tcol:     \"Hold\",\n\t\tminutes: \"put on hold\",\n\t},\n\n\t// Accept states\n\t\"likely accept\": {\n\t\tcol:     \"Likely Accept\",\n\t\tminutes: \"**likely accept**; last call for comments ⏳\",\n\t},\n\t\"accept\": 
{\n\t\tcol:     \"Accepted\",\n\t\tminutes: \"no change in consensus; **accepted** 🎉\",\n\t},\n\n\t// Decline states\n\t\"likely decline\": {\n\t\tcol:     \"Likely Decline\",\n\t\tminutes: \"**likely decline**; last call for comments ⏳\",\n\t},\n\t\"decline\": {\n\t\tcol:     \"Declined\",\n\t\tminutes: \"no change in consensus; **declined**\",\n\t},\n\t\"duplicate\": {\n\t\tcol:     \"Declined\",\n\t\treason:  \"duplicate\",\n\t\tminutes: \"duplicate\",\n\t},\n\t\"retracted\": {\n\t\tcol:     \"Declined\",\n\t\treason:  \"retracted\",\n\t\tminutes: \"proposal retracted by author; **declined**\",\n\t},\n\t\"infeasible\": {\n\t\tcol:     \"Declined\",\n\t\treason:  \"infeasible\",\n\t\tminutes: \"declined as infeasible\",\n\t},\n\t\"obsolete\": {\n\t\tcol:     \"Declined\",\n\t\treason:  \"obsolete\",\n\t\tminutes: \"obsolete\",\n\t},\n\n\t// Misc\n\t\"removed\": {\n\t\tcol:     \"none\",\n\t\treason:  \"removed\",\n\t\tminutes: \"removed from proposal process\",\n\t},\n}\n\nfunc (di *Issue) parseActions() (a action, err error) {\n\tkey := strings.TrimSpace(di.Minutes)\n\n\t// TODO: Remove \"skip\" action and just use a blank.\n\n\t// TODO: We no longer use the \"unhold\" (\"taken off hold\") action; we\n\t// should infer that from the previous state.\n\n\t// Special cases\n\tif strings.HasPrefix(key, \"hold \") {\n\t\ta = sheetActions[\"hold\"]\n\t\ta.minutes = key\n\t\treturn a, nil\n\t}\n\n\tif a, ok := sheetActions[key]; ok {\n\t\treturn a, nil\n\t}\n\n\treturn action{}, fmt.Errorf(\"#%d unknown action %q\", di.Number, key)\n}\n\n// FinishDoc performs post-minutes updates to the Doc.\n//\n// Specifically, it moves the \"new status\" column to the \"current status\"\n// column, clears the \"new status\" column and attendees list, and updates issue\n// links to point to the latest comments.\n//\n// commentURLs maps from issue number to the URL of the most recent comment to\n// link to from the sheet.\nfunc (d *Doc) FinishDoc(commentURLs map[int]string) 
{\n\tlog.Printf(\"updating status columns in sheet\")\n\n\tsrv := d.srv\n\n\tlinkCol := d.cols.col(\"Issue\")\n\tcurStatus := d.cols.col(\"Cur. status\")\n\tnewStatus := d.cols.col(\"New status\")\n\n\tupdateSpreadsheetRequest := &sheets.BatchUpdateSpreadsheetRequest{}\n\taddRequest := func(req *sheets.Request) {\n\t\tupdateSpreadsheetRequest.Requests = append(updateSpreadsheetRequest.Requests, req)\n\t}\n\n\t// Move \"new status\" to \"current status\". (CutPasteRequest *almost* works\n\t// for this, but doesn't seem to implement PASTE_VALUES correctly.)\n\tcopyRange := coord{d.sheet, newStatus, d.dateRow}.To(newStatus+1, -1)\n\tpasteRange := coord{d.sheet, curStatus, d.dateRow}\n\tcopyReq := &sheets.CopyPasteRequest{\n\t\tSource:      copyRange,\n\t\tDestination: pasteRange.To(pasteRange.col, pasteRange.row), // Empty means same size as input\n\t\tPasteType:   \"PASTE_VALUES\",\n\t}\n\tclearReq := &sheets.UpdateCellsRequest{\n\t\tFields: \"userEnteredValue\",\n\t\tRange:  copyRange,\n\t}\n\taddRequest(&sheets.Request{CopyPaste: copyReq})\n\taddRequest(&sheets.Request{UpdateCells: clearReq})\n\n\t// Put placeholder in \"new status\"\n\tdatePlaceholder := \"<DATE>\"\n\tupdateDateReq := &sheets.UpdateCellsRequest{\n\t\tFields: \"userEnteredValue\",\n\t\tStart:  coord{d.sheet, newStatus, d.dateRow}.Coord(),\n\t\tRows: []*sheets.RowData{{\n\t\t\tValues: []*sheets.CellData{{\n\t\t\t\tUserEnteredValue: &sheets.ExtendedValue{\n\t\t\t\t\tStringValue: &datePlaceholder,\n\t\t\t\t},\n\t\t\t}},\n\t\t}},\n\t}\n\taddRequest(&sheets.Request{UpdateCells: updateDateReq})\n\n\t// Clear \"who\" (this triggers a conditional formatting)\n\twhoPlaceholder := \"\"\n\tupdateWhoReq := &sheets.UpdateCellsRequest{\n\t\tFields: \"userEnteredValue\",\n\t\tStart:  d.whoCell.Coord(),\n\t\tRows: []*sheets.RowData{{\n\t\t\tValues: []*sheets.CellData{{\n\t\t\t\tUserEnteredValue: &sheets.ExtendedValue{\n\t\t\t\t\tStringValue: 
&whoPlaceholder,\n\t\t\t\t},\n\t\t\t}},\n\t\t}},\n\t}\n\taddRequest(&sheets.Request{UpdateCells: updateWhoReq})\n\n\t// Update comment links\n\tissueToRow := make(map[int]rowIndex)\n\tfor _, issue := range d.Issues {\n\t\tissueToRow[issue.Number] = issue.row\n\t}\n\tfor issue, url := range commentURLs {\n\t\trow, ok := issueToRow[issue]\n\t\tif !ok {\n\t\t\tlog.Fatalf(\"comment URLs contains issue %d that is not in the sheet\", issue)\n\t\t}\n\t\tnumberString := fmt.Sprint(issue)\n\t\tsetLinkReq := &sheets.UpdateCellsRequest{\n\t\t\t// We need to set the text content as well as the format: if the\n\t\t\t// entered value is a HYPERLINK formula and we don't override that,\n\t\t\t// the link in the text format has no effect.\n\t\t\tFields: \"userEnteredValue,userEnteredFormat.textFormat\",\n\t\t\tStart:  coord{d.sheet, linkCol, row}.Coord(),\n\t\t\tRows: []*sheets.RowData{{\n\t\t\t\tValues: []*sheets.CellData{{\n\t\t\t\t\tUserEnteredValue: &sheets.ExtendedValue{\n\t\t\t\t\t\tStringValue: &numberString,\n\t\t\t\t\t},\n\t\t\t\t\tUserEnteredFormat: &sheets.CellFormat{\n\t\t\t\t\t\tTextFormat: &sheets.TextFormat{\n\t\t\t\t\t\t\tLink: &sheets.Link{\n\t\t\t\t\t\t\t\tUri: url,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t}},\n\t\t}\n\t\taddRequest(&sheets.Request{UpdateCells: setLinkReq})\n\t}\n\n\t// Perform updates\n\t_, err := srv.Spreadsheets.BatchUpdate(d.docID, updateSpreadsheetRequest).Do()\n\tif err != nil {\n\t\tlog.Printf(\"failed to update status columns in sheet: %s\", err)\n\t\tfailure = true\n\t}\n}\n\nfunc parseSpreadsheetDate(cell *sheets.ExtendedValue) (time.Time, bool) {\n\tif cell == nil || cell.NumberValue == nil {\n\t\treturn time.Time{}, false\n\t}\n\n\tday := int(*cell.NumberValue)\n\tif day == 0 {\n\t\treturn time.Time{}, false\n\t}\n\tvar day0 = time.Date(1899, time.December, 30, 12, 0, 0, 0, time.UTC)\n\treturn day0.Add(time.Duration(time.Duration(day) * 24 * time.Hour)), true\n}\n\ntype colMap map[string]colIndex\n\nfunc 
newColMap(row *sheets.RowData) colMap {\n\tm := make(map[string]colIndex)\n\tfor i, label := range row.Values {\n\t\tif label.EffectiveValue != nil && label.EffectiveValue.StringValue != nil {\n\t\t\tm[*label.EffectiveValue.StringValue] = colIndex(i)\n\t\t}\n\t}\n\treturn m\n}\n\nfunc (m colMap) col(name string) colIndex {\n\ti, ok := m[name]\n\tif !ok {\n\t\tpanic(\"unknown column label: \" + name)\n\t}\n\treturn i\n}\n\nfunc (m colMap) getEV(row *sheets.RowData, name string) *sheets.ExtendedValue {\n\ti, ok := m[name]\n\tif !ok {\n\t\tpanic(\"unknown column label: \" + name)\n\t}\n\tif int(i) >= len(row.Values) {\n\t\treturn nil\n\t}\n\treturn row.Values[i].EffectiveValue\n}\n\nfunc (m colMap) getString(row *sheets.RowData, name string) string {\n\tv := m.getEV(row, name)\n\tif v == nil || v.StringValue == nil {\n\t\treturn \"\"\n\t}\n\treturn *v.StringValue\n}\n\nfunc (m colMap) getterString(row *sheets.RowData) func(name string) string {\n\treturn func(name string) string {\n\t\treturn m.getString(row, name)\n\t}\n}\n\ntype colIndex int // 0-based\n\ntype rowIndex int // 0-based\n\ntype coord struct {\n\tsheet *sheets.Sheet\n\tcol   colIndex\n\trow   rowIndex\n}\n\nfunc (c coord) String() string {\n\treturn fmt.Sprintf(\"%s!%c%d\", c.sheet.Properties.Title, 'A'+rune(c.col), 1+c.row)\n}\n\nfunc (c coord) Coord() *sheets.GridCoordinate {\n\treturn &sheets.GridCoordinate{\n\t\tSheetId:     c.sheet.Properties.SheetId,\n\t\tRowIndex:    int64(c.row),\n\t\tColumnIndex: int64(c.col),\n\t}\n}\n\n// To returns a range over columns [c.col, col) and rows [c.row, row).\n//\n// Col must be >= c.col and row must be >= c.row. 
In general, they should be\n// strictly greater than; if they're equal than this range is empty.\n//\n// A special value of -1 indicates the range is unbounded in that direction.\nfunc (c coord) To(col colIndex, row rowIndex) *sheets.GridRange {\n\t// The encoding of this is SO ANNOYING.\n\tr := &sheets.GridRange{\n\t\tSheetId: c.sheet.Properties.SheetId,\n\t}\n\tset := func(f *int64, v int64, name string) {\n\t\t*f = v\n\t\tif v == -1 {\n\t\t\t// Set the field to the zero value and omit it from NullFields so it\n\t\t\t// doesn't get sent.\n\t\t\t*f = 0\n\t\t\treturn\n\t\t} else if v == 0 {\n\t\t\t// Force send the field, even though it's the zero value.\n\t\t\tr.NullFields = append(r.NullFields, name)\n\t\t}\n\t}\n\tset(&r.StartRowIndex, int64(c.row), \"StartRowIndex\")\n\tset(&r.StartColumnIndex, int64(c.col), \"StartColumnIndex\")\n\tset(&r.EndRowIndex, int64(row), \"EndRowIndex\")\n\tset(&r.EndColumnIndex, int64(col), \"EndColumnIndex\")\n\treturn r\n}\n"
  },
  {
    "path": "minutes3/gdoc_test.go",
    "content": "// Copyright 2025 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os/exec\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestParseActions(t *testing.T) {\n\twant := `\nadd:\n  col: \"Active\"\n  reason: \"\"\n  minutes: \"added to minutes\"\nskip:\n  col: \"Active\"\n  reason: \"\"\n  minutes: \"discussion ongoing\"\ndiscuss:\n  col: \"Active\"\n  reason: \"\"\n  minutes: \"discussion ongoing\"\ncomment:\n  col: \"Active\"\n  reason: \"\"\n  minutes: \"commented\"\ncheck:\n  col: \"Active\"\n  reason: \"\"\n  minutes: \"commented\"\n  check\nhold:\n  col: \"Hold\"\n  reason: \"\"\n  minutes: \"put on hold\"\nlikely accept:\n  col: \"Likely Accept\"\n  reason: \"\"\n  minutes: \"**likely accept**; last call for comments ⏳\"\naccept:\n  col: \"Accepted\"\n  reason: \"\"\n  minutes: \"no change in consensus; **accepted** 🎉\"\nlikely decline:\n  col: \"Likely Decline\"\n  reason: \"\"\n  minutes: \"**likely decline**; last call for comments ⏳\"\ndecline:\n  col: \"Declined\"\n  reason: \"\"\n  minutes: \"no change in consensus; **declined**\"\nduplicate:\n  col: \"Declined\"\n  reason: \"duplicate\"\n  minutes: \"duplicate\"\nretracted:\n  col: \"Declined\"\n  reason: \"retracted\"\n  minutes: \"proposal retracted by author; **declined**\"\ninfeasible:\n  col: \"Declined\"\n  reason: \"infeasible\"\n  minutes: \"declined as infeasible\"\nobsolete:\n  col: \"Declined\"\n  reason: \"obsolete\"\n  minutes: \"obsolete\"\nremoved:\n  col: \"none\"\n  reason: \"removed\"\n  minutes: \"removed from proposal process\"\nhold for #1234:\n  col: \"Hold\"\n  reason: \"\"\n  minutes: \"hold for #1234\"\n`\n\twant = strings.TrimPrefix(want, \"\\n\")\n\n\tvar got strings.Builder\n\tcheck := func(a string) {\n\t\tdi := Issue{Minutes: a}\n\t\tact, err := di.parseActions()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%s: %s\", a, 
err)\n\t\t\treturn\n\t\t}\n\t\tfmt.Fprintf(&got, \"%s:\\n  col: %q\\n  reason: %q\\n  minutes: %q\\n\", a, act.col, act.reason, act.minutes)\n\t\tif act.isCheck {\n\t\t\tfmt.Fprintf(&got, \"  check\\n\")\n\t\t}\n\t}\n\n\t// Active states\n\tcheck(\"add\")\n\tcheck(\"skip\")\n\tcheck(\"discuss\")\n\tcheck(\"comment\")\n\tcheck(\"check\")\n\n\t// Hold states\n\tcheck(\"hold\")\n\n\t// Terminal states\n\tcheck(\"likely accept\")\n\tcheck(\"accept\")\n\n\tcheck(\"likely decline\")\n\tcheck(\"decline\")\n\tcheck(\"duplicate\")\n\tcheck(\"retracted\")\n\tcheck(\"infeasible\")\n\tcheck(\"obsolete\")\n\tcheck(\"removed\")\n\n\t// Hold with comments\n\tcheck(\"hold for #1234\")\n\n\tif want != got.String() {\n\t\tdiff(t, want, got.String())\n\t}\n}\n\nfunc diff(t *testing.T, want, got string) {\n\tdir := t.TempDir()\n\tif err := os.WriteFile(dir+\"/want\", []byte(want), 0666); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := os.WriteFile(dir+\"/got\", []byte(got), 0666); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcmd := exec.Command(\"diff\", \"-u\", \"want\", \"got\")\n\tcmd.Dir = dir\n\tout, err := cmd.CombinedOutput()\n\tif _, ok := err.(*exec.ExitError); !ok {\n\t\tt.Errorf(\"diff failed: %s\\nwant:\\n%s\\ngot:\\n%s\", err, want, got)\n\t}\n\tt.Errorf(\"\\n%s\", out)\n}\n"
  },
  {
    "path": "minutes3/github.go",
    "content": "// Copyright 2024 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"errors\"\n\t\"log/slog\"\n\t\"strings\"\n\n\t\"rsc.io/github\"\n\t\"rsc.io/github/schema\"\n)\n\n// GitHubClient wraps the [github.Client] API to provide injection points.\ntype GitHubClient interface {\n\tGraphQLQuery(query string, vars github.Vars) (*schema.Query, error)\n\tGraphQLMutation(query string, vars github.Vars) (*schema.Mutation, error)\n\n\tCurrentUser() (string, error)\n\n\tSearchLabels(org string, repo string, query string) ([]*github.Label, error)\n\tSearchMilestones(org string, repo string, query string) ([]*github.Milestone, error)\n\n\tIssue(org string, repo string, n int) (*github.Issue, error)\n\tIssueComments(issue *github.Issue) ([]*github.IssueComment, error)\n\tAddIssueComment(issue *github.Issue, text string) (url string, _ error)\n\tAddIssueLabels(issue *github.Issue, labels ...*github.Label) error\n\tRemoveIssueLabels(issue *github.Issue, labels ...*github.Label) error\n\tCloseIssue(issue *github.Issue, reason schema.IssueClosedStateReason) error\n\tRetitleIssue(issue *github.Issue, title string) error\n\tRemilestoneIssue(issue *github.Issue, milestone *github.Milestone) error\n\n\tProjects(org string, query string) ([]*github.Project, error)\n\tProjectItems(p *github.Project) ([]*github.ProjectItem, error)\n\tDeleteProjectItem(project *github.Project, item *github.ProjectItem) error\n\tSetProjectItemFieldOption(project *github.Project, item *github.ProjectItem, field *github.ProjectField, option *github.ProjectFieldOption) error\n\n\tDiscussions(org string, repo string) ([]*github.Discussion, error)\n}\n\n// githubClient builds on top of the [github.Client] API for the needs of minutes3.\ntype githubClient struct{ *github.Client }\n\n// CurrentUser returns the user name of the current user.\nfunc (c githubClient) CurrentUser() 
(string, error) {\n\tconst graphql = `\n\t  query {\n\t    viewer {\n\t      login\n\t    }\n\t  }\n\t`\n\tout, err := c.GraphQLQuery(graphql, nil)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn out.Viewer.Login, nil\n}\n\n// AddIssueComment is equivalent to [github.Client.AddIssueComment],\n// but returns the URL of the new comment.\nfunc (c githubClient) AddIssueComment(issue *github.Issue, text string) (url string, _ error) {\n\tconst graphql = `\n\t  mutation($ID: ID!, $Text: String!) {\n\t    addComment(input: {subjectId: $ID, body: $Text}) {\n\t      clientMutationId\n\t      commentEdge {\n\t        node {\n\t          url\n\t        }\n\t      }\n\t    }\n\t  }\n\t`\n\tm, err := c.GraphQLMutation(graphql, github.Vars{\"ID\": issue.ID, \"Text\": text})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(m.AddComment.CommentEdge.Node.Url), nil\n}\n\n// CloseIssue is equivalent to [github.Client.CloseIssue],\n// but closes issue with the provided reason.\nfunc (c githubClient) CloseIssue(issue *github.Issue, reason schema.IssueClosedStateReason) error {\n\tconst graphql = `\n\t  mutation($ID: ID!, $Reason: IssueClosedStateReason) {\n\t    closeIssue(input: {issueId: $ID, stateReason: $Reason}) {\n\t      clientMutationId\n\t    }\n\t  }\n\t`\n\t_, err := c.GraphQLMutation(graphql, github.Vars{\"ID\": issue.ID, \"Reason\": reason})\n\treturn err\n}\n\n// GitHubLogger is a GitHubClient that logs all mutation operations and\n// delegates all actions to an underlying client.\ntype GitHubLogger struct {\n\tc      GitHubClient\n\tlogger *slog.Logger\n}\n\nvar _ GitHubClient = (*GitHubLogger)(nil)\n\nfunc (c *GitHubLogger) GraphQLQuery(query string, vars github.Vars) (*schema.Query, error) {\n\treturn c.c.GraphQLQuery(query, vars)\n}\n\nfunc (c *GitHubLogger) GraphQLMutation(query string, vars github.Vars) (*schema.Mutation, error) {\n\tc.logger.Info(\"github\", \"action\", \"GraphQLMutation\", \"query\", query, \"vars\", vars)\n\treturn 
c.c.GraphQLMutation(query, vars)\n}\n\nfunc (c *GitHubLogger) CurrentUser() (string, error) {\n\treturn c.c.CurrentUser()\n}\n\nfunc (c *GitHubLogger) SearchLabels(org string, repo string, query string) ([]*github.Label, error) {\n\treturn c.c.SearchLabels(org, repo, query)\n}\n\nfunc (c *GitHubLogger) SearchMilestones(org string, repo string, query string) ([]*github.Milestone, error) {\n\treturn c.c.SearchMilestones(org, repo, query)\n}\n\nfunc (c *GitHubLogger) Issue(org string, repo string, n int) (*github.Issue, error) {\n\treturn c.c.Issue(org, repo, n)\n}\n\nfunc (c *GitHubLogger) IssueComments(issue *github.Issue) ([]*github.IssueComment, error) {\n\treturn c.c.IssueComments(issue)\n}\n\nfunc (c *GitHubLogger) AddIssueComment(issue *github.Issue, text string) (string, error) {\n\tc.logger.Info(\"github\", \"action\", \"AddIssueComment\", \"issue\", issue.Number, \"text\", text)\n\treturn c.c.AddIssueComment(issue, text)\n}\n\ntype labelList []*github.Label\n\nfunc (ll labelList) String() string {\n\tvar b strings.Builder\n\tfor i, l := range ll {\n\t\tif i > 0 {\n\t\t\tb.WriteByte(',')\n\t\t}\n\t\tb.WriteString(l.Name)\n\t}\n\treturn b.String()\n}\n\nfunc (c *GitHubLogger) AddIssueLabels(issue *github.Issue, labels ...*github.Label) error {\n\tc.logger.Info(\"github\", \"action\", \"AddIssueLabels\", \"issue\", issue.Number, \"labels\", labelList(labels))\n\treturn c.c.AddIssueLabels(issue, labels...)\n}\n\nfunc (c *GitHubLogger) RemoveIssueLabels(issue *github.Issue, labels ...*github.Label) error {\n\tc.logger.Info(\"github\", \"action\", \"RemoveIssueLabels\", \"issue\", issue.Number, \"labels\", labelList(labels))\n\treturn c.c.RemoveIssueLabels(issue, labels...)\n}\n\nfunc (c *GitHubLogger) CloseIssue(issue *github.Issue, reason schema.IssueClosedStateReason) error {\n\tc.logger.Info(\"github\", \"action\", \"CloseIssue\", \"issue\", issue.Number, \"reason\", reason)\n\treturn c.c.CloseIssue(issue, reason)\n}\n\nfunc (c *GitHubLogger) 
RetitleIssue(issue *github.Issue, title string) error {\n\tc.logger.Info(\"github\", \"action\", \"RetitleIssue\", \"issue\", issue.Number, \"title\", title)\n\treturn c.c.RetitleIssue(issue, title)\n}\n\nfunc (c *GitHubLogger) RemilestoneIssue(issue *github.Issue, milestone *github.Milestone) error {\n\tc.logger.Info(\"github\", \"action\", \"RemilestoneIssue\", \"issue\", issue.Number, \"milestone\", milestone.Title)\n\treturn c.c.RemilestoneIssue(issue, milestone)\n}\n\nfunc (c *GitHubLogger) Projects(org string, query string) ([]*github.Project, error) {\n\treturn c.c.Projects(org, query)\n}\n\nfunc (c *GitHubLogger) ProjectItems(p *github.Project) ([]*github.ProjectItem, error) {\n\treturn c.c.ProjectItems(p)\n}\n\nfunc (c *GitHubLogger) DeleteProjectItem(project *github.Project, item *github.ProjectItem) error {\n\tc.logger.Info(\"github\", \"action\", \"DeleteProjectItem\", \"project\", project.Title, \"item\", item.Issue.Number)\n\treturn c.c.DeleteProjectItem(project, item)\n}\n\nfunc (c *GitHubLogger) SetProjectItemFieldOption(project *github.Project, item *github.ProjectItem, field *github.ProjectField, option *github.ProjectFieldOption) error {\n\tc.logger.Info(\"github\", \"action\", \"SetProjectItemFieldOption\", \"project\", project.Title, \"item\", item.Issue.Number, \"field\", field.Name, \"option\", option.Name)\n\treturn c.c.SetProjectItemFieldOption(project, item, field, option)\n}\n\nfunc (c *GitHubLogger) Discussions(org string, repo string) ([]*github.Discussion, error) {\n\treturn c.c.Discussions(org, repo)\n}\n\n// GitHubDryClient is a dry-run client that rejects mutation operations.\ntype GitHubDryClient struct {\n\tc GitHubClient\n}\n\nvar _ GitHubClient = (*GitHubDryClient)(nil)\n\nfunc (c *GitHubDryClient) GraphQLQuery(query string, vars github.Vars) (*schema.Query, error) {\n\treturn c.c.GraphQLQuery(query, vars)\n}\n\nvar ErrReadOnly = errors.New(\"cannot perform mutation on read-only client\")\n\nfunc (c *GitHubDryClient) 
GraphQLMutation(query string, vars github.Vars) (*schema.Mutation, error) {\n\t// No logging here, as GitHubLogger will handle it.\n\treturn nil, ErrReadOnly\n}\n\nfunc (c *GitHubDryClient) CurrentUser() (string, error) {\n\treturn c.c.CurrentUser()\n}\n\nfunc (c *GitHubDryClient) SearchLabels(org string, repo string, query string) ([]*github.Label, error) {\n\treturn c.c.SearchLabels(org, repo, query)\n}\n\nfunc (c *GitHubDryClient) SearchMilestones(org string, repo string, query string) ([]*github.Milestone, error) {\n\treturn c.c.SearchMilestones(org, repo, query)\n}\n\nfunc (c *GitHubDryClient) Issue(org string, repo string, n int) (*github.Issue, error) {\n\treturn c.c.Issue(org, repo, n)\n}\n\nfunc (c *GitHubDryClient) IssueComments(issue *github.Issue) ([]*github.IssueComment, error) {\n\treturn c.c.IssueComments(issue)\n}\n\nfunc (c *GitHubDryClient) AddIssueComment(issue *github.Issue, text string) (string, error) {\n\treturn \"\", nil\n}\n\nfunc (c *GitHubDryClient) AddIssueLabels(issue *github.Issue, labels ...*github.Label) error {\n\treturn nil\n}\n\nfunc (c *GitHubDryClient) RemoveIssueLabels(issue *github.Issue, labels ...*github.Label) error {\n\treturn nil\n}\n\nfunc (c *GitHubDryClient) CloseIssue(issue *github.Issue, reason schema.IssueClosedStateReason) error {\n\treturn nil\n}\n\nfunc (c *GitHubDryClient) RetitleIssue(issue *github.Issue, title string) error {\n\treturn nil\n}\n\nfunc (c *GitHubDryClient) RemilestoneIssue(issue *github.Issue, milestone *github.Milestone) error {\n\treturn nil\n}\n\nfunc (c *GitHubDryClient) Projects(org string, query string) ([]*github.Project, error) {\n\treturn c.c.Projects(org, query)\n}\n\nfunc (c *GitHubDryClient) ProjectItems(p *github.Project) ([]*github.ProjectItem, error) {\n\treturn c.c.ProjectItems(p)\n}\n\nfunc (c *GitHubDryClient) DeleteProjectItem(project *github.Project, item *github.ProjectItem) error {\n\treturn nil\n}\n\nfunc (c *GitHubDryClient) SetProjectItemFieldOption(project 
*github.Project, item *github.ProjectItem, field *github.ProjectField, option *github.ProjectFieldOption) error {\n\treturn nil\n}\n\nfunc (c *GitHubDryClient) Discussions(org string, repo string) ([]*github.Discussion, error) {\n\treturn c.c.Discussions(org, repo)\n}\n"
  },
  {
    "path": "minutes3/go.mod",
    "content": "module github.com/aclements/go-misc/minutes3\n\ngo 1.23\n\nrequire (\n\tgolang.org/x/oauth2 v0.21.0\n\tgoogle.golang.org/api v0.189.0\n\trsc.io/github v0.5.1-0.20250216154006-6eda34706df1\n)\n\nrequire (\n\tcloud.google.com/go/auth v0.7.2 // indirect\n\tcloud.google.com/go/auth/oauth2adapt v0.2.3 // indirect\n\tcloud.google.com/go/compute/metadata v0.5.0 // indirect\n\tgithub.com/felixge/httpsnoop v1.0.4 // indirect\n\tgithub.com/go-logr/logr v1.4.2 // indirect\n\tgithub.com/go-logr/stdr v1.2.2 // indirect\n\tgithub.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect\n\tgithub.com/golang/protobuf v1.5.4 // indirect\n\tgithub.com/google/s2a-go v0.1.7 // indirect\n\tgithub.com/google/uuid v1.6.0 // indirect\n\tgithub.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect\n\tgithub.com/googleapis/gax-go/v2 v2.12.5 // indirect\n\tgo.opencensus.io v0.24.0 // indirect\n\tgo.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect\n\tgo.opentelemetry.io/otel v1.24.0 // indirect\n\tgo.opentelemetry.io/otel/metric v1.24.0 // indirect\n\tgo.opentelemetry.io/otel/trace v1.24.0 // indirect\n\tgolang.org/x/crypto v0.25.0 // indirect\n\tgolang.org/x/net v0.27.0 // indirect\n\tgolang.org/x/sys v0.22.0 // indirect\n\tgolang.org/x/text v0.16.0 // indirect\n\tgoogle.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade // indirect\n\tgoogle.golang.org/grpc v1.64.1 // indirect\n\tgoogle.golang.org/protobuf v1.34.2 // indirect\n)\n"
  },
  {
    "path": "minutes3/go.sum",
    "content": "cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=\ncloud.google.com/go/auth v0.7.2 h1:uiha352VrCDMXg+yoBtaD0tUF4Kv9vrtrWPYXwutnDE=\ncloud.google.com/go/auth v0.7.2/go.mod h1:VEc4p5NNxycWQTMQEDQF0bd6aTMb6VgYDXEwiJJQAbs=\ncloud.google.com/go/auth/oauth2adapt v0.2.3 h1:MlxF+Pd3OmSudg/b1yZ5lJwoXCEaeedAguodky1PcKI=\ncloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I=\ncloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY=\ncloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY=\ngithub.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=\ngithub.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=\ngithub.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=\ngithub.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=\ngithub.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=\ngithub.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=\ngithub.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=\ngithub.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=\ngithub.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=\ngithub.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=\ngithub.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=\ngithub.com/go-logr/logr v1.2.2/go.mod 
h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=\ngithub.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=\ngithub.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=\ngithub.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=\ngithub.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=\ngithub.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=\ngithub.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=\ngithub.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=\ngithub.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=\ngithub.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=\ngithub.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=\ngithub.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=\ngithub.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=\ngithub.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=\ngithub.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=\ngithub.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=\ngithub.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=\ngithub.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=\ngithub.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=\ngithub.com/golang/protobuf v1.5.4/go.mod 
h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=\ngithub.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=\ngithub.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=\ngithub.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=\ngithub.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=\ngithub.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=\ngithub.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=\ngithub.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=\ngithub.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=\ngithub.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=\ngithub.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=\ngithub.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=\ngithub.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA=\ngithub.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E=\ngithub.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=\ngithub.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/objx 
v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=\ngithub.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=\ngithub.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=\ngithub.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=\ngithub.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=\ngithub.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=\ngo.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=\ngo.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=\ngo.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=\ngo.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=\ngo.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=\ngo.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=\ngo.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=\ngo.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=\ngo.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=\ngo.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=\ngolang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=\ngolang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=\ngolang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=\ngolang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=\ngolang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=\ngolang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=\ngolang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=\ngolang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=\ngolang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=\ngolang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=\ngolang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=\ngolang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=\ngolang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=\ngolang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs=\ngolang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=\ngolang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=\ngolang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=\ngolang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=\ngolang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=\ngolang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=\ngolang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=\ngolang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=\ngolang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=\ngolang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=\ngolang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngoogle.golang.org/api v0.189.0 h1:equMo30LypAkdkLMBqfeIqtyAnlyig1JSZArl4XPwdI=\ngoogle.golang.org/api v0.189.0/go.mod h1:FLWGJKb0hb+pU2j+rJqwbnsF+ym+fQs73rbJ+KAUgy8=\ngoogle.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=\ngoogle.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=\ngoogle.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod 
h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=\ngoogle.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=\ngoogle.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=\ngoogle.golang.org/genproto v0.0.0-20240722135656-d784300faade h1:lKFsS7wpngDgSCeFn7MoLy+wBDQZ1UQIJD4UNM1Qvkg=\ngoogle.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3 h1:QW9+G6Fir4VcRXVH8x3LilNAb6cxBGLa6+GM4hRwexE=\ngoogle.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3/go.mod h1:kdrSS/OiLkPrNUpzD4aHgCq2rVuC/YRxok32HXZ4vRE=\ngoogle.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade h1:oCRSWfwGXQsqlVdErcyTt4A93Y8fo0/9D4b1gnI++qo=\ngoogle.golang.org/genproto/googleapis/rpc v0.0.0-20240722135656-d784300faade/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=\ngoogle.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=\ngoogle.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=\ngoogle.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=\ngoogle.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=\ngoogle.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=\ngoogle.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA=\ngoogle.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0=\ngoogle.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=\ngoogle.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=\ngoogle.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=\ngoogle.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod 
h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=\ngoogle.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=\ngoogle.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\ngoogle.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\ngoogle.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=\ngoogle.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=\ngoogle.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=\ngoogle.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=\ngopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\nhonnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nhonnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=\nrsc.io/github v0.5.1-0.20250216154006-6eda34706df1 h1:/qOyUAYLPYgW2SUIHarlfDPVsS5orpePdFEwtqD4se0=\nrsc.io/github v0.5.1-0.20250216154006-6eda34706df1/go.mod h1:O+mjyPFYgj/EdC2kTQT2oG8hmB2+K30YW//KcICRUxI=\n"
  },
  {
    "path": "minutes3/minutes.go",
    "content": "// Copyright 2022 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Minutes is the program we use to post the proposal review minutes.\n// It is a demonstration of the use of the rsc.io/github API, but it is also not great code,\n// which is why it is buried in an internal directory.\npackage main\n\nimport (\n\t\"bytes\"\n\t\"encoding/csv\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io/fs\"\n\t\"log\"\n\t\"log/slog\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"rsc.io/github\"\n\t\"rsc.io/github/schema\"\n)\n\nvar docjson = flag.Bool(\"docjson\", false, \"print google doc info in json\")\nvar doccsv = flag.Bool(\"doccsv\", false, \"print google doc info in csv\")\nvar apply = flag.Bool(\"apply\", false, \"perform actions\")\nvar testSheet = flag.String(\"test-sheet\", \"\", \"use sheet doc `id` for testing\")\nvar flagV = flag.Bool(\"v\", false, \"log all GitHub actions\")\n\nvar failure = false\n\nfunc main() {\n\tconst sheetDocID = \"1EG7oPcLls9HI_exlHLYuwk2YaN4P5mDc4O2vGyRqZHU\"\n\n\tlog.SetPrefix(\"minutes3: \")\n\tlog.SetFlags(0)\n\n\tflag.Parse()\n\tdocID := sheetDocID\n\tif *testSheet != \"\" {\n\t\tif *apply {\n\t\t\tlog.Fatalf(\"cannot use both -test-sheet and -apply\")\n\t\t}\n\t\tdocID = *testSheet\n\t\tif docID == sheetDocID {\n\t\t\tlog.Fatalf(\"-test-sheet is the ID of the main sheet\")\n\t\t}\n\t}\n\tdoc := parseDoc(docID)\n\tif *docjson {\n\t\tjs, err := json.MarshalIndent(doc, \"\", \"\\t\")\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tos.Stdout.Write(append(js, '\\n'))\n\t\treturn\n\t}\n\tif *doccsv {\n\t\tvar out [][]string\n\t\tfor _, issue := range doc.Issues {\n\t\t\tout = append(out, []string{fmt.Sprint(issue.Number), issue.Minutes, issue.Title, issue.Details, issue.Comment, issue.Notes})\n\t\t}\n\t\tw := 
csv.NewWriter(os.Stdout)\n\t\tw.WriteAll(out)\n\t\tw.Flush()\n\t\treturn\n\t}\n\n\tr, err := NewReporter(!*apply)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tr.RetireOld()\n\n\tminutes, commentURLs := r.Update(doc)\n\tif failure {\n\t\t// TODO: Should we delay updates and apply them only if there are no\n\t\t// failures?\n\t\tos.Exit(1)\n\t}\n\tconst minutesIssue = 33502 // AKA https://go.dev/s/proposal-minutes\n\tr.PostMinutes(minutes, minutesIssue)\n\n\tif !*apply && *testSheet == \"\" {\n\t\tfmt.Println()\n\t\tfmt.Printf(\"Re-run with -apply to perform above actions\\n\")\n\t\treturn\n\t}\n\n\tdoc.FinishDoc(commentURLs)\n\tif failure {\n\t\tos.Exit(1)\n\t}\n}\n\nfunc getConfig(path ...string) string {\n\tcfgDir, err := os.UserConfigDir()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn filepath.Join(append([]string{cfgDir, \"proposal-minutes\"}, path...)...)\n}\n\nfunc getCacheDir() string {\n\tcacheDir, err := os.UserCacheDir()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tcacheDir = filepath.Join(cacheDir, \"proposal-minutes\")\n\tif err := os.MkdirAll(cacheDir, 0777); err != nil {\n\t\tlog.Fatalf(\"creating cache directory: %s\", err)\n\t}\n\treturn cacheDir\n}\n\ntype Reporter struct {\n\tClient    GitHubClient\n\tProposals *github.Project\n\tItems     map[int]*github.ProjectItem\n\tLabels    map[string]*github.Label\n\tBacklog   *github.Milestone\n}\n\nfunc NewReporter(dryRun bool) (*Reporter, error) {\n\ttoken, err := os.ReadFile(getConfig(\"github.tok\"))\n\tif err != nil {\n\t\tif errors.Is(err, fs.ErrNotExist) {\n\t\t\tlog.Println(\"Please follow the instructions in README.md to create a GitHub token.\")\n\t\t}\n\t\tlog.Fatal(err)\n\t}\n\ttoken = bytes.TrimSpace(token)\n\n\tvar c GitHubClient\n\tc = githubClient{github.NewClient(string(token))}\n\tif dryRun {\n\t\tc = &GitHubDryClient{c}\n\t}\n\tif *flagV {\n\t\tc = &GitHubLogger{c, slog.Default()}\n\t}\n\n\tr := &Reporter{Client: c}\n\n\tps, err := r.Client.Projects(\"golang\", \"\")\n\tif err 
!= nil {\n\t\treturn nil, err\n\t}\n\tfor _, p := range ps {\n\t\tif p.Title == \"Proposals\" {\n\t\t\tr.Proposals = p\n\t\t\tbreak\n\t\t}\n\t}\n\tif r.Proposals == nil {\n\t\treturn nil, fmt.Errorf(\"cannot find Proposals project\")\n\t}\n\n\tlabels, err := r.Client.SearchLabels(\"golang\", \"go\", \"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr.Labels = make(map[string]*github.Label)\n\tfor _, label := range labels {\n\t\tr.Labels[label.Name] = label\n\t}\n\n\tmilestones, err := r.Client.SearchMilestones(\"golang\", \"go\", \"Backlog\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, m := range milestones {\n\t\tif m.Title == \"Backlog\" {\n\t\t\tr.Backlog = m\n\t\t\tbreak\n\t\t}\n\t}\n\tif r.Backlog == nil {\n\t\treturn nil, fmt.Errorf(\"cannot find Backlog milestone\")\n\t}\n\n\titems, err := r.Client.ProjectItems(r.Proposals)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr.Items = make(map[int]*github.ProjectItem)\n\tfor _, item := range items {\n\t\tif item.Issue == nil {\n\t\t\tlog.Printf(\"unexpected project item with no issue\")\n\t\t\tfailure = true\n\t\t\tcontinue\n\t\t}\n\t\tr.Items[item.Issue.Number] = item\n\t}\n\n\treturn r, nil\n}\n\ntype Minutes struct {\n\tDate   time.Time\n\tWho    []string\n\tEvents []*Event\n}\n\ntype Event struct {\n\tColumn  string\n\tIssue   string\n\tTitle   string\n\tActions []string\n}\n\nconst checkQuestion = \"Have all remaining concerns about this proposal been addressed?\"\n\nfunc (r *Reporter) Update(doc *Doc) (*Minutes, map[int]string) {\n\tm := new(Minutes)\n\tm.Date = doc.Date\n\n\t// Attendees\n\tif len(doc.Who) == 0 {\n\t\tlog.Fatalf(\"missing attendees\")\n\t}\n\tm.Who = make([]string, len(doc.Who))\n\tfor i, w := range doc.Who {\n\t\tm.Who[i] = gitWho(w)\n\t}\n\tsort.Strings(m.Who)\n\n\t// Get current user's login for constructing messages\n\tuserName, err := r.Client.CurrentUser()\n\tif err != nil {\n\t\tlog.Fatalf(\"getting current user: %v\", err)\n\t}\n\n\tseen := 
make(map[int]bool)\n\tcommentURLs := make(map[int]string)\nIssues:\n\tfor _, di := range doc.Issues {\n\t\tvar commentURL string\n\t\titem := r.Items[di.Number]\n\t\tif item == nil {\n\t\t\t// TODO: Maybe \"add\" should add it to the proposal project if it\n\t\t\t// isn't already there and set the Status to \"Active\".\n\t\t\tlog.Printf(\"missing from proposal project: #%d\", di.Number)\n\t\t\tfailure = true\n\t\t\tcontinue\n\t\t}\n\t\tseen[di.Number] = true\n\t\tissue := item.Issue\n\t\tstatus := item.FieldByName(\"Status\")\n\t\tif status == nil {\n\t\t\tlog.Printf(\"item missing status in proposal project (set to incoming?): #%d\", di.Number)\n\t\t\tfailure = true\n\t\t\tcontinue\n\t\t}\n\n\t\ttitle := strings.TrimSpace(strings.TrimPrefix(issue.Title, \"proposal:\"))\n\t\tif title != di.Title {\n\t\t\tlog.Printf(\"#%d title mismatch:\\nGH: %s\\nDoc: %s\", di.Number, issue.Title, di.Title)\n\t\t\tfailure = true\n\t\t}\n\n\t\turl := di.URL()\n\t\taction, err := di.parseActions()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"#%d: %s\", di.Number, err)\n\t\t\tfailure = true\n\t\t\tcontinue\n\t\t}\n\n\t\tcommentsOnce := sync.OnceValues(func() ([]*github.IssueComment, error) {\n\t\t\tcomments, err := r.Client.IssueComments(issue)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"%s: cannot read issue comments\\n\", url)\n\t\t\t\tfailure = true\n\t\t\t}\n\t\t\treturn comments, err\n\t\t})\n\n\t\tif action.isCheck {\n\t\t\tcomments, err := commentsOnce()\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor i := len(comments) - 1; i >= 0; i-- {\n\t\t\t\tc := comments[i]\n\t\t\t\tif time.Since(c.CreatedAt) < 5*24*time.Hour && strings.Contains(c.Body, checkQuestion) {\n\t\t\t\t\tlog.Printf(\"%s: recently checked\", url)\n\t\t\t\t\tcommentURL = c.URL\n\t\t\t\t\tcontinue Issues\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif di.Details == \"\" {\n\t\t\t\tlog.Printf(\"%s: missing proposal details\", url)\n\t\t\t\tfailure = true\n\t\t\t\tcontinue Issues\n\t\t\t}\n\t\t\tmsg := 
fmt.Sprintf(\"%s\\n\\n%s\", checkQuestion, di.Details)\n\t\t\t// log.Fatalf(\"wouldpost %s\\n%s\", url, msg)\n\t\t\tif url, err := r.Client.AddIssueComment(issue, msg); err != nil && err != ErrReadOnly {\n\t\t\t\tlog.Printf(\"%s: posting comment: %v\", url, err)\n\t\t\t\tfailure = true\n\t\t\t} else {\n\t\t\t\tcommentURL = url\n\t\t\t}\n\t\t\tlog.Printf(\"posted %s\", url)\n\t\t}\n\n\t\tif status.Option.Name != action.col {\n\t\t\tmsg := updateMsg(status.Option.Name, action.col, action.reason, userName)\n\t\t\tif msg == \"\" {\n\t\t\t\tlog.Fatalf(\"no update message for %s\", action.col)\n\t\t\t}\n\t\t\tif action.col == \"Likely Accept\" || action.col == \"Accepted\" {\n\t\t\t\tif di.Details == \"\" {\n\t\t\t\t\tlog.Printf(\"%s: missing proposal details\", url)\n\t\t\t\t\tfailure = true\n\t\t\t\t\tcontinue Issues\n\t\t\t\t}\n\t\t\t\tmsg += \"\\n\\n\" + di.Details\n\t\t\t}\n\t\t\tf := r.Proposals.FieldByName(\"Status\")\n\t\t\tif action.col == \"none\" {\n\t\t\t\tif err := r.Client.DeleteProjectItem(r.Proposals, item); err != nil {\n\t\t\t\t\tlog.Printf(\"%s: deleting proposal item: %v\", url, err)\n\t\t\t\t\tfailure = true\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\to := f.OptionByName(action.col)\n\t\t\t\tif o == nil {\n\t\t\t\t\tlog.Printf(\"%s: moving from %s to %s: no such status\\n\", url, status.Option.Name, action.col)\n\t\t\t\t\tfailure = true\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif err := r.Client.SetProjectItemFieldOption(r.Proposals, item, f, o); err != nil {\n\t\t\t\t\tlog.Printf(\"%s: moving from %s to %s: %v\\n\", url, status.Option.Name, action.col, err)\n\t\t\t\t\tfailure = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tif cURL, err := r.Client.AddIssueComment(issue, msg); err != nil && err != ErrReadOnly {\n\t\t\t\tlog.Printf(\"%s: posting comment: %v\", url, err)\n\t\t\t\tfailure = true\n\t\t\t} else {\n\t\t\t\tcommentURL = cURL\n\t\t\t}\n\t\t}\n\n\t\tneedLabel := func(name string) {\n\t\t\tif issue.LabelByName(name) == nil {\n\t\t\t\tlab := 
r.Labels[name]\n\t\t\t\tif lab == nil {\n\t\t\t\t\tlog.Fatalf(\"%s: cannot find label %s\", url, name)\n\t\t\t\t}\n\t\t\t\tif err := r.Client.AddIssueLabels(issue, lab); err != nil {\n\t\t\t\t\tlog.Printf(\"%s: adding %s: %v\", url, name, err)\n\t\t\t\t\tfailure = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tdropLabel := func(name string) {\n\t\t\tif lab := issue.LabelByName(name); lab != nil {\n\t\t\t\tif err := r.Client.RemoveIssueLabels(issue, lab); err != nil {\n\t\t\t\t\tlog.Printf(\"%s: removing %s: %v\", url, name, err)\n\t\t\t\t\tfailure = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tsetLabel := func(name string, val bool) {\n\t\t\tif val {\n\t\t\t\tneedLabel(name)\n\t\t\t} else {\n\t\t\t\tdropLabel(name)\n\t\t\t}\n\t\t}\n\n\t\tforceCloseAsNotPlanned := func() {\n\t\t\tif !issue.Closed {\n\t\t\t\tif err := r.Client.CloseIssue(issue, schema.IssueClosedStateReason_NOT_PLANNED); err != nil {\n\t\t\t\t\tlog.Printf(\"%s: closing issue: %v\", url, err)\n\t\t\t\t\tfailure = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif action.col == \"Accepted\" {\n\t\t\tif strings.HasPrefix(issue.Title, \"proposal:\") {\n\t\t\t\tif err := r.Client.RetitleIssue(issue, title); err != nil {\n\t\t\t\t\tlog.Printf(\"%s: retitling: %v\", url, err)\n\t\t\t\t\tfailure = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tif issue.Milestone == nil || issue.Milestone.Title == \"Proposal\" {\n\t\t\t\tif err := r.Client.RemilestoneIssue(issue, r.Backlog); err != nil {\n\t\t\t\t\tlog.Printf(\"%s: moving out of Proposal milestone: %v\", url, err)\n\t\t\t\t\tfailure = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif action.col == \"Declined\" {\n\t\t\tforceCloseAsNotPlanned()\n\t\t}\n\n\t\tsetLabel(\"Proposal-Accepted\", action.col == \"Accepted\")\n\t\tsetLabel(\"Proposal-FinalCommentPeriod\", action.col == \"Likely Accept\" || action.col == \"Likely Decline\")\n\t\tsetLabel(\"Proposal-Hold\", action.col == \"Hold\")\n\n\t\tm.Events = append(m.Events, &Event{Column: action.col, Issue: fmt.Sprint(di.Number), Title: title, Actions: 
[]string{action.minutes}})\n\n\t\tif commentURL == \"\" {\n\t\t\t// Search for the latest comment from a committee member.\n\t\t\t//\n\t\t\t// TODO: Don't touch the link for \"skip\" or \"discuss\". There can be\n\t\t\t// multiple actions, so this isn't completely straightforward.\n\t\t\t//\n\t\t\t// TODO: For status \"comment\", check that what we find is recent?\n\t\t\tcomments, err := commentsOnce()\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor i := len(comments) - 1; i >= 0; i-- {\n\t\t\t\tc := comments[i]\n\t\t\t\tif committeeUsers[c.Author] {\n\t\t\t\t\tcommentURL = c.URL\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tif commentURL != \"\" {\n\t\t\tcommentURLs[di.Number] = commentURL\n\t\t}\n\t}\n\n\tfor id, item := range r.Items {\n\t\tstatus := item.FieldByName(\"Status\")\n\t\tif status != nil {\n\t\t\tswitch status.Option.Name {\n\t\t\tcase \"Active\", \"Likely Accept\", \"Likely Decline\":\n\t\t\t\tif !seen[id] {\n\t\t\t\t\tlog.Printf(\"#%d: missing from doc\", id)\n\t\t\t\t\tfailure = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tsort.Slice(m.Events, func(i, j int) bool {\n\t\treturn m.Events[i].Title < m.Events[j].Title\n\t})\n\treturn m, commentURLs\n}\n\nfunc (r *Reporter) PostMinutes(m *Minutes, issueNum int) {\n\tvar buf bytes.Buffer\n\n\tprefix := fmt.Sprintf(\"**%s / \", m.Date.Format(\"2006-01-02\"))\n\tbuf.WriteString(prefix)\n\tfor i, who := range m.Who {\n\t\tif i > 0 {\n\t\t\tfmt.Fprintf(&buf, \", \")\n\t\t}\n\t\tfmt.Fprintf(&buf, \"%s\", who)\n\t}\n\tfmt.Fprintf(&buf, \"**\\n\\n\")\n\n\tdisc, err := r.Client.Discussions(\"golang\", \"go\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfirst := true\n\tfor _, d := range disc {\n\t\tif d.Locked || d.Closed {\n\t\t\tcontinue\n\t\t}\n\t\tif first {\n\t\t\tfmt.Fprintf(&buf, \"**Discussions (not yet proposals)**\\n\\n\")\n\t\t\tfirst = false\n\t\t}\n\t\tfmt.Fprintf(&buf, \"- **%s** [#%d](https://go.dev/issue/%d)\\n\", markdownEscape(strings.TrimSpace(d.Title)), d.Number, 
d.Number)\n\t}\n\tif !first {\n\t\tfmt.Fprintf(&buf, \"\\n\")\n\t}\n\n\tcolumns := []string{\n\t\t\"Accepted\",\n\t\t\"Declined\",\n\t\t\"Likely Accept\",\n\t\t\"Likely Decline\",\n\t\t\"Active\",\n\t\t\"Hold\",\n\t\t\"Other\",\n\t}\n\n\tfor _, col := range columns {\n\t\tn := 0\n\t\tfor i, e := range m.Events {\n\t\t\tif e == nil || e.Column != col && col != \"Other\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif n == 0 {\n\t\t\t\tfmt.Fprintf(&buf, \"**%s**\\n\\n\", col)\n\t\t\t}\n\t\t\tn++\n\t\t\tfmt.Fprintf(&buf, \"- **%s** [#%s](https://go.dev/issue/%s)\\n\", markdownEscape(strings.TrimSpace(e.Title)), e.Issue, e.Issue)\n\t\t\tfor _, a := range e.Actions {\n\t\t\t\tif a == \"\" {\n\t\t\t\t\t// If we print an empty string, the - by itself will turn\n\t\t\t\t\t// the previous line into a markdown heading!\n\t\t\t\t\t// Also everything should have an action.\n\t\t\t\t\tlog.Fatalf(\"#%s: missing action\", e.Issue)\n\t\t\t\t}\n\t\t\t\tfmt.Fprintf(&buf, \"  - %s\\n\", a)\n\t\t\t}\n\t\t\tm.Events[i] = nil\n\t\t}\n\t\tif n == 0 && col != \"Hold\" && col != \"Other\" {\n\t\t\tfmt.Fprintf(&buf, \"**%s**\\n\\n\", col)\n\t\t\tfmt.Fprintf(&buf, \"- none\\n\")\n\t\t}\n\t\tfmt.Fprintf(&buf, \"\\n\")\n\t}\n\n\tpost := buf.String()\n\n\t// Check if we've already posted this.\n\tissue, err := r.Client.Issue(\"golang\", \"go\", issueNum)\n\tif err != nil {\n\t\tlog.Fatalf(\"could not find minutes issue #%d: %s\", issueNum, err)\n\t}\n\tcomments, err := r.Client.IssueComments(issue)\n\tif err != nil {\n\t\tlog.Fatalf(\"could not read minutes issue comments: %s\", err)\n\t}\n\tfor _, c := range comments {\n\t\tif strings.Contains(c.Body, prefix) {\n\t\t\tif c.Body != post {\n\t\t\t\tlog.Fatalf(\"minutes issue #%d has has comment from %s, but does not match full post\", issueNum, m.Date.Format(\"2006-01-02\"))\n\t\t\t}\n\t\t\tlog.Printf(\"already posted to minutes #%d\", issueNum)\n\t\t\treturn\n\t\t}\n\t}\n\n\t// Post minutes\n\tlog.Printf(\"posting to minutes #%d\", issueNum)\n\tif _, 
err := r.Client.AddIssueComment(issue, post); err != nil {\n\t\tlog.Fatalf(\"error posting to minutes #%d: %s\", issueNum, err)\n\t}\n}\n\nvar markdownEscaper = strings.NewReplacer(\n\t\"_\", `\\_`,\n\t\"*\", `\\*`,\n\t\"`\", \"\\\\`\",\n\t\"[\", `\\[`,\n)\n\nfunc markdownEscape(s string) string {\n\treturn markdownEscaper.Replace(s)\n}\n\nfunc (r *Reporter) RetireOld() {\n\tfor _, item := range r.Items {\n\t\tissue := item.Issue\n\t\tif issue.Closed && !issue.ClosedAt.IsZero() && time.Since(issue.ClosedAt) > 365*24*time.Hour {\n\t\t\tlog.Printf(\"retire #%d\", issue.Number)\n\t\t\tif err := r.Client.DeleteProjectItem(r.Proposals, item); err != nil {\n\t\t\t\tlog.Printf(\"#%d: deleting proposal item: %v\", issue.Number, err)\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "minutes3/oauth.go",
    "content": "// Copyright 2024 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"log\"\n\t\"net\"\n\t\"net/http\"\n\t\"os\"\n\t\"path/filepath\"\n\n\t\"golang.org/x/oauth2\"\n)\n\n// Based on https://github.com/eliben/code-for-blog/blob/main/2024/go-docs-sheets-auth/using-oauth2-auto-token.go\n\n// makeOAuthClient creates a new http.Client with oauth2 set up from the\n// given config.\nfunc makeOAuthClient(cacheDir string, config *oauth2.Config) *http.Client {\n\ttokFile := filepath.Join(cacheDir, \"token.json\")\n\ttok, err := loadCachedToken(tokFile)\n\tif err != nil {\n\t\ttok = getTokenFromWeb(config)\n\t\tsaveCachedToken(tokFile, tok)\n\t}\n\treturn config.Client(context.Background(), tok)\n}\n\n// getTokenFromWeb launches a web browser to authenticate the user vs. Google's\n// auth server and returns the token.\nfunc getTokenFromWeb(config *oauth2.Config) *oauth2.Token {\n\tconst redirectPath = \"/redirect\"\n\t// We spin up a goroutine with a web server listening on the redirect route,\n\t// which the auth server will redirect the user's browser to after\n\t// authentication.\n\tlistener, err := net.Listen(\"tcp\", \":0\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tport := listener.Addr().(*net.TCPAddr).Port\n\n\t// When the web server receives redirection, it sends the code to codeChan.\n\tcodeChan := make(chan string)\n\tvar srv http.Server\n\n\tgo func() {\n\t\tmux := http.NewServeMux()\n\t\tmux.HandleFunc(redirectPath, func(w http.ResponseWriter, req *http.Request) {\n\t\t\tcodeChan <- req.URL.Query().Get(\"code\")\n\t\t\tfmt.Fprintf(w, \"You may now close this tab.\")\n\t\t})\n\t\tsrv.Handler = mux\n\t\tif err := srv.Serve(listener); err != http.ErrServerClosed {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}()\n\n\tconfig.RedirectURL = fmt.Sprintf(\"http://localhost:%d%s\", port, 
redirectPath)\n\t// Use PKCE to protect against CSRF attacks\n\tverifier := oauth2.GenerateVerifier()\n\tauthURL := config.AuthCodeURL(\"state-token\", oauth2.AccessTypeOffline, oauth2.S256ChallengeOption(verifier))\n\tfmt.Fprintln(os.Stderr, \"Click this link to authenticate:\\n\", authURL)\n\n\t// Receive code from the web server and shut it down.\n\tauthCode := <-codeChan\n\tif err := srv.Shutdown(context.Background()); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Exchange the auth code for a token (and check the verifier).\n\ttok, err := config.Exchange(context.Background(), authCode, oauth2.VerifierOption(verifier))\n\tif err != nil {\n\t\tlog.Fatalf(\"unable to retrieve token from web: %v\", err)\n\t}\n\treturn tok\n}\n\n// loadCachedToken tries to load a cached token from a local file.\nfunc loadCachedToken(file string) (*oauth2.Token, error) {\n\tb, err := os.ReadFile(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttok := &oauth2.Token{}\n\terr = json.Unmarshal(b, &tok)\n\treturn tok, err\n}\n\n// saveCachedToken saves an oauth2 token to a local file.\nfunc saveCachedToken(path string, token *oauth2.Token) {\n\tlog.Printf(\"Saving token to: %s\\n\", path)\n\tb, err := json.Marshal(token)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\terr = os.WriteFile(path, b, 0600)\n\tif err != nil {\n\t\tlog.Fatalf(\"unable to cache OAuth token: %v\", err)\n\t}\n}\n"
  },
  {
    "path": "minutes3/tables.go",
    "content": "// Copyright 2022 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"iter\"\n\t\"maps\"\n\t\"os\"\n)\n\nvar whoMap = map[string]string{\n\t\"Alan\":   \"adonovan\",\n\t\"Andy\":   \"andybons\",\n\t\"Austin\": \"aclements\",\n\t\"Brad\":   \"bradfitz\",\n\t\"Cherry\": \"cherrymui\",\n\t\"Damien\": \"neild\",\n\t\"Ian\":    \"ianlancetaylor\",\n\t\"Robert\": \"griesemer\",\n\t\"Roland\": \"rolandshoemaker\",\n\t\"Russ\":   \"rsc\",\n}\n\nfunc gitWho(who string) string {\n\tif whoMap[who] != \"\" {\n\t\treturn \"@\" + whoMap[who]\n\t}\n\tfmt.Fprintf(os.Stderr, \"warning: unknown attendee %s; assuming GitHub @%s\\n\", who, who)\n\treturn \"@\" + who\n}\n\nvar committeeUsers = mapSet(maps.Values(whoMap))\n\nfunc mapSet[T comparable](values iter.Seq[T]) map[T]bool {\n\tm := make(map[T]bool)\n\tfor value := range values {\n\t\tm[value] = true\n\t}\n\treturn m\n}\n\n// There's also \"check\", which is mapped to \"comment\" plus posting the proposal\n// details.\n\nfunc updateMsg(old, new, reason, userName string) string {\n\tmsg := updateMsgs[reason]\n\tif msg == \"\" {\n\t\tmsg = updateMsgs[new]\n\t}\n\tif msg == \"\" {\n\t\treturn \"\"\n\t}\n\treturn msg + msgFooter(userName) + \"\\n\"\n}\n\nfunc msgFooter(userName string) string {\n\treturn fmt.Sprintf(\"— %s for the proposal review group\", userName)\n}\n\nvar updateMsgs = map[string]string{\n\t\"duplicate\": `\nThis proposal is a duplicate of a previously discussed proposal, as noted above,\nand there is no significant new information to justify reopening the discussion.\nThe issue has therefore been **[declined as a duplicate](https://go.dev/s/proposal-status#declined-as-duplicate)**.\n`,\n\t\"retracted\": `\nThis proposal has been **[declined as retracted](https://go.dev/s/proposal-status#declined-as-retracted)**.\n`,\n\t\"infeasible\": `\nThis proposal has been 
**[declined as infeasible](https://go.dev/s/proposal-status#declined-as-infeasible)**.\n`,\n\t\"obsolete\": `\nThis proposal has been **[declined as obsolete](https://go.dev/s/proposal-status#declined-as-obsolete)**.\n`,\n\t\"Active\": `\nThis proposal has been added to the [active column](https://go.dev/s/proposal-status#active) of the proposals project\nand will now be reviewed at the weekly proposal review meetings.\n`,\n\t\"Likely Accept\": `\nBased on the discussion above, this proposal seems like a **[likely accept](https://go.dev/s/proposal-status#likely-accept)**.\n`,\n\t\"Likely Decline\": `\nBased on the discussion above, this proposal seems like a **[likely decline](https://go.dev/s/proposal-status#likely-decline)**.\n`,\n\t\"Accepted\": `\nNo change in consensus, so **[accepted](https://go.dev/s/proposal-status#accepted)**. 🎉\nThis issue now tracks the work of implementing the proposal.\n`,\n\t\"Declined\": `\nNo change in consensus, so **[declined](https://go.dev/s/proposal-status#declined)**.\n`,\n\t\"Hold\": `\n**[Placed on hold](https://go.dev/s/proposal-status#hold)**.\n`,\n\t\"removed\": `\n**Removed from the [proposal process](https://go.dev/s/proposal)**.\nThis was determined not to be a “significant change to the language, libraries, or tools”\nor otherwise of significant importance or interest to the broader Go community.\n`,\n}\n"
  },
  {
    "path": "pcvaluetab/README.md",
    "content": "This implements an alternate Go PCDATA encoding that's designed to be much\nfaster to decode, at the cost of a slight increase in size.\n\n# Overview\n\nEach function in Go has several PCDATA tables. Each table logically maps each PC\nin the function to an int32 value. Typically, many PCs in a row will have the\nsame value, and the encoding optimizes for this. Almost all uses of PCDATA\ntables at run time involving looking up the value in a table for a specific PC\n(versus traversing the entire table).\n\n# Go 1.21 varint delta format\n\nThe current format is simple and compact, but very inefficient to decode. It\nconsists of a repeated sequence of:\n\n    valueDelta Varint\n    runLen     Uvarint\n\nfollowed by a 0 byte.\n\nThe decoder implicitly starts with a \"current\" value of -1. Each record gives a\ndelta to add to the current value and the length of the run of PCs the have that\nvalue.\n\nNote that if the value at PC 0 is -1, this encoding will start with a 0 byte.\nThus, decoders must not treat a 0 byte at the beginning of the encoding as a\nterminator byte. After this, a 0 valueDelta unambiguously indicates the end of\nthe encoded stream.\n\nThis format is simple and quite compact. It takes advantage of the typically\nlong runs of identical values, and the fact the values may be large but tend to\nbe clustered (for example, line numbers). It also implicitly encodes the size of\nthe function, so a decoder can detect requests for PCs outside the range of the\ntable.\n\nBut this format has several downsides. Varints are expensive to decode, and\nfinding the value for a particular PC requires decoding the table from the very\nbeginning until we pass the requested PC. Decoding is so expensive that the Go\nruntime uses a cache on top of these tables, which helps even though this cache\nhas a fairly low hit rate.\n\n# Alternate \"linear index\" format\n\nThis package implements an alternate encoding. 
The central goal of this format\nis to support point queries with minimal scanning.\n\nWe break the PC range of a function into 256 byte \"chunks\". For example, if a\nfunction is 900 bytes long, it will consist of four chunks, one for each 256\nbytes of the function. (For architectures with a PC quantum of greater than 1\nbyte, we multiply all of this by the PC quantum.)\n\nThe overall encoding consists of a chunk index, followed by the encoding of each\nchunk.\n\nUnlike the varint delta encoding, the linear index format does not encode the\nsize of functions. In fact, decoding this format requires knowing the size of\nthe function. Thus, it must be encoded out of band. It's possible to rearrange\nthe `func_` structure to make room for this, so there's no space overhead for\nthis.\n\n## Chunk index\n\nThe chunk index encodes the byte offset of each chunk relative to the end of the\nchunk index. Given n chunk offsets, the chunk index has three possible layouts:\n\n    [n-1]uint8        If all offsets are <= 0xff\n    0xfe [n-1]uint16  If all offsets are <= 0xffff\n    0xff [n-1]uint32  Otherwise\n\nThe relative offset of chunk 0 is always 0, so it's not represented in the\nindex. This also means that if a function is less than 256 bytes and thus has\nonly one chunk, the chunk index will be 0 bytes long.\n\nThe vast majority of tables can represent all chunk offsets in one byte, so they\nwill use the first form. If the first byte would be 0xfe or 0xff, we fall back\nto the second form, since otherwise this would be ambiguous.\n\nA decoder first looks up `pc>>8` in the chunk index to find the offset of the\nchunk for target PC.\n\n## Chunk encoding\n\nEach chunk covers a 256 byte range of the function and is encoded as follows:\n\n    n    uint8\n    pcs  [n]byte\n    vlen [n+1]uint2 // padded to a byte\n    vals [n+1]vint\n\nThe `pcs` field is a list of `pc&0xff` for each PC at which the value differs\nfrom the previous PC in the chunk, in ascending order. 
The first PC in the chunk\n(`pc&0xff == 0`) is never listed in `pcs`, since there is no previous PC in the\nchunk. This means there are at most 255 PCs in `pcs`, so the length of `pcs` can\nfit in the single byte `n` field.\n\nThe PC list is followed by n+1 values in a variable-length encoding. `vals[0]`\nis the value of the first PC in the chunk, as well as the \"bias\" value for all\nother values in this chunk. The value from `pcs[i]` to `pcs[i+1]` (or to the end\nof the block) is `bias + vals[i+1]`. Since values are often large but tend to be\nclustered, this bias value often makes it possible to encode the remaining\nvalues in fewer bytes.\n\nThe value list starts with the byte lengths of all values, encoded in the `vlen`\narray as packed 2-bit values. In this encoding, 0b01 corresponds to a 1-byte\nvalue, 0b10 to a 2-byte value, and 0b11 to a 4-byte value. This is padded out to\na byte with \"0\" bits. This is followed by the values themselves in the `vals`\nfield, where each value is encoded in an int8, int16, or int32. A decoder can\nfind the offset of the i'th value by summing the lengths of values 0 through\ni-1.\n\nThe particular encoding of `vlen` makes it possible to mask unused fields to\n0b00 and then use a single \"sum\" operation to compute both cumulative sums of\n`vlen` and individual fields.\n\nA decoder scans `pcs` to find the index `i` of the last PC <= the target PC, or\nelse -1. It reads the bias value `bias` from `vals[0]`, with length `vlens[0]`.\nIf `i` is 0, it returns `bias`. Otherwise, it then gets `vlens[i]` and computes\nthe sum of `vlens[:i]` to get the byte size and offset of `vals[i]`. Finally, it\nreturns `bias + vals[i]`.\n\n## Constant chunk deduplication\n\nThe encoder deduplicates chunks where all 256 PCs have the same value, which is\ncommon in large functions. Since the index contains offsets to the encoding of\neach chunk, the encoder simply uses the same offset for later duplicates of\nconstant chunks. 
In principle, this could be used for non-constant chunks that\nencode identically, but this doesn't happen often outside of constant chunks.\n\nThis optimization is transparent to decoders.\n\n"
  },
  {
    "path": "pcvaluetab/alt.go",
    "content": "// Copyright 2023 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\n// Alternate PCDATA encoding\n\nimport (\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"log\"\n\t\"math/bits\"\n\t\"slices\"\n\t\"unsafe\"\n)\n\ntype indexScheme int\n\nconst (\n\t// All index entries are the same width, which depends on the function\n\t// size.\n\tindexFixedWidth indexScheme = iota\n\t// The index is encoded as a group varint.\n\t//\n\t// This saves a fair amount over indexFixedWidth, but is pretty hard to\n\t// decode.\n\tindexGroupVarint\n\t// The index is encoded as 1-byte, 2-byte, or 4-byte offsets. The\n\t// default is 1-byte, but two reserved values for the first byte\n\t// indicate 2- or 4-byte encoding.\n\t//\n\t// This is the clear winner on size, and is pretty easy to decode.\n\tindexByteOrHeader\n)\n\nconst useIndex = indexByteOrHeader\n\ntype biasScheme int\n\nconst (\n\t// No value biasing\n\tbiasNone biasScheme = iota\n\t// Bias all values by a fixed value (fixedBias). Values use signed\n\t// encoding, but in practice very few values are negative and we only\n\t// use small negative values, so this lets us encode more positive\n\t// values in one byte.\n\tbiasFixed\n\t// Compute a per-chunk bias value and encode that in each chunk. This\n\t// saves a *tiny* bit, but is probably not worth the added complexity.\n\tbiasPerChunk\n\t// Use the start value of a chunk as the bias for the rest of the\n\t// values.\n\t//\n\t// This is the pretty clear winner, and fairly easy to decode.\n\tbiasStartValue\n)\n\nconst useBias = biasStartValue\n\n// For biasFixed, the bias to add to each value. 
Generally the minimum value\n// is -2, so this lets us fit more values in a signed 8-bit number.\nconst fixedBias = -120\n\n// linearIndex encodes tab in the alternate \"linear index\" format.\nfunc linearIndex(tab *VarintPCData) []byte {\n\tconst debug = false\n\n\tvar indexVals []int32\n\tvar pcdata []byte\n\n\tchunks := uint32((tab.TextLen + 255) >> 8)\n\n\tencodeUint16 := func(buf *[]byte, val uint64) {\n\t\tif val > 0xffff {\n\t\t\tpanic(\"value too large\")\n\t\t}\n\t\t*buf = append(*buf, byte(val), byte(val>>8))\n\t}\n\tencodeUint32 := func(buf *[]byte, val uint64) {\n\t\tif val > 0xffffffff {\n\t\t\tpanic(\"value too large\")\n\t\t}\n\t\t*buf = append(*buf, byte(val), byte(val>>8), byte(val>>16), byte(val>>24))\n\t}\n\tencodeValue := func(buf *[]byte, val int32) uint8 {\n\t\tif int32(int8(uint8(val))) == val {\n\t\t\t*buf = append(*buf, uint8(val))\n\t\t\treturn 0b01\n\t\t} else if int32(int16(uint16(val))) == val {\n\t\t\t*buf = append(*buf, uint8(val), uint8(val>>8))\n\t\t\treturn 0b10\n\t\t} else {\n\t\t\tencodeUint32(buf, uint64(uint32(val)))\n\t\t\treturn 0b11\n\t\t}\n\t}\n\tencodeGroup := func(buf *[]byte, vals []int32) {\n\t\t// Encode group header, at two bits per value.\n\t\tbits := 2 * len(vals)\n\t\tbytes := (bits + 7) / 8\n\t\theaderOff := len(*buf)\n\t\t*buf = append(*buf, make([]uint8, bytes)...)\n\t\tfor i, val := range vals {\n\t\t\tvalLen := encodeValue(buf, val)\n\t\t\t(*buf)[headerOff+i/4] |= (valLen << ((i % 4) * 2))\n\t\t}\n\t\t// TODO: Also try variant where the group header is in the high bits of\n\t\t// the PCs.\n\t}\n\n\t// Encode each chunk\n\tpcIndex := 0\n\t// For constant-valued chunked, map from value to starting offset.\n\tconstChunkOffs := make(map[int32]int32)\n\tfor chunk := uint32(0); chunk < chunks; chunk++ {\n\t\t// Find range of PCs in this chunk.\n\t\tstartPCIndex := pcIndex\n\t\tfor pcIndex < len(tab.PCs) && tab.PCs[pcIndex]>>8 == chunk {\n\t\t\tpcIndex++\n\t\t}\n\t\t// Each chunk implicitly starts with PC 0, which 
means there's no need\n\t\t// to encode an explicit PC 0.\n\t\tif startPCIndex < pcIndex && tab.PCs[startPCIndex]&0xff == 0 {\n\t\t\tstartPCIndex++\n\t\t}\n\t\t// Get the starting value of this chunk.\n\t\tstartValue := int32(-1) // PCDATA tables start with -1.\n\t\tif startPCIndex > 0 {\n\t\t\tstartValue = tab.Vals[startPCIndex-1]\n\t\t}\n\t\tif startPCIndex == pcIndex {\n\t\t\t// This is a constant chunk.\n\t\t\tif off, ok := constChunkOffs[startValue]; ok {\n\t\t\t\t// We can just point to this chunk from the index.\n\t\t\t\tindexVals = append(indexVals, off)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// This is a new constant chunk.\n\t\t\tconstChunkOffs[startValue] = int32(len(pcdata))\n\t\t}\n\n\t\t// Add to the index.\n\t\tif chunk > 0 {\n\t\t\tindexVals = append(indexVals, int32(len(pcdata)))\n\t\t}\n\n\t\t// Encode PC count (N). Note that it's important that we never include\n\t\t// PC 0 here because that means the maximum count is 255, so it always\n\t\t// fits in a byte.\n\t\tn := pcIndex - startPCIndex\n\t\tif n < 0 {\n\t\t\tpanic(\"skipped past start\")\n\t\t}\n\t\tif n > 255 {\n\t\t\tpanic(\"PC count > 255\")\n\t\t}\n\t\tpcdata = append(pcdata, uint8(n))\n\n\t\t// Encode N PCs.\n\t\tfor i := startPCIndex; i < pcIndex; i++ {\n\t\t\tpcdata = append(pcdata, uint8(tab.PCs[i]&0xff))\n\t\t}\n\n\t\t// Compute bias.\n\t\tvar vals []int32\n\t\tvar bias int32\n\t\tswitch useBias {\n\t\tcase biasFixed:\n\t\t\tbias = fixedBias\n\t\tcase biasPerChunk:\n\t\t\tminVal, maxVal := startValue, startValue\n\t\t\tif startPCIndex < pcIndex {\n\t\t\t\tmin2 := slices.Min(tab.Vals[startPCIndex:pcIndex])\n\t\t\t\tif min2 < minVal {\n\t\t\t\t\tminVal = min2\n\t\t\t\t}\n\t\t\t\tmax2 := slices.Max(tab.Vals[startPCIndex:pcIndex])\n\t\t\t\tif max2 > maxVal {\n\t\t\t\t\tmaxVal = max2\n\t\t\t\t}\n\t\t\t}\n\t\t\tif int32(int8(minVal)) == minVal && int32(int8(maxVal)) == maxVal {\n\t\t\t\t// No need to bias. 
We need a way to encode this in the bits.\n\t\t\t} else {\n\t\t\t\t// Shift the minimum value to -127. min + bias = -127.\n\t\t\t\tbias = -127 - minVal\n\t\t\t\tvals = append(vals, bias) // TODO: Could be unsigned.\n\t\t\t}\n\t\tcase biasStartValue:\n\t\t\tbias = -startValue\n\t\t}\n\n\t\t// Encode values, beginning with the value in effect at the start of\n\t\t// this chunk's PC range.\n\t\tif useBias == biasStartValue {\n\t\t\tvals = append(vals, startValue)\n\t\t} else {\n\t\t\tvals = append(vals, startValue+bias)\n\t\t}\n\t\tfor i := startPCIndex; i < pcIndex; i++ {\n\t\t\tvals = append(vals, tab.Vals[i]+bias)\n\t\t}\n\n\t\tencodeGroup(&pcdata, vals)\n\t}\n\tif pcIndex != len(tab.PCs) {\n\t\tlog.Fatalf(\"didn't consume all PCs, pcIndex=%d, len(pcs)=%d\", pcIndex, len(tab.PCs))\n\t}\n\n\t// Encode index.\n\tvar index []byte\n\tswitch useIndex {\n\tcase indexFixedWidth:\n\t\tif chunks < 32 {\n\t\t\t// Two bytes per entry is enough.\n\t\t\tfor _, val := range indexVals {\n\t\t\t\tindex = append(index, byte(val), byte(val>>8))\n\t\t\t}\n\t\t} else {\n\t\t\t// Four bytes per entry.\n\t\t\tfor _, val := range indexVals {\n\t\t\t\tencodeUint32(&index, uint64(val))\n\t\t\t}\n\t\t}\n\tcase indexGroupVarint:\n\t\t// To start with, the offsets are relative to the end of the index, but\n\t\t// consumers don't know the length of the index, so we really want them\n\t\t// to be relative to the start of the index. But we don't know the\n\t\t// length of the index. So reach a fixed point. 
In practice this almost\n\t\t// never requires more than two iterations.\n\t\tprevLen := 0\n\t\tfor {\n\t\t\tindex = index[:0]\n\t\t\tencodeGroup(&index, indexVals)\n\t\t\tif len(index) <= prevLen {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tfor i := range indexVals {\n\t\t\t\tindexVals[i] += int32(len(index) - prevLen)\n\t\t\t}\n\t\t\tprevLen = len(index)\n\t\t}\n\tcase indexByteOrHeader:\n\t\tsize := 1\n\t\tfor i, val := range indexVals {\n\t\t\tif val > 0xffff {\n\t\t\t\tsize = 4\n\t\t\t\tbreak\n\t\t\t} else if val > 0xff || (i == 0 && val >= 0xfe) {\n\t\t\t\tsize = 2\n\t\t\t}\n\t\t}\n\t\tif size == 1 {\n\t\t\t// Everything fits in a byte and the first byte's value isn't a\n\t\t\t// special marker.\n\t\t\tfor _, val := range indexVals {\n\t\t\t\tindex = append(index, uint8(val))\n\t\t\t}\n\t\t} else if size == 2 {\n\t\t\t// Put an 0xfe marker, followed by 2-byte offsets.\n\t\t\tindex = append(index, 0xfe)\n\t\t\tfor _, val := range indexVals {\n\t\t\t\tencodeUint16(&index, uint64(val))\n\t\t\t}\n\t\t} else if size == 4 {\n\t\t\t// Put an 0xff marker, followed by 4-byte offsets.\n\t\t\tindex = append(index, 0xff)\n\t\t\tfor _, val := range indexVals {\n\t\t\t\tencodeUint32(&index, uint64(val))\n\t\t\t}\n\t\t} else {\n\t\t\tpanic(\"bad size\")\n\t\t}\n\t}\n\n\tif debug {\n\t\tfmt.Println(\"const chunks: \", constChunkOffs)\n\t\tfmt.Println(\"index vals: \", indexVals)\n\t}\n\n\t// Combine index and values.\n\tpcdata = append(index, pcdata...)\n\n\treturn pcdata\n}\n\n// lookupLinearIndex performs a point query for the value associated with pc in\n// PCDATA encoded with in the linear index format.\nfunc lookupLinearIndex(data []byte, textLen, pc uint32) int32 {\n\tconst debug = false\n\n\tchunks := uint32((textLen + 255) >> 8)\n\n\tif debug {\n\t\tfmt.Println(\"lookup\", pc)\n\t\tfmt.Println(\"chunks:\", chunks)\n\t}\n\n\t// Lookup the chunk in data\n\tvar chunk []byte\n\tswitch useIndex {\n\tdefault:\n\t\tpanic(\"index scheme not implemented\")\n\tcase 
indexByteOrHeader:\n\t\t// Compute the offset of the chunk from data.\n\t\tif chunks == 1 {\n\t\t\t// In this case it's not safe to look at the header byte. We could\n\t\t\t// say \"|| chunks==1\" in the 1-byte encoding case, but this is so\n\t\t\t// common is seems worth a fast path anyway.\n\t\t\tchunk = data\n\t\t\tbreak\n\t\t}\n\t\tvar chunkOff uint32\n\t\tchunkID := pc >> 8\n\t\tif data[0] < 0xfe {\n\t\t\t// 1-byte encoding\n\t\t\tchunkOff = chunks - 1 // Skip index\n\t\t\tif chunkID > 0 {\n\t\t\t\tchunkOff += uint32(data[chunkID-1])\n\t\t\t}\n\t\t} else if data[0] == 0xfe {\n\t\t\t// 2-byte encoding\n\t\t\tchunkOff = 1 + (chunks-1)*2\n\t\t\tif chunkID > 0 {\n\t\t\t\tchunkOff += uint32(binary.LittleEndian.Uint16(data[1+(chunkID-1)*2:]))\n\t\t\t}\n\t\t} else {\n\t\t\t// 4-byte encoding\n\t\t\tchunkOff = 1 + (chunks-1)*4\n\t\t\tif chunkID > 0 {\n\t\t\t\tchunkOff += binary.LittleEndian.Uint32(data[1+(chunkID-1)*4:])\n\t\t\t}\n\t\t}\n\t\tif debug {\n\t\t\tfmt.Println(\"chunk offset:\", chunkOff)\n\t\t}\n\t\tchunk = data[chunkOff:]\n\t}\n\n\t// Load PCs: N byte, PCs [N]byte\n\tn := chunk[0]\n\tpcs := chunk[1 : 1+n]\n\tif debug {\n\t\tfmt.Println(\"n:\", n, \"pcs:\", pcs)\n\t}\n\n\t// Search for the PC, find the value index.\n\t//\n\t// TODO: This is one of the hottest things in this function and could easily\n\t// be vectorized.\n\tindex := int(n)\n\tfor i, pc1 := range pcs {\n\t\tif pc1 > uint8(pc) {\n\t\t\tindex = i\n\t\t\tbreak\n\t\t}\n\t}\n\tif debug {\n\t\tfmt.Println(\"index:\", index)\n\t}\n\n\t// Load values: lens [N+1]uint2, vals [N+1]varlen\n\tgroupBits := 2 * int(n+1)\n\tgroupBytes := (groupBits + 7) / 8\n\tlens := chunk[1+n:]\n\tvals := lens[groupBytes:]\n\tif debug {\n\t\tfmt.Println(\"lens:\", lens[:groupBytes], \"vals:\", vals)\n\t}\n\n\tvar bias int32\n\n\tswitch useBias {\n\tdefault:\n\t\tpanic(\"bias scheme not implemented\")\n\tcase biasNone:\n\t\tbreak\n\tcase biasFixed:\n\t\tbias = fixedBias\n\tcase biasStartValue:\n\t\t// Decode the start 
value.\n\t\t//\n\t\t// TODO: Only correct on little endian\n\t\t//\n\t\t// TODO: Requires ensuring we can read up to three bytes past the end of\n\t\t// the pcdata encoding. In practice all the tables are concatenated, so\n\t\t// that's easy padding to add.\n\n\t\t// Logically, we're selecting the low field, computing the byte length\n\t\t// of \"bias\", and computing a shift from that, but we've precomputed\n\t\t// those operations into one table lookup.\n\n\t\t//startLen := count0124(lens[0] & 0b11)\n\t\tshift := shiftTab[lens[0]&0b11] % 32 // %32 to avoid branch\n\t\tbias = *(*int32)(unsafe.Pointer(&vals[0]))\n\t\t//shift := ((4 - startLen) * 8) % 32 // %32 to avoid branch\n\t\tbias = (bias << shift) >> shift\n\t\tif debug {\n\t\t\t//fmt.Println(\"start len:\", startLen)\n\t\t\tfmt.Println(\"bias:\", bias)\n\t\t}\n\t\tif index == 0 {\n\t\t\treturn bias\n\t\t}\n\t}\n\n\t// Find the offset of the value.\n\tvalOff := uint(0)\n\tfor _, v := range lens[:index/4] {\n\t\tvalOff += count0124(v)\n\t}\n\t// TODO: I think on little endian, I can do larger loads with the masking.\n\t// TODO: Since I'm just counting, can I shift instead of masking?\n\tvalOff += count0124(lens[index/4] & masks[index%4])\n\n\t// Load the value.\n\t//\n\t// We don't have to shift the field down: since we're just summing up the\n\t// fields and 00 adds 0, we can just mask out the one field we want.\n\tvalLen := count0124(lens[index/4] & selMask[index%4])\n\tif debug {\n\t\tfmt.Println(\"valOff:\", valOff, \"valLen:\", valLen)\n\t\tfmt.Printf(\"%02x\\n\", lens[index/4]&selMask[index%4])\n\t}\n\tval := *(*int32)(unsafe.Pointer(&vals[valOff]))\n\tshift := ((4 - valLen) * 8) % 32 // %32 to avoid branch\n\tval = (val << shift) >> shift\n\tval += bias\n\n\treturn val\n}\n\nvar masks = [...]uint8{0, 0b11, 0b1111, 0b111111}\nvar selMask = [...]uint8{0b11, 0b1100, 0b110000, 0b11000000}\nvar shiftTab = [...]uint8{0, 24, 16, 0}\n\nfunc count0124Formula(x uint8) uint {\n\t// See also streamvbyte for some 
ideas.\n\n\t// A table is faster than this for 1 byte. That might not be true for larger\n\t// values, if we switch to using larger values.\n\n\t// The first OnesCount maps:\n\t//   00 => 0\n\t//   01 => 1\n\t//   10 => 1 (want 2; need to add 1)\n\t//   11 => 2 (want 4; need to add 2)\n\t//\n\t// Then we map x to a new bitmap where the OnesCount of each field is the\n\t// amount we want to add to the first OnesCount:\n\t//   00 => 00 (count 0)\n\t//   01 => 00 (count 0)\n\t//   10 => 10 (count 1)\n\t//   11 => 11 (count 2)\n\t//\n\t// We can then add the OnesCount of this residue to get the final count.\n\n\th := x & 0b10101010\n\treturn uint(bits.OnesCount8(x) + bits.OnesCount8(h|((h>>1)&x)))\n}\n\nfunc count0124Slow(x uint8) uint {\n\tvar sum uint\n\tfor i := 0; i < 4; i++ {\n\t\tfield := (x >> (i * 2)) & 0b11\n\t\tif field == 0b11 {\n\t\t\tfield = 4\n\t\t}\n\t\tsum += uint(field)\n\t}\n\treturn sum\n}\n\nvar count0124Tab = [...]uint8{\n\t0, 1, 2, 4, 1, 2, 3, 5, 2, 3, 4, 6, 4, 5, 6, 8,\n\t1, 2, 3, 5, 2, 3, 4, 6, 3, 4, 5, 7, 5, 6, 7, 9,\n\t2, 3, 4, 6, 3, 4, 5, 7, 4, 5, 6, 8, 6, 7, 8, 10,\n\t4, 5, 6, 8, 5, 6, 7, 9, 6, 7, 8, 10, 8, 9, 10, 12,\n\t1, 2, 3, 5, 2, 3, 4, 6, 3, 4, 5, 7, 5, 6, 7, 9,\n\t2, 3, 4, 6, 3, 4, 5, 7, 4, 5, 6, 8, 6, 7, 8, 10,\n\t3, 4, 5, 7, 4, 5, 6, 8, 5, 6, 7, 9, 7, 8, 9, 11,\n\t5, 6, 7, 9, 6, 7, 8, 10, 7, 8, 9, 11, 9, 10, 11, 13,\n\t2, 3, 4, 6, 3, 4, 5, 7, 4, 5, 6, 8, 6, 7, 8, 10,\n\t3, 4, 5, 7, 4, 5, 6, 8, 5, 6, 7, 9, 7, 8, 9, 11,\n\t4, 5, 6, 8, 5, 6, 7, 9, 6, 7, 8, 10, 8, 9, 10, 12,\n\t6, 7, 8, 10, 7, 8, 9, 11, 8, 9, 10, 12, 10, 11, 12, 14,\n\t4, 5, 6, 8, 5, 6, 7, 9, 6, 7, 8, 10, 8, 9, 10, 12,\n\t5, 6, 7, 9, 6, 7, 8, 10, 7, 8, 9, 11, 9, 10, 11, 13,\n\t6, 7, 8, 10, 7, 8, 9, 11, 8, 9, 10, 12, 10, 11, 12, 14,\n\t8, 9, 10, 12, 9, 10, 11, 13, 10, 11, 12, 14, 12, 13, 14, 16,\n}\n\n// count0124 returns the sum of vector x, where x contains 4 2-bit values where\n// 0b00 => 0, 0b01 => 1, 0b10 => 2, 0b11 => 4.\nfunc count0124(x uint8) uint {\n\treturn 
uint(count0124Tab[x])\n}\n"
  },
  {
    "path": "pcvaluetab/alt_test.go",
    "content": "// Copyright 2023 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"strings\"\n\t\"testing\"\n)\n\nvar sinkInt int\n\nfunc TestCount0124(t *testing.T) {\n\tvar src strings.Builder\n\tfmt.Fprintf(&src, \"var count0124Tab = [...]uint8{\\n\")\n\n\tfor x := 0; x < 256; x++ {\n\t\twant := count0124Slow(uint8(x))\n\t\tgotFormula := count0124Formula(uint8(x))\n\t\tgotTable := count0124(uint8(x))\n\n\t\tt.Logf(\"%#08b => %d %d %d\", x, want, gotFormula, gotTable)\n\t\tif want != gotFormula || want != gotTable {\n\t\t\tt.Errorf(\"count implementations differ for x=%#08b\", x)\n\t\t}\n\n\t\tfmt.Fprintf(&src, \"%d,\", want)\n\t\tif x%16 == 15 {\n\t\t\tfmt.Fprintf(&src, \"\\n\")\n\t\t}\n\t}\n\n\tsrc.WriteByte('}')\n\tt.Log(src.String())\n}\n\nfunc BenchmarkCount0124(b *testing.B) {\n\t// Generate test data.\n\tvar data [1024]byte // Must be a power of 2 for optimal codegen\n\trand.Read(data[:])\n\n\tb.Run(\"formula\", func(b *testing.B) {\n\t\tvar sink uint\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tsink = count0124Formula(data[i&(len(data)-1)])\n\t\t}\n\t\tsinkInt = int(sink)\n\t})\n\tb.Run(\"slow\", func(b *testing.B) {\n\t\tvar sink uint\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tsink = count0124Slow(data[i&(len(data)-1)])\n\t\t}\n\t\tsinkInt = int(sink)\n\t})\n\tb.Run(\"table\", func(b *testing.B) {\n\t\tvar sink uint\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tsink = count0124(data[i&(len(data)-1)])\n\t\t}\n\t\tsinkInt = int(sink)\n\t})\n}\n"
  },
  {
    "path": "pcvaluetab/bench_test.go",
    "content": "// Copyright 2023 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"math\"\n\t\"math/rand\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n)\n\nvar flagBinary stringList\nvar flagSizeStats = flag.Bool(\"size-stats\", true, \"report file and table size statistics (disable for profiling)\")\n\nfunc init() {\n\tflag.Var(&flagBinary, \"bench-binary\", \"use PCDATA from `binary` for benchmarks; can be given multiple times\")\n}\n\ntype stringList struct {\n\tlist []string\n}\n\nfunc (l *stringList) String() string {\n\treturn strings.Join(l.list, \",\")\n}\n\nfunc (l *stringList) Set(x string) error {\n\tl.list = append(l.list, x)\n\treturn nil\n}\n\nfunc BenchmarkDecode(b *testing.B) {\n\tif len(flagBinary.list) == 0 {\n\t\tb.Skip(\"-bench-binary not set\")\n\t}\n\n\tfor _, bin := range flagBinary.list {\n\t\tb.Run(\"bin=\"+filepath.Base(bin), func(b *testing.B) {\n\t\t\tsymtab := LoadSymTab(bin)\n\n\t\t\tfor _, pcMode := range []uint32{pcModeRandom, 0, 4096} {\n\t\t\t\tlabel := fmt.Sprint(pcMode)\n\t\t\t\tif pcMode == pcModeRandom {\n\t\t\t\t\tlabel = \"random\"\n\t\t\t\t}\n\n\t\t\t\tb.Run(\"pc=\"+label, func(b *testing.B) {\n\t\t\t\t\tdecode1(b, symtab, pcMode)\n\t\t\t\t})\n\t\t\t}\n\t\t\tif *flagSizeStats {\n\t\t\t\tfileSizeStats(b, bin, symtab)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc fileSizeStats(b *testing.B, bin string, symtab *SymTab) {\n\tvar fileBytes int\n\tif stat, err := os.Stat(bin); err != nil {\n\t\tb.Fatal(err)\n\t} else {\n\t\tfileBytes = int(stat.Size())\n\t}\n\n\tvar varintBytes, altBytes int\n\tgather := sync.OnceFunc(func() {\n\t\t// Collect the total size of the varint and alt tables.\n\t\taltDups := map[string]bool{}\n\t\tfor _, tab := range symtab.PCTabs {\n\t\t\tvarintBytes += len(tab.Raw)\n\n\t\t\t// Re-encode the varint tables.\n\t\t\taltTab := 
linearIndex(tab)\n\t\t\tif altDups[string(altTab)] {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\taltDups[string(altTab)] = true\n\t\t\taltBytes += len(altTab)\n\t\t}\n\t})\n\n\t// This is a bit goofy. We're not measuring time, so the normal testing.B\n\t// looping doesn't work. We start a sub-benchmark so that -test.bench\n\t// selection works, and then print out own result and call SkipNow to\n\t// prevent looping. This hack means results won't align nicely, and if this\n\t// this is the first \"benchmark\" to run, our results will appear before the\n\t// benchmark tags.\n\n\tb.Run(\"enc=varint\", func(b *testing.B) {\n\t\tgather()\n\t\tfmt.Printf(\"%s\\t%d\\t%v table-bytes\\t%v file-bytes\\n\", b.Name(), 1, varintBytes, fileBytes)\n\t\tb.SkipNow()\n\t})\n\n\tb.Run(\"enc=alt\", func(b *testing.B) {\n\t\tgather()\n\t\taltFileBytes := fileBytes - varintBytes + altBytes\n\t\tfmt.Printf(\"%s\\t%d\\t%v table-bytes\\t%v file-bytes\\n\", b.Name(), 1, altBytes, altFileBytes)\n\t\t// Metrics get kind of weird with \"percent change\", so just log it.\n\t\tfmt.Printf(\"file-bytes alt versus varint: %+f%%\\n\", diffPct(fileBytes, altFileBytes))\n\t\tb.SkipNow()\n\t})\n}\n\nconst pcModeRandom = math.MaxUint32\n\nfunc decode1(b *testing.B, symtab *SymTab, pcMode uint32) {\n\t// Random sample of tables.\n\tconst nSamples = 1024\n\ttype sample struct {\n\t\tvarintTab *VarintPCData\n\t\taltTab    []byte\n\t\ttextLen   uint32\n\t\tpc        uint32\n\t}\n\tsamples := make([]sample, 0, nSamples)\n\tfor len(samples) < nSamples {\n\t\t// Pick a random table.\n\t\tvar tab *VarintPCData\n\t\tfor _, tab = range symtab.PCTabs {\n\t\t\tbreak\n\t\t}\n\t\t// Pick a PC.\n\t\tpc := pcMode\n\t\tif pcMode == math.MaxUint32 {\n\t\t\tpc = uint32(rand.Intn(int(tab.TextLen)))\n\t\t} else if pc >= tab.TextLen {\n\t\t\t// Try again\n\t\t\tcontinue\n\t\t}\n\t\t// Re-encode it.\n\t\taltTab := linearIndex(tab)\n\n\t\tsamples = append(samples, sample{tab, altTab, tab.TextLen, pc})\n\t}\n\n\tb.Run(\"enc=varint\", 
func(b *testing.B) {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tsample := &samples[i%len(samples)]\n\t\t\tlookupVarintPCData(sample.varintTab.Raw, uintptr(sample.pc), nil)\n\t\t}\n\t})\n\tb.Run(\"enc=varint/cachehit=0\", func(b *testing.B) {\n\t\tvar cache pcvalueCache\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\t// In practice this will never hit in the cache because there\n\t\t\t// are so many random samples.\n\t\t\tsample := &samples[i%len(samples)]\n\t\t\tlookupVarintPCData(sample.varintTab.Raw, uintptr(sample.pc), &cache)\n\t\t}\n\t})\n\tb.Run(\"enc=varint/cachehit=7:1\", func(b *testing.B) {\n\t\tvar cache pcvalueCache\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\t// Hit 7 times out of 8. That's probably dramatically higher\n\t\t\t// than the hit rate in real applications.\n\t\t\tsample := &samples[(i/8)%len(samples)]\n\t\t\tlookupVarintPCData(sample.varintTab.Raw, uintptr(sample.pc), &cache)\n\t\t}\n\t})\n\tb.Run(\"enc=alt\", func(b *testing.B) {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tsample := &samples[i%len(samples)]\n\t\t\tlookupLinearIndex(sample.altTab, sample.textLen, sample.pc)\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "pcvaluetab/dist.go",
    "content": "// Copyright 2023 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"strings\"\n)\n\ntype Dist struct {\n\tvals   []int\n\tsorted bool\n}\n\nfunc (d *Dist) Add(val int) {\n\td.vals = append(d.vals, val)\n\td.sorted = false\n}\n\nfunc (d *Dist) Quantile(q float64) int {\n\tif !d.sorted {\n\t\tsort.Ints(d.vals)\n\t}\n\ti := int((q * float64(len(d.vals)-1)) + 0.5)\n\treturn d.vals[i]\n}\n\nfunc (d *Dist) StringSummary() string {\n\tconst qs = 10\n\tvar out strings.Builder\n\tfor i := 0; i <= qs; i++ {\n\t\tfmt.Fprintf(&out, \" %7s\", fmt.Sprintf(\"p%d\", i*100/qs))\n\t}\n\tout.WriteByte('\\n')\n\tfor i := 0; i <= qs; i++ {\n\t\tv := d.Quantile(float64(i) / qs)\n\t\tfmt.Fprintf(&out, \" %7d\", v)\n\t}\n\tfmt.Fprintf(&out, \" N=%d\", len(d.vals))\n\treturn out.String()\n}\n"
  },
  {
    "path": "pcvaluetab/enc.go",
    "content": "// Copyright 2023 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\n// Raw file encodings of symbol table types from Go 1.21.\n\ntype rawPCHeader1 struct {\n\tMagic      uint32 // 0xFFFFFFF1\n\tPad1, Pad2 uint8  // 0,0\n\tMinLC      uint8  // min instruction size\n\tPtrSize    uint8  // size of a ptr in bytes\n}\n\ntype rawPCHeader struct {\n\trawPCHeader1\n\tNfunc          int     // number of functions in the module\n\tNfiles         uint    // number of entries in the file tab\n\tTextStart      uintptr // base for function entry PC offsets in this module, equal to moduledata.text\n\tFuncnameOffset uintptr // offset to the funcnametab variable from pcHeader\n\tCuOffset       uintptr // offset to the cutab variable from pcHeader\n\tFiletabOffset  uintptr // offset to the filetab variable from pcHeader\n\tPctabOffset    uintptr // offset to the pctab variable from pcHeader\n\tPclnOffset     uintptr // offset to the pclntab variable from pcHeader\n}\n\ntype rawFunc struct {\n\tEntryOff uint32 // start pc, as offset from moduledata.text/pcHeader.textStart\n\tNameOff  int32  // function name, as index into moduledata.funcnametab.\n\n\tArgs        int32  // in/out args size\n\tDeferreturn uint32 // offset of start of a deferreturn call instruction from entry, if any.\n\n\tPcsp      uint32\n\tPcfile    uint32\n\tPcln      uint32\n\tNpcdata   uint32\n\tCuOffset  uint32       // runtime.cutab offset of this function's CU\n\tStartLine int32        // line number of start of function (func keyword/TEXT directive)\n\tFuncID    rawABIFuncID // set for certain special runtime functions\n\tFlag      rawABIFuncFlag\n\tPad       [1]byte // pad\n\tNfuncdata uint8   // must be last, must end on a uint32-aligned boundary\n\n\t// Followed by Npcdata 4-byte offsets into pctab, then\n\t// Nfuncdata 4-byte offsets into moduledata.gofunc.\n\t// Padded to 
PtrSize.\n}\n\ntype rawABIFuncID uint8\ntype rawABIFuncFlag uint8\n"
  },
  {
    "path": "pcvaluetab/go.mod",
    "content": "module github.com/aclements/go-misc/pcvaluetab\n\ngo 1.21\n\nrequire golang.org/x/exp v0.0.0-20230807204917-050eac23e9de\n"
  },
  {
    "path": "pcvaluetab/go.sum",
    "content": "golang.org/x/exp v0.0.0-20230807204917-050eac23e9de h1:l5Za6utMv/HsBWWqzt4S8X17j+kt1uVETUX5UFhn2rE=\ngolang.org/x/exp v0.0.0-20230807204917-050eac23e9de/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=\n"
  },
  {
    "path": "pcvaluetab/main.go",
    "content": "// Copyright 2023 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// pcvaluetab is an experiment with alternate pcvalue encodings.\n//\n// Usage: pcvaluetab {binary}\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"sort\"\n\n\t\"golang.org/x/exp/maps\"\n)\n\nfunc main() {\n\tconst debug = false\n\tconst debugCheckDecode = true\n\n\tflag.Parse()\n\tif flag.NArg() != 1 {\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\tbinPath := flag.Arg(0)\n\n\tvar fileBytes int\n\tif stat, err := os.Stat(binPath); err != nil {\n\t\tlog.Fatal(err)\n\t} else {\n\t\tfileBytes = int(stat.Size())\n\t}\n\n\tsymtab := LoadSymTab(binPath)\n\n\t// Walk the funcs.\n\tvar fnSizes Dist\n\tvar funcBytes int\n\tvar tabOffsetDist Dist\n\trefBytes := 0\n\ttype tabInfo struct {\n\t\ttab   *VarintPCData\n\t\talt   []byte\n\t\tcount int\n\t}\n\tdups := make(map[PCTabKey]*tabInfo)\n\taltDups := make(map[string]int)\n\tfor _, fn := range symtab.Funcs {\n\t\tif debug {\n\t\t\tfmt.Printf(\"%+v\\n\", fn)\n\t\t}\n\n\t\tfuncBytes += fn.EncSize\n\n\t\tfor _, pcTabKey := range fn.PCTabs {\n\t\t\ttabOffsetDist.Add(int(pcTabKey))\n\n\t\t\trefBytes += 4\n\t\t\tif pcTabKey == 0 {\n\t\t\t\t// Unused.\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tinfo := dups[pcTabKey]\n\t\t\tif info == nil {\n\t\t\t\tinfo = new(tabInfo)\n\t\t\t\tdups[pcTabKey] = info\n\n\t\t\t\tinfo.tab = symtab.PCTabs[pcTabKey]\n\t\t\t\tinfo.alt = linearIndex(info.tab)\n\t\t\t\tif debug {\n\t\t\t\t\tfmt.Printf(\"%+v\\n\", info.tab)\n\t\t\t\t\tfmt.Printf(\"% 3x\\n\", info.alt)\n\t\t\t\t\tif len(info.alt) > len(info.tab.Raw) {\n\t\t\t\t\t\tfmt.Println(\"LONGER\", len(info.alt), len(info.tab.Raw))\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif debugCheckDecode {\n\t\t\t\t\tfor pc := uint32(0); pc < info.tab.TextLen; pc++ {\n\t\t\t\t\t\twant := info.tab.Lookup(pc)\n\t\t\t\t\t\tgot := lookupLinearIndex(info.alt, info.tab.TextLen, 
pc)\n\n\t\t\t\t\t\tif want != got {\n\t\t\t\t\t\t\tlog.Fatalf(\"at PC %d, want %d, got %d\", pc, want, got)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tinfo.count++\n\n\t\t\t// Add to the altDups table. The alternate encoding might\n\t\t\t// deduplicate better than the varint encoding, so we count this\n\t\t\t// separately.\n\t\t\taltDups[string(info.alt)]++\n\t\t}\n\n\t\tfnSizes.Add(fn.TextLen)\n\t}\n\n\t// TODO: Does it make sense to combine all of the tables of a function into\n\t// one indexed by (tableID * pcLen) + pc? Then we really can't dedup, but we\n\t// could put this combined table right after the func_ and not need the\n\t// references. Is there any way we could combine this with optional\n\t// deduplication?\n\n\tfmt.Printf(\"file: %d bytes\\n\", fileBytes)\n\tfmt.Printf(\"functab: %d bytes\\n\", funcBytes)\n\tfmt.Printf(\"refs: %d bytes\\n\", refBytes)\n\tfmt.Printf(\"function sizes:\\n%s\\n\", fnSizes.StringSummary())\n\tfmt.Printf(\"pcdata table offsets:\\n%s\\n\", tabOffsetDist.StringSummary())\n\tfmt.Println()\n\n\tfmt.Printf(\"## varint encoding\\n\")\n\tpostDedupBytes := 0\n\tpreDedupBytes := 0\n\tvar sizes Dist\n\tfor _, info := range dups {\n\t\tsize := len(info.tab.Raw)\n\t\tpostDedupBytes += size\n\t\tpreDedupBytes += size * info.count\n\t\tsizes.Add(size)\n\t}\n\tfmt.Printf(\"tabs: %d bytes post-dedup\\n%s\\n\", postDedupBytes, sizes.StringSummary())\n\tfmt.Printf(\"tabs: %d bytes pre-dedup\\n\", preDedupBytes)\n\tfmt.Printf(\"dedup saves: %d bytes\\n\", preDedupBytes-postDedupBytes)\n\tif true {\n\t\tdedupCountBySize := make(map[int]int)\n\t\tfor _, info := range dups {\n\t\t\tdedupCountBySize[len(info.tab.Raw)] += info.count\n\t\t}\n\n\t\tfmt.Printf(\"duplicates by size:\\n\")\n\t\tfmt.Printf(\"%7s %7s %7s:\\n\", \"size\", \"#dups\", \"saving\")\n\t\tsizes := maps.Keys(dedupCountBySize)\n\t\tsort.Ints(sizes)\n\t\tfor _, size := range sizes {\n\t\t\tfmt.Printf(\"%7d %7d %7d\\n\", size, dedupCountBySize[size], 
size*dedupCountBySize[size])\n\t\t}\n\t}\n\tfmt.Println()\n\n\tfmt.Printf(\"## alternate encoding\\n\")\n\taltPostDedupBytes := 0\n\taltPreDedupBytes := 0\n\tvar altSizes Dist\n\tfor alt, count := range altDups {\n\t\taltPostDedupBytes += len(alt)\n\t\taltPreDedupBytes += len(alt) * count\n\t\taltSizes.Add(len(alt))\n\t}\n\tfmt.Printf(\"tabs: %d bytes post-dedup (%+f%% vs varint)\\n%s\\n\", altPostDedupBytes, diffPct(postDedupBytes, altPostDedupBytes), altSizes.StringSummary())\n\tfmt.Printf(\"tabs: %d bytes pre-dedup\\n\", altPreDedupBytes)\n\tfmt.Printf(\"dedup saves: %d bytes\\n\", altPreDedupBytes-altPostDedupBytes)\n\tfmt.Printf(\"file size change: %+f%%\\n\", diffPct(fileBytes, fileBytes-postDedupBytes+altPostDedupBytes))\n}\n\nfunc diffPct(before, after int) float64 {\n\treturn float64(100*after)/float64(before) - 100\n}\n"
  },
  {
    "path": "pcvaluetab/read.go",
    "content": "// Copyright 2023 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"reflect\"\n)\n\ntype Decoder struct {\n\tOrder   binary.ByteOrder\n\tIntSize int // 4 or 8\n\tPtrSize int // 4 or 8\n}\n\nfunc (d *Decoder) Read(data []byte, out any) (int, error) {\n\trv := reflect.ValueOf(out)\n\tswitch rv.Kind() {\n\tcase reflect.Pointer:\n\t\treturn d.read1(data, rv.Elem())\n\tcase reflect.Slice:\n\t\tpos := 0\n\t\tn := rv.Len()\n\t\tfor i := 0; i < n; i++ {\n\t\t\tlen, err := d.read1(data[pos:], rv.Index(i))\n\t\t\tif err != nil {\n\t\t\t\treturn pos, err\n\t\t\t}\n\t\t\tpos += len\n\t\t}\n\t\treturn pos, nil\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"out must be a pointer, got %T\", out)\n\t}\n}\n\nfunc (d *Decoder) read1(data []byte, out reflect.Value) (int, error) {\n\tkind := out.Kind()\n\tvar size int\n\n\tswitch kind {\n\tcase reflect.Struct:\n\t\tpos := 0\n\t\tnf := out.NumField()\n\t\tfor i := 0; i < nf; i++ {\n\t\t\tlen, err := d.read1(data[pos:], out.Field(i))\n\t\t\tif err != nil {\n\t\t\t\treturn pos, err\n\t\t\t}\n\t\t\tpos += len\n\t\t}\n\t\treturn pos, nil\n\n\tcase reflect.Array:\n\t\tpos := 0\n\t\tn := out.Len()\n\t\tfor i := 0; i < n; i++ {\n\t\t\tlen, err := d.read1(data[pos:], out.Index(i))\n\t\t\tif err != nil {\n\t\t\t\treturn pos, err\n\t\t\t}\n\t\t\tpos += len\n\t\t}\n\t\treturn pos, nil\n\n\t// Flatten kinds.\n\tcase reflect.Uintptr:\n\t\tswitch d.PtrSize {\n\t\tcase 4:\n\t\t\tkind = reflect.Uint32\n\t\tcase 8:\n\t\t\tkind = reflect.Uint64\n\t\tdefault:\n\t\t\treturn 0, fmt.Errorf(\"cannot decode into uintptr before PtrSize is set\")\n\t\t}\n\tcase reflect.Int:\n\t\tswitch d.IntSize {\n\t\tcase 4:\n\t\t\tkind = reflect.Int32\n\t\tcase 8:\n\t\t\tkind = reflect.Int64\n\t\tdefault:\n\t\t\treturn 0, fmt.Errorf(\"cannot decode into int before IntSize is set\")\n\t\t}\n\tcase 
reflect.Uint:\n\t\tswitch d.IntSize {\n\t\tcase 4:\n\t\t\tkind = reflect.Uint32\n\t\tcase 8:\n\t\t\tkind = reflect.Uint64\n\t\tdefault:\n\t\t\treturn 0, fmt.Errorf(\"cannot decode into uint before IntSize is set\")\n\t\t}\n\t}\n\n\t// Decode basic types\n\tswitch kind {\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"unimplemented kind %s\", kind)\n\tcase reflect.Uint8:\n\t\tsize = 1\n\t\tout.SetUint(uint64(data[0]))\n\tcase reflect.Uint16:\n\t\tsize = 2\n\t\tout.SetUint(uint64(d.Order.Uint16(data)))\n\tcase reflect.Uint32:\n\t\tsize = 4\n\t\tout.SetUint(uint64(d.Order.Uint32(data)))\n\tcase reflect.Uint64:\n\t\tsize = 8\n\t\tout.SetUint(uint64(d.Order.Uint64(data)))\n\tcase reflect.Int8:\n\t\tsize = 1\n\t\tout.SetInt(int64(data[0]))\n\tcase reflect.Int16:\n\t\tsize = 2\n\t\tout.SetInt(int64(d.Order.Uint16(data)))\n\tcase reflect.Int32:\n\t\tsize = 4\n\t\tout.SetInt(int64(d.Order.Uint32(data)))\n\tcase reflect.Int64:\n\t\tsize = 8\n\t\tout.SetInt(int64(d.Order.Uint64(data)))\n\t}\n\treturn size, nil\n}\n"
  },
  {
    "path": "pcvaluetab/symtab.go",
    "content": "// Copyright 2023 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\n// Decode the Go 1.21 symbol table and PCDATA tables.\n\nimport (\n\t\"bytes\"\n\t\"debug/elf\"\n\t\"encoding/binary\"\n\t\"log\"\n\t\"unsafe\"\n)\n\ntype SymTab struct {\n\tdec Decoder\n\n\theader rawPCHeader\n\n\tfuncNameData []byte\n\tpctabData    []byte\n\tpclnData     []byte\n\n\tFuncs  []Func\n\tPCTabs map[PCTabKey]*VarintPCData\n}\n\ntype Func struct {\n\tName    string\n\tEncSize int // Bytes in rawFunc\n\tTextLen int\n\tPCTabs  []PCTabKey\n}\n\ntype PCTabKey uint32\n\ntype VarintPCData struct {\n\tRaw     []byte\n\tPCs     []uint32\n\tVals    []int32\n\tTextLen uint32\n}\n\nfunc LoadSymTab(path string) *SymTab {\n\tf, err := elf.Open(path)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer f.Close()\n\tsec := f.Section(\".gopclntab\")\n\tif sec == nil {\n\t\tlog.Fatal(\"missing .gopclntab\")\n\t}\n\tdata, err := sec.Data()\n\tif err != nil {\n\t\tlog.Fatal(\"reading .gopclntab: \", err)\n\t}\n\n\tdec := Decoder{Order: binary.LittleEndian}\n\tvar header rawPCHeader\n\t// Read the pre-header, which gives us order and size information.\n\tif _, err := dec.Read(data, &header.rawPCHeader1); err != nil {\n\t\tlog.Fatal(\"reading header: \", err)\n\t}\n\tswitch header.Magic {\n\tcase 0xFFFFFFF1:\n\t\t// Go 1.20 little endian\n\tcase 0xF1FFFFFF:\n\t\t// Go 1.20 big endian\n\t\tdec.Order = binary.BigEndian\n\tdefault:\n\t\tlog.Fatalf(\"bad magic in header: %x\", header.Magic)\n\t}\n\tdec.IntSize = int(header.PtrSize)\n\tdec.PtrSize = int(header.PtrSize)\n\t// Read the full header\n\tif _, err := dec.Read(data, &header); err != nil {\n\t\tlog.Fatal(\"reading header: \", err)\n\t}\n\n\t// Extract data from the header.\n\tsymtab := new(SymTab)\n\tsymtab.dec = dec\n\tsymtab.header = header\n\tsymtab.funcNameData = data[header.FuncnameOffset:]\n\tsymtab.pctabData = 
data[header.PctabOffset:]\n\tsymtab.pclnData = data[header.PclnOffset:]\n\n\t// Read the func offsets table. Alternating PC, func_ offset. Ends with a\n\t// single \"last PC\".\n\tfuncOffsets := make([]uint32, 2*header.Nfunc+1)\n\tif _, err := dec.Read(symtab.pclnData, funcOffsets); err != nil {\n\t\tlog.Fatal(\"reading func offsets: \", err)\n\t}\n\n\t// Load functions.\n\tsymtab.PCTabs = map[PCTabKey]*VarintPCData{}\n\tfor i := 0; i < header.Nfunc; i++ {\n\t\toff := funcOffsets[2*i+1]\n\n\t\tvar raw rawFunc\n\t\trawLen, err := dec.Read(symtab.pclnData[off:], &raw)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"reading func: \", err)\n\t\t}\n\t\tnameLen := bytes.IndexByte(symtab.funcNameData[raw.NameOff:], 0)\n\t\tname := string(symtab.funcNameData[raw.NameOff:][:nameLen])\n\n\t\tencSize := rawLen + 4*int(raw.Npcdata) + 4*int(raw.Nfuncdata)\n\t\t// Round to pointer size\n\t\tencSize = (encSize + dec.PtrSize - 1) &^ (dec.PtrSize - 1)\n\n\t\t// raw.Npcdata PCDATA offsets follow the header.\n\t\tpcTabs := make([]PCTabKey, 3+raw.Npcdata)\n\t\tpcTabs[0] = PCTabKey(raw.Pcfile)\n\t\tpcTabs[1] = PCTabKey(raw.Pcln)\n\t\tpcTabs[2] = PCTabKey(raw.Pcsp)\n\t\tif _, err := dec.Read(symtab.pclnData[off+uint32(rawLen):], pcTabs[3:]); err != nil {\n\t\t\tlog.Fatal(\"reading pcdata offsets: \", err)\n\t\t}\n\n\t\t// Load PCDATA tables.\n\t\ttextLen := 0\n\t\tfor _, off := range pcTabs {\n\t\t\tif off == 0 {\n\t\t\t\t// Unused\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tpctab := symtab.PCTabs[off]\n\t\t\tif pctab == nil {\n\t\t\t\tpctab = decodeVarintPCData(symtab.pctabData[off:])\n\t\t\t\tsymtab.PCTabs[off] = pctab\n\t\t\t}\n\n\t\t\tif textLen == 0 {\n\t\t\t\ttextLen = int(pctab.TextLen)\n\t\t\t} else if textLen != int(pctab.TextLen) {\n\t\t\t\tlog.Printf(\"function %s has both length %d and length %d\", name, textLen, pctab.TextLen)\n\t\t\t}\n\n\t\t}\n\n\t\tfn := Func{Name: name, EncSize: encSize, TextLen: textLen, PCTabs: pcTabs}\n\t\tsymtab.Funcs = append(symtab.Funcs, fn)\n\t}\n\n\treturn 
symtab\n}\n\n// decodeVarintPCData decodes an entire varint PCDATA table.\nfunc decodeVarintPCData(data []byte) *VarintPCData {\n\ttab := new(VarintPCData)\n\tpc, val := uint32(0), int32(-1)\n\tpos := 0\n\tfor pos < len(data) && (data[pos] != 0 || pos == 0) {\n\t\tuvdelta, l := binary.Varint(data[pos:])\n\t\tif l <= 0 {\n\t\t\tpanic(\"bad varint\")\n\t\t}\n\t\tpos += l\n\t\tval += int32(uvdelta)\n\n\t\ttab.PCs = append(tab.PCs, pc)\n\t\ttab.Vals = append(tab.Vals, val)\n\n\t\tpcdelta, l := binary.Uvarint(data[pos:])\n\t\tif l <= 0 {\n\t\t\tpanic(\"bad uvarint\")\n\t\t}\n\t\tpos += l\n\t\tpc += uint32(pcdelta)\n\t}\n\tif pos == len(data) {\n\t\tlog.Fatalf(\"PCDATA truncated\")\n\t}\n\ttab.Raw = data[:pos+1]\n\ttab.TextLen = pc\n\treturn tab\n}\n\nfunc (t *VarintPCData) Lookup(pc uint32) int32 {\n\tif pc > t.TextLen {\n\t\tpanic(\"pc too big\")\n\t}\n\tfor i, pc1 := range t.PCs {\n\t\tif pc1 > pc {\n\t\t\treturn t.Vals[i-1]\n\t\t}\n\t}\n\treturn t.Vals[len(t.Vals)-1]\n}\n\ntype pcvalueCache struct {\n\tentries [2][8]pcvalueCacheEnt\n}\n\ntype pcvalueCacheEnt struct {\n\t// targetpc and off together are the key of this cache entry.\n\ttargetpc uintptr\n\toff      uint32\n\n\tval   int32   // The value of this entry.\n\tvalPC uintptr // The PC at which val starts\n}\n\n// pcvalueCacheKey returns the outermost index in a pcvalueCache to use for targetpc.\n// It must be very cheap to calculate.\n// For now, align to goarch.PtrSize and reduce mod the number of entries.\n// In practice, this appears to be fairly randomly and evenly distributed.\nfunc pcvalueCacheKey(targetpc uintptr) uintptr {\n\treturn (targetpc / PtrSize) % uintptr(len(pcvalueCache{}.entries))\n}\n\nconst PtrSize = 8\n\n//go:linkname fastrandn runtime.fastrandn\nfunc fastrandn(n uint32) uint32\n\nfunc lookupVarintPCData(p []byte, targetpc uintptr, cache *pcvalueCache) (int32, uintptr) {\n\t// This closely follows runtime.pcdata\n\t//\n\t// TODO: Should we add the caching logic for a fairer 
comparison?\n\n\t// Check the cache. This speeds up walks of deep stacks, which\n\t// tend to have the same recursive functions over and over.\n\t//\n\t// This cache is small enough that full associativity is\n\t// cheaper than doing the hashing for a less associative\n\t// cache.\n\toff := uint32(uintptr(unsafe.Pointer(&p[0])))\n\tif cache != nil {\n\t\tx := pcvalueCacheKey(targetpc)\n\t\tfor i := range cache.entries[x] {\n\t\t\t// We check off first because we're more\n\t\t\t// likely to have multiple entries with\n\t\t\t// different offsets for the same targetpc\n\t\t\t// than the other way around, so we'll usually\n\t\t\t// fail in the first clause.\n\t\t\tent := &cache.entries[x][i]\n\t\t\tif ent.off == off && ent.targetpc == targetpc {\n\t\t\t\treturn ent.val, ent.valPC\n\t\t\t}\n\t\t}\n\t}\n\n\tpc := uintptr(0)\n\tprevpc := pc\n\tval := int32(-1)\n\tfor {\n\t\tvar ok bool\n\t\tp, ok = step(p, &pc, &val, pc == 0)\n\t\tif !ok {\n\t\t\tbreak\n\t\t}\n\t\tif targetpc < pc {\n\t\t\t// Replace a random entry in the cache. 
Random\n\t\t\t// replacement prevents a performance cliff if\n\t\t\t// a recursive stack's cycle is slightly\n\t\t\t// larger than the cache.\n\t\t\t// Put the new element at the beginning,\n\t\t\t// since it is the most likely to be newly used.\n\t\t\tif cache != nil {\n\t\t\t\tx := pcvalueCacheKey(targetpc)\n\t\t\t\te := &cache.entries[x]\n\t\t\t\tci := fastrandn(uint32(len(cache.entries[x])))\n\t\t\t\te[ci] = e[0]\n\t\t\t\te[0] = pcvalueCacheEnt{\n\t\t\t\t\ttargetpc: targetpc,\n\t\t\t\t\toff:      off,\n\t\t\t\t\tval:      val,\n\t\t\t\t\tvalPC:    prevpc,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn val, prevpc\n\t\t}\n\t\tprevpc = pc\n\t}\n\n\tpanic(\"invalid pc-encoded table\")\n}\n\n// TODO: We should read this from the header, but in the runtime it's constant,\n// so for fair comparison, we make it a constant here, too.\nconst PCQuantum = 1\n\n// step advances to the next pc, value pair in the encoded table.\nfunc step(p []byte, pc *uintptr, val *int32, first bool) (newp []byte, ok bool) {\n\t// For both uvdelta and pcdelta, the common case (~70%)\n\t// is that they are a single byte. If so, avoid calling readvarint.\n\tuvdelta := uint32(p[0])\n\tif uvdelta == 0 && !first {\n\t\treturn nil, false\n\t}\n\tn := uint32(1)\n\tif uvdelta&0x80 != 0 {\n\t\tn, uvdelta = readvarint(p)\n\t}\n\t*val += int32(-(uvdelta & 1) ^ (uvdelta >> 1))\n\tp = p[n:]\n\n\tpcdelta := uint32(p[0])\n\tn = 1\n\tif pcdelta&0x80 != 0 {\n\t\tn, pcdelta = readvarint(p)\n\t}\n\tp = p[n:]\n\t*pc += uintptr(pcdelta * PCQuantum)\n\treturn p, true\n}\n\n// readvarint reads a varint from p.\nfunc readvarint(p []byte) (read uint32, val uint32) {\n\tvar v, shift, n uint32\n\tfor {\n\t\tb := p[n]\n\t\tn++\n\t\tv |= uint32(b&0x7F) << (shift & 31)\n\t\tif b&0x80 == 0 {\n\t\t\tbreak\n\t\t}\n\t\tshift += 7\n\t}\n\treturn n, v\n}\n"
  },
  {
    "path": "ptype/main.go",
    "content": "// Copyright 2017 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Command ptype prints Go types from a binary using DWARF info.\n//\n// ptype binary <types...> prints types matching the given regexps in\n// binary in (approximate) Go syntax. If no types are named, ptype\n// prints all named types.\n//\n// The printed types are annotated with information about sizes, field\n// offsets, and gaps between fields.\n//\n// The printed types are as close as possible to Go type syntax, but\n// aren't guaranteed to be legal Go code (e.g., unions have no Go\n// equivalent). ptype backs out high-level Go types such as maps and\n// channels where possible.\n//\n// For example, ptype ptype runtime.mcache prints:\n//\n//     type runtime.mcache struct {\n//             // 1200 byte struct\n//             next_sample int32                         // offset 0\n//             // 4 byte gap\n//             local_scan uintptr                        // offset 8\n//             tiny uintptr                              // offset 16\n//             tinyoffset uintptr                        // offset 24\n//             local_tinyallocs uintptr                  // offset 32\n//             alloc [67]*mspan                          // offset 40\n//             stackcache [4]struct {                    // offset 576\n//                     list runtime.gclinkptr            // offset 576 + 16*i\n//                     size uintptr                      // offset 576 + 16*i + 8\n//             }\n//             local_nlookup uintptr                     // offset 640\n//             local_largefree uintptr                   // offset 648\n//             local_nlargefree uintptr                  // offset 656\n//             local_nsmallfree [67]uintptr              // offset 664\n//     }\npackage main\n\nimport 
(\n\t\"debug/dwarf\"\n\t\"debug/elf\"\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"unicode/utf8\"\n)\n\nfunc main() {\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage: %s binary <type-regexp...>\\n\", os.Args[0])\n\t}\n\tflag.Parse()\n\tif flag.NArg() < 1 {\n\t\tflag.Usage()\n\t\tos.Exit(2)\n\t}\n\tbinPath := flag.Arg(0)\n\n\t// Parse type regexp args.\n\tregexps := []*regexp.Regexp{}\n\tfor _, tre := range flag.Args()[1:] {\n\t\tre, err := regexp.Compile(\"^\" + tre)\n\t\tif err != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"bad regexp %q: %s\", tre, err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tregexps = append(regexps, re)\n\t}\n\tif len(regexps) == 0 {\n\t\tregexps = append(regexps, regexp.MustCompile(\".*\"))\n\t}\n\n\t// Parse binary.\n\tf, err := elf.Open(binPath)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer f.Close()\n\td, err := f.DWARF()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Find all of the named types.\n\tr := d.Reader()\n\tfor {\n\t\tent, err := r.Next()\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif ent == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tif ent.Tag != dwarf.TagTypedef {\n\t\t\tcontinue\n\t\t}\n\n\t\tname, ok := ent.Val(dwarf.AttrName).(string)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Do we want this type?\n\t\tmatched := false\n\t\tfor _, re := range regexps {\n\t\t\tif re.MatchString(name) {\n\t\t\t\tmatched = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif isBuiltinName(name) || !matched {\n\t\t\tr.SkipChildren()\n\t\t\tcontinue\n\t\t}\n\n\t\t// Print the type.\n\t\tbase, ok := ent.Val(dwarf.AttrType).(dwarf.Offset)\n\t\tif !ok {\n\t\t\tlog.Printf(\"type %s has unknown underlying type\", name)\n\t\t\tcontinue\n\t\t}\n\n\t\ttyp, err := d.Type(base)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tpkg := \"\"\n\t\tif i := strings.LastIndex(name, \".\"); i >= 0 {\n\t\t\tpkg = name[:i+1]\n\t\t}\n\n\t\tp := &typePrinter{pkg: pkg}\n\t\tp.fmt(\"type %s \", 
name)\n\t\tp.printType(typ)\n\t\tp.fmt(\"\\n\\n\")\n\n\t\tr.SkipChildren()\n\t}\n}\n\nfunc isBuiltinName(typeName string) bool {\n\tswitch typeName {\n\tcase \"string\":\n\t\treturn true\n\t}\n\treturn strings.HasPrefix(typeName, \"map[\") ||\n\t\tstrings.HasPrefix(typeName, \"func(\") ||\n\t\tstrings.HasPrefix(typeName, \"chan \") ||\n\t\tstrings.HasPrefix(typeName, \"chan<- \") ||\n\t\tstrings.HasPrefix(typeName, \"<-chan \")\n}\n\ntype typePrinter struct {\n\toffset []int64\n\tdepth  int\n\tnameOk int\n\tpkg    string\n\n\t// pos is the current character position on this line.\n\tpos int\n\n\t// lineComment is a comment to print at the end of this line.\n\tlineComment string\n}\n\nfunc (p *typePrinter) fmt(f string, args ...interface{}) {\n\tb := fmt.Sprintf(f, args...)\n\tif strings.IndexAny(b, \"\\n\\t\") < 0 {\n\t\tfmt.Printf(\"%s\", b)\n\t\tp.pos += utf8.RuneCountInString(b)\n\t\treturn\n\t}\n\tlines := strings.Split(b, \"\\n\")\n\tfor i, line := range lines {\n\t\thasNL := i < len(lines)-1\n\t\tif p.lineComment == \"\" && hasNL {\n\t\t\t// Fast path for complete lines with no comment.\n\t\t\tfmt.Printf(\"%s\\n\", line)\n\t\t\tp.pos = 0\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, r := range line {\n\t\t\tif r == '\\t' {\n\t\t\t\tp.pos = (p.pos + 8) &^ 7\n\t\t\t} else {\n\t\t\t\tp.pos++\n\t\t\t}\n\t\t}\n\t\tfmt.Printf(\"%s\", line)\n\t\tif hasNL {\n\t\t\tif p.lineComment != \"\" {\n\t\t\t\tspace := 50 - p.pos\n\t\t\t\tif space < 1 {\n\t\t\t\t\tspace = 1\n\t\t\t\t}\n\t\t\t\tfmt.Printf(\"%*s// %s\", space, \"\", p.lineComment)\n\t\t\t\tp.lineComment = \"\"\n\t\t\t}\n\t\t\tfmt.Printf(\"\\n\")\n\t\t\tp.pos = 0\n\t\t}\n\t}\n}\n\nfunc (p *typePrinter) setLineComment(f string, args ...interface{}) {\n\tif p.lineComment != \"\" {\n\t\tpanic(\"multiple line comments\")\n\t}\n\tp.lineComment = fmt.Sprintf(f, args...)\n}\n\nfunc (p *typePrinter) stripPkg(name string) string {\n\tif p.pkg != \"\" && strings.HasPrefix(name, p.pkg) {\n\t\treturn name[len(p.pkg):]\n\t}\n\treturn 
name\n}\n\nfunc (p *typePrinter) printType(typ dwarf.Type) {\n\tif p.offset == nil {\n\t\tp.offset = []int64{0}\n\t}\n\n\tif p.nameOk > 0 && typ.Common().Name != \"\" {\n\t\tp.fmt(\"%s\", p.stripPkg(typ.Common().Name))\n\t\tp.offset[len(p.offset)-1] += typ.Size()\n\t\treturn\n\t}\n\n\tswitch typ := typ.(type) {\n\tcase *dwarf.ArrayType:\n\t\tif typ.Count < 0 {\n\t\t\tp.fmt(\"[incomplete]\")\n\t\t} else {\n\t\t\tp.fmt(\"[%d]\", typ.Count)\n\t\t}\n\t\tif typ.StrideBitSize > 0 {\n\t\t\tp.fmt(\"/* %d bit element */\", typ.StrideBitSize)\n\t\t}\n\t\torigOffset := p.offset\n\t\tp.offset = append(p.offset, typ.Type.Size(), 0)\n\t\tp.printType(typ.Type)\n\t\tp.offset = origOffset\n\n\tcase *dwarf.StructType:\n\t\tif typ.StructName != \"\" && (p.nameOk > 0 || isBuiltinName(typ.StructName)) {\n\t\t\tp.fmt(\"%s\", p.stripPkg(typ.StructName))\n\t\t\tbreak\n\t\t}\n\n\t\tif strings.HasPrefix(typ.StructName, \"[]\") {\n\t\t\tp.fmt(\"[]\")\n\t\t\telem := typ.Field[0].Type\n\t\t\torigOffset := p.offset\n\t\t\tp.offset = append(p.offset, elem.Size(), 0)\n\t\t\tp.printType(elem)\n\t\t\tp.offset = origOffset\n\t\t\tbreak\n\t\t}\n\n\t\tif typ.StructName == \"runtime.eface\" {\n\t\t\tp.fmt(\"interface{}\")\n\t\t\tbreak\n\t\t} else if typ.StructName == \"runtime.iface\" {\n\t\t\tp.fmt(\"interface{ ... 
}\")\n\t\t\tbreak\n\t\t}\n\n\t\tisUnion := typ.Kind == \"union\"\n\t\tp.fmt(\"%s {\", typ.Kind)\n\t\tif typ.Incomplete {\n\t\t\tp.fmt(\" incomplete }\")\n\t\t\tbreak\n\t\t}\n\t\tp.depth++\n\t\tindent := \"\\n\" + strings.Repeat(\"\\t\", p.depth)\n\t\tp.fmt(\"%s// %d byte %s\", indent, typ.Size(), typ.Kind)\n\t\tstartOffset := p.offset[len(p.offset)-1]\n\t\tvar prevEnd int64\n\t\tfor i, f := range typ.Field {\n\t\t\tp.fmt(indent)\n\t\t\t// TODO: Bit offsets?\n\t\t\tif !isUnion {\n\t\t\t\toffset := startOffset + f.ByteOffset\n\t\t\t\tif i > 0 && prevEnd < offset {\n\t\t\t\t\tp.fmt(\"// %d byte gap\", offset-prevEnd)\n\t\t\t\t\tp.fmt(indent)\n\t\t\t\t}\n\t\t\t\tp.offset[len(p.offset)-1] = offset\n\t\t\t\tp.setLineComment(\"offset %s\", p.strOffset())\n\t\t\t\tif f.Type.Size() < 0 {\n\t\t\t\t\t// Who knows. Give up.\n\t\t\t\t\t// TODO: This happens for funcs.\n\t\t\t\t\tprevEnd = (1 << 31) - 1\n\t\t\t\t} else {\n\t\t\t\t\tprevEnd = offset + f.Type.Size()\n\t\t\t\t}\n\t\t\t}\n\t\t\tp.fmt(\"%s \", f.Name)\n\t\t\tp.printType(f.Type)\n\t\t\tif f.BitSize != 0 {\n\t\t\t\tp.fmt(\" : %d\", f.BitSize)\n\t\t\t}\n\t\t}\n\t\tp.offset[len(p.offset)-1] = startOffset\n\t\tp.depth--\n\t\tp.fmt(\"\\n%s}\", strings.Repeat(\"\\t\", p.depth))\n\n\tcase *dwarf.EnumType:\n\t\tp.fmt(\"enum\") // TODO\n\n\tcase *dwarf.BoolType, *dwarf.CharType, *dwarf.ComplexType, *dwarf.FloatType, *dwarf.IntType, *dwarf.UcharType, *dwarf.UintType:\n\t\t// Basic types.\n\t\tp.fmt(\"%s\", typ.String())\n\n\tcase *dwarf.PtrType:\n\t\tif _, ok := typ.Type.(*dwarf.VoidType); ok {\n\t\t\t// *void is unsafe.Pointer\n\t\t\tp.fmt(\"unsafe.Pointer\")\n\t\t\tbreak\n\t\t}\n\t\torigOffset := p.offset\n\t\tp.offset = []int64{0}\n\t\tp.nameOk++\n\t\tp.fmt(\"*\")\n\t\tp.printType(typ.Type)\n\t\tp.nameOk--\n\t\tp.offset = origOffset\n\n\tcase *dwarf.FuncType:\n\t\t// TODO: Expand ourselves so we can clean up argument\n\t\t// types, etc.\n\t\tp.fmt(typ.String())\n\n\tcase *dwarf.QualType:\n\t\tp.fmt(\"/* %s */ \", 
typ.Qual)\n\t\tp.printType(typ.Type)\n\n\tcase *dwarf.TypedefType:\n\t\tn := typ.Common().Name\n\t\tif isBuiltinName(n) {\n\t\t\t// TODO: Make Go-ifying optional.\n\t\t\t//\n\t\t\t// TODO: Expand map types ourselves if\n\t\t\t// possible so we can clean up the type names.\n\t\t\tp.fmt(\"%s\", n)\n\t\t\treturn\n\t\t}\n\n\t\treal := typ.Type\n\t\tfor {\n\t\t\tif real2, ok := real.(*dwarf.TypedefType); ok {\n\t\t\t\treal = real2.Type\n\t\t\t} else {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif str, ok := real.(*dwarf.StructType); ok {\n\t\t\tswitch str.StructName {\n\t\t\tcase \"runtime.iface\", \"runtime.eface\":\n\t\t\t\t// Named interface type.\n\t\t\t\tp.fmt(p.stripPkg(n))\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\t// TODO: If it's \"type x map...\" or similar, we never\n\t\t// see the \"map[...]\" style name and only see that x's\n\t\t// underlying type is a pointer to a struct named\n\t\t// \"hash<...>\".\n\n\t\tp.fmt(\"/* %s */ \", p.stripPkg(n))\n\t\tp.printType(real)\n\n\tcase *dwarf.UnspecifiedType:\n\t\tp.fmt(\"unspecified\")\n\n\tcase *dwarf.VoidType:\n\t\tp.fmt(\"void\")\n\t}\n\n\tp.offset[len(p.offset)-1] += typ.Size()\n}\n\nfunc (p *typePrinter) strOffset() string {\n\tbuf := fmt.Sprintf(\"%d\", p.offset[0])\n\tfor i, idx := 1, 'i'; i < len(p.offset); i, idx = i+2, idx+1 {\n\t\tbuf += fmt.Sprintf(\" + %d*%c\", p.offset[i], idx)\n\t\tif p.offset[i+1] != 0 {\n\t\t\tbuf += fmt.Sprintf(\" + %d\", p.offset[i+1])\n\t\t}\n\t}\n\treturn buf\n}\n"
  },
  {
    "path": "rtanalysis/directives/analysis.go",
    "content": "package directives\n\nimport (\n\t\"go/ast\"\n\t\"reflect\"\n\t\"strings\"\n\n\t\"golang.org/x/tools/go/analysis\"\n)\n\nvar Analyzer = &analysis.Analyzer{\n\tName:       \"directives\",\n\tDoc:        \"collect //go:* directives for function declarations\",\n\tRun:        run,\n\tResultType: reflect.TypeOf(Result(nil)),\n}\n\ntype Result map[*ast.FuncDecl][]string\n\nfunc run(pass *analysis.Pass) (interface{}, error) {\n\tres := Result{}\n\tfor _, f := range pass.Files {\n\t\tcgs := f.Comments\n\t\tfor _, decl := range f.Decls {\n\t\t\t// Process comments before decl.\n\t\t\tvar directives []string\n\t\t\tfor len(cgs) > 0 && cgs[0].Pos() < decl.Pos() {\n\t\t\t\tfor _, c := range cgs[0].List {\n\t\t\t\t\tif strings.HasPrefix(c.Text, \"//go:\") {\n\t\t\t\t\t\tdirectives = append(directives, strings.TrimSpace(c.Text))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcgs = cgs[1:]\n\t\t\t}\n\t\t\t// Ignore comments in decl.\n\t\t\tfor len(cgs) > 0 && cgs[0].Pos() < decl.End() {\n\t\t\t\tcgs = cgs[1:]\n\t\t\t}\n\t\t\t// Attach directives to decl.\n\t\t\tif len(directives) > 0 {\n\t\t\t\tswitch decl := decl.(type) {\n\t\t\t\tcase *ast.FuncDecl:\n\t\t\t\t\tres[decl] = directives\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn res, nil\n}\n"
  },
  {
    "path": "rtanalysis/main.go",
    "content": "package main\n\nimport (\n\t\"github.com/aclements/go-misc/rtanalysis/systemstack\"\n\t\"golang.org/x/tools/go/analysis/singlechecker\"\n)\n\nfunc main() { singlechecker.Main(systemstack.Analyzer) }\n"
  },
  {
    "path": "rtanalysis/systemstack/analysis.go",
    "content": "package systemstack\n\nimport (\n\t\"fmt\"\n\t\"go/ast\"\n\t\"reflect\"\n\n\t\"github.com/aclements/go-misc/rtanalysis/directives\"\n\t\"golang.org/x/tools/go/analysis\"\n)\n\nvar Analyzer = &analysis.Analyzer{\n\tName:       \"onsystemstack\",\n\tDoc:        \"determines functions that always run on the systemstack\",\n\tRun:        run,\n\tResultType: reflect.TypeOf(Result(nil)),\n\tRequires:   []*analysis.Analyzer{directives.Analyzer},\n}\n\ntype Func struct {\n\t// Node is either a *ast.FuncDecl or an *ast.FuncLit.\n\tNode ast.Node\n}\n\nfunc (f Func) String() string {\n\tswitch f := f.Node.(type) {\n\tcase *ast.FuncDecl:\n\t\treturn f.Name.String()\n\tcase *ast.FuncLit:\n\t\treturn fmt.Sprintf(\"func@%v\", f.Pos())\n\t}\n\treturn \"<bad Func type>\"\n}\n\ntype Result map[Func]bool\n\nfunc run(pass *analysis.Pass) (interface{}, error) {\n\tres := Result{}\n\n\t// Seed functions that always run on the system stack.\n\tdirectives := pass.ResultOf[directives.Analyzer].(directives.Result)\n\tfor obj, dirs := range directives {\n\t\tfor _, dir := range dirs {\n\t\t\tif dir == \"//go:systemstack\" {\n\t\t\t\tres[Func{obj}] = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\t// TODO: Derive all this through the call graph, then complain\n\t// if we find a non-systemstack path to a go:systemstack\n\t// function.\n\n\t// Collect call graph and entry points to the system stack.\n\tunknownCaller := Func{&ast.FuncDecl{}}\n\tsystemstack := Func{&ast.FuncDecl{}}\n\tcallers := map[Func][]Func{}\n\tvar caller Func\n\tvar visit func(n ast.Node) bool\n\tvisit = func(n ast.Node) bool {\n\t\tif call, ok := n.(*ast.CallExpr); ok {\n\t\t\tvar id *ast.Ident\n\t\t\tswitch fun := call.Fun.(type) {\n\t\t\tcase *ast.Ident:\n\t\t\t\tif fun.Name == \"systemstack\" || fun.Name == \"mcall\" {\n\t\t\t\t\tif len(call.Args) != 1 {\n\t\t\t\t\t\tpass.Reportf(call.Pos(), \"wrong number of arguments to %s\", fun.Name)\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\t\t\t\t\tvar target 
Func\n\t\t\t\t\tswitch arg := call.Args[0].(type) {\n\t\t\t\t\tcase *ast.Ident:\n\t\t\t\t\t\tt, ok := arg.Obj.Decl.(*ast.FuncDecl)\n\t\t\t\t\t\tif ok {\n\t\t\t\t\t\t\ttarget = Func{t}\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tpass.Reportf(call.Pos(), \"%s argument isn't a static function\", fun.Name)\n\t\t\t\t\t\t}\n\t\t\t\t\tcase *ast.FuncLit:\n\t\t\t\t\t\ttarget = Func{arg}\n\t\t\t\t\t}\n\t\t\t\t\tfmt.Println(\"systemstack ->\", target) // XXX\n\t\t\t\t\tcallers[target] = append(callers[target], systemstack)\n\t\t\t\t\t// Don't descend into arguments.\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tid = fun\n\t\t\tcase *ast.SelectorExpr:\n\t\t\t\tid = fun.Sel\n\t\t\t}\n\t\t\tif id != nil && !pass.TypesInfo.Types[id].IsType() && id.Obj != nil {\n\t\t\t\tt, ok := id.Obj.Decl.(*ast.FuncDecl)\n\t\t\t\tif ok {\n\t\t\t\t\ttarget := Func{t}\n\t\t\t\t\tfmt.Println(caller, \"->\", target) // XXX\n\t\t\t\t\tcallers[target] = append(callers[target], caller)\n\t\t\t\t\t// Don't walk into call.Func.\n\t\t\t\t\tfor _, n := range call.Args {\n\t\t\t\t\t\tast.Inspect(n, visit)\n\t\t\t\t\t}\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t\tpass.Reportf(call.Pos(), \"unhandled call\")\n\t\t} else if id, ok := n.(*ast.Ident); ok {\n\t\t\tfmt.Println(\"XXX\", caller, id, id.Obj)\n\t\t\tif id.Obj != nil {\n\t\t\t\tif fun, ok := id.Obj.Decl.(*ast.FuncDecl); ok {\n\t\t\t\t\t// Bare call. 
We don't know how we enter it.\n\t\t\t\t\tfmt.Println(\"unknown ->\", fun) // XXX\n\t\t\t\t\tcallers[Func{fun}] = append(callers[Func{fun}], unknownCaller)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// XXX Walk into closures.\n\t\treturn true\n\t}\n\tfor _, f := range pass.Files {\n\t\tfor _, decl := range f.Decls {\n\t\t\tfdecl, ok := decl.(*ast.FuncDecl)\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcaller = Func{fdecl}\n\t\t\tfmt.Println(\"visit\", caller) // XXX\n\t\t\tast.Inspect(fdecl, visit)\n\t\t\tif fdecl.Name.IsExported() {\n\t\t\t\t// Exported functions have unknown callers.\n\t\t\t\tcallers[Func{fdecl}] = append(callers[Func{fdecl}], unknownCaller)\n\t\t\t}\n\t\t}\n\t}\n\n\tfmt.Println(callers)\n\treturn res, nil\n}\n"
  },
  {
    "path": "scanpagemap.go",
    "content": "package main\n\nimport (\n\t\"bufio\"\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"regexp\"\n\t\"strconv\"\n)\n\nconst pageSize = 4096\n\nvar smapsRe = regexp.MustCompile(\"^([0-9a-f]+)-([0-9a-f]+) \")\n\nfunc main() {\n\tpid, err := strconv.Atoi(os.Args[1])\n\tif err != nil {\n\t\tlog.Fatal(\"bad pid\")\n\t}\n\n\tsmaps, err := os.Open(fmt.Sprintf(\"/proc/%d/smaps\", pid))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tpagemap, err := os.Open(fmt.Sprintf(\"/proc/%d/pagemap\", pid))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tpageflags, err := os.Open(\"/proc/kpageflags\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tscanner := bufio.NewScanner(smaps)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tfmt.Println(line)\n\n\t\ts := smapsRe.FindStringSubmatch(line)\n\t\tif s == nil {\n\t\t\tcontinue\n\t\t}\n\t\tlo, _ := strconv.ParseUint(s[1], 16, 64)\n\t\thi, _ := strconv.ParseUint(s[2], 16, 64)\n\n\t\tdumpRange(pagemap, pageflags, lo, hi)\n\t}\n\tif err := scanner.Err(); err != nil {\n\t\tlog.Fatal(\"reading smaps: \", err)\n\t}\n}\n\nconst (\n\tPAGEFLAG_ANON          = 1 << 12\n\tPAGEFLAG_COMPOUND_HEAD = 1 << 15\n\tPAGEFLAG_COMPOUND_TAIL = 1 << 16\n\tPAGEFLAG_HUGE          = 1 << 17\n\tPAGEFLAG_THP           = 1 << 22\n\tPAGEFLAG_ZERO_PAGE     = 1 << 24\n)\n\ntype pageFlags uint64\n\nfunc (p pageFlags) String() string {\n\ts := \"\"\n\tif p&PAGEFLAG_ANON != 0 {\n\t\ts += \"ANON|\"\n\t\tp &^= PAGEFLAG_ANON\n\t}\n\tif p&PAGEFLAG_COMPOUND_HEAD != 0 {\n\t\ts += \"COMPOUND_HEAD|\"\n\t\tp &^= PAGEFLAG_COMPOUND_HEAD\n\t}\n\tif p&PAGEFLAG_COMPOUND_TAIL != 0 {\n\t\ts += \"COMPOUND_TAIL|\"\n\t\tp &^= PAGEFLAG_COMPOUND_TAIL\n\t}\n\tif p&PAGEFLAG_HUGE != 0 {\n\t\ts += \"HUGE|\"\n\t\tp &^= PAGEFLAG_HUGE\n\t}\n\tif p&PAGEFLAG_THP != 0 {\n\t\ts += \"THP|\"\n\t\tp &^= PAGEFLAG_THP\n\t}\n\tif p&PAGEFLAG_ZERO_PAGE != 0 {\n\t\ts += \"ZERO_PAGE|\"\n\t\tp &^= PAGEFLAG_ZERO_PAGE\n\t}\n\tif p != 0 {\n\t\treturn fmt.Sprintf(\"%s%#x\", s, 
uint64(p))\n\t}\n\tif s == \"\" {\n\t\treturn \"0\"\n\t}\n\treturn s[:len(s)-1]\n}\n\nfunc dumpRange(pagemap, pageflags *os.File, lo, hi uint64) {\n\tconst batch = 1024\n\tbuf := make([]byte, 8*batch)\n\tfor addr := lo; addr < hi; addr += pageSize * batch {\n\t\tif int(8*(hi-addr)/pageSize) < len(buf) {\n\t\t\tbuf = buf[:8*(hi-addr)/pageSize]\n\t\t}\n\t\t_, err := pagemap.ReadAt(buf, 8*int64(addr/pageSize))\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"reading pagemap: \", err)\n\t\t}\n\n\t\t// Decode pages in buf.\n\t\tfor i := 0; i < len(buf)/8; i++ {\n\t\t\tpageinfo := binary.LittleEndian.Uint64(buf[i*8:])\n\t\t\tif pageinfo&(1<<63) == 0 {\n\t\t\t\t// Not present.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Bits 0--54 are the PFN if present.\n\t\t\tpfn := pageinfo & (1<<55 - 1)\n\n\t\t\t// Look up PFN in pageflags.\n\t\t\tvar flagbuf [8]byte\n\t\t\t_, err := pageflags.ReadAt(flagbuf[:], int64(8*pfn))\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(\"reading pageflags: \", err)\n\t\t\t}\n\t\t\tflags := binary.LittleEndian.Uint64(flagbuf[:])\n\n\t\t\tfmt.Printf(\"%016x %08x %s\\n\", addr+uint64(i*pageSize), pfn, pageFlags(flags)) // XXX\n\t\t\tif flags&PAGEFLAG_THP != 0 && flags&PAGEFLAG_COMPOUND_HEAD != 0 {\n\t\t\t\t// Head of a transparent huge page.\n\t\t\t\ti += 511\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "split/README.md",
    "content": "This package is a prototype implementation of split (or \"sharded\")\nvalues for Go. This is a possible solution to\nhttps://github.com/golang/go/issues/18802.\n\n[![](https://godoc.org/github.com/aclements/go-misc/split?status.svg)](https://godoc.org/github.com/aclements/go-misc/split)\n\nThis prototype is very dependent on Go runtime internals. As is, it\ndoes not depend on any *modifications* to the Go runtime; however,\nthere is an optional runtime modification that shaves about 4ns off\nthe cost of `Value.Get`. See that method for details.\n\nBenchmarks\n----------\n\nWith the runtime modification, the single-core overhead of the split\nvalue compared to a single atomic counter is about 2 ns, and compared\nto a non-atomic counter is about 6 ns:\n\n```\nBenchmarkCounterSplit          \t200000000\t         8.15 ns/op\nBenchmarkCounterShared         \t300000000\t         5.96 ns/op\nBenchmarkCounterSequential     \t1000000000\t         2.14 ns/op\nBenchmarkLazyAggregationSplit  \t100000000\t        23.9 ns/op\nBenchmarkLazyAggregationShared \t100000000\t        23.1 ns/op\n```\n\nThe scaling of the split values to 24 cores is nearly perfect (real\ncores, no hyperthreads), while the shared values collapse as you'd\nexpect:\n\n```\nBenchmarkCounterSplit-24               \t2000000000\t         0.35 ns/op      8.40 cpu-ns/op\nBenchmarkCounterShared-24            \t50000000\t        24.7 ns/op     593    cpu-ns/op\nBenchmarkLazyAggregationSplit-24       \t2000000000\t         1.03 ns/op     24.7  cpu-ns/op\nBenchmarkLazyAggregationShared-24    \t10000000\t       174 ns/op      4176    cpu-ns/op\n```\n\nWithout the runtime modification, there's a little more overhead in\nthe sequential case, but the scaling isn't affected:\n\n```\nBenchmarkCounterSplit          \t100000000\t        12.3 ns/op\nBenchmarkCounterShared         \t300000000\t         5.97 ns/op\nBenchmarkCounterSequential     \t1000000000\t         2.28 ns/op\nBenchmarkLazyAggregationSplit  
\t50000000\t        25.2 ns/op\nBenchmarkLazyAggregationShared \t100000000\t        23.5 ns/op\n```\n"
  },
  {
    "path": "split/bench_test.go",
    "content": "// Copyright 2018 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage split\n\nimport (\n\t\"runtime\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n)\n\nfunc BenchmarkCounterSplitAtomic(b *testing.B) {\n\t// Benchmark a simple split counter updating using atomics.\n\tcounter := New(func(*uint64) {})\n\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\tatomic.AddUint64(counter.Get().(*uint64), 1)\n\t\t}\n\t})\n}\n\nfunc BenchmarkCounterSplitLocked(b *testing.B) {\n\t// Benchmark a simple split counter using locking instead of atomics.\n\ttype shard struct {\n\t\tsync.Mutex\n\t\tval uint64\n\t}\n\tcounter := New(func(*shard) {})\n\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\ts := counter.Get().(*shard)\n\t\t\ts.Lock()\n\t\t\ts.val++\n\t\t\ts.Unlock()\n\t\t}\n\t})\n}\n\nfunc BenchmarkCounterShared(b *testing.B) {\n\t// Non-sharded version of BenchmarkCounter.\n\tvar counter uint64\n\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\tatomic.AddUint64(&counter, 1)\n\t\t}\n\t})\n}\n\nvar seqCounter uint64\n\nfunc BenchmarkCounterSequential(b *testing.B) {\n\t// Sequential version of BenchmarkCounter without atomics. For\n\t// fair comparison with the cost of uncontended atomics, this\n\t// only runs at -test.cpu=1 and uses the RunParallel mechanics\n\t// so the overheads are the same (pb.Next gets inlined and has\n\t// no atomic ops in the fast path, so this is pretty small).\n\tif runtime.GOMAXPROCS(-1) != 1 {\n\t\tb.Skip(\"requires -test.cpu=1\")\n\t}\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\tseqCounter++\n\t\t}\n\t})\n}\n\nfunc BenchmarkRWMutex(b *testing.B) {\n\tvar m RWMutex\n\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor pb.Next() {\n\t\t\tm.RLock().RUnlock()\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "split/example_id_test.go",
    "content": "// Copyright 2018 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage split\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"sync/atomic\"\n)\n\n// UIDGenerator generates unique, reasonably dense integer IDs.\n//\n// The implementation supports efficient concurrent generation of IDs.\n// It works by retrieving batches of 256 IDs at a time from a central\n// ID source, and sub-allocating IDs within those batches.\ntype UIDGenerator struct {\n\tv        *Value\n\tbase     uint64\n\tbaseLock sync.Mutex\n}\n\nconst batchSize = 256\n\ntype uidShard struct {\n\tnext, limit uint64\n}\n\n// NewUIDGenerator returns a new generator for unique IDs.\nfunc NewUIDGenerator() *UIDGenerator {\n\tg := &UIDGenerator{}\n\tg.v = New(func(s *uidShard) {\n\t\tg.baseLock.Lock()\n\t\tdefer g.baseLock.Unlock()\n\t\t*s = uidShard{g.base, g.base + batchSize}\n\t\tg.base += batchSize\n\t})\n\treturn g\n}\n\n// GenUID returns a uint64 that is distinct from the uint64 returned\n// by every other call to GenUID on g.\nfunc (g *UIDGenerator) GenUID() uint64 {\nretry:\n\tshard := g.v.Get().(*uidShard)\n\tlimit := atomic.LoadUint64(&shard.limit)\n\tid := atomic.AddUint64(&shard.next, 1)\n\tif id < limit {\n\t\t// Fast path: we got an ID in the batch.\n\t\treturn id\n\t}\n\t// Slow path: the batch ran out. Get a new batch. 
This\n\t// is tricky because multiple genUIDs could enter the\n\t// slow path for the same shard.\n\tg.baseLock.Lock()\n\t// Check if another genUID already got a new batch for\n\t// this shard.\n\tif atomic.LoadUint64(&shard.limit) != limit {\n\t\tg.baseLock.Unlock()\n\t\tgoto retry\n\t}\n\t// This genUID won the race to get a new shard for\n\t// this batch.\n\tbase := g.base\n\tg.base += batchSize\n\t// Store the next first so another genUID on this\n\t// shard will continue to fail the limit check.\n\tatomic.StoreUint64(&shard.next, base+1)\n\t// Now store to limit, which commits this batch.\n\tatomic.StoreUint64(&shard.limit, base+batchSize)\n\tg.baseLock.Unlock()\n\treturn base\n}\n\nfunc Example_idGenerator() {\n\tids := NewUIDGenerator()\n\n\t// Generate a bunch of UIDs in parallel.\n\tconst nGoroutines = 64\n\tconst nIDs = 500\n\tgeneratedIDs := make([]uint64, nGoroutines*nIDs)\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < nGoroutines; i++ {\n\t\twg.Add(1)\n\t\tgo func(i int) {\n\t\t\t// Generate 500 unique IDs.\n\t\t\tfor j := 0; j < nIDs; j++ {\n\t\t\t\tgeneratedIDs[i*nIDs+j] = ids.GenUID()\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(i)\n\t}\n\twg.Wait()\n\n\t// Check that all UIDs were distinct.\n\tidMap := make(map[uint64]bool)\n\tfor _, id := range generatedIDs {\n\t\tif idMap[id] {\n\t\t\tfmt.Printf(\"ID %d generated more than once\\n\", id)\n\t\t\treturn\n\t\t}\n\t\tidMap[id] = true\n\t}\n\t// Output: All IDs were unique\n\tfmt.Println(\"All IDs were unique\")\n}\n"
  },
  {
    "path": "split/example_rwmutex_test.go",
    "content": "// Copyright 2018 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage split\n\nimport (\n\t\"sync\"\n\t\"sync/atomic\"\n)\n\n// RWMutex is a scalable reader/writer mutual exclusion lock. The lock\n// can be held by an arbitrary number of readers or a single writer.\n// The zero value for a RWMutex is an unlocked mutex.\n//\n// In contrast with sync.RWMutex, this lock attempts to scale to any\n// number of cores simultaneously acquiring read locks. However, this\n// makes obtaining the lock in write mode more expensive.\ntype RWMutex struct {\n\treadLocks *Value\n\twriteLock sync.Mutex\n\tinitLock  sync.Mutex\n\tinit      uint32\n}\n\n// doInit performs lazily initialization on the first use of m.\nfunc (m *RWMutex) doInit() {\n\t// Acquire the initialization lock to protect against\n\t// concurrent initialization.\n\tm.initLock.Lock()\n\tdefer m.initLock.Unlock()\n\tif atomic.LoadUint32(&m.init) != 0 {\n\t\t// Another goroutine initialized the mutex while we\n\t\t// were waiting on the shard lock.\n\t\treturn\n\t}\n\tm.readLocks = New(func(*sync.Mutex) {\n\t\t// Block creating new shards while the write lock is\n\t\t// held.\n\t\tm.writeLock.Lock()\n\t\tm.writeLock.Unlock()\n\t})\n\tatomic.StoreUint32(&m.init, 1)\n}\n\n// Lock acquires m in writer mode. This blocks all readers and\n// writers.\nfunc (m *RWMutex) Lock() {\n\tif atomic.LoadUint32(&m.init) == 0 {\n\t\tm.doInit()\n\t}\n\t// Block other writers and creation of new shards.\n\tm.writeLock.Lock()\n\t// Acquire all read locks.\n\tm.readLocks.Range(func(s *sync.Mutex) {\n\t\ts.Lock()\n\t})\n}\n\n// Unlock releases m from writer mode. 
The mutex must currently be\n// held in writer mode.\nfunc (m *RWMutex) Unlock() {\n\tm.readLocks.Range(func(s *sync.Mutex) {\n\t\ts.Unlock()\n\t})\n\tm.writeLock.Unlock()\n}\n\n// RWMutexRUnlocker is a token used to unlock an RWMutex in read mode.\ntype RWMutexRUnlocker struct {\n\tshard *sync.Mutex\n}\n\n// RLock acquires m in read mode. This blocks other goroutines from\n// acquiring it in write mode, but does not generally block them from\n// acquiring it in read mode. The caller must use the returned\n// RWMutexRUnlocker to release the lock.\nfunc (m *RWMutex) RLock() RWMutexRUnlocker {\n\tif atomic.LoadUint32(&m.init) == 0 {\n\t\tm.doInit()\n\t}\n\tshard := m.readLocks.Get().(*sync.Mutex)\n\tshard.Lock()\n\treturn RWMutexRUnlocker{shard}\n}\n\n// RUnlock releases an RWMutex from read mode.\nfunc (c RWMutexRUnlocker) RUnlock() {\n\tc.shard.Unlock()\n}\n\nfunc Example_rwMutex() {\n\tvar m RWMutex\n\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < 64; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tm.RLock().RUnlock()\n\t\t\twg.Done()\n\t\t}()\n\t}\n\twg.Wait()\n}\n"
  },
  {
    "path": "split/examples_test.go",
    "content": "// Copyright 2018 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage split\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"runtime\"\n\t\"sort\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n)\n\nfunc Example_counter() {\n\t// This example demonstrates concurrent updates to a split\n\t// counter. The counter can be updated using an atomic\n\t// operation. The final result is the sum of the shard values.\n\tcounter := New(func(*uint64) {})\n\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < 64; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tatomic.AddUint64(counter.Get().(*uint64), 1)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\twg.Wait()\n\n\t// Sum up the counter. In this example, the Range isn't\n\t// running concurrently with the updates above, but if it\n\t// were, the sum would be approximate. Specifically, any\n\t// updates that happened between the call to Range and when it\n\t// returns may or may not be included in the sum depending on\n\t// exact timing. For most counters, this is acceptable because\n\t// updates to the counter are already unordered.\n\tvar sum uint64\n\tcounter.Range(func(np *uint64) {\n\t\tsum += atomic.LoadUint64(np)\n\t})\n\tfmt.Println(sum)\n\n\t// Output: 64\n}\n\nfunc Example_counterConsistent() {\n\t// This example is similar to the \"Counter\" example, but the\n\t// counter goes both up and down. 
Specifically, each goroutine\n\t// increments the counter and then decrements the counter, but\n\t// the increment and decrement may happen on different shards.\n\t// The sum of the counter at any instant is between 0 and the\n\t// number of goroutines, but since Range can't see all of the\n\t// shards at the same instant, it may observe a decrement\n\t// without an increment, leading to a negative sum.\n\t//\n\t// In this example, we solve this problem using two-phase\n\t// locking.\n\ttype shard struct {\n\t\tval uint64\n\t\tsync.Mutex\n\t}\n\tcounter := New(func(*shard) {})\n\n\tconst N = 64\n\tvar wg sync.WaitGroup\n\tvar stop uint32\n\tfor i := 0; i < N; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tfor atomic.LoadUint32(&stop) == 0 {\n\t\t\t\ts := counter.Get().(*shard)\n\t\t\t\ts.Lock()\n\t\t\t\ts.val++\n\t\t\t\ts.Unlock()\n\n\t\t\t\t// .. do some work, maybe get moved to\n\t\t\t\t// a different shard ..\n\t\t\t\truntime.Gosched()\n\n\t\t\t\ts = counter.Get().(*shard)\n\t\t\t\ts.Lock()\n\t\t\t\ts.val--\n\t\t\t\ts.Unlock()\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\t// Let the goroutines do some work.\n\ttime.Sleep(time.Millisecond)\n\n\t// Capture a consistent sum by locking all of the shards, then\n\t// unlocking all of them. This must be done in a single Range\n\t// call to prevent the number of shards from changing.\n\tvar sum uint64\n\tcounter.Range(func(s *shard) {\n\t\ts.Lock()\n\t\tsum += s.val\n\t}, func(s *shard) {\n\t\ts.Unlock()\n\t})\n\n\t// Stop the writers.\n\tatomic.StoreUint32(&stop, 1)\n\twg.Wait()\n\n\tif sum < 0 || sum > N {\n\t\tfmt.Println(\"bad sum:\", sum)\n\t}\n\t// Output:\n}\n\nfunc Example_logging() {\n\t// This example demonstrates concurrent appends to a split\n\t// log. Each shard of the log is protected by a mutex. 
The log\n\t// is combined by sorting the log records in timestamp order.\n\t// This example collects a consistent snapshot of the log\n\t// using these timestamps.\n\ttype record struct {\n\t\twhen time.Time\n\t\tmsg  string\n\t}\n\ttype shard struct {\n\t\tsync.Mutex\n\t\tlog []record\n\t}\n\tlogger := New(func(*shard) {})\n\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < 4; i++ {\n\t\twg.Add(1)\n\t\tgo func(i int) {\n\t\t\tfor j := 0; j < 4; j++ {\n\t\t\t\tmsg := fmt.Sprintf(\"goroutine %d message %d\", i, j)\n\t\t\t\tshard := logger.Get().(*shard)\n\t\t\t\tshard.Lock()\n\t\t\t\t// We have to record the time under\n\t\t\t\t// the lock to ensure it's ordered for\n\t\t\t\t// the reader.\n\t\t\t\trec := record{time.Now(), msg}\n\t\t\t\tshard.log = append(shard.log, rec)\n\t\t\t\tshard.Unlock()\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(i)\n\t}\n\twg.Wait()\n\n\t// Collect and sort the log records. This isn't running\n\t// concurrently with log appends, but for demonstration\n\t// purposes it's written so it could. To get a consistent view\n\t// of the log, this uses timestamp ordering: it records the\n\t// current time before calling Range and ignores any records\n\t// from after that time. 
For logs it makes sense to get a\n\t// consistent snapshot: a given worker could move between\n\t// shards and it would be confusing to see later log records\n\t// from that worker without earlier log records.\n\tsnapshot := time.Now()\n\tvar combined []record\n\tlogger.Range(func(val *shard) {\n\t\tval.Lock()\n\t\tlog := val.log\n\t\tval.Unlock()\n\t\t// Trim records after time \"snapshot\".\n\t\ti := sort.Search(len(log), func(i int) bool {\n\t\t\treturn log[i].when.After(snapshot)\n\t\t})\n\t\tlog = log[:i]\n\t\tcombined = append(combined, log...)\n\t})\n\tsort.Slice(combined, func(i, j int) bool { return combined[i].when.Before(combined[j].when) })\n\n\tfor _, rec := range combined {\n\t\tfmt.Println(rec.msg)\n\t}\n\n\t// Unordered output:\n\t// goroutine 3 message 0\n\t// goroutine 3 message 1\n\t// goroutine 3 message 2\n\t// goroutine 3 message 3\n\t// goroutine 2 message 0\n\t// goroutine 1 message 0\n\t// goroutine 1 message 1\n\t// goroutine 1 message 2\n\t// goroutine 1 message 3\n\t// goroutine 2 message 1\n\t// goroutine 0 message 0\n\t// goroutine 2 message 2\n\t// goroutine 2 message 3\n\t// goroutine 0 message 1\n\t// goroutine 0 message 2\n\t// goroutine 0 message 3\n}\n\nfunc Example_randomSource() {\n\t// This example demonstrates concurrent random number\n\t// generation using split random number generators.\n\tvar seed int64\n\ttype lockedRand struct {\n\t\tsync.Mutex\n\t\t*rand.Rand\n\t}\n\trandSource := New(func(r *lockedRand) {\n\t\tr.Rand = rand.New(rand.NewSource(atomic.AddInt64(&seed, 1)))\n\t})\n\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < 64; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tfor j := 0; j < 64; j++ {\n\t\t\t\t// Generate a random number using a\n\t\t\t\t// local random source. 
rand.Rand\n\t\t\t\t// isn't thread-safe, so we lock it.\n\t\t\t\tr := randSource.Get().(*lockedRand)\n\t\t\t\tr.Lock()\n\t\t\t\tfmt.Println(r.Int())\n\t\t\t\tr.Unlock()\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t}\n\twg.Wait()\n}\n\nfunc Example_optimisticTransactions() {\n\t// This example demonstrates computing an instant-in-time\n\t// consistent snapshot of a sharded value without blocking\n\t// writers. Writers in this example can update multiple shards\n\t// transactionally, so this requires careful synchronization\n\t// between readers and writers to get a sequentially\n\t// consistent view of the entire sharded value.\n\t//\n\t// Each transaction moves a \"unit\" between two shards.\n\t// Initially all shards have a count of 0. Each writer\n\t// repeatedly picks two shards and transactionally decrements\n\t// the value of one shard and increments the value of the\n\t// other. Hence, at any instant, the shards should all sum to\n\t// 0.\n\t//\n\t// Since the Range callback doesn't see all shards at the same\n\t// instant, it can't simply add up the values of the shards.\n\t// If it did, the following could happen:\n\t//\n\t// 1. Suppose there are two shards with counts {0, 0}\n\t//\n\t// 2. Goroutine 1 calls Range. The callback reads shard 1,\n\t// which is 0, and adds 0 to the sum.\n\t//\n\t// 3. On goroutine 2, a writer transactionally moves a unit\n\t// from shard 1 to shard 2, so now the shard values are {-1,\n\t// 1}.\n\t//\n\t// 4. On goroutine 1, the Range continues and the callback\n\t// reads shard 2, which has value 1, and adds 1 to the sum.\n\t//\n\t// Now the value of the sum is 1, even though at any given\n\t// instant all of the shards added up to 0.\n\t//\n\t// This examples solves this using a sequence number in each\n\t// shard that is updated on every change to that shard. The\n\t// reader reads all of the shards repeatedly until it gets two\n\t// complete reads in a row where the sequence numbers didn't\n\t// change. 
This means no modifications raced with the read, so\n\t// it observed a consistent snapshot.\n\n\ttype shard struct {\n\t\torder uint32 // Lock order of the shards.\n\t\tval   int64  // The unit count of this shard.\n\t\tseq   uint64 // Sequence number; the low bit indicates this shard is unstable.\n\t}\n\tvar lockOrder uint32\n\tval := New(func(s *shard) {\n\t\ts.order = atomic.AddUint32(&lockOrder, 1) - 1\n\t})\n\n\tacquireSeq := func(p *uint64) {\n\t\t// \"Acquire\" a sequence counter by spinning until the\n\t\t// counter is even and then incrementing it.\n\t\tfor {\n\t\t\tv := atomic.LoadUint64(p)\n\t\t\tif v&1 == 0 && atomic.CompareAndSwapUint64(p, v, v+1) {\n\t\t\t\treturn\n\t\t\t}\n\t\t\truntime.Gosched()\n\t\t}\n\t}\n\n\t// Start a set of writer goroutines.\n\tvar wg sync.WaitGroup\n\tvar stop uint32\n\tfor i := 0; i < 64; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tfor atomic.LoadUint32(&stop) == 0 {\n\t\t\t\t// Pick a first shard.\n\t\t\t\tshard1 := val.Get().(*shard)\n\t\t\t\t// Try to get moved to a different shard.\n\t\t\t\truntime.Gosched()\n\t\t\t\t// Pick a second shard.\n\t\t\t\tshard2 := val.Get().(*shard)\n\t\t\t\tif shard1 == shard2 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\t// Put the shards in lock order.\n\t\t\t\tlock1, lock2 := shard1, shard2\n\t\t\t\tif lock1.order > lock2.order {\n\t\t\t\t\tlock1, lock2 = lock2, lock1\n\t\t\t\t}\n\n\t\t\t\t// Lock both shards. 
Odd sequence\n\t\t\t\t// numbers also indicates their values\n\t\t\t\t// are unstable.\n\t\t\t\tacquireSeq(&lock1.seq)\n\t\t\t\tacquireSeq(&lock2.seq)\n\n\t\t\t\t// Move a unit from shard1 to shard2.\n\t\t\t\tatomic.AddInt64(&shard1.val, -1)\n\t\t\t\tatomic.AddInt64(&shard2.val, +1)\n\n\t\t\t\t// Increment the sequence numbers\n\t\t\t\t// again to indicate the shards\n\t\t\t\t// changed, but are now stable.\n\t\t\t\tatomic.AddUint64(&lock1.seq, 1)\n\t\t\t\tatomic.AddUint64(&lock2.seq, 1)\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\t// Let the writers get going.\n\ttime.Sleep(time.Millisecond)\n\n\t// Retrieve a consistent sum of the shards. The sum should be\n\t// zero. This uses optimistic concurrency control and does not\n\t// block the writer, so it may have to read the shards\n\t// multiple times until it gets two reads in a row where none\n\t// of the sequence numbers have changed.\n\tvar valSum int64\n\tvar prevSeqSum uint64\n\tfor {\n\t\tvalSum = 0\n\t\tvar seqSum uint64\n\t\tval.Range(func(s *shard) {\n\t\t\t// Within just this shard, we also need to\n\t\t\t// perform a consistent read of its value and\n\t\t\t// sequence number. If we could read both\n\t\t\t// fields in a single atomic operation, this\n\t\t\t// wouldn't be necessary, but since we can't,\n\t\t\t// we also use optimistic concurrency within\n\t\t\t// the shard.\n\t\t\tfor {\n\t\t\t\t// Wait until the sequence number is\n\t\t\t\t// even, indicating that the sequence\n\t\t\t\t// number and value are stable.\n\t\t\t\tvar seq1 uint64\n\t\t\t\tfor {\n\t\t\t\t\tif seq1 = atomic.LoadUint64(&s.seq); seq1&1 == 0 {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\truntime.Gosched()\n\t\t\t\t}\n\t\t\t\t// Read the value optimistically.\n\t\t\t\tval := atomic.LoadInt64(&s.val)\n\t\t\t\t// Re-read the sequence number. If it\n\t\t\t\t// hasn't changed, then we know we got\n\t\t\t\t// a consistent read of both the value\n\t\t\t\t// and the sequence number. 
Otherwise,\n\t\t\t\t// try again.\n\t\t\t\tif atomic.LoadUint64(&s.seq) == seq1 {\n\t\t\t\t\t// Got a consistent read.\n\t\t\t\t\t// Update the value sum and\n\t\t\t\t\t// the sequence number\n\t\t\t\t\t// snapshot.\n\t\t\t\t\tvalSum += val\n\t\t\t\t\tseqSum += seq1\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t\tif seqSum == prevSeqSum {\n\t\t\t// We got two reads of the shards in a row\n\t\t\t// with the same sequence numbers. That means\n\t\t\t// no updates happened between those reads, so\n\t\t\t// the values we observed were consistent.\n\t\t\tbreak\n\t\t}\n\t\tprevSeqSum = seqSum\n\t}\n\n\t// Exit all workers.\n\tatomic.StoreUint32(&stop, 1)\n\twg.Wait()\n\n\tfmt.Printf(\"Values sum to %d\\n\", valSum)\n\t// Output: Values sum to 0\n}\n\n// TODO: SRCU-style grace period algorithm? Consistent counter using\n// two epochs (though I'm not sure what it could be tracking that\n// would require a sequentially consistent snapshot)?\n"
  },
  {
    "path": "split/stub.s",
    "content": "// Copyright 2018 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Allow linkname-ing procPin and procUnpin.\n"
  },
  {
    "path": "split/value.go",
    "content": "// Copyright 2018 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Defensively block building on untested versions:\n// +build go1.8,!go1.12\n\n// Package split provides a logical value type that is split across\n// one or more shards to achieve better parallelism.\n//\n// Split values have many uses, but are primarily for optimizing\n// \"write-mostly\" shared data structures that have commutative\n// operations. Split values allow concurrent updates to happen on\n// different shards, which minimizes contention between updates.\n// However, reading the entire value requires combining all of these\n// shards, which is a potentially expensive operation.\n//\n// WARNING: This package depends on Go runtime internals. It has been\n// tested with Go 1.8 through Go 1.10, but may not work with older or\n// newer versions.\npackage split\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"unsafe\"\n)\n\nconst cacheLineBytes = 128\n\n// Value represents a logical value split across one or more shards.\n// The shards are arranged to minimize contention when different\n// shards are accessed concurrently.\ntype Value struct {\n\tstore     unsafe.Pointer\n\tptrType   unsafe.Pointer\n\tshardSize uintptr\n\tlen       int\n\tcbType    reflect.Type\n}\n\ntype emptyInterface struct {\n\ttyp  unsafe.Pointer\n\tword unsafe.Pointer\n}\n\n// New returns a new Value. The constructor argument must be a\n// function with type func(*T), where T determines the type that will\n// be stored in each shard. New will initialize each shard to the zero\n// value of T and then call constructor with a pointer to the shard to\n// perform any further initialization. 
The constructor function may\n// also be called in the future if new shards are created.\nfunc New(constructor interface{}) *Value {\n\tct := reflect.TypeOf(constructor)\n\tif ct.Kind() != reflect.Func || ct.NumIn() != 1 || ct.NumOut() != 0 || ct.In(0).Kind() != reflect.Ptr {\n\t\tpanic(\"New constructor must be func(*T) for some type T\")\n\t}\n\tet := ct.In(0).Elem()\n\n\t// Embed et in a struct so we can pad it out to a cache line.\n\t//\n\t// TODO: If et is small, this can stride-allocate multiple\n\t// Values together. Would need non-trivial runtime support,\n\t// but would save a lot of space. We could do this for\n\t// pointer-free types without runtime support and maybe types\n\t// that are just a pointer.\n\tshardSize := (et.Size() + (cacheLineBytes - 1)) &^ (cacheLineBytes - 1)\n\tpadding := shardSize - et.Size()\n\tpadded := reflect.StructOf([]reflect.StructField{\n\t\t{Name: \"X\", Type: et},\n\t\t{Name: \"Pad\", Type: reflect.ArrayOf(int(padding), byteType)},\n\t})\n\n\t// Allocate backing store.\n\tnproc := runtime.GOMAXPROCS(-1)\n\tstore := reflect.New(reflect.ArrayOf(nproc, padded))\n\n\t// Get pointer-to-element type.\n\tpet := reflect.PtrTo(et)\n\tpetz := reflect.Zero(pet).Interface()\n\tptrType := (*emptyInterface)(unsafe.Pointer(&petz)).typ\n\n\tv := &Value{\n\t\tstore:     unsafe.Pointer(store.Pointer()),\n\t\tptrType:   ptrType,\n\t\tshardSize: shardSize,\n\t\tlen:       nproc,\n\t\tcbType:    ct, // func(T*) type, same as constructor.\n\t}\n\n\t// Initialize each shard.\n\tv.Range(constructor)\n\n\treturn v\n}\n\nvar byteType = reflect.TypeOf(byte(0))\n\n// Get returns a pointer to some shard of v.\n//\n// Get may return the same pointer to multiple goroutines, so the\n// caller is responsible for synchronizing concurrent access to the\n// returned value. This can be done using atomic operations or locks,\n// just like any other shared structure.\n//\n// Get attempts to maintain CPU locality and contention-freedom of\n// shards. 
That is, two calls to Get from the same CPU are likely to\n// return the same pointer, while calls to Get from different CPUs are\n// likely to return different pointers. Furthermore, accessing\n// different shards in parallel is unlikely to result in cache\n// contention.\nfunc (v *Value) Get() interface{} {\n\t// Get the P ID.\n\t//\n\t// TODO: Could use CPU ID instead of P ID. Would get even\n\t// better cache locality and limit might be more fixed.\n\t//\n\t// TODO: We don't need pinning here.\n\tpid := runtime_procPin()\n\truntime_procUnpin()\n\n\t// This is 10% faster than procPin/procUnpin. It requires the\n\t// following patch to the runtime:\n\t////go:linkname sync_split_procID sync/split.procID\n\t//func sync_split_procID() int {\n\t//\treturn int(getg().m.p.ptr().id)\n\t//}\n\t//pid := procID()\n\n\t// This is 30% faster than procPin/procUnpin. It requires the\n\t// following patch to the runtime:\n\t//func ProcID() int {\n\t//\treturn int(getg().m.p.ptr().id)\n\t//}\n\t// However, it's unclear how to do this without exposing public API.\n\t//pid := runtime.ProcID()\n\n\tif pid > v.len {\n\t\t// TODO: Grow the backing store if pid is larger than\n\t\t// store. This is tricky because we may have handed\n\t\t// out pointers into the current store. Probably this\n\t\t// is only possible with a level of indirection that\n\t\t// lets us allocate the backing store in multiple\n\t\t// segments. Then we can do an RCU-style update on the\n\t\t// index structure. We may want to limit the number of\n\t\t// shards to something sane anyway (e.g., 1024). How\n\t\t// would this synchronize with Range? 
E.g., if Range\n\t\t// iterator is going through locking everything, it\n\t\t// would be bad if Get then made a new, unlocked\n\t\t// element.\n\t\tpid = int(uint(pid) % uint(v.len))\n\t}\n\tval := emptyInterface{\n\t\ttyp:  v.ptrType,\n\t\tword: v.shard(pid),\n\t}\n\treturn *(*interface{})(unsafe.Pointer(&val))\n}\n\nfunc (v *Value) shard(shard int) unsafe.Pointer {\n\t// The caller must ensure that 0 <= shard < v.len.\n\treturn unsafe.Pointer(uintptr(v.store) + v.shardSize*uintptr(shard))\n}\n\n// Range calls each of its argument functions with pointers to all of\n// the shards in v. Each argument must be a function with type\n// func(*T), where T is the shard type of the Value.\n//\n// Range calls its first argument N times with a pointer to each of\n// the N shards of v. It then calls its second argument with each\n// shard, and so on. Range guarantees that the set of shards and their\n// order will not change during this process. This makes it safe to\n// implement multi-pass algorithms, such as locking all of the shards\n// and then unlocking all of the shards.\n//\n// Multiple calls to Range are not guaranteed to observe the same set\n// of shards, so algorithms that need a consistent view of the shards\n// must make a single call to Range with multiple functions.\n//\n// Multiple calls to Range are guaranteed to traverse the shards in a\n// consistent order. While different calls may traverse more or fewer\n// shards, if any Range traverses shard A before shard B, all Range\n// calls will do so. Uses of Range that acquire locks on multiple\n// shards can depend on this for lock ordering.\n//\n// Range calls each function sequentially, so it's safe to update\n// local state without synchronization. 
However, the functions may run\n// concurrently with other goroutines calling Get or Range, so they\n// must synchronize access to shard values.\nfunc (v *Value) Range(fn ...interface{}) {\n\t// \"Type check\" all of the fn arguments before calling\n\t// anything.\n\t//\n\t// TODO: Accept any func(U) where *T is assignable to U (like\n\t// runtime.SetFinalizer).\n\tfor _, fn1 := range fn {\n\t\tif reflect.TypeOf(fn1) != v.cbType {\n\t\t\tpanic(fmt.Sprintf(\"Range expected %s, got %T\", v.cbType, fn1))\n\t\t}\n\t}\n\n\t// TODO: If we grow the backing store, this needs to block\n\t// growing if there are multiple passes (it doesn't have to if\n\t// there's one pass, but it has to handle it very carefully).\n\tfor _, fn1 := range fn {\n\t\t// Cast fn1 to a function with equivalent calling\n\t\t// convention.\n\t\tvar fn1Generic func(unsafe.Pointer)\n\t\t*(*unsafe.Pointer)(unsafe.Pointer(&fn1Generic)) = ((*emptyInterface)(unsafe.Pointer(&fn1)).word)\n\t\t// Call function on each shard.\n\t\tfor i := 0; i < v.len; i++ {\n\t\t\tfn1Generic(v.shard(i))\n\t\t}\n\t}\n}\n\n//go:linkname runtime_procPin runtime.procPin\nfunc runtime_procPin() int\n\n//go:linkname runtime_procUnpin runtime.procUnpin\nfunc runtime_procUnpin()\n\n// Provided by the runtime (with patch above).\nfunc procID() int\n"
  },
  {
    "path": "split/vlogger_test.go",
    "content": "// Copyright 2018 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage split\n\nimport (\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n)\n\n// valueLoggerLocked is an implementation of a value logger that uses\n// locking to protect concurrent access.\ntype valueLoggerLocked struct {\n\tsync.Mutex\n\tvals *valueLoggerBuf\n\tpos  int\n}\n\nfunc newValueLoggerLocked() valueLoggerLocked {\n\tvar l valueLoggerLocked\n\tl.vals = valueLoggerBufPool.Get().(*valueLoggerBuf)\n\treturn l\n}\n\nfunc (l *valueLoggerLocked) append(v uint64) {\n\tl.Lock()\n\tl.vals[l.pos] = v\n\tl.pos++\n\tif l.pos == len(l.vals) {\n\t\tbuf := l.vals\n\t\tl.vals = new(valueLoggerBuf)\n\t\tl.pos = 0\n\t\tl.Unlock()\n\t\tl.process(buf)\n\t} else {\n\t\tl.Unlock()\n\t}\n}\n\nfunc (l *valueLoggerLocked) process(buf *valueLoggerBuf) {\n\t// In a real system, this would do something with the data in\n\t// buf. 
Here we just discard it.\n\tvalueLoggerBufPool.Put(buf)\n}\n\nfunc BenchmarkLazyAggregationSplitLocked(b *testing.B) {\n\t// Benchmark a lazy aggregating value logger that uses locking\n\t// instead of atomics.\n\tlogger := New(func(l *valueLoggerLocked) { *l = newValueLoggerLocked() })\n\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor i := uint64(0); pb.Next(); i++ {\n\t\t\tlogger.Get().(*valueLoggerLocked).append(i)\n\t\t}\n\t})\n}\n\nconst (\n\tlog2ValueLoggerBuf  = 8 // 256 entries per buffer\n\tlog2ValueLoggerBufs = 1 // Double buffering\n\n\tvalueLoggerIndexShift = 64 - (log2ValueLoggerBuf + log2ValueLoggerBufs)\n\tactiveWriterBits      = 1 + log2ValueLoggerBuf // Room for max writers to a buffer, plus mark bit.\n\tbufMarkMask           = 1 << (activeWriterBits - 1)\n)\n\ntype valueLoggerBuf [1 << log2ValueLoggerBuf]uint64\n\nvar valueLoggerBufPool = sync.Pool{New: func() interface{} { return new(valueLoggerBuf) }}\n\n// valueLoggerAtomic is a value logger that uses atomics to protect\n// concurrent access.\ntype valueLoggerAtomic struct {\n\t// control is the buffer control field. It consists of several\n\t// bit fields. The low bits consist of N fields that are each\n\t// activeWriterBits wide and corresponds to indexes into vals.\n\t// Field i counts the number of active writers to vals[i],\n\t// plus a bufMarkMask bit that indicates vals[i] is full.\n\t//\n\t// Bits valueLoggerIndexShift and up are an index into the\n\t// logical ring buffer formed by concatenating vals.\n\t//\n\t// TODO: Put this bit packing behind a type with methods?\n\tcontrol uint64\n\t// vals is a double-buffered (though it could be more) ring\n\t// buffer for storing values. Using a pair of buffers allows\n\t// writes to proceed in one buffer while the other buffer is\n\t// being reallocated.\n\tvals [1 << log2ValueLoggerBufs]*valueLoggerBuf\n\t// allocLock protects allocating new buffers for vals. 
Access\n\t// to vals is already synchronized by control, but this offers\n\t// a convenient way to block writers waiting on a buffer to be\n\t// swapped out.\n\tallocLock sync.Mutex\n}\n\nfunc newValueLogger() valueLoggerAtomic {\n\tvar l valueLoggerAtomic\n\tfor i := range l.vals {\n\t\tl.vals[i] = valueLoggerBufPool.Get().(*valueLoggerBuf)\n\t}\n\treturn l\n}\n\nfunc (l *valueLoggerAtomic) append(v uint64) {\n\t// Claim a slot and increment the active count for that\n\t// buffer. The active count acts as a lock on vals[bufIdx].\n\tvar i, bufIdx, activeShift uint64\n\tfor {\n\t\tc := atomic.LoadUint64(&l.control)\n\t\ti = c >> valueLoggerIndexShift\n\t\tbufIdx = i / uint64(len(valueLoggerBuf{}))\n\t\tactiveShift = bufIdx * activeWriterBits\n\t\tif (c>>activeShift)&bufMarkMask != 0 {\n\t\t\t// This buffer is still being swapped out.\n\t\t\t// Wait for it and retry.\n\t\t\tl.allocLock.Lock()\n\t\t\tl.allocLock.Unlock()\n\t\t\tcontinue\n\t\t}\n\n\t\t// Increment the index. This depends on uint64\n\t\t// wrap-around.\n\t\tnewC := c + 1<<valueLoggerIndexShift\n\t\t// Increment the active writer count.\n\t\tnewC += 1 << activeShift\n\n\t\tif atomic.CompareAndSwapUint64(&l.control, c, newC) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Put the value in the slot we claimed.\n\tl.vals[bufIdx][i%uint64(len(valueLoggerBuf{}))] = v\n\n\t// Decrement the active writer count for the buffer. If this\n\t// wrote to the last slot, set the buffer mark. 
If this is the\n\t// last writer to this buffer and the buffer is marked, this\n\t// writer is responsible for re-allocating the buffer.\n\tfor {\n\t\tc := atomic.LoadUint64(&l.control)\n\t\t// Decrement the active writer count for this buffer.\n\t\tnewC := c + (^uint64(0) << activeShift)\n\t\t// If this wrote to the last slot, set the buffer mark.\n\t\tif i%uint64(len(valueLoggerBuf{})) == uint64(len(valueLoggerBuf{})-1) {\n\t\t\tnewC |= bufMarkMask << activeShift\n\t\t}\n\t\tif atomic.CompareAndSwapUint64(&l.control, c, newC) {\n\t\t\t// If this was the last writer to this buffer\n\t\t\t// and it's marked, this writer is\n\t\t\t// responsible for re-allocating the buffer.\n\t\t\tif (newC>>activeShift)&(1<<activeWriterBits-1) != bufMarkMask {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// This writer is responsible for re-allocating the buffer.\n\tl.allocLock.Lock()\n\tcompleteBuf := l.vals[bufIdx]\n\tl.vals[bufIdx] = valueLoggerBufPool.Get().(*valueLoggerBuf)\n\t// Clear the buffer mark so writers can use this\n\t// buffer slot again. Too bad there's no AndUint64.\n\tfor {\n\t\tc := atomic.LoadUint64(&l.control)\n\t\tnewC := c &^ (bufMarkMask << activeShift)\n\t\tif atomic.CompareAndSwapUint64(&l.control, c, newC) {\n\t\t\tbreak\n\t\t}\n\t}\n\tl.allocLock.Unlock()\n\tl.process(completeBuf)\n}\n\nfunc (l *valueLoggerAtomic) process(buf *valueLoggerBuf) {\n\t// In a real system, this would do something with the data in\n\t// buf. 
Here we just discard it.\n\tvalueLoggerBufPool.Put(buf)\n}\n\nfunc BenchmarkLazyAggregationSplitAtomic(b *testing.B) {\n\t// Benchmark a lazy aggregating value logger.\n\tlogger := New(func(l *valueLoggerAtomic) { *l = newValueLogger() })\n\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor i := uint64(0); pb.Next(); i++ {\n\t\t\tlogger.Get().(*valueLoggerAtomic).append(i)\n\t\t}\n\t})\n}\n\nfunc BenchmarkLazyAggregationShared(b *testing.B) {\n\t// Non-sharded version of BenchmarkLazyAggregation.\n\tlogger := newValueLogger()\n\n\tb.RunParallel(func(pb *testing.PB) {\n\t\tfor i := uint64(0); pb.Next(); i++ {\n\t\t\tlogger.append(i)\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "srgb/main.go",
    "content": "package main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"image\"\n\t\"image/png\"\n\t\"log\"\n\t\"os\"\n\n\t\"golang.org/x/image/draw\"\n)\n\nfunc main() {\n\tvar err error\n\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(os.Stderr, \"Usage: %s input output\\n\", os.Args[0])\n\t}\n\tflag.Parse()\n\tif flag.NArg() != 2 {\n\t\tflag.Usage()\n\t\tos.Exit(2)\n\t}\n\n\t// Read input file.\n\tf, err := os.Open(os.Args[1])\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer f.Close()\n\tsrc, _, err := image.Decode(f)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// Scale down by a factor of 2.\n\tsb := src.Bounds()\n\tdst := image.NewRGBA(image.Rect(0, 0, sb.Dx()/2, sb.Dy()/2))\n\tdraw.BiLinear.Scale(dst, dst.Bounds(), src, sb, draw.Over, nil)\n\n\t// Write output file.\n\tif f, err = os.Create(os.Args[2]); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer f.Close()\n\tif err := png.Encode(f, dst); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n"
  },
  {
    "path": "stackmapcompress.py",
    "content": "# -*- indent-tabs-mode: nil -*-\n\n# Parse output of \"go build -gcflags=all=-S -a cmd/go >& /tmp/go.s\" and\n# compress register liveness maps in various ways.\n\nimport re\nimport sys\nimport collections\n\nif True:\n    # Register maps\n    FUNCDATA = \"3\"\n    PCDATA = \"2\"\nelse:\n    # Stack maps\n    FUNCDATA = \"1\" # Locals (not args)\n    PCDATA = \"0\"\n\nclass Stackmap:\n    def __init__(self, dec=None):\n        if dec is None:\n            self.n = self.nbit = 0\n            self.bitmaps = []\n        else:\n            # Decode Go encoding of a runtime.stackmap.\n            n = dec.int32()\n            self.nbit = dec.int32()\n            self.bitmaps = [dec.bitmap(self.nbit) for i in range(n)]\n\n    def clone(self):\n        enc = Encoder()\n        self.encode(enc)\n        return Stackmap(Decoder(enc.b))\n\n    def add(self, bitmap):\n        nbit, b2 = 0, bitmap\n        while b2 != 0:\n            nbit += 1\n            b2 >>= 1\n        self.nbit = max(nbit, self.nbit)\n        for i, b2 in enumerate(self.bitmaps):\n            if bitmap == b2:\n                return i\n        self.bitmaps.append(bitmap)\n        return len(self.bitmaps)-1\n\n    def sort(self):\n        s = sorted((b, i) for i, b in enumerate(self.bitmaps))\n        self.bitmaps = [b for b, i in s]\n        return [i for b, i in s]\n\n    def encode(self, enc, compact=False):\n        enc.int32(len(self.bitmaps))\n        if compact:\n            enc.uint8(self.nbit)\n            combined = 0\n            for i, b in enumerate(self.bitmaps):\n                combined |= b << (i * self.nbit)\n            enc.bitmap(combined, len(self.bitmaps) * self.nbit)\n        else:\n            enc.int32(self.nbit)\n            for b in self.bitmaps:\n                enc.bitmap(b, self.nbit)\n\nclass PCData:\n    def __init__(self):\n        self.pcdata = []\n\n    def encode(self, enc):\n        last = (0, 0)\n        for e in self.pcdata:\n            
enc.uvarint(e[0] - last[0])\n            enc.svarint(e[1] - last[1])\n            last = e\n        enc.uint8(0)\n\n    def huffSize(self, pcHuff, valHuff):\n        bits = 0\n        lastPC = 0\n        for pc, val in self.pcdata:\n            bits += pcHuff[pc - lastPC][1] + valHuff[val][1]\n            lastPC = pc\n        return (bits + 7) // 8\n\n    def grSize(self, pcHuff, n):\n        bits = 0\n        lastPC = 0\n        for pc, val in self.pcdata:\n            bits += pcHuff[pc - lastPC][1]\n            lastPC = pc\n            bits += grSize(val + 1, n)\n        return (bits + 7) // 8\n\ndef grSize(val, n):\n    \"\"\"The number of bits in the Golomb-Rice coding of val in base 2^n.\"\"\"\n    return 1 + (val >> n) + n\n\nclass Decoder:\n    def __init__(self, b):\n        self.b = memoryview(b)\n\n    def int32(self):\n        b = self.b\n        self.b = b[4:]\n        return b[0] + (b[1] << 8) + (b[2] << 16) + (b[3] << 24)\n\n    def bitmap(self, nbits):\n        bitmap = 0\n        nbytes = (nbits + 7) // 8\n        for i in range(nbytes):\n            bitmap = bitmap | (self.b[i] << (i*8))\n        self.b = self.b[nbytes:]\n        return bitmap\n\nclass Encoder:\n    def __init__(self):\n        self.b = bytearray()\n\n    def uint8(self, i):\n        self.b.append(i)\n\n    def int32(self, i):\n        self.b.extend([i&0xFF, (i>>8)&0xFF, (i>>16)&0xFF, (i>>24)&0xFF])\n\n    def bitmap(self, bits, nbits):\n        for i in range((nbits + 7) // 8):\n            self.b.append((bits >> (i*8)) & 0xFF)\n\n    def uvarint(self, v):\n        if v < 0:\n            raise ValueError(\"negative unsigned varint\", v)\n        while v > 0x7f:\n            self.b.append((v & 0x7f) | 0x80)\n            v >>= 7\n        self.b.append(v)\n\n    def svarint(self, v):\n        ux = v << 1\n        if v < 0:\n            ux = ~ux\n        self.uvarint(ux)\n\ndef parse(stream):\n    import parseasm\n    objs = parseasm.parse(stream)\n    fns = []\n    for obj in 
objs.values():\n        if not isinstance(obj, parseasm.Func):\n            continue\n        fns.append(obj)\n        obj.regMaps = []        # [(pc, register bitmap)]\n        regMap = None\n        for inst in obj.insts:\n            if inst.asm.startswith(\"FUNCDATA\\t$\"+FUNCDATA+\", \"):\n                regMapSym = inst.asm.split(\" \")[1][:-4]\n                regMap = Stackmap(Decoder(objs[regMapSym].data))\n            elif inst.asm.startswith(\"PCDATA\\t$\"+PCDATA+\", \"):\n                idx = int(inst.asm.split(\" \")[1][1:])\n                obj.regMaps.append((inst.pc, regMap.bitmaps[idx]))\n    return fns\n\ndef genStackMaps(fns, padToByte=True, dedup=True, sortBitmaps=False):\n    regMapSet = {}\n\n    for fn in fns:\n        # Create pcdata and register map for fn.\n        fn.pcdataRegs = PCData()\n        fn.funcdataRegMap = Stackmap()\n        for (pc, bitmap) in fn.regMaps:\n            fn.pcdataRegs.pcdata.append((pc, fn.funcdataRegMap.add(bitmap)))\n\n        if sortBitmaps:\n            remap = fn.funcdataRegMap.sort()\n            fn.pcdataRegs.pcdata = [(pc, remap[idx]) for pc, idx in fn.pcdataRegs.pcdata]\n\n        # Encode and dedup register maps.\n        if dedup:\n            e = Encoder()\n            fn.funcdataRegMap.encode(e, not padToByte)\n            regMap = bytes(e.b)\n            if regMap in regMapSet:\n                fn.funcdataRegMap = regMapSet[regMap]\n            else:\n                regMapSet[regMap] = fn.funcdataRegMap\n        else:\n            regMapSet[fn] = fn.funcdataRegMap\n\n    return regMapSet.values()\n\ndef likeStackMap(fns, padToByte=True, dedup=True, sortBitmaps=None, huffmanPcdata=False, grPcdata=False):\n    regMapSet = set()\n    regMaps = bytearray()\n    pcdatas = [] #Encoder()\n    extra = 0\n    for fn in fns:\n        # Create pcdata and register map for fn.\n        pcdata = PCData()\n        regMap = Stackmap()\n        if sortBitmaps == \"freq\":\n            # Pre-populate regMap in frequency order.\n          
  regMapFreq = collections.Counter()\n            for pc, bitmap in fn.regMaps:\n                regMapFreq[bitmap] += 1\n            for bitmap, freq in sorted(regMapFreq.items(), key=lambda item: item[1], reverse=True):\n                regMap.add(bitmap)\n        for pc, bitmap in fn.regMaps:\n            pcdata.pcdata.append((pc, regMap.add(bitmap)))\n\n        if sortBitmaps == \"value\":\n            remap = regMap.sort()\n            pcdata.pcdata = [(pc, remap[idx]) for pc, idx in pcdata.pcdata]\n\n        pcdatas.append(pcdata)\n\n        # Encode register map and dedup.\n        e = Encoder()\n        regMap.encode(e, not padToByte)\n        regMap = bytes(e.b)\n        if not dedup or regMap not in regMapSet:\n            regMapSet.add(regMap)\n            regMaps.extend(regMap)\n\n        extra += 8 + 4 # funcdata pointer, pcdata table offset\n\n    # Encode pcdata.\n    pcdataEnc = Encoder()\n    if huffmanPcdata or grPcdata:\n        pcDeltas, _ = countDeltas(fns)\n        pcdataHist = collections.Counter()\n        for pcdata in pcdatas:\n            for _, idx in pcdata.pcdata:\n                pcdataHist[idx] += 1\n        pcHuff = huffman(pcDeltas)\n        pcdataHuff = huffman(pcdataHist)\n        size = 0\n        for pcdata in pcdatas:\n            if huffmanPcdata:\n                size += pcdata.huffSize(pcHuff, pcdataHuff)\n            elif grPcdata:\n                size += pcdata.grSize(pcHuff, grPcdata)\n        pcdataEnc.b = \"\\0\" * size # Whatever\n    else:\n        for pcdata in pcdatas:\n            pcdata.encode(pcdataEnc)\n\n    return {\"gclocals\": len(regMaps), \"pcdata\": len(pcdataEnc.b), \"extra\": extra}\n\ndef filterLiveToDead(fns):\n    # Only emit pcdata if something becomes newly-live (this is a\n    # lower bound on what the \"don't care\" optimization could\n    # achieve).\n    for fn in fns:\n        newRegMaps = []\n        prevBitmap = 0\n        for (pc, bitmap) in fn.regMaps:\n            if bitmap is None:\n   
             newRegMaps.append((pc, None))\n                prevBitmap = 0\n                continue\n            if bitmap & ~prevBitmap != 0:\n                # New bits set.\n                newRegMaps.append((pc, bitmap))\n            prevBitmap = bitmap\n        fn.regMaps = newRegMaps\n\ndef total(dct):\n    dct[\"total\"] = 0\n    dct[\"total\"] = sum(dct.values())\n    return dct\n\ndef iterDeltas(regMaps):\n    prevPC = prevBitmap = 0\n    for (pc, bitmap) in regMaps:\n        pcDelta = pc - prevPC\n        prevPC = pc\n\n        if bitmap is None:\n            bitmapDelta = None\n            prevBitmap = 0\n        else:\n            bitmapDelta = bitmap ^ prevBitmap\n            prevBitmap = bitmap\n\n        yield pcDelta, bitmapDelta\n\ndef countMaps(fns):\n    maps = collections.Counter()\n    for fn in fns:\n        for _, bitmap in fn.regMaps:\n            maps[bitmap] += 1\n    return maps\n\ndef countDeltas(fns):\n    pcDeltas, deltas = collections.Counter(), collections.Counter()\n    # This actually spreads out the head of the distribution quite a bit\n    # because things are more likely to die in clumps and at the same time\n    # as something else becomes live.\n    #filterLiveToDead(fns)\n    for fn in fns:\n        for pcDelta, bitmapDelta in iterDeltas(fn.regMaps):\n            pcDeltas[pcDelta] += 1\n            deltas[bitmapDelta] += 1\n    return pcDeltas, deltas\n\ndef huffman(counts, streamAlign=1):\n    code = [(count, val) for val, count in counts.items()]\n    radix = 2**streamAlign\n    while len(code) > 1:\n        code.sort(key=lambda x: x[0], reverse=True)\n        if len(code) < radix:\n            children, code = code, []\n        else:\n            children, code = code[-radix:], code[:-radix]\n        code.append((sum(child[0] for child in children),\n                     [child[1] for child in children]))\n    tree = {}\n    def mktree(node, codeword, bits):\n        if isinstance(node, list):\n            for i, child in 
enumerate(node):\n                mktree(child, (codeword << streamAlign) + i, bits + streamAlign)\n        else:\n            tree[node] = (codeword, bits)\n    mktree(code[0][1], 0, 0)\n    return tree\n\ndef huffmanCoded(fns, streamAlign=1):\n    pcDeltas, maskDeltas = countDeltas(fns)\n    hPCs = huffman(pcDeltas, streamAlign)\n    hBitmaps = huffman(maskDeltas, streamAlign)\n\n    pcdataBits = 0\n    extra = 0\n    for fn in fns:\n        for pcDelta, bitmapDelta in iterDeltas(fn.regMaps):\n            pcdataBits += hPCs[pcDelta][1] + hBitmaps[bitmapDelta][1]\n        pcdataBits = (pcdataBits + 7) &~ 7 # Byte align\n        extra += 4                         # PCDATA\n    return {\"pcdata\": (pcdataBits + 7) // 8, \"extra\": extra}\nfns = parse(sys.stdin)\n\nif True:\n    print(total(likeStackMap(fns)))\n    # Linker dedup of gclocals reduces gclocals by >2X\n    #print(total(likeStackMap(fns, dedup=False)))\n    #print(total(likeStackMap(fns, sortBitmaps=\"value\")))\n    # 'total': 529225, 'pcdata': 292703, 'gclocals': 77558, 'extra': 158964\n    print(total(likeStackMap(fns, huffmanPcdata=True)))\n    print(total(likeStackMap(fns, huffmanPcdata=True, sortBitmaps=\"freq\")))\n    for n in range(0, 8):\n        print(n, total(likeStackMap(fns, grPcdata=n, sortBitmaps=\"freq\")))\n    #print(total(likeStackMap(fns, compactBitmap=True)))\n    # 'total': 407999, 'pcdata': 302023, 'extra': 105976\n    print(total(huffmanCoded(fns)))\n    print(total(huffmanCoded(fns, streamAlign=8)))\n    # Only emitting on newly live reduces pcdata by 42%, gclocals by 10%\n    filterLiveToDead(fns)\n    print(total(likeStackMap(fns)))\n\nif False:\n    # What do the bitmaps look like?\n    counts = countMaps(fns)\n    for bitmap, count in counts.items():\n        print(count, bin(bitmap))\n\nif False:\n    # What do the bitmap changes look like?\n    _, deltas = countDeltas(fns)\n    for delta, count in deltas.items():\n        print(count, bin(delta))\n\nif False:\n    # PC 
delta histogram\n    pcDeltaHist, _ = countDeltas(fns)\n    for delta, count in pcDeltaHist.items():\n        print(count, delta)\n"
  },
  {
    "path": "stress2/cmd.go",
    "content": "// Copyright 2020 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"os/exec\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n)\n\n// There are several complications here:\n//\n// * The process may start subprocesses. It may exit before its\n// subprocesses. Still-running subprocesses may keep the stdout/stderr\n// pipe open and continue writing to it. If we kill the command (e.g.,\n// after a timeout), we want to try to kill the whole subprocess tree.\n//\n// * The stress process itself may get killed in a way it can or can't\n// catch. If possible, it shouldn't leave behind processes that it\n// started. In POSIX, there's no way to do this for signals we can't\n// catch.\n//\n// TODO(test): Test these situations.\n\n// TODO: Use PR_SET_CHILD_SUBREAPER on Linux to keep track of the\n// whole subprocess tree.\n\n// TODO: We could use PID namespaces plus a custom init process that\n// exits of stress exits to ensure the subprocess tree gets cleaned up\n// if stress exits. 
However, we can only do this with root privileges.\n\ntype Command struct {\n\t// Status contains the process exit status after the process is done.\n\tStatus *os.ProcessState\n\n\t// waitChan is closed when the command exits and the status\n\t// fields above are filled in.\n\twaitChan chan struct{}\n\n\t// readDone is closed when the reader is no longer reading\n\t// output from the command.\n\treadDone chan struct{}\n\n\tmu      sync.Mutex // Protects fields below\n\tcmd     *exec.Cmd\n\tsigProc *os.Process\n\tout     io.Writer\n}\n\n// StartCommand starts a managed command with the given command-line\n// arguments, with its stdout and stderr redirected to out.\n//\n// This has several differences from exec.Command:\n//\n// - This attempts to manage the entire sub-process tree.\n//\n// - Output to out will be stopped when the command completes, even if\n// sub-processes continue to write to stdout/stderr.\n//\n// - This provides a channel-based way to wait for command completion.\nfunc StartCommand(args []string, out io.Writer) (*Command, error) {\n\tcmd := exec.Command(args[0], args[1:]...)\n\n\t// Put cmd in a process group so we can signal the whole\n\t// process group.\n\t//\n\t// This has the downside that the usual terminal signals\n\t// (notably SIGINT from Ctrl-C) won't automatically get\n\t// delivered to this new process group. Hence, we're\n\t// responsible for catching and forwarding them on.\n\t//\n\t// For other signals, there's simply not much we can do about\n\t// cleaning up children.\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tSetpgid: true,\n\t}\n\n\t// Create a pipe. 
We don't use \"out\" directly because we may\n\t// need to cut this off before the write side is actually\n\t// closed by the sub-process tree.\n\tr, w, err := os.Pipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\tw.Close()\n\t\tif r != nil {\n\t\t\tr.Close()\n\t\t}\n\t}()\n\tcmd.Stdout = w\n\tcmd.Stderr = w\n\n\t// Start process.\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create a fake os.Process for signaling the process group.\n\tsigProc, err := os.FindProcess(-cmd.Process.Pid)\n\tif err != nil {\n\t\t// Just signal the process.\n\t\tsigProc = cmd.Process\n\t}\n\n\tc := &Command{waitChan: make(chan struct{}), cmd: cmd, sigProc: sigProc, out: out}\n\n\t// Start output reader.\n\tc.readDone = make(chan struct{})\n\tgo c.reader(r)\n\tr = nil\n\n\t// Start waiter.\n\tgo c.waiter()\n\n\treturn c, nil\n}\n\nfunc (c *Command) reader(f *os.File) {\n\tbuf := make([]byte, 512)\n\tfor {\n\t\tn, err := f.Read(buf)\n\t\tif n > 0 {\n\t\t\tc.mu.Lock()\n\t\t\t// The command can exit while sub-processes\n\t\t\t// are still writing to stdout. If this\n\t\t\t// happened, stop writing to the output\n\t\t\t// stream.\n\t\t\tif c.cmd != nil {\n\t\t\t\tc.out.Write(buf[:n])\n\t\t\t}\n\t\t\tc.mu.Unlock()\n\t\t}\n\t\tif err != nil {\n\t\t\tif err != io.EOF {\n\t\t\t\tlog.Printf(\"reading from subprocess: %v\", err)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\tf.Close()\n\tclose(c.readDone)\n}\n\nfunc (c *Command) waiter() {\n\terr := c.cmd.Wait()\n\tswitch err.(type) {\n\tcase nil:\n\tcase *exec.ExitError:\n\t\t// Ignore. We'll pick up the process state below.\n\tdefault:\n\t\t// This indicates a bug, so panic.\n\t\tpanic(fmt.Sprintf(\"wait %d failed: %s\", c.cmd.Process.Pid, err))\n\t}\n\n\t// Clean up the process group. If everything in the process\n\t// group has already exited, this will fail, so we ignore any\n\t// errors. 
We do this as soon as possible after waiting so the\n\t// PGID won't get recycled.\n\tc.mu.Lock()\n\tc.sigProc.Signal(os.Kill)\n\tc.sigProc = nil\n\tc.mu.Unlock()\n\n\t// Wait a little bit for the output reader to catch up. Don't\n\t// wait too long because there could still be subprocesses\n\t// writing to the stdout pipe. But we need to wait a little\n\t// because even if there aren't, the pipe is asynchronous so\n\t// we could still be reading output from it.\n\tselect {\n\tcase <-c.readDone:\n\tcase <-time.After(1 * time.Second):\n\t}\n\n\t// Signal that command has exited.\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tc.Status = c.cmd.ProcessState\n\tc.cmd = nil\n\tclose(c.waitChan)\n}\n\n// Kill kills the process, first gracefully then aggressively, and\n// attempts to kill all of its sub-processes.\nfunc (c *Command) Kill() {\n\tfor _, sig := range []os.Signal{traceSignal, os.Interrupt, os.Kill} {\n\t\tif sig == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif func() bool {\n\t\t\tc.mu.Lock()\n\t\t\tdefer c.mu.Unlock()\n\t\t\tif c.sigProc == nil {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\tc.sigProc.Signal(sig)\n\t\t\treturn false\n\t\t}() {\n\t\t\treturn\n\t\t}\n\n\t\t// Wait for a few seconds or for it to exit.\n\t\tselect {\n\t\tcase <-c.waitChan:\n\t\t\treturn\n\t\tcase <-time.After(10 * time.Second):\n\t\t}\n\t}\n}\n\n// Done returns a channel that will be closed when the command exits\n// and its output and status are ready.\nfunc (c *Command) Done() <-chan struct{} {\n\treturn c.waitChan\n}\n"
  },
  {
    "path": "stress2/cmd_test.go",
    "content": "// Copyright 2020 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport \"testing\"\n\nfunc TestStdoutExitRace(t *testing.T) {\n\t// The stdout pipe is asynchronous with exiting, so even if a\n\t// child cleanly writes to stdout, then exits, wait may return\n\t// before we're done reading from the pipe. Check that we\n\t// handle this correctly.\n\n\tfor i := 0; i < 1000; i++ {\n\t\tcmd, err := StartCommand([]string{\"/bin/echo\", \"hi\"})\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\t<-cmd.Done()\n\t\tif !cmd.Status.Success() {\n\t\t\tt.Fatal(\"command failed: \", cmd.Status)\n\t\t}\n\t\tif got, want := string(cmd.Output), \"hi\\n\"; got != want {\n\t\t\tt.Errorf(\"got %q, want %q\", got, want)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "stress2/go.mod",
    "content": "module github.com/aclements/go-misc/stress2\n\ngo 1.15\n\nrequire golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37\n"
  },
  {
    "path": "stress2/go.sum",
    "content": "golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=\ngolang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37 h1:cg5LA/zNPRzIXIWSCxQW10Rvpy94aQh3LT/ShoCpkHw=\ngolang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=\ngolang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI=\ngolang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\n"
  },
  {
    "path": "stress2/main.go",
    "content": "// Copyright 2020 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"os\"\n\t\"os/signal\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"time\"\n)\n\nfunc main() {\n\tflag.Usage = func() {\n\t\tfmt.Fprintf(flag.CommandLine.Output(), `Usage: %s [flags] command...\n\nstress runs command repeatedly and in parallel and collects failures.\n\nIf command exits with status 0, it is considered a pass. If it exits\nwith any non-zero status besides 125, it is considered a failure. If\nit exits with status 125 or doesn't match the pass/fail regexps, it is\nconsidered a flake: neither success nor failure. (Status 125 is the\nhighest status not used by POSIX shells.) If it times out, it is\nconsidered a flake.\n\nIf -pass or -fail regular expressions are provided, they override\npass/fail exit status checking.\n\nThe -max-* flags cause the stress tool to exit after some number of\npasses, failures, or total runs. This is useful for bisecting a known\nflaky failure.\n\nCommand output is written to the directory specified by -o. Failures\nare logged to numbered files in this directory. 
Actively running\ncommands log to \".run-NNNNNN\" files and passes are logged to\n\".pass-NNNNNN\" files.\n\n`, os.Args[0])\n\t\tflag.PrintDefaults()\n\t}\n\n\tvar s Stress\n\tflag.IntVar(&s.Parallelism, \"p\", runtime.NumCPU(), \"run `N` processes in parallel\")\n\tflag.DurationVar(&s.Timeout, \"timeout\", 10*time.Minute, \"timeout each process after `duration`\")\n\tdefaultDir := filepath.Join(os.TempDir(), time.Now().Format(\"stress-20060102T150405\"))\n\tflag.StringVar(&s.OutDir, \"o\", defaultDir, \"write command logs to `directory`\")\n\tflag.Var(FlagLimit{&s.MaxRuns}, \"max-runs\", \"exit after `N` passes+fails (but not flakes/timeouts)\")\n\tflag.Var(FlagLimit{&s.MaxTotalRuns}, \"max-total-runs\", \"exit after `N` runs with any outcome\")\n\tflag.Var(FlagLimit{&s.MaxPasses}, \"max-passes\", \"exit after `N` successful runs\")\n\tflag.Var(FlagLimit{&s.MaxFails}, \"max-fails\", \"exit after `N` failed runs\")\n\tflag.BoolVar(&s.TimeoutsFail, \"timeouts-fail\", false, \"consider timeouts to be failures\")\n\t// TODO: Flag to keep timed-out subprocesses around for\n\t// inspection.\n\tflag.Var(FlagRegexp{&s.FailRe}, \"fail\", \"fail only if output matches `regexp`\")\n\tflag.Var(FlagRegexp{&s.PassRe}, \"pass\", \"pass only if output matches `regexp`\")\n\tflag.Parse()\n\ts.Command = flag.Args()\n\tif s.Parallelism <= 0 || s.Timeout <= 0 || len(s.Command) == 0 {\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\n\t// Ensure the output directory exists.\n\terr := os.MkdirAll(s.OutDir, 0777)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfmt.Printf(\"output to: %s\\n\", s.OutDir)\n\n\t// Trap signals and shut down cleanly.\n\t//\n\t// It's important we at least trap the signals that would\n\t// normally be delivered from the terminal since we put child\n\t// processes in their own process group.\n\tinterrupt := make(chan struct{})\n\ts.Interrupt = interrupt\n\tsig := make(chan os.Signal)\n\tsignal.Notify(sig, exitSignals...)\n\tgo func() {\n\t\t<-sig\n\t\t// Let a second 
signal through, in case Stop gets stuck.\n\t\tsignal.Stop(sig)\n\t\tclose(interrupt)\n\t}()\n\n\t// Run the stress test.\n\tresult := s.Run(NewStdoutReporter())\n\n\tswitch result {\n\tcase ResultPass:\n\t\tos.Exit(0)\n\tcase ResultFail:\n\t\tos.Exit(1)\n\tcase ResultFlake:\n\t\tos.Exit(125)\n\t}\n}\n\ntype FlagLimit struct {\n\tx *int\n}\n\nfunc (f FlagLimit) String() string {\n\tif f.x == nil {\n\t\t// The flag package uses the zero value of FlagLimit\n\t\t// to test the default string.\n\t\treturn \"<nil>\"\n\t}\n\tif *f.x <= 0 {\n\t\treturn \"infinity\"\n\t}\n\treturn strconv.FormatInt(int64(*f.x), 10)\n}\n\nfunc (f FlagLimit) Set(x string) error {\n\tswitch x {\n\tcase \"inf\", \"infinity\", \"none\":\n\t\t*f.x = 0\n\t\treturn nil\n\t}\n\n\tlimit, err := strconv.ParseInt(x, 10, 0)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif limit <= 0 {\n\t\treturn fmt.Errorf(\"limit must be > 0\")\n\t}\n\t*f.x = int(limit)\n\treturn nil\n}\n\ntype FlagRegexp struct {\n\tx **regexp.Regexp\n}\n\nfunc (f FlagRegexp) String() string {\n\tif f.x == nil || *f.x == nil {\n\t\treturn \"\"\n\t}\n\treturn (*f.x).String()\n}\n\nfunc (f FlagRegexp) Set(x string) error {\n\tre, err := regexp.Compile(x)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Now that we've checked the syntax, make them line-oriented.\n\tre, err = regexp.Compile(\"(?m)\" + x)\n\tif err != nil {\n\t\tpanic(\"failed to set m flag: \" + err.Error())\n\t}\n\t*f.x = re\n\treturn nil\n}\n"
  },
  {
    "path": "stress2/reporter.go",
    "content": "// Copyright 2020 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"golang.org/x/crypto/ssh/terminal\"\n)\n\ntype StressReporter interface {\n\tio.Writer\n\tStartStatus()\n\tStatus(format string, a ...interface{})\n\tStopStatus()\n}\n\nfunc NewStdoutReporter() StressReporter {\n\tif os.Getenv(\"TERM\") == \"\" || os.Getenv(\"TERM\") == \"dumb\" || !terminal.IsTerminal(syscall.Stdout) {\n\t\treturn &ReporterDumb{w: os.Stdout}\n\t}\n\treturn &ReporterVT100{w: os.Stdout}\n}\n\ntype ReporterDumb struct {\n\tw io.Writer\n}\n\nfunc (r *ReporterDumb) StartStatus() {}\nfunc (r *ReporterDumb) StopStatus()  {}\nfunc (r *ReporterDumb) Status(format string, a ...interface{}) {\n\tfmt.Fprintf(r.w, format, a...)\n\tr.w.Write([]byte{'\\n'})\n}\nfunc (r *ReporterDumb) Write(data []byte) (int, error) {\n\treturn r.w.Write(data)\n}\n\ntype ReporterVT100 struct {\n\tw      io.Writer\n\tstop   chan struct{}\n\tupdate chan func() string\n\twg     sync.WaitGroup\n\tmu     sync.Mutex\n}\n\nfunc (r *ReporterVT100) StartStatus() {\n\tr.stop = make(chan struct{})\n\tr.update = make(chan func() string)\n\tr.wg.Add(1)\n\tgo r.run()\n}\n\nfunc (r *ReporterVT100) StopStatus() {\n\tclose(r.stop)\n\tr.wg.Wait()\n}\n\nfunc (r *ReporterVT100) Status(format string, a ...interface{}) {\n\tr.update <- func() string {\n\t\treturn fmt.Sprintf(format, a...)\n\t}\n}\n\n// VT100 control sequences\nconst (\n\tresetLine = \"\\r\\x1b[2K\"\n\twrapOff   = \"\\x1b[?7l\"\n\tmoveEOL   = \"\\x1b[999C\"\n\twrapOn    = \"\\x1b[?7h\"\n)\n\nfunc (r *ReporterVT100) Write(data []byte) (int, error) {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\t// Clear the status line.\n\tfmt.Fprintf(r.w, \"%s%s\", resetLine, wrapOn)\n\treturn r.w.Write(data)\n}\n\nfunc (r *ReporterVT100) run() {\n\tconst ticker = \"-\\\\|/\"\n\t// 
minUpdate is the minimum time between displaying updates.\n\tconst minUpdate = time.Second / 10\n\n\ti := 0\n\tstatus := func() string { return \"\" }\n\ttick := time.NewTicker(time.Second / 2)\n\tinhibit, pending := false, false\n\tdeinhibit := time.NewTimer(0)\n\tdefer func() {\n\t\ttick.Stop()\n\n\t\t// Keep the last status line.\n\t\tr.mu.Lock()\n\t\tfmt.Fprintf(r.w, \"%s%s%s%s\\n\", resetLine, wrapOff, status(), wrapOn)\n\t\tr.mu.Unlock()\n\n\t\tr.wg.Done()\n\t}()\n\n\tfor {\n\t\t// Print the status line plus a ticker.\n\t\tr.mu.Lock()\n\t\tfmt.Fprintf(r.w, \"%s%s%s%s%c\", resetLine, wrapOff, status(), moveEOL, ticker[i%len(ticker)])\n\t\tr.mu.Unlock()\n\t\tpending = false\n\n\tignore:\n\t\tselect {\n\t\tcase <-tick.C:\n\t\t\ti++\n\n\t\tcase status = <-r.update:\n\t\t\tif inhibit {\n\t\t\t\t// There's a pending update. Show it\n\t\t\t\t// when the inhibit expires.\n\t\t\t\tpending = true\n\t\t\t\tgoto ignore\n\t\t\t}\n\t\t\t// Show this update, but then inhibit further\n\t\t\t// updates for a little while.\n\t\t\tinhibit = true\n\t\t\tdeinhibit.Reset(minUpdate)\n\n\t\tcase <-deinhibit.C:\n\t\t\t// Refresh the displayed status if there are\n\t\t\t// pending updates and allow the next status\n\t\t\t// update to appear immediately.\n\t\t\tinhibit = false\n\t\t\tif !pending {\n\t\t\t\tgoto ignore\n\t\t\t}\n\n\t\tcase <-r.stop:\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// TimeSince formats as the duration since a given reference time.\n// This is intended for use in deferred format strings since it will\n// continue to show the correct duration as the current time changes.\ntype TimeSince time.Time\n\nfunc (t TimeSince) String() string {\n\tif time.Time(t).IsZero() {\n\t\treturn \"?\"\n\t}\n\treturn time.Since(time.Time(t)).Round(time.Second).String()\n}\n"
  },
  {
    "path": "stress2/signal_notunix.go",
    "content": "// Copyright 2020 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build plan9 windows\n\npackage main\n\nimport \"os\"\n\nvar exitSignals = []os.Signal{os.Interrupt}\n\nvar traceSignal = nil\n"
  },
  {
    "path": "stress2/signal_unix.go",
    "content": "// Copyright 2020 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// +build aix darwin dragonfly freebsd js linux netbsd openbsd solaris\n\npackage main\n\nimport (\n\t\"os\"\n\t\"syscall\"\n)\n\nvar exitSignals = []os.Signal{syscall.SIGHUP, syscall.SIGINT, syscall.SIGQUIT, syscall.SIGTERM}\n\n// traceSignal is the signal to send a Go program to make it crash\n// with a stack trace.\nvar traceSignal = syscall.SIGQUIT\n"
  },
  {
    "path": "stress2/stress.go",
    "content": "// Copyright 2020 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"path\"\n\t\"regexp\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\t\"unicode/utf8\"\n)\n\n// A Stress stress tests a command.\ntype Stress struct {\n\tCommand     []string\n\tParallelism int\n\tTimeout     time.Duration\n\tOutDir      string\n\n\tMaxPasses    int // If 0, no limit\n\tMaxFails     int\n\tMaxRuns      int // Limit on passes+fails (but not flakes)\n\tMaxTotalRuns int // Limit on all types of runs\n\n\tTimeoutsFail bool // Consider timeouts to be failures\n\n\tFailRe *regexp.Regexp\n\tPassRe *regexp.Regexp\n\n\tInterrupt <-chan struct{}\n}\n\ntype startRun struct {\n\tid int64\n}\n\ntype result struct {\n\tid     int64\n\toutput *os.File\n\tstatus *os.ProcessState // nil on timeout\n\terr    error            // If non-nil, error starting command\n}\n\ntype ResultKind int\n\nconst (\n\tResultPass ResultKind = iota\n\tResultFail\n\tResultFlake\n\tResultTimeout\n)\n\nfunc (s *Stress) resultKind(res result, output []byte) ResultKind {\n\tswitch {\n\tcase res.status == nil && !s.TimeoutsFail:\n\t\treturn ResultTimeout\n\tcase s.PassRe == nil && res.status != nil && res.status.Success(),\n\t\ts.PassRe != nil && s.PassRe.Match(output):\n\t\treturn ResultPass\n\tcase s.FailRe == nil && (res.status == nil || res.status.ExitCode() != 125),\n\t\ts.FailRe != nil && s.FailRe.Match(output):\n\t\treturn ResultFail\n\tdefault:\n\t\treturn ResultFlake\n\t}\n}\n\nfunc (s *Stress) Run(reporter StressReporter) ResultKind {\n\t// Replace \"0 as infinity\" limits with a value that's easy to\n\t// compare against.\n\tconst MaxInt = int(^uint(0) >> 1)\n\tfor _, limit := range []*int{&s.MaxPasses, &s.MaxFails, &s.MaxRuns, &s.MaxTotalRuns} {\n\t\tif *limit <= 0 {\n\t\t\t*limit = 
MaxInt\n\t\t}\n\t}\n\n\tstart := make(chan startRun, s.Parallelism)\n\tstop := make(chan struct{})\n\tresults := make(chan result, s.Parallelism)\n\tvar id int64\n\tactiveStartTimes := make(map[int64]time.Time)\n\n\treporter.StartStatus()\n\n\t// TODO: Do a smoke test. Start just one task and if it fails\n\t// within a second, go into rate-limited starting mode.\n\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < s.Parallelism; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\ts.runner(start, stop, results)\n\t\t}()\n\t\tstart <- startRun{id}\n\t\tactiveStartTimes[id] = time.Now()\n\t\tid++\n\t}\n\n\t// TODO: Rate limit restarts after failures.\n\n\tfatal := false\n\ttotalRuns := 0\n\tcounts := make(map[ResultKind]int)\n\tlogIdxPass, logIdxFail, logIdxFlake := 0, 0, 0\n\tvar passFailTime time.Duration\n\tupdateStatus := func() {\n\t\t// TODO: ETA if we have s.Max*?\n\t\tbuf := new(bytes.Buffer)\n\t\tfmt.Fprintf(buf, \"%d passes, %d fails\", counts[ResultPass], counts[ResultFail])\n\t\tif n := counts[ResultFlake]; n > 0 {\n\t\t\tfmt.Fprintf(buf, \", %d flakes\", n)\n\t\t}\n\t\tif n := counts[ResultTimeout]; n > 0 {\n\t\t\tfmt.Fprintf(buf, \", %d timeouts\", n)\n\t\t}\n\t\tvar avg interface{} = \"?\"\n\t\tif passFail := counts[ResultPass] + counts[ResultFail]; passFail > 0 {\n\t\t\tavg = (passFailTime / time.Duration(passFail)).Round(time.Second)\n\t\t}\n\t\tvar oldest time.Time\n\t\tfor _, t := range activeStartTimes {\n\t\t\tif oldest.IsZero() || t.Before(oldest) {\n\t\t\t\toldest = t\n\t\t\t}\n\t\t}\n\t\treporter.Status(\"%s, avg %s, max active %s\", buf.String(), avg, TimeSince(oldest))\n\t}\nloop:\n\tfor {\n\t\tupdateStatus()\n\n\t\tvar res result\n\t\tselect {\n\t\tcase res = <-results:\n\t\tcase <-s.Interrupt:\n\t\t\tbreak loop\n\t\t}\n\n\t\tif res.err != nil {\n\t\t\tlog.Printf(\"error starting command: %s\", res.err)\n\t\t\tfatal = true\n\t\t\tbreak\n\t\t}\n\n\t\t// Read the command output back from the log file.\n\t\tif _, err := 
res.output.Seek(0, 0); err != nil {\n\t\t\tlog.Printf(\"error seeking log file: %s\", err)\n\t\t\tfatal = true\n\t\t\tbreak\n\t\t}\n\t\toutput, err := ioutil.ReadAll(res.output)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"error reading log file: %s\", err)\n\t\t\tfatal = true\n\t\t\tbreak\n\t\t}\n\t\tlogPath := res.output.Name()\n\t\tif err := res.output.Close(); err != nil {\n\t\t\tlog.Printf(\"error saving log file: %s\", err)\n\t\t\tfatal = true\n\t\t\tbreak\n\t\t}\n\n\t\t// Classify the result.\n\t\tkind := s.resultKind(res, output)\n\t\ttotalRuns++\n\t\tcounts[kind]++\n\n\t\t// Update time stats.\n\t\tduration := time.Since(activeStartTimes[res.id])\n\t\tdelete(activeStartTimes, res.id)\n\t\tif kind == ResultPass || kind == ResultFail {\n\t\t\tpassFailTime += duration\n\t\t}\n\n\t\t// Save log.\n\t\tvar prefix string\n\t\tvar logIdx *int\n\t\tswitch kind {\n\t\tdefault:\n\t\t\tpanic(\"bad kind\")\n\t\tcase ResultPass:\n\t\t\tprefix, logIdx = \".pass-\", &logIdxPass\n\t\tcase ResultFail, ResultTimeout:\n\t\t\tprefix, logIdx = \"\", &logIdxFail\n\t\tcase ResultFlake:\n\t\t\tprefix, logIdx = \"flake-\", &logIdxFlake\n\t\t}\n\t\tpath, err := saveLog(s.OutDir, prefix, logIdx, logPath)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"error saving log: %s\", err)\n\t\t\tfatal = true\n\t\t\tbreak\n\t\t}\n\n\t\t// Show failures.\n\t\tif kind != ResultPass {\n\t\t\tprintTail(reporter, output)\n\t\t\tfmt.Fprintf(reporter, \"full output written to %s\\n\", path)\n\t\t}\n\n\t\t// Check if we're done.\n\t\tif totalRuns >= s.MaxTotalRuns ||\n\t\t\tcounts[ResultPass]+counts[ResultFail] >= s.MaxRuns ||\n\t\t\tcounts[ResultPass] >= s.MaxPasses ||\n\t\t\tcounts[ResultFail] >= s.MaxFails {\n\t\t\tbreak\n\t\t}\n\n\t\t// Start another process.\n\t\tstart <- startRun{id}\n\t\tactiveStartTimes[id] = time.Now()\n\t\tid++\n\t}\n\tupdateStatus()\n\treporter.StopStatus()\n\n\t// Shut down runners. 
This will kill the subprocesses.\n\tfmt.Fprintf(reporter, \"stopping processes...\\n\")\n\tclose(start)\n\tclose(stop)\n\twg.Wait()\n\n\tif fatal {\n\t\t// There was something wrong with the command. Don't\n\t\t// treat this as a success or a failure.\n\t\treturn ResultFlake\n\t} else if counts[ResultFail] > 0 {\n\t\t// If there were any failures, exit with failure.\n\t\treturn ResultFail\n\t} else if counts[ResultPass] > 0 {\n\t\t// If there were no failures and only successes, exit\n\t\t// with success.\n\t\treturn ResultPass\n\t} else {\n\t\t// If there were no failures or passes, then they were\n\t\t// all timeouts or flakes.\n\t\treturn ResultFlake\n\t}\n}\n\nfunc (s *Stress) runner(start <-chan startRun, stop <-chan struct{}, results chan<- result) {\n\tfor tok := range start {\n\t\tif !s.run1(tok, stop, results) {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (s *Stress) run1(tok startRun, stop <-chan struct{}, results chan<- result) bool {\n\t// Open a hidden file to stream in-progress output to\n\t// so the user can see it.\n\tname := path.Join(s.OutDir, fmt.Sprintf(\".run-%06d\", tok.id))\n\tf, err := os.Create(name)\n\tif err != nil {\n\t\tresults <- result{tok.id, nil, nil, err}\n\t\treturn true\n\t}\n\tdeleteFile := true\n\tdefer func() {\n\t\tif deleteFile {\n\t\t\tf.Close()\n\t\t\tos.Remove(name)\n\t\t}\n\t}()\n\n\t// Start command.\n\tcmd, err := StartCommand(s.Command, f)\n\tif err != nil {\n\t\t// TODO(test): Run command that doesn't exist.\n\t\tresults <- result{id: tok.id, err: err}\n\t\treturn true\n\t}\n\n\t// Wait for cancellation, timeout, or completion.\n\ttimeout := time.NewTimer(s.Timeout)\n\tselect {\n\tcase <-stop:\n\t\tcmd.Kill()\n\t\t// Stop the runner loop\n\t\treturn false\n\n\tcase <-timeout.C:\n\t\tcmd.Kill()\n\t\t<-cmd.Done()\n\t\tfmt.Fprintf(f, \"timeout after %s\\n\", s.Timeout)\n\t\tdeleteFile = false\n\t\tresults <- result{id: tok.id, output: f}\n\n\tcase <-cmd.Done():\n\t\tif !cmd.Status.Success() {\n\t\t\tfmt.Fprintf(f, \"exited: 
%s\\n\", formatProcessState(cmd.Status))\n\t\t}\n\t\tdeleteFile = false\n\t\tresults <- result{id: tok.id, output: f, status: cmd.Status}\n\t}\n\ttimeout.Stop()\n\treturn true\n}\n\nfunc saveLog(outDir, prefix string, idx *int, oldName string) (string, error) {\n\tvar name string\n\tfor {\n\t\tname = path.Join(outDir, fmt.Sprintf(\"%s%06d\", prefix, *idx))\n\t\t*idx++\n\t\terr := os.Link(oldName, name)\n\t\tif err == nil {\n\t\t\t// Found a name.\n\t\t\tbreak\n\t\t} else if !os.IsExist(err) {\n\t\t\treturn \"\", err\n\t\t}\n\t\t// Name already exists. Try the next index.\n\t}\n\n\t// Delete the old name.\n\tos.Remove(oldName)\n\treturn name, nil\n}\n\nfunc printTail(w io.Writer, data []byte) {\n\tconst maxLines = 10\n\tconst maxRunes = maxLines * 100\n\n\t// Ensure data ends with a \\n if there are any lines.\n\tif len(data) > 0 && data[len(data)-1] != '\\n' {\n\t\tdata = append(data[:len(data):len(data)], '\\n')\n\t}\n\n\tpos := len(data)\n\tlastNL := len(data)\n\tlineCount := -1\n\truneCount := 0\n\tfor pos > 0 {\n\t\t// Find beginning of the next line.\n\t\tbol := bytes.LastIndexByte(data[:lastNL], '\\n') + 1\n\n\t\t// Would this line push us over either limit?\n\t\truneCount += utf8.RuneCount(data[bol:lastNL])\n\t\tif runeCount > maxRunes {\n\t\t\tbreak\n\t\t}\n\n\t\t// Include the line.\n\t\tpos = bol\n\t\tlastNL = pos - 1\n\t\tlineCount++\n\t\tif lineCount >= maxLines {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tw.Write(data[pos:])\n}\n\nfunc formatProcessState(state *os.ProcessState) string {\n\t// While this is syscall-specific, in practice all supported\n\t// OSes have a WaitStatus with the same interface (though\n\t// different representations).\n\ts := state.Sys().(syscall.WaitStatus)\n\tswitch {\n\tcase s.Exited():\n\t\treturn fmt.Sprintf(\"status %d\", s.ExitStatus())\n\tcase s.Signaled():\n\t\textra := \"\"\n\t\tif s.CoreDump() {\n\t\t\textra = \" (dumped core)\"\n\t\t}\n\t\treturn fmt.Sprintf(\"signal %s%s\", s.Signal(), extra)\n\tdefault:\n\t\treturn 
fmt.Sprintf(\"unknown wait status %v\", s)\n\t}\n}\n"
  },
  {
    "path": "stress2/stress_test.go",
    "content": "// Copyright 2020 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage main\n\nimport (\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestPrintTail(t *testing.T) {\n\tcheck := func(t *testing.T, data, want string) {\n\t\tt.Helper()\n\t\tvar got strings.Builder\n\t\tprintTail(&got, []byte(data))\n\t\tif got.String() != want {\n\t\t\tt.Errorf(\"for:\\n%s\\ngot:\\n%s\\nwant:\\n%s\", data, got.String(), want)\n\t\t}\n\t}\n\n\t// Basic\n\tcheck(t, \"\", \"\")\n\tcheck(t, \"a\", \"a\\n\")\n\tcheck(t, \"a\\nb\\n\", \"a\\nb\\n\")\n\t// Line trimming\n\ta20 := strings.Repeat(\"a\\n\", 20)\n\tcheck(t, a20, strings.Repeat(\"a\\n\", 10))\n\tcheck(t, a20[:len(a20)-1], strings.Repeat(\"a\\n\", 10))\n\t// Test rune limits.\n\tlong := strings.Repeat(\"a\", 2000) + \"\\n\"\n\tcheck(t, long, \"\")\n\tlong += \"x\\n\"\n\tcheck(t, long, \"x\\n\")\n}\n"
  },
  {
    "path": "varint/README.md",
    "content": "This directory contains experiments with varint decoding using\nhand-coded assembly.\n\nThe simple assembly loop is 15–30% faster than the Go loop. The loop\nis somewhat clever, but in principle the compiler could probably\nproduce this code.\n\nMost of the code experiments with BMI2 instructions. This requires\nHaswell or newer, which the benchmark will detect. This approach is\nconstant time up to 8 byte varints (56 bit values). It's 50% faster\nthan the Go code for 8 byte varints, but 80% slower for one byte\nvarints.\n"
  },
  {
    "path": "varint/asm_amd64.s",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n#include \"textflag.h\"\n\nGLOBL\t·hasBMI2(SB),NOPTR,$1\n\nTEXT ·queryBMI2(SB),NOSPLIT,$0-1\n\t// TODO: Check validity of query.\n\tMOVQ\t$0x07, AX\n\tMOVQ\t$0, CX\n\tCPUID\n\t// Bit 8 of EBX indicates BMI2 support.\n\tBTQ\t$8, BX\n\tSETCS\tret+0(FP)\n\tRET\n\n// Hand-coded byte decoding loop with some clever tricks.\nTEXT ·decodeVarintAsmLoop(SB),NOSPLIT,$0-40\n\tMOVQ\tbuf_base+0(FP), BX\t// Pointer\n\tMOVQ\tbuf_len+8(FP), AX\t// Length\n\tMOVL\t$10, CX\n\tCMPQ\tAX, CX\n\tCMOVLGT\tCX, AX\t\t// Length is at most 10\n\tXORL\tSI, SI\t\t// Index\n\tXORL\tCX, CX\t\t// Shift\n\tXORL\tDX, DX\t\t// Value\n\nloop:\n\tCMPL\tSI, AX\t\t// (fused with JEQ)\n\tJEQ\tbad\t\t// Reached end of buffer or >10 bytes\n\n\tMOVBLZX\t(SI)(BX*1), DI\t// Load next byte\n\tINCL\tSI\n\t// This could be a BTRL $7, DI, but this is simpler and\n\t// just as fast thanks to macro-op fusion.\n\tTESTL\t$0x80, DI\t// Is bit 7 set? 
(fused with JZ)\n\tJZ\tlast\n\tANDL\t$0x7f, DI\t// Clear bit 7\n\tSHLQ\tCL, DI\t\t// value |= value << shift\n\tORQ\tDI, DX\n\tADDL\t$7, CX\t\t// shift += 7\n\tJMP\tloop\n\nlast:\n\tSHLQ\tCL, DI\t\t// Final value |= value << shift\n\tORQ\tDI, DX\n\t// Return decoded value and length.\n\tMOVQ\tDX, x+24(FP)\n\tMOVQ\tSI, n+32(FP)\n\tRET\n\nbad:\n\tMOVQ\t$0, x+24(FP)\n\tMOVQ\t$0, n+32(FP)\n\tRET\n\n// decodeVarintAsmBMI2 uses the BMI2 PEXT instruction to extract 7\n// bits from each byte in one instruction.\nTEXT ·decodeVarintAsmBMI2(SB),NOSPLIT,$0-40\n\tMOVQ\tbuf_base+0(FP), BX\n\tMOVQ\tbuf_len+8(FP), CX\n\n\t// Take the slow path if there's no BMI2 or there are fewer\n\t// than 8 bytes available.\n\tMOVBLZX\t·hasBMI2(SB), AX\n\tTESTB\tAL, AL\n\tJEQ\tslowpath\n\tCMPQ\tCX, $8\n\tJLT\tslowpath\n\n\t// Load 8 bytes from buf.\n\tMOVQ\t(BX), AX\n\n\t// Extract the continuation bits into BX.\n\tMOVQ\tAX, M0\n\tPMOVMSKB\tM0, BX\n\t// Compute byte length - 1 of varint into BX.\n\tNOTL\tBX\n\tBSFL\tBX, BX\n\t// If it's more than 8 bytes, take the slow path.\n\tCMPL\tBX, $8\n\tJGE\tslowpath\n\t// Extract the relevant bytes from the input.\n\tINCL\tBX\n\tMOVQ\tBX, CX\n\tSHLQ\t$(3+8), CX\t// CX[15:8] = (byte len * 8); CX[7:0] = 0\n\tBEXTRQ\tCX, AX, AX\t// Requires BMI1\n\t// Extract the low 7 bits from each byte of the input.\n\tMOVQ\t$0x7f7f7f7f7f7f7f7f, DI\n\tPEXTQ\tDI, AX, DX\t// Requires BMI2\n\t// Return decoded value and length.\n\tMOVQ\tDX, x+24(FP)\n\tMOVQ\tBX, n+32(FP)\n\tRET\n\nslowpath:\n\t// Consume buffer one byte at a time.\n\t// TODO: Could merge with some of the above registers better.\n\tMOVQ\tbuf_base+0(FP), BX\t// Pointer\n\tMOVQ\tbuf_len+8(FP), AX\t// Length\n\tMOVQ\t$10, CX\n\tCMPQ\tAX, CX\n\tCMOVQGT\tCX, AX\t\t// Length is at most 10\n\tXORQ\tSI, SI\t\t// Index\n\tXORQ\tCX, CX\t\t// Shift\n\tXORQ\tDX, DX\t\t// Value\n\nloop:\n\tCMPQ\tSI, AX\n\tJEQ\tbad\t\t// Reached end of buffer or >10 bytes\n\n\tMOVBLZX\t(SI)(BX*1), DI\t// Load next 
byte\n\tINCQ\tSI\n\tBTRL\t$7, DI\t\t// Is bit 7 set? Clear bit 7.\n\tJNC\tlast\t\t// If not set, this is the final byte\n\tSHLQ\tCL, DI\t\t// value |= value << shift\n\tORQ\tDI, DX\n\tADDQ\t$7, CX\t\t// shift += 7\n\tJMP\tloop\n\nlast:\n\tSHLQ\tCL, DI\t\t// value |= value << shift\n\tORQ\tDI, DX\n\t// Return decoded value and length.\n\tMOVQ\tDX, x+24(FP)\n\tMOVQ\tSI, n+32(FP)\n\tRET\n\nbad:\n\tMOVQ\t$0, x+24(FP)\n\tMOVQ\t$0, n+32(FP)\n\tRET\n\n// The other two also use PEXT, but use different tricks to extract\n// the length and set up the mask. They turned out to be slower than\n// the one above, but are historically interesting.\n\nDATA extract<>+0x00(SB)/8,$0x000000000000007f\nDATA extract<>+0x08(SB)/8,$0x0000000000007f7f\nDATA extract<>+0x10(SB)/8,$0x00000000007f7f7f\nDATA extract<>+0x18(SB)/8,$0x000000007f7f7f7f\nDATA extract<>+0x20(SB)/8,$0x0000007f7f7f7f7f\nDATA extract<>+0x28(SB)/8,$0x00007f7f7f7f7f7f\nDATA extract<>+0x30(SB)/8,$0x007f7f7f7f7f7f7f\nDATA extract<>+0x38(SB)/8,$0x7f7f7f7f7f7f7f7f\nGLOBL extract<>(SB),(NOPTR+RODATA),$(8*8)\n\nTEXT ·decodeVarintAsm1(SB),NOSPLIT,$0-40\n\t// Take the slow path if there's no BMI2 or there are fewer\n\t// than 8 bytes available.\n\tMOVBLZX\t·hasBMI2(SB), AX\n\tTESTB\tAL, AL\n\tJEQ\tslowpath\n\tMOVQ\tbuf_len+8(FP), AX\n\tCMPQ\tAX, $8\n\tJLT\tslowpath\n\n\t// Load 8 bytes from buf.\n\tMOVQ\tbuf_base+0(FP), AX\n\tMOVQ\t(AX), AX\n\n\t// Extract the continuation bits into BX.\n\tMOVQ\tAX, M0\n\tPMOVMSKB\tM0, BX\n\t// Compute byte length - 1 of varint into BX.\n\tNOTL\tBX\n\tBSFL\tBX, BX\n\t// If it's more than 8 bytes, take the slow path.\n\tCMPL\tBX, $8\n\tJGE\tslowpath\n\t// Extract the value into DX using a mask lookup table.\n\tMOVQ\t$extract<>(SB), CX\n\tMOVQ\t(CX)(BX*8), DX\n\tPEXTQ\tDX, AX, DX\t// Requires BMI2\n\t// Return decoded value and length.\n\tMOVQ\tDX, x+24(FP)\n\tINCL\tBX\n\tMOVQ\tBX, n+32(FP)\n\tRET\n\nslowpath:\n\t// Consume buffer one byte at a time.\n\t// TODO: Could merge with some of the above 
registers better.\n\tMOVQ\tbuf_base+0(FP), BX\t// Pointer\n\tMOVQ\tbuf_len+8(FP), AX\t// Length\n\tMOVQ\t$10, CX\n\tCMPQ\tAX, CX\n\tCMOVQGT\tCX, AX\t\t// Length is at most 10\n\tXORQ\tSI, SI\t\t// Index\n\tXORQ\tCX, CX\t\t// Shift\n\tXORQ\tDX, DX\t\t// Value\n\nloop:\n\tCMPQ\tSI, AX\n\tJEQ\tbad\t\t// Reached end of buffer or >10 bytes\n\n\tMOVBLZX\t(SI)(BX*1), DI\t// Load next byte\n\tINCQ\tSI\n\tBTRL\t$7, DI\t\t// Is bit 7 set? Clear bit 7.\n\tJNC\tlast\t\t// If not set, this is the final byte\n\tSHLQ\tCL, DI\t\t// value |= value << shift\n\tORQ\tDI, DX\n\tADDQ\t$7, CX\t\t// shift += 7\n\tJMP\tloop\n\nlast:\n\tSHLQ\tCL, DI\t\t// value |= value << shift\n\tORQ\tDI, DX\n\t// Return decoded value and length.\n\tMOVQ\tDX, x+24(FP)\n\tMOVQ\tSI, n+32(FP)\n\tRET\n\nbad:\n\tMOVQ\t$0, x+24(FP)\n\tMOVQ\t$0, n+32(FP)\n\tRET\n\nTEXT ·decodeVarintAsm2(SB),NOSPLIT,$0-40\n\tMOVQ\tbuf_base+0(FP), BX\n\tMOVQ\tbuf_len+8(FP), CX\n\n\t// Take the slow path if there's no BMI2 or there are fewer\n\t// than 8 bytes available.\n\tMOVBLZX\t·hasBMI2(SB), AX\n\tTESTB\tAL, AL\n\tJEQ\tslowpath\n\tCMPQ\tCX, $8\n\tJLT\tslowpath\n\n\t// Load 8 bytes from buf.\n\tMOVQ\t(BX), AX\n\n\t// Get continuation bit mask into DX.\n\tMOVQ\t$0x7f7f7f7f7f7f7f7f, DI\n\tMOVQ\tAX, DX\n\tORQ\tDI, DX\n\t// Compute bit length of varint into CX.\n\tNOTQ\tDX\n\tBSFQ\tDX, CX\n\t// If all continuation bits are set, take the slow path.\n\tJZ\tslowpath\n\t// Compute bit extraction mask into R14.\n\t//BLSMSKQ\tDX, R14\t\t// Requires BMI1\n\tBYTE $0xc4; BYTE $0xe2; BYTE $0x88; BYTE $0xf3; BYTE $0xd2\n\t// Mask the value.\n\tANDQ\tR14, AX\n\t// Extract the bits.\n\tPEXTQ\tDI, AX, DX\t// Requires BMI2\n\n\t// Compute byte length. 
7=>1, 15=>2, etc.\n\tINCQ\tCX\n\tSHRQ\t$3, CX\n\n\t// Return decoded value and length.\n\tMOVQ\tDX, x+24(FP)\n\tMOVQ\tCX, n+32(FP)\n\tRET\n\nslowpath:\n\t// Consume buffer one byte at a time.\n\t// TODO: Could merge with some of the above registers better.\n\tMOVQ\tbuf_base+0(FP), BX\t// Pointer\n\tMOVQ\tbuf_len+8(FP), AX\t// Length\n\tMOVQ\t$10, CX\n\tCMPQ\tAX, CX\n\tCMOVQGT\tCX, AX\t\t// Length is at most 10\n\tXORQ\tSI, SI\t\t// Index\n\tXORQ\tCX, CX\t\t// Shift\n\tXORQ\tDX, DX\t\t// Value\n\nloop:\n\tCMPQ\tSI, AX\n\tJEQ\tbad\t\t// Reached end of buffer or >10 bytes\n\n\tMOVBLZX\t(SI)(BX*1), DI\t// Load next byte\n\tINCQ\tSI\n\tBTRL\t$7, DI\t\t// Is bit 7 set? Clear bit 7.\n\tJNC\tlast\t\t// If not set, this is the final byte\n\tSHLQ\tCL, DI\t\t// value |= value << shift\n\tORQ\tDI, DX\n\tADDQ\t$7, CX\t\t// shift += 7\n\tJMP\tloop\n\nlast:\n\tSHLQ\tCL, DI\t\t// value |= value << shift\n\tORQ\tDI, DX\n\t// Return decoded value and length.\n\tMOVQ\tDX, x+24(FP)\n\tMOVQ\tSI, n+32(FP)\n\tRET\n\nbad:\n\tMOVQ\t$0, x+24(FP)\n\tMOVQ\t$0, n+32(FP)\n\tRET\n"
  },
  {
    "path": "varint/bench_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage varint\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"testing\"\n)\n\nfunc TestDecodeVarintAsm(t *testing.T) {\n\ttype fn struct {\n\t\tname string\n\t\tf    func([]byte) (uint64, int)\n\t}\n\tfor _, f := range []fn{\n\t\t{\"decodeVarintAsmLoop\", decodeVarintAsmLoop},\n\t\t{\"decodeVarintAsmBMI2\", decodeVarintAsmBMI2},\n\t\t{\"decodeVarintAsm1\", decodeVarintAsm1},\n\t\t{\"decodeVarintAsm2\", decodeVarintAsm2},\n\t} {\n\t\tfor _, bmi2 := range []bool{false, true} {\n\t\t\tfor _, pad := range []bool{false, true} {\n\t\t\t\tname := fmt.Sprintf(\"f:%s/bmi2:%v/pad:%v\", f.name, bmi2, pad)\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\ttestDecodeVarintAsm(t, f.f, bmi2, pad)\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc testDecodeVarintAsm(t *testing.T, f func([]byte) (uint64, int), bmi2, pad bool) {\n\tif bmi2 && !hasBMI2 {\n\t\tt.Skip(\"BMI2 not supported on this CPU\")\n\t}\n\n\toldHasBMI2 := hasBMI2\n\tdefer func() { hasBMI2 = oldHasBMI2 }()\n\thasBMI2 = bmi2\n\n\tpadding := []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}\n\tfor x1 := uint(0); x1 < 64; x1++ {\n\t\tfor x2 := uint(0); x2 < x1; x2++ {\n\t\t\tvar v uint64 = (1 << x1) | (1 << x2)\n\t\t\tbuf := EncodeVarint(v)\n\t\t\tvlen := len(buf)\n\t\t\tif pad {\n\t\t\t\tbuf = append(buf, padding...)\n\t\t\t}\n\t\t\tx, n := f(buf)\n\t\t\tif x != v || n != vlen {\n\t\t\t\tt.Errorf(\"decode(encode(%#x)) = %#x, %d; want %#x, %d %x\", v, x, n, v, vlen, buf)\n\t\t\t}\n\t\t}\n\t}\n}\n\nvar testBuf []byte\n\nvar testLengths [11][]byte\n\nfunc init() {\n\tr := rand.New(rand.NewSource(1))\n\tfor i := 0; i < 1000; i++ {\n\t\tval := uint64(r.Uint32())\n\t\ttestBuf = append(testBuf, EncodeVarint(val)...)\n\t}\n\n\tfor length := 1; length <= 10; length++ {\n\t\tencoded := EncodeVarint(1 << uint(7*(length-1)))\n\t\tif len(encoded) != 
length {\n\t\t\tpanic(\"unexpected encoded length\")\n\t\t}\n\t\tfor i := 0; i < 1000; i++ {\n\t\t\ttestLengths[length] = append(testLengths[length], encoded...)\n\t\t}\n\t}\n}\n\nfunc BenchmarkDecodeVarint(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tbuf := testBuf\n\t\tfor len(buf) > 0 {\n\t\t\t_, n := DecodeVarint(buf)\n\t\t\tbuf = buf[n:]\n\t\t}\n\t}\n}\n\nfunc BenchmarkDecodeVarintN(b *testing.B) {\n\tfor length := 1; length < len(testLengths); length++ {\n\t\tname := fmt.Sprintf(\"bytes:%d\", length)\n\t\tb.Run(name, func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tbuf := testLengths[length]\n\t\t\t\tfor len(buf) > 0 {\n\t\t\t\t\t_, n := DecodeVarint(buf)\n\t\t\t\t\tbuf = buf[n:]\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc BenchmarkDecodeVarintAsmLoop(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tbuf := testBuf\n\t\tfor len(buf) > 0 {\n\t\t\t_, n := decodeVarintAsmLoop(buf)\n\t\t\tbuf = buf[n:]\n\t\t}\n\t}\n}\n\nfunc BenchmarkDecodeVarintAsmLoopN(b *testing.B) {\n\tfor length := 1; length < len(testLengths); length++ {\n\t\tname := fmt.Sprintf(\"bytes:%d\", length)\n\t\tb.Run(name, func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tbuf := testLengths[length]\n\t\t\t\tfor len(buf) > 0 {\n\t\t\t\t\t_, n := decodeVarintAsmLoop(buf)\n\t\t\t\t\tbuf = buf[n:]\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc BenchmarkDecodeVarintAsmBMI2(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tbuf := testBuf\n\t\tfor len(buf) > 0 {\n\t\t\t_, n := decodeVarintAsmBMI2(buf)\n\t\t\tbuf = buf[n:]\n\t\t}\n\t}\n}\n\nfunc BenchmarkDecodeVarintAsmBMI2N(b *testing.B) {\n\tfor length := 1; length < len(testLengths); length++ {\n\t\tname := fmt.Sprintf(\"bytes:%d\", length)\n\t\tb.Run(name, func(b *testing.B) {\n\t\t\tfor i := 0; i < b.N; i++ {\n\t\t\t\tbuf := testLengths[length]\n\t\t\t\tfor len(buf) > 0 {\n\t\t\t\t\t_, n := decodeVarintAsmBMI2(buf)\n\t\t\t\t\tbuf = buf[n:]\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc BenchmarkDecodeVarintAsm1(b 
*testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tbuf := testBuf\n\t\tfor len(buf) > 0 {\n\t\t\t_, n := decodeVarintAsm1(buf)\n\t\t\tbuf = buf[n:]\n\t\t}\n\t}\n}\n\nfunc BenchmarkDecodeVarintAsm2(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tbuf := testBuf\n\t\tfor len(buf) > 0 {\n\t\t\t_, n := decodeVarintAsm2(buf)\n\t\t\tbuf = buf[n:]\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "varint/varint.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage varint\n\nconst maxVarintBytes = 10\n\n// EncodeVarint and DecodeVarint from https://github.com/golang/protobuf\n\nfunc EncodeVarint(x uint64) []byte {\n\tvar buf [maxVarintBytes]byte\n\tvar n int\n\tfor n = 0; x > 127; n++ {\n\t\tbuf[n] = 0x80 | uint8(x&0x7F)\n\t\tx >>= 7\n\t}\n\tbuf[n] = uint8(x)\n\tn++\n\treturn buf[0:n]\n}\n\nfunc DecodeVarint(buf []byte) (x uint64, n int) {\n\t// x, n already 0\n\tfor shift := uint(0); shift < 64; shift += 7 {\n\t\tif n >= len(buf) {\n\t\t\treturn 0, 0\n\t\t}\n\t\tb := uint64(buf[n])\n\t\tn++\n\t\tx |= (b & 0x7F) << shift\n\t\tif (b & 0x80) == 0 {\n\t\t\treturn x, n\n\t\t}\n\t}\n\n\t// The number is too large to represent in a 64-bit value.\n\treturn 0, 0\n}\n\nfunc queryBMI2() bool\n\nvar hasBMI2 = queryBMI2()\n\nfunc decodeVarintAsmLoop(buf []byte) (x uint64, n int)\nfunc decodeVarintAsmBMI2(buf []byte) (x uint64, n int)\nfunc decodeVarintAsm1(buf []byte) (x uint64, n int)\nfunc decodeVarintAsm2(buf []byte) (x uint64, n int)\n"
  },
  {
    "path": "whichtest/whichtest",
    "content": "#!/usr/bin/python3\n\n# Copyright 2018 The Go Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style\n# license that can be found in the LICENSE file.\n\n# whichtest extracts relevant stacks from go test timeout tracebacks.\n#\n# It strips out system goroutines and parallel tests that are blocked\n# waiting for other tests to complete, and pairs up goroutines created\n# by tests when it can.\n\n# TODO: Also extract regular test failures to make this a\n# one-size-fits-all tool.\n\nimport collections\nimport sys\n\ndef parseTraces(f):\n    traces = []\n    createdBy = collections.defaultdict(list)\n    accum = 0\n    for line in f:\n        prevAccum = accum\n        if line.startswith(\"runtime stack:\"):\n            accum = 2\n        elif line.startswith(\"goroutine \"):\n            accum = 1\n        elif line.strip() == \"\":\n            accum -= 1\n        elif line.startswith(\"created by \") and accum > 0:\n            createdBy[line.split()[2]].append(traces[-1])\n        if accum > 0:\n            if prevAccum <= 0:\n                traces.append([line])\n            else:\n                traces[-1].append(line)\n    return traces, createdBy\n\ndef findTest(trace):\n    test, isPar = None, False\n    for line in trace:\n        if line.startswith(\"testing.(*T).Parallel\"):\n            # Blocked in testing.T.Parallel\n            isPar = True\n        elif line.startswith(\"testing.tRunner(\"):\n            return test, isPar\n        elif \".Test\" in line and \"(\" in line:\n            test = line.split(\"(\", 1)[0]\n    return None, False\n\ndef traceFns(trace):\n    for line in trace:\n        if line.startswith((\"\\t\", \"runtime stack:\", \"goroutine \", \"created by \")):\n            continue\n        if \"(\" in line:\n            yield line.split(\"(\", 1)[0]\n\ntraces, createdBy = parseTraces(sys.stdin)\nfor trace in traces:\n    test, isPar = findTest(trace)\n    if test is None or isPar:\n    
    continue\n    print(\"===\", test, \"===\")\n    print()\n    sys.stdout.write(\"\".join(trace))\n    print()\n    # Print goroutines that are probably associated with this test\n    # (this may have false positives).\n    for fn in traceFns(trace):\n        cbs = createdBy.get(fn)\n        if cbs is not None:\n            del createdBy[fn]\n            for cb in cbs:\n                sys.stdout.write(\"\".join(cb))\n                print()\n\n# Goroutines may have been started by functions no longer on a test's\n# stack. Print those. Leaked goroutines may also be interesting.\nanyOther = False\nfor cb, traces in sorted(createdBy.items()):\n    if cb.startswith((\"runtime.\", \"testing.\", \"time.goFunc\")):\n        continue\n    if not anyOther:\n        print(\"=== Other goroutines ===\")\n        print()\n        anyOther = True\n    for trace in traces:\n        sys.stdout.write(\"\".join(trace))\n        print()\n"
  }
]