Repository: aclements/go-misc Branch: master Commit: 86e6001a23dd Files: 270 Total size: 1.1 MB Directory structure: gitextract_zuo9c5tu/ ├── LICENSE ├── abi/ │ ├── abi.go │ ├── go.mod │ └── go.sum ├── bench/ │ ├── parse.go │ ├── parse_test.go │ └── print.go ├── benchcmd/ │ ├── main.go │ ├── rss_nounix.go │ └── rss_unix.go ├── benchmany/ │ ├── benchmany.go │ ├── commits.go │ ├── readlog.go │ ├── run.go │ ├── run_test.go │ ├── signal_notunix.go │ ├── signal_unix.go │ └── status.go ├── benchplot/ │ ├── git.go │ ├── kza.go │ ├── kza_test.go │ ├── main.go │ ├── plot.go │ ├── table.go │ └── vendor/ │ ├── github.com/ │ │ └── aclements/ │ │ ├── go-gg/ │ │ │ ├── LICENSE │ │ │ ├── README.md │ │ │ ├── generic/ │ │ │ │ ├── doc.go │ │ │ │ ├── error.go │ │ │ │ ├── order.go │ │ │ │ └── slice/ │ │ │ │ ├── concat.go │ │ │ │ ├── concat_test.go │ │ │ │ ├── convert.go │ │ │ │ ├── convert_test.go │ │ │ │ ├── cycle.go │ │ │ │ ├── doc.go │ │ │ │ ├── find.go │ │ │ │ ├── index.go │ │ │ │ ├── min.go │ │ │ │ ├── min_test.go │ │ │ │ ├── nub.go │ │ │ │ ├── select_test.go │ │ │ │ ├── seq.go │ │ │ │ ├── sort.go │ │ │ │ └── util_test.go │ │ │ ├── gg/ │ │ │ │ ├── example_scale_test.go │ │ │ │ ├── facet.go │ │ │ │ ├── group.go │ │ │ │ ├── layer.go │ │ │ │ ├── layout/ │ │ │ │ │ ├── grid.go │ │ │ │ │ └── layout.go │ │ │ │ ├── layout.go │ │ │ │ ├── mark.go │ │ │ │ ├── package.go │ │ │ │ ├── plot.go │ │ │ │ ├── render.go │ │ │ │ ├── scale.go │ │ │ │ ├── stepmode_string.go │ │ │ │ ├── testmain.go │ │ │ │ ├── text.go │ │ │ │ └── transform.go │ │ │ ├── ggstat/ │ │ │ │ ├── agg.go │ │ │ │ ├── bin.go │ │ │ │ ├── common.go │ │ │ │ ├── density.go │ │ │ │ ├── domain.go │ │ │ │ ├── ecdf.go │ │ │ │ ├── fn.go │ │ │ │ ├── loess.go │ │ │ │ ├── lsquares.go │ │ │ │ └── normalize.go │ │ │ ├── palette/ │ │ │ │ ├── blend.go │ │ │ │ ├── brewer/ │ │ │ │ │ ├── brewer.go │ │ │ │ │ ├── colorbrewer.json │ │ │ │ │ ├── genbrewer.go │ │ │ │ │ └── package.go │ │ │ │ ├── makesrgbtab.go │ │ │ │ ├── palette.go │ │ │ │ ├── srgb.go │ 
│ │ │ ├── srgbtab.go │ │ │ │ └── viridis.go │ │ │ └── table/ │ │ │ ├── concat.go │ │ │ ├── filter.go │ │ │ ├── group.go │ │ │ ├── head.go │ │ │ ├── join.go │ │ │ ├── map.go │ │ │ ├── new.go │ │ │ ├── new_test.go │ │ │ ├── pivot.go │ │ │ ├── pivot_test.go │ │ │ ├── print.go │ │ │ ├── print_test.go │ │ │ ├── sort.go │ │ │ ├── table.go │ │ │ └── table_test.go │ │ └── go-moremath/ │ │ ├── LICENSE │ │ ├── README.md │ │ ├── cmd/ │ │ │ └── dist/ │ │ │ ├── dist.go │ │ │ └── plot.go │ │ ├── fit/ │ │ │ ├── loess.go │ │ │ ├── loess_test.go │ │ │ ├── lsquares.go │ │ │ └── package.go │ │ ├── internal/ │ │ │ └── mathtest/ │ │ │ └── mathtest.go │ │ ├── mathx/ │ │ │ ├── beta.go │ │ │ ├── beta_test.go │ │ │ ├── choose.go │ │ │ ├── gamma.go │ │ │ ├── gamma_test.go │ │ │ ├── package.go │ │ │ └── sign.go │ │ ├── scale/ │ │ │ ├── err.go │ │ │ ├── interface.go │ │ │ ├── linear.go │ │ │ ├── linear_test.go │ │ │ ├── log.go │ │ │ ├── log_test.go │ │ │ ├── package.go │ │ │ ├── ticks.go │ │ │ ├── ticks_test.go │ │ │ └── util.go │ │ ├── stats/ │ │ │ ├── alg.go │ │ │ ├── deltadist.go │ │ │ ├── dist.go │ │ │ ├── dist_test.go │ │ │ ├── hist.go │ │ │ ├── hypergdist.go │ │ │ ├── hypergdist_test.go │ │ │ ├── kde.go │ │ │ ├── kde_test.go │ │ │ ├── kdeboundarymethod_string.go │ │ │ ├── kdekernel_string.go │ │ │ ├── linearhist.go │ │ │ ├── locationhypothesis_string.go │ │ │ ├── loghist.go │ │ │ ├── normaldist.go │ │ │ ├── normaldist_test.go │ │ │ ├── package.go │ │ │ ├── sample.go │ │ │ ├── sample_test.go │ │ │ ├── stream.go │ │ │ ├── tdist.go │ │ │ ├── tdist_test.go │ │ │ ├── ttest.go │ │ │ ├── ttest_test.go │ │ │ ├── udist.go │ │ │ ├── udist_test.go │ │ │ ├── utest.go │ │ │ ├── utest_test.go │ │ │ └── util_test.go │ │ └── vec/ │ │ ├── package.go │ │ └── vec.go │ └── update ├── benchscripts/ │ ├── bench-many │ ├── benchstat2 │ ├── plot-time │ └── plot-time-2 ├── buildstats/ │ ├── alg.go │ ├── go.mod │ ├── go.sum │ ├── main.go │ ├── rev.go │ └── timeflag.go ├── cl-fetch/ │ └── main.go ├── dashquery/ │ 
├── compile.go │ ├── compile_test.go │ ├── main.go │ └── xdg.go ├── findflakes/ │ ├── adtest.go │ ├── flaketest.go │ ├── geodist.go │ ├── html.go │ ├── logs.go │ ├── main.go │ ├── paths.go │ ├── text.go │ └── xdg.go ├── findtypes/ │ └── main.go ├── foreachplatform/ │ ├── go.mod │ └── main.go ├── gc-S/ │ ├── go.mod │ └── main.go ├── gcdense/ │ └── test.py ├── git-p/ │ ├── gerrit.go │ ├── git.go │ ├── go.mod │ ├── go.sum │ ├── main.go │ ├── pager.go │ ├── shell.go │ └── style.go ├── go-weave/ │ ├── amb/ │ │ ├── det.go │ │ ├── progress.go │ │ ├── rand.go │ │ └── run.go │ ├── models/ │ │ ├── cl20858.go │ │ ├── issue16083.go │ │ ├── markterm.go │ │ ├── maxtree.go │ │ ├── rescan.go │ │ ├── rwmutex.go │ │ └── yuasa.go │ └── weave/ │ ├── atomic.go │ ├── mutex.go │ ├── sema.go │ ├── tls.go │ ├── trace.go │ ├── waitgroup.go │ └── weave.go ├── go.mod ├── go.sum ├── goi/ │ └── main.go ├── gover/ │ ├── cache.go │ ├── gover.go │ └── shutil.go ├── greplogs/ │ └── main.go ├── internal/ │ └── loganal/ │ ├── classify.go │ ├── doc.go │ ├── failure.go │ └── matcher.go ├── minutes3/ │ ├── README.md │ ├── gdoc.go │ ├── gdoc_test.go │ ├── github.go │ ├── go.mod │ ├── go.sum │ ├── minutes.go │ ├── oauth.go │ └── tables.go ├── pcvaluetab/ │ ├── README.md │ ├── alt.go │ ├── alt_test.go │ ├── bench_test.go │ ├── dist.go │ ├── enc.go │ ├── go.mod │ ├── go.sum │ ├── main.go │ ├── read.go │ └── symtab.go ├── ptype/ │ └── main.go ├── rtanalysis/ │ ├── directives/ │ │ └── analysis.go │ ├── main.go │ └── systemstack/ │ └── analysis.go ├── scanpagemap.go ├── split/ │ ├── README.md │ ├── bench_test.go │ ├── example_id_test.go │ ├── example_rwmutex_test.go │ ├── examples_test.go │ ├── stub.s │ ├── value.go │ └── vlogger_test.go ├── srgb/ │ └── main.go ├── stackmapcompress.py ├── stress2/ │ ├── cmd.go │ ├── cmd_test.go │ ├── go.mod │ ├── go.sum │ ├── main.go │ ├── reporter.go │ ├── signal_notunix.go │ ├── signal_unix.go │ ├── stress.go │ └── stress_test.go ├── varint/ │ ├── README.md │ ├── asm_amd64.s 
│ ├── bench_test.go │ └── varint.go └── whichtest/ └── whichtest ================================================ FILE CONTENTS ================================================ ================================================ FILE: LICENSE ================================================ Copyright (c) 2015 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================ FILE: abi/abi.go ================================================ // Copyright 2020 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package main // To analyze kubelet: // // ( X=$PWD; cd -q ~/s/kubernetes && $X/abi $(go list -deps ./cmd/kubelet) ) import ( "flag" "fmt" "go/types" "io" "log" "math" "os" "reflect" "sort" "golang.org/x/tools/go/packages" ) const ( minIntRegs = 0 maxIntRegs = 16 // The number of floating-point registers has little // effect. Just fix it at 8. minFloatRegs = 8 maxFloatRegs = 8 // Comparison mode. modeCompare = false ) func main() { flag.Parse() pkgPaths := flag.Args() // Get the package count to give the user some feedback. cfg := &packages.Config{} cfg.Mode = packages.NeedName pkgs, err := packages.Load(cfg, pkgPaths...) if err != nil { log.Fatal(err) } if packages.PrintErrors(pkgs) > 0 { os.Exit(1) } fmt.Fprintf(os.Stderr, "checking %d packages...\n", len(pkgs)) // Parse and type-check the packages. cfg.Mode = packages.NeedName | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedTypesSizes pkgs, err = packages.Load(cfg, pkgPaths...) if err != nil { log.Fatal(err) } if packages.PrintErrors(pkgs) > 0 { os.Exit(1) } // Extract all the functions. var funcs []*types.Func var sizes types.Sizes for _, pkg := range pkgs { sizes = pkg.TypesSizes for _, obj := range pkg.TypesInfo.Defs { if obj, ok := obj.(*types.Func); ok { funcs = append(funcs, obj) } } } // Analyze. 
qtiles := []float64{0.5, 0.95, 0.99} qtileLabels := []string{"p50", "p95", "p99"} table := [][]interface{}{ {"", "", "", "stack args", "spills", "stack total"}, {"ints", "floats", "% fit", qtileLabels, qtileLabels, qtileLabels}, } if modeCompare { qtiles = []float64{0.5, 0.95, 0.99, 1.0} qtileLabels = []string{"p50", "p95", "p99", "max"} table = [][]interface{}{ {"", "", "", "", "Δ stack bytes"}, {"ints", "floats", "Δ % fit", "diff", qtileLabels, "% bigger"}, } } opts := ABIOptions{ EmptyArray: true, OneArray: true, SplitArrays: false, IgnoreBlank: false, SpillRegs: false, EmptyOnStack: true, } cmp := opts cmp.ABI0 = true const infinity = math.MaxInt32 analyze := func(opts, cmp ABIOptions) { var stackBytes []int var spillBytes []int var stackTotal []int var overheads []int // Stack bytes vs ABI0 fit := 0 // # functions that fit entirely in registers cmpFit := 0 cmpDiff := 0 // # functions with any frame difference cmpLarger := 0 // # functions with larger stack frames in cmp for _, f := range funcs { sig := f.Type().(*types.Signature) frame := opts.Assign(sig, sizes) stackBytes = append(stackBytes, frame.StackBytes) spillBytes = append(spillBytes, frame.StackSpillBytes) stackTotal = append(stackTotal, frame.StackTotal) if frame.StackBytes == 0 { fit++ } if modeCompare { // Compare to alternate options. 
frameCmp := cmp.Assign(sig, sizes) overhead := frameCmp.StackTotal - frame.StackTotal overheads = append(overheads, overhead) if frameCmp.StackBytes == 0 { cmpFit++ } if frame != frameCmp { cmpDiff++ } if overhead > 0 { cmpLarger++ } } } row := []interface{}{opts.IntRegs, opts.FloatRegs} if opts.IntRegs == infinity { row[0] = "∞" } if opts.FloatRegs == infinity { row[1] = "∞" } if modeCompare { pct := func(n int) string { return fmt.Sprintf("%5.2f%%", 100*float64(n)/float64(len(funcs))) } row = append(row, pct(cmpFit-fit)) row = append(row, []interface{}{cmpDiff, pct(cmpDiff)}) row = append(row, intQuantiles(overheads, qtiles...)) row = append(row, pct(cmpLarger)) } else { row = append(row, fmt.Sprintf("%4.1f%%", 100*float64(fit)/float64(len(funcs)))) row = append(row, intQuantiles(stackBytes, qtiles...)) row = append(row, intQuantiles(spillBytes, qtiles...)) row = append(row, intQuantiles(stackTotal, qtiles...)) } table = append(table, row) } analyze(opts, cmp) for opts.IntRegs = minIntRegs; opts.IntRegs <= maxIntRegs; opts.IntRegs++ { for opts.FloatRegs = minFloatRegs; opts.FloatRegs <= maxFloatRegs; opts.FloatRegs++ { cmp.IntRegs, cmp.FloatRegs = opts.IntRegs, opts.FloatRegs analyze(opts, cmp) } } opts.IntRegs, opts.FloatRegs = infinity, maxFloatRegs cmp.IntRegs, cmp.FloatRegs = opts.IntRegs, opts.FloatRegs analyze(opts, cmp) // Print results. 
printTable(os.Stdout, table) } type ABIOptions struct { IntRegs, FloatRegs int ABI0 bool // Use ABI0 (other options are ignored) EmptyArray bool // Empty arrays don't stack-assign OneArray bool // Size-1 arrays don't stack-assign SplitArrays bool // Stack-assign arrays separately from rest of arg IgnoreBlank bool // Skip assigning blank fields SpillRegs bool // Structure spill space as register words EmptyOnStack bool // Stack-assign zero-sized values } type frameBuilder struct { opts *ABIOptions sizes types.Sizes ptrSize int ints, floats int Frame } type Frame struct { ArgInts, ArgFloats int ResInts, ResFloats int StackBytes int // Stack bytes without spill slots StackSpillBytes int // Stack bytes of spill slots StackTotal int // Stack bytes for complete argument frame. } func (a *ABIOptions) Assign(sig *types.Signature, sizes types.Sizes) Frame { ptrSize := int(sizes.Sizeof(types.Typ[types.Uintptr])) f := frameBuilder{opts: a, sizes: sizes, ptrSize: ptrSize} // Arguments if r := sig.Recv(); r != nil { f.AddArg(r.Type(), true) } ps := sig.Params() for i := 0; i < ps.Len(); i++ { f.AddArg(ps.At(i).Type(), true) } f.ArgInts, f.ArgFloats = f.ints, f.floats f.StackBytes = align(f.StackBytes, ptrSize) f.StackSpillBytes = align(f.StackSpillBytes, ptrSize) // Results f.ints, f.floats = 0, 0 rs := sig.Results() for i := 0; i < rs.Len(); i++ { f.AddArg(rs.At(i).Type(), false) } f.StackBytes = align(f.StackBytes, ptrSize) f.ResInts, f.ResFloats = f.ints, f.floats f.StackTotal = f.StackBytes + f.StackSpillBytes return f.Frame } func (f *frameBuilder) AddArg(arg types.Type, needsSpill bool) { if f.opts.ABI0 { f.StackAssign(arg) return } si, sf, sb := f.ints, f.floats, f.StackBytes if f.RegAssign(arg, true) { if needsSpill { // Assign spill space. 
if f.opts.SpillRegs { f.StackSpillBytes += (f.ints-si)*f.ptrSize + (f.floats-sf)*8 } else { f.StackSpillBytes = align(f.StackSpillBytes, int(f.sizes.Alignof(arg))) f.StackSpillBytes += int(f.sizes.Sizeof(arg)) } } } else { // Stack-assign the whole thing. f.ints, f.floats, f.StackBytes = si, sf, sb f.StackAssign(arg) } } func (f *frameBuilder) RegAssign(arg types.Type, top bool) bool { switch arg := arg.(type) { default: log.Fatal("unknown type: ", arg) return false case *types.Named: return f.RegAssign(arg.Underlying(), top) case *types.Array: if f.opts.EmptyArray && arg.Len() == 0 { // Special-case empty arrays. return true } if f.opts.OneArray && arg.Len() == 1 { // Special-case length-1 arrays. return f.RegAssign(arg.Elem(), false) } if f.opts.SplitArrays { // Arrays can go on the stack without failing // the whole argument. f.StackAssign(arg) return true } else { // Arrays fail the whole argument. return false } case *types.Struct: for i := 0; i < arg.NumFields(); i++ { if f.opts.IgnoreBlank && arg.Field(i).Name() == "_" { continue } if !f.RegAssign(arg.Field(i).Type(), false) { return false } } case *types.Basic: switch arg.Kind() { case types.Bool, types.Int, types.Int8, types.Int16, types.Int32, types.Int64, types.Uint, types.Uint8, types.Uint16, types.Uint32, types.Uint64, types.Uintptr: // TODO: 64-bit on 32-bit arch needs two regs. f.ints++ case types.Float32, types.Float64: f.floats++ case types.Complex64, types.Complex128: f.floats += 2 case types.String: f.ints += 2 case types.UnsafePointer: f.ints++ default: log.Fatal("unknown basic kind: ", arg) } case *types.Chan, *types.Map, *types.Pointer, *types.Signature: // These are all represented as a single pointer word. f.ints++ case *types.Interface: // Two pointer words. f.ints += 2 case *types.Slice: // One pointer word plus two scalar words. f.ints += 3 } // Check for out-of-registers. 
return f.ints <= f.opts.IntRegs && f.floats <= f.opts.FloatRegs } func (f *frameBuilder) StackAssign(arg types.Type) { f.StackBytes = align(f.StackBytes, int(f.sizes.Alignof(arg))) f.StackBytes += int(f.sizes.Sizeof(arg)) } func align(x, n int) int { return (x + n - 1) &^ (n - 1) } func intQuantiles(xs []int, qs ...float64) []int { sort.Ints(xs) vs := make([]int, 0, len(qs)) for _, q := range qs { i := int(q * float64(len(xs))) if i < 0 { i = 0 } else if i >= len(xs) { i = len(xs) - 1 } vs = append(vs, xs[i]) } return vs } func floatQuantiles(xs []float64, qs ...float64) []float64 { sort.Float64s(xs) vs := make([]float64, 0, len(qs)) for _, q := range qs { i := int(q * float64(len(xs))) if i < 0 { i = 0 } else if i >= len(xs) { i = len(xs) } vs = append(vs, xs[i]) } return vs } func printTable(w io.Writer, table [][]interface{}) { type layoutNode struct { w int children []*layoutNode } type cellKey struct { row int col *layoutNode } // Stringify cells and construct layout cells := make(map[cellKey]string) layout := &layoutNode{} var walk func(ri int, row reflect.Value, node *layoutNode) int walk = func(ri int, row reflect.Value, node *layoutNode) int { if row.Kind() == reflect.Interface { row = row.Elem() } if row.Kind() != reflect.Slice { // This is a cell. val := fmt.Sprint(row) if len(val) > node.w { node.w = len(val) } cells[cellKey{ri, node}] = val return node.w } // This is a slice. totalW := 0 rowLen := row.Len() for vi := 0; vi < rowLen; vi++ { var child *layoutNode if vi < len(node.children) { child = node.children[vi] } else { child = &layoutNode{} node.children = append(node.children, child) } totalW += walk(ri, row.Index(vi), child) } // Add in interior column spacing. 
totalW += 3 * (rowLen - 1) if totalW > node.w { node.w = totalW } return node.w } for ri, row := range table { walk(ri, reflect.ValueOf(row), layout) } // Print table var printNode func(ri int, node *layoutNode, fillW int) printNode = func(ri int, node *layoutNode, fillW int) { if val, ok := cells[cellKey{ri, node}]; ok { if fillW < node.w { fillW = node.w } fmt.Fprintf(w, "| %*s ", fillW, val) return } for ci, child := range node.children { parentW := 0 if ci == len(node.children)-1 { parentW = fillW } else { fillW -= child.w } printNode(ri, child, parentW) } } for ri := range table { printNode(ri, layout, 0) fmt.Fprintf(w, "|\n") } } ================================================ FILE: abi/go.mod ================================================ module abi go 1.15 require golang.org/x/tools v0.0.0-20200815165600-90abf76919f3 ================================================ FILE: abi/go.sum ================================================ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200815165600-90abf76919f3 h1:0aScV/0rLmANzEYIhjCOi2pTvDyhZNduBUMD2q3iqs4= golang.org/x/tools v0.0.0-20200815165600-90abf76919f3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= ================================================ FILE: bench/parse.go ================================================ // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package bench reads and writes Go benchmarks results files. // // This format is specified at: // https://github.com/golang/proposal/blob/master/design/14313-benchmark-format.md package bench import ( "bufio" "io" "regexp" "strconv" "strings" "time" "unicode" "unicode/utf8" ) // Benchmark records the configuration and results of a single // benchmark run (a single line of a benchmark results file). 
type Benchmark struct { // Name is the name of the benchmark, without the "Benchmark" // prefix and without the trailing GOMAXPROCS number. Name string // Iterations is the number of times this benchmark executed. Iterations int // Config is the set of configuration pairs for this // Benchmark. These can be specified in both configuration // blocks and in individual benchmark lines. If the benchmark // name is of the form "BenchmarkX-N", the N is stripped out // and stored as "gomaxprocs" here. Config map[string]*Config // Result is the set of (unit, value) metrics for this // benchmark run. Result map[string]float64 } // Config represents a single key/value configuration pair. type Config struct { // Value is the parsed value of this configuration value. Value interface{} // RawValue is the value of this configuration value, exactly // as written in the original benchmark file. RawValue string // InBlock indicates that this configuration value was // specified in a configuration block line. Otherwise, it was // specified in the benchmark line. InBlock bool } var configRe = regexp.MustCompile(`^(\p{Ll}[^\p{Lu}\s\x85\xa0\x{1680}\x{2000}-\x{200a}\x{2028}\x{2029}\x{202f}\x{205f}\x{3000}]*):(?:[ \t]+(.*))?$`) // Parse parses a standard Go benchmark results file from r. It // returns a *Benchmark for each benchmark result line in the file. // There may be many result lines for the same benchmark name and // configuration, indicating that the benchmark was run multiple // times. // // In the returned Benchmarks, RawValue is set, but Value is always // nil. Use ParseValues to convert raw values to structured types. func Parse(r io.Reader) ([]*Benchmark, error) { benchmarks := []*Benchmark{} config := make(map[string]*Config) scanner := bufio.NewScanner(r) for scanner.Scan() { line := scanner.Text() if line == "testing: warning: no tests to run" { continue } // Configuration lines. 
m := configRe.FindStringSubmatch(line) if m != nil { config[m[1]] = &Config{RawValue: m[2], InBlock: true} continue } // Benchmark lines. if strings.HasPrefix(line, "Benchmark") { b := parseBenchmark(line, config) if b != nil { benchmarks = append(benchmarks, b) } } } if err := scanner.Err(); err != nil { return nil, err } return benchmarks, nil } func parseBenchmark(line string, gconfig map[string]*Config) *Benchmark { // TODO: Consider using scanner to avoid the slice allocation. f := strings.Fields(line) if len(f) < 4 { return nil } if f[0] != "Benchmark" { next, _ := utf8.DecodeRuneInString(f[0][len("Benchmark"):]) if !unicode.IsUpper(next) { return nil } } b := &Benchmark{ Config: make(map[string]*Config), Result: make(map[string]float64), } // Copy global config. for k, v := range gconfig { b.Config[k] = v } // Parse name and configuration. name := strings.TrimPrefix(f[0], "Benchmark") if strings.Contains(name, "/") { parts := strings.Split(name, "/") b.Name = parts[0] for _, part := range parts[1:] { if i := strings.Index(part, ":"); i >= 0 { k, v := part[:i], part[i+1:] b.Config[k] = &Config{RawValue: v} } } } else if i := strings.LastIndex(name, "-"); i >= 0 { _, err := strconv.Atoi(name[i+1:]) if err == nil { b.Name = name[:i] b.Config["gomaxprocs"] = &Config{RawValue: name[i+1:]} } else { b.Name = name } } else { b.Name = name } if b.Config["gomaxprocs"] == nil { b.Config["gomaxprocs"] = &Config{RawValue: "1"} } // Parse iterations. n, err := strconv.Atoi(f[1]) if err != nil || n <= 0 { return nil } b.Iterations = n // Parse results. for i := 2; i+2 <= len(f); i += 2 { val, err := strconv.ParseFloat(f[i], 64) if err != nil { continue } b.Result[f[i+1]] = val } return b } // ValueParser is a function that parses a string value into a // structured type or returns an error if the string cannot be parsed. 
type ValueParser func(string) (interface{}, error) // DefaultValueParsers is the default sequence of value parsers used // by ParseValues if no parsers are specified. var DefaultValueParsers = []ValueParser{ func(s string) (interface{}, error) { return strconv.Atoi(s) }, func(s string) (interface{}, error) { return strconv.ParseFloat(s, 64) }, func(s string) (interface{}, error) { return time.ParseDuration(s) }, } // ParseValues parses the raw configuration values in benchmarks into // structured types using best-effort pattern-based parsing. // // If all of the raw values for a given configuration key can be // parsed by one of the valueParsers, ParseValues sets the parsed // values to the results of that ValueParser. If multiple ValueParsers // can parse all of the raw values, it uses the earliest such parser // in the valueParsers list. // // If valueParsers is nil, it uses DefaultValueParsers. func ParseValues(benchmarks []*Benchmark, valueParsers []ValueParser) { if valueParsers == nil { valueParsers = DefaultValueParsers } // Collect all configuration keys. keys := map[string]bool{} for _, b := range benchmarks { for k := range b.Config { keys[k] = true } } // For each configuration key, try value parsers in priority order. for key := range keys { good := false tryParsers: for _, vp := range valueParsers { // Clear all values. This way we can detect // aliasing and not parse the same value // multiple times. for _, b := range benchmarks { c, ok := b.Config[key] if ok { c.Value = nil } } good = true tryValues: for _, b := range benchmarks { c, ok := b.Config[key] if !ok || c.Value != nil { continue } res, err := vp(c.RawValue) if err != nil { // Parse error. Fail this parser. good = false break tryValues } c.Value = res } if good { // This ValueParser converted all of // the values. break tryParsers } } if !good { // All of the value parsers failed. Fall back // to strings. 
for _, b := range benchmarks { c, ok := b.Config[key] if ok { c.Value = nil } } for _, b := range benchmarks { c, ok := b.Config[key] if ok && c.Value == nil { c.Value = c.RawValue } } } } } ================================================ FILE: bench/parse_test.go ================================================ // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package bench import ( "bytes" "reflect" "testing" ) func TestParse(t *testing.T) { for _, test := range []struct { input string want []*Benchmark }{ // Test basic line. {` BenchmarkX 1 2 ns/op 3 MB/s`, []*Benchmark{ {"X", 1, map[string]*Config{}, map[string]float64{"ns/op": 2, "MB/s": 3}}, }, }, // Test short name. {` Benchmark 1 2 ns/op`, []*Benchmark{ {"", 1, map[string]*Config{}, map[string]float64{"ns/op": 2}}, }, }, // Test bad names. {` Benchmarkx 1 2 ns/op benchmarkx 1 2 ns/op benchmarkX 1 2 ns/op`, []*Benchmark{}, }, // Test short lines. {` BenchmarkX BenchmarkX 1 BenchmarkX 1 2`, []*Benchmark{}, }, // Test -N. {` BenchmarkX-4 1 2 ns/op`, []*Benchmark{ {"X", 1, map[string]*Config{ "gomaxprocs": &Config{"gomaxprocs", "4", "4", false}, }, map[string]float64{"ns/op": 2}}, }, }, // Test per-benchmark config. {` BenchmarkX/a:20/b:abc 1 2 ns/op BenchmarkY/c:123 2 4 ns/op`, []*Benchmark{ {"X", 1, map[string]*Config{ "a": &Config{"a", "20", "20", false}, "b": &Config{"b", "abc", "abc", false}, }, map[string]float64{"ns/op": 2}}, {"Y", 2, map[string]*Config{ "c": &Config{"c", "123", "123", false}, }, map[string]float64{"ns/op": 4}}, }, }, // Test block config. 
{` commit: 123456 date: Jan 1 colon:colon: 42 blank: #not-config: x spa ce: x funny space: x Not-config: x BenchmarkX 1 2 ns/op`, []*Benchmark{ {"X", 1, map[string]*Config{ "commit": &Config{"commit", "123456", "123456", true}, "date": &Config{"date", "Jan 1", "Jan 1", true}, "colon:colon": &Config{"colon:colon", "42", "42", true}, "blank": &Config{"blank", "", "", true}, }, map[string]float64{"ns/op": 2}}, }, }, // Test benchmark config overriding block config. {` commit: 123456 date: Jan 1 BenchmarkX/commit:abcdef 1 2 ns/op`, []*Benchmark{ {"X", 1, map[string]*Config{ "commit": &Config{"commit", "abcdef", "abcdef", false}, "date": &Config{"date", "Jan 1", "Jan 1", true}, }, map[string]float64{"ns/op": 2}}, }, }, // Test block config overriding block config. {` commit: 123456 commit: abcdef date: Jan 1 BenchmarkX 1 2 ns/op`, []*Benchmark{ {"X", 1, map[string]*Config{ "commit": &Config{"commit", "abcdef", "abcdef", true}, "date": &Config{"date", "Jan 1", "Jan 1", true}, }, map[string]float64{"ns/op": 2}}, }, }, } { r := bytes.NewBufferString(test.input) bs, err := Parse(r) if err != nil { t.Error("unexpected Parse error", err) continue } if !reflect.DeepEqual(bs, test.want) { t.Log("want:") for _, b := range test.want { t.Logf("%#v", b) } t.Log("got:") for _, b := range bs { t.Logf("%#v", b) } t.Fail() } } } ================================================ FILE: bench/print.go ================================================ // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package bench

import (
	"fmt"
	"io"
	"os"
	"sort"
	"strings"
)

// Print writes benchmarks bs to standard output in the Go benchmark
// text format.
func Print(bs []*Benchmark) error {
	return Fprint(os.Stdout, bs)
}

// Fprint writes benchmarks bs to w in the Go benchmark text format.
// Benchmarks are grouped into configuration blocks: a "key: value"
// header is emitted whenever a benchmark's block-level configuration
// differs from the previous benchmark's, and result columns within a
// block are aligned.
func Fprint(w io.Writer, bs []*Benchmark) error {
	type kv struct {
		k, v string
	}
	type block struct {
		config []kv
		bs     []*Benchmark
	}
	// configKeys returns b's sorted config keys, restricted to
	// block-level (inBlock=true) or per-benchmark (inBlock=false)
	// configuration.
	configKeys := func(b *Benchmark, inBlock bool) []string {
		var keys []string
		for k, config := range b.Config {
			if config.InBlock == inBlock {
				keys = append(keys, k)
			}
		}
		sort.Strings(keys)
		return keys
	}

	// Split bs into configuration blocks.
	blocks := []block{}
	lastConfig := map[string]string{}
	for _, b := range bs {
		// Find changed block configuration.
		var changed []kv
		for _, k := range configKeys(b, true) {
			config := b.Config[k]
			lc, ok := lastConfig[k]
			if ok && lc == config.RawValue {
				continue
			}
			changed = append(changed, kv{k, config.RawValue})
			lastConfig[k] = config.RawValue
		}
		if len(blocks) == 0 || changed != nil {
			// Start a new configuration block.
			blocks = append(blocks, block{changed, nil})
		}
		// Add benchmark to latest block.
		bbs := &blocks[len(blocks)-1].bs
		*bbs = append(*bbs, b)
	}

	// Format each configuration block.
	for i, block := range blocks {
		// Print configuration values.
		if i > 0 {
			if _, err := fmt.Fprint(w, "\n"); err != nil {
				return err
			}
		}
		for _, kv := range block.config {
			// TODO: Syntax check.
			if _, err := fmt.Fprintf(w, "%s: %s\n", kv.k, kv.v); err != nil {
				return err
			}
		}
		if len(block.config) > 0 {
			if _, err := fmt.Fprint(w, "\n"); err != nil {
				return err
			}
		}

		// Construct benchmark lines.
		//
		// Fixed: this was make([][]string, len(block.bs)), which
		// combined with append below left len(block.bs) nil rows
		// at the front of lines. The nil rows were harmless on
		// output (an empty row prints nothing) but wasteful and
		// fragile; allocate length 0 with capacity instead.
		lines := make([][]string, 0, len(block.bs))
		for _, b := range block.bs {
			// Construct benchmark name.
			name := []string{"Benchmark" + b.Name}
			gomaxprocs, haveGMP := "", false
			for _, k := range configKeys(b, false) {
				config := b.Config[k]
				if k == "gomaxprocs" {
					gomaxprocs = config.RawValue
					haveGMP = true
					continue
				}
				// TODO: Syntax check.
				name = append(name, fmt.Sprintf("%s:%s", k, config.RawValue))
			}
			if haveGMP && gomaxprocs != "1" {
				if len(name) == 1 {
					// Use short form.
					name[0] = fmt.Sprintf("%s-%s", name[0], gomaxprocs)
				} else {
					name = append(name, fmt.Sprintf("gomaxprocs:%s", gomaxprocs))
				}
			}

			// Construct results.
			line := []string{
				strings.Join(name, "/"),
				fmt.Sprint(b.Iterations),
			}
			resultKeys := []string{}
			for k := range b.Result {
				resultKeys = append(resultKeys, k)
			}
			sort.Sort(resultKeySorter(resultKeys))
			for _, k := range resultKeys {
				result := b.Result[k]
				// TODO: Syntax check.
				line = append(line, fmt.Sprint(result), k)
			}
			lines = append(lines, line)
		}

		// Compute column widths.
		widths := make([]int, 0)
		for _, line := range lines {
			for i, elt := range line {
				if i >= len(widths) {
					widths = append(widths, len(elt))
				} else if len(elt) > widths[i] {
					widths[i] = len(elt)
				}
			}
		}

		// Print lines.
		for _, line := range lines {
			for i, elt := range line {
				var err error
				p := widths[i]
				if i == 1 || i >= 2 && i%2 == 0 {
					// Right align the iteration count and
					// each numeric result value.
					_, err = fmt.Fprintf(w, "%*s ", p, elt)
				} else if i < len(line)-1 {
					// Left align and pad.
					_, err = fmt.Fprintf(w, "%-*s ", p, elt)
				} else {
					// Left align, no pad, EOL.
					_, err = fmt.Fprintf(w, "%s\n", elt)
				}
				if err != nil {
					return err
				}
			}
		}
	}
	return nil
}

// fixedKeys forces well-known result units to sort before all other
// units (which sort alphabetically with implicit weight 0).
var fixedKeys = map[string]int{
	"ns/op": -2,
	"MB/s":  -1,
}

// resultKeySorter sorts result units, placing fixedKeys first.
type resultKeySorter []string

func (s resultKeySorter) Len() int { return len(s) }
func (s resultKeySorter) Less(i, j int) bool {
	if fixedKeys[s[i]] != fixedKeys[s[j]] {
		return fixedKeys[s[i]] < fixedKeys[s[j]]
	}
	return s[i] < s[j]
}
func (s resultKeySorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

================================================
FILE: benchcmd/main.go
================================================
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Command benchcmd times a shell command using Go benchmark format.
package main

import (
	"flag"
	"fmt"
	"os"
	"os/exec"
	"time"
)

// main runs the given command -n times, emitting one line of Go
// benchmark output per run on stdout. The command's own stdout/stderr
// pass through unchanged.
func main() {
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage: %s [-n iters] benchname cmd...\n", os.Args[0])
		flag.PrintDefaults()
	}
	n := flag.Int("n", 5, "iterations")
	flag.Parse()
	if flag.NArg() < 2 {
		flag.Usage()
		os.Exit(2)
	}
	benchname := flag.Arg(0)
	args := flag.Args()[1:]
	for i := 0; i < *n; i++ {
		cmd := exec.Command(args[0], args[1:]...)
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		before := time.Now()
		if err := cmd.Run(); err != nil {
			fmt.Println(err)
			os.Exit(1)
		}
		after := time.Now()
		fmt.Printf("Benchmark%s\t", benchname)
		// Each run is reported as a single benchmark iteration, so
		// the wall-clock duration of the run is the ns/op value.
		// (%d on a time.Duration prints its int64 nanosecond count.)
		fmt.Printf("%d\t%d ns/op", 1, after.Sub(before))
		fmt.Printf("\t%d user-ns/op\t%d sys-ns/op", cmd.ProcessState.UserTime(), cmd.ProcessState.SystemTime())
		if maxrss, ok := getMaxRSS(cmd.ProcessState); ok {
			fmt.Printf("\t%d peak-RSS-bytes", maxrss)
		}
		fmt.Printf("\n")
	}
}

================================================
FILE: benchcmd/rss_nounix.go
================================================
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !unix

package main

import "os"

// getMaxRSS reports the peak resident set size of the exited process.
// On non-Unix platforms this information is unavailable, so ok is
// always false.
func getMaxRSS(ps *os.ProcessState) (bytes uint64, ok bool) {
	return 0, false
}

================================================
FILE: benchcmd/rss_unix.go
================================================
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build unix

package main

import (
	"os"
	"runtime"
	"syscall"
)

// getMaxRSS reports the peak resident set size, in bytes, of the
// exited process described by ps, derived from getrusage(2)'s Maxrss
// field.
func getMaxRSS(ps *os.ProcessState) (bytes uint64, ok bool) {
	ru, ok := ps.SysUsage().(*syscall.Rusage)
	if !ok {
		return 0, false
	}
	// Maxrss is reported in platform-specific units; compute the
	// multiplier that converts it to bytes.
	var rssToBytes uint64
	switch runtime.GOOS {
	default:
		return 0, false
	case "aix", "android", "dragonfly", "freebsd", "linux", "netbsd", "openbsd":
		rssToBytes = 1 << 10 // kilobytes
	case "darwin", "ios":
		rssToBytes = 1 // already bytes
	case "illumos", "solaris":
		rssToBytes = uint64(syscall.Getpagesize()) // pages
	}
	return uint64(ru.Maxrss) * rssToBytes, true
}

================================================
FILE: benchmany/benchmany.go
================================================
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Benchmany runs Go benchmarks across many git commits.
//
// Usage:
//
//	benchmany [-C git-dir] [-n iterations] <revision range>
//
// benchmany runs the benchmarks in the current directory <iterations>
// times for each commit in <revision range> and writes the benchmark
// results to bench.log. Benchmarks may be Go testing framework
// benchmarks or benchmarks from golang.org/x/benchmarks.
//
// <revision range> can be either a list of individual commits or
// a revision range. For the spelling of a revision range, see
// "SPECIFYING RANGES" in gitrevisions(7). For exact details, see the
// --no-walk option to git-rev-list(1).
//
// Benchmany will check out each revision in git-dir. The current
// directory may or may not be in the same git repository as git-dir.
// If git-dir refers to a Go installation, benchmany will run
// make.bash at each revision; otherwise, it assumes go test can
// rebuild the necessary dependencies. Benchmany also supports using
// gover (https://godoc.org/github.com/aclements/go-misc/gover) to
// save and reuse Go build trees. This is useful for saving time
// across multiple benchmark runs and for benchmarks that depend on
// the Go tree itself (such as compiler benchmarks).
//
// Benchmany supports multiple ways of prioritizing the order in which
// individual iterations are run. By default, it runs in "sequential"
// mode: it runs the first iteration of all benchmarks, then the
// second, and so forth. It also supports a "spread" mode designed to
// quickly get coverage for large sets of revisions. This mode
// randomizes the order to run iterations in, but biases this order
// toward covering an evenly distributed set of revisions early and
// finishing all of the iterations of the revisions it has started on
// before moving on to new revisions. This way, if benchmany is
// interrupted, the revisions benchmarked cover the space more-or-less
// evenly. Finally, it supports a "metric" mode, which zeroes in on
// changes in a benchmark metric by selecting the commit half way
// between the pair of commits with the biggest difference in the
// metric. This is like "git bisect", but for performance.
//
// Benchmany is safe to interrupt. If it is restarted, it will parse
// the benchmark log files to recover its state.
package main

import (
	"flag"
	"fmt"
	"os"
	"os/exec"
	"strings"
)

// gitDir is the -C flag: the git tree to check revisions out in.
var gitDir string

// dryRun is the -dry-run flag: print commands instead of running them.
var dryRun bool

// maxFails is the maximum number of benchmark run failures to
// tolerate for a commit before giving up on trying to benchmark that
// commit. Build failures always disqualify a commit.
const maxFails = 5

func main() {
	flag.Parse()
	doRun()
}

// git runs git subcommand subcmd and returns its stdout. If git
// fails, it prints the failure and exits.
//
// In dry-run mode, read-only subcommands (rev-parse, rev-list, show)
// are still executed so the rest of the program has real data to work
// with; all other subcommands are only printed and return "".
func git(subcmd string, args ...string) string {
	gitargs := []string{}
	if gitDir != "" {
		gitargs = append(gitargs, "-C", gitDir)
	}
	gitargs = append(gitargs, subcmd)
	gitargs = append(gitargs, args...)
	cmd := exec.Command("git", gitargs...)
	cmd.Stderr = os.Stderr
	if dryRun {
		dryPrint(cmd)
		if !(subcmd == "rev-parse" || subcmd == "rev-list" || subcmd == "show") {
			return ""
		}
	}
	out, err := cmd.Output()
	if err != nil {
		fmt.Fprintf(os.Stderr, "git %s failed: %s\n", shellEscapeList(gitargs), err)
		os.Exit(1)
	}
	return string(out)
}

// dryPrint prints cmd to stderr as a shell-escaped command line,
// wrapped in (cd dir && ...) if cmd has a working directory.
func dryPrint(cmd *exec.Cmd) {
	out := shellEscape(cmd.Path)
	for _, a := range cmd.Args[1:] {
		out += " " + shellEscape(a)
	}
	if cmd.Dir != "" {
		out = fmt.Sprintf("(cd %s && %s)", shellEscape(cmd.Dir), out)
	}
	fmt.Fprintln(os.Stderr, out)
}

// shellEscape quotes x for safe use as a single shell word, using
// single quotes only when x contains characters outside a known-safe
// set.
func shellEscape(x string) string {
	if len(x) == 0 {
		return "''"
	}
	for _, r := range x {
		if 'a' <= r && r <= 'z' || 'A' <= r && r <= 'Z' || '0' <= r && r <= '9' || strings.ContainsRune("@%_-+:,./", r) {
			continue
		}
		// Unsafe character. Single-quote the whole string,
		// escaping embedded single quotes as '"'"'.
		return "'" + strings.Replace(x, "'", "'\"'\"'", -1) + "'"
	}
	return x
}

// shellEscapeList shell-escapes each element of xs and joins them
// with spaces.
func shellEscapeList(xs []string) string {
	out := make([]string, len(xs))
	for i, x := range xs {
		out[i] = shellEscape(x)
	}
	return strings.Join(out, " ")
}

// exists reports whether path exists. Note that stat errors other
// than "not exist" (e.g. permission errors) count as existing.
func exists(path string) bool {
	_, err := os.Stat(path)
	return !os.IsNotExist(err)
}

// trimNL returns s with any trailing newlines removed.
func trimNL(s string) string {
	return strings.TrimRight(s, "\n")
}

// indent returns s with each line indented by four spaces. If s is
// non-empty, the returned string is guaranteed to end in a "\n".
func indent(s string) string {
	if len(s) == 0 {
		return s
	}
	if strings.HasSuffix(s, "\n") {
		s = s[:len(s)-1]
	}
	return "    " + strings.Replace(s, "\n", "\n    ", -1) + "\n"
}

// lines splits s in to lines. It omits a final blank line, if any.
func lines(s string) []string {
	l := strings.Split(s, "\n")
	if len(l) > 0 && l[len(l)-1] == "" {
		l = l[:len(l)-1]
	}
	return l
}

================================================
FILE: benchmany/commits.go
================================================
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"
	"os/user"
	"path/filepath"
	"regexp"
	"strings"
	"time"
)

// commitInfo tracks the benchmarking state of a single git commit.
type commitInfo struct {
	hash       string
	commitDate time.Time
	gover      bool // a gover-cached build exists for this commit
	logPath    string

	// count and fails are the number of successful and failed
	// benchmark runs recovered from the log; buildFailed marks a
	// permanent build failure.
	count, fails int
	buildFailed  bool
}

// getCommits returns the commit info for all of the revisions in the
// given git revision range, where the revision range is spelled as
// documented in gitrevisions(7). Commits are returned in reverse
// chronological order, most recent commit first (the same as
// git-rev-list(1)).
func getCommits(revRange []string, logPath string) []*commitInfo {
	// Get commit sequence.
	args := append(append([]string{"--no-walk"}, revRange...), "--")
	hashes := lines(git("rev-list", args...))

	commits := make([]*commitInfo, len(hashes))
	commitMap := make(map[string]*commitInfo)
	for i, hash := range hashes {
		commits[i] = &commitInfo{
			hash:    hash,
			logPath: logPath,
		}
		commitMap[hash] = commits[i]
	}

	// Get commit dates.
	//
	// TODO: This can produce a huge command line.
	args = append([]string{"-s", "--format=format:%cI"}, hashes...)
	dates := lines(git("show", args...))
	for i := range commits {
		d, err := time.Parse(time.RFC3339, dates[i])
		if err != nil {
			log.Fatalf("cannot parse commit date: %v", err)
		}
		commits[i].commitDate = d
	}

	// Get gover-cached builds. It's okay if this fails.
	if fis, err := ioutil.ReadDir(goverDir()); err == nil {
		for _, fi := range fis {
			if ci := commitMap[fi.Name()]; ci != nil && fi.IsDir() {
				ci.gover = true
			}
		}
	}

	// Load current benchmark state.
	logf, err := os.Open(logPath)
	if err != nil {
		if !os.IsNotExist(err) {
			log.Fatalf("opening %s: %v", logPath, err)
		}
	} else {
		defer logf.Close()
		parseLog(commitMap, logf)
	}

	return commits
}

// goverDir returns the directory containing gover-cached builds.
func goverDir() string { cache := os.Getenv("XDG_CACHE_HOME") if cache == "" { home := os.Getenv("HOME") if home == "" { u, err := user.Current() if err != nil { home = u.HomeDir } } cache = filepath.Join(home, ".cache") } return filepath.Join(cache, "gover") } // parseLog parses benchmark runs and failures from r and updates // commits in commitMap. func parseLog(commitMap map[string]*commitInfo, r io.Reader) { scanner := bufio.NewScanner(r) for scanner.Scan() { b := scanner.Bytes() switch { case bytes.HasPrefix(b, []byte("commit: ")): hash := scanner.Text()[len("commit: "):] if ci := commitMap[hash]; ci != nil { ci.count++ } case bytes.HasPrefix(b, []byte("# FAILED at ")): hash := scanner.Text()[len("# FAILED at "):] if ci := commitMap[hash]; ci != nil { ci.fails++ } case bytes.HasPrefix(b, []byte("# BUILD FAILED at ")): hash := scanner.Text()[len("# BUILD FAILED at "):] if ci := commitMap[hash]; ci != nil { ci.buildFailed = true } } } if err := scanner.Err(); err != nil { log.Fatal("parsing benchmark log: ", err) } } // binPath returns the file name of the binary for this commit. func (c *commitInfo) binPath() string { // TODO: This assumes the short commit hash is unique. return fmt.Sprintf("bench.%s", c.hash[:7]) } // failed returns whether commit c has failed and should not be run // any more. func (c *commitInfo) failed() bool { return c.buildFailed || c.fails >= maxFails } // runnable returns whether commit c needs to be benchmarked at least // one more time. func (c *commitInfo) runnable() bool { return !c.buildFailed && c.fails < maxFails && c.count < run.iterations } // partial returns true if this commit is both runnable and already // has some runs. func (c *commitInfo) partial() bool { return c.count > 0 && c.runnable() } var commitRe = regexp.MustCompile(`^commit: |^# FAILED|^# BUILD FAILED`) // cleanLog escapes lines in l that may confuse the log parser and // makes sure l is newline terminated. 
func cleanLog(l string) string {
	// Prefix any line that looks like a parser directive with "# ".
	l = commitRe.ReplaceAllString(l, "# $0")
	if !strings.HasSuffix(l, "\n") {
		l += "\n"
	}
	return l
}

// logRun updates c with a successful run.
func (c *commitInfo) logRun(out string) {
	var log bytes.Buffer
	fmt.Fprintf(&log, "commit: %s\n", c.hash)
	fmt.Fprintf(&log, "commit-time: %s\n", c.commitDate.UTC().Format(time.RFC3339))
	fmt.Fprintf(&log, "\n%s\n", cleanLog(out))
	c.writeLog(log.String())
	c.count++
}

// logFailed updates c with a failed run. If buildFailed is true, this
// is considered a permanent failure and buildFailed is set.
func (c *commitInfo) logFailed(buildFailed bool, out string) {
	typ := "FAILED"
	if buildFailed {
		typ = "BUILD FAILED"
	}
	c.writeLog(fmt.Sprintf("# %s at %s\n# %s\n", typ, c.hash, strings.Replace(cleanLog(out), "\n", "\n# ", -1)))
	if buildFailed {
		c.buildFailed = true
	} else {
		c.fails++
	}
}

// writeLog appends msg to c's log file. The caller is responsible for
// properly formatting it.
func (c *commitInfo) writeLog(msg string) {
	logFile, err := os.OpenFile(c.logPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
	if err != nil {
		log.Fatalf("opening %s: %v", c.logPath, err)
	}
	if _, err := logFile.WriteString(msg); err != nil {
		log.Fatalf("writing to %s: %v", c.logPath, err)
	}
	if err := logFile.Close(); err != nil {
		log.Fatalf("closing %s: %v", c.logPath, err)
	}
}

================================================
FILE: benchmany/readlog.go
================================================
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"io/ioutil"
	"log"
	"strconv"
	"strings"

	"github.com/aclements/go-moremath/stats"
)

// ComputeStats updates the derived statistics in stat from the raw
// samples in stat.Values.
func (stat *Benchstat) ComputeStats() {
	stat.Mean = stats.Mean(stat.Values)
}

// A Benchstat is the metrics along one axis (e.g., ns/op or MB/s)
// for all runs of a specific benchmark.
type Benchstat struct {
	Unit   string
	Values []float64 // metrics
	Mean   float64   // mean of Values
}

// A BenchKey identifies one metric (e.g., "ns/op", "B/op") from one
// benchmark (function name sans "Benchmark" prefix) in one
// configuration (input file name).
type BenchKey struct {
	Config, Benchmark, Unit string
}

// A Collection is an ordered set of Benchstats indexed by BenchKey,
// tracking the distinct configs, benchmarks, and units seen.
type Collection struct {
	Stats map[BenchKey]*Benchstat

	// Keys gives all keys of Stats in the order added.
	Keys []BenchKey

	// Configs, Benchmarks, and Units give the set of configs,
	// benchmarks, and units from the keys in Stats in an order
	// meant to match the order the benchmarks were read in.
	Configs, Benchmarks, Units []string

	// ConfigSet, BenchmarkSet, and UnitSet are set
	// representations of Configs, Benchmarks, and Units.
	ConfigSet, BenchmarkSet, UnitSet map[string]bool
}

// AddStat returns the Benchstat for key, creating and registering a
// new one if key has not been seen before.
func (c *Collection) AddStat(key BenchKey) *Benchstat {
	stat, found := c.Stats[key]
	if !found {
		c.addKey(key)
		stat = &Benchstat{Unit: key.Unit}
		c.Stats[key] = stat
	}
	return stat
}

// addKey records key in Keys and folds its components into the
// ordered config/benchmark/unit lists and their set counterparts.
func (c *Collection) addKey(key BenchKey) {
	record := func(list *[]string, seen map[string]bool, val string) {
		if !seen[val] {
			seen[val] = true
			*list = append(*list, val)
		}
	}
	c.Keys = append(c.Keys, key)
	record(&c.Configs, c.ConfigSet, key.Config)
	record(&c.Benchmarks, c.BenchmarkSet, key.Benchmark)
	record(&c.Units, c.UnitSet, key.Unit)
}

// Filter returns a new Collection containing only the stats whose
// keys match every non-empty field of key. Matching stats are shared
// with c, not copied.
func (c *Collection) Filter(key BenchKey) *Collection {
	match := func(want, got string) bool {
		return want == "" || want == got
	}
	out := NewCollection()
	for _, k := range c.Keys {
		if match(key.Config, k.Config) && match(key.Benchmark, k.Benchmark) && match(key.Unit, k.Unit) {
			out.addKey(k)
			out.Stats[k] = c.Stats[k]
		}
	}
	return out
}

// NewCollection returns an empty, ready-to-use Collection.
func NewCollection() *Collection {
	c := new(Collection)
	c.Stats = make(map[BenchKey]*Benchstat)
	c.ConfigSet = make(map[string]bool)
	c.BenchmarkSet = make(map[string]bool)
	c.UnitSet = make(map[string]bool)
	return c
}

// readFiles reads a set of benchmark files as a Collection.
func readFiles(files ...string) *Collection { c := NewCollection() for _, file := range files { readFile(file, c) } return c } var unitOfXMetric = map[string]string{ "time": "ns/op", "allocated": "allocated bytes/op", // ΔMemStats.TotalAlloc / N "allocs": "allocs/op", // ΔMemStats.Mallocs / N "sys-total": "bytes from system", // MemStats.Sys "sys-heap": "heap bytes from system", // MemStats.HeapSys "sys-stack": "stack bytes from system", // MemStats.StackSys "sys-gc": "GC bytes from system", // MemStats.GCSys "sys-other": "other bytes from system", // MemStats.OtherSys+MSpanSys+MCacheSys+BuckHashSys "gc-pause-total": "STW ns/op", // ΔMemStats.PauseTotalNs / N "gc-pause-one": "STW ns/GC", // ΔMemStats.PauseTotalNs / ΔNumGC "rss": "max RSS bytes", // Rusage.Maxrss * 1<<10 "cputime": "user+sys ns/op", // Rusage.Utime+Stime "virtual-mem": "peak VM bytes", // /proc/self/status VmPeak } // readFile reads a set of benchmarks from a file in to a Collection. func readFile(file string, c *Collection) { c.Configs = append(c.Configs, file) key := BenchKey{Config: file} text, err := ioutil.ReadFile(file) if err != nil { log.Fatal(err) } for _, line := range strings.Split(string(text), "\n") { if strings.HasPrefix(line, "GOPERF-METRIC:") { // x/benchmarks-style output. 
line := line[14:] f := strings.Split(line, "=") val, err := strconv.ParseFloat(f[1], 64) if err != nil { continue } key.Benchmark = f[0] key.Unit = unitOfXMetric[f[0]] if key.Unit == "" { continue } stat := c.AddStat(key) stat.Values = append(stat.Values, val) continue } f := strings.Fields(line) if len(f) < 4 { continue } name := f[0] if !strings.HasPrefix(name, "Benchmark") { continue } name = strings.TrimPrefix(name, "Benchmark") n, _ := strconv.Atoi(f[1]) if n == 0 { continue } key.Benchmark = name for i := 2; i+2 <= len(f); i += 2 { val, err := strconv.ParseFloat(f[i], 64) if err != nil { continue } key.Unit = f[i+1] stat := c.AddStat(key) stat.Values = append(stat.Values, val) } } } ================================================ FILE: benchmany/run.go ================================================ // Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package main import ( "bytes" "flag" "fmt" "io" "io/ioutil" "log" "math" "math/rand" "os" "os/exec" "path/filepath" "regexp" "strings" "time" "github.com/aclements/go-misc/bench" "github.com/aclements/go-moremath/stats" ) // TODO: Check CPU performance governor before each benchmark. // TODO: Support running pre-built binaries without specific hashes. // This is useful for testing things that aren't yet committed or that // require unusual build steps. var run struct { order string metric string benchFlags string buildCmd string iterations int saveTree bool timeout time.Duration clean bool cleanFlags string logPath string binDir string } func init() { // TODO: This makes a mess of flags during testing. 
isXBenchmark := false if abs, _ := os.Getwd(); strings.HasSuffix(abs, "golang.org/x/benchmarks/bench") { isXBenchmark = true } f := flag.CommandLine flag.Usage = func() { fmt.Fprintf(os.Stderr, "Usage: %s [flags] \n", os.Args[0]) f.PrintDefaults() } f.StringVar(&run.order, "order", "seq", "run benchmarks in `order`, which must be one of: seq, spread, metric") f.StringVar(&run.metric, "metric", "ns/op", "for -order metric, the benchmark metric to find differences in") f.StringVar(&gitDir, "C", "", "run git in `dir`") defaultBenchFlags := "-test.run NONE -test.bench ." if isXBenchmark { defaultBenchFlags = "" } f.StringVar(&run.benchFlags, "benchflags", defaultBenchFlags, "pass `flags` to benchmark") defaultBuildCmd := "go test -c" if isXBenchmark { defaultBuildCmd = "go build" } f.StringVar(&run.buildCmd, "buildcmd", defaultBuildCmd, "build benchmark using \"`cmd` -o \"") f.IntVar(&run.iterations, "n", 5, "run each benchmark `N` times") f.StringVar(&run.logPath, "o", "", "write benchmark results to `file` (default \"bench.log\" in -d directory)") f.StringVar(&run.binDir, "d", ".", "write binaries to `directory`") f.BoolVar(&run.saveTree, "save-tree", false, "save Go trees using gover and run benchmarks under saved trees") f.DurationVar(&run.timeout, "timeout", 30*time.Minute, "time out a run after `duration`") f.BoolVar(&dryRun, "dry-run", false, "print commands but do not run them") f.BoolVar(&run.clean, "clean", false, "run \"git clean -f\" after every checkout") f.StringVar(&run.cleanFlags, "cleanflags", "", "add `flags` to git clean command") } func doRun() { if flag.NArg() < 1 { flag.Usage() os.Exit(2) } var pickCommit func([]*commitInfo) *commitInfo switch run.order { case "seq": pickCommit = pickCommitSeq case "spread": pickCommit = pickCommitSpread case "metric": pickCommit = pickCommitMetric default: fmt.Fprintf(os.Stderr, "unknown order: %s\n", run.order) flag.Usage() os.Exit(2) } if run.logPath == "" { run.logPath = filepath.Join(run.binDir, "bench.log") 
} commits := getCommits(flag.Args(), run.logPath) // Write header block to log. if len(commits) > 0 { header := new(bytes.Buffer) fmt.Fprintf(header, "# Run started at %s\n", time.Now()) writeHeader(header) fmt.Fprintf(header, "\n") commits[0].writeLog(header.String()) } // Always run git from the top level of the git tree. Some // commands, like git clean, care about this. gitDir = trimNL(git("rev-parse", "--show-toplevel")) status := NewStatusReporter() defer status.Stop() for { doneIters, totalIters, partialCommits, doneCommits, failedCommits := runStats(commits) unstartedCommits := len(commits) - (partialCommits + doneCommits + failedCommits) msg := fmt.Sprintf("%d/%d runs, %d unstarted+%d partial+%d done+%d failed commits", doneIters, totalIters, unstartedCommits, partialCommits, doneCommits, failedCommits) // TODO: Count builds and runs separately. status.Progress(msg, float64(doneIters)/float64(totalIters)) commit := pickCommit(commits) if commit == nil { break } runBenchmark(commit, status) } } func writeHeader(w io.Writer) { goos, err := exec.Command("go", "env", "GOOS").Output() if err != nil { log.Fatalf("error running go env GOOS: %s", err) } fmt.Fprintf(w, "goos: %s\n", strings.TrimSpace(string(goos))) goarch, err := exec.Command("go", "env", "GOARCH").Output() if err != nil { log.Fatalf("error running go env GOARCH: %s", err) } fmt.Fprintf(w, "goarch: %s\n", strings.TrimSpace(string(goarch))) kernel, err := exec.Command("uname", "-sr").Output() if err != nil { log.Fatalf("error running uname -sr: %s", err) } fmt.Fprintf(w, "uname-sr: %s\n", strings.TrimSpace(string(kernel))) cpuinfo, err := ioutil.ReadFile("/proc/cpuinfo") if err == nil { subs := regexp.MustCompile(`(?m)^model name\s*:\s*(.*)`).FindSubmatch(cpuinfo) if subs != nil { fmt.Fprintf(w, "cpu: %s\n", string(subs[1])) } } fmt.Fprintf(w, "tool: benchmany\n") } func runStats(commits []*commitInfo) (doneIters, totalIters, partialCommits, doneCommits, failedCommits int) { for _, c := range 
commits { if c.count >= run.iterations { // Don't care if it failed. doneIters += c.count totalIters += c.count } else if c.runnable() { doneIters += c.count totalIters += run.iterations } if c.count == run.iterations { doneCommits++ } else if c.runnable() { if c.count != 0 { partialCommits++ } } else { failedCommits++ } } return } // pickCommitSeq picks the next commit to run based on the most recent // commit with the fewest iterations. func pickCommitSeq(commits []*commitInfo) *commitInfo { var minCommit *commitInfo for _, commit := range commits { if !commit.runnable() { continue } if minCommit == nil || commit.count < minCommit.count { minCommit = commit } } return minCommit } // pickCommitSpread picks the next commit to run from commits using an // algorithm that spreads out the runs. func pickCommitSpread(commits []*commitInfo) *commitInfo { // Assign weights to each commit. This is thoroughly // heuristic, but it's geared toward either increasing the // iteration count of commits that we have, or picking a new // commit so as to spread out the commits we have. weights := make([]int, len(commits)) totalWeight := 0 nPartial := 0 for _, commit := range commits { if commit.partial() { nPartial++ } } if nPartial >= len(commits)/10 { // Limit the number of partially completed revisions // to 10% by only choosing a partial commit in this // case. for i, commit := range commits { if commit.partial() { // Bias toward commits that are // further from done. weights[i] = run.iterations - commit.count } } } else { // Pick a new commit weighted by its distance from a // commit that we already have. // Find distance from left to right. distance := len(commits) haveAny := false for i, commit := range commits { if commit.count > 0 { distance = 1 haveAny = true } else if commit.runnable() { distance++ } weights[i] = distance } // Find distance from right to left. 
distance = len(commits) for i := len(commits) - 1; i >= 0; i-- { commit := commits[i] if commit.count > 0 { distance = 1 } else if commit.runnable() { distance++ } if distance < weights[i] { weights[i] = distance } } if !haveAny { // We don't have any commits. Pick one uniformly. for i := range commits { weights[i] = 1 } } // Zero non-runnable commits. for i, commit := range commits { if !commit.runnable() { weights[i] = 0 } } } for _, w := range weights { totalWeight += w } if totalWeight == 0 { return nil } // Pick a commit based on the weights. x := rand.Intn(totalWeight) cumulative := 0 for i, w := range weights { cumulative += w if cumulative > x { return commits[i] } } panic("unreachable") } func pickCommitMetric(commits []*commitInfo) *commitInfo { // If there are any partial commits, finish them up. for _, c := range commits { if c.partial() { return c } } // Remove failed commits. This makes it easier to avoid // picking a failed commit below. ncommits := []*commitInfo{} for _, c := range commits { if !c.failed() { ncommits = append(ncommits, c) } } commits = ncommits if len(ncommits) == 0 { return nil } // Make sure we've run the most recent commit. if commits[0].runnable() { return commits[0] } // Make sure we've run the earliest commit. if c := commits[len(commits)-1]; c.runnable() { return c } // We're bounded from both sides and every commit we've run // has the best stats we're going to get. Parse run.metric // from the log file. 
logf, err := os.Open(run.logPath) if err != nil { log.Fatal("opening benchmark log: ", err) } defer logf.Close() bs, err := bench.Parse(logf) if err != nil { log.Fatal("parsing benchmark log for metrics: ", err) } results := make(map[string]map[string][]float64) for _, b := range bs { var hash string if commitConfig, ok := b.Config["commit"]; !ok { continue } else { hash = commitConfig.RawValue } result, ok := b.Result[run.metric] if !ok { continue } if results[hash] == nil { results[hash] = make(map[string][]float64) } results[hash][b.Name] = append(results[hash][b.Name], result) } geomeans := make(map[string]float64) for hash, benches := range results { var means []float64 for _, results := range benches { means = append(means, stats.Mean(results)) } geomeans[hash] = stats.GeoMean(means) } // Find the pair of commits with the biggest difference in the // metric. prevI := -1 maxDiff, maxMid := -1.0, (*commitInfo)(nil) for i, c := range commits { if c.count == 0 || geomeans[c.hash] == 0 { continue } if prevI == -1 { prevI = i continue } if i > prevI+1 { // TODO: This isn't branch-aware. We should // only compare commits with an ancestry // relationship. diff := math.Abs(geomeans[c.hash] - geomeans[commits[prevI].hash]) if diff > maxDiff { maxDiff = diff maxMid = commits[(prevI+i)/2] } } prevI = i } return maxMid } // runBenchmark runs the benchmark at commit. It updates commit.count, // commit.fails, and commit.buildFailed as appropriate and writes to // the commit log to record the outcome. func runBenchmark(commit *commitInfo, status *StatusReporter) { // Build the benchmark if necessary. binPath := filepath.Join(run.binDir, commit.binPath()) if !exists(binPath) { runStatus(status, commit, "building") // Check out the appropriate commit. This is necessary // even if we're using gover because the benchmark // itself might have changed (e.g., bug fixes). 
git("checkout", "-q", commit.hash) if run.clean { args := append([]string{"-f"}, strings.Fields(run.cleanFlags)...) git("clean", args...) } var buildCmd []string if commit.gover { buildCmd = []string{"gover", "with", commit.hash} } else { // If this is the Go toolchain, do a full // make.bash. Otherwise, we assume that go // test -c will build the necessary // dependencies. if exists(filepath.Join(gitDir, "src", "make.bash")) { cmd := exec.Command("./make.bash") cmd.Dir = filepath.Join(gitDir, "src") if dryRun { dryPrint(cmd) } else if out, err := combinedOutputTimeout(cmd); err != nil { detail := indent(string(out)) + indent(err.Error()) fmt.Fprintf(os.Stderr, "failed to build toolchain at %s:\n%s", commit.hash, detail) commit.logFailed(true, detail) return } if run.saveTree && doGoverSave() == nil { commit.gover = true } } // Assume build command is in $PATH. // // TODO: Force PATH if we built the toolchain. buildCmd = []string{} } buildCmd = append(buildCmd, strings.Fields(run.buildCmd)...) buildCmd = append(buildCmd, "-o", binPath) cmd := exec.Command(buildCmd[0], buildCmd[1:]...) if dryRun { dryPrint(cmd) } else if out, err := combinedOutputTimeout(cmd); err != nil { detail := indent(string(out)) + indent(err.Error()) fmt.Fprintf(os.Stderr, "failed to build tests at %s:\n%s", commit.hash, detail) commit.logFailed(true, detail) return } } // Run the benchmark. runStatus(status, commit, "running") if filepath.Base(binPath) == binPath { // Make exec.Command treat this as a relative path. binPath = "./" + binPath } args := append([]string{binPath}, strings.Fields(run.benchFlags)...) if run.saveTree { args = append([]string{"gover", "with", commit.hash}, args...) } cmd := exec.Command(args[0], args[1:]...) 
if dryRun { dryPrint(cmd) commit.count++ return } out, err := combinedOutputTimeout(cmd) if err == nil { commit.logRun(string(out)) } else { detail := indent(string(out)) + indent(err.Error()) fmt.Fprintf(os.Stderr, "failed to run benchmark at %s:\n%s", commit.hash, detail) commit.logFailed(false, detail) } } func doGoverSave() error { cmd := exec.Command("gover", "save") cmd.Dir = gitDir if dryRun { dryPrint(cmd) return nil } else { out, err := cmd.CombinedOutput() if err != nil { fmt.Fprintf(os.Stderr, "gover save failed: %s:\n%s", err, indent(string(out))) } return err } } // runStatus updates the status message for commit. func runStatus(sr *StatusReporter, commit *commitInfo, status string) { sr.Message(fmt.Sprintf("commit %s, iteration %d/%d: %s...", commit.hash[:7], commit.count+1, run.iterations, status)) } // combinedOutputTimeout is like c.CombinedOutput(), but if // run.timeout != 0, it will kill c after run.timeout time expires. func combinedOutputTimeout(c *exec.Cmd) (out []byte, err error) { var b bytes.Buffer c.Stdout = &b c.Stderr = &b if err := c.Start(); err != nil { return nil, err } if run.timeout == 0 { err := c.Wait() return b.Bytes(), err } tick := time.NewTimer(run.timeout) trace := signalTrace done := make(chan error) go func() { done <- c.Wait() }() loop: for { select { case err = <-done: break loop case <-tick.C: if trace != nil { fmt.Fprintf(os.Stderr, "command timed out; sending %v\n", trace) c.Process.Signal(trace) tick = time.NewTimer(5 * time.Second) trace = nil } else { fmt.Fprintf(os.Stderr, "command timed out; killing\n") c.Process.Kill() } } } tick.Stop() return b.Bytes(), err } ================================================ FILE: benchmany/run_test.go ================================================ // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package main

import (
	"fmt"
	"io/ioutil"
	"math/rand"
	"os"
	"os/exec"
	"path/filepath"
	"testing"

	"github.com/aclements/go-misc/bench"
)

// TestPickSpread checks that repeatedly calling pickCommitSpread
// eventually drains every commit — even when some picks are randomly
// marked as build or run failures — leaving no commit runnable.
func TestPickSpread(t *testing.T) {
	run.iterations = 5
	for iter := 0; iter < 10; iter++ {
		commits := []*commitInfo{}
		for i := 0; i < 100; i++ {
			commits = append(commits, &commitInfo{})
		}
		for {
			commit := pickCommitSpread(commits)
			if commit == nil {
				break
			}
			// Randomly mark ~2% of picks as build failures,
			// ~2% as run failures; the rest count as runs.
			if rand.Intn(50) == 0 {
				commit.buildFailed = true
			} else if rand.Intn(50) == 1 {
				commit.fails++
			} else {
				commit.count++
			}
		}
		// Test that all of the commits ran the expected
		// number of times.
		for _, c := range commits {
			if c.runnable() {
				t.Fatalf("commit still runnable %+v", c)
			}
		}
	}
}

// TestRun is an end-to-end test: it creates a throwaway git
// repository containing a fake benchmark, runs benchmany's main()
// over the last three commits, and checks that bench.log records the
// expected number of results per commit.
func TestRun(t *testing.T) {
	// Create a git repo for testing.
	repo, err := ioutil.TempDir("", "benchmany-test")
	if err != nil {
		t.Fatal("creating temp dir: ", err)
	}
	defer os.RemoveAll(repo)
	tgit(t, repo, "init")
	tgit(t, repo, "config", "user.name", "gopher")
	tgit(t, repo, "config", "user.email", "gopher@example.com")

	// Write benchmark. TestMain fakes a benchmark result so the
	// test does not depend on timing.
	err = ioutil.WriteFile(filepath.Join(repo, "x_test.go"), []byte(`
package main

import "testing"

func TestMain(m *testing.M) {
	println("BenchmarkX 1 100 ns/op")
}`), 0666)
	if err != nil {
		t.Fatal("writing x_test.go: ", err)
	}
	tgit(t, repo, "add", "x_test.go")
	tgit(t, repo, "commit", "-m", "initial")

	// Create several commits.
	var revs []string
	for i := 0; i < 3; i++ {
		str := fmt.Sprintf("%d", i)
		err = ioutil.WriteFile(filepath.Join(repo, "x"), []byte(str), 0666)
		if err != nil {
			t.Fatal("writing x: ", err)
		}
		tgit(t, repo, "add", "x")
		tgit(t, repo, "commit", "-m", str)
		revs = append(revs, trimNL(tgit(t, repo, "rev-parse", "HEAD")))
	}

	for iters := 4; iters <= 5; iters++ {
		// Run benchmark.
		tgit(t, repo, "checkout", "master")
		oldArgs := os.Args
		oldWD, err := os.Getwd()
		if err != nil {
			t.Fatal("Getwd: ", err)
		}
		os.Args = []string{os.Args[0], "-n", fmt.Sprintf("%d", iters), "HEAD~3..HEAD"}
		os.Chdir(repo)
		// NOTE(review): this defer fires at function exit, not per
		// loop iteration, so os.Args/cwd are only restored after the
		// whole test. Harmless today because each iteration re-sets
		// both, but worth revisiting if the loop grows.
		defer func() {
			os.Args = oldArgs
			os.Chdir(oldWD)
		}()
		main()

		// Check results.
		f, err := os.Open(filepath.Join(repo, "bench.log"))
		if err != nil {
			t.Fatal("opening bench.log: ", err)
		}
		defer f.Close()
		bs, err := bench.Parse(f)
		if err != nil {
			t.Fatal("malformed benchmark log: ", err)
		}
		counts := make(map[string]int)
		for _, b := range bs {
			t.Log(b, b.Config["commit"].RawValue)
			counts[b.Config["commit"].RawValue]++
			if uname, ok := b.Config["uname-sr"]; !ok {
				t.Errorf("missing uname-sr config")
			} else {
				t.Logf("uname-sr: %s", uname)
			}
		}
		for _, rev := range revs {
			if counts[rev] != iters {
				t.Errorf("expected %d results for %s, got %d", iters, rev, counts[rev])
			}
		}
	}
}

// tgit runs git with args in repo and returns its combined output,
// failing the test immediately if git exits with an error.
func tgit(t *testing.T, repo string, args ...string) string {
	cmd := exec.Command("git", args...)
	cmd.Dir = repo
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("git %s failed: %v\n%s", args, err, out)
	}
	return string(out)
}

================================================
FILE: benchmany/signal_notunix.go
================================================
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build plan9 windows

package main

import "os"

// signalTrace is the signal sent to a timed-out process to request a
// stack trace before killing it. There is no such signal on Plan 9 or
// Windows.
var signalTrace os.Signal = nil

================================================
FILE: benchmany/signal_unix.go
================================================
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !plan9,!windows package main import ( "os" "syscall" ) var signalTrace os.Signal = syscall.SIGQUIT ================================================ FILE: benchmany/status.go ================================================ // Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package main import ( "fmt" "math" "os" "time" "github.com/aclements/go-moremath/fit" "golang.org/x/crypto/ssh/terminal" ) type StatusReporter struct { update chan<- statusUpdate done chan bool } type statusUpdate struct { progress float64 message string } func NewStatusReporter() *StatusReporter { if os.Getenv("TERM") == "dumb" || !terminal.IsTerminal(1) { return &StatusReporter{} } update := make(chan statusUpdate) sr := &StatusReporter{update: update} go sr.loop(update) return sr } func (sr *StatusReporter) Progress(msg string, frac float64) { if sr.update != nil { sr.update <- statusUpdate{message: msg, progress: frac} } } func (sr *StatusReporter) Message(msg string) { if sr.update == nil { fmt.Println(msg) } else { sr.update <- statusUpdate{message: msg, progress: -1} } } func (sr *StatusReporter) Stop() { if sr.update != nil { sr.done = make(chan bool) close(sr.update) <-sr.done sr.update = nil } } func (sr *StatusReporter) loop(updates <-chan statusUpdate) { const resetLine = "\r\x1b[2K" const wrapOff = "\x1b[?7l" const wrapOn = "\x1b[?7h" tick := time.NewTicker(time.Second / 4) defer tick.Stop() var end time.Time t0 := time.Now() var times, progress, weights []float64 var msg string for { select { case update, ok := <-updates: if !ok { fmt.Print(resetLine) close(sr.done) return } if update.progress == -1 { fmt.Print(resetLine) fmt.Println(update.message) break } now := float64(time.Now().Sub(t0)) times = append(times, float64(now)) progress = append(progress, update.progress) weights = append(weights, 0) msg = update.message // Compute ETA using linear regression with // 
exponentially decaying weights. const halfLife = 150 * time.Second for i, t := range times { weights[i] = math.Exp(-1 / float64(halfLife) * (now - t)) } reg := fit.PolynomialRegression(times, progress, weights, 1) a, b := reg.Coefficients[0], reg.Coefficients[1] // The intercept of a + b*x - 1 is the ending // time. if b == 0 { end = time.Time{} } else { end = t0.Add(time.Duration((1 - a) / b)) } case <-tick.C: } var eta string if end.IsZero() { eta = "unknown" } else { etaDur := end.Sub(time.Now()) // Trim off sub-second precision. etaDur -= etaDur % time.Second if etaDur <= 0 { eta = "0s" } else { eta = etaDur.String() } } if msg == "" { eta = "ETA " + eta } else { eta = ", ETA " + eta } // TODO: This isn't quite right. If we hit the right // edge of the terminal, it won't wrap, but the // right-most character will be the *last* character // in the string, since terminal keeps overwriting it. fmt.Printf("%s%s%s%s%s", resetLine, wrapOff, msg, eta, wrapOn) } } ================================================ FILE: benchplot/git.go ================================================ // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package main import ( "log" "os" "os/exec" "regexp" "strings" "time" ) type CommitInfo struct { Hash, Subject, Branch string AuthorDate, CommitDate time.Time Parents, Children []string } func Commits(repo string, revs ...string) (commits []CommitInfo) { args := []string{"-C", repo, "log", "-s", "--format=format:%H %aI %cI %P\n%s\n"} if len(revs) == 0 { args = append(args, "--all") } else { args = append(append(args, "--"), revs...) } cmd := exec.Command("git", args...) 
cmd.Stderr = os.Stderr out, err := cmd.Output() if err != nil { log.Fatal("git show failed: ", err) } for _, line := range strings.Split(string(out), "\n\n") { parts := strings.Split(line, "\n") subject := parts[1] parts = strings.Split(parts[0], " ") adate, err := time.Parse(time.RFC3339, parts[1]) if err != nil { log.Fatal("cannot parse author date: ", err) } cdate, err := time.Parse(time.RFC3339, parts[2]) if err != nil { log.Fatal("cannot parse commit date: ", err) } commits = append(commits, CommitInfo{ parts[0], subject, "", adate, cdate, parts[3:], nil, }) } // Compute hash indexes. hashset := make(map[string]*CommitInfo) for i := range commits { hashset[commits[i].Hash] = &commits[i] } // Compute children hashes. for h, ci := range hashset { for _, parent := range ci.Parents { if ci2, ok := hashset[parent]; ok { ci2.Children = append(ci2.Children, h) } } } // Compute branch names. var branchRe = regexp.MustCompile(`^\[[^] ]+\] `) var branchOf func(ci *CommitInfo) string branchOf = func(ci *CommitInfo) string { subject := ci.Subject if strings.HasPrefix(subject, "[") { m := branchRe.FindString(subject) if m != "" { return m[1 : len(m)-2] } } if strings.HasPrefix(subject, "Merge") || strings.HasPrefix(subject, "Revert") { // Walk children looking for a branch name. for _, child := range ci.Children { if ci2 := hashset[child]; ci2 != nil { branch := branchOf(ci2) if branch != "master" { return branch } } } } return "master" } for _, ci := range hashset { ci.Branch = branchOf(ci) } // Clean up missing branch tags: if all parents and children // of a commit have the same non-master branch, that commit // must also have been from that branch. 
cleanBranches: for _, ci := range hashset { if ci.Branch == "master" { alt := "" for _, child := range ci.Children { if ci2 := hashset[child]; ci2 != nil { if alt == "" { alt = ci2.Branch } else if ci2.Branch != alt { continue cleanBranches } } } for _, parent := range ci.Parents { if ci2 := hashset[parent]; ci2 != nil { if alt == "" { alt = ci2.Branch } else if ci2.Branch != alt { continue cleanBranches } } } if alt != "" { ci.Branch = alt } } } return } ================================================ FILE: benchplot/kza.go ================================================ // Copyright 2015 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package main import "math" // TODO: This all assumes that data is sampled at a regular interval // and there are no missing values. It could be generalized to accept // missing values (perhaps represented by NaN), or generalized much // further by accepting (t, x) pairs and a vector of times at which to // evaluate the filter (and an arbitrary window size). I would have to // figure out how that affects the difference array in KZA. // TODO: These can generate a lot of garbage. Perhaps the caller // should pass in the target slice? Or these should just overwrite the // input array and leave it to the caller to copy if necessary? // MovingAverage performs a moving average (MA) filter of xs with // window size m. m must be a positive odd integer. // // Note that this is filter is often described in terms of the half // length of the window (m-1)/2. 
func MovingAverage(xs []float64, m int) []float64 {
	if m <= 0 || m%2 != 1 {
		panic("m must be a positive, odd integer")
	}
	ys := make([]float64, len(xs))
	// Slide a window of width m in one pass: l is the index
	// falling off the left edge, r the index entering on the
	// right, i the output index centered between them. Near the
	// array edges the window is truncated and n counts the
	// samples actually in the window.
	sum, n := 0.0, 0
	for l, i, r := -m, -(m-1)/2, 0; i < len(ys); l, i, r = l+1, i+1, r+1 {
		if l >= 0 {
			sum -= xs[l]
			n--
		}
		if r < len(xs) {
			sum += xs[r]
			n++
		}
		if i >= 0 {
			ys[i] = sum / float64(n)
		}
	}
	return ys
}

// KolmogorovZurbenko performs a Kolmogorov-Zurbenko (KZ) filter of xs
// with window size m and k iterations. m must be a positive odd
// integer. k must be positive.
func KolmogorovZurbenko(xs []float64, m, k int) []float64 {
	// k is typically small, and MA is quite efficient, so just do
	// the iterated moving average rather than bothering to
	// compute the binomial coefficient kernel.
	for i := 0; i < k; i++ {
		// TODO: Generate less garbage.
		xs = MovingAverage(xs, m)
	}
	return xs
}

// AdaptiveKolmogorovZurbenko performs an adaptive Kolmogorov-Zurbenko
// (KZA) filter of xs using an initial window size m and k iterations.
// m must be a positive odd integer. k must be positive.
//
// See Zurbenko, et al. 1996: Detecting discontinuities in time series
// of upper air data: Demonstration of an adaptive filter technique.
// Journal of Climate, 9, 3548–3560.
func AdaptiveKolmogorovZurbenko(xs []float64, m, k int) []float64 {
	// Perform initial KZ filter.
	z := KolmogorovZurbenko(xs, m, k)

	// Compute differenced values: d[i] is the magnitude of change
	// of the smoothed signal across the window centered at i.
	q := (m - 1) / 2
	d := make([]float64, len(z)+1)
	maxD := 0.0
	for i := q; i < len(z)-q; i++ {
		d[i] = math.Abs(z[i+q] - z[i-q])
		if d[i] > maxD {
			maxD = d[i]
		}
	}

	if maxD == 0 {
		// xs is constant, so no amount of filtering will do
		// anything. Avoid dividing 0/0 below.
		return xs
	}

	// Compute adaptive filter: shrink the window on the side
	// facing a discontinuity (large d), keep it full otherwise.
	ys := make([]float64, len(xs))
	for t := range ys {
		dPrime := d[t+1] - d[t]
		f := 1 - d[t]/maxD
		qt := q
		if dPrime <= 0 {
			// Zurbenko doesn't specify what to do with
			// the fractional part of qt and qh, so we
			// interpret this as summing all points of xs
			// between qt and qh.
			qt = int(math.Ceil(float64(q) * f))
		}
		if t-qt < 0 {
			qt = t
		}
		qh := q
		if dPrime >= 0 {
			qh = int(math.Floor(float64(q) * f))
		}
		if t+qh >= len(xs) {
			qh = len(xs) - t - 1
		}
		sum := 0.0
		for i := t - qt; i <= t+qh; i++ {
			sum += xs[i]
		}
		// Zurbenko divides by qh+qt, but this undercounts the
		// number of terms in the sum by 1.
		ys[t] = sum / float64(qh+qt+1)
	}
	return ys
}

================================================
FILE: benchplot/kza_test.go
================================================
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"math/rand"
	"testing"
)

// Aeq returns true if expect and got are equal to 8 significant
// figures (1 part in 100 million).
func Aeq(expect, got float64) bool {
	if expect < 0 && got < 0 {
		expect, got = -expect, -got
	}
	return expect*0.99999999 <= got && got*0.99999999 <= expect
}

// TestMovingAverage cross-checks the single-pass MovingAverage
// against a brute-force O(n*m) reference on random inputs and random
// odd window sizes.
func TestMovingAverage(t *testing.T) {
	// Test MovingAverage against the obvious (but slow)
	// implementation.
	xs := make([]float64, 100)
	for iter := 0; iter < 10; iter++ {
		for i := range xs {
			xs[i] = rand.Float64()
		}
		m := 1 + 2*rand.Intn(100)
		ys1, ys2 := MovingAverage(xs, m), slowMovingAverage(xs, m)
		// TODO: Use stuff from mathtest.
		for i, y1 := range ys1 {
			if !Aeq(y1, ys2[i]) {
				t.Fatalf("want %v, got %v", ys2, ys1)
			}
		}
	}
}

// slowMovingAverage is the obvious quadratic reference implementation
// of a centered moving average with edge truncation.
func slowMovingAverage(xs []float64, m int) []float64 {
	ys := make([]float64, len(xs))
	for i := range ys {
		psum, n := 0.0, 0
		for j := i - (m-1)/2; j <= i+(m-1)/2; j++ {
			if 0 <= j && j < len(xs) {
				psum += xs[j]
				n++
			}
		}
		ys[i] = psum / float64(n)
	}
	return ys
}

================================================
FILE: benchplot/main.go
================================================
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Command benchplot plots the results of benchmarks over time.
//
// benchplot takes an input file in Go benchmark format [1]. Each
// benchmark result must have a "commit" configuration key that gives
// the full commit hash of the revision that gave that result.
// benchplot will cross-reference these hashes against the specified
// Git repository and plot each metric over time for each benchmark.
//
// [1] https://github.com/golang/proposal/blob/master/design/14313-benchmark-format.md
package main

import (
	"bytes"
	"flag"
	"fmt"
	"log"
	"os"
	"os/exec"
	"runtime"
	"runtime/pprof"
	"strings"

	"github.com/aclements/go-gg/gg"
	"github.com/aclements/go-gg/table"
	"github.com/aclements/go-misc/bench"
)

func main() {
	log.SetPrefix("benchplot: ")
	log.SetFlags(0)

	// Default the git directory to the enclosing repository's
	// top level (empty if we're not inside one).
	defaultGitDir, _ := exec.Command("git", "rev-parse", "--show-toplevel").Output()
	defaultGitDir = bytes.TrimRight(defaultGitDir, "\n")

	var (
		flagCPUProfile = flag.String("cpuprofile", "", "write CPU profile to `file`")
		flagMemProfile = flag.String("memprofile", "", "write heap profile to `file`")
		flagGitDir     = flag.String("C", string(defaultGitDir), "run git in `dir`")
		flagOut        = flag.String("o", "", "write output to `file` (default: stdout)")
		flagTable      = flag.Bool("table", false, "output a table instead of a plot")
	)
	flag.Usage = func() {
		fmt.Fprintf(os.Stderr, "Usage: %s [flags] [inputs...]\n", os.Args[0])
		flag.PrintDefaults()
	}
	flag.Parse()

	if *flagCPUProfile != "" {
		f, err := os.Create(*flagCPUProfile)
		if err != nil {
			log.Fatal(err)
		}
		pprof.StartCPUProfile(f)
		defer pprof.StopCPUProfile()
	}
	if *flagMemProfile != "" {
		defer func() {
			// GC first so the heap profile reflects live data.
			runtime.GC()
			f, err := os.Create(*flagMemProfile)
			if err != nil {
				log.Fatal(err)
			}
			pprof.WriteHeapProfile(f)
			f.Close()
		}()
	}

	// Parse benchmark inputs. "-" (the default) means stdin.
	paths := flag.Args()
	if len(paths) == 0 {
		paths = []string{"-"}
	}
	var benchmarks []*bench.Benchmark
	for _, path := range paths {
		// Closure so the deferred Close runs per file, not at
		// the end of main.
		func() {
			f := os.Stdin
			if path != "-" {
				var err error
				f, err = os.Open(path)
				if err != nil {
					log.Fatal(err)
				}
				defer f.Close()
			}
			bs, err := bench.Parse(f)
			if err != nil {
				log.Fatal(err)
			}
			benchmarks = append(benchmarks, bs...)
		}()
	}
	bench.ParseValues(benchmarks, nil)

	// Prepare gg tables. If the results carry commit hashes,
	// join them against the git metadata.
	var tab table.Grouping
	btab, configCols, resultCols := benchmarksToTable(benchmarks)
	if btab.Column("commit") == nil {
		tab = btab
	} else {
		gtab := commitsToTable(Commits(*flagGitDir))
		tab = table.Join(btab, "commit", gtab, "commit")
	}

	// Prepare for output.
	f := os.Stdout
	if *flagOut != "" {
		var err error
		f, err = os.Create(*flagOut)
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()
	}

	// Output table.
	if *flagTable {
		table.Fprint(f, tab)
		return
	}

	// Plot.
	//
	// TODO: Collect nrows/ncols from the plot itself.
	p, nrows, ncols := plot(tab, configCols, resultCols)
	if !(len(paths) == 1 && paths[0] == "-") {
		p.Add(gg.Title(strings.Join(paths, " ")))
	}

	// Render plot.
	p.WriteSVG(f, 500*ncols, 350*nrows)
}

================================================
FILE: benchplot/plot.go
================================================
package main

import (
	"fmt"
	"math"

	"github.com/aclements/go-gg/generic/slice"
	"github.com/aclements/go-gg/gg"
	"github.com/aclements/go-gg/ggstat"
	"github.com/aclements/go-gg/table"
)

// TODO: Support plotting non-normalized results.

// plot builds a faceted gg plot of each benchmark metric over commit
// index: results are averaged per commit, normalized to the earliest
// master commit, optionally augmented with a cross-benchmark geomean
// row, and smoothed with an adaptive KZ filter. It returns the plot
// plus the facet row and column counts for sizing the output.
func plot(t table.Grouping, configCols, resultCols []string) (*gg.Plot, int, int) {
	//t = table.Flatten(table.HeadTables(table.GroupBy(t, "name"), 9))

	// Filter to just the master branch.
	//
	// TODO: Flag to control this? Or separate filter command? Or
	// accept a filter expression in the argument?
	t = table.FilterEq(t, "branch", "master")

	// Compute rows and columns.
	ncols := len(resultCols)
	nrows := len(table.GroupBy(t, "name").Tables())

	plot := gg.NewPlot(t)

	// Turn ordered commit date into a "commit index" column.
	plot.SortBy("commit date")
	plot.Stat(commitIndex{})

	// Average each result at each commit (but keep columns names
	// the same to keep things easier to read).
	plot.Stat(ggstat.Agg("commit", "name")(ggstat.AggMean(resultCols...)))
	for _, rcol := range resultCols {
		plot.SetData(table.Rename(plot.Data(), "mean "+rcol, rcol))
	}

	// Unpivot all of the metrics into one column.
	plot.Stat(convertFloat{resultCols})
	plot.SetData(table.Unpivot(plot.Data(), "metric", "result", resultCols...))
	y := "result"

	// Normalize to earliest commit on master. It's important to
	// do this before the geomean if there are commits missing.
	// Unfortunately, that also means we have to *temporarily*
	// group by name and metric, since the geomean needs to be
	// done on a different grouping.
	plot.GroupBy("name", "metric")
	plot.Stat(ggstat.Normalize{X: "branch", By: firstMasterIndex, Cols: []string{"result"}})
	y = "normalized " + y
	plot.SetData(table.Remove(plot.Data(), "result"))
	plot.SetData(table.Ungroup(table.Ungroup(plot.Data())))

	// Compute geomean for each metric at each commit if there's
	// more than one benchmark.
	if len(table.GroupBy(t, "name").Tables()) > 1 {
		gt := removeNaNs(plot.Data(), y)
		gt = ggstat.Agg("commit", "metric")(ggstat.AggGeoMean(y)).F(gt)
		gt = table.MapTables(gt, func(_ table.GroupID, t *table.Table) *table.Table {
			// Leading space sorts the geomean facet first.
			return table.NewBuilder(t).AddConst("name", " geomean").Done()
		})
		gt = table.Rename(gt, "geomean "+y, y)
		plot.SetData(table.Concat(plot.Data(), gt))
		nrows++
	}

	// Facet by name and metric.
	plot.Add(gg.FacetY{Col: "name"}, gg.FacetX{Col: "metric"})

	// Filter the data to reduce noise.
	plot.Stat(kza{y, 15, 3})
	y = "filtered " + y

	// Always show Y=0.
	plot.SetScale("y", gg.NewLinearScaler().Include(0))

	plot.Add(gg.LayerLines{
		X: "commit index",
		Y: y,
		//Color: "branch",
	})
	// plot.Add(gg.LayerTags{X: "commit index", Y: y, Label: "branch"})

	// Interactive tooltip with short hash.
	plot.Stat(tooltip{y})
	plot.Add(gg.LayerTooltips{X: "commit index", Y: y, Label: "tooltip"})

	return plot, nrows, ncols
}

// firstMasterIndex returns the index of the first "master" entry in
// bs; used as the normalization base point.
func firstMasterIndex(bs []string) int {
	return slice.Index(bs, "master")
}

// commitIndex is a gg stat that adds a dense "commit index" column:
// consecutive rows with the same commit hash share an index.
type commitIndex struct{}

func (commitIndex) F(g table.Grouping) table.Grouping {
	return table.MapTables(g, func(_ table.GroupID, t *table.Table) *table.Table {
		idxs := make([]int, t.Len())
		last, idx := "", -1
		for i, hash := range t.MustColumn("commit").([]string) {
			if hash != last {
				idx++
				last = hash
			}
			idxs[i] = idx
		}
		t = table.NewBuilder(t).Add("commit index", idxs).Done()
		return t
	})
}

// convertFloat is a gg stat that converts the named columns to
// []float64 in place.
type convertFloat struct {
	cols []string
}

func (c convertFloat) F(g table.Grouping) table.Grouping {
	return table.MapTables(g, func(_ table.GroupID, t *table.Table) *table.Table {
		b := table.NewBuilder(t)
		for _, col := range c.cols {
			var ncol []float64
			slice.Convert(&ncol, t.MustColumn(col))
			b.Add(col, ncol)
		}
		return b.Done()
	})
}

// removeNaNs filters out rows whose col value is NaN.
func removeNaNs(g table.Grouping, col string) table.Grouping {
	return table.Filter(g, func(result float64) bool {
		return !math.IsNaN(result)
	}, col)
}

// kza is a gg stat that adds a "filtered <X>" column containing the
// adaptive Kolmogorov-Zurbenko smoothing of column X with window M
// and K iterations.
type kza struct {
	X    string
	M, K int
}

func (k kza) F(g table.Grouping) table.Grouping {
	return table.MapTables(g, func(_ table.GroupID, t *table.Table) *table.Table {
		var xs []float64
		slice.Convert(&xs, t.MustColumn(k.X))
		nxs := AdaptiveKolmogorovZurbenko(xs, k.M, k.K)
		return table.NewBuilder(t).Add("filtered "+k.X, nxs).Done()
	})
}

// tooltip is a gg stat that adds a "tooltip" column of
// "<short hash> <value>X" labels.
type tooltip struct {
	Y string
}

func (t tooltip) F(g table.Grouping) table.Grouping {
	return table.MapCols(g, func(commit []string, result []float64, tooltip []string) {
		for i, c := range commit {
			tooltip[i] = fmt.Sprintf("%s %.2fX", c[:7], result[i])
		}
	}, "commit", t.Y)("tooltip")
}

================================================
FILE: benchplot/table.go
================================================
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"math"
	"reflect"
	"sort"
	"strings"
	"time"

	"github.com/aclements/go-gg/table"
	"github.com/aclements/go-misc/bench"
)

// benchmarksToTable converts parsed benchmarks into a gg table with
// one row per benchmark. Config keys become one column each (typed
// via reflection from the first value seen), result keys become
// float64 columns with NaN for missing values. Dashes in key names
// become spaces; "ns/op" is converted to a "time/op" duration column.
// It also returns the resulting config and result column names.
func benchmarksToTable(bs []*bench.Benchmark) (t *table.Table, configCols, resultCols []string) {
	// Gather name, config, and result columns.
	nan := math.NaN()
	names := make([]string, len(bs))
	configs, results := map[string]reflect.Value{}, map[string][]float64{}
	for i, b := range bs {
		names[i] = b.Name
		for k, c := range b.Config {
			seq, ok := configs[k]
			if !ok {
				// Column type follows the type of the
				// first value seen for this key.
				t := reflect.SliceOf(reflect.TypeOf(c.Value))
				seq = reflect.MakeSlice(t, len(bs), len(bs))
				configs[k] = seq
			}
			seq.Index(i).Set(reflect.ValueOf(c.Value))
		}
		for k, v := range b.Result {
			seq, ok := results[k]
			if !ok {
				// Initialize to NaN so benchmarks missing
				// this metric don't read as zero.
				seq = make([]float64, len(bs))
				for i := range seq {
					seq[i] = nan
				}
				results[k] = seq
			}
			seq[i] = v
		}
	}

	// Build table. Keys are sorted for deterministic column order.
	tab := new(table.Builder).Add("name", names)
	keys := make([]string, 0, len(configs))
	for k := range configs {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, key := range keys {
		nicekey := strings.Replace(key, "-", " ", -1)
		niceval := configs[key].Interface()
		if n, ok := niceval.([]time.Time); ok {
			// Wrap time columns so they sort chronologically.
			niceval = byTime(n)
		}
		tab.Add(nicekey, niceval)
		configCols = append(configCols, nicekey)
	}
	keys = make([]string, 0, len(results))
	for k := range results {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, key := range keys {
		nicekey := strings.Replace(key, "-", " ", -1)
		if nicekey == "ns/op" {
			// TODO: Use the unit parser from benchstat.
			nicekey = "time/op"
			durations := make([]time.Duration, len(results[key]))
			for i, x := range results[key] {
				durations[i] = time.Duration(x)
			}
			tab.Add(nicekey, durations)
		} else {
			tab.Add(nicekey, results[key])
		}
		resultCols = append(resultCols, nicekey)
	}
	return tab.Done(), configCols, resultCols
}

// commitsToTable converts git commit metadata into a gg table with
// "commit", "author date", "commit date", and "branch" columns, one
// row per commit.
func commitsToTable(commits []CommitInfo) *table.Table {
	hashCol := make([]string, len(commits))
	authorDateCol := make(byTime, len(commits))
	commitDateCol := make(byTime, len(commits))
	branchCol := make([]string, len(commits))
	// NOTE(review): j always equals i here; the separate counter
	// looks like a leftover from a filtering loop and could be
	// removed.
	j := 0
	for i := range commits {
		ci := &commits[i]
		hashCol[j] = ci.Hash
		authorDateCol[j] = ci.AuthorDate
		commitDateCol[j] = ci.CommitDate
		branchCol[j] = ci.Branch
		j++
	}
	return new(table.Builder).
		Add("commit", hashCol).
		Add("author date", authorDateCol).
		Add("commit date", commitDateCol).
		Add("branch", branchCol).
		Done()
}

// byTime is a []time.Time that implements sort.Interface in
// chronological order.
type byTime []time.Time

func (s byTime) Len() int           { return len(s) }
func (s byTime) Less(i, j int) bool { return s[i].Before(s[j]) }
func (s byTime) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

================================================
FILE: benchplot/vendor/github.com/aclements/go-gg/LICENSE
================================================
Copyright (c) 2016 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================ FILE: benchplot/vendor/github.com/aclements/go-gg/README.md ================================================ # gg [![](https://godoc.org/github.com/aclements/go-gg?status.svg)](https://godoc.org/github.com/aclements/go-gg) gg is a plotting package for Go inspired by the Grammar of Graphics. Note that gg is currently very experimental and the API is still in flux. Please vendor this package before using it. To fetch gg, run go get github.com/aclements/go-gg ================================================ FILE: benchplot/vendor/github.com/aclements/go-gg/generic/doc.go ================================================ // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package generic provides type-generic functions. package generic ================================================ FILE: benchplot/vendor/github.com/aclements/go-gg/generic/error.go ================================================ // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package generic

import "reflect"

// A TypeError reports an operation applied to one or two unsuitable
// types. Extra describes the problem, phrased to follow the type
// name(s), e.g. "int and string have different types".
type TypeError struct {
	Type1, Type2 reflect.Type
	Extra        string
}

func (e TypeError) Error() string {
	msg := e.Type1.String()
	if e.Type2 != nil {
		msg += " and " + e.Type2.String()
	}
	msg += " " + e.Extra
	return msg
}

================================================
FILE: benchplot/vendor/github.com/aclements/go-gg/generic/order.go
================================================
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package generic

import "reflect"

// CanOrder returns whether the values a and b are orderable according
// to the Go language specification.
func CanOrder(a, b interface{}) bool {
	ak, bk := reflect.ValueOf(a).Kind(), reflect.ValueOf(b).Kind()
	if ak != bk {
		return false
	}
	return CanOrderR(ak)
}

// orderable is the set of kinds that support the < operator per the
// Go spec: all integer, float, and string kinds.
var orderable = map[reflect.Kind]bool{
	reflect.Int:     true,
	reflect.Int8:    true,
	reflect.Int16:   true,
	reflect.Int32:   true,
	reflect.Int64:   true,
	reflect.Uint:    true,
	reflect.Uintptr: true,
	reflect.Uint8:   true,
	reflect.Uint16:  true,
	reflect.Uint32:  true,
	reflect.Uint64:  true,
	reflect.Float32: true,
	reflect.Float64: true,
	reflect.String:  true,
}

// CanOrderR returns whether two values of kind k are orderable
// according to the Go language specification.
func CanOrderR(k reflect.Kind) bool {
	return orderable[k]
}

// Order returns the order of values a and b: -1 if a < b, 0 if a ==
// b, 1 if a > b. The results are undefined if either a or b is NaN.
//
// Order panics if a and b are not orderable according to the Go
// language specification.
func Order(a, b interface{}) int {
	return OrderR(reflect.ValueOf(a), reflect.ValueOf(b))
}

// OrderR is equivalent to Order, but operates on reflect.Values.
func OrderR(a, b reflect.Value) int {
	if a.Kind() != b.Kind() {
		panic(&TypeError{a.Type(), b.Type(), "are not orderable because they are different kinds"})
	}
	// Dispatch on kind so each comparison uses the right
	// primitive accessor.
	switch a.Kind() {
	case reflect.Float32, reflect.Float64:
		a, b := a.Float(), b.Float()
		if a < b {
			return -1
		} else if a > b {
			return 1
		}
		return 0

	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		a, b := a.Int(), b.Int()
		if a < b {
			return -1
		} else if a > b {
			return 1
		}
		return 0

	case reflect.Uint, reflect.Uintptr, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		a, b := a.Uint(), b.Uint()
		if a < b {
			return -1
		} else if a > b {
			return 1
		}
		return 0

	case reflect.String:
		a, b := a.String(), b.String()
		if a < b {
			return -1
		} else if a > b {
			return 1
		}
		return 0
	}
	panic(&TypeError{a.Type(), nil, "is not orderable"})
}

================================================
FILE: benchplot/vendor/github.com/aclements/go-gg/generic/slice/concat.go
================================================
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package slice

import (
	"reflect"

	"github.com/aclements/go-gg/generic"
)

// Concat returns the concatenation of all of ss. The types of all of
// the arguments must be identical or Concat will panic with a
// *generic.TypeError. The returned slice will have the same type as the
// inputs. If there are 0 arguments, Concat returns nil. Concat does
// not modify any of the input slices.
func Concat(ss ...T) T { if len(ss) == 0 { return nil } rvs := make([]reflect.Value, len(ss)) total := 0 var typ reflect.Type for i, s := range ss { rvs[i] = reflectSlice(s) total += rvs[i].Len() if i == 0 { typ = rvs[i].Type() } else if rvs[i].Type() != typ { panic(&generic.TypeError{typ, rvs[i].Type(), "have different types"}) } } out := reflect.MakeSlice(typ, 0, total) for _, rv := range rvs { out = reflect.AppendSlice(out, rv) } return out.Interface() } ================================================ FILE: benchplot/vendor/github.com/aclements/go-gg/generic/slice/concat_test.go ================================================ // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package slice import "testing" func TestConcat(t *testing.T) { if g := Concat(); g != nil { t.Errorf("Concat() should be nil; got %v", g) } if g, w := Concat([]int{}), []int{}; !de(w, g) { t.Errorf("want %v; got %v", w, g) } if g, w := Concat([]int(nil)), []int{}; !de(w, g) { t.Errorf("want %v; got %v", w, g) } if g, w := Concat([]int{1, 2}, []int{3, 4}), []int{1, 2, 3, 4}; !de(w, g) { t.Errorf("want %v; got %v", w, g) } shouldPanic(t, "have different types", func() { Concat([]int{}, []string{}) }) } ================================================ FILE: benchplot/vendor/github.com/aclements/go-gg/generic/slice/convert.go ================================================ // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package slice import ( "reflect" "github.com/aclements/go-gg/generic" ) // Convert converts each element in from and assigns it to *to. to // must be a pointer to a slice. Convert slices or extends *to to // len(from) and then assigns to[i] = T(from[i]) where T is the type // of *to's elements. 
If from and *to have the same element type, it // simply assigns *to = from. func Convert(to interface{}, from T) { fv := reflectSlice(from) tv := reflect.ValueOf(to) if tv.Kind() != reflect.Ptr { panic(&generic.TypeError{tv.Type(), nil, "is not a *[]T"}) } tst := tv.Type().Elem() if tst.Kind() != reflect.Slice { panic(&generic.TypeError{tv.Type(), nil, "is not a *[]T"}) } if fv.Type().AssignableTo(tst) { tv.Elem().Set(fv) return } eltt := tst.Elem() if !fv.Type().Elem().ConvertibleTo(eltt) { panic(&generic.TypeError{fv.Type(), tst, "cannot be converted"}) } switch to := to.(type) { case *[]float64: // This is extremely common. *to = (*to)[:0] for i, len := 0, fv.Len(); i < len; i++ { *to = append(*to, fv.Index(i).Convert(eltt).Float()) } default: tsv := tv.Elem() tsv.SetLen(0) for i, len := 0, fv.Len(); i < len; i++ { tsv = reflect.Append(tsv, fv.Index(i).Convert(eltt)) } tv.Elem().Set(tsv) } } ================================================ FILE: benchplot/vendor/github.com/aclements/go-gg/generic/slice/convert_test.go ================================================ // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package slice import "testing" func TestConvert(t *testing.T) { var is []int Convert(&is, []int{1, 2, 3}) if w := []int{1, 2, 3}; !de(w, is) { t.Errorf("want %v; got %v", w, is) } Convert(&is, []float64{1, 2, 3}) if w := []int{1, 2, 3}; !de(w, is) { t.Errorf("want %v; got %v", w, is) } var fs []float64 Convert(&fs, []int{1, 2, 3}) if w := []float64{1, 2, 3}; !de(w, fs) { t.Errorf("want %v; got %v", w, fs) } Convert(&fs, []float64{1, 2, 3}) if w := []float64{1, 2, 3}; !de(w, fs) { t.Errorf("want %v; got %v", w, fs) } shouldPanic(t, "cannot be converted", func() { Convert(&is, []string{"1", "2", "3"}) }) shouldPanic(t, `is not a \*\[\]T`, func() { Convert(is, []int{1, 2, 3}) }) shouldPanic(t, `is not a \*\[\]T`, func() { x := 1 Convert(&x, []int{1, 2, 3}) }) shouldPanic(t, "is not a slice", func() { Convert(&is, 1) }) } ================================================ FILE: benchplot/vendor/github.com/aclements/go-gg/generic/slice/cycle.go ================================================ // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package slice import "reflect" // Cycle constructs a slice of length length by repeatedly // concatenating s to itself. If len(s) >= length, it returns // s[:length]. Otherwise, it allocates a new slice. If len(s) == 0 and // length != 0, Cycle panics. func Cycle(s T, length int) T { rv := reflectSlice(s) if rv.Len() >= length { return rv.Slice(0, length).Interface() } if rv.Len() == 0 { panic("empty slice") } // Allocate a new slice of the appropriate length. out := reflect.MakeSlice(rv.Type(), length, length) // Copy elements to out. for pos := 0; pos < length; { pos += reflect.Copy(out.Slice(pos, length), rv) } return out.Interface() } // Repeat returns a slice consisting of length copies of v. 
func Repeat(v interface{}, length int) T { if length < 0 { length = 0 } rv := reflect.ValueOf(v) out := reflect.MakeSlice(reflect.SliceOf(rv.Type()), length, length) for i := 0; i < length; i++ { out.Index(i).Set(rv) } return out.Interface() } ================================================ FILE: benchplot/vendor/github.com/aclements/go-gg/generic/slice/doc.go ================================================ // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package slice provides generic slice functions. package slice ================================================ FILE: benchplot/vendor/github.com/aclements/go-gg/generic/slice/find.go ================================================ // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package slice import ( "reflect" "github.com/aclements/go-gg/generic" ) // Index returns the index of the first instance of val in s, or -1 if // val is not present in s. val's type must be s's element type. func Index(s T, val interface{}) int { rs := reflectSlice(s) if vt := reflect.TypeOf(val); rs.Type().Elem() != vt { // TODO: Better " is not a sequence of ". panic(&generic.TypeError{rs.Type(), vt, "cannot find"}) } for i, l := 0, rs.Len(); i < l; i++ { if rs.Index(i).Interface() == val { return i } } return -1 } // LastIndex returns the index of the last instance of val in s, or -1 // if val is not present in s. val's type must be s's element type. func LastIndex(s T, val interface{}) int { rs := reflectSlice(s) if vt := reflect.TypeOf(val); rs.Type().Elem() != vt { // TODO: Better " is not a sequence of ". panic(&generic.TypeError{rs.Type(), vt, "cannot find"}) } for i := rs.Len() - 1; i >= 0; i-- { if rs.Index(i).Interface() == val { return i } } return -1 } // Contains reports whether val is within s. 
// val's type must be s's
// element type.
func Contains(s T, val interface{}) bool {
	return Index(s, val) >= 0
}

================================================
FILE: benchplot/vendor/github.com/aclements/go-gg/generic/slice/index.go
================================================
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package slice

import (
	"reflect"

	"github.com/aclements/go-gg/generic"
)

// Select returns a slice w such that w[i] = v[indexes[i]].
func Select(v T, indexes []int) T {
	// Type-specialized fast paths for the most common element types;
	// these avoid per-element reflection.
	switch v := v.(type) {
	case []int:
		res := make([]int, len(indexes))
		for i, x := range indexes {
			res[i] = v[x]
		}
		return res

	case []float64:
		res := make([]float64, len(indexes))
		for i, x := range indexes {
			res[i] = v[x]
		}
		return res

	case []string:
		res := make([]string, len(indexes))
		for i, x := range indexes {
			res[i] = v[x]
		}
		return res
	}

	// Generic path via reflection; preserves v's dynamic slice type.
	rv := reflectSlice(v)
	res := reflect.MakeSlice(rv.Type(), len(indexes), len(indexes))
	for i, x := range indexes {
		res.Index(i).Set(rv.Index(x))
	}
	return res.Interface()
}

// SelectInto assigns out[i] = in[indexes[i]]. in and out must have
// the same types and len(out) must be >= len(indexes). If in and out
// overlap, the results are undefined.
func SelectInto(out, in T, indexes []int) {
	// TODO: Maybe they should only have to be assignable?
	if it, ot := reflect.TypeOf(in), reflect.TypeOf(out); it != ot {
		panic(&generic.TypeError{it, ot, "must be the same type"})
	}

	// Type-specialized fast paths, mirroring Select.
	switch in := in.(type) {
	case []int:
		out := out.([]int)
		for i, x := range indexes {
			out[i] = in[x]
		}
		return

	case []float64:
		out := out.([]float64)
		for i, x := range indexes {
			out[i] = in[x]
		}
		return

	case []string:
		out := out.([]string)
		for i, x := range indexes {
			out[i] = in[x]
		}
		return
	}

	// Generic reflection path.
	inv, outv := reflectSlice(in), reflectSlice(out)
	for i, x := range indexes {
		outv.Index(i).Set(inv.Index(x))
	}
}

================================================
FILE: benchplot/vendor/github.com/aclements/go-gg/generic/slice/min.go
================================================
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package slice

import (
	"reflect"
	"sort"

	"github.com/aclements/go-gg/generic"
)

// Min returns the minimum value in v. v must either implement
// sort.Interface or its elements must be orderable. Min panics if v
// is empty.
func Min(v T) interface{} {
	x, _ := minmax(v, -1, true)
	return x.Interface()
}

// ArgMin returns the index of the minimum value in v. If there are
// multiple indexes equal to the minimum value, ArgMin returns the
// lowest of them. v must be a slice whose elements are orderable, or
// must implement sort.Interface. ArgMin panics if v is empty.
func ArgMin(v interface{}) int {
	_, i := minmax(v, -1, false)
	return i
}

// Max returns the maximum value in v. v must either implement
// sort.Interface or its elements must be orderable. Max panics if v
// is empty.
func Max(v T) interface{} {
	x, _ := minmax(v, 1, true)
	return x.Interface()
}

// ArgMax returns the index of the maximum value in v. If there are
// multiple indexes equal to the maximum value, ArgMax returns the
// lowest of them. v must be a slice whose elements are orderable, or
// must implement sort.Interface. ArgMax panics if v is empty.
func ArgMax(v interface{}) int { _, i := minmax(v, 1, false) return i } func minmax(v interface{}, keep int, val bool) (reflect.Value, int) { switch v := v.(type) { case sort.Interface: if v.Len() == 0 { if keep < 0 { panic("zero-length sequence has no minimum") } else { panic("zero-length sequence has no maximum") } } maxi := 0 if keep < 0 { for i, len := 0, v.Len(); i < len; i++ { if v.Less(i, maxi) { maxi = i } } } else { for i, len := 0, v.Len(); i < len; i++ { if v.Less(maxi, i) { maxi = i } } } if !val { return reflect.Value{}, maxi } rv := reflectSlice(v) return rv.Index(maxi), maxi } rv := reflectSlice(v) if !generic.CanOrderR(rv.Type().Elem().Kind()) { panic(&generic.TypeError{rv.Type().Elem(), nil, "is not orderable"}) } if rv.Len() == 0 { if keep < 0 { panic("zero-length slice has no minimum") } else { panic("zero-length slice has no maximum") } } max, maxi := rv.Index(0), 0 for i, len := 1, rv.Len(); i < len; i++ { if elt := rv.Index(i); generic.OrderR(elt, max) == keep { max, maxi = elt, i } } return max, maxi } ================================================ FILE: benchplot/vendor/github.com/aclements/go-gg/generic/slice/min_test.go ================================================ // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package slice

import (
	"math"
	"testing"
	"time"
)

// TestMin covers the slice path of minmax: empty-input panics,
// tie-breaking to the lowest index, and NaN inputs (whose ordering is
// documented as undefined; only the non-NaN expectations are checked).
func TestMin(t *testing.T) {
	shouldPanic(t, "no min", func() { Min([]float64{}) })
	shouldPanic(t, "no min", func() { ArgMin([]float64{}) })
	shouldPanic(t, "no max", func() { Max([]float64{}) })
	shouldPanic(t, "no max", func() { ArgMax([]float64{}) })

	xs := []float64{5, 1, 8, 1, 8, 3}
	if x := Min(xs); x != 1.0 {
		t.Errorf("Min should be 1, got %v", x)
	}
	if x := ArgMin(xs); x != 1 {
		t.Errorf("ArgMin should be 1, got %v", x)
	}
	if x := Max(xs); x != 8.0 {
		t.Errorf("Max should be 8, got %v", x)
	}
	if x := ArgMax(xs); x != 2 {
		t.Errorf("ArgMax should be 2, got %v", x)
	}

	xs = []float64{1, 5, math.NaN()}
	if x := Min(xs); x != 1.0 {
		t.Errorf("Min should be 1, got %v", x)
	}
	if x := Max(xs); x != 5.0 {
		t.Errorf("Max should be 5, got %v", x)
	}
}

// fakeSortInterface is a sort.Interface that is not a slice; its
// elements order by index. Swap panics because the min/max code must
// never mutate its input.
type fakeSortInterface struct {
	len int
}

func (f fakeSortInterface) Len() int {
	return f.len
}

func (f fakeSortInterface) Swap(i, j int) {
	panic("can't")
}

func (f fakeSortInterface) Less(i, j int) bool {
	return i < j
}

// timeSlice is a slice that implements sort.Interface, exercising the
// sort.Interface path of minmax where the value can be extracted.
type timeSlice []time.Time

func (s timeSlice) Len() int {
	return len(s)
}

func (s timeSlice) Less(i, j int) bool {
	return s[i].Before(s[j])
}

func (s timeSlice) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

// TestMinSort covers the sort.Interface path of minmax. Note it only
// calls ArgMin/ArgMax (not Min/Max) on the non-slice
// fakeSortInterface, since value extraction requires a slice.
func TestMinSort(t *testing.T) {
	shouldPanic(t, "no min", func() { Min(fakeSortInterface{0}) })
	shouldPanic(t, "no min", func() { ArgMin(fakeSortInterface{0}) })
	shouldPanic(t, "no max", func() { Max(fakeSortInterface{0}) })
	shouldPanic(t, "no max", func() { ArgMax(fakeSortInterface{0}) })

	f := fakeSortInterface{5}
	if x := ArgMin(f); x != 0 {
		t.Errorf("ArgMin should be 0, got %v", x)
	}
	if x := ArgMax(f); x != 4 {
		t.Errorf("ArgMax should be 4, got %v", x)
	}

	z := time.Unix(0, 0)
	ts := timeSlice{z.Add(time.Hour), z, z.Add(2 * time.Hour), z.Add(time.Hour)}
	if x := Min(ts); x != ts[1] {
		t.Errorf("Min should be %v, got %v", ts[1], x)
	}
	if x := ArgMin(ts); x != 1 {
		t.Errorf("ArgMin should be 1, got %v", x)
	}
	if x := Max(ts); x != ts[2] {
		t.Errorf("Max should be %v, got %v", ts[2], x)
	}
	if x := ArgMax(ts); x != 2 {
		t.Errorf("ArgMax should be 2, got %v", x)
	}
}

================================================
FILE: benchplot/vendor/github.com/aclements/go-gg/generic/slice/nub.go
================================================
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package slice

import "reflect"

// trueVal is a cached reflect.Value of true, used as the value in the
// reflect-built sets below.
var trueVal = reflect.ValueOf(true)

// Nub returns v with duplicates removed. It keeps the first instance
// of each distinct value and preserves their order.
func Nub(v T) T {
	rv := reflectSlice(v)
	indexes := make([]int, 0)
	// set is a map[E]bool built via reflection, keyed by v's element
	// type; elements must therefore be comparable (a map-key
	// requirement).
	set := reflect.MakeMap(reflect.MapOf(rv.Type().Elem(), trueVal.Type()))
	for i, l := 0, rv.Len(); i < l; i++ {
		x := rv.Index(i)
		if set.MapIndex(x).IsValid() {
			continue // already seen; skip duplicate
		}
		set.SetMapIndex(x, trueVal)
		indexes = append(indexes, i)
	}
	// Materialize the result by selecting the first-occurrence indexes.
	return Select(v, indexes)
}

// NubAppend is equivalent to appending all of the slices in vs and
// then calling Nub on the result, but more efficient.
func NubAppend(vs ...T) T {
	if len(vs) == 0 {
		return nil
	}

	// The result (and the set's key type) take their type from the
	// first slice.
	rv := reflectSlice(vs[0])
	set := reflect.MakeMap(reflect.MapOf(rv.Type().Elem(), trueVal.Type()))
	out := reflect.MakeSlice(rv.Type(), 0, 0)

	for _, v := range vs {
		rv := reflectSlice(v)
		for i, l := 0, rv.Len(); i < l; i++ {
			x := rv.Index(i)
			if set.MapIndex(x).IsValid() {
				continue // already seen; skip duplicate
			}
			set.SetMapIndex(x, trueVal)
			out = reflect.Append(out, x)
		}
	}

	return out.Interface()
}

================================================
FILE: benchplot/vendor/github.com/aclements/go-gg/generic/slice/select_test.go
================================================
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package slice import ( "reflect" "testing" ) func TestSelect(t *testing.T) { x1 := []int{1, 2, 3} got := Select(x1, []int{2, 1, 0}) if want := []int{3, 2, 1}; !reflect.DeepEqual(got, want) { t.Fatalf("expected %v, got %v", want, got) } got = Select(x1, []int{1, 1, 1, 1}) if want := []int{2, 2, 2, 2}; !reflect.DeepEqual(got, want) { t.Fatalf("expected %v, got %v", want, got) } type T struct{ x int } x2 := []T{{1}, {2}, {3}} got = Select(x2, []int{2, 1, 0}) if want := []T{{3}, {2}, {1}}; !reflect.DeepEqual(got, want) { t.Fatalf("expected %v, got %v", want, got) } } func TestSelectType(t *testing.T) { type T []float64 x1 := T{1, 2, 3} y1 := Select(x1, []int{}) if _, ok := y1.(T); !ok { t.Fatalf("result has wrong type; expected T, got %T", y1) } type U int x2 := []U{1, 2, 3} y2 := Select(x2, []int{}) if _, ok := y2.([]U); !ok { t.Fatalf("result has wrong type; expected []U, got %T", y2) } } ================================================ FILE: benchplot/vendor/github.com/aclements/go-gg/generic/slice/seq.go ================================================ // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package slice import ( "reflect" "github.com/aclements/go-gg/generic" ) // T is a Go slice value of type []U. // // This is primarily for documentation. There is no way to statically // enforce this in Go; however, functions that expect a slice will // panic with a *generic.TypeError if passed a non-slice value. type T interface{} // reflectSlice checks that s is a slice and returns its // reflect.Value. It panics with a *generic.TypeError if s is not a slice. 
func reflectSlice(s T) reflect.Value { rv := reflect.ValueOf(s) if rv.Kind() != reflect.Slice { panic(&generic.TypeError{rv.Type(), nil, "is not a slice"}) } return rv } ================================================ FILE: benchplot/vendor/github.com/aclements/go-gg/generic/slice/sort.go ================================================ // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package slice import ( "reflect" "sort" "time" "github.com/aclements/go-gg/generic" ) // CanSort returns whether the value v can be sorted. func CanSort(v interface{}) bool { switch v.(type) { case sort.Interface, []time.Time: return true } return generic.CanOrderR(reflect.TypeOf(v).Elem().Kind()) } // Sort sorts v in increasing order. v must implement sort.Interface, // be a slice whose elements are orderable, or be a []time.Time. func Sort(v interface{}) { sort.Sort(Sorter(v)) } // Sorter returns a sort.Interface for sorting v. v must implement // sort.Interface, be a slice whose elements are orderable, or be a // []time.Time. 
func Sorter(v interface{}) sort.Interface {
	// Fast paths: wrap common concrete types with their standard
	// sort adapters, and pass an existing sort.Interface through.
	switch v := v.(type) {
	case []int:
		return sort.IntSlice(v)
	case []float64:
		return sort.Float64Slice(v)
	case []string:
		return sort.StringSlice(v)
	case []time.Time:
		return sortTimeSlice(v)
	case sort.Interface:
		return v
	}

	// Generic path: dispatch on the slice's element kind to a
	// reflection-based adapter for that kind class.
	rv := reflectSlice(v)
	switch rv.Type().Elem().Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return sortIntSlice{rv}
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return sortUintSlice{rv}
	case reflect.Float32, reflect.Float64:
		return sortFloatSlice{rv}
	case reflect.String:
		return sortStringSlice{rv}
	}
	panic(&generic.TypeError{rv.Type().Elem(), nil, "is not orderable"})
}

// sortIntSlice adapts a reflect.Value holding a slice of any signed
// integer kind to sort.Interface.
type sortIntSlice struct {
	reflect.Value
}

func (s sortIntSlice) Len() int {
	return s.Value.Len()
}

func (s sortIntSlice) Less(i, j int) bool {
	return s.Index(i).Int() < s.Index(j).Int()
}

func (s sortIntSlice) Swap(i, j int) {
	// Swap via temporaries; reflect has no element-swap primitive.
	a, b := s.Index(i).Int(), s.Index(j).Int()
	s.Index(i).SetInt(b)
	s.Index(j).SetInt(a)
}

// sortUintSlice adapts a reflect.Value holding a slice of any
// unsigned integer kind (including uintptr) to sort.Interface.
type sortUintSlice struct {
	reflect.Value
}

func (s sortUintSlice) Len() int {
	return s.Value.Len()
}

func (s sortUintSlice) Less(i, j int) bool {
	return s.Index(i).Uint() < s.Index(j).Uint()
}

func (s sortUintSlice) Swap(i, j int) {
	a, b := s.Index(i).Uint(), s.Index(j).Uint()
	s.Index(i).SetUint(b)
	s.Index(j).SetUint(a)
}

// sortFloatSlice adapts a reflect.Value holding a slice of any
// floating-point kind to sort.Interface.
type sortFloatSlice struct {
	reflect.Value
}

func (s sortFloatSlice) Len() int {
	return s.Value.Len()
}

func (s sortFloatSlice) Less(i, j int) bool {
	return s.Index(i).Float() < s.Index(j).Float()
}

func (s sortFloatSlice) Swap(i, j int) {
	a, b := s.Index(i).Float(), s.Index(j).Float()
	s.Index(i).SetFloat(b)
	s.Index(j).SetFloat(a)
}

// sortStringSlice adapts a reflect.Value holding a slice of string
// kind to sort.Interface.
type sortStringSlice struct {
	reflect.Value
}

func (s sortStringSlice) Len() int {
	return s.Value.Len()
}

func (s sortStringSlice) Less(i, j int) bool {
	return s.Index(i).String() < s.Index(j).String()
}

func (s sortStringSlice) Swap(i, j int) {
	a, b := s.Index(i).String(), s.Index(j).String()
	s.Index(i).SetString(b)
	s.Index(j).SetString(a)
}

// sortTimeSlice sorts a []time.Time chronologically using
// time.Time.Before.
type sortTimeSlice []time.Time

func (s sortTimeSlice) Len() int {
	return len(s)
}

func (s sortTimeSlice) Less(i, j int) bool {
	return s[i].Before(s[j])
}

func (s sortTimeSlice) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

================================================
FILE: benchplot/vendor/github.com/aclements/go-gg/generic/slice/util_test.go
================================================
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package slice

import (
	"fmt"
	"reflect"
	"regexp"
	"testing"
)

// de is shorthand for reflect.DeepEqual, used throughout the tests.
func de(x, y interface{}) bool {
	return reflect.DeepEqual(x, y)
}

// shouldPanic runs f and fails the test unless f panics with a value
// whose string form matches the regular expression re.
func shouldPanic(t *testing.T, re string, f func()) {
	r := regexp.MustCompile(re)
	defer func() {
		err := recover()
		if err == nil {
			t.Fatalf("want panic matching %q; got no panic", re)
		} else if !r.MatchString(fmt.Sprintf("%s", err)) {
			t.Fatalf("want panic matching %q; got %s", re, err)
		}
	}()
	f()
}

================================================
FILE: benchplot/vendor/github.com/aclements/go-gg/gg/example_scale_test.go
================================================
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gg import ( "fmt" "math/rand" "os" "time" "github.com/aclements/go-gg/table" ) func ExampleNewTimeScaler() { var x []time.Time var y []float64 var steps []time.Duration for _, step := range []time.Duration{ 1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, time.Minute, time.Hour, 24 * time.Hour, 7 * 24 * time.Hour, } { t := time.Now() for i := 0; i < 100; i++ { x = append(x, t) y = append(y, rand.Float64()-.5) steps = append(steps, 100*step) t = t.Add(-step) } } tb := table.NewBuilder(nil) tb.Add("x", x).Add("y", y).Add("steps", steps) plot := NewPlot(tb.Done()) plot.SetScale("x", NewTimeScaler()) plot.Add(FacetY{ Col: "steps", SplitXScales: true, }) plot.Add(LayerLines{ X: "x", Y: "y", }) f, err := os.Create("scale_time.svg") if err != nil { panic("unable to create scale_time.svg") } defer f.Close() plot.WriteSVG(f, 800, 1000) fmt.Println("ok") // output: // ok } ================================================ FILE: benchplot/vendor/github.com/aclements/go-gg/gg/facet.go ================================================ // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gg import ( "fmt" "math" "reflect" "github.com/aclements/go-gg/generic" "github.com/aclements/go-gg/generic/slice" "github.com/aclements/go-gg/table" ) // TODO: What if there are already layers? Maybe they should be // repeated in all facets. ggplot2 apparently does this when the // faceting variable isn't in one of the data frames. // TODO: Subplot is getting rather complicated. If I want to make // facets only use public APIs, perhaps gg itself should only know // about some interface for table group labels that provides a layout // manager and the layout logic should live with the facets. // TODO: This is very nearly flexible enough to make pairwise plots. // TODO: Is this flexible enough to make marginal distribution plots? 
// TODO: There's logical overlap between how a facet chooses to // position and label a subplot and a discrete-ranged scalar. Perhaps // facets should use scalars to chose positions and labels? // FacetCommon is the base type for plot faceting operations. Faceting // is a grouping operation that subdivides a plot into subplots based // on the values in data column. Faceting operations may be composed: // if a faceting operation has already divided the plot into subplots, // a further faceting operation will subdivide each of those subplots. type FacetCommon struct { // Col names the column to facet by. Each distinct value of // this column will become a separate plot. If Col is // orderable, the facets will be in value order; otherwise, // they will be in index order. Col string // SplitXScales indicates that each band (column for FacetX; // row for FacetY) created by this faceting operation should // have separate X axis scales. The default, false, indicates // that subplots should continue to share X scales. // // SplitXScales and SplitYScales, combined with facet // composition, give a great deal of control over how scales // are shared. Suppose you want to create an X/Y facet grid by // first performing a FacetX and then a FacetY. Here are some // common ways to share or split the scales: // // * To share the same scales between all subplots, set both // flags to false in both facet operations. // // * To have independent scales in all subplots, set both // flags to true in the FacetY (and it doesn't matter what // they are in the FacetX). // // * To share the X scale within each column and the Y scale // within each row, set SplitXScales in the FacetX and // SplitYScales in the FacetY. SplitXScales bool // SplitYScales is the equivalent of SplitXScales for Y axis // scales. SplitYScales bool // Labeler is a function that constructs facet labels from // data values. If this is nil, the default is fmt.Sprint. 
// // TODO: Call this through reflect to get the argument type // right? Labeler func(interface{}) string // Rows and Cols specify the number of rows or columns for // FacetWrap. If both are zero, FacetWrap chooses reasonable // defaults. Otherwise, one or the other should be zero. Rows, Cols int // TODO: Wrap order and label side for FacetWrap. } // FacetX splits a plot into columns. type FacetX FacetCommon // FacetY splits a plot into rows. type FacetY FacetCommon // FacetWrap splits a plot into a grid of rows and columns. type FacetWrap FacetCommon func (f FacetX) Apply(p *Plot) { (*FacetCommon)(&f).apply(p, "x") } func (f FacetY) Apply(p *Plot) { (*FacetCommon)(&f).apply(p, "y") } func (f FacetWrap) Apply(p *Plot) { (*FacetCommon)(&f).apply(p, "-") } func (f *FacetCommon) apply(p *Plot, dir string) { if f.Labeler == nil { f.Labeler = func(x interface{}) string { return fmt.Sprint(x) } } grouped := table.GroupBy(p.Data(), f.Col) // TODO: What should this do if there are multiple faceting // operations and the results aren't a complete cross-product? // Using GroupBy to form the initial faceting groups will // leave out subplots with no data. Alternatively, I could // base this on the total set of values and force there to be // a complete cross-product. // TODO: If this is, say, and X faceting and different // existing columns have different sets of values, should I // only split a column on the values it has? Doing that right // would require grouping existing subplots in potentially // complex ways (for example, if I do a FacetWrap and then a // FacetX, grouping subplots by column alone will be wrong.) // Collect grouped values. If there was already grouping // structure, it's possible we'll have multiple groups with // the same value for Col. 
type valInfo struct { index int label string } var valType reflect.Type vals := make(map[interface{}]*valInfo) for i, gid := range grouped.Tables() { val := gid.Label() if _, ok := vals[val]; !ok { vals[val] = &valInfo{len(vals), f.Labeler(val)} } if i == 0 { valType = reflect.TypeOf(val) } } // If f.Col is orderable, order and re-index values. if generic.CanOrderR(valType.Kind()) { valSeq := reflect.MakeSlice(reflect.SliceOf(valType), 0, len(vals)) for val := range vals { valSeq = reflect.Append(valSeq, reflect.ValueOf(val)) } slice.Sort(valSeq.Interface()) for i := 0; i < valSeq.Len(); i++ { vals[valSeq.Index(i).Interface()].index = i } } // Compute FacetWrap rows and cols. if dir == "-" { cells := float64(len(vals)) if f.Cols == 0 { if f.Rows == 0 { // Chose default Rows and Cols. f.Rows = int(math.Ceil(math.Sqrt(cells))) } // Compute Cols from Rows. f.Cols = int(math.Ceil(cells / float64(f.Rows))) } else { // Compute Rows from Cols. f.Rows = int(math.Ceil(cells / float64(f.Cols))) } } // Find existing subplots, split existing subplots and bands // into len(vals) new subplots and bands, and transform each // GroupBy group into its new subplot. type bandKey struct { // band1 is the primary band. band2 is only used by // FacetWrap. band1, band2 *subplotBand // X and Y of band. This is a necessary part of the // key because FacetWrap creates rows but does not // create distant horizontal bands for them. x, y int } type bandScale struct { band *subplotBand scale Scaler } subplots := make(map[*subplot][]*subplot) bands := make(map[bandKey][]*subplotBand) scales := make(map[bandScale]Scaler) var ndata table.GroupingBuilder for _, gid := range grouped.Tables() { // Find subplot by walking up group hierarchy. sub := subplotOf(gid) // Split old band into len(vals) new bands in the // orthogonal axis. 
var obandKey bandKey if dir == "x" { obandKey = bandKey{band1: sub.vBand, x: sub.x} } else if dir == "y" { obandKey = bandKey{band1: sub.hBand, y: sub.y} } else { obandKey = bandKey{sub.vBand, sub.hBand, sub.x, sub.y} } nbands := bands[obandKey] if nbands == nil { nbands = make([]*subplotBand, len(vals)) for _, val := range vals { nb := &subplotBand{parent: obandKey.band1, label: val.label} nbands[val.index] = nb } bands[obandKey] = nbands } // Split old subplot into len(vals) new subplots. nsubplots := subplots[sub] if nsubplots == nil { nsubplots = make([]*subplot, len(vals)) for _, val := range vals { ns := &subplot{parent: sub, x: sub.x, y: sub.y, vBand: sub.vBand, hBand: sub.hBand} if dir == "x" { ns.x = sub.x*len(vals) + val.index ns.vBand = nbands[val.index] } else if dir == "y" { ns.y = sub.y*len(vals) + val.index ns.hBand = nbands[val.index] } else { ns.x = sub.x*f.Cols + val.index%f.Cols ns.y = sub.y*f.Rows + val.index/f.Cols ns.vBand = nbands[val.index] } nsubplots[val.index] = ns } subplots[sub] = nsubplots } // Map this group to its new subplot. nsubplot := nsubplots[vals[gid.Label()].index] ngid := gid.Parent().Extend(nsubplot) ndata.Add(ngid, grouped.Table(gid)) // Split scales if requested. At a high level, we want // to give each band a new scale, but there may // already be multiple scales within a band, so we // find the set of scales within a band and split each // distinct scale up. var nband *subplotBand if dir == "x" { nband = nsubplot.vBand } else if dir == "y" { nband = nsubplot.hBand } else { if f.SplitXScales || f.SplitYScales { // TODO: I probably need to rephrase // this whole scale splitting // operation in terms of subplot X and // Y and possibly do it as a second // pass once all of the subplots are // created. 
panic("not implemented: scale splitting for FacetWrap") } } if f.SplitXScales { scaler := p.GetScaleAt("x", gid) nscaler := scales[bandScale{nband, scaler}] if nscaler == nil { nscaler = scaler.CloneScaler() scales[bandScale{nband, scaler}] = nscaler } p.SetScaleAt("x", nscaler, ngid) } if f.SplitYScales { scaler := p.GetScaleAt("y", gid) nscaler := scales[bandScale{nband, scaler}] if nscaler == nil { nscaler = scaler.CloneScaler() scales[bandScale{nband, scaler}] = nscaler } p.SetScaleAt("y", nscaler, ngid) } } p.SetData(ndata.Done()) } // subplotBand represents a rectangular group of subplots in either a // vertical group (with a label on top) or a horizontal group (with a // label to the right). type subplotBand struct { parent *subplotBand label string } type subplot struct { parent *subplot // x and y are the position of this subplot, where 0, 0 is the // top left. x, y int vBand, hBand *subplotBand } var rootSubplot = &subplot{} func subplotOf(gid table.GroupID) *subplot { for ; gid != table.RootGroupID; gid = gid.Parent() { sub, ok := gid.Label().(*subplot) if ok { return sub } } return rootSubplot } func (s subplot) String() string { return fmt.Sprintf("[%d %d]", s.x, s.y) } ================================================ FILE: benchplot/vendor/github.com/aclements/go-gg/gg/group.go ================================================ // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gg import "github.com/aclements/go-gg/table" // TODO: GroupByKey? Would the key function only work on one binding? // With a first-class row representation we could pass that. // GroupBy sub-divides all groups such that all of the rows in each // group have equal values for all of the named columns. func (p *Plot) GroupBy(cols ...string) *Plot { // TODO: Should this accept column expressions, like layers? 
return p.SetData(table.GroupBy(p.Data(), cols...)) } // GroupAuto groups p's data table on all columns that are comparable // but are not numeric (that is, all categorical columns). // // TODO: Maybe there should be a CategoricalBindings that returns the // set of categorical bindings, which callers could just pass to // GroupBy, possibly after manipulating. // // TODO: Does implementing sort.Interface make an otherwise cardinal // column ordinal? func (p *Plot) GroupAuto() *Plot { // Find the categorical columns. categorical := []string{} g := p.Data() for _, col := range g.Columns() { et := table.ColType(g, col).Elem() if et.Comparable() && !isCardinal(et.Kind()) { categorical = append(categorical, col) } } return p.GroupBy(categorical...) } ================================================ FILE: benchplot/vendor/github.com/aclements/go-gg/gg/layer.go ================================================ // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gg import ( "fmt" "github.com/aclements/go-gg/table" ) func defaultCols(p *Plot, cols ...*string) { dcols := p.Data().Columns() for i, colp := range cols { if *colp == "" { if i >= len(dcols) { panic(fmt.Sprintf("cannot get default column %d; table has only %d columns", i, len(dcols))) } *colp = dcols[i] } } } // LayerLines is like LayerPaths, but connects data points in order by // the "x" property. type LayerLines LayerPaths func (l LayerLines) Apply(p *Plot) { LayerPaths(l).apply(p, true) } //go:generate stringer -type StepMode // StepMode controls how LayerSteps connects subsequent points. type StepMode int const ( // StepHV makes LayerSteps connect subsequent points with a // horizontal segment and then a vertical segment. StepHV StepMode = iota // StepVH makes LayerSteps connect subsequent points with a // vertical segment and then a horizontal segment. 
StepVH // StepHMid makes LayerSteps connect subsequent points A and B // with three segments: a horizontal segment from A to the // midpoint between A and B, followed by vertical segment, // followed by a horizontal segment from the midpoint to B. StepHMid // StepVMid makes LayerSteps connect subsequent points A and B // with three segments: a vertical segment from A to the // midpoint between A and B, followed by horizontal segment, // followed by a vertical segment from the midpoint to B. StepVMid ) // LayerSteps is like LayerPaths, but connects data points with a path // consisting only of horizontal and vertical segments. type LayerSteps struct { LayerPaths Step StepMode } func (l LayerSteps) Apply(p *Plot) { // TODO: Should this also support only showing horizontal or // vertical segments? // // TODO: This could be a data transform instead of a layer. // Then it could be used in conjunction with, for example, // ribbons. defaultCols(p, &l.X, &l.Y) p.marks = append(p.marks, plotMark{&markSteps{ l.Step, p.use("x", l.X), p.use("y", l.Y), p.use("stroke", l.Color), p.use("fill", l.Fill), }, p.Data().Tables()}) } // LayerPaths groups by Color and Fill, and then connects successive // data points in each group with a path and/or a filled polygon. type LayerPaths struct { // X and Y name columns that define the input and response of // each point on the path. If these are empty, they default to // the first and second columns, respectively. X, Y string // Color names a column that defines the stroke color of each // path. If Color is "", it defaults to constant black. // Otherwise, the data is grouped by Color. Color string // Fill names a column that defines the fill color of each // path. If Fill is "", it defaults to none. Otherwise, the // data is grouped by Fill. Fill string // XXX Perhaps the theme should provide default values for // things like "color". That would suggest we need to resolve // defaults like that at render time. 
Possibly a special scale // that gets values from the theme could be used to resolve // them. // // XXX strokeOpacity, fillOpacity, strokeWidth, what other // properties do SVG strokes have? // // XXX Should the set of known styling bindings be fixed, and // all possible rendering targets have to know what to do with // them, or should the rendering target be able to have // different styling bindings they understand (presumably with // some reasonable base set)? If the renderer can determine // the known bindings, we would probably just capture the // environment here (and make it so a captured environment // does not change) and hand that to the renderer later. } func (l LayerPaths) Apply(p *Plot) { l.apply(p, false) } func (l LayerPaths) apply(p *Plot, sort bool) { defaultCols(p, &l.X, &l.Y) if l.Color != "" { p.GroupBy(l.Color) } if l.Fill != "" { p.GroupBy(l.Fill) } if sort { defer p.Save().Restore() p = p.SortBy(l.X) } p.marks = append(p.marks, plotMark{&markPath{ p.use("x", l.X), p.use("y", l.Y), p.use("stroke", l.Color), p.use("fill", l.Fill), }, p.Data().Tables()}) } // LayerArea shades the area between two columns with a polygon. It is // useful in conjunction with ggstat.AggMax and ggstat.AggMin for // drawing the extents of data. type LayerArea struct { // X names the column that defines the input of each point. If // this is empty, it defaults to the first column. X string // Upper and Lower name columns that define the range of // response to shade. If either is "", it defaults to a // constant 0 value. Upper, Lower string // Fill names a column that defines the fill color of each // area. If Fill is "", it defaults to black. Otherwise, the // data is grouped by Fill. Fill string // FillOpacity names a column that defines the fill opacity of // each area. If FillOpacity is "", it defaults to 0.5. // Otherwise, the data is grouped by FillOpacity. 
FillOpacity string } func (l LayerArea) Apply(p *Plot) { defaultCols(p, &l.X) if l.Fill != "" { p.GroupBy(l.Fill) } if l.FillOpacity != "" { p.GroupBy(l.FillOpacity) } defer p.Save().Restore() p = p.SortBy(l.X) upper, lower := l.Upper, l.Lower if upper == "" { upper = p.Const(0) } if lower == "" { lower = p.Const(0) } p.marks = append(p.marks, plotMark{&markArea{ p.use("x", l.X), p.use("y", upper), p.use("y", lower), p.use("fill", l.Fill), p.use("opacity", l.FillOpacity), }, p.Data().Tables()}) } // LayerPoints layers a point mark at each data point. type LayerPoints struct { // X and Y name columns that define input and response of each // point. If these are empty, they default to the first and // second columns, respectively. X, Y string // Color names the column that defines the fill color of each // point. If Color is "", it defaults to constant black. Color string // Opacity names the column that defines the opacity of each // point. If Opacity is "", it defaults to fully opaque. This // is multiplied by any alpha value specified by Color. Opacity string // Size names the column that defines the size of each point. // If Size is "", it defaults to 1% of the smallest plot // dimension. Size string // XXX fill vs stroke, shape } func (l LayerPoints) Apply(p *Plot) { defaultCols(p, &l.X, &l.Y) p.marks = append(p.marks, plotMark{&markPoint{ p.use("x", l.X), p.use("y", l.Y), // TODO: It's actually the fill color, but I generally // want it to match things that are stroke colors. // Maybe I should have a "color" aesthetic for the // "primary" color? Or I could have a hierarchy of // aesthetics, in which this uses "stroke" if it has a // scale, but otherwise uses "color". p.use("stroke", l.Color), // TODO: What scale for opacity? Or should I assume // callers will use PreScaled values if they want // specific opacities? What's the physical type? 
p.use("opacity", l.Opacity), p.use("size", l.Size), }, p.Data().Tables()}) } // LayerTiles layers a rectangle at each data point. The rectangle is // specified by its center, width, and height. type LayerTiles struct { // X and Y name columns that define the input and response at // the center of each rectangle. If they are "", they default // to the first and second columns, respectively. X, Y string // Width and Height name columns that define the width and // height of each rectangle. If they are "", the width and/or // height are automatically determined from the smallest // spacing between distinct X and Y points. Width, Height string // Fill names a column that defines the fill color of each // rectangle. If it is "", the default fill is black. Fill string // XXX Stroke color/width, opacity, center adjustment. } func (l LayerTiles) Apply(p *Plot) { defaultCols(p, &l.X, &l.Y) if l.Width != "" || l.Height != "" { // TODO: What scale are these in? (x+width) is in the // X scale, but width itself is not. It doesn't make // sense to train the X scale on width, and if there's // a scale transform, (x+width) has to happen before // the transform. OTOH, if x is discrete, I can't do // (x+width); maybe in that case you just can't // specify a width. OTOOH, if width is specified and // the value is unscaled, I could still do something // reasonable with that if x is discrete. panic("not implemented: non-default width/height") } p.marks = append(p.marks, plotMark{&markTiles{ p.use("x", l.X), p.use("y", l.Y), p.use("fill", l.Fill), }, p.Data().Tables()}) } // LayerTags attaches text annotations to data points. // // TODO: Currently this groups by label and makes one annotation per // group. This should be a controllable. type LayerTags struct { // X and Y name columns that define the input and response // each tag is attached to. If they are "", they default to // the first and second columns, respectively. 
X, Y string // Label names the column that gives the text to put in the // tag at X, Y. Label is required. Label string // HPos controls the horizontal position of the tag if // multiple points have the same Label. The label will be // attached to the point closest to HPos between the left-most // (HPos == 0) and the right-most (HPos == 1) points on this // curve. HPos float64 // Offset controls the pixel offset of the tag from the point // it is attached to. If these are both zero, they are treated // as -20, -20. OffsetX, OffsetY int } func (l LayerTags) Apply(p *Plot) { // TODO: Should there be special "annotation marks" that are // always on top and can perhaps extend outside the plot area? defaultCols(p, &l.X, &l.Y) if l.OffsetX == 0 && l.OffsetY == 0 { l.OffsetX, l.OffsetY = -20, -20 } defer p.Save().Restore() p.GroupBy(l.Label) // TODO: I keep wanting an abstraction for a column across // groups like this. labels := make(map[table.GroupID]table.Slice) for _, gid := range p.Data().Tables() { labels[gid] = p.Data().Table(gid).MustColumn(l.Label) } p.marks = append(p.marks, plotMark{&markTags{ p.use("x", l.X), p.use("y", l.Y), labels, l.HPos, l.OffsetX, l.OffsetY, }, p.Data().Tables()}) } // LayerTooltips attaches hover tooltips to data points. type LayerTooltips struct { // X and Y name columns that define locations of tooltips. If // they are "", they default to the first and second columns, // respectively. X, Y string // Label names the column that gives the text of the tooltip. Label string // TODO: Text styling, closest X or closest point, multiple // tooltips if there are multiple points at the same X with // different Ys? } func (l LayerTooltips) Apply(p *Plot) { defer p.Save().Restore() defaultCols(p, &l.X, &l.Y) // Split up by subplot and flatten each subplot. 
tables := map[*subplot][]*table.Table{} gids := map[*subplot]table.GroupID{} for _, gid := range p.Data().Tables() { s := subplotOf(gid) tables[s] = append(tables[s], p.Data().Table(gid)) gids[s] = gid } var ng table.GroupingBuilder for k, ts := range tables { var subg table.GroupingBuilder for i, t := range ts { subg.Add(table.RootGroupID.Extend(i), t) } ngid := table.RootGroupID.Extend(k) ng.Add(ngid, table.Flatten(subg.Done())) p.copyScales(gids[k], ngid) } p.SetData(ng.Done()) labels := make(map[table.GroupID]table.Slice) for _, gid := range p.Data().Tables() { labels[gid] = p.Data().Table(gid).MustColumn(l.Label) } p.marks = append(p.marks, plotMark{&markTooltips{ p.use("x", l.X), p.use("y", l.Y), labels, }, p.Data().Tables()}) } ================================================ FILE: benchplot/vendor/github.com/aclements/go-gg/gg/layout/grid.go ================================================ // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package layout import "sort" // Grid lays out elements in a two dimensional table. Each child is // assigned to a cell in the table and may optionally span multiple // rows and/or columns. type Grid struct { elts []*gridElement cols, rows int x, y, w, h float64 } type gridElement struct { e Element x, y, colSpan, rowSpan int } // Add adds Element e to Grid g, spanning cells (x,y) up to but not // including (x+colSpan, y+colSpan). 
func (g *Grid) Add(e Element, x, y, colSpan, rowSpan int) { if x+colSpan > g.cols { g.cols = x + colSpan } if y+rowSpan > g.rows { g.rows = y + rowSpan } g.elts = append(g.elts, &gridElement{e, x, y, colSpan, rowSpan}) } func (g *Grid) Children() []Element { res := make([]Element, len(g.elts)) for i, elt := range g.elts { res[i] = elt.e } return res } func (g *Grid) doLayout(byRow bool, allocated float64) (dims []float64, flexes []bool) { seq := func(n int) []int { res := make([]int, n) for i := range res { res[i] = i } return res } max := func(x, y float64) float64 { if x > y { return x } return y } if byRow { dims = make([]float64, g.rows) flexes = make([]bool, g.rows) } else { dims = make([]float64, g.cols) flexes = make([]bool, g.cols) } for i := range flexes { // TODO: Should empty columns be set to false? flexes[i] = true } // Sort elements by colSpan or rowSpan. eltOrder := seq(len(g.elts)) sort.Sort(&gridElementSorter{g.elts, eltOrder, byRow}) // Add a fake element that spans everything and uses the // allocated space. if allocated > 0 { eltOrder = append(eltOrder, -1) } // Process elements by increasing span. for _, i := range eltOrder { var ( edim float64 eflex bool epos int espan int ) if i == -1 { // Fake element for final space allocation. edim, eflex, epos, espan = allocated, true, 0, len(dims) } else { e := g.elts[i] // TODO: We need to make one pass and get both size // hints or this will be exponential. if byRow { _, edim, _, eflex = e.e.SizeHint() epos, espan = e.y, e.rowSpan } else { edim, _, eflex, _ = e.e.SizeHint() epos, espan = e.x, e.colSpan } } if espan == 1 { dims[epos] = max(dims[epos], edim) if !eflex { flexes[epos] = false } } else if espan > 1 { total := edim // Expand flexible columns so that the total // dim is >= e's dim, and so the rows/columns // we do expand get equal dims. We don't // shrink any row/column. If all rows/columns // are fixed, we treat them all as flexible. 
var subdims []float64 forceFlex := false for i := epos; i < epos+espan; i++ { if flexes[i] { subdims = append(subdims, dims[i]) } else { // This space is accounted for. total -= dims[i] } } if len(subdims) == 0 { // All rows/columns are fixed, so treat // them all as flexible. forceFlex = true subdims = append(subdims, dims[epos:epos+espan]...) total = edim } if total <= 0 { // Fixed columns already take e's space. continue } // Remove flex columns already wider than // total/count from consideration. count := len(subdims) sort.Sort(sort.Reverse(sort.Float64Slice(subdims))) for _, dim := range dims { if dim > total/float64(count) { total -= dim count-- } } // Expand remaining rows/columns to total/count. if count <= 0 { // Flex columns already take e's space. continue } dim := total / float64(count) for i := epos; i < epos+espan; i++ { if flexes[i] || forceFlex { dims[i] = max(dims[i], dim) } } // TODO: What do I do with e's flex? Clearly // if a fixed element spans the whole grid, // the grid should be fixed, so I shouldn't // ignore it. } } return } func (g *Grid) SizeHint() (w, h float64, flexw, flexh bool) { sum := func(xs []float64) float64 { s := 0.0 for _, x := range xs { s += x } return s } any := func(xs []bool) bool { for _, x := range xs { if x { return true } } return false } xdims, xflexes := g.doLayout(false, 0) ydims, yflexes := g.doLayout(true, 0) return sum(xdims), sum(ydims), any(xflexes), any(yflexes) } func (g *Grid) SetLayout(x, y, w, h float64) { // Record layout. g.x, g.y, g.w, g.h = x, y, w, h // Layout children. 
csum := func(xs []float64) []float64 { res, csum := make([]float64, len(xs)+1), 0.0 for i, x := range xs { res[i+1] = csum + x csum += x } return res } xdims, _ := g.doLayout(false, w) ydims, _ := g.doLayout(true, h) xpos := csum(xdims) ypos := csum(ydims) for _, elt := range g.elts { elt.e.SetLayout(xpos[elt.x], ypos[elt.y], xpos[elt.x+elt.colSpan]-xpos[elt.x], ypos[elt.y+elt.rowSpan]-ypos[elt.y]) } } func (g *Grid) Layout() (x, y, w, h float64) { return g.x, g.y, g.w, g.h } type gridElementSorter struct { elts []*gridElement seq []int byRowSpan bool } func (g *gridElementSorter) Len() int { return len(g.seq) } func (g *gridElementSorter) Less(i, j int) bool { e1, e2 := g.elts[g.seq[i]], g.elts[g.seq[j]] if g.byRowSpan { return e1.rowSpan < e2.rowSpan } return e1.colSpan < e2.colSpan } func (g *gridElementSorter) Swap(i, j int) { g.seq[i], g.seq[j] = g.seq[j], g.seq[i] } ================================================ FILE: benchplot/vendor/github.com/aclements/go-gg/gg/layout/layout.go ================================================ // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package layout provides helpers for laying out hierarchies of // rectangular elements in two dimensional space. package layout // TODO: If I want to handle wrapped text, this API is insufficient. // In that case, I may need something more like Android where the // parent can pass in Unspecified, (Exactly x), or (AtMost x) for both // dimensions and make multiple calls. I would probably start out with // AtMost the allocated dimension for everything and if the total came // back too large, I would cut back space (possibly causing the other // dimension to grow if text wraps). // An Element is a rectangular feature in a layout. type Element interface { // SizeHint returns this Element's desired size and whether it // can expand from that size in either direction. 
SizeHint() (w, h float64, flexw, flexh bool) // SetLayout sets this Element's layout relative to its parent // and, if this Element is a container, recursively lays out // this Element's children. // // w and h may be smaller than SizeHint() if the space is // constrained. They may also be larger, even if the element // isn't flexible, in which case the Element will position // itself within the assigned size using some gravity. // // TODO: Or should the parent be responsible for gravity if it // allocates too much space to a fixed element? // // TODO: Since an Element doesn't know its parent, it's // difficult to turn local coordinates into absolute // coordinates. These should either be absolute coordinates, // or Element should have a parent and it should be easy to // get absolute coordinates. SetLayout(x, y, w, h float64) // Layout returns this Element's layout. Layout() (x, y, w, h float64) } // A Group is an Element that manages the layout of child Elements. type Group interface { Element // Children returns the child Elements laid out by this Group. Children() []Element } // Leaf is a leaf in a layout hierarchy. It is meant for embedding: it // partially implements Element, leaving SizeHint to the embedding // type. type Leaf struct { x, y, w, h float64 } func (l *Leaf) SetLayout(x, y, w, h float64) { l.x, l.y, l.w, l.h = x, y, w, h } func (l *Leaf) Layout() (x, y, w, h float64) { return l.x, l.y, l.w, l.h } ================================================ FILE: benchplot/vendor/github.com/aclements/go-gg/gg/layout.go ================================================ // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gg import ( "fmt" "math" "sort" "github.com/aclements/go-gg/gg/layout" "github.com/aclements/go-gg/table" "github.com/ajstarks/svgo" ) // A plotElt is a high-level element of a plot layout. // // plotElts are arranged in a 2D grid. 
Coordinates in the grid are // specified by a pair of "paths" rather than a simple pair of // indexes. For example, element A is to the left of element B if A's // X path is less than B's X path, where paths are compared as tuples // with an infinite number of trailing 0's. This makes it easy to, for // example, place an element to the right of another element without // having to renumber all of the elements that are already to its // right. // // The first level of the hierarchy is simply the coordinate of the // plot in the grid. Within this, we layout plot elements as follows: // // +----------------------+ // | Label (x, y/-3/-1) | // +----------------------+ // | Label (x, y/-3/0) | // +----------------------+ // | Padding (x, y/-2) | // +-----------+----------+----------------------+----------+------------+ // | Padding | YTicks | | Padding | Label | // | (x/-2, y) | (x/-1,y) | Subplot (x, y) | (x/2, y) | (x/3/0, y) | // | | | | | | // +-----------+----------+----------------------+----------+------------+ // | XTicks (x, y/1) | // +----------------------+ // | Padding (x, y/2) | // +----------------------+ // // TODO: Should I instead think of this as specifying the edges rather // than the cells? type plotElt interface { layout.Element // paths returns the top-left and bottom-right cells of this // element. x2Path and y2Path may be nil, indicating that they // are the same as xPath and yPath. paths() (xPath, yPath, x2Path, y2Path eltPath) // render draws this plot element to r.svg. 
render(r *eltRender) } type eltRender struct { svg *svg.SVG id int } func (r *eltRender) genid(prefix string) (id, ref string) { id = fmt.Sprintf("%s%d", prefix, r.id) ref = "url(#" + id + ")" r.id++ return } type eltCommon struct { xPath, yPath, x2Path, y2Path eltPath } func (c *eltCommon) paths() (xPath, yPath, x2Path, y2Path eltPath) { return c.xPath, c.yPath, c.x2Path, c.y2Path } type eltSubplot struct { eltCommon layout.Leaf subplot *subplot marks []plotMark scales map[string]map[Scaler]bool xTicks, yTicks *eltTicks plotMargins struct { t, r, b, l float64 } } func newEltSubplot(s *subplot) *eltSubplot { return &eltSubplot{ eltCommon: eltCommon{xPath: eltPath{s.x}, yPath: eltPath{s.y}}, subplot: s, scales: make(map[string]map[Scaler]bool), } } func (e *eltSubplot) SizeHint() (w, h float64, flexw, flexh bool) { return 0, 0, true, true } func (e *eltSubplot) SetLayout(x, y, w, h float64) { e.Leaf.SetLayout(x, y, w, h) m := &e.plotMargins m.t, m.r, m.b, m.l = plotMargins(w, h) } type eltTicks struct { eltCommon layout.Leaf axis rune // 'x' or 'y' ticksFor *eltSubplot // Subplot to which this is directly attached ticks map[Scaler]plotEltTicks } type plotEltTicks struct { major table.Slice minor table.Slice labels []string } func newEltTicks(axis rune, s *eltSubplot) *eltTicks { elt := &eltTicks{ eltCommon: s.eltCommon, axis: axis, ticksFor: s, } switch axis { case 'x': elt.yPath = eltPath{s.subplot.y, 1} case 'y': elt.xPath = eltPath{s.subplot.x, -1} default: panic("bad axis") } return elt } func (e *eltTicks) scales() map[Scaler]bool { switch e.axis { case 'x': return e.ticksFor.scales["x"] case 'y': return e.ticksFor.scales["y"] default: panic("bad axis") } } func (e *eltTicks) mapTicks(s Scaler, ticks table.Slice) (pixels []float64) { x, y, w, h := e.Layout() // TODO: This doesn't show ticks in the margin area. This may // be fine with niced tick labels, but it tends to look bad // with un-niced ticks. 
Ideally we would expand the input // domain instead, but this isn't well-defined for discrete // scales. We could use Unmap to try to find the expanded // input domain on both sides, but fall back to expanding the // ranger if Unmap fails (which it would for a discrete // scale). m := e.ticksFor.plotMargins switch e.axis { case 'x': s.Ranger(NewFloatRanger(x+m.l, x+w-m.r)) case 'y': s.Ranger(NewFloatRanger(y+h-m.b, y+m.t)) } return mapMany(s, ticks).([]float64) } // computeTicks computes the location and labels of the ticks in // element e based on the dimensions of e.ticksFor (which must have // been laid out prior to calling this). func (e *eltTicks) computeTicks() { const tickDistance = 30 // TODO: Theme. Min pixels between tick labels. _, _, w, h := e.ticksFor.Layout() var dim float64 switch e.axis { case 'x': dim = w case 'y': dim = h } // Compute max ticks assuming the labels are zero sized. maxTicks := int(dim / tickDistance) // Optimize ticks, keeping labels at least tickDistance apart. e.ticks = make(map[Scaler]plotEltTicks) for s := range e.scales() { pred := func(ticks, _ table.Slice, labels []string) bool { if len(labels) <= 1 { return true } // Check distance between labels. pos := e.mapTicks(s, ticks) // Ticks are in value order, but we need them // in position order. sort.Float64s(pos) var last float64 for i, p := range pos { if i > 0 && p-last < tickDistance { // Labels i-1 and i are too close. return false } metrics := measureString(fontSize, labels[i]) switch e.axis { case 'x': last = p + metrics.width case 'y': last = p + metrics.leading } } return true } major, minor, labels := s.Ticks(maxTicks, pred) e.ticks[s] = plotEltTicks{major, minor, labels} } } func (e *eltTicks) SizeHint() (w, h float64, flexw, flexh bool) { if len(e.ticks) == 0 { // Ticks haven't been computed yet or there are none. // Assume this takes up no space. 
switch e.axis { case 'x': return 0, 0, true, false case 'y': return 0, 0, false, true default: panic("bad axis") } } var maxWidth, maxHeight float64 for s := range e.scales() { for _, label := range e.ticks[s].labels { metrics := measureString(fontSize, label) maxHeight = math.Max(maxHeight, metrics.leading) maxWidth = math.Max(maxWidth, metrics.width) } } switch e.axis { case 'x': maxHeight += xTickSep case 'y': maxWidth += yTickSep } return maxWidth, maxHeight, e.axis == 'x', e.axis == 'y' } type eltLabel struct { eltCommon layout.Leaf side rune // 't', 'b', 'l', 'r' label string fill string } func newEltLabelFacet(side rune, label string, x1, y1, x2, y2 int, level int) *eltLabel { elt := &eltLabel{ side: side, label: label, fill: "#ccc", // TODO: Theme. } switch side { case 't': elt.eltCommon = eltCommon{ xPath: eltPath{x1}, yPath: eltPath{y1, -3, -level}, x2Path: eltPath{x2}, } case 'r': elt.eltCommon = eltCommon{ xPath: eltPath{x2, 3, level}, yPath: eltPath{y1}, y2Path: eltPath{y2}, } default: panic("bad side") } return elt } func newEltLabelAxis(side rune, label string, x, y, span int) *eltLabel { elt := &eltLabel{ eltCommon: eltCommon{xPath: eltPath{x}, yPath: eltPath{y}}, side: side, label: label, fill: "none", } switch side { case 'T', 'b': elt.x2Path = eltPath{x + span} case 'l': elt.y2Path = eltPath{y + span} default: panic("bad side") } return elt } func (e *eltLabel) SizeHint() (w, h float64, flexw, flexh bool) { // TODO: We actually want the height of the text, which could // be N*leading if there are multiple lines. 
dim := measureString(fontSize, e.label).leading * facetLabelHeight switch e.side { case 't', 'b': return 0, dim, true, false case 'T': // Titles return 0, 1.5 * dim, true, false case 'l', 'r': return dim, 0, false, true default: panic("bad side") } } type eltPadding struct { eltCommon layout.Leaf side rune // 't', 'b', 'l', 'r' } func newEltPadding(side rune, x, y int) *eltPadding { elt := &eltPadding{ eltCommon: eltCommon{xPath: eltPath{x}, yPath: eltPath{y}}, side: side, } switch side { case 't': elt.yPath = eltPath{y, -2} case 'r': elt.xPath = eltPath{x, 2} case 'b': elt.yPath = eltPath{y, 2} case 'l': elt.xPath = eltPath{x, -2} default: panic("bad side") } return elt } func (e *eltPadding) SizeHint() (w, h float64, flexw, flexh bool) { const padding = 4 // TODO: Theme. switch e.side { case 't', 'b': return 0, padding, true, false case 'l', 'r': return padding, 0, false, true default: panic("bad side") } } func addSubplotLabels(elts []plotElt) []plotElt { // Find the regions covered by each subplot band. vBands := make(map[*subplotBand]subplotRegion) hBands := make(map[*subplotBand]subplotRegion) for _, elt := range elts { elt, ok := elt.(*eltSubplot) if !ok { continue } s := elt.subplot level := 0 for vBand := s.vBand; vBand != nil; vBand = vBand.parent { r := vBands[vBand] r.update(s, level) vBands[vBand] = r level++ } level = 0 for hBand := s.hBand; hBand != nil; hBand = hBand.parent { r := hBands[hBand] r.update(s, level) hBands[hBand] = r level++ } } // Create ticks. // // TODO: If the facet grid isn't total, this can add ticks to // the side of a plot that's in the middle of the grid and // that creates a gap between all of the plots. This seems // like a fundamental limitation of treating this as a grid. // We could either abandon the grid and instead use a // hierarchy of left-of/right-of/above/below relations, or we // could make facets produce a total grid. 
var prev *eltSubplot var curTicks *eltTicks sorter := newSubplotSorter(elts, 'x') sort.Sort(sorter) for _, elt := range sorter.elts { if prev == nil || prev.subplot.y != elt.subplot.y || !eqScales(prev, elt, "y") { // Show Y axis ticks. curTicks = newEltTicks('y', elt) elts = append(elts, curTicks) } elt.yTicks = curTicks prev = elt } sorter.dir = 'y' sort.Sort(sorter) prev, curTicks = nil, nil for _, elt := range sorter.elts { if prev == nil || prev.subplot.x != elt.subplot.x || !eqScales(prev, elt, "x") { // Show X axis ticks. curTicks = newEltTicks('x', elt) elts = append(elts, curTicks) } elt.xTicks = curTicks prev = elt } // Create labels. for vBand, r := range vBands { elts = append(elts, newEltLabelFacet('t', vBand.label, r.x1, r.y1, r.x2, r.y2, r.level)) } for hBand, r := range hBands { elts = append(elts, newEltLabelFacet('r', hBand.label, r.x1, r.y1, r.x2, r.y2, r.level)) } return elts } func addAxisLabels(elts []plotElt, title, xlabel, ylabel string) []plotElt { // Find the region covered by subplots. var r subplotRegion for _, elt := range elts { elt, ok := elt.(*eltSubplot) if !ok { continue } r.update(elt.subplot, 0) } if !r.valid { return elts } // Add title. // TODO: Make this larger. if title != "" { elts = append(elts, newEltLabelAxis('T', title, r.x1, r.y1-1, r.x2-r.x1)) } // Add labels. elts = append(elts, newEltLabelAxis('b', xlabel, r.x1, r.y2+1, r.x2-r.x1), newEltLabelAxis('l', ylabel, r.x1-1, r.y1, r.y2-r.y1)) return elts } type subplotRegion struct { valid bool x1, x2, y1, y2, level int } func (r *subplotRegion) update(s *subplot, level int) { if !r.valid { r.x1, r.x2, r.y1, r.y2, r.level = s.x, s.x, s.y, s.y, level r.valid = true return } if s.x < r.x1 { r.x1 = s.x } else if s.x > r.x2 { r.x2 = s.x } if s.y < r.y1 { r.y1 = s.y } else if s.y > r.y2 { r.y2 = s.y } if level > r.level { r.level = level } } // subplotSorter sorts eltSubplots by subplot (x, y) position. 
type subplotSorter struct { elts []*eltSubplot // dir indicates primary sorting direction: 'x' means to sort // left-to-right, top-to-bottom; 'y' means to sort // bottom-to-top, left-to-right. dir rune } func newSubplotSorter(elts []plotElt, dir rune) *subplotSorter { selts := []*eltSubplot{} for _, elt := range elts { if s, ok := elt.(*eltSubplot); ok { selts = append(selts, s) } } return &subplotSorter{selts, dir} } func (s subplotSorter) Len() int { return len(s.elts) } func (s subplotSorter) Less(i, j int) bool { a, b := s.elts[i], s.elts[j] if s.dir == 'x' { if a.subplot.y != b.subplot.y { return a.subplot.y < b.subplot.y } return a.subplot.x < b.subplot.x } else { if a.subplot.x != b.subplot.x { return a.subplot.x < b.subplot.x } return a.subplot.y > b.subplot.y } } func (s subplotSorter) Swap(i, j int) { s.elts[i], s.elts[j] = s.elts[j], s.elts[i] } func eqScales(a, b *eltSubplot, aes string) bool { sa, sb := a.scales[aes], b.scales[aes] if len(sa) != len(sb) { return false } for k, v := range sa { if sb[k] != v { return false } } return true } type eltPath []int func (a eltPath) cmp(b eltPath) int { for len(a) > 0 || len(b) > 0 { var ax, bx int if len(a) > 0 { ax, a = a[0], a[1:] } if len(b) > 0 { bx, b = b[0], b[1:] } if ax != bx { if ax < bx { return -1 } else { return 1 } } } return 0 } type eltPaths []eltPath func (s eltPaths) Len() int { return len(s) } func (s eltPaths) Less(i, j int) bool { return s[i].cmp(s[j]) < 0 } func (s eltPaths) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s eltPaths) nub() eltPaths { var i, o int for i, o = 1, 1; i < len(s); i++ { if s[i].cmp(s[i-1]) != 0 { s[o] = s[i] o++ } } return s[:o] } func (s eltPaths) find(p eltPath) int { return sort.Search(len(s), func(i int) bool { return s[i].cmp(p) >= 0 }) } // layoutPlotElts returns a layout containing all of the elements in // elts. // // layoutPlotElts flattens the X and Y paths of elts into simple // coordinate indexes and constructs a layout.Grid. 
func layoutPlotElts(elts []plotElt) layout.Element {
	// Add padding elements to each subplot.
	//
	// TODO: Should there be padding between labels and the plot?
	for _, elt := range elts {
		elt, ok := elt.(*eltSubplot)
		if !ok {
			continue
		}
		// The padding elements anchor at the subplot's own
		// primary (x, y) coordinate; their constructors extend
		// the path to place them just outside the subplot.
		x, y := elt.xPath[0], elt.yPath[0]
		elts = append(elts,
			newEltPadding('t', x, y),
			newEltPadding('r', x, y),
			newEltPadding('b', x, y),
			newEltPadding('l', x, y),
		)
	}

	// Construct the global element grid from coordinate paths by
	// sorting the sets of X paths and Y paths to each leaf and
	// computing a global (x,y) for each leaf from these orders.
	type eltPos struct {
		x, y, xSpan, ySpan int
	}
	flat := map[plotElt]eltPos{}
	// dir flattens one dimension (X or Y): get extracts the start
	// and optional end path of an element in that dimension, and
	// set records the computed grid position and span.
	dir := func(get func(plotElt) (p, p2 eltPath), set func(p *eltPos, pos, span int)) {
		var paths eltPaths
		for _, elt := range elts {
			p, p2 := get(elt)
			paths = append(paths, p)
			if p2 != nil {
				paths = append(paths, p2)
			}
		}
		sort.Sort(paths)
		paths = paths.nub()
		// A path's rank in the sorted, deduplicated set is
		// its grid coordinate; an end path extends the span.
		for _, elt := range elts {
			p, p2 := get(elt)
			pos, span := paths.find(p), 1
			if p2 != nil {
				span = paths.find(p2) - pos + 1
			}
			eltPos := flat[elt]
			set(&eltPos, pos, span)
			flat[elt] = eltPos
		}
	}
	dir(func(e plotElt) (p, p2 eltPath) {
		p, _, p2, _ = e.paths()
		return
	}, func(p *eltPos, pos, span int) {
		p.x, p.xSpan = pos, span
	})
	dir(func(e plotElt) (p, p2 eltPath) {
		_, p, _, p2 = e.paths()
		return
	}, func(p *eltPos, pos, span int) {
		p.y, p.ySpan = pos, span
	})

	// Construct the grid layout.
	l := new(layout.Grid)
	for elt, pos := range flat {
		l.Add(elt, pos.x, pos.y, pos.xSpan, pos.ySpan)
	}
	return l
}


================================================
FILE: benchplot/vendor/github.com/aclements/go-gg/gg/mark.go
================================================
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gg import ( "bytes" "encoding/base64" "encoding/json" "fmt" "image" "image/color" "image/png" "math" "reflect" "sort" "strconv" "github.com/aclements/go-gg/generic/slice" "github.com/aclements/go-gg/table" "github.com/aclements/go-moremath/stats" "github.com/ajstarks/svgo" ) // TODO: Audit all of this for inf and NaN. type marker interface { mark(env *renderEnv, canvas *svg.SVG) } func isFinite(x float64) bool { return !(math.IsNaN(x) || math.IsInf(x, 0)) } type plotMark struct { m marker groups []table.GroupID } type markPath struct { x, y, stroke, fill *scaledData } func (m *markPath) mark(env *renderEnv, canvas *svg.SVG) { // XXX What ensures these type assertions will succeed, // especially if it's an identity scale? Maybe identity scales // still need to coerce their results to the right type. xs, ys := env.get(m.x).([]float64), env.get(m.y).([]float64) var stroke color.Color = color.Black if m.stroke != nil { stroke = env.getFirst(m.stroke).(color.Color) } var fill color.Color = color.Transparent if m.fill != nil { fill = env.getFirst(m.fill).(color.Color) } drawPath(canvas, xs, ys, stroke, fill) } type markArea struct { x, upper, lower, fill, fillOpacity *scaledData } func reversed(data []float64) []float64 { var rev []float64 for i := len(data) - 1; i >= 0; i-- { rev = append(rev, data[i]) } return rev } func (m *markArea) mark(env *renderEnv, canvas *svg.SVG) { xs := env.get(m.x).([]float64) upper := env.get(m.upper).([]float64) lower := env.get(m.lower).([]float64) var fill color.Color = color.Black if m.fill != nil { fill = env.getFirst(m.fill).(color.Color) } fillOpacity := 0.5 if m.fillOpacity != nil { fillOpacity = env.getFirst(m.fillOpacity).(float64) } r, g, b, a := fill.RGBA() fill = color.RGBA64{ uint16(float64(r) * fillOpacity), uint16(float64(g) * fillOpacity), uint16(float64(b) * fillOpacity), uint16(float64(a) * fillOpacity)} xs = append(xs, reversed(xs)...) ys := append(upper, reversed(lower)...) 
drawPath(canvas, xs, ys, color.Transparent, fill) } type markSteps struct { dir StepMode x, y, stroke, fill *scaledData } func (m *markSteps) mark(env *renderEnv, canvas *svg.SVG) { xs, ys := env.get(m.x).([]float64), env.get(m.y).([]float64) var stroke color.Color = color.Black if m.stroke != nil { stroke = env.getFirst(m.stroke).(color.Color) } var fill color.Color = color.Transparent if m.fill != nil { fill = env.getFirst(m.fill).(color.Color) } if len(xs) == 0 { return } // Create intermediate points. xs2, ys2 := make([]float64, 2*len(xs)), make([]float64, 2*len(ys)) for i := range xs2 { switch m.dir { case StepHV, StepVH: xs2[i], ys2[i] = xs[i/2], ys[i/2] case StepHMid, StepVMid: if i == 0 || i == len(xs2)-1 { xs2[i], ys2[i] = xs[i/2], ys[i/2] break } var p1, p2 int if i%2 == 0 { // Interpolate i/2-1 and i/2. p1, p2 = i/2-1, i/2 } else { // Interpolate i/2 and i/2+1. p1, p2 = i/2, i/2+1 } if m.dir == StepHMid { xs2[i], ys2[i] = (xs[p1]+xs[p2])/2, ys[i/2] } else { xs2[i], ys2[i] = xs[i/2], (ys[p1]+ys[p2])/2 } } } if m.dir == StepHV { xs2 = xs2[1:] } else if m.dir == StepVH { ys2 = ys2[1:] } drawPath(canvas, xs2, ys2, stroke, fill) } func drawPath(canvas *svg.SVG, xs, ys []float64, stroke color.Color, fill color.Color) { switch len(xs) { case 0: return case 1: // TODO: Depending on the stroke cap, this *could* be // well-defined. Warning.Print("cannot draw path through 1 point; ignoring") return } // Build path. 
var path []byte inLine := false for i := range xs { if !isFinite(xs[i]) || !isFinite(ys[i]) { inLine = false continue } if !inLine { path = append(path, 'M') inLine = true } path = append(path, ' ') path = strconv.AppendFloat(path, xs[i], 'g', 6, 64) path = append(path, ' ') path = strconv.AppendFloat(path, ys[i], 'g', 6, 64) } if len(path) == 0 { return } // XXX Stroke width style := cssPaint("stroke", stroke) + ";" + cssPaint("fill", fill) + ";stroke-width:3" canvas.Path(wrapPath(string(path)), style) } type markPoint struct { x, y, color, opacity, size *scaledData } func (m *markPoint) mark(env *renderEnv, canvas *svg.SVG) { xs, ys := env.get(m.x).([]float64), env.get(m.y).([]float64) var colors []color.Color if m.color != nil { slice.Convert(&colors, env.get(m.color)) } var opacities []float64 if m.opacity != nil { opacities = env.get(m.opacity).([]float64) } var sizes []float64 if m.size != nil { sizes = env.get(m.size).([]float64) } mindim := math.Min(env.Size()) for i := range xs { if !isFinite(xs[i]) || !isFinite(ys[i]) { continue } var style string if colors != nil { style = cssPaint("fill", colors[i]) } if opacities != nil { if style != "" { style += ";" } style += fmt.Sprintf("opacity:%.6g", opacities[i]) } r := mindim * 0.01 if sizes != nil { r = mindim * sizes[i] } canvas.Circle(int(xs[i]), int(ys[i]), int(r), style) } } type markTiles struct { x, y, fill *scaledData } func (m *markTiles) mark(env *renderEnv, canvas *svg.SVG) { xs, ys := env.get(m.x).([]float64), env.get(m.y).([]float64) // TODO: Should the Scaler (or Ranger) ensure that the values // are color.Color? How would this work with an identity // scaler? var fills []color.Color if m.fill != nil { slice.Convert(&fills, env.get(m.fill)) } // TODO: We can't use an this if the width and height // are specified, or if there is a stroke. // minx, maxx := stats.Bounds(xs) // miny, maxy := stats.Bounds(ys) // Compute image bounds. 
imageBounds := func(vals []float64) (float64, float64, float64, bool) { // Reduce to unique values. unique := []float64{} uset := map[float64]bool{} for _, v := range vals { if !uset[v] { if !isFinite(v) { continue } unique = append(unique, v) uset[v] = true } } var minGap float64 regular := true switch len(unique) { case 0: return 0, 0, -1, false case 1: // TODO: In this case we'll produce a 1 pixel // wide/high line. That's probably not what's // desired. Maybe we want it to be the // width/height of the plot area? minGap = 1.0 default: sort.Float64s(unique) minGap = unique[1] - unique[0] for i, u := range unique[1:] { minGap = math.Min(minGap, u-unique[i]) } // Consider the spacing "regular" if every // point is within a 1000th of a multiple of // minGap. for _, u := range unique { _, error := math.Modf((u - unique[0]) / minGap) if 0.001 <= error && error <= 0.999 { regular = false break } } } return unique[0], unique[len(unique)-1], minGap, regular } xmin, xmax, xgap, xreg := imageBounds(xs) ymin, ymax, ygap, yreg := imageBounds(ys) if xgap == -1 || ygap == -1 { return } if !xreg || !yreg { // TODO: Can't use an image. panic("not implemented: irregular tile spacing") } // TODO: If there are a small number of cells, just make the // rectangles since it's hard to reliably disable // interpolation (e.g., the below doesn't work in rsvg). // Create the image. iw, ih := round((xmax-xmin+xgap)/xgap), round((ymax-ymin+ygap)/ygap) img := image.NewRGBA(image.Rect(0, 0, iw, ih)) fill := color.Color(color.Black) for i := range xs { if !isFinite(xs[i]) || !isFinite(ys[i]) { continue } if fills != nil { fill = fills[i] } img.Set(round((xs[i]-xmin)/xgap), round((ys[i]-ymin)/ygap), fill) } // Encode the image. 
uri := bytes.NewBufferString("data:image/png;base64,") w := base64.NewEncoder(base64.StdEncoding, uri) if err := png.Encode(w, img); err != nil { Warning.Println("error encoding image:", err) return } w.Close() canvas.Image(round(xmin-xgap/2), round(ymin-ygap/2), round(xmax-xmin+xgap), int(ymax-ymin+ygap), uri.String(), `preserveAspectRatio="none" style="image-rendering:optimizeSpeed;image-rendering:-moz-crisp-edges;image-rendering:-webkit-optimize-contrast;image-rendering:pixelated"`) } type markTags struct { x, y *scaledData labels map[table.GroupID]table.Slice hpos float64 offsetX, offsetY int } func (m *markTags) mark(env *renderEnv, canvas *svg.SVG) { const padX = 5 xs, ys := env.get(m.x).([]float64), env.get(m.y).([]float64) if len(xs) == 0 { return } // Find the point closest to hpos between the min and max. // // TODO: Give the user control over this. minx, maxx := stats.Bounds(xs) targetx := minx + (maxx-minx)*m.hpos midi, middelta := 0, math.Abs(xs[0]-targetx) for i, x := range xs { delta := math.Abs(x - targetx) if delta < middelta { midi, middelta = i, delta } } // Get label. label := fmt.Sprint(reflect.ValueOf(m.labels[env.gid]).Index(midi).Interface()) // Attach tag to this point. // // TODO: More user control. // // TODO: Make automatic positioning account for bounds of plot. // // TODO: Adjust positions to avoid overlap. Unfortunately, // this requires some global optimization, but mark only sees // one tag at a time. // // TODO: Re-enable the tag box when I have decent text metrics. //t := measureString(fontSize, label) //canvas.Rect(int(xs[midi]+offsetX-t.width), int(ys[midi]+offsetY-0.75*t.leading), int(t.width), int(1.5*t.leading), `rx="4"`, `fill="white"`, `stroke="black"`) if m.offsetX > 0 { // To the right, left-aligned. 
canvas.Text(int(xs[midi])+m.offsetX+padX, int(ys[midi])+m.offsetY, label, `dy=".3em"`) } else { canvas.Text(int(xs[midi])+m.offsetX-padX, int(ys[midi])+m.offsetY, label, `dy=".3em"`, `text-anchor="end"`) } canvas.Path(fmt.Sprintf("M%.6g %.6gc%.6g %.6g,%.6g %.6g,%.6g %.6g", xs[midi], ys[midi], 0.8*float64(m.offsetX), 0.0, 0.2*float64(m.offsetX), float64(m.offsetY), float64(m.offsetX), float64(m.offsetY)), `fill="none"`, `stroke="black"`, `stroke-dasharray="2, 3"`, `stroke-width="2"`) } type markTooltips struct { x, y *scaledData labels map[table.GroupID]table.Slice } func (m *markTooltips) mark(env *renderEnv, canvas *svg.SVG) { // Construct JSON for data. xs, ys := env.get(m.x).([]float64), env.get(m.y).([]float64) if len(xs) == 0 { return } var labels []string switch l2 := m.labels[env.gid].(type) { case []string: labels = l2 default: l2v := reflect.ValueOf(l2) labels = make([]string, l2v.Len()) for i := range labels { labels[i] = fmt.Sprint(l2v.Index(i).Interface()) } } // TODO: Make env able to generate IDs. // // TODO: Sort by x and use binary search in Javascript. // // TODO: Remove points that round to the same coordinate. // // TODO: Put on the left if we're close to the right edge. id := fmt.Sprintf("tooltips%p", env) var buf bytes.Buffer fmt.Fprintf(&buf, "var %s = ", id) data := struct { X []int `json:"x"` Y []int `json:"y"` L []string `json:"l"` }{make([]int, 0, len(xs)), make([]int, 0, len(ys)), labels} for i := range xs { if !isFinite(xs[i]) || !isFinite(ys[i]) { continue } // Round data to an int to save space. 
data.X = append(data.X, int(xs[i]+0.5)) data.Y = append(data.Y, int(ys[i]+0.5)) } if len(data.X) == 0 { return } json.NewEncoder(&buf).Encode(data) canvas.Script("text/javascript", buf.String()) canvas.Path("", `display="none"`, `fill="white"`, `stroke="black"`, fmt.Sprintf(`id="%s-p"`, id)) canvas.Text(0, 0, "", `display="none"`, fmt.Sprintf(`id="%s-t"`, id)) px, _, pw, _ := env.Area() canvas.Rect(int(env.area[0]), int(env.area[1]), int(env.area[2]), int(env.area[3]), `fill-opacity="0"`, fmt.Sprintf(`onmousemove="tooltipMove(evt,%s,"%s",%v,%v)"`, id, id, px, px+pw), fmt.Sprintf(`onmouseout="tooltipOut(evt,%s,"%s")"`, id, id)) // TODO: Only write this once per SVG. canvas.Script("text/javascript", ` function tooltipMove(evt, data, tid, minx, maxx) { // Convert evt.x to an SVG coordinate. var svg = document.rootElement; var pt = svg.createSVGPoint(); pt.x = evt.clientX; pt.y = evt.clientY; var epos = pt.matrixTransform(svg.getScreenCTM().inverse()); // Find data point closest to event coordinate. var cd = Math.sqrt(Math.pow(epos.x-data.x[0], 2) + Math.pow(epos.y-data.y[0], 2)), ci = 0; for (var i = 1; i < data.x.length; i++) { var d = Math.sqrt(Math.pow(epos.x-data.x[i], 2) + Math.pow(epos.y-data.y[i], 2)); if (d < cd) { cd = d; ci = i; } } // Update text content and position. var text = document.getElementById(tid+"-t"); text.textContent = data.l[ci]; text.style.display = "block"; text.setAttribute("x", 0); text.setAttribute("y", 0); var bb = text.getBBox(); var hm = 2, r = 3; var tx = data.x[ci] + bb.height/4 + hm; var flip = false; if (tx + bb.width + 2*hm + r > maxx) { var tx2 = data.x[ci] - bb.height/4 - hm - bb.width; if (tx2 - 2*hm - r >= minx) { // Position left of point. tx = tx2; flip = true; } } text.setAttribute("x", tx); text.setAttribute("y", data.y[ci] - (bb.y + bb.height/2)); // Update marker. 
var p = document.getElementById(tid+"-p"); if (flip) { p.setAttribute("transform", "translate("+2*data.x[ci]+",0) scale(-1,1)") } else { p.setAttribute("transform", "") } p.setAttribute("d", "M"+data.x[ci]+","+data.y[ci]+ "l"+(bb.height/4)+","+(-bb.height/2)+ "h"+(bb.width+2*hm)+ "a"+r+","+r+",90,0,1,"+r+","+r+ "v"+(bb.height-2*r)+ "a"+r+","+r+",90,0,1,"+(-r)+","+r+ "h"+(-bb.width-2*hm)+"z"); p.style.display = "block"; } function tooltipOut(evt, data, tid) { var text = document.getElementById(tid+"-t"); text.style.display = "none"; var p = document.getElementById(tid+"-p"); p.style.display = "none"; } `) } // cssPaint returns a CSS fragment for setting CSS property prop to // color c. func cssPaint(prop string, c color.Color) string { r, g, b, a := c.RGBA() if a == 0 { // No paint. return prop + ":none" } if a != 0xffff { // Undo alpha pre-multiplication. r = r * 0xffff / a g = g * 0xffff / a b = b * 0xffff / a } r, g, b = r>>8, g>>8, b>>8 css := prop if r>>4 == r&0xF && g>>4 == g&0xF && b>>4 == b&0xF { // Use #rgb form. css += fmt.Sprintf(":#%x%x%x", r>>4, g>>4, b>>4) } else { // Use #rrggbb form. css += fmt.Sprintf(":#%02x%02x%02x", r, g, b) } if a != 0xffff { // SVG 1.1 only supports CSS2 color formats, which // unfortunately does not include rgba, so we have to // use a separate CSS property. css += ";" + prop + "-opacity:" + strconv.FormatFloat(float64(a)/0xffff, 'g', 0, 64) } return css } ================================================ FILE: benchplot/vendor/github.com/aclements/go-gg/gg/package.go ================================================ // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package gg creates plots using the Grammar of Graphics. // // WARNING: This API is highly unstable. For now, please vendor this // package. // // gg creates statistical visualizations. 
It's designed to help users // quickly navigate and explore complex data in different ways, both // in terms of what they're plotting and how they're plotting it. This // focus on rapid exploration of complex data leads to a very // different design than typical plotting packages. // // gg is heavily inspired by Wilkinson's Grammar of Graphics [1]. A // key observation of the Grammar of Graphics is that there are many // motifs across different types of plots. The Grammar of Graphics // separates these motifs into orthogonal concerns that can be // manipulated and extended independently, enabling the creation of // traditional plot types from their fundamental components as well as // the creation of entirely new plot types. // // Data model // // Central to gg is its data model. At the most basic level, the input // data consists of a table with a set of named columns, with the rows // organized into one or more groups. At a higher level, because gg // makes it easy to restructure data before plotting it, it expects to // start with regularized input data, where each column represents a // distinct independent or dependent variable. In other words, any two // values that make sense to plot on the same axis should be in the // same column. // // For example, to express a line graph with several series of // different colors in gg, you would say "plot column A against column // B, grouped into series and colored according to column C". In // contrast, typical plotting packages use a "spreadsheet" model, // where each data series is a separate column, so expressing the same // graph requires saying "plot column A against column B in color 1 // and plot column A against column C in color 2" and so on. // // gg's approach is suited to exploratory data analysis because you // don't have to restructure the data to see it in a different way. In // the traditional spreadsheet model, you have to structure the data // to match the plot. 
In gg, you tell the plot what structure to // extract from the data. // // Layers and scales // // To visualize data, gg provides a set of composable plot building // blocks. There are no fixed "plot types" in gg. The main building // block is a "layer", which transforms a data set into a set of // visual marks, such as lines, points, or rectangles. Each layer is // configured by mapping columns of the data set to different // "aesthetics". An aesthetic is a generalization of a dimension: X // and Y are aesthetics, but so are color and stroke width and point // shape. Unlike typical plotting packages, these various aesthetics // are treated symmetrically and any aesthetic can be fed from any // column of the data. // // Layers work in close concert with "scales", which map from values // in the data space to values in the visual space. Scales can map // from continuous or discrete data values (such as numbers or // strings) to continuous or discrete visual values (such as pixel // offsets or point shapes). Each aesthetic has an associated scale. // If the user hasn't provided a specific scale for an aesthetic, gg // uses a default scale that guesses what to do based on the data type // and aesthetic. // // Stats // // Data can be pre-processed prior to rendering it with a layer using // a "stat". A stat can be an arbitrary data transformation, but it's // typically used to compute statistical summaries, such as the // five-number summary (e.g., for a box plot), a linear regression, or // a density estimate. // // TODO: "Compound" layers? // // Facets // // TODO. // // Aesthetics // // gg understands the following aesthetics. // // "x" and "y" give the offset from the lower-left corner of a plot. // Their ranges are always set to the pixel coordinates of the X and Y // axes, respectively, and cannot be overridden. // // "stroke" and "fill" give the stroke and fill colors of paths and // points. Their ranger must have type color.Color. 
The default ranger // returns a single-hue gradient for continuous data, or a categorical // palette for discrete data. // // "opacity" gives the overall opacity of a mark. Its ranger must have // type float64 and give values between 0 and 1, inclusive. The // default ranger ranges from 10% opaque (0.1) to fully opaque (1.0). // // "size" gives the size of marks. Its ranger must have type float64 // and yields values that are relative to the smallest dimension of // the plot area (e.g., a value of 0.5 creates a point that cover half // of the plot width or height, whichever is smaller). The default // ranger ranges from 1% (0.01) to 10% (0.1). // // Related work // // gg draws ideas and inspiration from many sources. The core // principle of a Grammar of Graphics was introduced by Wiklinson [1]. // There have been many implementations in many languages. The most // popular is certainly Wickham's ggplot2 for R [2]. gg draws most // heavily on Wickham's follow-up work on ggvis for R [3]. // // [1] Leland Wilkinson, The Grammar of Graphics, Springer, 1999. // // [2] Hadley Wickham, ggplot2: Elegant Graphics for Data Analysis, // Springer, 2009. // // [3] Hadley Wickham, ggvis, http://ggvis.rstudio.com/. // // TODO: Scale transforms, coordinate spaces. package gg ================================================ FILE: benchplot/vendor/github.com/aclements/go-gg/gg/plot.go ================================================ // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package gg import ( "fmt" "log" "os" "github.com/aclements/go-gg/table" ) // TODO: Split transforms, scalers, and layers into their own packages // to clean up the name spaces and un-prefix their names? // Warning is a logger for reporting conditions that don't prevent the // production of a plot, but may lead to unexpected results. 
var Warning = log.New(os.Stderr, "[gg] ", log.Lshortfile)

// Plot represents a single (potentially faceted) plot.
type Plot struct {
	// env is the current data environment; Save/Restore push and
	// pop entries on this linked list.
	env *plotEnv

	// scales maps each aesthetic to its tree of group-scoped
	// scalers.
	scales map[string]scalerTree

	// scaledData caches the result of use for a given
	// (aesthetic, data, column) triple.
	scaledData map[scaledDataKey]*scaledData

	// scaleSet records every (group, aesthetic, scaler) binding
	// that has actually been used.
	scaleSet map[scaleKey]bool

	marks []plotMark

	// axisLabels holds user-set labels; autoAxisLabels holds the
	// column names gathered from use as fallback labels.
	axisLabels     map[string]string
	autoAxisLabels map[string][]string

	title string

	// constNonce generates unique column names for Const.
	constNonce int
}

// NewPlot returns a new Plot backed by data. It has no layers, one
// facet, and all scales are default.
func NewPlot(data table.Grouping) *Plot {
	p := &Plot{
		env: &plotEnv{
			data: data,
		},
		scales:         make(map[string]scalerTree),
		scaledData:     make(map[scaledDataKey]*scaledData),
		scaleSet:       make(map[scaleKey]bool),
		axisLabels:     make(map[string]string),
		autoAxisLabels: make(map[string][]string),
	}
	return p
}

// plotEnv is one frame of the Save/Restore stack.
type plotEnv struct {
	parent *plotEnv
	data   table.Grouping
}

type scaleKey struct {
	gid   table.GroupID
	aes   string
	scale Scaler
}

// SetData sets p's current data table. The caller must not modify
// data in this table after this point.
func (p *Plot) SetData(data table.Grouping) *Plot {
	p.env.data = data
	return p
}

// Data returns p's current data table.
func (p *Plot) Data() table.Grouping {
	return p.env.data
}

// Const creates a new constant column bound to val in all groups and
// returns the generated column name. This is a convenient way to pass
// constant values to layers as columns.
//
// TODO: Typically this should be used with PreScaled or physical types.
func (p *Plot) Const(val interface{}) string {
	tab := p.Data()
retry:
	col := fmt.Sprintf("[gg-const-%d]", p.constNonce)
	p.constNonce++
	// If the generated name collides with an existing column,
	// bump the nonce and try again.
	for _, col2 := range tab.Columns() {
		if col == col2 {
			goto retry
		}
	}
	p.SetData(table.MapTables(tab, func(_ table.GroupID, t *table.Table) *table.Table {
		return table.NewBuilder(t).AddConst(col, val).Done()
	}))
	return col
}

// scalerTree maps group IDs to Scalers. A group with no explicit
// binding inherits its nearest bound ancestor's scaler.
type scalerTree struct {
	scales map[table.GroupID]Scaler
}

func newScalerTree() scalerTree {
	return scalerTree{map[table.GroupID]Scaler{
		table.RootGroupID: &defaultScale{},
	}}
}

// bind binds scaler s to group gid, removing any existing bindings
// on gid's descendants so they inherit s.
func (t scalerTree) bind(gid table.GroupID, s Scaler) {
	// Unbind scales under gid.
	for ogid := range t.scales {
		if gid == table.RootGroupID {
			// Optimize binding the root GID.
			delete(t.scales, ogid)
			continue
		}
		// Walk ogid's ancestor chain; if it passes through
		// gid, ogid is a descendant and must be unbound.
		for p := ogid; ; p = p.Parent() {
			if p == gid {
				delete(t.scales, ogid)
				break
			}
			if p == table.RootGroupID {
				break
			}
		}
	}
	t.scales[gid] = s
}

// find returns the scaler bound to gid or its nearest bound ancestor.
func (t scalerTree) find(gid table.GroupID) Scaler {
	for {
		if s, ok := t.scales[gid]; ok {
			return s
		}
		if gid == table.RootGroupID {
			// This should never happen.
			panic("no scale for group " + gid.String())
		}
		gid = gid.Parent()
	}
}

// getScales returns the scaler tree for aesthetic aes, creating a
// default tree on first use.
func (p *Plot) getScales(aes string) scalerTree {
	st, ok := p.scales[aes]
	if !ok {
		st = newScalerTree()
		p.scales[aes] = st
	}
	return st
}

// copyScales binds group new to the scaler currently in effect for
// group old, for every aesthetic.
func (p *Plot) copyScales(old, new table.GroupID) {
	for _, st := range p.scales {
		st.scales[new] = st.find(old)
	}
}

// SetScale binds a scale to the given visual aesthetic. SetScale is
// shorthand for SetScaleAt(aes, s, table.RootGroupID). SetScale must
// be called before Add.
//
// SetScale returns p for ease of chaining.
func (p *Plot) SetScale(aes string, s Scaler) *Plot {
	return p.SetScaleAt(aes, s, table.RootGroupID)
}

// SetScaleAt binds a scale to the given visual aesthetic for all data
// in group gid or descendants of gid. SetScaleAt must be called
// before Add.
func (p *Plot) SetScaleAt(aes string, s Scaler, gid table.GroupID) *Plot {
	// TODO: Should aes be an enum so you can't mix up aesthetics
	// and column names?
	p.getScales(aes).bind(gid, s)
	return p
}

// GetScale returns the scale for the given visual aesthetic used for
// data in the root group.
func (p *Plot) GetScale(aes string) Scaler {
	return p.GetScaleAt(aes, table.RootGroupID)
}

// GetScaleAt returns the scale for the given visual aesthetic used
// for data in group gid.
func (p *Plot) GetScaleAt(aes string, gid table.GroupID) Scaler {
	return p.getScales(aes).find(gid)
}

type scaledDataKey struct {
	aes  string
	data table.Grouping
	col  string
}

// use binds a column of data to an aesthetic. It expands the domain
// of the aesthetic's scale to include the data in col, and returns
// the scaled data.
//
// col may be "", in which case it simply returns nil.
//
// TODO: Should aes be an enum?
func (p *Plot) use(aes string, col string) *scaledData {
	if col == "" {
		return nil
	}

	// TODO: This is wrong. If the scale tree for aes changes,
	// this may return a stale scaledData bound to the wrong
	// scalers. If I got rid of scale trees, I could just put the
	// scaler in the key. Or I could clean up the cache when the
	// scale tree changes.
	sd := p.scaledData[scaledDataKey{aes, p.Data(), col}]
	if sd == nil {
		// Construct the scaledData.
		sd = &scaledData{
			seqs: make(map[table.GroupID]scaledSeq),
		}

		// Get the scale tree.
		st := p.getScales(aes)

		for _, gid := range p.Data().Tables() {
			table := p.Data().Table(gid)

			// Get the data.
			seq := table.MustColumn(col)

			// Find the scale.
			scaler := st.find(gid)

			// Add the scale to the scale set.
			p.scaleSet[scaleKey{gid, aes, scaler}] = true

			// Train the scale. Columns of []Unscaled are
			// passed through without expanding the domain.
			if _, ok := seq.([]Unscaled); !ok {
				scaler.ExpandDomain(seq)
			}

			// Add it to the scaledData.
			sd.seqs[gid] = scaledSeq{seq, scaler}
		}

		p.scaledData[scaledDataKey{aes, p.Data(), col}] = sd
	}

	// Update axis labels.
	if aes == "x" || aes == "y" {
		p.autoAxisLabels[aes] = append(p.autoAxisLabels[aes], col)
	}

	return sd
}

// Save saves the current data table of p to a stack.
func (p *Plot) Save() *Plot {
	// Push a new environment frame that starts with the current
	// data; Restore pops back to the parent frame.
	p.env = &plotEnv{
		parent: p.env,
		data:   p.env.data,
	}
	return p
}

// Restore restores the data table of p from the save stack.
func (p *Plot) Restore() *Plot {
	if p.env.parent == nil {
		panic("unbalanced Save/Restore")
	}
	p.env = p.env.parent
	return p
}

// A Plotter is an operation that can modify a Plot.
type Plotter interface {
	Apply(*Plot)
}

// Add applies each of plotters to Plot in order.
func (p *Plot) Add(plotters ...Plotter) *Plot {
	for _, plotter := range plotters {
		plotter.Apply(p)
	}
	return p
}

// AxisLabel returns a Plotter that sets the label of an axis on a
// Plot. By default, Plot constructs automatic axis labels from column
// names, but AxisLabel lets callers override these.
//
// TODO: Should labels be attached to aesthetics, generally?
//
// TODO: Should this really be a Plotter or just a method of Plot?
func AxisLabel(axis, label string) Plotter {
	return axisLabel{axis, label}
}

// axisLabel is the Plotter returned by AxisLabel.
type axisLabel struct {
	axis, label string
}

func (a axisLabel) Apply(p *Plot) {
	p.axisLabels[a.axis] = a.label
}

// Title returns a Plotter that sets the title of a Plot.
func Title(label string) Plotter {
	return titlePlotter{label}
}

// titlePlotter is the Plotter returned by Title.
type titlePlotter struct {
	label string
}

func (t titlePlotter) Apply(p *Plot) {
	p.title = t.label
}

// A Stat transforms a table.Grouping.
type Stat interface {
	F(table.Grouping) table.Grouping
}

// Stat applies each of stats in order to p's data.
//
// TODO: Perform scale transforms before applying stats.
func (p *Plot) Stat(stats ...Stat) *Plot {
	data := p.Data()
	for _, stat := range stats {
		data = stat.F(data)
	}
	return p.SetData(data)
}


================================================
FILE: benchplot/vendor/github.com/aclements/go-gg/gg/render.go
================================================
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gg import ( "bytes" "fmt" "io" "math" "reflect" "strings" "github.com/aclements/go-gg/generic/slice" "github.com/aclements/go-gg/table" "github.com/ajstarks/svgo" ) // fontSize is the font size in pixels. // // TODO: Theme. const fontSize float64 = 14 // facetLabelHeight is the height of facet labels, as a multiple of // the font height. // // TODO: Should this be a multiple of fontSize, em height, leading? // Currently it's leading. // // TODO: Theme. const facetLabelHeight = 1.3 const xTickSep = 5 // TODO: Theme. const yTickSep = 5 // TODO: Theme. // plotMargins returns the top, right, bottom, and left margins for a // plot of the given width and height. // // By default, this adds a 5% margin based on the smaller of width and // height. This ensures that (with automatic scales), the extremes of // the data and its tick labels don't appear right at the edge of the // plot area. // // TODO: Theme. var plotMargins = func(w, h float64) (t, r, b, l float64) { margin := 0.05 * math.Min(w, h) return margin, margin, margin, margin } func (p *Plot) WriteSVG(w io.Writer, width, height int) error { // TODO: Legend, title. // TODO: Check if the same scaler is used for multiple // aesthetics with conflicting rangers. Alternatively, if we // just computed the scaled data eagerly here, it wouldn't // matter if the same Scaler was used for multiple things // because we would just change its Ranger between scaling // different data. We could still optimize for get/get1 by // specifying whether we care about all of the values or just // the first when fetching the scaledData (arguably this // should also affect scale training, so this is necessary // anyway). // TODO: Rather than finding these scales here and giving them // Ratners, we could use special "Width"/"Height" Rangers and // assign them much earlier (e.g., when they are Used). 
We // could then either find all of the scales that have those // Rangers and configure them at this point, or we could pass // the renderEnv in when ranging. // TODO: Default ranges for other things like color. // TODO: Expose the layout so a package user can put together // multiple Plots. // // What if the user wants multiple aligned plots, but as // *different* images (e.g., flipping from one slide to // another)? // TODO: Let the user alternatively specify the width and // height of the subplots, rather than the whole plot. // TODO: Automatic aspect ratio by averaging slopes. // TODO: Custom tick breaks. // TODO: Make sure *all* Scalers have Rangers or the user will // get confusing panics. // TODO: If the user restricts, say, the X range, should that // only train the Y axis on what's in the X range? // Assign default Rangers to scales that don't have them. // // TODO: Do this on a clone of the scale so this doesn't // persist. for aes, scales := range p.scales { if aes == "x" || aes == "y" { // We'll assign these when we render each // subplot. continue } for _, scale := range scales.scales { if scale.Ranger(nil) == nil { scale.Ranger(defaultRanger(aes)) } } } // Find all of the subplots and subdivide the marks. // // TODO: If a mark was done in a parent subplot, broadcast it // to all child leafs of that subplot. subplots := make(map[*subplot]*eltSubplot) plotElts := []plotElt{} for _, mark := range p.marks { submarks := make(map[*eltSubplot]plotMark) for _, gid := range mark.groups { subplot := subplotOf(gid) elt := subplots[subplot] if elt == nil { elt = newEltSubplot(subplot) plotElts = append(plotElts, elt) subplots[subplot] = elt } submark := submarks[elt] submark.m = mark.m submark.groups = append(submark.groups, gid) submarks[elt] = submark } for subplot, submark := range submarks { subplot.marks = append(subplot.marks, submark) } } // Subdivide the scales. 
for sk := range p.scaleSet { subplot := subplotOf(sk.gid) elt := subplots[subplot] if elt == nil { continue } ss := elt.scales[sk.aes] if ss == nil { ss = make(map[Scaler]bool) elt.scales[sk.aes] = ss } ss[sk.scale] = true } // Add ticks and facet labels. plotElts = addSubplotLabels(plotElts) // Add axis labels and title. var xlabel, ylabel string if l, ok := p.axisLabels["x"]; ok { xlabel = l } else { xlabel = strings.Join(slice.Nub(p.autoAxisLabels["x"]).([]string), "\n") } if l, ok := p.axisLabels["y"]; ok { ylabel = l } else { ylabel = strings.Join(slice.Nub(p.autoAxisLabels["y"]).([]string), "\n") } plotElts = addAxisLabels(plotElts, p.title, xlabel, ylabel) // Compute plot element layout. layout := layoutPlotElts(plotElts) // Perform layout. There's a cyclic dependency involving tick // labels here: the tick labels depend on how many ticks there // are, how many ticks there are depends on the size of the // plot, the size of the plot depends on its surrounding // content, and the size of the surrounding content depends on // the tick labels. There may not be a fixed point here, so we // compromise around the number of ticks. // // 1) Lay out the graphs without ticks. layout.SetLayout(0, 0, float64(width), float64(height)) // 2) Compute the number of ticks and tick labels for each // tick element. for _, elt := range plotElts { if elt, ok := elt.(*eltTicks); ok { elt.computeTicks() } } // 3) Re-layout the plot and stick with the ticks we computed. layout.SetLayout(0, 0, float64(width), float64(height)) // Draw. svg := svg.New(w) svg.Start(width, height, fmt.Sprintf(`font-size="%.6gpx" font-family="Roboto,"Helvetica Neue",Helvetica,Arial,sans-serif"`, fontSize)) defer svg.End() // Render each plot element. r := &eltRender{svg, 0} for _, elt := range plotElts { elt.render(r) } return nil } func (e *eltSubplot) render(r *eltRender) { svg := r.svg x, y, w, h := e.Layout() m := e.plotMargins // Round the bounds rectangle in. 
	x2i, y2i := int(x+w), int(y+h)
	xi, yi := int(math.Ceil(x)), int(math.Ceil(y))
	wi, hi := x2i-xi, y2i-yi

	// Create clip region for plot area.
	clipId, clipRef := r.genid("clip")
	svg.ClipPath(`id="` + clipId + `"`)
	svg.Rect(xi, yi, wi, hi)
	svg.ClipEnd()
	svg.Group(`clip-path="` + clipRef + `"`)

	// Set scale ranges. Note that the Y ranger is inverted
	// because SVG Y coordinates grow downward.
	xRanger := NewFloatRanger(float64(xi)+m.l, float64(x2i)-m.r)
	yRanger := NewFloatRanger(float64(y2i)-m.b, float64(yi)+m.t)
	for s := range e.scales["x"] {
		s.Ranger(xRanger)
	}
	for s := range e.scales["y"] {
		s.Ranger(yRanger)
	}

	// Render grid.
	renderBackground(svg, xi, yi, wi, hi)
	for s := range e.scales["x"] {
		renderGrid(svg, 'x', s, e.xTicks.ticks[s], yi, y2i)
	}
	for s := range e.scales["y"] {
		renderGrid(svg, 'y', s, e.yTicks.ticks[s], xi, x2i)
	}

	// Create rendering environment.
	env := &renderEnv{
		cache: make(map[renderCacheKey]table.Slice),
		area:  [4]float64{float64(xi), float64(yi), float64(wi), float64(hi)},
	}

	// Render marks.
	for _, mark := range e.marks {
		for _, gid := range mark.groups {
			env.gid = gid
			mark.m.mark(env, svg)
		}
	}

	// End clip region.
	svg.Gend()

	// Draw border and scale ticks.
	//
	// TODO: Theme.

	// Render border.
	svg.Path(fmt.Sprintf("M%d %dV%dH%d", xi, yi, y2i, x2i), "stroke:#888; fill:none; stroke-width:2") // TODO: Theme.

	// Render scale ticks.
	for s := range e.scales["x"] {
		renderScale(svg, 'x', s, e.xTicks.ticks[s], y2i)
	}
	for s := range e.scales["y"] {
		renderScale(svg, 'y', s, e.yTicks.ticks[s], xi)
	}
}

// TODO: Use shape-rendering: crispEdges?

// renderBackground fills the plot area rectangle.
func renderBackground(svg *svg.SVG, x, y, w, h int) {
	svg.Rect(x, y, w, h, "fill:#eee") // TODO: Theme.
}

// renderGrid draws a grid line at each major tick of scale, spanning
// from start to end in the direction perpendicular to axis dir.
func renderGrid(svg *svg.SVG, dir rune, scale Scaler, ticks plotEltTicks, start, end int) {
	major := mapMany(scale, ticks.major).([]float64)
	r := func(x float64) float64 {
		// Round to nearest N.
		return math.Floor(x + 0.5)
	}
	var path []string
	for _, p := range major {
		if dir == 'x' {
			path = append(path, fmt.Sprintf("M%.6g %dv%d", r(p), start, end-start))
		} else {
			path = append(path, fmt.Sprintf("M%d %.6gh%d", start, r(p), end-start))
		}
	}
	svg.Path(wrapPath(strings.Join(path, "")), "stroke: #fff; stroke-width:2") // TODO: Theme.
}

// renderScale draws the tick marks for scale along axis dir at
// position pos. Major ticks are drawn twice as long as minor ticks,
// and duplicate pixel positions are drawn only once.
func renderScale(svg *svg.SVG, dir rune, scale Scaler, ticks plotEltTicks, pos int) {
	const length float64 = 4 // TODO: Theme

	var path bytes.Buffer
	have := map[float64]bool{}
	for _, t := range []struct {
		length float64
		s      table.Slice
	}{
		{length * 2, ticks.major},
		{length, ticks.minor},
	} {
		ticks := mapMany(scale, t.s).([]float64)
		r := func(x float64) float64 {
			// Round to nearest N.
			return math.Floor(x + 0.5)
		}
		for _, p := range ticks {
			p = r(p)
			if have[p] {
				// Avoid overplotting the same tick
				// marks.
				continue
			}
			have[p] = true
			if dir == 'x' {
				fmt.Fprintf(&path, "M%.6g %dv%.6g", p, pos, -t.length)
			} else {
				fmt.Fprintf(&path, "M%d %.6gh%.6g", pos, p, t.length)
			}
		}
	}
	svg.Path(wrapPath(path.String()), "stroke:#888; stroke-width:2") // TODO: Theme
}

// render draws the tick labels for each scale of this tick element.
func (e *eltTicks) render(r *eltRender) {
	svg := r.svg
	x, y, w, _ := e.Layout()
	for s := range e.scales() {
		pos := e.mapTicks(s, e.ticks[s].major)
		for i, label := range e.ticks[s].labels {
			tick := pos[i]
			if e.axis == 'x' {
				svg.Text(int(tick), int(y+xTickSep), label, `text-anchor="middle" dy="1em" fill="#666"`) // TODO: Theme.
			} else {
				svg.Text(int(x+w-yTickSep), int(tick), label, `text-anchor="end" dy=".3em" fill="#666"`)
			}
		}
	}
}

// render draws a (possibly rotated) text label, clipped to its layout
// region and optionally over a filled background.
func (e *eltLabel) render(r *eltRender) {
	svg := r.svg
	x, y, w, h := e.Layout()

	// Clip to label region.
	clipId, clipRef := r.genid("clip")
	svg.ClipPath(`id="` + clipId + `"`)
	svg.Rect(int(x), int(y), int(w), int(h))
	svg.ClipEnd()
	svg.Group(`clip-path="` + clipRef + `"`)
	defer svg.Gend()

	if e.fill != "none" {
		svg.Rect(int(x), int(y), int(w), int(h), "fill: "+e.fill)
	}

	// Vertical centering is very poorly
	// supported. dy is the best chance.
	style := `text-anchor="middle" dy=".3em"`
	switch e.side {
	case 'l':
		style += fmt.Sprintf(` transform="rotate(-90 %d %d)"`, int(x+w/2), int(y+h/2))
	case 'r':
		style += fmt.Sprintf(` transform="rotate(90 %d %d)"`, int(x+w/2), int(y+h/2))
	}
	svg.Text(int(x+w/2), int(y+h/2), e.label, style)
}

// render for padding elements draws nothing; padding only occupies
// layout space.
func (e *eltPadding) render(r *eltRender) {
}

// renderEnv carries per-group rendering state: the current group ID,
// a cache of already-mapped data, and the pixel area of the subplot.
type renderEnv struct {
	gid   table.GroupID
	cache map[renderCacheKey]table.Slice
	area  [4]float64
}

// renderCacheKey keys the renderEnv cache by scaled data and group.
type renderCacheKey struct {
	sd  *scaledData
	gid table.GroupID
}

// scaledData is a key for retrieving scaled data from a renderEnv. It
// is the result of using a binding and can be thought of as a lazy
// representation of the visually-mapped data that becomes available
// once all of the scales have been trained.
type scaledData struct {
	seqs map[table.GroupID]scaledSeq
}

// scaledSeq pairs a raw data sequence with the Scaler that maps it.
type scaledSeq struct {
	seq    table.Slice
	scaler Scaler
}

// get returns sd's data for the current group, mapped through its
// scaler. Results are memoized in env.cache.
func (env *renderEnv) get(sd *scaledData) table.Slice {
	cacheKey := renderCacheKey{sd, env.gid}
	if mapped := env.cache[cacheKey]; mapped != nil {
		return mapped
	}

	v := sd.seqs[env.gid]
	mapped := mapMany(v.scaler, v.seq)
	env.cache[cacheKey] = mapped
	return mapped
}

// getFirst returns just the first mapped value of sd for the current
// group, or nil if the sequence is empty. It uses the cache when
// available but does not populate it.
func (env *renderEnv) getFirst(sd *scaledData) interface{} {
	if mapped := env.cache[renderCacheKey{sd, env.gid}]; mapped != nil {
		mv := reflect.ValueOf(mapped)
		if mv.Len() == 0 {
			return nil
		}
		return mv.Index(0).Interface()
	}

	v := sd.seqs[env.gid]
	rv := reflect.ValueOf(v.seq)
	if rv.Len() == 0 {
		return nil
	}
	return v.scaler.Map(rv.Index(0).Interface())
}

// Area returns the pixel origin and size of the current subplot.
func (env *renderEnv) Area() (x, y, w, h float64) {
	return env.area[0], env.area[1], env.area[2], env.area[3]
}

// Size returns the pixel size of the current subplot.
func (env *renderEnv) Size() (w, h float64) {
	return env.area[2], env.area[3]
}

// round rounds x to the nearest integer, rounding halves up.
func round(x float64) int {
	return int(math.Floor(x + 0.5))
}

// wrapPath wraps path data p to avoid exceeding SVG's recommended
// line length limit of 255 characters.
func wrapPath(p string) string {
	const width = 70
	if len(p) <= width {
		return p
	}

	// Chop up p until we get below the width limit.
	parts := make([]string, 0, 16)
	for len(p) > width {
		// Find the last command or space before exceeding width.
		lastCmd, lastSpace := 0, 0
		for i, ch := range p {
			if i >= width && (lastCmd != 0 || lastSpace != 0) {
				break
			}
			if 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' {
				lastCmd = i
			} else if ch == ' ' {
				lastSpace = i
			}
		}
		split := len(p)
		// Prefer splitting at commands, but take spaces in
		// case it's a huge command.
		if lastCmd != 0 {
			split = lastCmd
		} else if lastSpace != 0 {
			split = lastSpace
		}
		parts, p = append(parts, p[:split]), p[split:]
	}
	if len(p) > 0 {
		parts = append(parts, p)
	}
	return strings.Join(parts, "\n")
}

================================================
FILE: benchplot/vendor/github.com/aclements/go-gg/gg/scale.go
================================================
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gg

import (
	"fmt"
	"image/color"
	"math"
	"reflect"
	"strings"
	"time"

	"github.com/aclements/go-gg/generic"
	"github.com/aclements/go-gg/generic/slice"
	"github.com/aclements/go-gg/palette"
	"github.com/aclements/go-gg/table"
	"github.com/aclements/go-moremath/scale"
)

// Continuous -> Interpolatable? Definitely.
//
// Continuous -> Discrete? Can always discretize the input either in
// value order or in index order. In this case the transform (linear,
// log, etc) doesn't matter as long as it's order-preserving. OTOH, a
// continuous input scale can be asked to map *any* value of its input
// type, but if I do this I can only map values that were trained.
// That suggests that I have to just bin the range to do this mapping.
//
// Discrete -> Interpolatable? Pick evenly spaced values on [0,1].
//
// Discrete -> Discrete? Definitely. Cycle the range if it's not long
// enough. If the input range is a VarNominal, concatenate the
// sequences and use index ordering.
//
// It's not really "continuous", it's more specifically cardinal.
// TODO: time.Time and time.Duration scalers. For time.Duration,
// handle sub-second as (10 seconds)^n (for n <= 0), handle seconds to
// minutes at multiples of 10 seconds, and likewise minutes to hours
// as multiples of 10 minutes, and handle hours as (10 hours)^n.

// XXX
//
// A Scaler can be cardinal, discrete, or identity.
//
// A cardinal Scaler has a VarCardinal input domain. If its output
// range is continuous, it maps an interval over the input to an
// interval of the output (possibly through a transformation such as a
// logarithm). If its output range is discrete, the input is
// discretized in value order and it acts like a discrete scale.
//
// XXX The cardinal -> discrete rule means we need to keep all of the
// input data, rather than just its bounds, just in case the range is
// discrete. Maybe it should just be a bucketing rule?
//
// A discrete Scaler has a VarNominal input domain. If the input is
// VarOrdinal, its order is used; otherwise, index order is imposed.
// If the output range is continuous, a discrete Scaler maps its input
// to the centers of equal sub-intervals of [0, 1] and then applies
// the Ranger. If the output range is discrete, the Scaler maps the
// Nth input level to the N%len(range)th output value.
//
// An identity Scaler ignores its input domain and output range and
// uses an identity function for mapping input to output. This is
// useful for specifying aesthetics directly, such as color or size,
// and is especially useful for constant Vars.
//
// XXX Should identity Scalers map numeric types to float64? Maybe it
// should depend on the range type of the ranger?
//
// XXX Arrange documentation as X -> Y?
type Scaler interface {
	// XXX
	ExpandDomain(table.Slice)

	// Ranger sets this Scaler's output range if r is non-nil and
	// returns the previous range. If a scale's Ranger is nil, it
	// will be assigned a default Ranger based on its aesthetic
	// when the Plot is rendered.
	Ranger(r Ranger) Ranger

	// XXX Should RangeType be implied by the aesthetic?
	//
	// XXX Should this be a method of Ranger instead?
	RangeType() reflect.Type

	// XXX
	//
	// x must be of the same type as the values in the domain Var.
	//
	// XXX Or should this take a slice? Or even a Var? That would
	// also eliminate RangeType(), though then Map would need to
	// know how to make the right type of return slice. Unless we
	// pushed slice mapping all the way to Ranger.
	//
	// XXX We could eliminate ExpandDomain if the caller was
	// required to pass everything to this at once and this did
	// the scale training. That would also make it easy to
	// implement the cardinal -> discrete by value order rule.
	// This would probably also make Map much faster.
	//
	// XXX If x is Unscaled, Map must only apply the ranger.
	Map(x interface{}) interface{}

	// Ticks returns a set of "nice" major and minor tick marks
	// spanning this Scaler's domain. The returned tick locations
	// are values in this Scaler's domain type in increasing
	// order. labels[i] gives the label of the major tick at
	// major[i]. The minor ticks are a superset of the major
	// ticks.
	//
	// max and pred constrain the ticks returned by Ticks. If
	// possible, Ticks returns the largest set of ticks such that
	// there are no more than max major ticks and the ticks
	// satisfy pred. Both are hints, since for some scale types
	// there's no clear way to reduce the number of ticks.
	//
	// pred should return true if the given set of ticks is
	// acceptable. pred must be "monotonic" in the following
	// sense: if pred is true for a given set of ticks, it must be
	// true for any subset of those ticks and if pred is false for
	// a given set of ticks, it must be false for any superset of
	// those ticks. In other words, pred should return false if
	// there are "too many" ticks or they are "too close
	// together". If pred is nil, it is assumed to always be
	// satisfied.
	//
	// If no tick marks can be produced (for example, there are no
	// values in this Scaler's domain or the predicate cannot be
	// satisfied), Ticks returns nil, nil, nil.
	//
	// TODO: Should this return ticks in the input space, the
	// intermediate space, or the output space? moremath returns
	// values in the input space. Input space values doesn't work
	// for discrete scales if I want the ticks between values.
	// Intermediate space works for continuous and discrete
	// inputs, but not for discrete ranges (maybe that's okay) and
	// it's awkward for a caller to do anything with an
	// intermediate space value. Output space doesn't work with
	// this API because I change the plot location in the course
	// of layout without recomputing ticks. However, output space
	// could work if Scaler exposed tick levels, since I could
	// save the computed tick level across a re-layout and
	// recompute the output space ticks from that.
	Ticks(max int, pred func(major, minor table.Slice, labels []string) bool) (major, minor table.Slice, labels []string)

	// SetFormatter sets the formatter for values on this scale.
	//
	// f may be nil, which makes this Scaler use the default
	// formatting. Otherwise, f must be a func(T) string where T
	// is convertible from the Scaler's input type (note that this
	// is weaker than typical Go function calls, which require
	// that the argument be assignable; this makes it possible to
	// use general-purpose functions like func(float64) string
	// even for more specific input types).
	SetFormatter(f interface{})

	CloneScaler() Scaler
}

// ContinuousScaler is a Scaler with a continuous (cardinal) domain
// that additionally supports imposing or expanding domain bounds.
type ContinuousScaler interface {
	Scaler

	// TODO: There are two variations on min/max. 1) We can force
	// the min/max, even if there's data beyond it. 2) We can say
	// cap the scale to some min/max, but a smaller range is okay.
	// Currently we can't express 2.

	// SetMin and SetMax set the minimum and maximum values of
	// this Scalar's domain and return the Scalar. If v is nil, it
	// unsets the bound.
	//
	// v must be convertible to the Scaler's domain type. For
	// example, if this is a linear scale, v can be of any
	// numerical type. Unlike ExpandDomain, these do not set the
	// Scaler's domain type.
	SetMin(v interface{}) ContinuousScaler
	SetMax(v interface{}) ContinuousScaler

	// TODO: Should Include work on any Scaler?

	// Include requires that v be included in this Scaler's
	// domain. Like SetMin/SetMax, this can expand Scaler's
	// domain, but unlike SetMin/SetMax, this does not restrict
	// it. If v is nil, it does nothing.
	//
	// v must be convertible to the Scaler's domain type. Unlike
	// ExpandDomain, this does not set the Scaler's domain type.
	Include(v interface{}) ContinuousScaler
}

// Unscaled represents a value that should not be scaled, but instead
// mapped directly to the output range. For continuous scales, this
// should be a value between 0 and 1. For discrete scales, this should
// be an integral value.
//
// TODO: This is confusing for opacity and size because it *doesn't*
// specify an exact opacity or size ratio since their default rangers
// aren't [0,1]. Maybe Unscaled should bypass scaling altogether (and
// only work if the range type is float64).
type Unscaled float64

var float64Type = reflect.TypeOf(float64(0))
var colorType = reflect.TypeOf((*color.Color)(nil)).Elem()

// canCardinal reports which reflect.Kinds can back a cardinal
// (numeric) scale.
var canCardinal = map[reflect.Kind]bool{
	reflect.Float32: true,
	reflect.Float64: true,
	reflect.Int:     true,
	reflect.Int8:    true,
	reflect.Int16:   true,
	reflect.Int32:   true,
	reflect.Int64:   true,
	reflect.Uint:    true,
	reflect.Uintptr: true,
	reflect.Uint8:   true,
	reflect.Uint16:  true,
	reflect.Uint32:  true,
	reflect.Uint64:  true,
}

// isCardinal reports whether kind k supports a cardinal scale.
func isCardinal(k reflect.Kind) bool {
	// XXX Move this to generic.IsCardinalR and rename CanOrderR
	// to IsOrderedR. Does complex count? It supports most
	// arithmetic operators. Maybe cardinal is a plot concept and
	// not a generic concept? If sort.Interface influences this,
	// this may need to be a question about a Slice, not a
	// reflect.Kind.
	return canCardinal[k]
}

// defaultScale is a lazily-instantiated Scaler. It defers choosing a
// concrete scale type until the first data is trained (or Map/Ticks
// forces one), while still accepting a Ranger and formatter up front.
type defaultScale struct {
	scale Scaler

	// Pre-instantiation state, applied to scale once it exists.
	r         Ranger
	formatter interface{}
}

func (s *defaultScale) String() string {
	return fmt.Sprintf("default (%s)", s.scale)
}

// ExpandDomain picks a concrete scale from the data's type on first
// use and then trains it.
func (s *defaultScale) ExpandDomain(v table.Slice) {
	if s.scale == nil {
		var err error
		s.scale, err = DefaultScale(v)
		if err != nil {
			panic(&generic.TypeError{reflect.TypeOf(v), nil, err.Error()})
		}
		s.instantiate()
	}
	s.scale.ExpandDomain(v)
}

// ensure forces instantiation, falling back to a linear scale if no
// data has been trained yet.
func (s *defaultScale) ensure() Scaler {
	if s.scale == nil {
		s.scale = NewLinearScaler()
		s.instantiate()
	}
	return s.scale
}

// instantiate applies the pre-instantiation state to the newly
// instantiated s.scale and clears the state in s.
func (s *defaultScale) instantiate() {
	if s.r != nil {
		s.scale.Ranger(s.r)
		s.r = nil
	}
	if s.formatter != nil {
		s.scale.SetFormatter(s.formatter)
		s.formatter = nil
	}
}

func (s *defaultScale) Ranger(r Ranger) Ranger {
	// If there's no underlying scale yet, record the Ranger
	// locally rather than trying to guess a scale. This way users
	// can easily set Rangers before training any data.
	if s.scale == nil {
		old := s.r
		s.r = r
		return old
	}
	return s.scale.Ranger(r)
}

func (s *defaultScale) RangeType() reflect.Type {
	if s.scale == nil {
		// NOTE(review): panics (nil deref) if no Ranger has
		// been set yet either — confirm callers always set a
		// Ranger first.
		return s.r.RangeType()
	}
	return s.scale.RangeType()
}

func (s *defaultScale) Map(x interface{}) interface{} {
	return s.ensure().Map(x)
}

func (s *defaultScale) Ticks(max int, pred func(major, minor table.Slice, labels []string) bool) (major, minor table.Slice, labels []string) {
	return s.ensure().Ticks(max, pred)
}

func (s *defaultScale) SetFormatter(f interface{}) {
	if s.scale == nil {
		s.formatter = f
		return
	}
	s.scale.SetFormatter(f)
}

func (s *defaultScale) CloneScaler() Scaler {
	if s.scale == nil {
		return &defaultScale{nil, s.r, s.formatter}
	}
	return &defaultScale{s.scale.CloneScaler(), nil, s.formatter}
}

// DefaultScale returns a default Scaler for data of seq's type, or an
// error if no default exists for that type.
func DefaultScale(seq table.Slice) (Scaler, error) {
	// Handle common case types.
	switch seq.(type) {
	case []float64, []int, []uint:
		return NewLinearScaler(), nil

	case []string:
		// TODO: Ordinal scale

	case []time.Time:
		return NewTimeScaler(), nil
	}

	rt := reflect.TypeOf(seq).Elem()
	rtk := rt.Kind()
	switch {
	case rt.Implements(colorType):
		// For things that are already visual values, use an
		// identity scale.
		return NewIdentityScale(), nil

	// TODO: GroupAuto needs to make similar
	// cardinal/ordinal/nominal decisions. Deduplicate
	// these better.

	case isCardinal(rtk):
		return NewLinearScaler(), nil

	case slice.CanSort(seq):
		return NewOrdinalScale(), nil

	case rt.Comparable():
		// TODO: Nominal scale
		panic("not implemented")
	}

	return nil, fmt.Errorf("no default scale type for %T", seq)
}

// defaultRanger returns the default Ranger for the given aesthetic.
// If aes is an axis aesthetic, it returns nil (since these Rangers
// are assigned at render time). If aes is unknown, it panics.
func defaultRanger(aes string) Ranger {
	switch aes {
	case "x", "y":
		return nil

	case "stroke", "fill":
		return &defaultColorRanger{}

	case "opacity":
		return NewFloatRanger(0.1, 1)

	case "size":
		// Default to ranging between 1% and 10% of the
		// minimum plot dimension.
		return NewFloatRanger(0.01, 0.1)
	}
	panic(fmt.Sprintf("unknown aesthetic %q", aes))
}

// TODO: I'd like to remove identity scales and expose only Unscaled,
// but I use identity scales for physical types like color.Color right
// now. Probably that should bypass Scaler altogether.
func NewIdentityScale() Scaler {
	return &identityScale{}
}

// identityScale passes values through unchanged; it only tracks the
// element type of the trained data so RangeType can report it.
type identityScale struct {
	rangeType reflect.Type
}

func (s *identityScale) ExpandDomain(v table.Slice) {
	s.rangeType = reflect.TypeOf(v).Elem()
}

func (s *identityScale) RangeType() reflect.Type {
	return s.rangeType
}

// Ranger is a no-op: identity scales have no output range.
func (s *identityScale) Ranger(r Ranger) Ranger         { return nil }
func (s *identityScale) Map(x interface{}) interface{} { return x }

func (s *identityScale) Ticks(max int, pred func(major, minor table.Slice, labels []string) bool) (major, minor table.Slice, labels []string) {
	return nil, nil, nil
}

func (s *identityScale) SetFormatter(f interface{}) {}

func (s *identityScale) CloneScaler() Scaler {
	s2 := *s
	return &s2
}

// NewLinearScaler returns a continuous linear scale. The domain must
// be a VarCardinal.
//
// XXX If I return a Scaler, I can't have methods for setting fixed
// bounds and such. I don't really want to expose the whole type.
// Maybe a sub-interface for continuous Scalers?
func NewLinearScaler() ContinuousScaler {
	// TODO: Control over base.
	return &moremathScale{
		min: math.NaN(), max: math.NaN(),
		dataMin: math.NaN(), dataMax: math.NaN(),
	}
}

// NewLogScaler returns a continuous logarithmic scale with the given
// base. The domain must be a VarCardinal.
func NewLogScaler(base int) ContinuousScaler {
	return &moremathScale{
		min: math.NaN(), max: math.NaN(),
		base:    base,
		dataMin: math.NaN(), dataMax: math.NaN(),
	}
}

// moremathScale implements linear (base == 0) and logarithmic
// (base > 0) continuous scales on top of go-moremath's scale package.
// NaN is used as the "unset" sentinel for all four bounds.
type moremathScale struct {
	r Ranger
	f interface{}

	domainType reflect.Type
	base       int
	// min/max are user-imposed bounds; dataMin/dataMax are
	// trained from the data.
	min, max         float64
	dataMin, dataMax float64
}

func (s *moremathScale) String() string {
	if s.base > 0 {
		return fmt.Sprintf("log [%d,%g,%g] => %s", s.base, s.min, s.max, s.r)
	}
	return fmt.Sprintf("linear [%g,%g] => %s", s.min, s.max, s.r)
}

// ExpandDomain widens the trained data bounds to include every finite
// value in vs, and records the domain type on first use.
func (s *moremathScale) ExpandDomain(vs table.Slice) {
	if s.domainType == nil {
		s.domainType = reflect.TypeOf(vs).Elem()
	}
	var data []float64
	slice.Convert(&data, vs)
	min, max := s.dataMin, s.dataMax
	for _, v := range data {
		if math.IsNaN(v) || math.IsInf(v, 0) {
			continue
		}
		if v < min || math.IsNaN(min) {
			min = v
		}
		if v > max || math.IsNaN(max) {
			max = v
		}
	}
	s.dataMin, s.dataMax = min, max
}

func (s *moremathScale) SetMin(v interface{}) ContinuousScaler {
	if v == nil {
		// nil unsets the bound.
		s.min = math.NaN()
		return s
	}
	vfloat := reflect.ValueOf(v).Convert(float64Type).Float()
	s.min = vfloat
	return s
}

func (s *moremathScale) SetMax(v interface{}) ContinuousScaler {
	if v == nil {
		// nil unsets the bound.
		s.max = math.NaN()
		return s
	}
	vfloat := reflect.ValueOf(v).Convert(float64Type).Float()
	s.max = vfloat
	return s
}

func (s *moremathScale) Include(v interface{}) ContinuousScaler {
	if v == nil {
		return s
	}
	vfloat := reflect.ValueOf(v).Convert(float64Type).Float()
	if math.IsNaN(vfloat) || math.IsInf(vfloat, 0) {
		return s
	}
	if math.IsNaN(s.dataMin) {
		s.dataMin, s.dataMax = vfloat, vfloat
	} else {
		s.dataMin = math.Min(s.dataMin, vfloat)
		s.dataMax = math.Max(s.dataMax, vfloat)
	}
	return s
}

// tickMapper is the subset of go-moremath scale behavior this scale
// needs: tick generation plus float mapping to [0,1].
type tickMapper interface {
	scale.Ticker
	Map(float64) float64
}

// get constructs the underlying moremath scale over the effective
// domain: user-imposed bounds where set, trained data bounds
// otherwise, and [-1,1] if neither exists.
func (s *moremathScale) get() tickMapper {
	min, max := s.min, s.max
	if min > max {
		min, max = max, min
	}
	if math.IsNaN(min) {
		min = s.dataMin
	}
	if math.IsNaN(max) {
		max = s.dataMax
	}
	if math.IsNaN(min) {
		// Only possible if both dataMin and dataMax are NaN.
		min, max = -1, 1
	}
	if s.base > 0 {
		ls, err := scale.NewLog(min, max, s.base)
		if err != nil {
			panic(err)
		}
		ls.SetClamp(true)
		return &ls
	}
	return &scale.Linear{Min: min, Max: max}
}

func (s *moremathScale) Ranger(r Ranger) Ranger {
	old := s.r
	if r != nil {
		s.r = r
	}
	return old
}

func (s *moremathScale) RangeType() reflect.Type {
	return s.r.RangeType()
}

// Map scales x into [0,1] (Unscaled values skip scaling) and then
// applies the Ranger.
func (s *moremathScale) Map(x interface{}) interface{} {
	ls := s.get()
	var scaled float64
	switch x := x.(type) {
	case float64:
		scaled = ls.Map(x)
	case Unscaled:
		scaled = float64(x)
	default:
		v := reflect.ValueOf(x).Convert(float64Type).Float()
		scaled = ls.Map(v)
	}

	switch r := s.r.(type) {
	case ContinuousRanger:
		return r.Map(scaled)

	case DiscreteRanger:
		_, levels := r.Levels()
		// Bin the scaled value into 'levels' bins.
		level := int(scaled * float64(levels))
		if level < 0 {
			level = 0
		} else if level >= levels {
			level = levels - 1
		}
		return r.MapLevel(level, levels)

	default:
		panic("Ranger must be a ContinuousRanger or DiscreteRanger")
	}
}

func (s *moremathScale) Ticks(max int, pred func(major, minor table.Slice, labels []string) bool) (major, minor table.Slice, labels []string) {
	type Stringer interface {
		String() string
	}

	if s.domainType == nil {
		// There are no values and no domain type, so we can't
		// compute ticks or return slices of the domain type.
		return nil, nil, nil
	}

	o := scale.TickOptions{Max: max}
	// If the domain type is integral, don't let the tick level go
	// below 0. This is particularly important if the domain type
	// is a Stringer since the conversion back to the domain type
	// will cut off any fractional part.
	switch s.domainType.Kind() {
	case reflect.Int, reflect.Uint, reflect.Uintptr,
		reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
		reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		o.MinLevel, o.MaxLevel = 0, 1000
	default:
		// Set bounds for the pred loop below.
		o.MinLevel, o.MaxLevel = -1000, 1000
	}

	ls := s.get()
	level, ok := o.FindLevel(ls, 0)
	if !ok {
		return nil, nil, nil
	}

	// mkLabels formats the major tick values, preferring the
	// custom formatter, then the domain type's String method,
	// then %.6g.
	mkLabels := func(major []float64) []string {
		// Compute labels.
		labels = make([]string, len(major))
		if s.f != nil {
			// Use custom formatter.
			if f, ok := s.f.(func(float64) string); ok {
				// Fast path.
				for i, x := range major {
					labels[i] = f(x)
				}
				return labels
			}
			// TODO: Type check for better error.
			fv := reflect.ValueOf(s.f)
			at := fv.Type().In(0)
			var avs [1]reflect.Value
			for i, x := range major {
				avs[0] = reflect.ValueOf(x).Convert(at)
				rvs := fv.Call(avs[:])
				labels[i] = rvs[0].Interface().(string)
			}
			return labels
		}
		if s.domainType != nil {
			z := reflect.Zero(s.domainType).Interface()
			if _, ok := z.(Stringer); ok {
				// Convert the ticks back into the domain type
				// and use its String method.
				for i, x := range major {
					v := reflect.ValueOf(x).Convert(s.domainType)
					labels[i] = v.Interface().(Stringer).String()
				}
				return labels
			}
		}
		// Otherwise, just format them as floats.
		for i, x := range major {
			labels[i] = fmt.Sprintf("%.6g", x)
		}
		return labels
	}

	// Adjust level to satisfy pred.
	for ; level <= o.MaxLevel; level++ {
		majorx := ls.TicksAtLevel(level)
		minorx := ls.TicksAtLevel(level - 1)
		labels := mkLabels(majorx.([]float64))
		// Convert to domain type.
		majorv := reflect.New(reflect.SliceOf(s.domainType))
		minorv := reflect.New(reflect.SliceOf(s.domainType))
		slice.Convert(majorv.Interface(), majorx)
		slice.Convert(minorv.Interface(), minorx)
		major, minor = majorv.Elem().Interface(), minorv.Elem().Interface()
		if pred == nil || pred(major, minor, labels) {
			return major, minor, labels
		}
	}
	Warning.Printf("%s: unable to compute satisfactory ticks, axis will be empty", s)
	return nil, nil, nil
}

func (s *moremathScale) SetFormatter(f interface{}) {
	s.f = f
}

func (s *moremathScale) CloneScaler() Scaler {
	s2 := *s
	return &s2
}

// NewTimeScaler returns a continuous linear scale. The domain must
// be time.Time.
func NewTimeScaler() *timeScale { return &timeScale{} } type timeScale struct { r Ranger f func(time.Time) string min, max time.Time dataMin, dataMax time.Time } func (s *timeScale) String() string { return fmt.Sprintf("time [%g,%g] => %s", s.min, s.max, s.r) } func (s *timeScale) ExpandDomain(vs table.Slice) { var data []time.Time slice.Convert(&data, vs) min, max := s.dataMin, s.dataMax for _, v := range data { if v.Before(min) || min.IsZero() { min = v } if v.After(max) || max.IsZero() { max = v } } s.dataMin, s.dataMax = min, max } func (s *timeScale) SetMin(v interface{}) ContinuousScaler { s.min = v.(time.Time) return s } func (s *timeScale) SetMax(v interface{}) ContinuousScaler { s.max = v.(time.Time) return s } func (s *timeScale) Include(v interface{}) ContinuousScaler { tv := v.(time.Time) if s.dataMin.IsZero() { s.dataMin, s.dataMax = tv, tv } else { if tv.Before(s.dataMin) { s.dataMin = tv } if tv.After(s.dataMax) { s.dataMax = tv } } return s } func (s *timeScale) Ranger(r Ranger) Ranger { old := s.r if r != nil { s.r = r } return old } func (s *timeScale) RangeType() reflect.Type { return s.r.RangeType() } func (s *timeScale) getMinMax() (time.Time, time.Time) { min := s.min if min.IsZero() { min = s.dataMin } max := s.max if max.IsZero() { max = s.dataMax } return min, max } func (s *timeScale) Map(x interface{}) interface{} { min, max := s.getMinMax() t := x.(time.Time) var scaled float64 = float64(t.Sub(min)) / float64(max.Sub(min)) switch r := s.r.(type) { case ContinuousRanger: return r.Map(scaled) case DiscreteRanger: _, levels := r.Levels() // Bin the scaled value into 'levels' bins. 
level := int(scaled * float64(levels)) if level < 0 { level = 0 } else if level >= levels { level = levels - 1 } return r.MapLevel(level, levels) default: panic("Ranger must be a ContinuousRanger or DiscreteRanger") } } type durationTicks time.Duration func (d durationTicks) Next(t time.Time) time.Time { if d == 0 { panic("invalid zero duration") } return t.Add(time.Duration(d)).Truncate(time.Duration(d)) } var timeTickerLevels = []struct { min time.Duration next func(t time.Time) time.Time }{ {time.Minute, durationTicks(time.Minute).Next}, {10 * time.Minute, durationTicks(10 * time.Minute).Next}, {time.Hour, func(t time.Time) time.Time { year, month, day := t.Date() // N.B. This will skip an hour at some DST transitions. return time.Date(year, month, day, t.Hour()+1, 0, 0, 0, t.Location()) }}, {6 * time.Hour, func(t time.Time) time.Time { year, month, day := t.Date() // N.B. This will skip an hour if the DST transition // happens at a multiple of 6 hours. return time.Date(year, month, day, ((t.Hour()+6)/6)*6, 0, 0, 0, t.Location()) }}, {24 * time.Hour, func(t time.Time) time.Time { year, month, day := t.Date() return time.Date(year, month, day+1, 0, 0, 0, 0, t.Location()) }}, {7 * 24 * time.Hour, func(t time.Time) time.Time { year, month, day := t.Date() loc := t.Location() _, week1 := t.ISOWeek() for { day++ t = time.Date(year, month, day, 0, 0, 0, 0, loc) if _, week2 := t.ISOWeek(); week1 != week2 { return t } } }}, {30 * 24 * time.Hour, func(t time.Time) time.Time { year, month, _ := t.Date() return time.Date(year, month+1, 1, 0, 0, 0, 0, t.Location()) }}, {365 * 24 * time.Hour, func(t time.Time) time.Time { return time.Date(t.Year()+1, time.January, 1, 0, 0, 0, 0, t.Location()) }}, } // timeTicker calculates the ticks between min and max. levels >= 0 // refer to entries in timeTickerLevels. levels < 0 start with -1 at // every 10 seconds and then alternate dividing by 2 and 5. So level // -3 is 1s, -9 is 1ms, -12 is 1us, etc. 
// https://play.golang.org/p/xUv4P25Wxi will print the level step
// sizes.
type timeTicker struct {
	min, max time.Time
}

// getNextTick returns a function that advances a time to the next
// tick at the given level. Non-negative levels index
// timeTickerLevels; negative levels use fixed sub-minute durations
// (level -1 is 10s, -2 is 5s, -3 is 1s, ... — each step alternates
// dividing by 2 and by 5, per the comment above).
func (t *timeTicker) getNextTick(level int) func(time.Time) time.Time {
	if level >= 0 {
		if level >= len(timeTickerLevels) {
			// TODO: larger ticks should do multiples of
			// the year, like the linear scale does.
			panic(fmt.Sprintf("invalid level %d", level))
		}
		return timeTickerLevels[level].next
	} else {
		// For level -1: exp=1, double=false => 10s.
		// For level -2: exp=0, double=true  => 5s.
		// (Relies on Go's truncating division for negative
		// levels.)
		exp, double := level/2+1, (level%2 == 0)
		// Pow10 yields seconds; *1e9 converts to nanoseconds.
		step := math.Pow10(exp) * 1e9
		if double {
			step = step * 5
		}
		return durationTicks(time.Duration(step)).Next
	}
}

// CountTicks returns the number of ticks at level in [min, max],
// capped at 1e5.
func (t *timeTicker) CountTicks(level int) int {
	next := t.getNextTick(level)
	var i int
	// N.B. We cut off at 1e5 ticks. If your plot is larger than
	// that, you're on your own.
	for x := next(t.min.Add(-1)); !x.After(t.max) && i < 1e5; x = next(x) {
		i++
	}
	return i
}

// TicksAtLevel returns a []time.Time of the ticks at level in
// [min, max]. The min.Add(-1) start makes min itself a candidate
// first tick.
func (t *timeTicker) TicksAtLevel(level int) interface{} {
	var ticks []time.Time
	next := t.getNextTick(level)
	for x := next(t.min.Add(-1)); !x.After(t.max); x = next(x) {
		ticks = append(ticks, x)
	}
	return ticks
}

// GuessLevel picks a starting tick level from the span of the
// domain: the largest timeTickerLevels entry the span exceeds, or a
// computed negative (sub-minute) level for very short spans.
func (t *timeTicker) GuessLevel() int {
	dur := t.max.Sub(t.min)
	for i := len(timeTickerLevels) - 1; i >= 0; i-- {
		if dur > timeTickerLevels[i].min {
			return i
		}
	}
	// Invert the negative-level step formula from getNextTick.
	return int(2 * (math.Log10(float64(dur)/1e9) - 2))
}

func (timeTicker) MaxLevel() int {
	return len(timeTickerLevels) - 1
}

// Label formats tick cur for display. prev is the previous tick (or
// zero for the first): date parts that are unchanged from prev are
// progressively dropped (year, then week, then day), and the time
// part's precision is chosen from the tick level.
func (timeTicker) Label(cur, prev time.Time, level int) string {
	dateFmt := "2006"
	switch {
	case level < 6:
		dateFmt = "2006/1/2"
		if !prev.IsZero() {
			if prev.Year() == cur.Year() {
				dateFmt = "Jan 2"
				_, prevweek := prev.ISOWeek()
				_, curweek := cur.ISOWeek()
				if prevweek == curweek {
					dateFmt = "Mon"
					if prev.YearDay() == cur.YearDay() {
						// Same day: no date part at all.
						dateFmt = ""
					}
				}
			}
		}
	case level < 7:
		dateFmt = "2006/1"
		if !prev.IsZero() && prev.Year() == cur.Year() {
			dateFmt = "Jan"
		}
	}
	timeFmt := ""
	switch {
	case level < -3:
		// < 1s: add fractional-second digits matching the level.
		digits := (-level - 2) / 2
		timeFmt = "15:04:05." + strings.Repeat("0", digits)
	case level < 0:
		// < 1m
		timeFmt = "15:04:05"
	case level < 4:
		// < 1d
		timeFmt = "15:04"
	}
	return cur.Format(strings.TrimSpace(dateFmt + " " + timeFmt))
}

// Ticks computes major and minor tick times and labels, starting
// from the guessed level and coarsening until pred (if any) accepts
// the result.
func (s *timeScale) Ticks(maxTicks int, pred func(major, minor table.Slice, labels []string) bool) (table.Slice, table.Slice, []string) {
	min, max := s.getMinMax()
	ticker := &timeTicker{min, max}
	o := scale.TickOptions{Max: maxTicks, MinLevel: -21, MaxLevel: ticker.MaxLevel()}
	level, ok := o.FindLevel(ticker, ticker.GuessLevel())
	if !ok {
		// TODO(quentin): Better handling of too-large time range.
		return nil, nil, nil
	}
	mkLabels := func(major []time.Time) []string {
		// TODO(quentin): Pick a format based on which parts
		// of the time have changed and are non-zero.
		labels := make([]string, len(major))
		if s.f != nil {
			// Use custom formatter.
			for i, x := range major {
				labels[i] = s.f(x)
			}
			return labels
		}
		var prev time.Time
		for i, t := range major {
			labels[i] = ticker.Label(t, prev, level)
			prev = t
		}
		return labels
	}
	var majors, minors []time.Time
	var labels []string
	for ; level <= o.MaxLevel; level++ {
		majors = ticker.TicksAtLevel(level).([]time.Time)
		if level > o.MinLevel {
			// Minor ticks are the next finer level.
			minors = ticker.TicksAtLevel(level - 1).([]time.Time)
		}
		labels = mkLabels(majors)
		if pred == nil || pred(majors, minors, labels) {
			break
		}
	}
	return majors, minors, labels
}

// SetFormatter sets the label formatter; f must be a
// func(time.Time) string.
func (s *timeScale) SetFormatter(f interface{}) {
	s.f = f.(func(time.Time) string)
}

// CloneScaler returns a shallow copy of s.
func (s *timeScale) CloneScaler() Scaler {
	s2 := *s
	return &s2
}

// TODO: The ordinal scale can only work with values it actually sees
// in the data. It has no sense of the type's actual domain. If the
// type is an enumerated type, we could fill in intermediate values
// and the caller could set a min and max for the scale to enumerate
// between.
func NewOrdinalScale() Scaler { return &ordinalScale{} } type ordinalScale struct { allData []slice.T r Ranger f interface{} ordered table.Slice index map[interface{}]int } func (s *ordinalScale) ExpandDomain(v table.Slice) { // TODO: Type-check? For example, if I try to use a cardinal // type for "Color" and then a continuous type, this will // crash confusingly only once Map calls makeIndex and // NubAppend tries to make a consistently typed slice. s.allData = append(s.allData, slice.T(v)) s.ordered, s.index = nil, nil } func (s *ordinalScale) Ranger(r Ranger) Ranger { old := s.r if r != nil { s.r = r } return old } func (s *ordinalScale) RangeType() reflect.Type { return s.r.RangeType() } func (s *ordinalScale) makeIndex() { if s.index != nil { return } // Compute ordered data index and cache. s.ordered = slice.NubAppend(s.allData...) slice.Sort(s.ordered) ov := reflect.ValueOf(s.ordered) s.index = make(map[interface{}]int, ov.Len()) for i, len := 0, ov.Len(); i < len; i++ { s.index[ov.Index(i).Interface()] = i } } func (s *ordinalScale) Map(x interface{}) interface{} { var i int switch x := x.(type) { case Unscaled: i = int(x) default: s.makeIndex() i = s.index[x] } switch r := s.r.(type) { case DiscreteRanger: minLevels, maxLevels := r.Levels() if len(s.index) <= minLevels { return r.MapLevel(i, minLevels) } else if len(s.index) <= maxLevels { return r.MapLevel(i, len(s.index)) } else { // TODO: Binning would also be a reasonable // policy. return r.MapLevel(i%maxLevels, maxLevels) } case ContinuousRanger: // Map i to the "middle" of the ith equal j-way // subdivision of [0, 1]. j := len(s.index) x := (float64(i) + 0.5) / float64(j) return r.Map(x) default: panic("Ranger must be a ContinuousRanger or DiscreteRanger") } } func (s *ordinalScale) Ticks(max int, pred func(major, minor table.Slice, labels []string) bool) (major, minor table.Slice, labels []string) { // TODO: Return *no* ticks and only labels. Can't currently // express this. 
// TODO: Honor constraints. s.makeIndex() labels = make([]string, len(s.index)) ov := reflect.ValueOf(s.ordered) if s.f != nil { // Use custom formatter. // TODO: Type check for better error. fv := reflect.ValueOf(s.f) at := fv.Type().In(0) var avs [1]reflect.Value for i, len := 0, ov.Len(); i < len; i++ { avs[0] = ov.Index(i).Convert(at) rvs := fv.Call(avs[:]) labels[i] = rvs[0].Interface().(string) } } else { // Use String() method or standard format. for i, len := 0, ov.Len(); i < len; i++ { labels[i] = fmt.Sprintf("%v", ov.Index(i).Interface()) } } return s.ordered, nil, labels } func (s *ordinalScale) SetFormatter(f interface{}) { s.f = f } func (s *ordinalScale) CloneScaler() Scaler { ns := &ordinalScale{ allData: make([]slice.T, len(s.allData)), r: s.r, } for i, v := range s.allData { ns.allData[i] = v } return s } // XXX // // A Ranger must be either a ContinuousRanger or a DiscreteRanger. type Ranger interface { RangeType() reflect.Type } type ContinuousRanger interface { Ranger Map(x float64) (y interface{}) Unmap(y interface{}) (x float64, ok bool) } type DiscreteRanger interface { Ranger Levels() (min, max int) MapLevel(i, j int) interface{} } func NewFloatRanger(lo, hi float64) ContinuousRanger { return &floatRanger{lo, hi - lo} } type floatRanger struct { lo, w float64 } func (r *floatRanger) String() string { return fmt.Sprintf("[%g,%g]", r.lo, r.lo+r.w) } func (r *floatRanger) RangeType() reflect.Type { return float64Type } func (r *floatRanger) Map(x float64) interface{} { return x*r.w + r.lo } func (r *floatRanger) Unmap(y interface{}) (float64, bool) { switch y := y.(type) { default: return 0, false case float64: return (y - r.lo) / r.w, true } } func NewColorRanger(palette []color.Color) DiscreteRanger { // TODO: Support continuous palettes. // // TODO: Support discrete palettes that vary depending on the // number of levels. 
return &colorRanger{palette} } type colorRanger struct { palette []color.Color } func (r *colorRanger) RangeType() reflect.Type { return colorType } func (r *colorRanger) Levels() (min, max int) { return len(r.palette), len(r.palette) } func (r *colorRanger) MapLevel(i, j int) interface{} { if i < 0 { i = 0 } else if i >= len(r.palette) { i = len(r.palette) - 1 } return r.palette[i] } // defaultColorRanger is the default color ranger. It is both a // ContinuousRanger and a DiscreteRanger. type defaultColorRanger struct{} // autoPalette is the discrete palette used by defaultColorRanger. var autoPalette = []color.Color{ color.RGBA{0x4c, 0x72, 0xb0, 0xff}, color.RGBA{0x55, 0xa8, 0x68, 0xff}, color.RGBA{0xc4, 0x4e, 0x52, 0xff}, color.RGBA{0x81, 0x72, 0xb2, 0xff}, color.RGBA{0xcc, 0xb9, 0x74, 0xff}, color.RGBA{0x64, 0xb5, 0xcd, 0xff}, } func (r *defaultColorRanger) RangeType() reflect.Type { return colorType } func (r *defaultColorRanger) Map(x float64) interface{} { return palette.Viridis.Map(x) } func (r *defaultColorRanger) Unmap(y interface{}) (float64, bool) { switch y := y.(type) { default: return 0, false case color.RGBA: return float64(y.G) / float64(226), true } } func (r *defaultColorRanger) Levels() (min, max int) { return len(autoPalette), len(autoPalette) } func (r *defaultColorRanger) MapLevel(i, j int) interface{} { if i < 0 { i = 0 } else if i >= len(autoPalette) { i = len(autoPalette) - 1 } return autoPalette[i] } // mapMany applies scaler.Map to all of the values in seq and returns // a slice of the results. // // TODO: Maybe this should just be how Scaler.Map works. 
func mapMany(scaler Scaler, seq table.Slice) table.Slice {
	sv := reflect.ValueOf(seq)
	// Result element type comes from the scaler's range.
	rt := reflect.SliceOf(scaler.RangeType())
	if seq == nil {
		// A nil input still yields a non-nil, empty slice of
		// the range type.
		return reflect.MakeSlice(rt, 0, 0).Interface()
	}
	res := reflect.MakeSlice(rt, sv.Len(), sv.Len())
	for i, len := 0, sv.Len(); i < len; i++ {
		val := scaler.Map(sv.Index(i).Interface())
		res.Index(i).Set(reflect.ValueOf(val))
	}
	return res.Interface()
}



================================================
FILE: benchplot/vendor/github.com/aclements/go-gg/gg/stepmode_string.go
================================================
// Code generated by "stringer -type StepMode"; DO NOT EDIT

package gg

import "fmt"

const _StepMode_name = "StepHVStepVHStepHMidStepVMid"

var _StepMode_index = [...]uint8{0, 6, 12, 20, 28}

func (i StepMode) String() string {
	if i < 0 || i >= StepMode(len(_StepMode_index)-1) {
		return fmt.Sprintf("StepMode(%d)", i)
	}
	return _StepMode_name[_StepMode_index[i]:_StepMode_index[i+1]]
}



================================================
FILE: benchplot/vendor/github.com/aclements/go-gg/gg/testmain.go
================================================
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore

package main

import (
	"math"
	"math/rand"
	"os"

	"github.com/aclements/go-gg/gg"
	"github.com/aclements/go-gg/ggstat"
	"github.com/aclements/go-gg/table"
	"github.com/aclements/go-moremath/vec"
)

// main renders a demo plot exercising facets, line layers, ECDF, and
// density layers, writing SVG to stdout.
func main() {
	// sin samples at 100 random x positions in [-10, 10].
	xs1 := vec.Linspace(-10, 10, 100)
	for i := range xs1 {
		xs1[i] = rand.Float64()*20 - 10
	}
	ys1 := vec.Map(math.Sin, xs1)

	// cos samples at 100 evenly spaced x positions.
	xs2 := vec.Linspace(-10, 10, 100)
	ys2 := vec.Map(math.Cos, xs2)

	// Tag each row with its series name, then concatenate the
	// two series into one table.
	which := []string{}
	for range xs1 {
		which = append(which, "sin")
	}
	for range xs2 {
		which = append(which, "cos")
	}

	xs := vec.Concat(xs1, xs2)
	ys := vec.Concat(ys1, ys2)

	tab := table.NewBuilder(nil).Add("x", xs).Add("y", ys).Add("which", which).Done()

	plot := gg.NewPlot(tab)
	plot.GroupAuto()
	plot.Add(gg.FacetX{Col: "which"})
	plot.Add(gg.FacetY{Col: "which"})
	plot.Add(gg.LayerLines{X: "x", Y: "y"})
	plot.Save()
	plot.SetData(ggstat.ECDF{X: "x"}.F(plot.Data()))
	plot.Add(gg.LayerSteps{Step: gg.StepHV})
	//plot.Add(gg.LayerSteps{Step:gg.StepHMid})
	plot.Restore()
	plot.Save()
	plot.SetData(ggstat.Density{X: "x"}.F(plot.Data()))
	plot.Add(gg.LayerPaths{})
	plot.Restore()
	plot.WriteSVG(os.Stdout, 400, 300)
}



================================================
FILE: benchplot/vendor/github.com/aclements/go-gg/gg/text.go
================================================
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gg

import "unicode/utf8"

// textMetrics holds estimated text dimensions in pixels.
type textMetrics struct {
	width   float64
	leading float64
}

// measureString returns the metrics in pixels of s rendered in a font
// with pixel size pxSize.
//
// TODO: Often all I want is the leading, which is much cheaper to get
// than the width. Maybe textMetrics should have methods?
func measureString(pxSize float64, s string) textMetrics {
	// TODO: This is absolutely horribly awful. Make it real,
	// perhaps using the freetype package.

	// Chrome's default font-size is 16px, so 20px is a reasonable
	// leading.
	return textMetrics{
		// Crude estimate: half an em per rune.
		width:   0.5 * pxSize * float64(utf8.RuneCountInString(s)),
		leading: 1.25 * pxSize,
	}
}



================================================
FILE: benchplot/vendor/github.com/aclements/go-gg/gg/transform.go
================================================
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gg

import "github.com/aclements/go-gg/table"

// SortBy sorts each group by the named columns. If a column's type
// implements sort.Interface, rows will be sorted according to that
// order. Otherwise, the values in the column must be naturally
// ordered (their types must be orderable by the Go specification). If
// neither is true, SortBy panics with a *generic.TypeError. If more
// than one column is given, SortBy sorts by the tuple of the columns;
// that is, if two values in the first column are equal, they are
// sorted by the second column, and so on.
func (p *Plot) SortBy(cols ...string) *Plot {
	return p.SetData(table.SortBy(p.Data(), cols...))
}



================================================
FILE: benchplot/vendor/github.com/aclements/go-gg/ggstat/agg.go
================================================
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ggstat

import (
	"fmt"
	"reflect"

	"github.com/aclements/go-gg/generic/slice"
	"github.com/aclements/go-gg/table"
	"github.com/aclements/go-moremath/stats"
	"github.com/aclements/go-moremath/vec"
)

// TODO: AggFirst, AggTukey. StdDev?

// Agg constructs an Aggregate transform from a grouping column and a
// set of Aggregators.
//
// TODO: Does this belong in ggstat? The specific aggregator functions
// probably do, but the concept could go in package table.
func Agg(xs ...string) func(aggs ...Aggregator) Aggregate { return func(aggs ...Aggregator) Aggregate { return Aggregate{xs, aggs} } } // Aggregate computes aggregate functions of a table grouped by // distinct values of a column or set of columns. // // Aggregate first groups the table by the Xs columns. Each of these // groups produces a single row in the output table, where the unique // value of each of the Xs columns appears in the output row, along // with constant columns from the input, as well as any columns that // have a unique value within every group (they're "effectively" // constant). Additional columns in the output row are produced by // applying the Aggregator functions to the group. type Aggregate struct { // Xs is the list column names to group values by before // computing aggregate functions. Xs []string // Aggregators is the set of Aggregator functions to apply to // each group of values. Aggregators []Aggregator } // An Aggregator is a function that aggregates each group of input // into one row and adds it to output. It may be based on multiple // columns from input and may add multiple columns to output. type Aggregator func(input table.Grouping, output *table.Builder) func (s Aggregate) F(g table.Grouping) table.Grouping { isConst := make([]bool, len(g.Columns())) for i := range isConst { isConst[i] = true } subgroups := map[table.GroupID]table.Grouping{} for _, gid := range g.Tables() { g := table.GroupBy(g.Table(gid), s.Xs...) subgroups[gid] = g for i, col := range g.Columns() { if !isConst[i] { continue } // Can this column be promoted to constant? for _, gid2 := range g.Tables() { t := g.Table(gid2) isConst[i] = isConst[i] && checkConst(t, col) } } } return table.MapTables(g, func(_ table.GroupID, t *table.Table) *table.Table { g := table.GroupBy(t, s.Xs...) var nt table.Builder // Construct X columns. 
rows := len(g.Tables()) for colidx, xcol := range s.Xs { xs := reflect.MakeSlice(table.ColType(t, xcol), rows, rows) for i, gid := range g.Tables() { for j := 0; j < len(s.Xs)-colidx-1; j++ { gid = gid.Parent() } xs.Index(i).Set(reflect.ValueOf(gid.Label())) } nt.Add(xcol, xs.Interface()) } // Apply Aggregators. for _, agg := range s.Aggregators { agg(g, &nt) } // Keep constant and effectively constant columns. for i := range isConst { col := t.Columns()[i] if !isConst[i] || nt.Has(col) { continue } if cv, ok := t.Const(col); ok { nt.AddConst(col, cv) continue } ncol := reflect.MakeSlice(table.ColType(t, col), len(g.Tables()), len(g.Tables())) for i, gid := range g.Tables() { v := reflect.ValueOf(g.Table(gid).Column(col)) ncol.Index(i).Set(v.Index(0)) } nt.Add(col, ncol.Interface()) } return nt.Done() }) } func checkConst(t *table.Table, col string) bool { if _, ok := t.Const(col); ok { return true } v := reflect.ValueOf(t.Column(col)) if v.Len() <= 1 { return true } if !v.Type().Elem().Comparable() { return false } elem := v.Index(0).Interface() for i, l := 1, v.Len(); i < l; i++ { if elem != v.Index(i).Interface() { return false } } return true } // AggCount returns an aggregate function that computes the number of // rows in each group. The resulting column will be named label, or // "count" if label is "". func AggCount(label string) Aggregator { if label == "" { label = "count" } return func(input table.Grouping, b *table.Builder) { counts := make([]int, 0, len(input.Tables())) for _, gid := range input.Tables() { counts = append(counts, input.Table(gid).Len()) } b.Add(label, counts) } } // AggMean returns an aggregate function that computes the mean of // each of cols. The resulting columns will be named "mean " and // will have the same type as . func AggMean(cols ...string) Aggregator { return aggFn(stats.Mean, "mean ", cols...) } // AggGeoMean returns an aggregate function that computes the // geometric mean of each of cols. 
The resulting columns will be named // "geomean " and will have the same type as . func AggGeoMean(cols ...string) Aggregator { return aggFn(stats.GeoMean, "geomean ", cols...) } // AggMin returns an aggregate function that computes the minimum of // each of cols. The resulting columns will be named "min " and // will have the same type as . func AggMin(cols ...string) Aggregator { min := func(xs []float64) float64 { x, _ := stats.Bounds(xs) return x } return aggFn(min, "min ", cols...) } // AggMax returns an aggregate function that computes the maximum of // each of cols. The resulting columns will be named "max " and // will have the same type as . func AggMax(cols ...string) Aggregator { max := func(xs []float64) float64 { _, x := stats.Bounds(xs) return x } return aggFn(max, "max ", cols...) } // AggSum returns an aggregate function that computes the sum of each // of cols. The resulting columns will be named "sum " and will // have the same type as . func AggSum(cols ...string) Aggregator { return aggFn(vec.Sum, "sum ", cols...) } // AggQuantile returns an aggregate function that computes a quantile // of each of cols. quantile has a range of [0,1]. The resulting // columns will be named " " and will have the same type // as . func AggQuantile(prefix string, quantile float64, cols ...string) Aggregator { // "prefix" could be autogenerated (e.g. fmt.Sprintf("p%g ", // quantile * 100)), but then the caller would need to do the // same fmt.Sprintf to compute the column name they had just // created. Perhaps Aggregator should provide a way to find // the generated column names. return aggFn(func(data []float64) float64 { return stats.Sample{Xs: data}.Quantile(quantile) }, prefix+" ", cols...) 
} func aggFn(f func([]float64) float64, prefix string, cols ...string) Aggregator { ocols := make([]string, len(cols)) for i, col := range cols { ocols[i] = prefix + col } return func(input table.Grouping, b *table.Builder) { for coli, col := range cols { means := make([]float64, 0, len(input.Tables())) var xs []float64 for _, gid := range input.Tables() { v := input.Table(gid).MustColumn(col) slice.Convert(&xs, v) means = append(means, f(xs)) } ct := table.ColType(input, col) if ct == float64SliceType { b.Add(ocols[coli], means) } else { // Convert means back to the type of col. outptr := reflect.New(ct) slice.Convert(outptr.Interface(), means) b.Add(ocols[coli], outptr.Elem().Interface()) } } } } // AggUnique returns an aggregate function retains the unique value of // each of cols within each aggregate group, or panics if some group // contains more than one value for one of these columns. // // Note that Aggregate will automatically retain columns that happen // to be unique. AggUnique can be used to enforce at aggregation time // that certain columns *must* be unique (and get a nice error if they // are not). func AggUnique(cols ...string) Aggregator { return func(input table.Grouping, b *table.Builder) { if len(cols) == 0 { return } if len(input.Tables()) == 0 { panic(fmt.Sprintf("unknown column: %q", cols[0])) } for _, col := range cols { ctype := table.ColType(input, col) rows := len(input.Tables()) vs := reflect.MakeSlice(ctype, rows, rows) for i, gid := range input.Tables() { // Get values in this column. xs := reflect.ValueOf(input.Table(gid).MustColumn(col)) // Check for uniqueness. if xs.Len() == 0 { panic(fmt.Sprintf("cannot AggUnique empty column %q", col)) } uniquev := xs.Index(0) unique := uniquev.Interface() for i, len := 1, xs.Len(); i < len; i++ { other := xs.Index(i).Interface() if unique != other { panic(fmt.Sprintf("column %q is not unique; contains at least %v and %v", col, unique, other)) } } // Store unique value. 
vs.Index(i).Set(uniquev) } // Add unique values slice to output table. b.Add(col, vs.Interface()) } } } ================================================ FILE: benchplot/vendor/github.com/aclements/go-gg/ggstat/bin.go ================================================ // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package ggstat import ( "math" "reflect" "sort" "github.com/aclements/go-gg/generic" "github.com/aclements/go-gg/generic/slice" "github.com/aclements/go-gg/table" "github.com/aclements/go-moremath/vec" ) // XXX If this is just based on the number of bins, it can come up // with really ugly boundary numbers. If the bin width is specified, // then you could also specify the left edge and bins will be placed // at [align+width*N, align+width*(N+1)]. ggplot2 also lets you // specify the center alignment. // // XXX In Matlab and NumPy, bins are open on the right *except* for // the last bin, which is closed on both. // // XXX Number of bins/bin width/specify boundaries, same bins across // all groups/separate for each group/based on shared scales (don't // have that information here), relative or absolute histogram (Matlab // has lots more). // // XXX Scale transform. // // The result of Bin has two columns in addition to constant columns from the input: // // - Column X is the left edge of the bin. // // - Column W is the sum of the rows' weights, or column "count" is // the number of rows in the bin. type Bin struct { // X is the name of the column to use for samples. X string // W is the optional name of the column to use for sample // weights. It may be "" to weight each sample as 1. W string // Width controls how wide each bin should be. If not provided // or 0, a width will be chosen to produce 30 bins. If X is an // integer column, this width will be treated as an integer as // well. Width float64 // Center controls the center point of each bin. 
To center on // integers, for example, you could use {Width: 1, Center: // 0}. // XXX What does center mean for integers? Should an unspecified center yield an autochosen one, or 0? //Center float64 // Breaks is the set of break points to use as boundaries // between bins. The interval of each bin is [Breaks[i], // Breaks[i+1]). Data points before the first break are // dropped. If provided, Width and Center are ignored. Breaks table.Slice // SplitGroups indicates that each group in the table should // have separate bounds based on the data in that group alone. // The default, false, indicates that the binning function // should use the bounds of all of the data combined. This // makes it easier to compare bins across groups. SplitGroups bool } func (b Bin) F(g table.Grouping) table.Grouping { breaks := reflect.ValueOf(b.Breaks) agg := AggCount("count") if b.W != "" { agg = aggFn(vec.Sum, "", b.W) } if !breaks.IsValid() && !b.SplitGroups { breaks = b.computeBreaks(g) } // Change b.X to the start of the bin. g = table.MapTables(g, func(_ table.GroupID, t *table.Table) *table.Table { breaks := breaks if !breaks.IsValid() { breaks = b.computeBreaks(t) } nbreaks := breaks.Len() in := reflect.ValueOf(t.MustColumn(b.X)) nin := in.Len() out := reflect.MakeSlice(breaks.Type(), nin, nin) var found []int for i := 0; i < nin; i++ { elt := in.Index(i) bin := sort.Search(nbreaks, func(j int) bool { return generic.OrderR(elt, breaks.Index(j)) < 0 }) // 0 means the row doesn't fit on the front // XXX Allow configuring the first and last bin as infinite or not. 
bin = bin - 1 if bin >= 0 { found = append(found, i) out.Index(i).Set(breaks.Index(bin)) } } var nt table.Builder for _, col := range t.Columns() { if col == b.X { nt.Add(col, slice.Select(out.Interface(), found)) } else if c, ok := t.Const(col); ok { nt.AddConst(col, c) } else { nt.Add(col, slice.Select(t.Column(col), found)) } } return nt.Done() }) // Group by the found bin return Agg(b.X)(agg).F(g) } func (b Bin) computeBreaks(g table.Grouping) reflect.Value { var cols []slice.T for _, gid := range g.Tables() { cols = append(cols, g.Table(gid).MustColumn(b.X)) } data := slice.Concat(cols...) min := slice.Min(data) max := slice.Max(data) rv := reflect.ValueOf(min) switch rv.Type().Kind() { case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: min, max := rv.Int(), reflect.ValueOf(max).Int() width := int64(b.Width) if width == 0 { width = (max - min) / 30 if width < 1 { width = 1 } } // XXX: This assumes boundaries should be aligned with // 0. We should support explicit Center or Boundary // requests. 
min -= (min % width) var breaks []int64 for i := min; i < max; i += width { breaks = append(breaks, i) } outs := reflect.New(reflect.ValueOf(cols[0]).Type()) slice.Convert(outs.Interface(), breaks) return outs.Elem() case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: min, max := rv.Uint(), reflect.ValueOf(max).Uint() width := uint64(b.Width) if width == 0 { width = (max - min) / 30 if width < 1 { width = 1 } } min -= (min % width) var breaks []uint64 for i := min; i < max; i += width { breaks = append(breaks, i) } outs := reflect.New(reflect.ValueOf(cols[0]).Type()) slice.Convert(outs.Interface(), breaks) return outs.Elem() case reflect.Float32, reflect.Float64: min, max := rv.Float(), reflect.ValueOf(max).Float() width := b.Width if width == 0 { width = (max - min) / 30 if width == 0 { width = 1 } } min -= math.Mod(min, width) var breaks []float64 for i := min; i < max; i += width { breaks = append(breaks, i) } outs := reflect.New(reflect.ValueOf(cols[0]).Type()) slice.Convert(outs.Interface(), breaks) return outs.Elem() default: panic("can't compute breaks for unknown type") } } // TODO: Count for categorical data. ================================================ FILE: benchplot/vendor/github.com/aclements/go-gg/ggstat/common.go ================================================ // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package ggstat import "reflect" var float64Type = reflect.TypeOf(float64(0)) var float64SliceType = reflect.TypeOf([]float64(nil)) ================================================ FILE: benchplot/vendor/github.com/aclements/go-gg/ggstat/density.go ================================================ // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
package ggstat

import (
	"github.com/aclements/go-gg/generic/slice"
	"github.com/aclements/go-gg/table"
	"github.com/aclements/go-moremath/stats"
	"github.com/aclements/go-moremath/vec"
)

// TODO: Default to first (and second) column for X (and Y)?

// Density constructs a probability density estimate from a set of
// samples using kernel density estimation.
//
// X is the only required field. All other fields have reasonable
// default zero values.
//
// The result of Density has three columns in addition to constant
// columns from the input:
//
// - Column X is the points at which the density estimate is sampled.
//
// - Column "probability density" is the density estimate.
//
// - Column "cumulative density" is the cumulative density estimate.
type Density struct {
	// X is the name of the column to use for samples.
	X string

	// W is the optional name of the column to use for sample
	// weights. It may be "" to uniformly weight samples.
	W string

	// N is the number of points to sample the KDE at. If N is 0,
	// a reasonable default is used.
	//
	// TODO: This is particularly sensitive to the scale
	// transform.
	//
	// TODO: Base the default on the bandwidth. If the bandwidth
	// is really narrow, we may need a lot of samples to exceed
	// the Nyquist rate.
	N int

	// Domain specifies the domain at which to sample this function.
	// If Domain is nil, it defaults to DomainData{}.
	Domain FunctionDomainer

	// Kernel is the kernel to use for the KDE.
	Kernel stats.KDEKernel

	// Bandwidth is the bandwidth to use for the KDE.
	//
	// If this is zero, the bandwidth is computed from the data
	// using a default bandwidth estimator (currently
	// stats.BandwidthScott).
	Bandwidth float64

	// BoundaryMethod is the boundary correction method to use for
	// the KDE. The default value is BoundaryReflect; however, the
	// default bounds are effectively +/-inf, which is equivalent
	// to performing no boundary correction.
	BoundaryMethod stats.KDEBoundaryMethod

	// [BoundaryMin, BoundaryMax) specify a bounded support for
	// the KDE. If both are 0 (their default values), they are
	// treated as +/-inf.
	//
	// To specify a half-bounded support, set Min to math.Inf(-1)
	// or Max to math.Inf(1).
	BoundaryMin float64
	BoundaryMax float64
}

// F computes the KDE for each group in g by delegating to Function,
// which samples the per-group Fn over the configured Domain.
func (d Density) F(g table.Grouping) table.Grouping {
	// Fixed KDE configuration; the per-group sample and (possibly
	// estimated) bandwidth are filled in inside Fn.
	kde := stats.KDE{
		Kernel:         d.Kernel,
		Bandwidth:      d.Bandwidth,
		BoundaryMethod: d.BoundaryMethod,
		BoundaryMin:    d.BoundaryMin,
		BoundaryMax:    d.BoundaryMax,
	}

	dname, cname := "probability density", "cumulative density"
	// addEmpty emits empty result columns for groups with no
	// usable data (no sample points, or all-zero weights).
	addEmpty := func(out *table.Builder) {
		out.Add(dname, []float64{})
		out.Add(cname, []float64{})
	}

	return Function{
		X: d.X, N: d.N, Domain: d.Domain,
		Fn: func(gid table.GroupID, in *table.Table, sampleAt []float64, out *table.Builder) {
			if len(sampleAt) == 0 {
				addEmpty(out)
				return
			}

			// Get input sample.
			var sample stats.Sample
			slice.Convert(&sample.Xs, in.MustColumn(d.X))
			if d.W != "" {
				slice.Convert(&sample.Weights, in.MustColumn(d.W))
				if sample.Weight() == 0 {
					addEmpty(out)
					return
				}
			}

			// Compute KDE.
			kde.Sample = sample
			if d.Bandwidth == 0 {
				// Estimate a bandwidth from this
				// group's data.
				kde.Bandwidth = stats.BandwidthScott(sample)
			}

			out.Add(dname, vec.Map(kde.PDF, sampleAt))
			out.Add(cname, vec.Map(kde.CDF, sampleAt))
		},
	}.F(g)
}



================================================
FILE: benchplot/vendor/github.com/aclements/go-gg/ggstat/domain.go
================================================
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ggstat

import (
	"math"

	"github.com/aclements/go-gg/generic/slice"
	"github.com/aclements/go-gg/table"
	"github.com/aclements/go-moremath/stats"
)

// A FunctionDomainer computes the domain over which to evaluate a
// statistical function.
type FunctionDomainer interface {
	// FunctionDomain computes the domain of a particular column
	// within a table.
// It takes a Grouping and a column in that
	// Grouping to compute the domain of and returns a function
	// that computes the domain for a specific group in the
	// Grouping. This makes it possible for FunctionDomain to
	// easily compute either Grouping-wide domains, or per-Table
	// domains.
	//
	// The returned domain may be (NaN, NaN) to indicate that
	// there is no data and the domain is vacuous.
	FunctionDomain(g table.Grouping, col string) func(gid table.GroupID) (min, max float64)
}

// DomainFixed is a FunctionDomainer that returns a fixed domain.
type DomainFixed struct {
	Min, Max float64
}

var _ FunctionDomainer = DomainFixed{}

// FunctionDomain returns a function that always reports the fixed
// [Min, Max] domain, ignoring the data in g entirely.
func (r DomainFixed) FunctionDomain(g table.Grouping, col string) func(gid table.GroupID) (min, max float64) {
	return func(table.GroupID) (min, max float64) {
		return r.Min, r.Max
	}
}

// DomainData is a FunctionDomainer that computes domains based on the
// bounds of the data.
type DomainData struct {
	// Widen expands the domain by Widen times the span of the
	// data.
	//
	// A value of 1.0 means to use exactly the bounds of the data.
	// If Widen is 0, it is treated as 1.1 (that is, widen the
	// domain by 10%, or 5% on the left and 5% on the right).
	Widen float64

	// SplitGroups indicates that each group in the table should
	// have a separate domain based on the data in that group
	// alone. The default, false, indicates that the domain should
	// be based on all of the data in the table combined. This
	// makes it possible to stack functions and easier to compare
	// them across groups.
	SplitGroups bool
}

var _ FunctionDomainer = DomainData{}

// defaultWiden is the Widen value used when DomainData.Widen is
// unset: widen the domain by 10%.
const defaultWiden = 1.1

// FunctionDomain returns a function reporting the (possibly widened)
// bounds of column col: over all of g combined by default, or per
// group when r.SplitGroups is set.
func (r DomainData) FunctionDomain(g table.Grouping, col string) func(gid table.GroupID) (min, max float64) {
	widen := r.Widen
	if widen <= 0 {
		widen = defaultWiden
	}

	// Scratch buffer reused for column conversion.
	var xs []float64

	if !r.SplitGroups {
		// Compute combined bounds.
		gmin, gmax := math.NaN(), math.NaN()
		for _, gid := range g.Tables() {
			t := g.Table(gid)
			slice.Convert(&xs, t.MustColumn(col))
			xmin, xmax := stats.Bounds(xs)
			// The NaN checks make the first group's bounds
			// stick even though NaN comparisons are false.
			if xmin < gmin || math.IsNaN(gmin) {
				gmin = xmin
			}
			if xmax > gmax || math.IsNaN(gmax) {
				gmax = xmax
			}
		}

		// Widen bounds.
		span := gmax - gmin
		gmin, gmax = gmin-span*(widen-1)/2, gmax+span*(widen-1)/2

		// Every group shares the combined domain.
		return func(table.GroupID) (min, max float64) {
			return gmin, gmax
		}
	}

	// Per-group domains, computed lazily for each requested group.
	return func(gid table.GroupID) (min, max float64) {
		// Compute bounds.
		slice.Convert(&xs, g.Table(gid).MustColumn(col))
		min, max = stats.Bounds(xs)

		// Widen bounds.
		span := max - min
		min, max = min-span*(widen-1)/2, max+span*(widen-1)/2
		return
	}
}



================================================
FILE: benchplot/vendor/github.com/aclements/go-gg/ggstat/ecdf.go
================================================
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ggstat

import (
	"github.com/aclements/go-gg/generic/slice"
	"github.com/aclements/go-gg/table"
	"github.com/aclements/go-moremath/vec"
)

// ECDF constructs an empirical CDF from a set of samples.
//
// X is the only required field. All other fields have reasonable
// default zero values.
//
// The result of ECDF has three columns in addition to constant
// columns from the input. The names of the columns depend on whether
// Label is "".
//
// - Column X is the points at which the CDF changes (a subset of the
// samples).
//
// - Column "cumulative density" or "cumulative density of