[
  {
    "path": ".gitignore",
    "content": "# Binaries for programs and plugins\n*.exe\n*.dll\n*.so\n*.dylib\n\n# Test binary, build with `go test -c`\n*.test\n\n# Output of the go coverage tool, specifically when used with LiteIDE\n*.out\n\n# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736\n.glide/\n"
  },
  {
    "path": "LICENSE",
    "content": "BSD 3-Clause License\n\nCopyright (c) 2017, Imec\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n  list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n  this list of conditions and the following disclaimer in the documentation\n  and/or other materials provided with the distribution.\n\n* Neither the name of the copyright holder nor the names of its\n  contributors may be used to endorse or promote products derived from\n  this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "README.md",
    "content": "# pargo\n## A library for parallel programming in Go\n\nPackage pargo provides functions and data structures for expressing\nparallel algorithms. While Go is primarily designed for concurrent\nprogramming, it is also usable to some extent for parallel\nprogramming, and this library provides convenience functionality to\nturn otherwise sequential algorithms into parallel algorithms, with\nthe goal to improve performance.\n\nDocumentation: [http://godoc.org/github.com/ExaScience/pargo](http://godoc.org/github.com/ExaScience/pargo)\nand [http://github.com/ExaScience/pargo/wiki](http://github.com/ExaScience/pargo/wiki)\n"
  },
  {
    "path": "doc.go",
    "content": "// Package pargo provides functions and data structures for expressing parallel\n// algorithms. While Go is primarily designed for concurrent programming, it is\n// also usable to some extent for parallel programming, and this library\n// provides convenience functionality to turn otherwise sequential algorithms\n// into parallel algorithms, with the goal to improve performance.\n//\n// For documentation that provides a more structured overview than is possible\n// with Godoc, see the wiki at https://github.com/exascience/pargo/wiki\n//\n// Pargo provides the following subpackages:\n//\n// pargo/parallel provides simple functions for executing series of thunks or\n// predicates, as well as thunks, predicates, or reducers over ranges in\n// parallel. See also https://github.com/ExaScience/pargo/wiki/TaskParallelism\n//\n// pargo/speculative provides speculative implementations of most of the\n// functions from pargo/parallel. These implementations not only execute in\n// parallel, but also attempt to terminate early as soon as the final result is\n// known. See also https://github.com/ExaScience/pargo/wiki/TaskParallelism\n//\n// pargo/sequential provides sequential implementations of all functions from\n// pargo/parallel, for testing and debugging purposes.\n//\n// pargo/sort provides parallel sorting algorithms.\n//\n// pargo/sync provides an efficient parallel map implementation.\n//\n// pargo/pipeline provides functions and data structures to construct and\n// execute parallel pipelines.\n//\n// Pargo has been influenced to various extents by ideas from Cilk, Threading\n// Building Blocks, and Java's java.util.concurrent and java.util.stream\n// packages. See http://supertech.csail.mit.edu/papers/steal.pdf for some\n// theoretical background, and the sample chapter at\n// https://mitpress.mit.edu/books/introduction-algorithms for a more practical\n// overview of the underlying concepts.\npackage pargo\n"
  },
  {
    "path": "go.mod",
    "content": "module github.com/exascience/pargo\n\ngo 1.14\n\nrequire gonum.org/v1/gonum v0.7.0\n"
  },
  {
    "path": "go.sum",
    "content": "github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=\ngithub.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=\ngithub.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=\ngithub.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=\ngolang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=\ngolang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=\ngolang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2 h1:y102fOLFqhV41b+4GPiJoa0k/x+pJcEi2/HB1Y5T6fU=\ngolang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=\ngolang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=\ngolang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=\ngonum.org/v1/gonum v0.7.0 h1:Hdks0L0hgznZLG9nzXb8vZ0rRvqNvAcgAp84y7Mwkgw=\ngonum.org/v1/gonum v0.7.0/go.mod h1:L02bwd0sqlsvRv41G7wGWFCsVNZFv/k1xzGIxeANHGM=\ngonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc=\ngonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=\ngonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=\nrsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=\n"
  },
  {
    "path": "internal/internal.go",
    "content": "package internal\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"runtime\"\n\t\"runtime/debug\"\n)\n\n// ComputeNofBatches divides the size of the range (high - low) by n. If n is 0,\n// a default is used that takes runtime.GOMAXPROCS(0) into account.\nfunc ComputeNofBatches(low, high, n int) (batches int) {\n\tswitch size := high - low; {\n\tcase size > 0:\n\t\tswitch {\n\t\tcase n == 0:\n\t\t\tbatches = 2 * runtime.GOMAXPROCS(0)\n\t\tcase n > 0:\n\t\t\tbatches = n\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t\tif batches > size {\n\t\t\tbatches = size\n\t\t}\n\tcase size == 0:\n\t\tbatches = 1\n\tdefault:\n\t\tpanic(fmt.Sprintf(\"invalid range: %v:%v\", low, high))\n\t}\n\treturn\n}\n\ntype runtimeError struct{ error }\n\nfunc (runtimeError) RuntimeError() {}\n\n// WrapPanic adds stack trace information to a recovered panic.\nfunc WrapPanic(p interface{}) interface{} {\n\tif p != nil {\n\t\ts := fmt.Sprintf(\"%v\\n%s\\nrethrown at\", p, debug.Stack())\n\t\tif _, isError := p.(error); isError {\n\t\t\tr := errors.New(s)\n\t\t\tif _, isRuntimeError := p.(runtime.Error); isRuntimeError {\n\t\t\t\treturn runtimeError{r}\n\t\t\t}\n\t\t\treturn r\n\t\t}\n\t\treturn s\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "parallel/example_heatdistribution_test.go",
    "content": "package parallel_test\n\n// This is a simplified version of a heat distribution simulation, based on an\n// implementation by Wilfried Verachtert.\n//\n// See https://en.wikipedia.org/wiki/Heat_equation for some theoretical\n// background.\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\n\t\"gonum.org/v1/gonum/mat\"\n\n\t\"github.com/exascience/pargo/parallel\"\n)\n\nconst ε = 0.001\n\nfunc maxDiff(m1, m2 *mat.Dense) (result float64) {\n\trows, cols := m1.Dims()\n\tresult = parallel.RangeReduceFloat64(\n\t\t1, rows-1, 0,\n\t\tfunc(low, high int) (result float64) {\n\t\t\tfor row := low; row < high; row++ {\n\t\t\t\tr1 := m1.RawRowView(row)\n\t\t\t\tr2 := m2.RawRowView(row)\n\t\t\t\tfor col := 1; col < cols-1; col++ {\n\t\t\t\t\tresult = math.Max(result, math.Abs(r1[col]-r2[col]))\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn\n\t\t},\n\t\tmath.Max,\n\t)\n\treturn\n}\n\nfunc HeatDistributionStep(u, v *mat.Dense) {\n\trows, cols := u.Dims()\n\tparallel.Range(1, rows-1, 0,\n\t\tfunc(low, high int) {\n\t\t\tfor row := low; row < high; row++ {\n\t\t\t\tuRow := u.RawRowView(row)\n\t\t\t\tvRow := v.RawRowView(row)\n\t\t\t\tvRowUp := v.RawRowView(row - 1)\n\t\t\t\tvRowDn := v.RawRowView(row + 1)\n\t\t\t\tfor col := 1; col < cols-1; col++ {\n\t\t\t\t\tuRow[col] = (vRowUp[col] + vRowDn[col] + vRow[col-1] + vRow[col+1]) / 4.0\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t)\n}\n\nfunc HeatDistributionSimulation(M, N int, init, t, r, b, l float64) {\n\t// ensure a border\n\tM += 2\n\tN += 2\n\n\t// set up the input matrix\n\tdata := make([]float64, M*N)\n\tfor i := range data {\n\t\tdata[i] = init\n\t}\n\tu := mat.NewDense(M, N, data)\n\n\t// set up the border for the input matrix\n\tfor i := 0; i < N; i++ {\n\t\tu.Set(0, i, t)\n\t\tu.Set(M-1, i, b)\n\t}\n\tfor i := 0; i < M; i++ {\n\t\tu.Set(i, 0, l)\n\t\tu.Set(i, N-1, r)\n\t}\n\n\t// create a secondary working matrix\n\tv := mat.NewDense(M, N, nil)\n\tv.Copy(u)\n\n\t// run the simulation\n\tfor δ, iterations := ε+1.0, 0; δ >= ε; {\n\t\tfor step := 0; step < 1000; step++ {\n\t\t\tHeatDistributionStep(v, u)\n\t\t\tHeatDistributionStep(u, v)\n\t\t}\n\t\titerations += 2000\n\t\tδ = maxDiff(u, v)\n\t\tfmt.Printf(\"iterations: %6d, δ: %08.6f, u[8][8]: %10.8f\\n\", iterations, δ, u.At(8, 8))\n\t}\n}\n\nfunc Example_heatDistributionSimulation() {\n\tHeatDistributionSimulation(1024, 1024, 75, 0, 100, 100, 100)\n\n\t// Output:\n\t// iterations:   2000, δ: 0.009073, u[8][8]: 50.99678108\n\t// iterations:   4000, δ: 0.004537, u[8][8]: 50.50380048\n\t// iterations:   6000, δ: 0.003025, u[8][8]: 50.33708179\n\t// iterations:   8000, δ: 0.002268, u[8][8]: 50.25326869\n\t// iterations:  10000, δ: 0.001815, u[8][8]: 50.20283493\n\t// iterations:  12000, δ: 0.001512, u[8][8]: 50.16915148\n\t// iterations:  14000, δ: 0.001296, u[8][8]: 50.14506197\n\t// iterations:  16000, δ: 0.001134, u[8][8]: 50.12697847\n\t// iterations:  18000, δ: 0.001008, u[8][8]: 50.11290381\n\t// iterations:  20000, δ: 0.000907, u[8][8]: 50.10163797\n}\n"
  },
  {
    "path": "parallel/parallel.go",
    "content": "// Package parallel provides functions for expressing parallel algorithms.\n//\n// See https://github.com/ExaScience/pargo/wiki/TaskParallelism for a general\n// overview.\npackage parallel\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\n\t\"github.com/exascience/pargo/internal\"\n)\n\n// Reduce receives one or more functions, executes them in parallel, and\n// combines their results with the join function in parallel.\n//\n// Each function is invoked in its own goroutine, and Reduce returns only when\n// all functions have terminated.\n//\n// If one or more functions panic, the corresponding goroutines recover the\n// panics, and Reduce eventually panics with the left-most recovered panic\n// value.\nfunc Reduce(\n\tjoin func(x, y interface{}) interface{},\n\tfirstFunction func() interface{},\n\tmoreFunctions ...func() interface{},\n) interface{} {\n\tif len(moreFunctions) == 0 {\n\t\treturn firstFunction()\n\t}\n\tvar left, right interface{}\n\tvar p interface{}\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tif len(moreFunctions) == 1 {\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tright = moreFunctions[0]()\n\t\t}()\n\t\tleft = firstFunction()\n\t} else {\n\t\thalf := (len(moreFunctions) + 1) / 2\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tright = Reduce(join, moreFunctions[half], moreFunctions[half+1:]...)\n\t\t}()\n\t\tleft = Reduce(join, firstFunction, moreFunctions[:half]...)\n\t}\n\twg.Wait()\n\tif p != nil {\n\t\tpanic(p)\n\t}\n\treturn join(left, right)\n}\n\n// ReduceFloat64 receives one or more functions, executes them in parallel, and\n// combines their results with the join function in parallel.\n//\n// Each function is invoked in its own goroutine, and ReduceFloat64 returns only\n// when all functions have terminated.\n//\n// If one or more functions panic, the corresponding goroutines recover the\n// panics, and ReduceFloat64 eventually panics with the left-most recovered\n// panic value.\nfunc ReduceFloat64(\n\tjoin func(x, y float64) float64,\n\tfirstFunction func() float64,\n\tmoreFunctions ...func() float64,\n) float64 {\n\tif len(moreFunctions) == 0 {\n\t\treturn firstFunction()\n\t}\n\tvar left, right float64\n\tvar p interface{}\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tif len(moreFunctions) == 1 {\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tright = moreFunctions[0]()\n\t\t}()\n\t\tleft = firstFunction()\n\t} else {\n\t\thalf := (len(moreFunctions) + 1) / 2\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tright = ReduceFloat64(join, moreFunctions[half], moreFunctions[half+1:]...)\n\t\t}()\n\t\tleft = ReduceFloat64(join, firstFunction, moreFunctions[:half]...)\n\t}\n\twg.Wait()\n\tif p != nil {\n\t\tpanic(p)\n\t}\n\treturn join(left, right)\n}\n\n// ReduceFloat64Sum receives zero or more functions, executes them in parallel,\n// and adds their results in parallel.\n//\n// Each function is invoked in its own goroutine, and ReduceFloat64Sum returns\n// only when all functions have terminated.\n//\n// If one or more functions panic, the corresponding goroutines recover the\n// panics, and ReduceFloat64Sum eventually panics with the left-most recovered\n// panic value.\nfunc ReduceFloat64Sum(functions ...func() float64) float64 {\n\tswitch len(functions) {\n\tcase 0:\n\t\treturn 
0\n\tcase 1:\n\t\treturn functions[0]()\n\t}\n\tvar left, right float64\n\tvar p interface{}\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tswitch len(functions) {\n\tcase 2:\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tright = functions[1]()\n\t\t}()\n\t\tleft = functions[0]()\n\tdefault:\n\t\thalf := len(functions) / 2\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tright = ReduceFloat64Sum(functions[half:]...)\n\t\t}()\n\t\tleft = ReduceFloat64Sum(functions[:half]...)\n\t}\n\twg.Wait()\n\tif p != nil {\n\t\tpanic(p)\n\t}\n\treturn left + right\n}\n\n// ReduceFloat64Product receives zero or more functions, executes them in\n// parallel, and multiplies their results in parallel.\n//\n// Each function is invoked in its own goroutine, and ReduceFloat64Product\n// returns only when all functions have terminated.\n//\n// If one or more functions panic, the corresponding goroutines recover the\n// panics, and ReduceFloat64Product eventually panics with the left-most\n// recovered panic value.\nfunc ReduceFloat64Product(functions ...func() float64) float64 {\n\tswitch len(functions) {\n\tcase 0:\n\t\treturn 1\n\tcase 1:\n\t\treturn functions[0]()\n\t}\n\tvar left, right float64\n\tvar p interface{}\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tswitch len(functions) {\n\tcase 2:\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tright = functions[1]()\n\t\t}()\n\t\tleft = functions[0]()\n\tdefault:\n\t\thalf := len(functions) / 2\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tright = ReduceFloat64Product(functions[half:]...)\n\t\t}()\n\t\tleft = ReduceFloat64Product(functions[:half]...)\n\t}\n\twg.Wait()\n\tif p != nil {\n\t\tpanic(p)\n\t}\n\treturn left * right\n}\n\n// ReduceInt receives zero or more functions, executes them in parallel, and\n// combines their results with the join function in parallel.\n//\n// Each function is invoked in its own goroutine, and ReduceInt returns only\n// when all functions have terminated.\n//\n// If one or more functions panic, the corresponding goroutines recover the\n// panics, and ReduceInt eventually panics with the left-most recovered panic\n// value.\nfunc ReduceInt(\n\tjoin func(x, y int) int,\n\tfirstFunction func() int,\n\tmoreFunctions ...func() int,\n) int {\n\tif len(moreFunctions) == 0 {\n\t\treturn firstFunction()\n\t}\n\tvar left, right int\n\tvar p interface{}\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tif len(moreFunctions) == 1 {\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tright = moreFunctions[0]()\n\t\t}()\n\t\tleft = firstFunction()\n\t} else {\n\t\thalf := (len(moreFunctions) + 1) / 2\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tright = ReduceInt(join, moreFunctions[half], moreFunctions[half+1:]...)\n\t\t}()\n\t\tleft = ReduceInt(join, firstFunction, moreFunctions[:half]...)\n\t}\n\twg.Wait()\n\tif p != nil {\n\t\tpanic(p)\n\t}\n\treturn join(left, right)\n}\n\n// ReduceIntSum receives zero or more functions, executes them in parallel, and\n// adds their results in parallel.\n//\n// Each function is invoked in its own goroutine, and ReduceIntSum returns only\n// when all functions have terminated.\n//\n// If one or more 
functions panic, the corresponding goroutines recover the\n// panics, and ReduceIntSum eventually panics with the left-most recovered panic\n// value.\nfunc ReduceIntSum(functions ...func() int) int {\n\tswitch len(functions) {\n\tcase 0:\n\t\treturn 0\n\tcase 1:\n\t\treturn functions[0]()\n\t}\n\tvar left, right int\n\tvar p interface{}\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tswitch len(functions) {\n\tcase 2:\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tright = functions[1]()\n\t\t}()\n\t\tleft = functions[0]()\n\tdefault:\n\t\thalf := len(functions) / 2\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tright = ReduceIntSum(functions[half:]...)\n\t\t}()\n\t\tleft = ReduceIntSum(functions[:half]...)\n\t}\n\twg.Wait()\n\tif p != nil {\n\t\tpanic(p)\n\t}\n\treturn left + right\n}\n\n// ReduceIntProduct receives zero or more functions, executes them in parallel,\n// and multiplies their results in parallel.\n//\n// Each function is invoked in its own goroutine, and ReduceIntProduct returns\n// only when all functions have terminated.\n//\n// If one or more functions panic, the corresponding goroutines recover the\n// panics, and ReduceIntProduct eventually panics with the left-most recovered\n// panic value.\nfunc ReduceIntProduct(functions ...func() int) int {\n\tswitch len(functions) {\n\tcase 0:\n\t\treturn 1\n\tcase 1:\n\t\treturn functions[0]()\n\t}\n\tvar left, right int\n\tvar p interface{}\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tswitch len(functions) {\n\tcase 2:\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tright = functions[1]()\n\t\t}()\n\t\tleft = functions[0]()\n\tdefault:\n\t\thalf := len(functions) / 2\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tright = ReduceIntProduct(functions[half:]...)\n\t\t}()\n\t\tleft = ReduceIntProduct(functions[:half]...)\n\t}\n\twg.Wait()\n\tif p != nil {\n\t\tpanic(p)\n\t}\n\treturn left * right\n}\n\n// ReduceString receives zero or more functions, executes them in parallel, and\n// combines their results with the join function in parallel.\n//\n// Each function is invoked in its own goroutine, and ReduceString returns only\n// when all functions have terminated.\n//\n// If one or more functions panic, the corresponding goroutines recover the\n// panics, and ReduceString eventually panics with the left-most recovered panic\n// value.\nfunc ReduceString(\n\tjoin func(x, y string) string,\n\tfirstFunction func() string,\n\tmoreFunctions ...func() string,\n) string {\n\tif len(moreFunctions) == 0 {\n\t\treturn firstFunction()\n\t}\n\tvar left, right string\n\tvar p interface{}\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tif len(moreFunctions) == 1 {\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tright = moreFunctions[0]()\n\t\t}()\n\t\tleft = firstFunction()\n\t} else {\n\t\thalf := (len(moreFunctions) + 1) / 2\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tright = ReduceString(join, moreFunctions[half], moreFunctions[half+1:]...)\n\t\t}()\n\t\tleft = ReduceString(join, firstFunction, moreFunctions[:half]...)\n\t}\n\twg.Wait()\n\tif p != nil {\n\t\tpanic(p)\n\t}\n\treturn join(left, right)\n}\n\n// ReduceStringSum receives 
zero or more functions, executes them in parallel,\n// and concatenates their results in parallel.\n//\n// Each function is invoked in its own goroutine, and ReduceStringSum returns\n// only when all functions have terminated.\n//\n// If one or more functions panic, the corresponding goroutines recover the\n// panics, and ReduceStringSum eventually panics with the left-most recovered\n// panic value.\nfunc ReduceStringSum(functions ...func() string) string {\n\tswitch len(functions) {\n\tcase 0:\n\t\treturn \"\"\n\tcase 1:\n\t\treturn functions[0]()\n\t}\n\tvar left, right string\n\tvar p interface{}\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tswitch len(functions) {\n\tcase 2:\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tright = functions[1]()\n\t\t}()\n\t\tleft = functions[0]()\n\tdefault:\n\t\thalf := len(functions) / 2\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tright = ReduceStringSum(functions[half:]...)\n\t\t}()\n\t\tleft = ReduceStringSum(functions[:half]...)\n\t}\n\twg.Wait()\n\tif p != nil {\n\t\tpanic(p)\n\t}\n\treturn left + right\n}\n\n// Do receives zero or more thunks and executes them in parallel.\n//\n// Each thunk is invoked in its own goroutine, and Do returns only when all\n// thunks have terminated.\n//\n// If one or more thunks panic, the corresponding goroutines recover the panics,\n// and Do eventually panics with the left-most recovered panic value.\nfunc Do(thunks ...func()) {\n\tswitch len(thunks) {\n\tcase 0:\n\t\treturn\n\tcase 1:\n\t\tthunks[0]()\n\t\treturn\n\t}\n\tvar p interface{}\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tswitch len(thunks) {\n\tcase 2:\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tthunks[1]()\n\t\t}()\n\t\tthunks[0]()\n\tdefault:\n\t\thalf := len(thunks) / 2\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tDo(thunks[half:]...)\n\t\t}()\n\t\tDo(thunks[:half]...)\n\t}\n\twg.Wait()\n\tif p != nil {\n\t\tpanic(p)\n\t}\n}\n\n// And receives zero or more predicate functions and executes them in parallel.\n//\n// Each predicate is invoked in its own goroutine, and And returns only when all\n// predicates have terminated, combining all return values with the && operator,\n// with true as the default return value.\n//\n// If one or more predicates panic, the corresponding goroutines recover the\n// panics, and And eventually panics with the left-most recovered panic value.\nfunc And(predicates ...func() bool) bool {\n\tswitch len(predicates) {\n\tcase 0:\n\t\treturn true\n\tcase 1:\n\t\treturn predicates[0]()\n\t}\n\tvar b0, b1 bool\n\tvar p interface{}\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tswitch len(predicates) {\n\tcase 2:\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tb1 = predicates[1]()\n\t\t}()\n\t\tb0 = predicates[0]()\n\tdefault:\n\t\thalf := len(predicates) / 2\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tb1 = And(predicates[half:]...)\n\t\t}()\n\t\tb0 = And(predicates[:half]...)\n\t}\n\twg.Wait()\n\tif p != nil {\n\t\tpanic(p)\n\t}\n\treturn b0 && b1\n}\n\n// Or receives zero or more predicate functions and executes them in parallel.\n//\n// Each predicate is invoked in its own goroutine, and Or returns only 
when all\n// predicates have terminated, combining all return values with the || operator,\n// with false as the default return value.\n//\n// If one or more predicates panic, the corresponding goroutines recover the\n// panics, and Or eventually panics with the left-most recovered panic value.\nfunc Or(predicates ...func() bool) bool {\n\tswitch len(predicates) {\n\tcase 0:\n\t\treturn false\n\tcase 1:\n\t\treturn predicates[0]()\n\t}\n\tvar b0, b1 bool\n\tvar p interface{}\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tswitch len(predicates) {\n\tcase 2:\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tb1 = predicates[1]()\n\t\t}()\n\t\tb0 = predicates[0]()\n\tdefault:\n\t\thalf := len(predicates) / 2\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tb1 = Or(predicates[half:]...)\n\t\t}()\n\t\tb0 = Or(predicates[:half]...)\n\t}\n\twg.Wait()\n\tif p != nil {\n\t\tpanic(p)\n\t}\n\treturn b0 || b1\n}\n\n// Range receives a range, a batch count n, and a range function f, divides the\n// range into batches, and invokes the range function for each of these batches\n// in parallel, covering the half-open interval from low to high, including low\n// but excluding high.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// The range function is invoked for each batch in its own goroutine, with 0 <=\n// low <= high, and Range returns only when all range functions have terminated.\n//\n// Range panics if high < low, or if n < 0.\n//\n// If one or more range function invocations panic, the corresponding goroutines\n// recover the panics, and Range eventually panics with the left-most recovered\n// panic value.\nfunc Range(\n\tlow, high, n int,\n\tf func(low, high int),\n) {\n\tvar recur func(int, int, int)\n\trecur = func(low, high, n int) {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\tf(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\tf(low, high)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tvar p interface{}\n\t\t\tvar wg sync.WaitGroup\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\trecur(mid, high, n-half)\n\t\t\t}()\n\t\t\trecur(low, mid, half)\n\t\t\twg.Wait()\n\t\t\tif p != nil {\n\t\t\t\tpanic(p)\n\t\t\t}\n\t\t\treturn\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\trecur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n\n// RangeAnd receives a range, a batch count n, and a range predicate function f,\n// divides the range into batches, and invokes the range predicate for each of\n// these batches in parallel, covering the half-open interval from low to high,\n// including low but excluding high.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. 
If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// The range predicate is invoked for each batch in its own goroutine, with 0 <=\n// low <= high, and RangeAnd returns only when all range predicates have\n// terminated, combining all return values with the && operator.\n//\n// RangeAnd panics if high < low, or if n < 0.\n//\n// If one or more range predicate invocations panic, the corresponding\n// goroutines recover the panics, and RangeAnd eventually panics with the\n// left-most recovered panic value.\nfunc RangeAnd(\n\tlow, high, n int,\n\tf func(low, high int) bool,\n) bool {\n\tvar recur func(int, int, int) bool\n\trecur = func(low, high, n int) bool {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn f(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn f(low, high)\n\t\t\t}\n\t\t\tvar b0, b1 bool\n\t\t\tvar p interface{}\n\t\t\tvar wg sync.WaitGroup\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tb1 = recur(mid, high, n-half)\n\t\t\t}()\n\t\t\tb0 = recur(low, mid, half)\n\t\t\twg.Wait()\n\t\t\tif p != nil {\n\t\t\t\tpanic(p)\n\t\t\t}\n\t\t\treturn b0 && b1\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n\n// RangeOr receives a range, a batch count n, and a range predicate function f,\n// divides the range into batches, and invokes the range predicate for each of\n// these batches in parallel, covering the half-open interval from low to high,\n// including low but excluding high.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. 
If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// The range predicate is invoked for each batch in its own goroutine, with 0 <=\n// low <= high, and RangeOr returns only when all range predicates have\n// terminated, combining all return values with the || operator.\n//\n// RangeOr panics if high < low, or if n < 0.\n//\n// If one or more range predicate invocations panic, the corresponding\n// goroutines recover the panics, and RangeOr eventually panics with the\n// left-most recovered panic value.\nfunc RangeOr(\n\tlow, high, n int,\n\tf func(low, high int) bool,\n) bool {\n\tvar recur func(int, int, int) bool\n\trecur = func(low, high, n int) bool {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn f(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn f(low, high)\n\t\t\t}\n\t\t\tvar b0, b1 bool\n\t\t\tvar p interface{}\n\t\t\tvar wg sync.WaitGroup\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tb1 = recur(mid, high, n-half)\n\t\t\t}()\n\t\t\tb0 = recur(low, mid, half)\n\t\t\twg.Wait()\n\t\t\tif p != nil {\n\t\t\t\tpanic(p)\n\t\t\t}\n\t\t\treturn b0 || b1\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n\n// RangeReduce receives a range, a batch count, a range reduce function, and a\n// join function, divides the range into batches, and invokes the range reducer\n// for each of these batches in parallel, covering the half-open interval from\n// low to high, including low but excluding high. The results of the range\n// reducer invocations are then combined by repeated invocations of join.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. 
If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// The range reducer is invoked for each batch in its own goroutine, with 0 <=\n// low <= high, and RangeReduce returns only when all range reducers and pair\n// reducers have terminated.\n//\n// RangeReduce panics if high < low, or if n < 0.\n//\n// If one or more reducer invocations panic, the corresponding goroutines\n// recover the panics, and RangeReduce eventually panics with the left-most\n// recovered panic value.\nfunc RangeReduce(\n\tlow, high, n int,\n\treduce func(low, high int) interface{},\n\tjoin func(x, y interface{}) interface{},\n) interface{} {\n\tvar recur func(int, int, int) interface{}\n\trecur = func(low, high, n int) interface{} {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn reduce(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn reduce(low, high)\n\t\t\t}\n\t\t\tvar left, right interface{}\n\t\t\tvar p interface{}\n\t\t\tvar wg sync.WaitGroup\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = recur(mid, high, n-half)\n\t\t\t}()\n\t\t\tleft = recur(low, mid, half)\n\t\t\twg.Wait()\n\t\t\tif p != nil {\n\t\t\t\tpanic(p)\n\t\t\t}\n\t\t\treturn join(left, right)\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n\n// RangeReduceInt receives a range, a batch count n, a range reducer function,\n// and a join function, divides the range into batches, and invokes the range\n// reducer for each of these batches in parallel, covering the half-open\n// interval from low to high, including low but excluding high. The results of\n// the range reducer invocations are then combined by repeated invocations of\n// join.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. 
If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// The range reducer is invoked for each batch in its own goroutine, with 0 <=\n// low <= high, and RangeReduceInt returns only when all range reducers and pair\n// reducers have terminated.\n//\n// RangeReduceInt panics if high < low, or if n < 0.\n//\n// If one or more reducer invocations panic, the corresponding goroutines\n// recover the panics, and RangeReduceInt eventually panics with the left-most\n// recovered panic value.\nfunc RangeReduceInt(\n\tlow, high, n int,\n\treduce func(low, high int) int,\n\tjoin func(x, y int) int,\n) int {\n\tvar recur func(int, int, int) int\n\trecur = func(low, high, n int) int {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn reduce(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn reduce(low, high)\n\t\t\t}\n\t\t\tvar left, right int\n\t\t\tvar p interface{}\n\t\t\tvar wg sync.WaitGroup\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = recur(mid, high, n-half)\n\t\t\t}()\n\t\t\tleft = recur(low, mid, half)\n\t\t\twg.Wait()\n\t\t\tif p != nil {\n\t\t\t\tpanic(p)\n\t\t\t}\n\t\t\treturn join(left, right)\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n\n// RangeReduceIntSum receives a range, a batch count n, and a range reducer\n// function, divides the range into batches, and invokes the range reducer for\n// each of these batches in parallel, covering the half-open interval from low\n// to high, including low but excluding high. The results of the range reducer\n// invocations are then added together.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. 
If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// The range reducer is invoked for each batch in its own goroutine, with 0 <=\n// low <= high, and RangeReduceIntSum returns only when all range reducers and\n// pair reducers have terminated.\n//\n// RangeReduceIntSum panics if high < low, or if n < 0.\n//\n// If one or more reducer invocations panic, the corresponding goroutines\n// recover the panics, and RangeReduceIntSum eventually panics with the\n// left-most recovered panic value.\nfunc RangeReduceIntSum(\n\tlow, high, n int,\n\treduce func(low, high int) int,\n) int {\n\tvar recur func(int, int, int) int\n\trecur = func(low, high, n int) int {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn reduce(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn reduce(low, high)\n\t\t\t}\n\t\t\tvar left, right int\n\t\t\tvar p interface{}\n\t\t\tvar wg sync.WaitGroup\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = recur(mid, high, n-half)\n\t\t\t}()\n\t\t\tleft = recur(low, mid, half)\n\t\t\twg.Wait()\n\t\t\tif p != nil {\n\t\t\t\tpanic(p)\n\t\t\t}\n\t\t\treturn left + right\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n\n// RangeReduceIntProduct receives a range, a batch count n, and a range reducer\n// function, divides the range into batches, and invokes the range reducer for\n// each of these batches in parallel, covering the half-open interval from low\n// to high, including low but excluding high. The results of the range reducer\n// invocations are then multiplied with each other.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. 
If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// The range reducer is invoked for each batch in its own goroutine, with 0 <=\n// low <= high, and RangeReduceIntProduct returns only when all range reducers\n// and pair reducers have terminated.\n//\n// RangeReduceIntProduct panics if high < low, or if n < 0.\n//\n// If one or more reducer invocations panic, the corresponding goroutines\n// recover the panics, and RangeReduceIntProduct eventually panics with the\n// left-most recovered panic value.\nfunc RangeReduceIntProduct(\n\tlow, high, n int,\n\treduce func(low, high int) int,\n) int {\n\tvar recur func(int, int, int) int\n\trecur = func(low, high, n int) int {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn reduce(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn reduce(low, high)\n\t\t\t}\n\t\t\tvar left, right int\n\t\t\tvar p interface{}\n\t\t\tvar wg sync.WaitGroup\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = recur(mid, high, n-half)\n\t\t\t}()\n\t\t\tleft = recur(low, mid, half)\n\t\t\twg.Wait()\n\t\t\tif p != nil {\n\t\t\t\tpanic(p)\n\t\t\t}\n\t\t\treturn left * right\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n\n// RangeReduceFloat64 receives a range, a batch count n, a range reducer\n// function, and a join function, divides the range into batches, and invokes\n// the range reducer for each of these batches in parallel, covering the\n// half-open interval from low to high, including low but excluding high. The\n// results of the range reducer invocations are then combined by repeated\n// invocations of join.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. 
If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// The range reducer is invoked for each batch in its own goroutine, with 0 <=\n// low <= high, and RangeReduceFloat64 returns only when all range reducers and\n// pair reducers have terminated.\n//\n// RangeReduceFloat64 panics if high < low, or if n < 0.\n//\n// If one or more reducer invocations panic, the corresponding goroutines\n// recover the panics, and RangeReduceFloat64 eventually panics with the\n// left-most recovered panic value.\nfunc RangeReduceFloat64(\n\tlow, high, n int,\n\treduce func(low, high int) float64,\n\tjoin func(x, y float64) float64,\n) float64 {\n\tvar recur func(int, int, int) float64\n\trecur = func(low, high, n int) float64 {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn reduce(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn reduce(low, high)\n\t\t\t}\n\t\t\tvar left, right float64\n\t\t\tvar p interface{}\n\t\t\tvar wg sync.WaitGroup\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = recur(mid, high, n-half)\n\t\t\t}()\n\t\t\tleft = recur(low, mid, half)\n\t\t\twg.Wait()\n\t\t\tif p != nil {\n\t\t\t\tpanic(p)\n\t\t\t}\n\t\t\treturn join(left, right)\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n\n// RangeReduceFloat64Sum receives a range, a batch count n, and a range reducer\n// function, divides the range into batches, and invokes the range reducer for\n// each of these batches in parallel, covering the half-open interval from low\n// to high, including low but excluding high. The results of the range reducer\n// invocations are then added together.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. 
If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// The range reducer is invoked for each batch in its own goroutine, with 0 <=\n// low <= high, and RangeReduceFloat64Sum returns only when all range reducers\n// and pair reducers have terminated.\n//\n// RangeReduceFloat64Sum panics if high < low, or if n < 0.\n//\n// If one or more reducer invocations panic, the corresponding goroutines\n// recover the panics, and RangeReduceFloat64Sum eventually panics with the\n// left-most recovered panic value.\nfunc RangeReduceFloat64Sum(\n\tlow, high, n int,\n\treduce func(low, high int) float64,\n) float64 {\n\tvar recur func(int, int, int) float64\n\trecur = func(low, high, n int) float64 {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn reduce(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn reduce(low, high)\n\t\t\t}\n\t\t\tvar left, right float64\n\t\t\tvar p interface{}\n\t\t\tvar wg sync.WaitGroup\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = recur(mid, high, n-half)\n\t\t\t}()\n\t\t\tleft = recur(low, mid, half)\n\t\t\twg.Wait()\n\t\t\tif p != nil {\n\t\t\t\tpanic(p)\n\t\t\t}\n\t\t\treturn left + right\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n\n// RangeReduceFloat64Product receives a range, a batch count n, and a range\n// reducer function, divides the range into batches, and invokes the range\n// reducer for each of these batches in parallel, covering the half-open\n// interval from low to high, including low but excluding high. The results of\n// the range reducer invocations are then multiplied with each other.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. 
If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// The range reducer is invoked for each batch in its own goroutine, with 0 <=\n// low <= high, and RangeReduceFloat64Product returns only when all range\n// reducers and pair reducers have terminated.\n//\n// RangeReduceFloat64Product panics if high < low, or if n < 0.\n//\n// If one or more reducer invocations panic, the corresponding goroutines\n// recover the panics, and RangeReduceFloat64Product eventually panics with the\n// left-most recovered panic value.\nfunc RangeReduceFloat64Product(\n\tlow, high, n int,\n\treduce func(low, high int) float64,\n) float64 {\n\tvar recur func(int, int, int) float64\n\trecur = func(low, high, n int) float64 {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn reduce(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn reduce(low, high)\n\t\t\t}\n\t\t\tvar left, right float64\n\t\t\tvar p interface{}\n\t\t\tvar wg sync.WaitGroup\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = recur(mid, high, n-half)\n\t\t\t}()\n\t\t\tleft = recur(low, mid, half)\n\t\t\twg.Wait()\n\t\t\tif p != nil {\n\t\t\t\tpanic(p)\n\t\t\t}\n\t\t\treturn left * right\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n\n// RangeReduceString receives a range, a batch count n, a range reducer\n// function, and a join function, divides the range into batches, and invokes\n// the range reducer for each of these batches in parallel, covering the\n// half-open interval from low to high, including low but excluding high. The\n// results of the range reducer invocations are then combined by repeated\n// invocations of join.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. 
If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// The range reducer is invoked for each batch in its own goroutine, with 0 <=\n// low <= high, and RangeReduceString returns only when all range reducers and\n// pair reducers have terminated.\n//\n// RangeReduceString panics if high < low, or if n < 0.\n//\n// If one or more reducer invocations panic, the corresponding goroutines\n// recover the panics, and RangeReduceString eventually panics with the\n// left-most recovered panic value.\nfunc RangeReduceString(\n\tlow, high, n int,\n\treduce func(low, high int) string,\n\tjoin func(x, y string) string,\n) string {\n\tvar recur func(int, int, int) string\n\trecur = func(low, high, n int) string {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn reduce(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn reduce(low, high)\n\t\t\t}\n\t\t\tvar left, right string\n\t\t\tvar p interface{}\n\t\t\tvar wg sync.WaitGroup\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = recur(mid, high, n-half)\n\t\t\t}()\n\t\t\tleft = recur(low, mid, half)\n\t\t\twg.Wait()\n\t\t\tif p != nil {\n\t\t\t\tpanic(p)\n\t\t\t}\n\t\t\treturn join(left, right)\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n\n// RangeReduceStringSum receives a range, a batch count n, and a range reducer\n// function, divides the range into batches, and invokes the range reducer for\n// each of these batches in parallel, covering the half-open interval from low\n// to high, including low but excluding high. The results of the range reducer\n// invocations are then concatenated together.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. 
If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// The range reducer is invoked for each batch in its own goroutine, with 0 <=\n// low <= high, and RangeReduceStringSum returns only when all range reducers\n// and pair reducers have terminated.\n//\n// RangeReduceStringSum panics if high < low, or if n < 0.\n//\n// If one or more reducer invocations panic, the corresponding goroutines\n// recover the panics, and RangeReduceStringSum eventually panics with the\n// left-most recovered panic value.\nfunc RangeReduceStringSum(\n\tlow, high, n int,\n\treduce func(low, high int) string,\n) string {\n\tvar recur func(int, int, int) string\n\trecur = func(low, high, n int) string {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn reduce(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn reduce(low, high)\n\t\t\t}\n\t\t\tvar left, right string\n\t\t\tvar p interface{}\n\t\t\tvar wg sync.WaitGroup\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = recur(mid, high, n-half)\n\t\t\t}()\n\t\t\tleft = recur(low, mid, half)\n\t\t\twg.Wait()\n\t\t\tif p != nil {\n\t\t\t\tpanic(p)\n\t\t\t}\n\t\t\treturn left + right\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n"
  },
  {
    "path": "parallel/parallel_test.go",
    "content": "package parallel_test\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"runtime\"\n\n\t\"github.com/exascience/pargo/parallel\"\n)\n\nfunc ExampleDo() {\n\tvar fib func(int) (int, error)\n\n\tfib = func(n int) (result int, err error) {\n\t\tif n < 0 {\n\t\t\terr = errors.New(\"invalid argument\")\n\t\t} else if n < 2 {\n\t\t\tresult = n\n\t\t} else {\n\t\t\tvar n1, n2 int\n\t\t\tn1, err = fib(n - 1)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tn2, err = fib(n - 2)\n\t\t\tresult = n1 + n2\n\t\t}\n\t\treturn\n\t}\n\n\ttype intErr struct {\n\t\tn   int\n\t\terr error\n\t}\n\n\tvar parallelFib func(int) intErr\n\n\tparallelFib = func(n int) (result intErr) {\n\t\tif n < 0 {\n\t\t\tresult.err = errors.New(\"invalid argument\")\n\t\t} else if n < 20 {\n\t\t\tresult.n, result.err = fib(n)\n\t\t} else {\n\t\t\tvar n1, n2 intErr\n\t\t\tparallel.Do(\n\t\t\t\tfunc() { n1 = parallelFib(n - 1) },\n\t\t\t\tfunc() { n2 = parallelFib(n - 2) },\n\t\t\t)\n\t\t\tresult.n = n1.n + n2.n\n\t\t\tif n1.err != nil {\n\t\t\t\tresult.err = n1.err\n\t\t\t} else {\n\t\t\t\tresult.err = n2.err\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\n\tif result := parallelFib(-1); result.err != nil {\n\t\tfmt.Println(result.err)\n\t} else {\n\t\tfmt.Println(result.n)\n\t}\n\n\t// Output:\n\t// invalid argument\n}\n\nfunc ExampleRangeReduceIntSum() {\n\tnumDivisors := func(n int) int {\n\t\treturn parallel.RangeReduceIntSum(\n\t\t\t1, n+1, runtime.GOMAXPROCS(0),\n\t\t\tfunc(low, high int) int {\n\t\t\t\tvar sum int\n\t\t\t\tfor i := low; i < high; i++ {\n\t\t\t\t\tif (n % i) == 0 {\n\t\t\t\t\t\tsum++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn sum\n\t\t\t},\n\t\t)\n\t}\n\n\tfmt.Println(numDivisors(12))\n\n\t// Output:\n\t// 6\n}\n\nfunc numDivisors(n int) int {\n\treturn parallel.RangeReduceIntSum(\n\t\t1, n+1, runtime.GOMAXPROCS(0),\n\t\tfunc(low, high int) int {\n\t\t\tvar sum int\n\t\t\tfor i := low; i < high; i++ {\n\t\t\t\tif (n % i) == 0 {\n\t\t\t\t\tsum++\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn sum\n\t\t},\n\t)\n}\n\nfunc ExampleRangeReduce() {\n\tfindPrimes := func(n int) []int {\n\t\tresult := parallel.RangeReduce(\n\t\t\t2, n, 4*runtime.GOMAXPROCS(0),\n\t\t\tfunc(low, high int) interface{} {\n\t\t\t\tvar slice []int\n\t\t\t\tfor i := low; i < high; i++ {\n\t\t\t\t\tif numDivisors(i) == 2 { // see RangeReduceInt example\n\t\t\t\t\t\tslice = append(slice, i)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn slice\n\t\t\t},\n\t\t\tfunc(x, y interface{}) interface{} {\n\t\t\t\treturn append(x.([]int), y.([]int)...)\n\t\t\t},\n\t\t)\n\t\treturn result.([]int)\n\t}\n\n\tfmt.Println(findPrimes(20))\n\n\t// Output:\n\t// [2 3 5 7 11 13 17 19]\n}\n\nfunc ExampleRangeReduceFloat64Sum() {\n\tsumFloat64s := func(f []float64) float64 {\n\t\tresult := parallel.RangeReduceFloat64Sum(\n\t\t\t0, len(f), runtime.GOMAXPROCS(0),\n\t\t\tfunc(low, high int) float64 {\n\t\t\t\tvar sum float64\n\t\t\t\tfor i := low; i < high; i++ {\n\t\t\t\t\tsum += f[i]\n\t\t\t\t}\n\t\t\t\treturn sum\n\t\t\t},\n\t\t)\n\t\treturn result\n\t}\n\n\tfmt.Println(sumFloat64s([]float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}))\n\n\t// Output:\n\t// 55\n}\n"
  },
  {
    "path": "pipeline/example_wordcount_test.go",
    "content": "package pipeline_test\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"io\"\n\t\"runtime\"\n\t\"strings\"\n\n\t\"github.com/exascience/pargo/pipeline\"\n\t\"github.com/exascience/pargo/sort\"\n\t\"github.com/exascience/pargo/sync\"\n)\n\ntype Word string\n\nfunc (w Word) Hash() (hash uint64) {\n\t// DJBX33A\n\thash = 5381\n\tfor _, b := range w {\n\t\thash = ((hash << 5) + hash) + uint64(b)\n\t}\n\treturn\n}\n\nfunc WordCount(r io.Reader) *sync.Map {\n\tresult := sync.NewMap(16 * runtime.GOMAXPROCS(0))\n\tscanner := pipeline.NewScanner(r)\n\tscanner.Split(bufio.ScanWords)\n\tvar p pipeline.Pipeline\n\tp.Source(scanner)\n\tp.Add(\n\t\tpipeline.Par(pipeline.Receive(\n\t\t\tfunc(_ int, data interface{}) interface{} {\n\t\t\t\tvar uniqueWords []string\n\t\t\t\tfor _, s := range data.([]string) {\n\t\t\t\t\tnewValue, _ := result.Modify(Word(s), func(value interface{}, ok bool) (newValue interface{}, store bool) {\n\t\t\t\t\t\tif ok {\n\t\t\t\t\t\t\tnewValue = value.(int) + 1\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tnewValue = 1\n\t\t\t\t\t\t}\n\t\t\t\t\t\tstore = true\n\t\t\t\t\t\treturn\n\t\t\t\t\t})\n\t\t\t\t\tif newValue.(int) == 1 {\n\t\t\t\t\t\tuniqueWords = append(uniqueWords, s)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn uniqueWords\n\t\t\t},\n\t\t)),\n\t\tpipeline.Ord(pipeline.ReceiveAndFinalize(\n\t\t\tfunc(_ int, data interface{}) interface{} {\n\t\t\t\t// print unique words as encountered first at the source\n\t\t\t\tfor _, s := range data.([]string) {\n\t\t\t\t\tfmt.Print(s, \" \")\n\t\t\t\t}\n\t\t\t\treturn data\n\t\t\t},\n\t\t\tfunc() { fmt.Println(\".\") },\n\t\t)),\n\t)\n\tp.Run()\n\treturn result\n}\n\nfunc Example_wordCount() {\n\tr := strings.NewReader(\"The big black bug bit the big black bear but the big black bear bit the big black bug back\")\n\tcounts := WordCount(r)\n\twords := make(sort.StringSlice, 0)\n\tcounts.Range(func(key, _ interface{}) bool {\n\t\twords = append(words, string(key.(Word)))\n\t\treturn true\n\t})\n\tsort.Sort(words)\n\tfor _, word := range words {\n\t\tcount, _ := counts.Load(Word(word))\n\t\tfmt.Println(word, count.(int))\n\t}\n\n\t// Output:\n\t// The big black bug bit the bear but back .\n\t// The 1\n\t// back 1\n\t// bear 2\n\t// big 4\n\t// bit 2\n\t// black 4\n\t// bug 2\n\t// but 1\n\t// the 3\n}\n"
  },
  {
    "path": "pipeline/filter.go",
    "content": "package pipeline\n\n// A NodeKind reperesents the different kinds of nodes.\ntype NodeKind int\n\nconst (\n\t// Ordered nodes receive batches in encounter order.\n\tOrdered NodeKind = iota\n\n\t// Sequential nodes receive batches in arbitrary sequential order.\n\tSequential\n\n\t// Parallel nodes receives batches in parallel.\n\tParallel\n)\n\n// A Filter is a function that returns a Receiver and a Finalizer to be added to\n// a node. It receives a pipeline, the kind of node it will be added to, and the\n// expected total data size that the receiver will be asked to process.\n//\n// The dataSize parameter is either positive, in which case it indicates the\n// expected total size of all batches that will eventually be passed to this\n// filter's receiver, or it is negative, in which case the expected size is\n// either unknown or too difficult to determine. The dataSize parameter is a\n// pointer whose contents can be modified by the filter, for example if this\n// filter increases or decreases the total size for subsequent filters, or if\n// this filter can change dataSize from an unknown to a known value, or vice\n// versa, must change it from a known to an unknown value.\n//\n// Either the receiver or the finalizer or both can be nil, in which case they\n// will not be added to the current node.\ntype Filter func(pipeline *Pipeline, kind NodeKind, dataSize *int) (Receiver, Finalizer)\n\n// A Receiver is called for every data batch, and returns a potentially modified\n// data batch. The seqNo parameter indicates the order in which the data batch\n// was encountered at the current pipeline's data source.\ntype Receiver func(seqNo int, data interface{}) (filteredData interface{})\n\n// A Finalizer is called once after the corresponding receiver has been called\n// for all data batches in the current pipeline.\ntype Finalizer func()\n\n// ComposeFilters takes a number of filters, calls them with the given pipeline,\n// kind, and dataSize parameters in order, and appends the returned receivers\n// and finalizers (except for nil values) to the result slices.\n//\n// ComposeFilters is used in Node implementations. User programs typically do\n// not call ComposeFilters.\nfunc ComposeFilters(pipeline *Pipeline, kind NodeKind, dataSize *int, filters []Filter) (receivers []Receiver, finalizers []Finalizer) {\n\tfor _, filter := range filters {\n\t\treceiver, finalizer := filter(pipeline, kind, dataSize)\n\t\tif receiver != nil {\n\t\t\treceivers = append(receivers, receiver)\n\t\t}\n\t\tif finalizer != nil {\n\t\t\tfinalizers = append(finalizers, finalizer)\n\t\t}\n\t}\n\treturn\n}\n\nfunc feed(p *Pipeline, receivers []Receiver, index int, seqNo int, data interface{}) {\n\tfor _, receive := range receivers {\n\t\tdata = receive(seqNo, data)\n\t}\n\tp.FeedForward(index, seqNo, data)\n}\n"
  },
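  {
    "path": "pipeline/example_filter_test.go",
    "content": "package pipeline_test\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/exascience/pargo/pipeline\"\n)\n\n// doubleAndTally returns a Filter whose receiver doubles every int in each\n// data batch, and whose finalizer reports how many elements were seen once all\n// batches have been processed. The count is unsynchronized, so this filter is\n// only meant for Seq or Ord nodes, not for Par nodes.\nfunc doubleAndTally(total *int) pipeline.Filter {\n\treturn func(_ *pipeline.Pipeline, _ pipeline.NodeKind, _ *int) (pipeline.Receiver, pipeline.Finalizer) {\n\t\treceiver := func(_ int, data interface{}) interface{} {\n\t\t\tbatch := data.([]int)\n\t\t\tout := make([]int, len(batch))\n\t\t\tfor i, v := range batch {\n\t\t\t\tout[i] = 2 * v\n\t\t\t}\n\t\t\t*total += len(batch)\n\t\t\treturn out\n\t\t}\n\t\tfinalizer := func() {\n\t\t\tfmt.Println(\"elements seen:\", *total)\n\t\t}\n\t\treturn receiver, finalizer\n\t}\n}\n\n// Example_customFilter is a hypothetical sketch, not part of the pargo API,\n// that shows one way to write a custom Filter against the Filter, Receiver,\n// and Finalizer types from filter.go. The names doubleAndTally and\n// Example_customFilter are made up for illustration.\nfunc Example_customFilter() {\n\tvar total int\n\tvar result []int\n\tvar p pipeline.Pipeline\n\tp.Source([]int{1, 2, 3, 4, 5})\n\tp.Add(\n\t\tpipeline.Seq(doubleAndTally(&total)),\n\t\tpipeline.Ord(pipeline.Slice(&result)),\n\t)\n\tp.Run()\n\tfmt.Println(result)\n\n\t// Output:\n\t// elements seen: 5\n\t// [2 4 6 8 10]\n}\n"
  },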
  {
    "path": "pipeline/filters.go",
    "content": "package pipeline\n\nimport (\n\t\"errors\"\n\t\"reflect\"\n\t\"sync\"\n\t\"sync/atomic\"\n)\n\n// NewNode creates a node of the given kind, with the given filters.\n//\n// It is often more convenient to use one of the Ord, Seq, or Par methods.\nfunc NewNode(kind NodeKind, filters ...Filter) Node {\n\tswitch kind {\n\tcase Ordered, Sequential:\n\t\treturn &seqnode{kind: kind, filters: filters}\n\tcase Parallel:\n\t\treturn &parnode{filters: filters}\n\tdefault:\n\t\tpanic(\"Invalid NodeKind in pipeline.NewNode.\")\n\t}\n}\n\n// Identity is a filter that passes data batches through unmodified.\n// This filter will be optimized away in a pipeline, so it\n// does not hurt to add it.\nfunc Identity(_ *Pipeline, _ NodeKind, _ *int) (_ Receiver, _ Finalizer) {\n\treturn\n}\n\n// Receive creates a Filter that returns the given receiver and a nil finalizer.\nfunc Receive(receive Receiver) Filter {\n\treturn func(_ *Pipeline, _ NodeKind, _ *int) (receiver Receiver, _ Finalizer) {\n\t\treceiver = receive\n\t\treturn\n\t}\n}\n\n// Finalize creates a filter that returns a nil receiver and the given\n// finalizer.\nfunc Finalize(finalize Finalizer) Filter {\n\treturn func(_ *Pipeline, _ NodeKind, _ *int) (_ Receiver, finalizer Finalizer) {\n\t\tfinalizer = finalize\n\t\treturn\n\t}\n}\n\n// ReceiveAndFinalize creates a filter that returns the given filter and\n// receiver.\nfunc ReceiveAndFinalize(receive Receiver, finalize Finalizer) Filter {\n\treturn func(_ *Pipeline, _ NodeKind, _ *int) (receiver Receiver, finalizer Finalizer) {\n\t\treceiver = receive\n\t\tfinalizer = finalize\n\t\treturn\n\t}\n}\n\n// A Predicate is a function that is passed a data batch and returns a boolean\n// value.\n//\n// In most cases, it will cast the data parameter to a specific slice type and\n// check a predicate on each element of the slice.\ntype Predicate func(data interface{}) bool\n\n// Every creates a filter that sets the result pointer to true if the given\n// predicate returns true for every data batch. If cancelWhenKnown is true, this\n// filter cancels the pipeline as soon as the predicate returns false on a data\n// batch.\nfunc Every(result *bool, cancelWhenKnown bool, predicate Predicate) Filter {\n\t*result = true\n\treturn func(pipeline *Pipeline, kind NodeKind, _ *int) (receiver Receiver, finalizer Finalizer) {\n\t\tswitch kind {\n\t\tcase Parallel:\n\t\t\tres := int32(1)\n\t\t\treceiver = func(_ int, data interface{}) interface{} {\n\t\t\t\tif !predicate(data) {\n\t\t\t\t\tatomic.StoreInt32(&res, 0)\n\t\t\t\t\tif cancelWhenKnown {\n\t\t\t\t\t\tpipeline.Cancel()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn data\n\t\t\t}\n\t\t\tfinalizer = func() {\n\t\t\t\tif atomic.LoadInt32(&res) == 0 {\n\t\t\t\t\t*result = false\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\treceiver = func(_ int, data interface{}) interface{} {\n\t\t\t\tif !predicate(data) {\n\t\t\t\t\t*result = false\n\t\t\t\t\tif cancelWhenKnown {\n\t\t\t\t\t\tpipeline.Cancel()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn data\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n}\n\n// NotEvery creates a filter that sets the result pointer to true if the given\n// predicate returns false for at least one of the data batches it is passed. 
If\n// cancelWhenKnown is true, this filter cancels the pipeline as soon as the\n// predicate returns false on a data batch.\nfunc NotEvery(result *bool, cancelWhenKnown bool, predicate Predicate) Filter {\n\t*result = false\n\treturn func(pipeline *Pipeline, kind NodeKind, _ *int) (receiver Receiver, finalizer Finalizer) {\n\t\tswitch kind {\n\t\tcase Parallel:\n\t\t\tres := int32(0)\n\t\t\treceiver = func(_ int, data interface{}) interface{} {\n\t\t\t\tif !predicate(data) {\n\t\t\t\t\tatomic.StoreInt32(&res, 1)\n\t\t\t\t\tif cancelWhenKnown {\n\t\t\t\t\t\tpipeline.Cancel()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn data\n\t\t\t}\n\t\t\tfinalizer = func() {\n\t\t\t\tif atomic.LoadInt32(&res) == 1 {\n\t\t\t\t\t*result = true\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\treceiver = func(_ int, data interface{}) interface{} {\n\t\t\t\tif !predicate(data) {\n\t\t\t\t\t*result = true\n\t\t\t\t\tif cancelWhenKnown {\n\t\t\t\t\t\tpipeline.Cancel()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn data\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n}\n\n// Some creates a filter that sets the result pointer to true if the given\n// predicate returns true for at least one of the data batches it is passed. If\n// cancelWhenKnown is true, this filter cancels the pipeline as soon as the\n// predicate returns true on a data batch.\nfunc Some(result *bool, cancelWhenKnown bool, predicate Predicate) Filter {\n\t*result = false\n\treturn func(pipeline *Pipeline, kind NodeKind, _ *int) (receiver Receiver, finalizer Finalizer) {\n\t\tswitch kind {\n\t\tcase Parallel:\n\t\t\tres := int32(0)\n\t\t\treceiver = func(_ int, data interface{}) interface{} {\n\t\t\t\tif predicate(data) {\n\t\t\t\t\tatomic.StoreInt32(&res, 1)\n\t\t\t\t\tif cancelWhenKnown {\n\t\t\t\t\t\tpipeline.Cancel()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn data\n\t\t\t}\n\t\t\tfinalizer = func() {\n\t\t\t\tif atomic.LoadInt32(&res) == 1 {\n\t\t\t\t\t*result = true\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\treceiver = func(_ int, data interface{}) interface{} {\n\t\t\t\tif predicate(data) {\n\t\t\t\t\t*result = true\n\t\t\t\t\tif cancelWhenKnown {\n\t\t\t\t\t\tpipeline.Cancel()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn data\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n}\n\n// NotAny creates a filter that sets the result pointer to true if the given\n// predicate returns false for every data batch. If cancelWhenKnown is true,\n// this filter cancels the pipeline as soon as the predicate returns true on a\n// data batch.\nfunc NotAny(result *bool, cancelWhenKnown bool, predicate Predicate) Filter {\n\t*result = true\n\treturn func(pipeline *Pipeline, kind NodeKind, _ *int) (receiver Receiver, finalizer Finalizer) {\n\t\tswitch kind {\n\t\tcase Parallel:\n\t\t\tres := int32(1)\n\t\t\treceiver = func(_ int, data interface{}) interface{} {\n\t\t\t\tif predicate(data) {\n\t\t\t\t\tatomic.StoreInt32(&res, 0)\n\t\t\t\t\tif cancelWhenKnown {\n\t\t\t\t\t\tpipeline.Cancel()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn data\n\t\t\t}\n\t\t\tfinalizer = func() {\n\t\t\t\tif atomic.LoadInt32(&res) == 0 {\n\t\t\t\t\t*result = false\n\t\t\t\t}\n\t\t\t}\n\t\tdefault:\n\t\t\treceiver = func(_ int, data interface{}) interface{} {\n\t\t\t\tif predicate(data) {\n\t\t\t\t\t*result = false\n\t\t\t\t\tif cancelWhenKnown {\n\t\t\t\t\t\tpipeline.Cancel()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn data\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n}\n\n// Slice creates a filter that appends all the data batches it sees to the\n// result. 
The result must represent a settable slice, for example by using the\n// address operator & on a given slice.\nfunc Slice(result interface{}) Filter {\n\tres := reflect.ValueOf(result).Elem()\n\treturn func(pipeline *Pipeline, kind NodeKind, _ *int) (receiver Receiver, finalizer Finalizer) {\n\t\tif (res.Kind() != reflect.Slice) || !res.CanSet() {\n\t\t\tpipeline.SetErr(errors.New(\"result is not a settable slice in pipeline.Slice\"))\n\t\t\treturn\n\t\t}\n\t\tswitch kind {\n\t\tcase Parallel:\n\t\t\tvar m sync.Mutex\n\t\t\treceiver = func(_ int, data interface{}) interface{} {\n\t\t\t\tif data != nil {\n\t\t\t\t\td := reflect.ValueOf(data)\n\t\t\t\t\tm.Lock()\n\t\t\t\t\tdefer m.Unlock()\n\t\t\t\t\tres.Set(reflect.AppendSlice(res, d))\n\t\t\t\t}\n\t\t\t\treturn data\n\t\t\t}\n\t\tdefault:\n\t\t\treceiver = func(_ int, data interface{}) interface{} {\n\t\t\t\tif data != nil {\n\t\t\t\t\tres.Set(reflect.AppendSlice(res, reflect.ValueOf(data)))\n\t\t\t\t}\n\t\t\t\treturn data\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n}\n\n// Count creates a filter that sets the result pointer to the total size of all\n// data batches it sees.\nfunc Count(result *int) Filter {\n\treturn func(pipeline *Pipeline, kind NodeKind, size *int) (receiver Receiver, finalizer Finalizer) {\n\t\tswitch {\n\t\tcase *size >= 0:\n\t\t\t*result = *size\n\t\tcase kind == Parallel:\n\t\t\tvar res = int64(0)\n\t\t\treceiver = func(_ int, data interface{}) interface{} {\n\t\t\t\tif data != nil {\n\t\t\t\t\td := reflect.ValueOf(data)\n\t\t\t\t\tatomic.AddInt64(&res, int64(d.Len()))\n\t\t\t\t}\n\t\t\t\treturn data\n\t\t\t}\n\t\t\tfinalizer = func() {\n\t\t\t\t*result = int(atomic.LoadInt64(&res))\n\t\t\t}\n\t\tdefault:\n\t\t\treceiver = func(_ int, data interface{}) interface{} {\n\t\t\t\tif data != nil {\n\t\t\t\t\td := reflect.ValueOf(data)\n\t\t\t\t\t*result += d.Len()\n\t\t\t\t}\n\t\t\t\treturn data\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n}\n\n// Limit creates an ordered node with a filter that caps the total size of all\n// data batches it passes to the next filter in the pipeline to the given limit.\n// If cancelWhenReached is true, this filter cancels the pipeline as soon as the\n// limit is reached. If limit is negative, all data is passed through\n// unmodified.\nfunc Limit(limit int, cancelWhenReached bool) Node {\n\treturn Ord(func(pipeline *Pipeline, _ NodeKind, size *int) (receiver Receiver, _ Finalizer) {\n\t\tswitch {\n\t\tcase limit < 0: // unlimited\n\t\tcase limit == 0:\n\t\t\t*size = 0\n\t\t\tif cancelWhenReached {\n\t\t\t\tpipeline.Cancel()\n\t\t\t}\n\t\t\treceiver = func(_ int, _ interface{}) interface{} { return nil }\n\t\tcase (*size < 0) || (*size > limit):\n\t\t\tif *size > 0 {\n\t\t\t\t*size = limit\n\t\t\t}\n\t\t\tseen := 0\n\t\t\treceiver = func(_ int, data interface{}) (result interface{}) {\n\t\t\t\tif seen >= limit {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\td := reflect.ValueOf(data)\n\t\t\t\tl := d.Len()\n\t\t\t\tif (seen + l) > limit {\n\t\t\t\t\tresult = d.Slice(0, limit-seen).Interface()\n\t\t\t\t\tseen = limit\n\t\t\t\t} else {\n\t\t\t\t\tresult = data\n\t\t\t\t\tseen += l\n\t\t\t\t}\n\t\t\t\tif cancelWhenReached && (seen == limit) {\n\t\t\t\t\tpipeline.Cancel()\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\treturn\n\t})\n}\n\n// Skip creates an ordered node with a filter that skips the first n elements\n// from the data batches it passes to the next filter in the pipeline. If n is\n// negative, no data is passed through, and the error value of the pipeline is\n// set to a non-nil value.\nfunc Skip(n int) Node {\n\treturn Ord(func(pipeline *Pipeline, _ NodeKind, size *int) (receiver Receiver, _ Finalizer) {\n\t\tswitch {\n\t\tcase n < 0: // skip everything\n\t\t\t*size = 0\n\t\t\tpipeline.SetErr(errors.New(\"negative skip count in pipeline.Skip\"))\n\t\t\treceiver = func(_ int, _ interface{}) interface{} { return nil }\n\t\tcase n == 0: // nothing to skip\n\t\tcase (*size < 0) || (*size > n):\n\t\t\tif *size > 0 {\n\t\t\t\t*size -= n\n\t\t\t}\n\t\t\tseen := 0\n\t\t\treceiver = func(_ int, data interface{}) (result interface{}) {\n\t\t\t\tif seen >= n {\n\t\t\t\t\tresult = data\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\td := reflect.ValueOf(data)\n\t\t\t\tl := d.Len()\n\t\t\t\tif (seen + l) > n {\n\t\t\t\t\tresult = d.Slice(n-seen, l).Interface()\n\t\t\t\t\tseen = n\n\t\t\t\t} else {\n\t\t\t\t\tseen += l\n\t\t\t\t}\n\t\t\t\treturn\n\t\t\t}\n\t\tcase *size <= n:\n\t\t\t*size = 0\n\t\t\treceiver = func(_ int, _ interface{}) interface{} { return nil }\n\t\t}\n\t\treturn\n\t})\n}\n"
  },
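  {
    "path": "pipeline/example_filters_test.go",
    "content": "package pipeline_test\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/exascience/pargo/pipeline\"\n)\n\n// Example_builtinFilters is a hypothetical sketch, not part of the pargo API,\n// that combines several of the predefined filters from filters.go: Every and\n// Some check predicates over the batches in a parallel node, while Slice and\n// Count collect the data and its total size in an ordered node.\nfunc Example_builtinFilters() {\n\tvar allPositive, someEven bool\n\tvar result []int\n\tvar count int\n\tvar p pipeline.Pipeline\n\tp.Source([]int{3, 1, 4, 1, 5, 9, 2, 6})\n\tp.Add(\n\t\tpipeline.Par(\n\t\t\tpipeline.Every(&allPositive, false, func(data interface{}) bool {\n\t\t\t\tfor _, v := range data.([]int) {\n\t\t\t\t\tif v <= 0 {\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn true\n\t\t\t}),\n\t\t\tpipeline.Some(&someEven, false, func(data interface{}) bool {\n\t\t\t\tfor _, v := range data.([]int) {\n\t\t\t\t\tif v%2 == 0 {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn false\n\t\t\t}),\n\t\t),\n\t\tpipeline.Ord(pipeline.Slice(&result), pipeline.Count(&count)),\n\t)\n\tp.Run()\n\tfmt.Println(result, count, allPositive, someEven)\n\n\t// Output:\n\t// [3 1 4 1 5 9 2 6] 8 true true\n}\n"
  },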
  {
    "path": "pipeline/lparnode.go",
    "content": "package pipeline\n\nimport (\n\t\"runtime\"\n\t\"sync\"\n)\n\ntype lparnode struct {\n\tlimit      int\n\tordered    bool\n\tcond       *sync.Cond\n\tchannel    chan dataBatch\n\twaitGroup  sync.WaitGroup\n\trun        int\n\tfilters    []Filter\n\treceivers  []Receiver\n\tfinalizers []Finalizer\n}\n\n// LimitedPar creates a parallel node with the given filters. The node uses at\n// most limit goroutines at the same time. If limit is 0, a reasonable default\n// is used instead. Even if limit is 0, the node is still limited. For unlimited\n// nodes, use Par instead.\nfunc LimitedPar(limit int, filters ...Filter) Node {\n\tif limit <= 0 {\n\t\tlimit = runtime.GOMAXPROCS(0)\n\t}\n\tif limit == 1 {\n\t\treturn &seqnode{kind: Sequential, filters: filters}\n\t}\n\treturn &lparnode{limit: limit, filters: filters}\n}\n\nfunc (node *lparnode) makeOrdered() {\n\tnode.ordered = true\n\tnode.cond = sync.NewCond(&sync.Mutex{})\n}\n\n// Implements the TryMerge method of the Node interface.\nfunc (node *lparnode) TryMerge(next Node) bool {\n\tif nxt, merge := next.(*lparnode); merge && (nxt.limit == node.limit) {\n\t\tnode.filters = append(node.filters, nxt.filters...)\n\t\tnode.receivers = append(node.receivers, nxt.receivers...)\n\t\tnode.finalizers = append(node.finalizers, nxt.finalizers...)\n\t\treturn true\n\t}\n\treturn false\n}\n\n// Implements the Begin method of the Node interface.\nfunc (node *lparnode) Begin(p *Pipeline, index int, dataSize *int) (keep bool) {\n\tnode.receivers, node.finalizers = ComposeFilters(p, Parallel, dataSize, node.filters)\n\tnode.filters = nil\n\tif keep = (len(node.receivers) > 0) || (len(node.finalizers) > 0); keep {\n\t\tnode.channel = make(chan dataBatch)\n\t\tnode.waitGroup.Add(node.limit)\n\t\tfor i := 0; i < node.limit; i++ {\n\t\t\tgo func() {\n\t\t\t\tdefer node.waitGroup.Done()\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-p.ctx.Done():\n\t\t\t\t\t\tif node.ordered {\n\t\t\t\t\t\t\tnode.cond.Broadcast()\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn\n\t\t\t\t\tcase batch, ok := <-node.channel:\n\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif node.ordered {\n\t\t\t\t\t\t\tnode.cond.L.Lock()\n\t\t\t\t\t\t\tif batch.seqNo != node.run {\n\t\t\t\t\t\t\t\tpanic(\"Invalid receive order in an ordered limited parallel pipeline node.\")\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tnode.run++\n\t\t\t\t\t\t\tnode.cond.L.Unlock()\n\t\t\t\t\t\t\tnode.cond.Broadcast()\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfeed(p, node.receivers, index, batch.seqNo, batch.data)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t}\n\treturn\n}\n\n// Implements the Feed method of the Node interface.\nfunc (node *lparnode) Feed(p *Pipeline, _ int, seqNo int, data interface{}) {\n\tif node.ordered {\n\t\tnode.cond.L.Lock()\n\t\tdefer node.cond.L.Unlock()\n\t\tfor {\n\t\t\tif node.run == seqNo {\n\t\t\t\tselect {\n\t\t\t\tcase <-p.ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\tcase node.channel <- dataBatch{seqNo, data}:\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase <-p.ctx.Done():\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tnode.cond.Wait()\n\t\t\t}\n\t\t}\n\t}\n\tselect {\n\tcase <-p.ctx.Done():\n\t\treturn\n\tcase node.channel <- dataBatch{seqNo, data}:\n\t\treturn\n\t}\n}\n\n// Implements the End method of the Node interface.\nfunc (node *lparnode) End() {\n\tclose(node.channel)\n\tnode.waitGroup.Wait()\n\tfor _, finalize := range node.finalizers {\n\t\tfinalize()\n\t}\n\tnode.receivers = nil\n\tnode.finalizers = nil\n}\n"
  },
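  {
    "path": "pipeline/example_limitedpar_test.go",
    "content": "package pipeline_test\n\nimport (\n\t\"fmt\"\n\t\"sync/atomic\"\n\n\t\"github.com/exascience/pargo/pipeline\"\n)\n\n// Example_limitedPar is a hypothetical sketch, not part of the pargo API, that\n// shows LimitedPar: the node processes batches in parallel, but with at most\n// the given number of goroutines, which can be useful when every batch needs a\n// bounded resource.\nfunc Example_limitedPar() {\n\tvar sum int64\n\tvar p pipeline.Pipeline\n\tp.Source([]int{1, 2, 3, 4, 5, 6, 7, 8})\n\tp.Add(\n\t\t// At most 4 goroutines run this receiver at the same time, so the\n\t\t// shared sum is updated atomically.\n\t\tpipeline.LimitedPar(4, pipeline.Receive(func(_ int, data interface{}) interface{} {\n\t\t\tfor _, v := range data.([]int) {\n\t\t\t\tatomic.AddInt64(&sum, int64(v*v))\n\t\t\t}\n\t\t\treturn data\n\t\t})),\n\t)\n\tp.Run()\n\tfmt.Println(sum)\n\n\t// Output:\n\t// 204\n}\n"
  },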
  {
    "path": "pipeline/parnode.go",
    "content": "package pipeline\n\nimport (\n\t\"sync\"\n)\n\ntype parnode struct {\n\twaitGroup  sync.WaitGroup\n\tfilters    []Filter\n\treceivers  []Receiver\n\tfinalizers []Finalizer\n}\n\n// Par creates a parallel node with the given filters.\nfunc Par(filters ...Filter) Node {\n\treturn &parnode{filters: filters}\n}\n\n// Implements the TryMerge method of the Node interface.\nfunc (node *parnode) TryMerge(next Node) bool {\n\tif nxt, merge := next.(*parnode); merge {\n\t\tnode.filters = append(node.filters, nxt.filters...)\n\t\tnode.receivers = append(node.receivers, nxt.receivers...)\n\t\tnode.finalizers = append(node.finalizers, nxt.finalizers...)\n\t\treturn true\n\t}\n\treturn false\n}\n\n// Implements the Begin method of the Node interface.\nfunc (node *parnode) Begin(p *Pipeline, _ int, dataSize *int) (keep bool) {\n\tnode.receivers, node.finalizers = ComposeFilters(p, Parallel, dataSize, node.filters)\n\tnode.filters = nil\n\tkeep = (len(node.receivers) > 0) || (len(node.finalizers) > 0)\n\treturn\n}\n\n// Implements the Feed method of the Node interface.\nfunc (node *parnode) Feed(p *Pipeline, index int, seqNo int, data interface{}) {\n\tnode.waitGroup.Add(1)\n\tgo func() {\n\t\tdefer node.waitGroup.Done()\n\t\tselect {\n\t\tcase <-p.ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t\tfeed(p, node.receivers, index, seqNo, data)\n\t\t}\n\t}()\n}\n\n// Implements the End method of the Node interface.\nfunc (node *parnode) End() {\n\tnode.waitGroup.Wait()\n\tfor _, finalize := range node.finalizers {\n\t\tfinalize()\n\t}\n\tnode.receivers = nil\n\tnode.finalizers = nil\n}\n"
  },
  {
    "path": "pipeline/pipeline.go",
    "content": "// Package pipeline provides means to construct and execute parallel pipelines.\n//\n// A Pipeline feeds batches of data through several functions that can be\n// specified to be executed in encounter order, in arbitrary sequential order,\n// or in parallel.  Ordered, sequential, or parallel stages can arbitrarily\n// alternate.\n//\n// A Pipeline consists of a Source object, and several Node objects.\n//\n// Source objects that are supported by this implementation are arrays, slices,\n// strings, channels, and bufio.Scanner objects, but other kinds of Source\n// objects can be added by user programs.\n//\n// Node objects can be specified to receive batches from the input source either\n// sequentially in encounter order, which is always the same order in which they\n// were originally encountered at the source; sequentially, but in arbitrary\n// order; or in parallel. Ordered nodes always receive batches in encounter\n// order even if they are preceded by arbitrary sequential, or even parallel\n// nodes.\n//\n// Node objects consist of filters, which are pairs of receiver and finalizer\n// functions. Each batch is passed to each receiver function, which can\n// transform and modify the batch for the next receiver function in the\n// pipeline. Each finalizer function is called once when all batches have been\n// passed through all receiver functions.\n//\n// Pipelines do not have an explicit representation for sinks. Instead, filters\n// can use side effects to generate results.\n//\n// Pipelines also support cancelation by way of the context package of Go's\n// standard library.\n//\n// An application of pipelines can be found in the elPrep tool at\n// https://github.com/exascience/elprep - specifically in\n// https://github.com/ExaScience/elprep/blob/master/sam/filter-pipeline.go\npackage pipeline\n\nimport (\n\t\"context\"\n\t\"runtime\"\n\t\"sync\"\n)\n\n// A Node object represents a sequence of filters which are together executed\n// either in encounter order, in arbitrary sequential order, or in parallel.\n//\n// The methods of this interface are typically not called by user programs, but\n// rather implemented by specific node types and called by pipelines. Ordered,\n// sequential, and parallel nodes are also implemented in this package, so that\n// user programs are typically not concerned with Node methods at all.\ntype Node interface {\n\n\t// TryMerge tries to merge node with the current node by appending its\n\t// filters to the filters of the current node, which succeeds if both nodes\n\t// are either sequential or parallel. The return value merged indicates\n\t// whether merging succeeded.\n\tTryMerge(node Node) (merged bool)\n\n\t// Begin informs this node that the pipeline is going to start to feed\n\t// batches of data to this node. The pipeline, the index of this node among\n\t// all the nodes in the pipeline, and the expected total size of all batches\n\t// combined are passed as parameters.\n\t//\n\t// The dataSize parameter is either positive, in which case it indicates the\n\t// expected total size of all batches that will eventually be passed to this\n\t// node's Feed method, or it is negative, in which case the expected size is\n\t// either unknown or too difficult to determine. 
The dataSize parameter is a\n\t// pointer whose contents can be modified by Begin, for example if this node\n\t// increases or decreases the total size for subsequent nodes, or if this\n\t// node can change dataSize from an unknown to a known value, or vice versa,\n\t// must change it from a known to an unknown value.\n\t//\n\t// A node may decide that, based on the given information, it will actually\n\t// not need to see any of the batches that are normally going to be passed\n\t// to it. In that case, it can return false as a result, and its Feed and\n\t// End method will not be called anymore.  Otherwise, it should return true\n\t// by default.\n\tBegin(p *Pipeline, index int, dataSize *int) (keep bool)\n\n\t// Feed is called for each batch of data. The pipeline, the index of this\n\t// node among all the nodes in the pipeline (which may be different from the\n\t// index number seen by Begin), the sequence number of the batch (according\n\t// to the encounter order), and the actual batch of data are passed as\n\t// parameters.\n\t//\n\t// The data parameter contains the batch of data, which is usually a slice\n\t// of a particular type. After the data has been processed by all filters of\n\t// this node, the node must call p.FeedForward with exactly the same index\n\t// and sequence numbers, but a potentially modified batch of data.\n\t// FeedForward must be called even when the data batch is or becomes empty,\n\t// to ensure that all sequence numbers are seen by subsequent nodes.\n\tFeed(p *Pipeline, index int, seqNo int, data interface{})\n\n\t// End is called after all batches have been passed to Feed. This allows the\n\t// node to release resources and call the finalizers of its filters.\n\tEnd()\n}\n\n// A Pipeline is a parallel pipeline that can feed batches of data fetched from\n// a source through several nodes that are ordered, sequential, or parallel.\n//\n// The zero Pipeline is valid and empty.\n//\n// A Pipeline must not be copied after first use.\ntype Pipeline struct {\n\tmutex        sync.RWMutex\n\terr          error\n\tctx          context.Context\n\tcancel       context.CancelFunc\n\tsource       Source\n\tnodes        []Node\n\tnofBatches   int\n\tbatchInc     int\n\tmaxBatchSize int\n}\n\n// Err returns the current error value for this pipeline, which may be nil if no\n// error has occurred so far.\n//\n// Err and SetErr are safe to be concurrently invoked.\nfunc (p *Pipeline) Err() (err error) {\n\tp.mutex.RLock()\n\terr = p.err\n\tp.mutex.RUnlock()\n\treturn err\n}\n\n// SetErr attempts to set a new error value for this pipeline, unless it already\n// has a non-nil error value. If the attempt is successful, SetErr also cancels\n// the pipeline, and returns true. 
If the attempt is not successful, SetErr\n// returns false.\n//\n// SetErr and Err are safe to be concurrently invoked, for example from the\n// different goroutines executing filters of parallel nodes in this pipeline.\nfunc (p *Pipeline) SetErr(err error) bool {\n\tp.mutex.Lock()\n\tif p.err == nil {\n\t\tp.err = err\n\t\tp.mutex.Unlock()\n\t\tp.cancel()\n\t\treturn true\n\t}\n\tp.mutex.Unlock()\n\treturn false\n}\n\n// Context returns this pipeline's context.\nfunc (p *Pipeline) Context() context.Context {\n\treturn p.ctx\n}\n\n// Cancel calls the cancel function of this pipeline's context.\nfunc (p *Pipeline) Cancel() {\n\tp.cancel()\n}\n\n// Source sets the data source for this pipeline.\n//\n// If source does not implement the Source interface, the pipeline uses\n// reflection to create a proper source for arrays, slices, strings, or\n// channels.\n//\n// It is safe to call Source multiple times before Run or RunWithContext is\n// called, in which case only the last call to Source is effective.\nfunc (p *Pipeline) Source(source interface{}) {\n\tswitch src := source.(type) {\n\tcase Source:\n\t\tp.source = src\n\tdefault:\n\t\tp.source = reflectSource(source)\n\t}\n}\n\n// Add appends nodes to the end of this pipeline.\nfunc (p *Pipeline) Add(nodes ...Node) {\n\tfor _, node := range nodes {\n\t\tif l := len(p.nodes); (l == 0) || !p.nodes[l-1].TryMerge(node) {\n\t\t\tp.nodes = append(p.nodes, node)\n\t\t}\n\t}\n}\n\n// NofBatches sets or gets the number of batches that are created from the data\n// source for this pipeline, if the expected total size for this pipeline's data\n// source is known or can be determined easily.\n//\n// NofBatches can be called safely by user programs before Run or RunWithContext\n// is called.\n//\n// If user programs do not call NofBatches, or call them with a value < 1, then\n// the pipeline will choose a reasonable default value that takes\n// runtime.GOMAXPROCS(0) into account.\n//\n// If the expected total size for this pipeline's data source is unknown, or is\n// difficult to determine, use SetVariableBatchSize to influence batch sizes.\nfunc (p *Pipeline) NofBatches(n int) (nofBatches int) {\n\tif n < 1 {\n\t\tnofBatches = p.nofBatches\n\t\tif nofBatches < 1 {\n\t\t\tnofBatches = 2 * runtime.GOMAXPROCS(0)\n\t\t\tp.nofBatches = nofBatches\n\t\t}\n\t} else {\n\t\tnofBatches = n\n\t\tp.nofBatches = n\n\t}\n\treturn\n}\n\nconst (\n\tdefaultBatchInc     = 1024\n\tdefaultMaxBatchSize = 0x2000000\n)\n\n// SetVariableBatchSize sets the batch size(s) for the batches that are created\n// from the data source for this pipeline, if the expected total size for this\n// pipeline's data source is unknown or difficult to determine.\n//\n// SetVariableBatchSize can be called safely by user programs before Run or\n// RunWithContext is called.\n//\n// If user programs do not call SetVariableBatchSize, or pass a value < 1 to any\n// of the two parameters, then the pipeline will choose a reasonable default\n// value for that respective parameter.\n//\n// The pipeline will start with batchInc as a batch size, and increase the batch\n// size for every subsequent batch by batchInc to accomodate data sources of\n// different total sizes. 
The batch size will never be larger than maxBatchSize,\n// though.\n//\n// If the expected total size for this pipeline's data source is known, or can\n// be determined easily, use NofBatches to influence the batch size.\nfunc (p *Pipeline) SetVariableBatchSize(batchInc, maxBatchSize int) {\n\tp.batchInc = batchInc\n\tp.maxBatchSize = maxBatchSize\n}\n\nfunc (p *Pipeline) finalizeVariableBatchSize() {\n\tif p.batchInc < 1 {\n\t\tp.batchInc = defaultBatchInc\n\t}\n\tif p.maxBatchSize < 1 {\n\t\tp.maxBatchSize = defaultMaxBatchSize\n\t}\n}\n\nfunc (p *Pipeline) nextBatchSize(batchSize int) (result int) {\n\tresult = batchSize + p.batchInc\n\tif result > p.maxBatchSize {\n\t\tresult = p.maxBatchSize\n\t}\n\treturn\n}\n\n// RunWithContext initiates pipeline execution.\n//\n// It expects a context and a cancel function as parameters, for example from\n// context.WithCancel(context.Background()). It does not ensure that the cancel\n// function is called at least once, so this must be ensured by the function\n// calling RunWithContext.\n//\n// RunWithContext should only be called after a data source has been set using\n// the Source method, and one or more Node objects have been added to the\n// pipeline using the Add method. NofBatches can be called before RunWithContext\n// to deal with load imbalance, but this is not necessary since RunWithContext\n// chooses a reasonable default value.\n//\n// RunWithContext prepares the data source, tells each node that batches are\n// going to be sent to them by calling Begin, and then fetches batches from the\n// data source and sends them to the nodes. Once the data source is depleted,\n// the nodes are informed that the end of the data source has been reached.\nfunc (p *Pipeline) RunWithContext(ctx context.Context, cancel context.CancelFunc) {\n\tif p.err != nil {\n\t\treturn\n\t}\n\tp.ctx, p.cancel = ctx, cancel\n\tdataSize := p.source.Prepare(p.ctx)\n\tfilteredSize := dataSize\n\tfor index := 0; index < len(p.nodes); {\n\t\tif p.nodes[index].Begin(p, index, &filteredSize) {\n\t\t\tindex++\n\t\t} else {\n\t\t\tp.nodes = append(p.nodes[:index], p.nodes[index+1:]...)\n\t\t}\n\t}\n\tif p.err != nil {\n\t\treturn\n\t}\n\tif len(p.nodes) > 0 {\n\t\tfor index := 0; index < len(p.nodes)-1; {\n\t\t\tif p.nodes[index].TryMerge(p.nodes[index+1]) {\n\t\t\t\tp.nodes = append(p.nodes[:index+1], p.nodes[index+2:]...)\n\t\t\t} else {\n\t\t\t\tindex++\n\t\t\t}\n\t\t}\n\t\tfor index := len(p.nodes) - 1; index >= 0; index-- {\n\t\t\tif _, ok := p.nodes[index].(*strictordnode); ok {\n\t\t\t\tfor index = index - 1; index >= 0; index-- {\n\t\t\t\t\tswitch node := p.nodes[index].(type) {\n\t\t\t\t\tcase *seqnode:\n\t\t\t\t\t\tnode.kind = Ordered\n\t\t\t\t\tcase *lparnode:\n\t\t\t\t\t\tnode.makeOrdered()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif dataSize < 0 {\n\t\t\tp.finalizeVariableBatchSize()\n\t\t\tfor seqNo, batchSize := 0, p.batchInc; p.source.Fetch(batchSize) > 0; seqNo, batchSize = seqNo+1, p.nextBatchSize(batchSize) {\n\t\t\t\tp.nodes[0].Feed(p, 0, seqNo, p.source.Data())\n\t\t\t\tif err := p.source.Err(); err != nil {\n\t\t\t\t\tp.SetErr(err)\n\t\t\t\t\treturn\n\t\t\t\t} else if p.Err() != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tbatchSize := ((dataSize - 1) / p.NofBatches(0)) + 1\n\t\t\tif batchSize == 0 {\n\t\t\t\tbatchSize = 1\n\t\t\t}\n\t\t\tfor seqNo := 0; p.source.Fetch(batchSize) > 0; seqNo++ {\n\t\t\t\tp.nodes[0].Feed(p, 0, seqNo, p.source.Data())\n\t\t\t\tif err := p.source.Err(); err != nil 
{\n\t\t\t\t\tp.SetErr(err)\n\t\t\t\t\treturn\n\t\t\t\t} else if p.Err() != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tfor _, node := range p.nodes {\n\t\tnode.End()\n\t}\n\tif p.err == nil {\n\t\tp.err = p.source.Err()\n\t}\n}\n\n// Run initiates pipeline execution by calling\n// RunWithContext(context.WithCancel(context.Background())), and ensures that\n// the cancel function is called at least once when the pipeline is done.\n//\n// Run should only be called after a data source has been set using the Source\n// method, and one or more Node objects have been added to the pipeline using\n// the Add method. NofBatches can be called before Run to deal with load\n// imbalance, but this is not necessary since Run chooses a reasonable default\n// value.\n//\n// Run prepares the data source, tells each node that batches are going to be\n// sent to them by calling Begin, and then fetches batches from the data source\n// and sends them to the nodes. Once the data source is depleted, the nodes are\n// informed that the end of the data source has been reached.\nfunc (p *Pipeline) Run() {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\tp.RunWithContext(ctx, cancel)\n}\n\n// FeedForward must be called in the Feed method of a node to forward a\n// potentially modified data batch to the next node in the current pipeline.\n//\n// FeedForward is used in Node implementations. User programs typically do not\n// call FeedForward.\n//\n// FeedForward must be called with the pipeline received as a parameter by Feed,\n// and must pass the same index and seqNo received by Feed. The data parameter\n// can be either a modified or an unmodified data batch. FeedForward must always\n// be called, even if the data batch is unmodified, and even if the data batch\n// is or becomes empty.\nfunc (p *Pipeline) FeedForward(index int, seqNo int, data interface{}) {\n\tif index++; index < len(p.nodes) {\n\t\tp.nodes[index].Feed(p, index, seqNo, data)\n\t}\n}\n"
  },
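  {
    "path": "pipeline/example_err_test.go",
    "content": "package pipeline_test\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com/exascience/pargo/pipeline\"\n)\n\n// Example_pipelineErr is a hypothetical sketch, not part of the pargo API,\n// that shows how a filter can report an error with SetErr, which also cancels\n// the pipeline, and how the caller checks Err after Run returns. The error\n// text is made up for illustration.\nfunc Example_pipelineErr() {\n\tvar p pipeline.Pipeline\n\tp.Source([]int{1, 2, 3, -1, 5})\n\tp.Add(\n\t\tpipeline.Seq(pipeline.Receive(func(_ int, data interface{}) interface{} {\n\t\t\tfor _, v := range data.([]int) {\n\t\t\t\tif v < 0 {\n\t\t\t\t\t// The first SetErr wins; it also cancels the pipeline so\n\t\t\t\t\t// that remaining batches can be dropped.\n\t\t\t\t\tp.SetErr(errors.New(\"negative input\"))\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn data\n\t\t})),\n\t)\n\tp.Run()\n\tfmt.Println(p.Err())\n\n\t// Output:\n\t// negative input\n}\n"
  },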
  {
    "path": "pipeline/seqnode.go",
    "content": "package pipeline\n\nimport (\n\t\"sync\"\n)\n\ntype (\n\tdataBatch struct {\n\t\tseqNo int\n\t\tdata  interface{}\n\t}\n\n\tseqnode struct {\n\t\tkind       NodeKind\n\t\tchannel    chan dataBatch\n\t\twaitGroup  sync.WaitGroup\n\t\tfilters    []Filter\n\t\treceivers  []Receiver\n\t\tfinalizers []Finalizer\n\t}\n)\n\n// Ord creates an ordered node with the given filters.\nfunc Ord(filters ...Filter) Node {\n\treturn &seqnode{kind: Ordered, filters: filters}\n}\n\n// Seq creates a sequential node with the given filters.\nfunc Seq(filters ...Filter) Node {\n\treturn &seqnode{kind: Sequential, filters: filters}\n}\n\n// Implements the TryMerge method of the Node interface.\nfunc (node *seqnode) TryMerge(next Node) bool {\n\tif nxt, merge := next.(*seqnode); merge && (len(nxt.filters) > 0) {\n\t\tif nxt.kind == Ordered {\n\t\t\tnode.kind = Ordered\n\t\t}\n\t\tnode.filters = append(node.filters, nxt.filters...)\n\t\tnode.receivers = append(node.receivers, nxt.receivers...)\n\t\tnode.finalizers = append(node.finalizers, nxt.finalizers...)\n\t\treturn true\n\t}\n\treturn false\n}\n\n// Implements the Begin method of the Node interface.\nfunc (node *seqnode) Begin(p *Pipeline, index int, dataSize *int) (keep bool) {\n\tnode.receivers, node.finalizers = ComposeFilters(p, node.kind, dataSize, node.filters)\n\tnode.filters = nil\n\tif keep = (len(node.receivers) > 0) || (len(node.finalizers) > 0); keep {\n\t\tnode.channel = make(chan dataBatch)\n\t\tnode.waitGroup.Add(1)\n\t\tswitch node.kind {\n\t\tcase Sequential:\n\t\t\tgo func() {\n\t\t\t\tdefer node.waitGroup.Done()\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-p.ctx.Done():\n\t\t\t\t\t\treturn\n\t\t\t\t\tcase batch, ok := <-node.channel:\n\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfeed(p, node.receivers, index, batch.seqNo, batch.data)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\tcase Ordered:\n\t\t\tgo func() {\n\t\t\t\tdefer node.waitGroup.Done()\n\t\t\t\tstash := make(map[int]interface{})\n\t\t\t\trun := 0\n\t\t\t\tfor {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-p.ctx.Done():\n\t\t\t\t\t\treturn\n\t\t\t\t\tcase batch, ok := <-node.channel:\n\t\t\t\t\t\tswitch {\n\t\t\t\t\t\tcase !ok:\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\tcase batch.seqNo < run:\n\t\t\t\t\t\t\tpanic(\"Invalid receive order in an ordered pipeline node.\")\n\t\t\t\t\t\tcase batch.seqNo > run:\n\t\t\t\t\t\t\tstash[batch.seqNo] = batch.data\n\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\tfeed(p, node.receivers, index, batch.seqNo, batch.data)\n\t\t\t\t\t\tcheckStash:\n\t\t\t\t\t\t\tfor {\n\t\t\t\t\t\t\t\tselect {\n\t\t\t\t\t\t\t\tcase <-p.ctx.Done():\n\t\t\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t\t\tdefault:\n\t\t\t\t\t\t\t\t\trun++\n\t\t\t\t\t\t\t\t\tdata, ok := stash[run]\n\t\t\t\t\t\t\t\t\tif !ok {\n\t\t\t\t\t\t\t\t\t\tbreak checkStash\n\t\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t\t\tdelete(stash, run)\n\t\t\t\t\t\t\t\t\tfeed(p, node.receivers, index, run, data)\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\tdefault:\n\t\t\tpanic(\"Invalid NodeKind in a sequential pipeline node.\")\n\t\t}\n\t}\n\treturn\n}\n\n// Implements the Feed method of the Node interface.\nfunc (node *seqnode) Feed(p *Pipeline, _ int, seqNo int, data interface{}) {\n\tselect {\n\tcase <-p.ctx.Done():\n\t\treturn\n\tcase node.channel <- dataBatch{seqNo, data}:\n\t\treturn\n\t}\n}\n\n// Implements the End method of the Node interface.\nfunc (node *seqnode) End() {\n\tclose(node.channel)\n\tnode.waitGroup.Wait()\n\tfor _, finalize := range 
node.finalizers {\n\t\tfinalize()\n\t}\n\tnode.receivers = nil\n\tnode.finalizers = nil\n}\n"
  },
  {
    "path": "pipeline/source.go",
    "content": "package pipeline\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"io\"\n\t\"reflect\"\n)\n\n// A Source represents an object that can generate data batches for pipelines.\ntype Source interface {\n\t// Err returns an error value or nil\n\tErr() error\n\n\t// Prepare receives a pipeline context and informs the pipeline what the\n\t// total expected size of all data batches is. The return value is -1 if the\n\t// total size is unknown or difficult to determine.\n\tPrepare(ctx context.Context) (size int)\n\n\t// Fetch gets a data batch of the requested size from the source. It returns\n\t// the size of the data batch that it was actually able to fetch. It returns\n\t// 0 if there is no more data to be fetched from the source; the pipeline\n\t// will then make no further attempts to fetch more elements.\n\tFetch(size int) (fetched int)\n\n\t// Data returns the last fetched data batch.\n\tData() interface{}\n}\n\ntype sliceSource struct {\n\tvalue reflect.Value\n\tsize  int\n\tdata  interface{}\n}\n\nfunc newSliceSource(value reflect.Value) *sliceSource {\n\tsize := value.Len()\n\treturn &sliceSource{value: value.Slice(0, size), size: size}\n}\n\nfunc (src *sliceSource) Err() error {\n\treturn nil\n}\n\nfunc (src *sliceSource) Prepare(_ context.Context) int {\n\treturn src.size\n}\n\nfunc (src *sliceSource) Fetch(n int) (fetched int) {\n\tswitch {\n\tcase src.size == 0:\n\t\tsrc.data = nil\n\tcase n >= src.size:\n\t\tfetched = src.size\n\t\tsrc.data = src.value.Interface()\n\t\tsrc.value = reflect.ValueOf(nil)\n\t\tsrc.size = 0\n\tdefault:\n\t\tfetched = n\n\t\tsrc.data = src.value.Slice(0, n).Interface()\n\t\tsrc.value = src.value.Slice(n, src.size)\n\t\tsrc.size -= n\n\t}\n\treturn\n}\n\nfunc (src *sliceSource) Data() interface{} {\n\treturn src.data\n}\n\ntype chanSource struct {\n\tcases []reflect.SelectCase\n\tzero  reflect.Value\n\tdata  interface{}\n}\n\nfunc newChanSource(value reflect.Value) *chanSource {\n\tzeroElem := value.Type().Elem()\n\treturn &chanSource{\n\t\tcases: []reflect.SelectCase{{Dir: reflect.SelectRecv, Chan: value}},\n\t\tzero:  reflect.Zero(reflect.SliceOf(zeroElem)),\n\t}\n}\n\nfunc (src *chanSource) Err() error {\n\treturn nil\n}\n\nfunc (src *chanSource) Prepare(ctx context.Context) (size int) {\n\tsrc.cases = append(src.cases, reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(ctx.Done())})\n\treturn -1\n}\n\nfunc (src *chanSource) Fetch(n int) (fetched int) {\n\tdata := src.zero\n\tfor fetched = 0; fetched < n; fetched++ {\n\t\tif chosen, element, ok := reflect.Select(src.cases); (chosen == 0) && ok {\n\t\t\tdata = reflect.Append(data, element)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tsrc.data = data.Interface()\n\treturn\n}\n\nfunc (src *chanSource) Data() interface{} {\n\treturn src.data\n}\n\nfunc reflectSource(source interface{}) Source {\n\tswitch value := reflect.ValueOf(source); value.Kind() {\n\tcase reflect.Array, reflect.Slice, reflect.String:\n\t\treturn newSliceSource(value)\n\tcase reflect.Chan:\n\t\treturn newChanSource(value)\n\tdefault:\n\t\tpanic(\"A default pipeline source is not of kind Array, Slice, String, or Chan.\")\n\t}\n}\n\n// Scanner is a wrapper around bufio.Scanner so it can act as a data source for\n// pipelines. It fetches strings.\ntype Scanner struct {\n\t*bufio.Scanner\n\tdata interface{}\n}\n\n// NewScanner returns a new Scanner to read from r. 
The split function defaults\n// to bufio.ScanLines.\nfunc NewScanner(r io.Reader) *Scanner {\n\treturn &Scanner{Scanner: bufio.NewScanner(r)}\n}\n\n// Prepare implements the method of the Source interface.\nfunc (src *Scanner) Prepare(_ context.Context) (size int) {\n\treturn -1\n}\n\n// Fetch implements the method of the Source interface.\nfunc (src *Scanner) Fetch(n int) (fetched int) {\n\tvar data []string\n\tfor fetched = 0; fetched < n; fetched++ {\n\t\tif src.Scan() {\n\t\t\tdata = append(data, src.Text())\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tsrc.data = data\n\treturn\n}\n\n// Data implements the method of the Source interface.\nfunc (src *Scanner) Data() interface{} {\n\treturn src.data\n}\n\n// BytesScanner is a wrapper around bufio.Scanner so it can act as a data source\n// for pipelines. It fetches slices of bytes.\ntype BytesScanner struct {\n\t*bufio.Scanner\n\tdata interface{}\n}\n\n// NewBytesScanner returns a new Scanner to read from r. The split function\n// defaults to bufio.ScanLines.\nfunc NewBytesScanner(r io.Reader) *BytesScanner {\n\treturn &BytesScanner{Scanner: bufio.NewScanner(r)}\n}\n\n// Prepare implements the method of the Source interface.\nfunc (src *BytesScanner) Prepare(_ context.Context) (size int) {\n\treturn -1\n}\n\n// Fetch implements the method of the Source interface.\nfunc (src *BytesScanner) Fetch(n int) (fetched int) {\n\tvar data [][]byte\n\tfor fetched = 0; fetched < n; fetched++ {\n\t\tif src.Scan() {\n\t\t\tdata = append(data, append([]byte(nil), src.Bytes()...))\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tsrc.data = data\n\treturn\n}\n\n// Data implements the method of the Source interface.\nfunc (src *BytesScanner) Data() interface{} {\n\treturn src.data\n}\n\n// Func is a generic source that generates data batches\n// by repeatedly calling a function.\ntype Func struct {\n\tdata interface{}\n\terr error\n\tsize int\n\tfetch func(size int) (data interface{}, fetched int, err error)\n}\n\n// NewFunc returns a new Func to generate data batches\n// by repeatedly calling fetch.\n//\n// The size parameter informs the pipeline what the total\n// expected size of all data batches is. Pass -1 if the\n// total size is unknown or difficult to determine.\n//\n// The fetch function returns a data batch of the requested\n// size. It returns the size of the data batch that it was\n// actually able to fetch. 
It returns 0 if there is no more\n// data to be fetched from the source; the pipeline will\n// then make no further attempts to fetch more elements.\n//\n// The fetch function can also return an error if necessary.\nfunc NewFunc(size int, fetch func(size int) (data interface{}, fetched int, err error)) *Func {\n\treturn &Func{size: size, fetch: fetch}\n}\n\n// Err implements the method of the Source interface.\nfunc (f *Func) Err() error {\n\treturn f.err\n}\n\n// Prepare implements the method of the Source interface.\nfunc (f *Func) Prepare(_ context.Context) int {\n\treturn f.size\n}\n\n// Fetch implements the method of the Source interface.\nfunc (f *Func) Fetch(size int) (fetched int) {\n\tf.data, fetched, f.err = f.fetch(size)\n\treturn\n}\n\n// Data implements the method of the Source interface.\nfunc (f *Func) Data() interface{} {\n\treturn f.data\n}\n\n// SingletonChan is similar to a regular chan source,\n// except it only accepts and passes through single\n// elements instead of creating slices of elements\n// from the input channel.\ntype SingletonChan struct {\n\tcases []reflect.SelectCase\n\tzero reflect.Value\n\tdata interface{}\n}\n\n// NewSingletonChan returns a new SingletonChan to read from\n// the given channel.\nfunc NewSingletonChan(channel interface{}) *SingletonChan {\n\tvalue := reflect.ValueOf(channel)\n\tif value.Kind() != reflect.Chan {\n\t\tpanic(\"parameter for pargo.pipeline.NewSingletonChan is not a channel\")\n\t}\n\treturn &SingletonChan{\n\t\tcases: []reflect.SelectCase{{Dir: reflect.SelectRecv, Chan: value}},\n\t\tzero: reflect.Zero(value.Type().Elem()),\n\t}\n}\n\n// Err implements the method of the Source interface.\nfunc (src *SingletonChan) Err() error {\n\treturn nil\n}\n\n// Prepare implements the method of the Source interface.\nfunc (src *SingletonChan) Prepare(ctx context.Context) (size int) {\n\tsrc.cases = append(src.cases, reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(ctx.Done())})\n\treturn -1\n}\n\n// Fetch implements the method of the Source interface.\nfunc (src *SingletonChan) Fetch(n int) (fetched int) {\n\tif chosen, element, ok := reflect.Select(src.cases); (chosen == 0) && ok {\n\t\tsrc.data = element.Interface()\n\t\treturn 1\n\t}\n\tsrc.data = src.zero\n\treturn 0\n}\n\n// Data implements the method of the Source interface.\nfunc (src *SingletonChan) Data() interface{} {\n\treturn src.data\n}\n"
  },
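  {
    "path": "pipeline/example_funcsource_test.go",
    "content": "package pipeline_test\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/exascience/pargo/pipeline\"\n)\n\n// Example_funcSource is a hypothetical sketch, not part of the pargo API, that\n// shows NewFunc used as a data source: the fetch callback produces batches of\n// ints until a fixed total has been generated, and the size is reported as\n// unknown (-1), so the pipeline falls back to variable batch sizes.\nfunc Example_funcSource() {\n\tnext, remaining := 1, 10\n\tsource := pipeline.NewFunc(-1, func(size int) (interface{}, int, error) {\n\t\tif remaining == 0 {\n\t\t\t// Returning 0 tells the pipeline that the source is exhausted.\n\t\t\treturn nil, 0, nil\n\t\t}\n\t\tif size > remaining {\n\t\t\tsize = remaining\n\t\t}\n\t\tbatch := make([]int, size)\n\t\tfor i := range batch {\n\t\t\tbatch[i] = next\n\t\t\tnext++\n\t\t}\n\t\tremaining -= size\n\t\treturn batch, size, nil\n\t})\n\tvar sum int\n\tvar p pipeline.Pipeline\n\tp.Source(source)\n\tp.Add(pipeline.Seq(pipeline.Receive(func(_ int, data interface{}) interface{} {\n\t\tfor _, v := range data.([]int) {\n\t\t\tsum += v\n\t\t}\n\t\treturn data\n\t})))\n\tp.Run()\n\tfmt.Println(sum)\n\n\t// Output:\n\t// 55\n}\n"
  },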
  {
    "path": "pipeline/strictordnode.go",
    "content": "package pipeline\n\nimport (\n\t\"sync\"\n)\n\ntype strictordnode struct {\n\tcond       *sync.Cond\n\tchannel    chan dataBatch\n\twaitGroup  sync.WaitGroup\n\trun        int\n\tfilters    []Filter\n\treceivers  []Receiver\n\tfinalizers []Finalizer\n}\n\n// StrictOrd creates an ordered node with the given filters.\nfunc StrictOrd(filters ...Filter) Node {\n\treturn &strictordnode{filters: filters}\n}\n\n// Implements the TryMerge method of the Node interface.\nfunc (node *strictordnode) TryMerge(next Node) bool {\n\tswitch nxt := next.(type) {\n\tcase *seqnode:\n\t\tnode.filters = append(node.filters, nxt.filters...)\n\t\tnode.receivers = append(node.receivers, nxt.receivers...)\n\t\tnode.finalizers = append(node.finalizers, nxt.finalizers...)\n\t\treturn true\n\tcase *strictordnode:\n\t\tnode.filters = append(node.filters, nxt.filters...)\n\t\tnode.receivers = append(node.receivers, nxt.receivers...)\n\t\tnode.finalizers = append(node.finalizers, nxt.finalizers...)\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\n//Implements the Begin method of the Node interface.\nfunc (node *strictordnode) Begin(p *Pipeline, index int, dataSize *int) (keep bool) {\n\tnode.receivers, node.finalizers = ComposeFilters(p, Ordered, dataSize, node.filters)\n\tnode.filters = nil\n\tif keep = (len(node.receivers) > 0) || (len(node.finalizers) > 0); keep {\n\t\tnode.cond = sync.NewCond(&sync.Mutex{})\n\t\tnode.channel = make(chan dataBatch)\n\t\tnode.waitGroup.Add(1)\n\t\tgo func() {\n\t\t\tdefer node.waitGroup.Done()\n\t\t\tfor {\n\t\t\t\tselect {\n\t\t\t\tcase <-p.ctx.Done():\n\t\t\t\t\tnode.cond.Broadcast()\n\t\t\t\t\treturn\n\t\t\t\tcase batch, ok := <-node.channel:\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tnode.cond.L.Lock()\n\t\t\t\t\tif batch.seqNo != node.run {\n\t\t\t\t\t\tpanic(\"Invalid receive order in a strictly ordered pipeline node.\")\n\t\t\t\t\t}\n\t\t\t\t\tnode.run++\n\t\t\t\t\tnode.cond.L.Unlock()\n\t\t\t\t\tnode.cond.Broadcast()\n\t\t\t\t\tfeed(p, node.receivers, index, batch.seqNo, batch.data)\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\treturn\n}\n\n// Implements the Feed method of the Node interface.\nfunc (node *strictordnode) Feed(p *Pipeline, _ int, seqNo int, data interface{}) {\n\tnode.cond.L.Lock()\n\tdefer node.cond.L.Unlock()\n\tfor {\n\t\tif node.run == seqNo {\n\t\t\tselect {\n\t\t\tcase <-p.ctx.Done():\n\t\t\t\treturn\n\t\t\tcase node.channel <- dataBatch{seqNo, data}:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tselect {\n\t\tcase <-p.ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t\tnode.cond.Wait()\n\t\t}\n\t}\n}\n\n// Implements the End method of the Node interface.\nfunc (node *strictordnode) End() {\n\tclose(node.channel)\n\tnode.waitGroup.Wait()\n\tfor _, finalize := range node.finalizers {\n\t\tfinalize()\n\t}\n\tnode.receivers = nil\n\tnode.finalizers = nil\n}\n"
  },
  {
    "path": "sequential/sequential.go",
    "content": "// Package sequential provides sequential implementations of the functions\n// provided by the parallel and speculative packages. This is useful for testing\n// and debugging.\n//\n// It is not recommended to use the implementations of this package for any\n// other purpose, because they are almost certainly too inefficient for regular\n// sequential programs.\npackage sequential\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/exascience/pargo/internal\"\n)\n\n// Reduce receives one or more functions, executes them sequentially, and\n// combines their results sequentially.\n//\n// Partial results are combined with the join function.\nfunc Reduce(\n\tjoin func(x, y interface{}) interface{},\n\tfirstFunction func() interface{},\n\tmoreFunctions ...func() interface{},\n) interface{} {\n\tresult := firstFunction()\n\tfor _, f := range moreFunctions {\n\t\tresult = join(result, f())\n\t}\n\treturn result\n}\n\n// ReduceFloat64 receives one or more functions, executes them sequentially, and\n// combines their results sequentially.\n//\n// Partial results are combined with the join function.\nfunc ReduceFloat64(\n\tjoin func(x, y float64) float64,\n\tfirstFunction func() float64,\n\tmoreFunctions ...func() float64,\n) float64 {\n\tresult := firstFunction()\n\tfor _, f := range moreFunctions {\n\t\tresult = join(result, f())\n\t}\n\treturn result\n}\n\n// ReduceFloat64Sum receives zero or more functions, executes them sequentially,\n// and adds their results sequentially.\nfunc ReduceFloat64Sum(functions ...func() float64) float64 {\n\tresult := float64(0)\n\tfor _, f := range functions {\n\t\tresult += f()\n\t}\n\treturn result\n}\n\n// ReduceFloat64Product receives zero or more functions, executes them\n// sequentially, and multiplies their results sequentially.\nfunc ReduceFloat64Product(functions ...func() float64) float64 {\n\tresult := float64(1)\n\tfor _, f := range functions {\n\t\tresult *= f()\n\t}\n\treturn result\n}\n\n// ReduceInt receives one or more functions, executes them sequentially, and\n// combines their results sequentially.\n//\n// Partial results are combined with the join function.\nfunc ReduceInt(\n\tjoin func(x, y int) int,\n\tfirstFunction func() int,\n\tmoreFunctions ...func() int,\n) int {\n\tresult := firstFunction()\n\tfor _, f := range moreFunctions {\n\t\tresult = join(result, f())\n\t}\n\treturn result\n}\n\n// ReduceIntSum receives zero or more functions, executes them sequentially, and\n// adds their results sequentially.\nfunc ReduceIntSum(functions ...func() int) int {\n\tresult := 0\n\tfor _, f := range functions {\n\t\tresult += f()\n\t}\n\treturn result\n}\n\n// ReduceIntProduct receives zero or more functions, executes them sequentially,\n// and multiplies their results sequentially.\nfunc ReduceIntProduct(functions ...func() int) int {\n\tresult := 1\n\tfor _, f := range functions {\n\t\tresult *= f()\n\t}\n\treturn result\n}\n\n// ReduceString receives one or more functions, executes them in parallel, and\n// combines their results in parallel.\n//\n// Partial results are combined with the join function.\nfunc ReduceString(\n\tjoin func(x, y string) string,\n\tfirstFunction func() string,\n\tmoreFunctions ...func() string,\n) string {\n\tresult := firstFunction()\n\tfor _, f := range moreFunctions {\n\t\tresult = join(result, f())\n\t}\n\treturn result\n}\n\n// ReduceStringSum receives zero or more functions, executes them in parallel,\n// and concatenates their results in parallel.\nfunc ReduceStringSum(functions ...func() string) string 
{\n\tresult := \"\"\n\tfor _, f := range functions {\n\t\tresult += f()\n\t}\n\treturn result\n}\n\n// Do receives zero or more thunks and executes them sequentially.\nfunc Do(thunks ...func()) {\n\tfor _, thunk := range thunks {\n\t\tthunk()\n\t}\n}\n\n// And receives zero or more predicate functions and executes them sequentially,\n// combining all return values with the && operator, with true as the default\n// return value.\nfunc And(predicates ...func() bool) bool {\n\tresult := true\n\tfor _, predicate := range predicates {\n\t\tresult = result && predicate()\n\t}\n\treturn result\n}\n\n// Or receives zero or more predicate functions and executes them sequentially,\n// combining all return values with the || operator, with false as the default\n// return value.\nfunc Or(predicates ...func() bool) bool {\n\tresult := false\n\tfor _, predicate := range predicates {\n\t\tresult = result || predicate()\n\t}\n\treturn result\n}\n\n// Range receives a range, a batch count n, and a range function f, divides the\n// range into batches, and invokes the range function for each of these batches\n// sequentially, covering the half-open interval from low to high, including low\n// but excluding high.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// Range panics if high < low, or if n < 0.\nfunc Range(\n\tlow, high, n int,\n\tf func(low, high int),\n) {\n\tvar recur func(int, int, int)\n\trecur = func(low, high, n int) {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\tf(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\tf(low, high)\n\t\t\t\treturn\n\t\t\t}\n\t\t\trecur(low, mid, half)\n\t\t\trecur(mid, high, n-half)\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\trecur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n\n// RangeAnd receives a range, a batch count n, and a range predicate function f,\n// divides the range into batches, and invokes the range predicate for each of\n// these batches sequentially, covering the half-open interval from low to high,\n// including low but excluding high.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. 
If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// RangeAnd returns by combining all return values with the && operator.\n//\n// RangeAnd panics if high < low, or if n < 0.\nfunc RangeAnd(\n\tlow, high, n int,\n\tf func(low, high int) bool,\n) bool {\n\tvar recur func(int, int, int) bool\n\trecur = func(low, high, n int) bool {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn f(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn f(low, high)\n\t\t\t}\n\t\t\tb0 := recur(low, mid, half)\n\t\t\tb1 := recur(mid, high, n-half)\n\t\t\treturn b0 && b1\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n\n// RangeOr receives a range, a batch count n, and a range predicate function f,\n// divides the range into batches, and invokes the range predicate for each of\n// these batches sequentially, covering the half-open interval from low to high,\n// including low but excluding high.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// RangeOr returns by combining all return values with the || operator.\n//\n// RangeOr panics if high < low, or if n < 0.\nfunc RangeOr(\n\tlow, high, n int,\n\tf func(low, high int) bool,\n) bool {\n\tvar recur func(int, int, int) bool\n\trecur = func(low, high, n int) bool {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn f(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn f(low, high)\n\t\t\t}\n\t\t\tb0 := recur(low, mid, half)\n\t\t\tb1 := recur(mid, high, n-half)\n\t\t\treturn b0 || b1\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n\n// RangeReduce receives a range, a batch count, a range reduce function, and a\n// join function, divides the range into batches, and invokes the range reducer\n// for each of these batches sequentially, covering the half-open interval from\n// low to high, including low but excluding high. The results of the range\n// reducer invocations are then combined by repeated invocations of join.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. 
If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// RangeReduce panics if high < low, or if n < 0.\nfunc RangeReduce(\n\tlow, high, n int,\n\treduce func(low, high int) interface{},\n\tjoin func(x, y interface{}) interface{},\n) interface{} {\n\tvar recur func(int, int, int) interface{}\n\trecur = func(low, high, n int) interface{} {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn reduce(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn reduce(low, high)\n\t\t\t}\n\t\t\tleft := recur(low, mid, half)\n\t\t\tright := recur(mid, high, n-half)\n\t\t\treturn join(left, right)\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n\n// RangeReduceInt receives a range, a batch count n, a range reducer function,\n// and a join function, divides the range into batches, and invokes the range\n// reducer for each of these batches sequentially, covering the half-open\n// interval from low to high, including low but excluding high. The results of\n// the range reducer invocations are then combined by repeated invocations of\n// join.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// IntRangeReduce panics if high < low, or if n < 0.\nfunc RangeReduceInt(\n\tlow, high, n int,\n\treduce func(low, high int) int,\n\tjoin func(x, y int) int,\n) int {\n\tvar recur func(int, int, int) int\n\trecur = func(low, high, n int) int {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn reduce(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn reduce(low, high)\n\t\t\t}\n\t\t\tleft := recur(low, mid, half)\n\t\t\tright := recur(mid, high, n-half)\n\t\t\treturn join(left, right)\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n\n// RangeReduceIntSum receives a range, a batch count n, and a range reducer\n// function, divides the range into batches, and invokes the range reducer for\n// each of these batches sequentially, covering the half-open interval from low\n// to high, including low but excluding high. The results of the range reducer\n// invocations are then added together.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. 
If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// RangeReduceIntSum panics if high < low, or if n < 0.\nfunc RangeReduceIntSum(\n\tlow, high, n int,\n\treduce func(low, high int) int,\n) int {\n\tvar recur func(int, int, int) int\n\trecur = func(low, high, n int) int {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn reduce(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn reduce(low, high)\n\t\t\t}\n\t\t\tleft := recur(low, mid, half)\n\t\t\tright := recur(mid, high, n-half)\n\t\t\treturn left + right\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n\n// RangeReduceIntProduct receives a range, a batch count n, and a range reducer\n// function, divides the range into batches, and invokes the range reducer for\n// each of these batches sequentially, covering the half-open interval from low\n// to high, including low but excluding high. The results of the range reducer\n// invocations are then multiplied with each other.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// RangeReduceIntProduct panics if high < low, or if n < 0.\nfunc RangeReduceIntProduct(\n\tlow, high, n int,\n\treduce func(low, high int) int,\n) int {\n\tvar recur func(int, int, int) int\n\trecur = func(low, high, n int) int {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn reduce(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn reduce(low, high)\n\t\t\t}\n\t\t\tleft := recur(low, mid, half)\n\t\t\tright := recur(mid, high, n-half)\n\t\t\treturn left * right\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n\n// RangeReduceFloat64 receives a range, a batch count n, a range reducer\n// function, and a join function, divides the range into batches, and invokes\n// the range reducer for each of these batches sequentially, covering the\n// half-open interval from low to high, including low but excluding high. The\n// results of the range reducer invocations are then combined by repeated\n// invocations of join.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. 
If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// RangeReduceFloat64 panics if high < low, or if n < 0.\nfunc RangeReduceFloat64(\n\tlow, high, n int,\n\treduce func(low, high int) float64,\n\tjoin func(x, y float64) float64,\n) float64 {\n\tvar recur func(int, int, int) float64\n\trecur = func(low, high, n int) float64 {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn reduce(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn reduce(low, high)\n\t\t\t}\n\t\t\tleft := recur(low, mid, half)\n\t\t\tright := recur(mid, high, n-half)\n\t\t\treturn join(left, right)\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n\n// RangeReduceFloat64Sum receives a range, a batch count n, and a range reducer\n// function, divides the range into batches, and invokes the range reducer for\n// each of these batches sequentially, covering the half-open interval from low\n// to high, including low but excluding high. The results of the range reducer\n// invocations are then added together.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// RangeReduceFloat64Sum panics if high < low, or if n < 0.\nfunc RangeReduceFloat64Sum(\n\tlow, high, n int,\n\treduce func(low, high int) float64,\n) float64 {\n\tvar recur func(int, int, int) float64\n\trecur = func(low, high, n int) float64 {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn reduce(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn reduce(low, high)\n\t\t\t}\n\t\t\tleft := recur(low, mid, half)\n\t\t\tright := recur(mid, high, n-half)\n\t\t\treturn left + right\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n\n// RangeReduceFloat64Product receives a range, a batch count n, and a range\n// reducer function, divides the range into batches, and invokes the range\n// reducer for each of these batches sequentially, covering the half-open\n// interval from low to high, including low but excluding high. The results of\n// the range reducer invocations are then multiplied with each other.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. 
If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// RangeReduceFloat64Product panics if high < low, or if n < 0.\nfunc RangeReduceFloat64Product(\n\tlow, high, n int,\n\treduce func(low, high int) float64,\n) float64 {\n\tvar recur func(int, int, int) float64\n\trecur = func(low, high, n int) float64 {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn reduce(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn reduce(low, high)\n\t\t\t}\n\t\t\tleft := recur(low, mid, half)\n\t\t\tright := recur(mid, high, n-half)\n\t\t\treturn left * right\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n\n// RangeReduceString receives a range, a batch count n, a range reducer\n// function, and a join function, divides the range into batches, and invokes\n// the range reducer for each of these batches sequentially, covering the\n// half-open interval from low to high, including low but excluding high. The\n// results of the range reducer invocations are then combined by repeated\n// invocations of join.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// RangeReduceString panics if high < low, or if n < 0.\nfunc RangeReduceString(\n\tlow, high, n int,\n\treduce func(low, high int) string,\n\tjoin func(x, y string) string,\n) string {\n\tvar recur func(int, int, int) string\n\trecur = func(low, high, n int) string {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn reduce(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn reduce(low, high)\n\t\t\t}\n\t\t\tleft := recur(low, mid, half)\n\t\t\tright := recur(mid, high, n-half)\n\t\t\treturn join(left, right)\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n\n// RangeReduceStringSum receives a range, a batch count n, and a range reducer\n// function, divides the range into batches, and invokes the range reducer for\n// each of these batches sequentially, covering the half-open interval from low\n// to high, including low but excluding high. The results of the range reducer\n// invocations are then concatenated together.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. 
If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// RangeReduceStringSum panics if high < low, or if n < 0.\nfunc RangeReduceStringSum(\n\tlow, high, n int,\n\treduce func(low, high int) string,\n) string {\n\tvar recur func(int, int, int) string\n\trecur = func(low, high, n int) string {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn reduce(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn reduce(low, high)\n\t\t\t}\n\t\t\tleft := recur(low, mid, half)\n\t\t\tright := recur(mid, high, n-half)\n\t\t\treturn left + right\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n"
  },
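  {
    "path": "sequential/usage_sketch_test.go",
    "content": "// Illustrative usage sketch (hypothetical example file): shows how the\n// sequential Range* reducers above can be called, assuming they live in the\n// pargo/sequential package. The signature matches the package source; the\n// numbers are made up for demonstration only.\n\npackage sequential_test\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/exascience/pargo/sequential\"\n)\n\nfunc ExampleRangeReduceIntSum() {\n\t// Sum the squares of 0..9 by dividing the index range into batches. A\n\t// batch count of 0 lets the library pick a default based on\n\t// runtime.GOMAXPROCS(0).\n\tsum := sequential.RangeReduceIntSum(0, 10, 0, func(low, high int) int {\n\t\tpartial := 0\n\t\tfor i := low; i < high; i++ {\n\t\t\tpartial += i * i\n\t\t}\n\t\treturn partial\n\t})\n\tfmt.Println(sum)\n\t// Output:\n\t// 285\n}\n"
  },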
  {
    "path": "sort/example_interface_test.go",
    "content": "// Copyright 2011 The Go Authors. All rights reserved. Use of this source code\n// is governed by a BSD-style license that can be found in the LICENSE file.\n\n// Adapted by Pascal Costanza for the Pargo package.\n\npackage sort_test\n\nimport (\n\t\"fmt\"\n\tstdsort \"sort\"\n\n\tsort \"github.com/exascience/pargo/sort\"\n)\n\ntype Person struct {\n\tName string\n\tAge  int\n}\n\nfunc (p Person) String() string {\n\treturn fmt.Sprintf(\"%s: %d\", p.Name, p.Age)\n}\n\n// ByAge implements sort.SequentialSorter, sort.Sorter, and sort.StableSorter\n// for []Person based on the Age field.\ntype ByAge []Person\n\nfunc (a ByAge) SequentialSort(i, j int) {\n\tstdsort.SliceStable(a, func(i, j int) bool {\n\t\treturn a[i].Age < a[j].Age\n\t})\n}\n\nfunc (a ByAge) Len() int           { return len(a) }\nfunc (a ByAge) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\nfunc (a ByAge) Less(i, j int) bool { return a[i].Age < a[j].Age }\n\nfunc (a ByAge) NewTemp() sort.StableSorter { return make(ByAge, len(a)) }\n\nfunc (this ByAge) Assign(that sort.StableSorter) func(i, j, len int) {\n\tdst, src := this, that.(ByAge)\n\treturn func(i, j, len int) {\n\t\tfor k := 0; k < len; k++ {\n\t\t\tdst[i+k] = src[j+k]\n\t\t}\n\t}\n}\n\nfunc Example() {\n\tpeople := []Person{\n\t\t{\"Bob\", 31},\n\t\t{\"John\", 42},\n\t\t{\"Michael\", 17},\n\t\t{\"Jenny\", 26},\n\t}\n\n\tfmt.Println(people)\n\tsort.Sort(ByAge(people))\n\tfmt.Println(people)\n\n\tpeople = []Person{\n\t\t{\"Bob\", 31},\n\t\t{\"John\", 42},\n\t\t{\"Michael\", 17},\n\t\t{\"Jenny\", 26},\n\t}\n\n\tfmt.Println(people)\n\tsort.StableSort(ByAge(people))\n\tfmt.Println(people)\n\n\t// Output:\n\t// [Bob: 31 John: 42 Michael: 17 Jenny: 26]\n\t// [Michael: 17 Jenny: 26 Bob: 31 John: 42]\n\t// [Bob: 31 John: 42 Michael: 17 Jenny: 26]\n\t// [Michael: 17 Jenny: 26 Bob: 31 John: 42]\n}\n"
  },
  {
    "path": "sort/mergesort.go",
    "content": "package sort\n\nimport (\n\t\"sync\"\n\n\t\"github.com/exascience/pargo/parallel\"\n)\n\nconst msortGrainSize = 0x3000\n\n// StableSorter is a type, typically a collection, that can be sorted by\n// StableSort in this package. The methods require that ranges of elements of\n// the collection can be enumerated by integer indices.\ntype StableSorter interface {\n\tSequentialSorter\n\n\t// NewTemp creates a new collection that can hold as many elements as the\n\t// original collection. This is temporary memory needed by StableSort, but\n\t// not needed anymore afterwards. The temporary collection does not need to\n\t// be initialized.\n\tNewTemp() StableSorter\n\n\t// Len is the number of elements in the collection.\n\tLen() int\n\n\t// Less reports whether the element with index i should sort before the\n\t// element with index j.\n\tLess(i, j int) bool\n\n\t// Assign returns a function that assigns ranges from source to the receiver\n\t// collection. The element with index i is the first element in the receiver\n\t// to assign to, and the element with index j is the first element in the\n\t// source collection to assign from, with len determining the number of\n\t// elements to assign. The effect should be the same as receiver[i:i+len] =\n\t// source[j:j+len].\n\tAssign(source StableSorter) func(i, j, len int)\n}\n\ntype sorter struct {\n\tless   func(i, j int) bool\n\tassign func(i, j, len int)\n}\n\nfunc binarySearchEq(x int, T *sorter, p, r int) int {\n\tlow, high := p, r+1\n\tif low > high {\n\t\treturn low\n\t}\n\tfor low < high {\n\t\tmid := (low + high) / 2\n\t\tif !T.less(mid, x) {\n\t\t\thigh = mid\n\t\t} else {\n\t\t\tlow = mid + 1\n\t\t}\n\t}\n\treturn high\n}\n\nfunc binarySearchNeq(x int, T *sorter, p, r int) int {\n\tlow, high := p, r+1\n\tif low > high {\n\t\treturn low\n\t}\n\tfor low < high {\n\t\tmid := (low + high) / 2\n\t\tif T.less(x, mid) {\n\t\t\thigh = mid\n\t\t} else {\n\t\t\tlow = mid + 1\n\t\t}\n\t}\n\treturn high\n}\n\nfunc sMerge(T *sorter, p1, r1, p2, r2 int, A *sorter, p3 int) {\n\tfor {\n\t\tif p2 > r2 {\n\t\t\tA.assign(p3, p1, r1+1-p1)\n\t\t\treturn\n\t\t}\n\n\t\tq1 := p1\n\t\tfor (p1 <= r1) && !T.less(p2, p1) {\n\t\t\tp1++\n\t\t}\n\t\tn1 := p1 - q1\n\t\tA.assign(p3, q1, n1)\n\t\tp3 += n1\n\n\t\tif p1 > r1 {\n\t\t\tA.assign(p3, p2, r2+1-p2)\n\t\t\treturn\n\t\t}\n\n\t\tq2 := p2\n\t\tfor (p2 <= r2) && T.less(p2, p1) {\n\t\t\tp2++\n\t\t}\n\t\tn2 := p2 - q2\n\t\tA.assign(p3, q2, n2)\n\t\tp3 += n2\n\t}\n}\n\nfunc pMerge(T *sorter, p1, r1, p2, r2 int, A *sorter, p3 int) {\n\tn1 := r1 - p1 + 1\n\tn2 := r2 - p2 + 1\n\tif (n1 + n2) < msortGrainSize {\n\t\tsMerge(T, p1, r1, p2, r2, A, p3)\n\t\treturn\n\t}\n\tif n1 > n2 {\n\t\tif n1 == 0 {\n\t\t\treturn\n\t\t}\n\t\tq1 := (p1 + r1) / 2\n\t\tq2 := binarySearchEq(q1, T, p2, r2)\n\t\tq3 := p3 + (q1 - p1) + (q2 - p2)\n\t\tA.assign(q3, q1, 1)\n\t\tparallel.Do(\n\t\t\tfunc() { pMerge(T, p1, q1-1, p2, q2-1, A, p3) },\n\t\t\tfunc() { pMerge(T, q1+1, r1, q2, r2, A, q3+1) },\n\t\t)\n\t} else {\n\t\tif n2 == 0 {\n\t\t\treturn\n\t\t}\n\t\tq2 := (p2 + r2) / 2\n\t\tq1 := binarySearchNeq(q2, T, p1, r1)\n\t\tq3 := p3 + (q1 - p1) + (q2 - p2)\n\t\tA.assign(q3, q2, 1)\n\t\tparallel.Do(\n\t\t\tfunc() { pMerge(T, p1, q1-1, p2, q2-1, A, p3) },\n\t\t\tfunc() { pMerge(T, q1, r1, q2+1, r2, A, q3+1) },\n\t\t)\n\t}\n}\n\n// StableSort uses a parallel implementation of merge sort, also known as\n// cilksort.\n//\n// StableSort is only stable if data's SequentialSort method is stable.\n//\n// StableSort is good for large core counts 
and large collection sizes, but\n// needs a shallow copy of the data collection as additional temporary memory.\nfunc StableSort(data StableSorter) {\n\t// See https://en.wikipedia.org/wiki/Introduction_to_Algorithms and\n\t// https://www.clear.rice.edu/comp422/lecture-notes/ for details on the algorithm.\n\tsize := data.Len()\n\tsSort := data.SequentialSort\n\tif size < msortGrainSize {\n\t\tsSort(0, size)\n\t\treturn\n\t}\n\tvar T, A *sorter\n\tvar temp sync.WaitGroup\n\ttemp.Add(1)\n\tgo func() {\n\t\tdefer temp.Done()\n\t\ta := data.NewTemp()\n\t\tT = &sorter{data.Less, data.Assign(a)}\n\t\tA = &sorter{a.Less, a.Assign(data)}\n\t}()\n\tvar pSort func(int, int)\n\tpSort = func(index, size int) {\n\t\tif size < msortGrainSize {\n\t\t\tsSort(index, index+size)\n\t\t} else {\n\t\t\tq1 := size / 4\n\t\t\tq2 := q1 + q1\n\t\t\tq3 := q2 + q1\n\t\t\tparallel.Do(\n\t\t\t\tfunc() { pSort(index, q1) },\n\t\t\t\tfunc() { pSort(index+q1, q1) },\n\t\t\t\tfunc() { pSort(index+q2, q1) },\n\t\t\t\tfunc() { pSort(index+q3, size-q3) },\n\t\t\t)\n\t\t\ttemp.Wait()\n\t\t\tparallel.Do(\n\t\t\t\tfunc() { pMerge(T, index, index+q1-1, index+q1, index+q2-1, A, index) },\n\t\t\t\tfunc() { pMerge(T, index+q2, index+q3-1, index+q3, index+size-1, A, index+q2) },\n\t\t\t)\n\t\t\tpMerge(A, index, index+q2-1, index+q2, index+size-1, T, index)\n\t\t}\n\t}\n\tpSort(0, size)\n}\n"
  },
  {
    "path": "sort/quicksort.go",
    "content": "package sort\n\nimport (\n\t\"sort\"\n\n\t\"github.com/exascience/pargo/parallel\"\n)\n\nconst qsortGrainSize = 0x500\n\n// Sorter is a type, typically a collection, that can be sorted by Sort in this\n// package. The methods require that (ranges of) elements of the collection can\n// be enumerated by integer indices.\ntype Sorter interface {\n\tSequentialSorter\n\tsort.Interface\n}\n\nfunc medianOfThree(data sort.Interface, l, m, r int) int {\n\tif data.Less(l, m) {\n\t\tif data.Less(m, r) {\n\t\t\treturn m\n\t\t} else if data.Less(l, r) {\n\t\t\treturn r\n\t\t}\n\t} else if data.Less(r, m) {\n\t\treturn m\n\t} else if data.Less(r, l) {\n\t\treturn r\n\t}\n\treturn l\n}\n\nfunc pseudoMedianOfNine(data sort.Interface, index, size int) int {\n\toffset := size / 8\n\treturn medianOfThree(data,\n\t\tmedianOfThree(data, index, index+offset, index+offset*2),\n\t\tmedianOfThree(data, index+offset*3, index+offset*4, index+offset*5),\n\t\tmedianOfThree(data, index+offset*6, index+offset*7, index+size-1),\n\t)\n}\n\n// Sort uses a parallel quicksort implementation.\n//\n// It is good for small core counts and small collection sizes.\nfunc Sort(data Sorter) {\n\tsize := data.Len()\n\tsSort := data.SequentialSort\n\tif size < qsortGrainSize {\n\t\tsSort(0, size)\n\t\treturn\n\t}\n\tvar pSort func(int, int)\n\tpSort = func(index, size int) {\n\t\tif size < qsortGrainSize {\n\t\t\tsSort(index, index+size)\n\t\t} else {\n\t\t\tm := pseudoMedianOfNine(data, index, size)\n\t\t\tif m > index {\n\t\t\t\tdata.Swap(index, m)\n\t\t\t}\n\t\t\ti, j := index, index+size\n\t\touter:\n\t\t\tfor {\n\t\t\t\tfor {\n\t\t\t\t\tj--\n\t\t\t\t\tif !data.Less(index, j) {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfor {\n\t\t\t\t\tif i == j {\n\t\t\t\t\t\tbreak outer\n\t\t\t\t\t}\n\t\t\t\t\ti++\n\t\t\t\t\tif !data.Less(i, index) {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif i == j {\n\t\t\t\t\tbreak outer\n\t\t\t\t}\n\t\t\t\tdata.Swap(i, j)\n\t\t\t}\n\t\t\tdata.Swap(j, index)\n\t\t\ti = j + 1\n\t\t\tparallel.Do(\n\t\t\t\tfunc() { pSort(index, j-index) },\n\t\t\t\tfunc() { pSort(i, index+size-i) },\n\t\t\t)\n\t\t}\n\t}\n\tif !IsSorted(data) {\n\t\tpSort(0, size)\n\t}\n}\n"
  },
  {
    "path": "sort/sort.go",
    "content": "// Package sort provides implementations of parallel sorting algorithms.\npackage sort\n\nimport (\n\t\"sort\"\n\t\"sync/atomic\"\n\n\t\"github.com/exascience/pargo/speculative\"\n)\n\n// SequentialSorter is a type, typically a collection, that can be sequentially\n// sorted. This is needed as a base case for the parallel sorting algorithms in\n// this package. It is recommended to implement this interface by using the\n// functions in the sort package of Go's standard library.\ntype SequentialSorter interface {\n\t// Sort the range that starts at index i and ends at index j. If the\n\t// collection that is represented by this interface is a slice, then the\n\t// slice expression collection[i:j] returns the correct slice to be sorted.\n\tSequentialSort(i, j int)\n}\n\nconst serialCutoff = 10\n\n// IsSorted determines in parallel whether data is already sorted. It attempts\n// to terminate early when the return value is false.\nfunc IsSorted(data sort.Interface) bool {\n\tsize := data.Len()\n\tif size < qsortGrainSize {\n\t\treturn sort.IsSorted(data)\n\t}\n\tfor i := 1; i < serialCutoff; i++ {\n\t\tif data.Less(i, i-1) {\n\t\t\treturn false\n\t\t}\n\t}\n\tvar done int32\n\tdefer atomic.StoreInt32(&done, 1)\n\tvar pTest func(int, int) bool\n\tpTest = func(index, size int) bool {\n\t\tif size < qsortGrainSize {\n\t\t\tfor i := index; i < index+size; i++ {\n\t\t\t\tif ((i % 1024) == 0) && (atomic.LoadInt32(&done) != 0) {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tif data.Less(i, i-1) {\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn true\n\t\t}\n\t\thalf := size / 2\n\t\tresult := speculative.And(\n\t\t\tfunc() bool { return pTest(index, half) },\n\t\t\tfunc() bool { return pTest(index+half, size-half) },\n\t\t)\n\t\treturn result\n\t}\n\treturn pTest(serialCutoff, size-serialCutoff)\n}\n\n// IntSlice attaches the methods of sort.Interface, SequentialSorter, Sorter,\n// and StableSorter to []int, sorting in increasing order.\ntype IntSlice []int\n\n// SequentialSort implements the method of of the SequentialSorter interface.\nfunc (s IntSlice) SequentialSort(i, j int) {\n\tsort.Stable(sort.IntSlice(s[i:j]))\n}\n\nfunc (s IntSlice) Len() int {\n\treturn len(s)\n}\n\nfunc (s IntSlice) Less(i, j int) bool {\n\treturn s[i] < s[j]\n}\n\nfunc (s IntSlice) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n}\n\n// NewTemp implements the method of the StableSorter interface.\nfunc (s IntSlice) NewTemp() StableSorter {\n\treturn IntSlice(make([]int, len(s)))\n}\n\n// Assign implements the method of the StableSorter interface.\nfunc (s IntSlice) Assign(source StableSorter) func(i, j, len int) {\n\tdst, src := s, source.(IntSlice)\n\treturn func(i, j, len int) {\n\t\tcopy(dst[i:i+len], src[j:j+len])\n\t}\n}\n\n// IntsAreSorted determines in parallel whether a slice of ints is already\n// sorted in increasing order. 
It attempts to terminate early when the return\n// value is false.\nfunc IntsAreSorted(a []int) bool {\n\treturn IsSorted(IntSlice(a))\n}\n\n// Float64Slice attaches the methods of sort.Interface, SequentialSorter,\n// Sorter, and StableSorter to []float64, sorting in increasing order.\ntype Float64Slice []float64\n\n// SequentialSort implements the method of the SequentialSorter interface.\nfunc (s Float64Slice) SequentialSort(i, j int) {\n\tsort.Stable(sort.Float64Slice(s[i:j]))\n}\n\nfunc (s Float64Slice) Len() int {\n\treturn len(s)\n}\n\nfunc (s Float64Slice) Less(i, j int) bool {\n\treturn s[i] < s[j]\n}\n\nfunc (s Float64Slice) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n}\n\n// NewTemp implements the method of the StableSorter interface.\nfunc (s Float64Slice) NewTemp() StableSorter {\n\treturn Float64Slice(make([]float64, len(s)))\n}\n\n// Assign implements the method of the StableSorter interface.\nfunc (s Float64Slice) Assign(source StableSorter) func(i, j, len int) {\n\tdst, src := s, source.(Float64Slice)\n\treturn func(i, j, len int) {\n\t\tcopy(dst[i:i+len], src[j:j+len])\n\t}\n}\n\n// Float64sAreSorted determines in parallel whether a slice of float64s is\n// already sorted in increasing order. It attempts to terminate early when the\n// return value is false.\nfunc Float64sAreSorted(a []float64) bool {\n\treturn IsSorted(Float64Slice(a))\n}\n\n// StringSlice attaches the methods of sort.Interface, SequentialSorter, Sorter,\n// and StableSorter to []string, sorting in increasing order.\ntype StringSlice []string\n\n// SequentialSort implements the method of the SequentialSorter interface.\nfunc (s StringSlice) SequentialSort(i, j int) {\n\tsort.Stable(sort.StringSlice(s[i:j]))\n}\n\nfunc (s StringSlice) Len() int {\n\treturn len(s)\n}\n\nfunc (s StringSlice) Less(i, j int) bool {\n\treturn s[i] < s[j]\n}\n\nfunc (s StringSlice) Swap(i, j int) {\n\ts[i], s[j] = s[j], s[i]\n}\n\n// NewTemp implements the method of the StableSorter interface.\nfunc (s StringSlice) NewTemp() StableSorter {\n\treturn StringSlice(make([]string, len(s)))\n}\n\n// Assign implements the method of the StableSorter interface.\nfunc (s StringSlice) Assign(source StableSorter) func(i, j, len int) {\n\tdst, src := s, source.(StringSlice)\n\treturn func(i, j, len int) {\n\t\tcopy(dst[i:i+len], src[j:j+len])\n\t}\n}\n\n// StringsAreSorted determines in parallel whether a slice of strings is already\n// sorted in increasing order. It attempts to terminate early when the return\n// value is false.\nfunc StringsAreSorted(a []string) bool {\n\treturn IsSorted(StringSlice(a))\n}\n"
  },
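  {
    "path": "sort/sort_example_sketch_test.go",
    "content": "// Illustrative usage sketch (hypothetical example file): exercises the\n// predefined IntSlice adapter from sort.go with the parallel Sort and\n// StableSort entry points and the parallel IntsAreSorted check. The values\n// are made up for demonstration only.\n\npackage sort_test\n\nimport (\n\t\"fmt\"\n\n\tsort \"github.com/exascience/pargo/sort\"\n)\n\nfunc ExampleSort() {\n\tdata := []int{5, 2, 4, 1, 3}\n\t// Sort uses the parallel quicksort; for a slice this small it simply\n\t// falls back to the sequential base case.\n\tsort.Sort(sort.IntSlice(data))\n\tfmt.Println(data, sort.IntsAreSorted(data))\n\t// Output:\n\t// [1 2 3 4 5] true\n}\n\nfunc ExampleStableSort() {\n\tdata := []int{5, 2, 4, 1, 3}\n\t// StableSort uses the parallel merge sort (cilksort) and allocates a\n\t// temporary copy of the collection via NewTemp.\n\tsort.StableSort(sort.IntSlice(data))\n\tfmt.Println(data)\n\t// Output:\n\t// [1 2 3 4 5]\n}\n"
  },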
  {
    "path": "sort/sort_test.go",
    "content": "package sort\n\nimport (\n\t\"bytes\"\n\t\"math/rand\"\n\t\"sort\"\n\t\"testing\"\n)\n\ntype (\n\tBy func(i, j int) bool\n\n\tIntSliceSorter struct {\n\t\tslice []int\n\t\tby    By\n\t}\n)\n\nfunc (s IntSliceSorter) NewTemp() StableSorter {\n\treturn IntSliceSorter{make([]int, len(s.slice)), s.by}\n}\n\nfunc (s IntSliceSorter) Len() int {\n\treturn len(s.slice)\n}\n\nfunc (s IntSliceSorter) Less(i, j int) bool {\n\treturn s.by(s.slice[i], s.slice[j])\n}\n\nfunc (s IntSliceSorter) Swap(i, j int) {\n\ts.slice[i], s.slice[j] = s.slice[j], s.slice[i]\n}\n\nfunc (s IntSliceSorter) Assign(t StableSorter) func(i, j, len int) {\n\tdst, src := s.slice, t.(IntSliceSorter).slice\n\treturn func(i, j, len int) {\n\t\tfor k := 0; k < len; k++ {\n\t\t\tdst[i+k] = src[j+k]\n\t\t}\n\t}\n}\n\nfunc (s IntSliceSorter) SequentialSort(i, j int) {\n\tslice, by := s.slice[i:j], s.by\n\tsort.Slice(slice, func(i, j int) bool {\n\t\treturn by(slice[i], slice[j])\n\t})\n}\n\nfunc (by By) SequentialSort(slice []int) {\n\tsort.Sort(IntSliceSorter{slice, by})\n}\n\nfunc (by By) ParallelStableSort(slice []int) {\n\tStableSort(IntSliceSorter{slice, by})\n}\n\nfunc (by By) ParallelSort(slice []int) {\n\tSort(IntSliceSorter{slice, by})\n}\n\nfunc (by By) IsSorted(slice []int) bool {\n\treturn sort.IsSorted(IntSliceSorter{slice, by})\n}\n\nfunc makeRandomSlice(size, limit int) []int {\n\tresult := make([]int, size)\n\tfor i := 0; i < size; i++ {\n\t\tresult[i] = rand.Intn(limit)\n\t}\n\treturn result\n}\n\nfunc TestSort(t *testing.T) {\n\torgSlice := makeRandomSlice(100*0x6000, 100*100*0x6000)\n\ts1 := make([]int, len(orgSlice))\n\ts2 := make([]int, len(orgSlice))\n\tcopy(s1, orgSlice)\n\tcopy(s2, orgSlice)\n\n\tt.Run(\"ParallelStableSort\", func(t *testing.T) {\n\t\tBy(func(i, j int) bool { return i < j }).ParallelStableSort(s1)\n\t\tif !By(func(i, j int) bool { return i < j }).IsSorted(s1) {\n\t\t\tt.Errorf(\"parallel stable sort incorrect\")\n\t\t}\n\t})\n\n\tt.Run(\"ParallelSort\", func(t *testing.T) {\n\t\tBy(func(i, j int) bool { return i < j }).ParallelSort(s2)\n\t\tif !By(func(i, j int) bool { return i < j }).IsSorted(s2) {\n\t\t\tt.Errorf(\"parallel sort incorrect\")\n\t\t}\n\t})\n}\n\nfunc TestIntSort(t *testing.T) {\n\torgSlice := makeRandomSlice(100*0x6000, 100*100*0x6000)\n\ts1 := make([]int, len(orgSlice))\n\ts2 := make([]int, len(orgSlice))\n\tcopy(s1, orgSlice)\n\tcopy(s2, orgSlice)\n\n\tt.Run(\"ParallelStableSort IntSlice\", func(t *testing.T) {\n\t\tStableSort(IntSlice(s1))\n\t\tif !sort.IntsAreSorted(s1) {\n\t\t\tt.Errorf(\"parallel stable sort on IntSlice incorrect\")\n\t\t}\n\t\tif !IntsAreSorted(s1) {\n\t\t\tt.Errorf(\"parallel IntsAreSorted incorrect\")\n\t\t}\n\t})\n\n\tt.Run(\"ParallelSort IntSlice\", func(t *testing.T) {\n\t\tSort(IntSlice(s2))\n\t\tif !sort.IntsAreSorted(s2) {\n\t\t\tt.Errorf(\"parallel sort on IntSlice incorrect\")\n\t\t}\n\t\tif !IntsAreSorted(s2) {\n\t\t\tt.Errorf(\"parallel IntsAreSorted incorrect\")\n\t\t}\n\t})\n}\n\nfunc makeRandomFloat64Slice(size int) []float64 {\n\tresult := make([]float64, size)\n\tfor i := 0; i < size; i++ {\n\t\tresult[i] = rand.NormFloat64()\n\t}\n\treturn result\n}\n\nfunc TestFloat64Sort(t *testing.T) {\n\torgSlice := makeRandomFloat64Slice(100 * 0x6000)\n\ts1 := make([]float64, len(orgSlice))\n\ts2 := make([]float64, len(orgSlice))\n\tcopy(s1, orgSlice)\n\tcopy(s2, orgSlice)\n\n\tt.Run(\"ParallelStableSort Float64Slice\", func(t *testing.T) {\n\t\tStableSort(Float64Slice(s1))\n\t\tif !sort.Float64sAreSorted(s1) 
{\n\t\t\tt.Errorf(\"parallel stable sort on Float64Slice incorrect\")\n\t\t}\n\t\tif !Float64sAreSorted(s1) {\n\t\t\tt.Errorf(\"parallel Float64sAreSorted incorrect\")\n\t\t}\n\t})\n\n\tt.Run(\"ParallelSort Float64Slice\", func(t *testing.T) {\n\t\tSort(Float64Slice(s2))\n\t\tif !sort.Float64sAreSorted(s2) {\n\t\t\tt.Errorf(\"parallel sort on Float64Slice incorrect\")\n\t\t}\n\t\tif !Float64sAreSorted(s2) {\n\t\t\tt.Errorf(\"parallel Float64sAreSorted incorrect\")\n\t\t}\n\t})\n}\n\nfunc makeRandomStringSlice(size, lenlimit int, limit int32) []string {\n\tresult := make([]string, size)\n\tfor i := 0; i < size; i++ {\n\t\tvar buf bytes.Buffer\n\t\tlen := rand.Intn(lenlimit)\n\t\tfor j := 0; j < len; j++ {\n\t\t\tbuf.WriteRune(rand.Int31n(limit))\n\t\t}\n\t\tresult[i] = buf.String()\n\t}\n\treturn result\n}\n\nfunc TestStringSort(t *testing.T) {\n\torgSlice := makeRandomStringSlice(100*0x6000, 256, 16384)\n\ts1 := make([]string, len(orgSlice))\n\ts2 := make([]string, len(orgSlice))\n\tcopy(s1, orgSlice)\n\tcopy(s2, orgSlice)\n\n\tt.Run(\"ParallelStableSort StringSlice\", func(t *testing.T) {\n\t\tStableSort(StringSlice(s1))\n\t\tif !sort.StringsAreSorted(s1) {\n\t\t\tt.Errorf(\"parallel stable sort on StringSlice incorrect\")\n\t\t}\n\t\tif !StringsAreSorted(s1) {\n\t\t\tt.Errorf(\"parallel StringsAreSorted incorrect\")\n\t\t}\n\t})\n\n\tt.Run(\"ParallelSort StringSlice\", func(t *testing.T) {\n\t\tSort(StringSlice(s2))\n\t\tif !sort.StringsAreSorted(s2) {\n\t\t\tt.Errorf(\"parallel sort on StringSlice incorrect\")\n\t\t}\n\t\tif !StringsAreSorted(s2) {\n\t\t\tt.Errorf(\"parallel StringsAreSorted incorrect\")\n\t\t}\n\t})\n}\n\ntype (\n\tbox struct {\n\t\tprimary, secondary int\n\t}\n\n\tboxSlice []box\n)\n\nfunc makeRandomBoxSlice(size int) boxSlice {\n\tresult := make([]box, size)\n\thalf := ((size - 1) / 2) + 1\n\tfor i := 0; i < size; i++ {\n\t\tresult[i].primary = rand.Intn(half)\n\t\tresult[i].secondary = i + 1\n\t}\n\treturn result\n}\n\nfunc (s boxSlice) NewTemp() StableSorter {\n\treturn boxSlice(make([]box, len(s)))\n}\n\nfunc (s boxSlice) Len() int {\n\treturn len(s)\n}\n\nfunc (s boxSlice) Less(i, j int) bool {\n\treturn s[i].primary < s[j].primary\n}\n\nfunc (s boxSlice) Assign(source StableSorter) func(i, j, len int) {\n\tdst, src := s, source.(boxSlice)\n\treturn func(i, j, len int) {\n\t\tfor k := 0; k < len; k++ {\n\t\t\tdst[i+k] = src[j+k]\n\t\t}\n\t}\n}\n\nfunc (s boxSlice) SequentialSort(i, j int) {\n\tslice := s[i:j]\n\tsort.SliceStable(slice, func(i, j int) bool {\n\t\treturn slice[i].primary < slice[j].primary\n\t})\n}\n\nfunc checkStable(b boxSlice) bool {\n\tm := make(map[int]int)\n\tfor _, el := range b {\n\t\tif m[el.primary] < el.secondary {\n\t\t\tm[el.primary] = el.secondary\n\t\t} else {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc TestStableSort(t *testing.T) {\n\torgSlice := makeRandomBoxSlice(100 * 0x6000)\n\ts1 := make(boxSlice, len(orgSlice))\n\tcopy(s1, orgSlice)\n\n\tt.Run(\"ParallelStableSort boxSlice\", func(t *testing.T) {\n\t\tStableSort(s1)\n\t\tif !sort.SliceIsSorted(s1, func(i, j int) bool {\n\t\t\treturn s1[i].primary < s1[j].primary\n\t\t}) {\n\t\t\tt.Errorf(\"parallel stable sort on boxSlice incorrect\")\n\t\t}\n\t})\n\n\tt.Run(\"CheckStable ParallelStableSort boxSlice\", func(t *testing.T) {\n\t\tif !checkStable(s1) {\n\t\t\tt.Errorf(\"parallel stable sort on boxSlice not stable\")\n\t\t}\n\t})\n}\n\nfunc BenchmarkSort(b *testing.B) {\n\torgSlice := makeRandomSlice(100*0x6000, 100*100*0x6000)\n\ts1 := make([]int, 
len(orgSlice))\n\ts2 := make([]int, len(orgSlice))\n\ts3 := make([]int, len(orgSlice))\n\n\tb.Run(\"SequentialSort\", func(b *testing.B) {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tb.StopTimer()\n\t\t\tcopy(s1, orgSlice)\n\t\t\tb.StartTimer()\n\t\t\tBy(func(i, j int) bool { return i < j }).SequentialSort(s1)\n\t\t}\n\t})\n\n\tb.Run(\"ParallelStableSort\", func(b *testing.B) {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tb.StopTimer()\n\t\t\tcopy(s2, orgSlice)\n\t\t\tb.StartTimer()\n\t\t\tBy(func(i, j int) bool { return i < j }).ParallelStableSort(s2)\n\t\t}\n\t})\n\n\tb.Run(\"ParallelSort\", func(b *testing.B) {\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tb.StopTimer()\n\t\t\tcopy(s3, orgSlice)\n\t\t\tb.StartTimer()\n\t\t\tBy(func(i, j int) bool { return i < j }).ParallelSort(s3)\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "speculative/speculative.go",
    "content": "// Package speculative provides functions for expressing parallel algorithms,\n// similar to the functions in package parallel, except that the implementations\n// here terminate early when they can.\n//\n// See https://github.com/ExaScience/pargo/wiki/TaskParallelism for a general\n// overview.\npackage speculative\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\n\t\"github.com/exascience/pargo/internal\"\n)\n\n// Reduce receives one or more functions, executes them in parallel, and\n// combines their results with the join function in parallel.\n//\n// Each function is invoked in its own goroutine. Reduce returns either when all\n// functions have terminated with a second return value of false; or when one or\n// more functions return a second return value of true. In the latter case, the\n// first return value of the left-most function that returned true as a second\n// return value becomes the final result, without waiting for the other\n// functions to terminate.\n//\n// If one or more functions panic, the corresponding goroutines recover the\n// panics, and Reduce eventually panics with the left-most recovered panic\n// value.\nfunc Reduce(\n\tjoin func(x, y interface{}) (interface{}, bool),\n\tfirstFunction func() (interface{}, bool),\n\tmoreFunctions ...func() (interface{}, bool),\n) (interface{}, bool) {\n\tif len(moreFunctions) == 0 {\n\t\treturn firstFunction()\n\t}\n\tvar left, right interface{}\n\tvar b0, b1 bool\n\tvar p interface{}\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tif len(moreFunctions) == 1 {\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tright, b1 = moreFunctions[0]()\n\t\t}()\n\t\tleft, b0 = firstFunction()\n\t} else {\n\t\thalf := (len(moreFunctions) + 1) / 2\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tright, b1 = Reduce(join, moreFunctions[half], moreFunctions[half+1:]...)\n\t\t}()\n\t\tleft, b0 = Reduce(join, firstFunction, moreFunctions[:half]...)\n\t}\n\tif b0 {\n\t\treturn left, true\n\t}\n\twg.Wait()\n\tif p != nil {\n\t\tpanic(p)\n\t}\n\tif b1 {\n\t\treturn right, true\n\t}\n\treturn join(left, right)\n}\n\n// ReduceFloat64 receives one or more functions, executes them in parallel, and\n// combines their results with the join function in parallel.\n//\n// Each function is invoked in its own goroutine. ReduceFloat64 returns either\n// when all functions have terminated with a second return value of false; or\n// when one or more functions return a second return value of true. 
In the\n// latter case, the first return value of the left-most function that returned\n// true as a second return value becomes the final result, without waiting for\n// the other functions to terminate.\n//\n// If one or more functions panic, the corresponding goroutines recover the\n// panics, and ReduceFloat64 eventually panics with the left-most recovered\n// panic value.\nfunc ReduceFloat64(\n\tjoin func(x, y float64) (float64, bool),\n\tfirstFunction func() (float64, bool),\n\tmoreFunctions ...func() (float64, bool),\n) (float64, bool) {\n\tif len(moreFunctions) == 0 {\n\t\treturn firstFunction()\n\t}\n\tvar left, right float64\n\tvar b0, b1 bool\n\tvar p interface{}\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tif len(moreFunctions) == 1 {\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tright, b1 = moreFunctions[0]()\n\t\t}()\n\t\tleft, b0 = firstFunction()\n\t} else {\n\t\thalf := (len(moreFunctions) + 1) / 2\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tright, b1 = ReduceFloat64(join, moreFunctions[half], moreFunctions[half+1:]...)\n\t\t}()\n\t\tleft, b0 = ReduceFloat64(join, firstFunction, moreFunctions[:half]...)\n\t}\n\tif b0 {\n\t\treturn left, true\n\t}\n\twg.Wait()\n\tif p != nil {\n\t\tpanic(p)\n\t}\n\tif b1 {\n\t\treturn right, true\n\t}\n\treturn join(left, right)\n}\n\n// ReduceInt receives one or more functions, executes them in parallel, and\n// combines their results with the join function in parallel.\n//\n// Each function is invoked in its own goroutine. ReduceInt returns either when\n// all functions have terminated with a second return value of false; or when\n// one or more functions return a second return value of true. In the latter\n// case, the first return value of the left-most function that returned true as\n// a second return value becomes the final result, without waiting for the other\n// functions to terminate.\n//\n// If one or more functions panic, the corresponding goroutines recover the\n// panics, and ReduceInt eventually panics with the left-most recovered panic\n// value.\nfunc ReduceInt(\n\tjoin func(x, y int) (int, bool),\n\tfirstFunction func() (int, bool),\n\tmoreFunctions ...func() (int, bool),\n) (int, bool) {\n\tif len(moreFunctions) == 0 {\n\t\treturn firstFunction()\n\t}\n\tvar left, right int\n\tvar b0, b1 bool\n\tvar p interface{}\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tif len(moreFunctions) == 1 {\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tright, b1 = moreFunctions[0]()\n\t\t}()\n\t\tleft, b0 = firstFunction()\n\t} else {\n\t\thalf := (len(moreFunctions) + 1) / 2\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tright, b1 = ReduceInt(join, moreFunctions[half], moreFunctions[half+1:]...)\n\t\t}()\n\t\tleft, b0 = ReduceInt(join, firstFunction, moreFunctions[:half]...)\n\t}\n\tif b0 {\n\t\treturn left, true\n\t}\n\twg.Wait()\n\tif p != nil {\n\t\tpanic(p)\n\t}\n\tif b1 {\n\t\treturn right, true\n\t}\n\treturn join(left, right)\n}\n\n// ReduceString receives one or more functions, executes them in parallel, and\n// combines their results with the join function in parallel.\n//\n// Each function is invoked in its own goroutine. 
ReduceString returns either\n// when all functions have terminated with a second return value of false; or\n// when one or more functions return a second return value of true. In the\n// latter case, the first return value of the left-most function that returned\n// true as a second return value becomes the final result, without waiting for\n// the other functions to terminate.\n//\n// If one or more functions panic, the corresponding goroutines recover the\n// panics, and ReduceString eventually panics with the left-most recovered panic\n// value.\nfunc ReduceString(\n\tjoin func(x, y string) (string, bool),\n\tfirstFunction func() (string, bool),\n\tmoreFunctions ...func() (string, bool),\n) (string, bool) {\n\tif len(moreFunctions) == 0 {\n\t\treturn firstFunction()\n\t}\n\tvar left, right string\n\tvar b0, b1 bool\n\tvar p interface{}\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tif len(moreFunctions) == 1 {\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tright, b1 = moreFunctions[0]()\n\t\t}()\n\t\tleft, b0 = firstFunction()\n\t} else {\n\t\thalf := (len(moreFunctions) + 1) / 2\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tright, b1 = ReduceString(join, moreFunctions[half], moreFunctions[half+1:]...)\n\t\t}()\n\t\tleft, b0 = ReduceString(join, firstFunction, moreFunctions[:half]...)\n\t}\n\tif b0 {\n\t\treturn left, true\n\t}\n\twg.Wait()\n\tif p != nil {\n\t\tpanic(p)\n\t}\n\tif b1 {\n\t\treturn right, true\n\t}\n\treturn join(left, right)\n}\n\n// Do receives zero or more thunks and executes them in parallel.\n//\n// Each function is invoked in its own goroutine. Do returns either when all\n// functions have terminated with a return value of false; or when one or more\n// functions return true, without waiting for the other functions to terminate.\n//\n// If one or more thunks panic, the corresponding goroutines recover the panics,\n// and Do may eventually panic with the left-most recovered panic value.\nfunc Do(thunks ...func() bool) bool {\n\tswitch len(thunks) {\n\tcase 0:\n\t\treturn false\n\tcase 1:\n\t\treturn thunks[0]()\n\t}\n\tvar b0, b1 bool\n\tvar p interface{}\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tswitch len(thunks) {\n\tcase 2:\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tb1 = thunks[1]()\n\t\t}()\n\t\tb0 = thunks[0]()\n\tdefault:\n\t\thalf := len(thunks) / 2\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tb1 = Do(thunks[half:]...)\n\t\t}()\n\t\tb0 = Do(thunks[:half]...)\n\t}\n\tif b0 {\n\t\treturn true\n\t}\n\twg.Wait()\n\tif p != nil {\n\t\tpanic(p)\n\t}\n\treturn b1\n}\n\n// And receives zero or more predicate functions and executes them in parallel.\n//\n// Each predicate is invoked in its own goroutine, and And returns true if all\n// of them return true; or And returns false when at least one of them returns\n// false, without waiting for the other predicates to terminate.\n//\n// If one or more predicates panic, the corresponding goroutines recover the\n// panics, and And may eventually panic with the left-most recovered panic\n// value.\nfunc And(predicates ...func() bool) bool {\n\tswitch len(predicates) {\n\tcase 0:\n\t\treturn true\n\tcase 1:\n\t\treturn predicates[0]()\n\t}\n\tvar b0, b1 bool\n\tvar p interface{}\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tswitch len(predicates) 
{\n\tcase 2:\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tb1 = predicates[1]()\n\t\t}()\n\t\tb0 = predicates[0]()\n\tdefault:\n\t\thalf := len(predicates) / 2\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tb1 = And(predicates[half:]...)\n\t\t}()\n\t\tb0 = And(predicates[:half]...)\n\t}\n\tif !b0 {\n\t\treturn false\n\t}\n\twg.Wait()\n\tif p != nil {\n\t\tpanic(p)\n\t}\n\treturn b1\n}\n\n// Or receives zero or more predicate functions and executes them in parallel.\n//\n// Each predicate is invoked in its own goroutine, and Or returns false if all\n// of them return false; or Or returns true when at least one of them returns\n// true, without waiting for the other predicates to terminate.\n//\n// If one or more predicates panic, the corresponding goroutines recover the\n// panics, and Or may eventually panic with the left-most recovered panic value.\nfunc Or(predicates ...func() bool) bool {\n\tswitch len(predicates) {\n\tcase 0:\n\t\treturn false\n\tcase 1:\n\t\treturn predicates[0]()\n\t}\n\tvar b0, b1 bool\n\tvar p interface{}\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tswitch len(predicates) {\n\tcase 2:\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tb1 = predicates[1]()\n\t\t}()\n\t\tb0 = predicates[0]()\n\tdefault:\n\t\thalf := len(predicates) / 2\n\t\tgo func() {\n\t\t\tdefer func() {\n\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tb1 = Or(predicates[half:]...)\n\t\t}()\n\t\tb0 = Or(predicates[:half]...)\n\t}\n\tif b0 {\n\t\treturn true\n\t}\n\twg.Wait()\n\tif p != nil {\n\t\tpanic(p)\n\t}\n\treturn b1\n}\n\n// Range receives a range, a batch count n, and a range function f, divides the\n// range into batches, and invokes the range function for each of these batches\n// in parallel, covering the half-open interval from low to high, including low\n// but excluding high.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// The range function is invoked for each batch in its own goroutine, with 0 <=\n// low <= high, and Range returns either when all range functions have\n// terminated with a return value of false; or when one or more range functions\n// return true, without waiting for the other range functions to terminate.\n//\n// Range panics if high < low, or if n < 0.\n//\n// If one or more range functions panic, the corresponding goroutines recover\n// the panics, and Range may eventually panic with the left-most recovered panic\n// value. 
\nfunc Range(\n\tlow, high, n int,\n\tf func(low, high int) bool,\n) bool {\n\tvar recur func(int, int, int) bool\n\trecur = func(low, high, n int) bool {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn f(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn f(low, high)\n\t\t\t}\n\t\t\tvar b1 bool\n\t\t\tvar p interface{}\n\t\t\tvar wg sync.WaitGroup\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tb1 = recur(mid, high, n-half)\n\t\t\t}()\n\t\t\tif recur(low, mid, half) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\twg.Wait()\n\t\t\tif p != nil {\n\t\t\t\tpanic(p)\n\t\t\t}\n\t\t\treturn b1\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n\n// RangeAnd receives a range, a batch count n, and a range predicate function f,\n// divides the range into batches, and invokes the range predicate for each of\n// these batches in parallel, covering the half-open interval from low to high,\n// including low but excluding high.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// The range predicate is invoked for each batch in its own goroutine, with 0 <=\n// low <= high, and RangeAnd returns true if all of them return true; or\n// RangeAnd returns false when at least one of them returns false, without\n// waiting for the other range predicates to terminate.\n//\n// RangeAnd panics if high < low, or if n < 0.\n//\n// If one or more range predicates panic, the corresponding goroutines recover\n// the panics, and RangeAnd may eventually panic with the left-most recovered\n// panic value.\nfunc RangeAnd(\n\tlow, high, n int,\n\tf func(low, high int) bool,\n) bool {\n\tvar recur func(int, int, int) bool\n\trecur = func(low, high, n int) bool {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn f(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn f(low, high)\n\t\t\t}\n\t\t\tvar b1 bool\n\t\t\tvar p interface{}\n\t\t\tvar wg sync.WaitGroup\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tb1 = recur(mid, high, n-half)\n\t\t\t}()\n\t\t\tif !recur(low, mid, half) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\twg.Wait()\n\t\t\tif p != nil {\n\t\t\t\tpanic(p)\n\t\t\t}\n\t\t\treturn b1\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n\n// RangeOr receives a range, a batch count n, and a range predicate function f,\n// divides the range into batches, and invokes the range predicate for each of\n// these batches in parallel, covering the half-open interval from low to high,\n// including low but excluding high.\n//\n// The range is specified by a low and high integer, with low <= high. 
The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// The range predicate is invoked for each batch in its own goroutine, with 0 <=\n// low <= high, and RangeOr returns false if all of them return false; or\n// RangeOr returns true when at least one of them returns true, without waiting\n// for the other range predicates to terminate.\n//\n// RangeOr panics if high < low, or if n < 0.\n//\n// If one or more range predicates panic, the corresponding goroutines recover\n// the panics, and RangeOr may eventually panic with the left-most recovered\n// panic value.\nfunc RangeOr(\n\tlow, high, n int,\n\tf func(low, high int) bool,\n) bool {\n\tvar recur func(int, int, int) bool\n\trecur = func(low, high, n int) bool {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn f(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn f(low, high)\n\t\t\t}\n\t\t\tvar b1 bool\n\t\t\tvar p interface{}\n\t\t\tvar wg sync.WaitGroup\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tb1 = recur(mid, high, n-half)\n\t\t\t}()\n\t\t\tif recur(low, mid, half) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t\twg.Wait()\n\t\t\tif p != nil {\n\t\t\t\tpanic(p)\n\t\t\t}\n\t\t\treturn b1\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n\n// RangeReduce receives a range, a batch count n, a range reducer function, and\n// a join function, divides the range into batches, and invokes the range\n// reducer for each of these batches in parallel, covering the half-open\n// interval from low to high, including low but excluding high. The results of\n// the range reducer invocations are then combined by repeated invocations of\n// join.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// The range reducer is invoked for each batch in its own goroutine, with 0 <=\n// low <= high, and RangeReduce returns either when all range reducers and joins\n// have terminated with a second return value of false; or when one or more\n// range or join functions return a second return value of true. 
In the latter\n// case, the first return value of the left-most function that returned true as\n// a second return value becomes the final result, without waiting for the other\n// range and pair reducers to terminate.\n//\n// RangeReduce panics if high < low, or if n < 0.\n//\n// If one or more reducer invocations panic, the corresponding goroutines\n// recover the panics, and RangeReduce eventually panics with the left-most\n// recovered panic value.\nfunc RangeReduce(\n\tlow, high, n int,\n\treduce func(low, high int) (interface{}, bool),\n\tjoin func(x, y interface{}) (interface{}, bool),\n) (interface{}, bool) {\n\tvar recur func(int, int, int) (interface{}, bool)\n\trecur = func(low, high, n int) (interface{}, bool) {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn reduce(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn reduce(low, high)\n\t\t\t}\n\t\t\tvar left, right interface{}\n\t\t\tvar b0, b1 bool\n\t\t\tvar p interface{}\n\t\t\tvar wg sync.WaitGroup\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright, b1 = recur(mid, high, n-half)\n\t\t\t}()\n\t\t\tleft, b0 = recur(low, mid, half)\n\t\t\tif b0 {\n\t\t\t\treturn left, true\n\t\t\t}\n\t\t\twg.Wait()\n\t\t\tif p != nil {\n\t\t\t\tpanic(p)\n\t\t\t}\n\t\t\tif b1 {\n\t\t\t\treturn right, true\n\t\t\t}\n\t\t\treturn join(left, right)\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n\n// RangeReduceInt receives a range, a batch count n, a range reducer function,\n// and a join function, divides the range into batches, and invokes the range\n// reducer for each of these batches in parallel, covering the half-open\n// interval from low to high, including low but excluding high. The results of\n// the range reducer invocations are then combined by repeated invocations of\n// join.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// The range reducer is invoked for each batch in its own goroutine, with 0 <=\n// low <= high, and RangeReduceInt returns either when all range reducers and\n// joins have terminated with a second return value of false; or when one or\n// more range or join functions return a second return value of true. 
In the\n// latter case, the first return value of the left-most function that returned\n// true as a second return value becomes the final result, without waiting for\n// the other range and pair reducers to terminate.\n//\n// RangeReduceInt panics if high < low, or if n < 0.\n//\n// If one or more reducer invocations panic, the corresponding goroutines\n// recover the panics, and RangeReduceInt eventually panics with the left-most\n// recovered panic value.\nfunc RangeReduceInt(\n\tlow, high, n int,\n\treduce func(low, high int) (int, bool),\n\tjoin func(x, y int) (int, bool),\n) (int, bool) {\n\tvar recur func(int, int, int) (int, bool)\n\trecur = func(low, high, n int) (int, bool) {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn reduce(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn reduce(low, high)\n\t\t\t}\n\t\t\tvar left, right int\n\t\t\tvar b0, b1 bool\n\t\t\tvar p interface{}\n\t\t\tvar wg sync.WaitGroup\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright, b1 = recur(mid, high, n-half)\n\t\t\t}()\n\t\t\tleft, b0 = recur(low, mid, half)\n\t\t\tif b0 {\n\t\t\t\treturn left, true\n\t\t\t}\n\t\t\twg.Wait()\n\t\t\tif p != nil {\n\t\t\t\tpanic(p)\n\t\t\t}\n\t\t\tif b1 {\n\t\t\t\treturn right, true\n\t\t\t}\n\t\t\treturn join(left, right)\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n\n// RangeReduceFloat64 receives a range, a batch count n, a range reducer\n// function, and a join function, divides the range into batches, and invokes\n// the range reducer for each of these batches in parallel, covering the\n// half-open interval from low to high, including low but excluding high. The\n// results of the range reducer invocations are then combined by repeated\n// invocations of join.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// The range reducer is invoked for each batch in its own goroutine, with 0 <=\n// low <= high, and RangeReduceFloat64 returns either when all range reducers\n// and joins have terminated with a second return value of false; or when one or\n// more range or join functions return a second return value of true. 
In the\n// latter case, the first return value of the left-most function that returned\n// true as a second return value becomes the final result, without waiting for\n// the other range and pair reducers to terminate.\n//\n// RangeReduceFloat64 panics if high < low, or if n < 0.\n//\n// If one or more reducer invocations panic, the corresponding goroutines\n// recover the panics, and RangeReduceFloat64 eventually panics with the\n// left-most recovered panic value.\nfunc RangeReduceFloat64(\n\tlow, high, n int,\n\treduce func(low, high int) (float64, bool),\n\tjoin func(x, y float64) (float64, bool),\n) (float64, bool) {\n\tvar recur func(int, int, int) (float64, bool)\n\trecur = func(low, high, n int) (float64, bool) {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn reduce(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn reduce(low, high)\n\t\t\t}\n\t\t\tvar left, right float64\n\t\t\tvar b0, b1 bool\n\t\t\tvar p interface{}\n\t\t\tvar wg sync.WaitGroup\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright, b1 = recur(mid, high, n-half)\n\t\t\t}()\n\t\t\tleft, b0 = recur(low, mid, half)\n\t\t\tif b0 {\n\t\t\t\treturn left, true\n\t\t\t}\n\t\t\twg.Wait()\n\t\t\tif p != nil {\n\t\t\t\tpanic(p)\n\t\t\t}\n\t\t\tif b1 {\n\t\t\t\treturn right, true\n\t\t\t}\n\t\t\treturn join(left, right)\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n\n// RangeReduceString receives a range, a batch count n, a range reducer\n// function, and a join function, divides the range into batches, and invokes\n// the range reducer for each of these batches in parallel, covering the\n// half-open interval from low to high, including low but excluding high. The\n// results of the range reducer invocations are then combined by repeated\n// invocations of join.\n//\n// The range is specified by a low and high integer, with low <= high. The\n// batches are determined by dividing up the size of the range (high - low) by\n// n. If n is 0, a reasonable default is used that takes runtime.GOMAXPROCS(0)\n// into account.\n//\n// The range reducer is invoked for each batch in its own goroutine, with 0 <=\n// low <= high, and RangeReduceString returns either when all range reducers and\n// joins have terminated with a second return value of false; or when one or\n// more range or join functions return a second return value of true. 
In the\n// latter case, the first return value of the left-most function that returned\n// true as a second return value becomes the final result, without waiting for\n// the other range and pair reducers to terminate.\n//\n// RangeReduceString panics if high < low, or if n < 0.\n//\n// If one or more reducer invocations panic, the corresponding goroutines\n// recover the panics, and RangeReduceString eventually panics with the\n// left-most recovered panic value.\nfunc RangeReduceString(\n\tlow, high, n int,\n\treduce func(low, high int) (string, bool),\n\tjoin func(x, y string) (string, bool),\n) (string, bool) {\n\tvar recur func(int, int, int) (string, bool)\n\trecur = func(low, high, n int) (string, bool) {\n\t\tswitch {\n\t\tcase n == 1:\n\t\t\treturn reduce(low, high)\n\t\tcase n > 1:\n\t\t\tbatchSize := ((high - low - 1) / n) + 1\n\t\t\thalf := n / 2\n\t\t\tmid := low + batchSize*half\n\t\t\tif mid >= high {\n\t\t\t\treturn reduce(low, high)\n\t\t\t}\n\t\t\tvar left, right string\n\t\t\tvar b0, b1 bool\n\t\t\tvar p interface{}\n\t\t\tvar wg sync.WaitGroup\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright, b1 = recur(mid, high, n-half)\n\t\t\t}()\n\t\t\tleft, b0 = recur(low, mid, half)\n\t\t\tif b0 {\n\t\t\t\treturn left, true\n\t\t\t}\n\t\t\twg.Wait()\n\t\t\tif p != nil {\n\t\t\t\tpanic(p)\n\t\t\t}\n\t\t\tif b1 {\n\t\t\t\treturn right, true\n\t\t\t}\n\t\t\treturn join(left, right)\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid number of batches: %v\", n))\n\t\t}\n\t}\n\treturn recur(low, high, internal.ComputeNofBatches(low, high, n))\n}\n"
  },
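  {
    "path": "speculative/reduce_example_test.go",
    "content": "// This file is an illustrative usage sketch rather than part of the original\n// library sources. It assumes that RangeReduceInt is the function exported by\n// the pargo/speculative package with the reducer and join signatures shown in\n// that package; the file name and the example itself are illustrative only.\npackage speculative_test\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/exascience/pargo/speculative\"\n)\n\n// ExampleRangeReduceInt looks for the first index in a slice that holds a\n// target value. Each batch reducer scans its subrange and returns early with a\n// second return value of true as soon as it finds the target; the join\n// function only combines batches that did not find it, and the left-most\n// successful batch determines the final result.\nfunc ExampleRangeReduceInt() {\n\ts := []int{7, 3, 9, 4, 9, 1}\n\ttarget := 9\n\tindex, found := speculative.RangeReduceInt(\n\t\t0, len(s), 0,\n\t\tfunc(low, high int) (int, bool) {\n\t\t\tfor i := low; i < high; i++ {\n\t\t\t\tif s[i] == target {\n\t\t\t\t\treturn i, true\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn -1, false\n\t\t},\n\t\tfunc(x, y int) (int, bool) {\n\t\t\t// Neither batch found the target, so keep the not-found marker.\n\t\t\treturn -1, false\n\t\t},\n\t)\n\tfmt.Println(index, found)\n\t// Output: 2 true\n}\n"
  },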
  {
    "path": "sync/map.go",
    "content": "// Package sync provides synchronization primitives similar to the sync package\n// of Go's standard library, but here with a focus on parallel performance\n// rather than concurrency. So far, this package only provides support for a\n// parallel map that can be used to some extent as a drop-in replacement for the\n// concurrent map of the standard library. For other synchronization\n// primitives, such as condition variables, mutual exclusion locks, object\n// pools, or atomic memory primitives, please use the standard library.\npackage sync\n\nimport (\n\t\"runtime\"\n\t\"sync\"\n\n\t\"github.com/exascience/pargo/internal\"\n\n\t\"github.com/exascience/pargo/parallel\"\n\t\"github.com/exascience/pargo/speculative\"\n)\n\n// A Hasher represents an object that has a hash value, which is needed by Map.\n//\n// If Go would allow access to the predefined hash functions for Go types, this\n// interface would not be needed.\ntype Hasher interface {\n\tHash() uint64\n}\n\n// A Split is a partial map that belongs to a larger Map, which can be\n// individually locked. Its enclosed map can then be individually accessed\n// without blocking accesses to other splits.\ntype Split struct {\n\tsync.RWMutex\n\tMap map[interface{}]interface{}\n}\n\n// A Map is a parallel map that consists of several split maps that can be\n// individually locked and accessed.\n//\n// The zero Map is not valid.\ntype Map struct {\n\tsplits []Split\n}\n\n// NewMap returns a map with size splits.\n//\n// If size is <= 0, runtime.GOMAXPROCS(0) is used instead.\nfunc NewMap(size int) *Map {\n\tif size <= 0 {\n\t\tsize = runtime.GOMAXPROCS(0)\n\t}\n\tsplits := make([]Split, size)\n\tfor i := range splits {\n\t\tsplits[i].Map = make(map[interface{}]interface{})\n\t}\n\treturn &Map{splits}\n}\n\n// Split retrieves the split for a particular key.\n//\n// The split must be locked/unlocked properly by user programs to safely access\n// its contents. In many cases, it is easier to use one of the high-level\n// methods, like Load, LoadOrStore, LoadOrCompute, Delete, DeleteOrStore,\n// DeleteOrCompute, and Modify, which implicitly take care of proper locking.\nfunc (m *Map) Split(key Hasher) *Split {\n\tsplits := m.splits\n\treturn &splits[key.Hash()%uint64(len(splits))]\n}\n\n// Delete deletes the value for a key.\nfunc (m *Map) Delete(key Hasher) {\n\tsplit := m.Split(key)\n\tsplit.Lock()\n\tdelete(split.Map, key)\n\tsplit.Unlock()\n}\n\n// Load returns the value stored in the map for a key, or nil if no value is\n// present. The ok result indicates whether value was found in the map.\nfunc (m *Map) Load(key Hasher) (value interface{}, ok bool) {\n\tsplit := m.Split(key)\n\tsplit.RLock()\n\tvalue, ok = split.Map[key]\n\tsplit.RUnlock()\n\treturn\n}\n\n// LoadOrStore returns the existing value for the key if present. Otherwise, it\n// stores and returns the given value. The loaded result is true if the value\n// was loaded, false if stored.\nfunc (m *Map) LoadOrStore(key Hasher, value interface{}) (actual interface{}, loaded bool) {\n\tsplit := m.Split(key)\n\tsplit.RLock()\n\tactual, loaded = split.Map[key]\n\tsplit.RUnlock()\n\tif loaded {\n\t\treturn\n\t}\n\tsplit.Lock()\n\tif actual, loaded = split.Map[key]; !loaded {\n\t\tactual = value\n\t\tsplit.Map[key] = value\n\t}\n\tsplit.Unlock()\n\treturn\n}\n\n// LoadOrCompute returns the existing value for the key if present. Otherwise,\n// it calls computer, and then stores and returns the computed value. 
The loaded\n// result is true if the value was loaded, false if stored.\n//\n// The computer function is invoked either zero times or once. While computer is\n// executing no locks related to this map are being held.\n//\n// The computed value may not be stored and returned, since a parallel thread\n// may have successfully stored a value for the key in the meantime. In that\n// case, the value stored by the parallel thread is returned instead.\nfunc (m *Map) LoadOrCompute(key Hasher, computer func() interface{}) (actual interface{}, loaded bool) {\n\tsplit := m.Split(key)\n\tsplit.RLock()\n\tactual, loaded = split.Map[key]\n\tsplit.RUnlock()\n\tif loaded {\n\t\treturn\n\t}\n\tvalue := computer()\n\tsplit.Lock()\n\tif actual, loaded = split.Map[key]; !loaded {\n\t\tactual = value\n\t\tsplit.Map[key] = actual\n\t}\n\tsplit.Unlock()\n\treturn\n}\n\n// DeleteOrStore deletes and returns the existing value for the key if present.\n// Otherwise, it stores and returns the given value. The deleted result is true\n// if the value was deleted, false if stored.\nfunc (m *Map) DeleteOrStore(key Hasher, value interface{}) (actual interface{}, deleted bool) {\n\tsplit := m.Split(key)\n\tsplit.Lock()\n\tif actual, deleted = split.Map[key]; deleted {\n\t\tdelete(split.Map, key)\n\t} else {\n\t\tactual = value\n\t\tsplit.Map[key] = value\n\t}\n\tsplit.Unlock()\n\treturn\n}\n\n// DeleteOrCompute deletes and returns the existing value for the key if\n// present. Otherwise, it calls computer, and then stores and returns the\n// computed value. The deleted result is true if the value was deleted, false if\n// stored.\n//\n// The computer function is invoked either zero times or once. While computer is\n// executing, a lock is being held on a portion of the map, so the function\n// should be brief.\nfunc (m *Map) DeleteOrCompute(key Hasher, computer func() interface{}) (actual interface{}, deleted bool) {\n\tsplit := m.Split(key)\n\tsplit.Lock()\n\tif actual, deleted = split.Map[key]; deleted {\n\t\tdelete(split.Map, key)\n\t} else {\n\t\tactual = computer()\n\t\tsplit.Map[key] = actual\n\t}\n\tsplit.Unlock()\n\treturn\n}\n\n// Modify looks up a value for the key if present and passes it to the modifier.\n// The ok parameter indicates whether value was found in the map. The\n// replacement returned by the modifier is then stored as a value for key in the\n// map if storeNotDelete is true, otherwise the value is deleted from the map.\n// Modify returns the same results as modifier.\n//\n// The modifier is invoked exactly once. While modifier is executing, a lock is\n// being held on a portion of the map, so the function should be brief.\n//\n// This is the most general modification function for parallel maps. 
Other\n// functions that modify the map are potentially more efficient, so it is better\n// to be more specific if possible.\nfunc (m *Map) Modify(\n\tkey Hasher,\n\tmodifier func(value interface{}, ok bool) (replacement interface{}, storeNotDelete bool),\n) (replacement interface{}, storeNotDelete bool) {\n\tsplit := m.Split(key)\n\tsplit.Lock()\n\tvalue, ok := split.Map[key]\n\tif replacement, storeNotDelete = modifier(value, ok); storeNotDelete {\n\t\tsplit.Map[key] = replacement\n\t} else {\n\t\tdelete(split.Map, key)\n\t}\n\tsplit.Unlock()\n\treturn\n}\n\nfunc (split *Split) srange(f func(key, value interface{}) bool) bool {\n\tsplit.Lock()\n\tdefer split.Unlock()\n\tfor key, value := range split.Map {\n\t\tif !f(key, value) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// Range calls f sequentially for each key and value present in the map. If f\n// returns false, Range stops the iteration.\n//\n// While iterating through a split of m, Range holds the corresponding lock.\n//\n// Range does not necessarily correspond to any consistent snapshot of the Map's\n// contents: no key will be visited more than once, but if the value for any key\n// is stored or deleted concurrently, Range may reflect any mapping for that key\n// from any point during the Range call.\nfunc (m *Map) Range(f func(key, value interface{}) bool) {\n\tsplits := m.splits\n\tfor i := 0; i < len(splits); i++ {\n\t\tif !splits[i].srange(f) {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (split *Split) parallelRange(f func(key, value interface{})) {\n\tsplit.Lock()\n\tdefer split.Unlock()\n\tfor key, value := range split.Map {\n\t\tf(key, value)\n\t}\n}\n\n// ParallelRange calls f in parallel for each key and value present in the map.\n//\n// While iterating through a split of m, ParallelRange holds the corresponding\n// lock.\n//\n// ParallelRange does not necessarily correspond to any consistent snapshot of\n// the Map's contents: no key will be visited more than once, but if the value\n// for any key is stored or deleted concurrently, ParallelRange may reflect any\n// mapping for that key from any point during the Range call.\nfunc (m *Map) ParallelRange(f func(key, value interface{})) {\n\tsplits := m.splits\n\tparallel.Range(0, len(splits), 0, func(low, high int) {\n\t\tfor i := low; i < high; i++ {\n\t\t\tsplits[i].parallelRange(f)\n\t\t}\n\t})\n}\n\n// SpeculativeRange calls f in parallel for each key and value present in the\n// map. If f returns false, SpeculativeRange stops the iteration, and returns\n// without waiting for the other goroutines that it invoked to terminate.\n//\n// While iterating through a split of m, SpeculativeRange holds the\n// corresponding lock.\n//\n// SpeculativeRange is useful as an alternative to ParallelRange in cases where\n// ParallelRange tends to use computational resources for too long when false is\n// a common and/or early return value for f. 
On the other hand, SpeculativeRange\n// adds overhead, so for cases where false is an uncommon and/or late return\n// value for f, it may be more efficient to use ParallelRange.\n//\n// SpeculativeRange does not necessarily correspond to any consistent snapshot\n// of the Map's contents: no key will be visited more than once, but if the\n// value for any key is stored or deleted concurrently, SpeculativeRange may\n// reflect any mapping for that key from any point during the Range call.\nfunc (m *Map) SpeculativeRange(f func(key, value interface{}) bool) {\n\tsplits := m.splits\n\tspeculative.Range(0, len(splits), 0, func(low, high int) bool {\n\t\tfor i := low; i < high; i++ {\n\t\t\tif !splits[i].srange(f) {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t})\n}\n\nfunc (split *Split) predicate(p func(map[interface{}]interface{}) bool) bool {\n\tsplit.Lock()\n\tdefer split.Unlock()\n\treturn p(split.Map)\n}\n\n// And calls predicate for every split of m sequentially. If any predicate\n// invocation returns false, And immediately terminates and also returns false.\n// Otherwise, And returns true.\n//\n// While predicate is executed on a split of m, And holds the corresponding\n// lock.\n//\n// And does not necessarily correspond to any consistent snapshot of the Map's\n// contents: no split will be visited more than once, but if the value for any\n// key is stored or deleted concurrently, And may reflect any mapping for that\n// key from any point during the And call.\nfunc (m *Map) And(predicate func(map[interface{}]interface{}) bool) bool {\n\tsplits := m.splits\n\tfor i := 0; i < len(splits); i++ {\n\t\tif ok := splits[i].predicate(predicate); !ok {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// ParallelAnd calls predicate for every split of m in parallel. 
The results of\n// the predicate invocations are then combined with the && operator.\n//\n// ParallelAnd returns only when all goroutines it spawns have terminated.\n//\n// While predicate is executed on a split of m, ParallelAnd holds the\n// corresponding lock.\n//\n// If one or more predicate invocations panic, the corresponding goroutines\n// recover the panics, and ParallelAnd eventually panics with the left-most\n// recovered panic value.\n//\n// ParallelAnd does not necessarily correspond to any consistent snapshot of the\n// Map's contents: no split will be visited more than once, but if the value for\n// any key is stored or deleted concurrently, ParallelAnd may reflect any\n// mapping for that key from any point during the ParallelAnd call.\nfunc (m *Map) ParallelAnd(predicate func(map[interface{}]interface{}) bool) bool {\n\tsplits := m.splits\n\treturn parallel.RangeAnd(0, len(splits), 0, func(low, high int) bool {\n\t\tfor i := low; i < high; i++ {\n\t\t\tif ok := splits[i].predicate(predicate); !ok {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t})\n}\n\n// SpeculativeAnd calls predicate for every split of m in parallel.\n// SpeculativeAnd returns true if all predicate invocations return true; or\n// SpeculativeAnd returns false when at least one of them returns false, without\n// waiting for the other predicates to terminate.\n//\n// While predicate is executed on a split of m, SpeculativeAnd holds the\n// corresponding lock.\n//\n// If one or more predicate invocations panic, the corresponding goroutines\n// recover the panics, and SpeculativeAnd eventually panics with the left-most\n// recovered panic value.\n//\n// SpeculativeAnd does not necessarily correspond to any consistent snapshot of\n// the Map's contents: no split will be visited more than once, but if the value\n// for any key is stored or deleted concurrently, SpeculativeAnd may reflect any\n// mapping for that key from any point during the SpeculativeAnd call.\nfunc (m *Map) SpeculativeAnd(predicate func(map[interface{}]interface{}) bool) bool {\n\tsplits := m.splits\n\treturn speculative.RangeAnd(0, len(splits), 0, func(low, high int) bool {\n\t\tfor i := low; i < high; i++ {\n\t\t\tif ok := splits[i].predicate(predicate); !ok {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t})\n}\n\n// Or calls predicate for every split of m sequentially. If any predicate\n// invocation returns true, Or immediately terminates and also returns true.\n// Otherwise, Or returns false.\n//\n// While predicate is executed on a split of m, Or holds the corresponding lock.\n//\n// Or does not necessarily correspond to any consistent snapshot of the Map's\n// contents: no split will be visited more than once, but if the value for any\n// key is stored or deleted concurrently, Or may reflect any mapping for that\n// key from any point during the Or call.\nfunc (m *Map) Or(predicate func(map[interface{}]interface{}) bool) bool {\n\tsplits := m.splits\n\tfor i := 0; i < len(splits); i++ {\n\t\tif ok := splits[i].predicate(predicate); ok {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// ParallelOr calls predicate for every split of m in parallel. 
The results of\n// the predicate invocations are then combined with the || operator.\n//\n// ParallelOr returns only when all goroutines it spawns have terminated.\n//\n// While predicate is executed on a split of m, ParallelOr holds the\n// corresponding lock.\n//\n// If one or more predicate invocations panic, the corresponding goroutines\n// recover the panics, and ParallelOr eventually panics with the left-most\n// recovered panic value.\n//\n// ParallelOr does not necessarily correspond to any consistent snapshot of the\n// Map's contents: no split will be visited more than once, but if the value for\n// any key is stored or deleted concurrently, ParallelOr may reflect any mapping\n// for that key from any point during the ParallelOr call.\nfunc (m *Map) ParallelOr(predicate func(map[interface{}]interface{}) bool) bool {\n\tsplits := m.splits\n\treturn parallel.RangeOr(0, len(splits), 0, func(low, high int) bool {\n\t\tfor i := low; i < high; i++ {\n\t\t\tif ok := splits[i].predicate(predicate); ok {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t})\n}\n\n// SpeculativeOr calls predicate for every split of m in parallel. SpeculativeOr\n// returns false if all predicate invocations return false; or SpeculativeOr\n// returns true when at least one of them returns true, without waiting for the\n// other predicates to terminate.\n//\n// While predicate is executed on a split of m, SpeculativeOr holds the\n// corresponding lock.\n//\n// If one or more predicate invocations panic, the corresponding goroutines\n// recover the panics, and SpeculativeOr eventually panics with the left-most\n// recovered panic value.\n//\n// SpeculativeOr does not necessarily correspond to any consistent snapshot of\n// the Map's contents: no split will be visited more than once, but if the value\n// for any key is stored or deleted concurrently, SpeculativeOr may reflect any\n// mapping for that key from any point during the SpeculativeOr call.\nfunc (m *Map) SpeculativeOr(predicate func(map[interface{}]interface{}) bool) bool {\n\tsplits := m.splits\n\treturn speculative.RangeOr(0, len(splits), 0, func(low, high int) bool {\n\t\tfor i := low; i < high; i++ {\n\t\t\tif ok := splits[i].predicate(predicate); ok {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t})\n}\n\nfunc (split *Split) reduce(r func(map[interface{}]interface{}) interface{}) interface{} {\n\tsplit.Lock()\n\tdefer split.Unlock()\n\treturn r(split.Map)\n}\n\n// Reduce calls reduce for every split of m sequentially. 
The results of the\n// reduce invocations are then combined by repeated invocations of the join\n// function.\n//\n// While reduce is executed on a split of m, Reduce holds the corresponding\n// lock.\n//\n// Reduce does not necessarily correspond to any consistent snapshot of the\n// Map's contents: no split will be visited more than once, but if the value for\n// any key is stored or deleted concurrently, Reduce may reflect any mapping for\n// that key from any point during the Reduce call.\nfunc (m *Map) Reduce(\n\treduce func(map[interface{}]interface{}) interface{},\n\tjoin func(x, y interface{}) interface{},\n) interface{} {\n\tsplits := m.splits\n\t// NewMap ensures that len(splits) > 0\n\tresult := splits[0].reduce(reduce)\n\tfor i := 1; i < len(splits); i++ {\n\t\tresult = join(result, splits[i].reduce(reduce))\n\t}\n\treturn result\n}\n\nfunc (split *Split) reduceFloat64(r func(map[interface{}]interface{}) float64) float64 {\n\tsplit.Lock()\n\tdefer split.Unlock()\n\treturn r(split.Map)\n}\n\n// ReduceFloat64 calls reduce for every split of m sequentially. The results of\n// the reduce invocations are then combined by repeated invocations of the join\n// function.\n//\n// While reduce is executed on a split of m, ReduceFloat64 holds the\n// corresponding lock.\n//\n// ReduceFloat64 does not necessarily correspond to any consistent snapshot of\n// the Map's contents: no split will be visited more than once, but if the value\n// for any key is stored or deleted concurrently, ReduceFloat64 may reflect any\n// mapping for that key from any point during the ReduceFloat64 call.\nfunc (m *Map) ReduceFloat64(\n\treduce func(map[interface{}]interface{}) float64,\n\tjoin func(x, y float64) float64,\n) float64 {\n\tsplits := m.splits\n\t// NewMap ensures that len(splits) > 0\n\tresult := splits[0].reduceFloat64(reduce)\n\tfor i := 1; i < len(splits); i++ {\n\t\tresult = join(result, splits[i].reduceFloat64(reduce))\n\t}\n\treturn result\n}\n\n// ReduceFloat64Sum calls reduce for every split of m sequentially. The results\n// of the reduce invocations are then added together.\n//\n// While reduce is executed on a split of m, ReduceFloat64Sum holds the\n// corresponding lock.\n//\n// ReduceFloat64Sum does not necessarily correspond to any consistent snapshot\n// of the Map's contents: no split will be visited more than once, but if the\n// value for any key is stored or deleted concurrently, ReduceFloat64Sum may\n// reflect any mapping for that key from any point during the ReduceFloat64Sum\n// call.\nfunc (m *Map) ReduceFloat64Sum(reduce func(map[interface{}]interface{}) float64) float64 {\n\tresult := float64(0)\n\tsplits := m.splits\n\tfor i := 0; i < len(splits); i++ {\n\t\tresult += splits[i].reduceFloat64(reduce)\n\t}\n\treturn result\n}\n\n// ReduceFloat64Product calls reduce for every split of m sequentially. 
The\n// results of the reduce invocations are then multiplied with each other.\n//\n// While reduce is executed on a split of m, ReduceFloat64Product holds the\n// corresponding lock.\n//\n// ReduceFloat64Product does not necessarily correspond to any consistent\n// snapshot of the Map's contents: no split will be visited more than once, but\n// if the value for any key is stored or deleted concurrently,\n// ReduceFloat64Product may reflect any mapping for that key from any point\n// during the ReduceFloat64Product call.\nfunc (m *Map) ReduceFloat64Product(reduce func(map[interface{}]interface{}) float64) float64 {\n\tresult := float64(1)\n\tsplits := m.splits\n\tfor i := 0; i < len(splits); i++ {\n\t\tresult *= splits[i].reduceFloat64(reduce)\n\t}\n\treturn result\n}\n\nfunc (split *Split) reduceInt(r func(map[interface{}]interface{}) int) int {\n\tsplit.Lock()\n\tdefer split.Unlock()\n\treturn r(split.Map)\n}\n\n// ReduceInt calls reduce for every split of m sequentially. The results of the\n// reduce invocations are then combined by repeated invocations of the join\n// function.\n//\n// While reduce is executed on a split of m, ReduceInt holds the corresponding\n// lock.\n//\n// ReduceInt does not necessarily correspond to any consistent snapshot of the\n// Map's contents: no split will be visited more than once, but if the value for\n// any key is stored or deleted concurrently, ReduceInt may reflect any mapping\n// for that key from any point during the ReduceInt call.\nfunc (m *Map) ReduceInt(\n\treduce func(map[interface{}]interface{}) int,\n\tjoin func(x, y int) int,\n) int {\n\tsplits := m.splits\n\t// NewMap ensures that len(splits) > 0\n\tresult := splits[0].reduceInt(reduce)\n\tfor i := 1; i < len(splits); i++ {\n\t\tresult = join(result, splits[i].reduceInt(reduce))\n\t}\n\treturn result\n}\n\n// ReduceIntSum calls reduce for every split of m sequentially. The results of\n// the reduce invocations are then added together.\n//\n// While reduce is executed on a split of m, ReduceIntSum holds the\n// corresponding lock.\n//\n// ReduceIntSum does not necessarily correspond to any consistent snapshot of\n// the Map's contents: no split will be visited more than once, but if the value\n// for any key is stored or deleted concurrently, ReduceIntSum may reflect any\n// mapping for that key from any point during the ReduceIntSum call.\nfunc (m *Map) ReduceIntSum(reduce func(map[interface{}]interface{}) int) int {\n\tresult := 0\n\tsplits := m.splits\n\tfor i := 0; i < len(splits); i++ {\n\t\tresult += splits[i].reduceInt(reduce)\n\t}\n\treturn result\n}\n\n// ReduceIntProduct calls reduce for every split of m sequentially. 
The results\n// of the reduce invocations are then multiplied with each other.\n//\n// While reduce is executed on a split of m, ReduceIntProduct holds the\n// corresponding lock.\n//\n// ReduceIntProduct does not necessarily correspond to any consistent snapshot\n// of the Map's contents: no split will be visited more than once, but if the\n// value for any key is stored or deleted concurrently, ReduceIntProduct may\n// reflect any mapping for that key from any point during the ReduceIntProduct\n// call.\nfunc (m *Map) ReduceIntProduct(reduce func(map[interface{}]interface{}) int) int {\n\tresult := 1\n\tsplits := m.splits\n\tfor i := 0; i < len(splits); i++ {\n\t\tresult *= splits[i].reduceInt(reduce)\n\t}\n\treturn result\n}\n\nfunc (split *Split) reduceString(r func(map[interface{}]interface{}) string) string {\n\tsplit.Lock()\n\tdefer split.Unlock()\n\treturn r(split.Map)\n}\n\n// ReduceString calls reduce for every split of m sequentially. The results of\n// the reduce invocations are then combined by repeated invocations of the join\n// function.\n//\n// While reduce is executed on a split of m, ReduceString holds the\n// corresponding lock.\n//\n// ReduceString does not necessarily correspond to any consistent snapshot of\n// the Map's contents: no split will be visited more than once, but if the value\n// for any key is stored or deleted concurrently, ReduceString may reflect any\n// mapping for that key from any point during the ReduceString call.\nfunc (m *Map) ReduceString(\n\treduce func(map[interface{}]interface{}) string,\n\tjoin func(x, y string) string,\n) string {\n\tsplits := m.splits\n\t// NewMap ensures that len(splits) > 0\n\tresult := splits[0].reduceString(reduce)\n\tfor i := 1; i < len(splits); i++ {\n\t\tresult = join(result, splits[i].reduceString(reduce))\n\t}\n\treturn result\n}\n\n// ReduceStringSum calls reduce for every split of m sequentially. The results\n// of the reduce invocations are then concatenated with each other.\n//\n// While reduce is executed on a split of m, ReduceStringSum holds the\n// corresponding lock.\n//\n// ReduceStringSum does not necessarily correspond to any consistent snapshot of\n// the Map's contents: no split will be visited more than once, but if the value\n// for any key is stored or deleted concurrently, ReduceStringSum may reflect\n// any mapping for that key from any point during the ReduceStringSum call.\nfunc (m *Map) ReduceStringSum(reduce func(map[interface{}]interface{}) string) string {\n\tresult := \"\"\n\tsplits := m.splits\n\tfor i := 0; i < len(splits); i++ {\n\t\tresult += splits[i].reduceString(reduce)\n\t}\n\treturn result\n}\n\n// ParallelReduce calls reduce for every split of m in parallel. 
The results of\n// the reduce invocations are then combined by repeated invocations of the join\n// function.\n//\n// ParallelReduce returns only when all goroutines it spawns have terminated.\n//\n// While reduce is executed on a split of m, ParallelReduce holds the\n// corresponding lock.\n//\n// If one or more reduce invocations panic, the corresponding goroutines recover\n// the panics, and ParallelReduce eventually panics with the left-most recovered\n// panic value.\n//\n// ParallelReduce does not necessarily correspond to any consistent snapshot of\n// the Map's contents: no split will be visited more than once, but if the value\n// for any key is stored or deleted concurrently, ParallelReduce may reflect any\n// mapping for that key from any point during the ParallelReduce call.\nfunc (m *Map) ParallelReduce(\n\treduce func(map[interface{}]interface{}) interface{},\n\tjoin func(x, y interface{}) interface{},\n) interface{} {\n\tvar recur func(splits []Split) interface{}\n\trecur = func(splits []Split) interface{} {\n\t\tif len(splits) < 2 {\n\t\t\t// NewMap and case 2 below ensure that len(splits) > 0\n\t\t\treturn splits[0].reduce(reduce)\n\t\t}\n\t\tvar left, right interface{}\n\t\tvar p interface{}\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tswitch len(splits) {\n\t\tcase 2:\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = splits[1].reduce(reduce)\n\t\t\t}()\n\t\t\tleft = splits[0].reduce(reduce)\n\t\tdefault:\n\t\t\thalf := len(splits) / 2\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = recur(splits[half:])\n\t\t\t}()\n\t\t\tleft = recur(splits[:half])\n\t\t}\n\t\twg.Wait()\n\t\tif p != nil {\n\t\t\tpanic(p)\n\t\t}\n\t\treturn join(left, right)\n\t}\n\treturn recur(m.splits)\n}\n\n// ParallelReduceFloat64 calls reduce for every split of m in parallel. 
The\n// results of the reduce invocations are then combined by repeated invocations\n// of the join function.\n//\n// ParallelReduceFloat64 returns only when all goroutines it spawns have\n// terminated.\n//\n// While reduce is executed on a split of m, ParallelReduceFloat64 holds the\n// corresponding lock.\n//\n// If one or more reduce invocations panic, the corresponding goroutines recover\n// the panics, and ParallelReduceFloat64 eventually panics with the left-most\n// recovered panic value.\n//\n// ParallelReduceFloat64 does not necessarily correspond to any consistent\n// snapshot of the Map's contents: no split will be visited more than once, but\n// if the value for any key is stored or deleted concurrently,\n// ParallelReduceFloat64 may reflect any mapping for that key from any point\n// during the ParallelReduceFloat64 call.\nfunc (m *Map) ParallelReduceFloat64(\n\treduce func(map[interface{}]interface{}) float64,\n\tjoin func(x, y float64) float64,\n) float64 {\n\tvar recur func(splits []Split) float64\n\trecur = func(splits []Split) float64 {\n\t\tif len(splits) < 2 {\n\t\t\t// NewMap and case 2 below ensure that len(splits) > 0\n\t\t\treturn splits[0].reduceFloat64(reduce)\n\t\t}\n\t\tvar left, right float64\n\t\tvar p interface{}\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tswitch len(splits) {\n\t\tcase 2:\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = splits[1].reduceFloat64(reduce)\n\t\t\t}()\n\t\t\tleft = splits[0].reduceFloat64(reduce)\n\t\tdefault:\n\t\t\thalf := len(splits) / 2\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = recur(splits[half:])\n\t\t\t}()\n\t\t\tleft = recur(splits[:half])\n\t\t}\n\t\twg.Wait()\n\t\tif p != nil {\n\t\t\tpanic(p)\n\t\t}\n\t\treturn join(left, right)\n\t}\n\treturn recur(m.splits)\n}\n\n// ParallelReduceFloat64Sum calls reduce for every split of m in parallel. 
The\n// results of the reduce invocations are then added together.\n//\n// ParallelReduceFloat64Sum returns only when all goroutines it spawns have\n// terminated.\n//\n// While reduce is executed on a split of m, ParallelReduceFloat64Sum holds the\n// corresponding lock.\n//\n// If one or more reduce invocations panic, the corresponding goroutines recover\n// the panics, and ParallelReduceFloat64Sum eventually panics with the left-most\n// recovered panic value.\n//\n// ParallelReduceFloat64Sum does not necessarily correspond to any consistent\n// snapshot of the Map's contents: no split will be visited more than once, but\n// if the value for any key is stored or deleted concurrently,\n// ParallelReduceFloat64Sum may reflect any mapping for that key from any point\n// during the ParallelReduceFloat64Sum call.\nfunc (m *Map) ParallelReduceFloat64Sum(reduce func(map[interface{}]interface{}) float64) float64 {\n\tvar recur func(splits []Split) float64\n\trecur = func(splits []Split) float64 {\n\t\tif len(splits) < 2 {\n\t\t\t// NewMap and case 2 below ensure that len(splits) > 0\n\t\t\treturn splits[0].reduceFloat64(reduce)\n\t\t}\n\t\tvar left, right float64\n\t\tvar p interface{}\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tswitch len(splits) {\n\t\tcase 2:\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = splits[1].reduceFloat64(reduce)\n\t\t\t}()\n\t\t\tleft = splits[0].reduceFloat64(reduce)\n\t\tdefault:\n\t\t\thalf := len(splits) / 2\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = recur(splits[half:])\n\t\t\t}()\n\t\t\tleft = recur(splits[:half])\n\t\t}\n\t\twg.Wait()\n\t\tif p != nil {\n\t\t\tpanic(p)\n\t\t}\n\t\treturn left + right\n\t}\n\treturn recur(m.splits)\n}\n\n// ParallelReduceFloat64Product calls reduce for every split of m in parallel.\n// The results of the reduce invocations are then multiplied with each other.\n//\n// ParallelReduceFloat64Product returns only when all goroutines it spawns have\n// terminated.\n//\n// While reduce is executed on a split of m, ParallelReduceFloat64Product holds\n// the corresponding lock.\n//\n// If one or more reduce invocations panic, the corresponding goroutines recover\n// the panics, and ParallelReduceFloat64Product eventually panics with the\n// left-most recovered panic value.\n//\n// ParallelReduceFloat64Product does not necessarily correspond to any\n// consistent snapshot of the Map's contents: no split will be visited more than\n// once, but if the value for any key is stored or deleted concurrently,\n// ParallelReduceFloat64Product may reflect any mapping for that key from any\n// point during the ParallelReduceFloat64Product call.\nfunc (m *Map) ParallelReduceFloat64Product(reduce func(map[interface{}]interface{}) float64) float64 {\n\tvar recur func(splits []Split) float64\n\trecur = func(splits []Split) float64 {\n\t\tif len(splits) < 2 {\n\t\t\t// NewMap and case 2 below ensure that len(splits) > 0\n\t\t\treturn splits[0].reduceFloat64(reduce)\n\t\t}\n\t\tvar left, right float64\n\t\tvar p interface{}\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tswitch len(splits) {\n\t\tcase 2:\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = splits[1].reduceFloat64(reduce)\n\t\t\t}()\n\t\t\tleft = splits[0].reduceFloat64(reduce)\n\t\tdefault:\n\t\t\thalf := len(splits) 
/ 2\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = recur(splits[half:])\n\t\t\t}()\n\t\t\tleft = recur(splits[:half])\n\t\t}\n\t\twg.Wait()\n\t\tif p != nil {\n\t\t\tpanic(p)\n\t\t}\n\t\treturn left * right\n\t}\n\treturn recur(m.splits)\n}\n\n// ParallelReduceInt calls reduce for every split of m in parallel. The results\n// of the reduce invocations are then combined by repeated invocations of the\n// join function.\n//\n// While reduce is executed on a split of m, ParallelReduceInt holds the\n// corresponding lock.\n//\n// If one or more reduce invocations panic, the corresponding goroutines recover\n// the panics, and ParallelReduceInt eventually panics with the left-most\n// recovered panic value.\n//\n// ParallelReduceInt does not necessarily correspond to any consistent snapshot\n// of the Map's contents: no split will be visited more than once, but if the\n// value for any key is stored or deleted concurrently, ParallelReduceInt may\n// reflect any mapping for that key from any point during the ParallelReduceInt\n// call.\nfunc (m *Map) ParallelReduceInt(\n\treduce func(map[interface{}]interface{}) int,\n\tjoin func(x, y int) int,\n) int {\n\tvar recur func(splits []Split) int\n\trecur = func(splits []Split) int {\n\t\tif len(splits) < 2 {\n\t\t\t// NewMap and case 2 below ensure that len(splits) > 0\n\t\t\treturn splits[0].reduceInt(reduce)\n\t\t}\n\t\tvar left, right int\n\t\tvar p interface{}\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tswitch len(splits) {\n\t\tcase 2:\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = splits[1].reduceInt(reduce)\n\t\t\t}()\n\t\t\tleft = splits[0].reduceInt(reduce)\n\t\tdefault:\n\t\t\thalf := len(splits) / 2\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = recur(splits[half:])\n\t\t\t}()\n\t\t\tleft = recur(splits[:half])\n\t\t}\n\t\twg.Wait()\n\t\tif p != nil {\n\t\t\tpanic(p)\n\t\t}\n\t\treturn join(left, right)\n\t}\n\treturn recur(m.splits)\n}\n\n// ParallelReduceIntSum calls reduce for every split of m in parallel. 
The\n// results of the reduce invocations are then added together.\n//\n// ParallelReduceIntSum returns only when all goroutines it spawns have\n// terminated.\n//\n// While reduce is executed on a split of m, ParallelReduceIntSum holds the\n// corresponding lock.\n//\n// If one or more reduce invocations panic, the corresponding goroutines recover\n// the panics, and ParallelReduceIntSum eventually panics with the left-most\n// recovered panic value.\n//\n// ParallelReduceIntSum does not necessarily correspond to any consistent\n// snapshot of the Map's contents: no split will be visited more than once, but\n// if the value for any key is stored or deleted concurrently,\n// ParallelReduceIntSum may reflect any mapping for that key from any point\n// during the ParallelReduceIntSum call.\nfunc (m *Map) ParallelReduceIntSum(reduce func(map[interface{}]interface{}) int) int {\n\tvar recur func(splits []Split) int\n\trecur = func(splits []Split) int {\n\t\tif len(splits) < 2 {\n\t\t\t// NewMap and case 2 below ensure that len(splits) > 0\n\t\t\treturn splits[0].reduceInt(reduce)\n\t\t}\n\t\tvar left, right int\n\t\tvar p interface{}\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tswitch len(splits) {\n\t\tcase 2:\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = splits[1].reduceInt(reduce)\n\t\t\t}()\n\t\t\tleft = splits[0].reduceInt(reduce)\n\t\tdefault:\n\t\t\thalf := len(splits) / 2\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = recur(splits[half:])\n\t\t\t}()\n\t\t\tleft = recur(splits[:half])\n\t\t}\n\t\twg.Wait()\n\t\tif p != nil {\n\t\t\tpanic(p)\n\t\t}\n\t\treturn left + right\n\t}\n\treturn recur(m.splits)\n}\n\n// ParallelReduceIntProduct calls reduce for every split of m in parallel. 
The\n// results of the reduce invocations are then multiplied with each other.\n//\n// ParallelReduceIntProduct returns only when all goroutines it spawns have\n// terminated.\n//\n// While reduce is executed on a split of m, ParallelReduceIntProduct holds the\n// corresponding lock.\n//\n// If one or more reduce invocations panic, the corresponding goroutines recover\n// the panics, and ParallelReduceIntProduct eventually panics with the left-most\n// recovered panic value.\n//\n// ParallelReduceIntProduct does not necessarily correspond to any consistent\n// snapshot of the Map's contents: no split will be visited more than once, but\n// if the value for any key is stored or deleted concurrently,\n// ParallelReduceIntProduct may reflect any mapping for that key from any point\n// during the ParallelReduceIntProduct call.\nfunc (m *Map) ParallelReduceIntProduct(reduce func(map[interface{}]interface{}) int) int {\n\tvar recur func(splits []Split) int\n\trecur = func(splits []Split) int {\n\t\tif len(splits) < 2 {\n\t\t\t// NewMap and case 2 below ensure that len(splits) > 0\n\t\t\treturn splits[0].reduceInt(reduce)\n\t\t}\n\t\tvar left, right int\n\t\tvar p interface{}\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tswitch len(splits) {\n\t\tcase 2:\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = splits[1].reduceInt(reduce)\n\t\t\t}()\n\t\t\tleft = splits[0].reduceInt(reduce)\n\t\tdefault:\n\t\t\thalf := len(splits) / 2\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = recur(splits[half:])\n\t\t\t}()\n\t\t\tleft = recur(splits[:half])\n\t\t}\n\t\twg.Wait()\n\t\tif p != nil {\n\t\t\tpanic(p)\n\t\t}\n\t\treturn left * right\n\t}\n\treturn recur(m.splits)\n}\n\n// ParallelReduceString calls reduce for every split of m in parallel. 
The\n// results of the reduce invocations are then combined by repeated invocations\n// of the join function.\n//\n// ParallelReduceString returns only when all goroutines it spawns have\n// terminated.\n//\n// While reduce is executed on a split of m, ParallelReduceString holds the\n// corresponding lock.\n//\n// If one or more reduce invocations panic, the corresponding goroutines recover\n// the panics, and ParallelReduceString eventually panics with the left-most\n// recovered panic value.\n//\n// ParallelReduceString does not necessarily correspond to any consistent\n// snapshot of the Map's contents: no split will be visited more than once, but\n// if the value for any key is stored or deleted concurrently,\n// ParallelReduceString may reflect any mapping for that key from any point\n// during the ParallelReduceString call.\nfunc (m *Map) ParallelReduceString(\n\treduce func(map[interface{}]interface{}) string,\n\tjoin func(x, y string) string,\n) string {\n\tvar recur func(splits []Split) string\n\trecur = func(splits []Split) string {\n\t\tif len(splits) < 2 {\n\t\t\t// NewMap and case 2 below ensure that len(splits) > 0\n\t\t\treturn splits[0].reduceString(reduce)\n\t\t}\n\t\tvar left, right string\n\t\tvar p interface{}\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tswitch len(splits) {\n\t\tcase 2:\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = splits[1].reduceString(reduce)\n\t\t\t}()\n\t\t\tleft = splits[0].reduceString(reduce)\n\t\tdefault:\n\t\t\thalf := len(splits) / 2\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = recur(splits[half:])\n\t\t\t}()\n\t\t\tleft = recur(splits[:half])\n\t\t}\n\t\twg.Wait()\n\t\tif p != nil {\n\t\t\tpanic(p)\n\t\t}\n\t\treturn join(left, right)\n\t}\n\treturn recur(m.splits)\n}\n\n// ParallelReduceStringSum calls reduce for every split of m in parallel. 
The\n// results of the reduce invocations are then concatenated together.\n//\n// ParallelReduceStringSum returns only when all goroutines it spawns have\n// terminated.\n//\n// While reduce is executed on a split of m, ParallelReduceStringSum holds the\n// corresponding lock.\n//\n// If one or more reduce invocations panic, the corresponding goroutines recover\n// the panics, and ParallelReduceStringSum eventually panics with the left-most\n// recovered panic value.\n//\n// ParallelReduceStringSum does not necessarily correspond to any consistent\n// snapshot of the Map's contents: no split will be visited more than once, but\n// if the value for any key is stored or deleted concurrently,\n// ParallelReduceStringSum may reflect any mapping for that key from any point\n// during the ParallelReduceStringSum call.\nfunc (m *Map) ParallelReduceStringSum(reduce func(map[interface{}]interface{}) string) string {\n\tvar recur func(splits []Split) string\n\trecur = func(splits []Split) string {\n\t\tif len(splits) < 2 {\n\t\t\t// NewMap and case 2 below ensure that len(splits) > 0\n\t\t\treturn splits[0].reduceString(reduce)\n\t\t}\n\t\tvar left, right string\n\t\tvar p interface{}\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tswitch len(splits) {\n\t\tcase 2:\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = splits[1].reduceString(reduce)\n\t\t\t}()\n\t\t\tleft = splits[0].reduceString(reduce)\n\t\tdefault:\n\t\t\thalf := len(splits) / 2\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright = recur(splits[half:])\n\t\t\t}()\n\t\t\tleft = recur(splits[:half])\n\t\t}\n\t\twg.Wait()\n\t\tif p != nil {\n\t\t\tpanic(p)\n\t\t}\n\t\treturn left + right\n\t}\n\treturn recur(m.splits)\n}\n\nfunc (split *Split) speculativeReduce(r func(map[interface{}]interface{}) (interface{}, bool)) (interface{}, bool) {\n\tsplit.Lock()\n\tdefer split.Unlock()\n\treturn r(split.Map)\n}\n\n// SpeculativeReduce calls reduce for every split of m in parallel. The results\n// of the reduce invocations are then combined by repeated invocations of the\n// join function.\n//\n// SpeculativeReduce returns either when all goroutines it spawns have\n// terminated with a second return value of false; or when one or more reduce or\n// join functions return a second return value of true. 
In the latter case, the\n// first return value of the left-most function that returned true as a second\n// return value becomes the final result, without waiting for the other\n// functions to terminate.\n//\n// While reduce is executed on a split of m, SpeculativeReduce holds the\n// corresponding lock.\n//\n// If one or more reduce invocations panic, the corresponding goroutines recover\n// the panics, and SpeculativeReduce eventually panics with the left-most\n// recovered panic value.\n//\n// SpeculativeReduce does not necessarily correspond to any consistent snapshot\n// of the Map's contents: no split will be visited more than once, but if the\n// value for any key is stored or deleted concurrently, SpeculativeReduce may\n// reflect any mapping for that key from any point during the SpeculativeReduce\n// call.\nfunc (m *Map) SpeculativeReduce(\n\treduce func(map[interface{}]interface{}) (interface{}, bool),\n\tjoin func(x, y interface{}) (interface{}, bool),\n) (interface{}, bool) {\n\tvar recur func(splits []Split) (interface{}, bool)\n\trecur = func(splits []Split) (interface{}, bool) {\n\t\tif len(splits) < 2 {\n\t\t\t// NewMap and case 2 below ensure that len(splits) > 0\n\t\t\treturn splits[0].speculativeReduce(reduce)\n\t\t}\n\t\tvar left, right interface{}\n\t\tvar b0, b1 bool\n\t\tvar p interface{}\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tswitch len(splits) {\n\t\tcase 2:\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright, b1 = splits[1].speculativeReduce(reduce)\n\t\t\t}()\n\t\t\tleft, b0 = splits[0].speculativeReduce(reduce)\n\t\tdefault:\n\t\t\thalf := len(splits) / 2\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright, b1 = recur(splits[half:])\n\t\t\t}()\n\t\t\tleft, b0 = recur(splits[:half])\n\t\t}\n\t\tif b0 {\n\t\t\treturn left, true\n\t\t}\n\t\twg.Wait()\n\t\tif p != nil {\n\t\t\tpanic(p)\n\t\t}\n\t\tif b1 {\n\t\t\treturn right, true\n\t\t}\n\t\treturn join(left, right)\n\t}\n\treturn recur(m.splits)\n}\n\nfunc (split *Split) speculativeReduceFloat64(r func(map[interface{}]interface{}) (float64, bool)) (float64, bool) {\n\tsplit.Lock()\n\tdefer split.Unlock()\n\treturn r(split.Map)\n}\n\n// SpeculativeReduceFloat64 calls reduce for every split of m in parallel. The\n// results of the reduce invocations are then combined by repeated invocations\n// of the join function.\n//\n// SpeculativeReduceFloat64 returns either when all goroutines it spawns have\n// terminated with a second return value of false; or when one or more reduce or\n// join functions return a second return value of true. 
In the latter case, the\n// first return value of the left-most function that returned true as a second\n// return value becomes the final result, without waiting for the other\n// functions to terminate.\n//\n// While reduce is executed on a split of m, SpeculativeReduceFloat64 holds the\n// corresponding lock.\n//\n// If one or more reduce invocations panic, the corresponding goroutines recover\n// the panics, and SpeculativeReduceFloat64 eventually panics with the left-most\n// recovered panic value.\n//\n// SpeculativeReduceFloat64 does not necessarily correspond to any consistent\n// snapshot of the Map's contents: no split will be visited more than once, but\n// if the value for any key is stored or deleted concurrently,\n// SpeculativeReduceFloat64 may reflect any mapping for that key from any point\n// during the SpeculativeReduceFloat64 call.\nfunc (m *Map) SpeculativeReduceFloat64(\n\treduce func(map[interface{}]interface{}) (float64, bool),\n\tjoin func(x, y float64) (float64, bool),\n) (float64, bool) {\n\tvar recur func(splits []Split) (float64, bool)\n\trecur = func(splits []Split) (float64, bool) {\n\t\tif len(splits) < 2 {\n\t\t\t// NewMap and case 2 below ensure that len(splits) > 0\n\t\t\treturn splits[0].speculativeReduceFloat64(reduce)\n\t\t}\n\t\tvar left, right float64\n\t\tvar b0, b1 bool\n\t\tvar p interface{}\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tswitch len(splits) {\n\t\tcase 2:\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright, b1 = splits[1].speculativeReduceFloat64(reduce)\n\t\t\t}()\n\t\t\tleft, b0 = splits[0].speculativeReduceFloat64(reduce)\n\t\tdefault:\n\t\t\thalf := len(splits) / 2\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright, b1 = recur(splits[half:])\n\t\t\t}()\n\t\t\tleft, b0 = recur(splits[:half])\n\t\t}\n\t\tif b0 {\n\t\t\treturn left, true\n\t\t}\n\t\twg.Wait()\n\t\tif p != nil {\n\t\t\tpanic(p)\n\t\t}\n\t\tif b1 {\n\t\t\treturn right, true\n\t\t}\n\t\treturn join(left, right)\n\t}\n\treturn recur(m.splits)\n}\n\nfunc (split *Split) speculativeReduceInt(r func(map[interface{}]interface{}) (int, bool)) (int, bool) {\n\tsplit.Lock()\n\tdefer split.Unlock()\n\treturn r(split.Map)\n}\n\n// SpeculativeReduceInt calls reduce for every split of m in parallel. The\n// results of the reduce invocations are then combined by repeated invocations\n// of the join function.\n//\n// SpeculativeReduceInt returns either when all goroutines it spawns have\n// terminated with a second return value of false; or when one or more reduce or\n// join functions return a second return value of true. 
In the latter case, the\n// first return value of the left-most function that returned true as a second\n// return value becomes the final result, without waiting for the other\n// functions to terminate.\n//\n// While reduce is executed on a split of m, SpeculativeReduceInt holds the\n// corresponding lock.\n//\n// If one or more reduce invocations panic, the corresponding goroutines recover\n// the panics, and SpeculativeReduceInt eventually panics with the left-most\n// recovered panic value.\n//\n// SpeculativeReduceInt does not necessarily correspond to any consistent\n// snapshot of the Map's contents: no split will be visited more than once, but\n// if the value for any key is stored or deleted concurrently,\n// SpeculativeReduceInt may reflect any mapping for that key from any point\n// during the SpeculativeReduceInt call.\nfunc (m *Map) SpeculativeReduceInt(\n\treduce func(map[interface{}]interface{}) (int, bool),\n\tjoin func(x, y int) (int, bool),\n) (int, bool) {\n\tvar recur func(splits []Split) (int, bool)\n\trecur = func(splits []Split) (int, bool) {\n\t\tif len(splits) < 2 {\n\t\t\t// NewMap and case 2 below ensure that len(splits) > 0\n\t\t\treturn splits[0].speculativeReduceInt(reduce)\n\t\t}\n\t\tvar left, right int\n\t\tvar b0, b1 bool\n\t\tvar p interface{}\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tswitch len(splits) {\n\t\tcase 2:\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright, b1 = splits[1].speculativeReduceInt(reduce)\n\t\t\t}()\n\t\t\tleft, b0 = splits[0].speculativeReduceInt(reduce)\n\t\tdefault:\n\t\t\thalf := len(splits) / 2\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright, b1 = recur(splits[half:])\n\t\t\t}()\n\t\t\tleft, b0 = recur(splits[:half])\n\t\t}\n\t\tif b0 {\n\t\t\treturn left, true\n\t\t}\n\t\twg.Wait()\n\t\tif p != nil {\n\t\t\tpanic(p)\n\t\t}\n\t\tif b1 {\n\t\t\treturn right, true\n\t\t}\n\t\treturn join(left, right)\n\t}\n\treturn recur(m.splits)\n}\n\nfunc (split *Split) speculativeReduceString(r func(map[interface{}]interface{}) (string, bool)) (string, bool) {\n\tsplit.Lock()\n\tdefer split.Unlock()\n\treturn r(split.Map)\n}\n\n// SpeculativeReduceString calls reduce for every split of m in parallel. The\n// results of the reduce invocations are then combined by repeated invocations\n// of the join function.\n//\n// SpeculativeReduceString returns either when all goroutines it spawns have\n// terminated with a second return value of false; or when one or more reduce or\n// join functions return a second return value of true. 
In the latter case, the\n// first return value of the left-most function that returned true as a second\n// return value becomes the final result, without waiting for the other\n// functions to terminate.\n//\n// While reduce is executed on a split of m, SpeculativeReduceString holds the\n// corresponding lock.\n//\n// If one or more reduce invocations panic, the corresponding goroutines recover\n// the panics, and SpeculativeReduceString eventually panics with the left-most\n// recovered panic value.\n//\n// SpeculativeReduceString does not necessarily correspond to any consistent\n// snapshot of the Map's contents: no split will be visited more than once, but\n// if the value for any key is stored or deleted concurrently,\n// SpeculativeReduceString may reflect any mapping for that key from any point\n// during the SpeculativeReduceString call.\nfunc (m *Map) SpeculativeReduceString(\n\treduce func(map[interface{}]interface{}) (string, bool),\n\tjoin func(x, y string) (string, bool),\n) (string, bool) {\n\tvar recur func(splits []Split) (string, bool)\n\trecur = func(splits []Split) (string, bool) {\n\t\tif len(splits) < 2 {\n\t\t\t// NewMap and case 2 below ensure that len(splits) > 0\n\t\t\treturn splits[0].speculativeReduceString(reduce)\n\t\t}\n\t\tvar left, right string\n\t\tvar b0, b1 bool\n\t\tvar p interface{}\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(1)\n\t\tswitch len(splits) {\n\t\tcase 2:\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright, b1 = splits[1].speculativeReduceString(reduce)\n\t\t\t}()\n\t\t\tleft, b0 = splits[0].speculativeReduceString(reduce)\n\t\tdefault:\n\t\t\thalf := len(splits) / 2\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\tp = internal.WrapPanic(recover())\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t\tright, b1 = recur(splits[half:])\n\t\t\t}()\n\t\t\tleft, b0 = recur(splits[:half])\n\t\t}\n\t\tif b0 {\n\t\t\treturn left, true\n\t\t}\n\t\twg.Wait()\n\t\tif p != nil {\n\t\t\tpanic(p)\n\t\t}\n\t\tif b1 {\n\t\t\treturn right, true\n\t\t}\n\t\treturn join(left, right)\n\t}\n\treturn recur(m.splits)\n}\n"
  }
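  ,
  {
    "path": "sync/map_example_test.go",
    "content": "// This file is an illustrative usage sketch rather than part of the original\n// library sources. The file name, the Key type, and the example are\n// illustrative only, while Hasher, NewMap, Modify, and ParallelReduceIntSum\n// are the APIs defined in sync/map.go.\npackage sync_test\n\nimport (\n\t\"fmt\"\n\t\"hash/fnv\"\n\n\tpsync \"github.com/exascience/pargo/sync\"\n)\n\n// Key is a string key type that implements the Hasher interface required by\n// Map, using an FNV-1a hash to select a split.\ntype Key string\n\n// Hash returns the FNV-1a hash of the key.\nfunc (k Key) Hash() uint64 {\n\th := fnv.New64a()\n\th.Write([]byte(k))\n\treturn h.Sum64()\n}\n\n// ExampleMap counts words with Modify and then sums the counts across all\n// splits in parallel with ParallelReduceIntSum.\nfunc ExampleMap() {\n\tm := psync.NewMap(0) // 0 selects a default number of splits\n\n\tfor _, word := range []string{\"go\", \"parallel\", \"go\"} {\n\t\tm.Modify(Key(word), func(value interface{}, ok bool) (interface{}, bool) {\n\t\t\tif !ok {\n\t\t\t\treturn 1, true // store an initial count\n\t\t\t}\n\t\t\treturn value.(int) + 1, true // increment the existing count\n\t\t})\n\t}\n\n\ttotal := m.ParallelReduceIntSum(func(split map[interface{}]interface{}) int {\n\t\tsum := 0\n\t\tfor _, count := range split {\n\t\t\tsum += count.(int)\n\t\t}\n\t\treturn sum\n\t})\n\tfmt.Println(total)\n\t// Output: 3\n}\n"
  }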
]