[
  {
    "path": ".gitignore",
    "content": "# Compiled Object files, Static and Dynamic libs (Shared Objects)\n*.o\n*.a\n*.so\n\n# Folders\n_obj\n_test\n\n# Architecture specific extensions/prefixes\n*.[568vq]\n[568vq].out\n\n*.cgo1.go\n*.cgo2.c\n_cgo_defun.c\n_cgo_gotypes.go\n_cgo_export.*\n\n_testmain.go\n\n*.exe\n*.test\n*.prof\n"
  },
  {
    "path": ".travis.yml",
    "content": "language: go\narch:\n    - amd64\n    - ppc64le\n\ngo:\n  - \"1.13.x\"\n  - tip\n\ngo_import_path: go4.org\n\nbefore_install:\n  - go mod tidy\n  - git diff --exit-code go.mod\n  - git diff --exit-code go.sum\n  - go mod download\n"
  },
  {
    "path": "AUTHORS",
    "content": "# This is the official list of go4 authors for copyright purposes.\n# This is distinct from the CONTRIBUTORS file, which is the list of\n# people who have contributed, even if they don't own the copyright on\n# their work.\n\nMathieu Lonjaret <mathieu.lonjaret@gmail.com>\nDaniel Theophanes <kardianos@gmail.com>\nGoogle\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"{}\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright {yyyy} {name of copyright owner}\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n\n"
  },
  {
    "path": "README.md",
    "content": "# go4\n\n[![travis badge](https://travis-ci.org/go4org/go4.svg?branch=master)](https://travis-ci.org/go4org/go4 \"Travis CI\")\n\n[go4.org](http://go4.org) is a collection of packages for\nGo programmers.\n\nThey started out living in [Perkeep](https://perkeep.org)'s repo\nand elsewhere but they have nothing to do with Perkeep, so we're\nmoving them here.\n\n## Details\n\n* **single repo**. go4 is a single repo. That means things can be\n    changed and rearranged globally atomically with ease and\n    confidence.\n\n* **no backwards compatibility**. go4 makes no backwards compatibility\n    promises. If you want to use go4, vendor it. And next time you\n    update your vendor tree, update to the latest API if things in go4\n    changed. The plan is to eventually provide tools to make this\n    easier.\n\n* **forward progress** because we have no backwards compatibility,\n    it's always okay to change things to make things better. That also\n    means the bar for contributions is lower. We don't have to get the\n    API 100% correct in the first commit.\n\n* **no Go version policy** go4 packages are usually built and tested\n    with the latest Go stable version. However, go4 has no overarching\n    version policy; each package can declare its own set of supported\n    Go versions.\n\n* **code review** contributions must be code-reviewed. We're trying\n    out Gerrithub, to see if we can find a mix of Github Pull Requests\n    and Gerrit that works well for many people. We'll see.\n\n* **CLA compliant** contributors must agree to the Google CLA (the\n    same as Go itself). This ensures we can move things into Go as\n    necessary in the future. It also makes lawyers at various\n    companies happy.  The CLA is **not** a copyright *assignment*; you\n    retain the copyright on your work. The CLA just says that your\n    work is open source and you have permission to open source it. 
See\n    https://golang.org/doc/contribute.html#cla\n\n* **docs, tests, portability** all code should be documented in the\n    normal Go style, have tests, and be portable to different\n    operating systems and architectures. We'll try to get builders in\n    place to help run the tests on different OS/arches. For now we\n    have Travis at least.\n\n## Contact\n\nFor any question, or communication when a Github issue is not appropriate,\nplease contact the [Perkeep mailing\nlist](https://groups.google.com/forum/#!forum/perkeep).\n\n"
  },
  {
    "path": "bytereplacer/bytereplacer.go",
    "content": "/*\nCopyright 2015 The Perkeep Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package bytereplacer provides a utility for replacing parts of byte slices.\npackage bytereplacer // import \"go4.org/bytereplacer\"\n\nimport \"bytes\"\n\n// Replacer replaces a list of strings with replacements.\n// It is safe for concurrent use by multiple goroutines.\ntype Replacer struct {\n\tr replacer\n}\n\n// replacer is the interface that a replacement algorithm needs to implement.\ntype replacer interface {\n\t// Replace performs all replacements, in-place if possible.\n\tReplace(s []byte) []byte\n}\n\n// New returns a new Replacer from a list of old, new string pairs.\n// Replacements are performed in order, without overlapping matches.\nfunc New(oldnew ...string) *Replacer {\n\tif len(oldnew)%2 == 1 {\n\t\tpanic(\"bytes.NewReplacer: odd argument count\")\n\t}\n\n\tallNewBytes := true\n\tfor i := 0; i < len(oldnew); i += 2 {\n\t\tif len(oldnew[i]) != 1 {\n\t\t\treturn &Replacer{r: makeGenericReplacer(oldnew)}\n\t\t}\n\t\tif len(oldnew[i+1]) != 1 {\n\t\t\tallNewBytes = false\n\t\t}\n\t}\n\n\tif allNewBytes {\n\t\tr := byteReplacer{}\n\t\tfor i := range r {\n\t\t\tr[i] = byte(i)\n\t\t}\n\t\t// The first occurrence of old->new map takes precedence\n\t\t// over the others with the same old string.\n\t\tfor i := len(oldnew) - 2; i >= 0; i -= 2 {\n\t\t\to := oldnew[i][0]\n\t\t\tn := oldnew[i+1][0]\n\t\t\tr[o] = n\n\t\t}\n\t\treturn &Replacer{r: &r}\n\t}\n\n\treturn 
&Replacer{r: makeGenericReplacer(oldnew)}\n}\n\n// Replace performs all replacements in-place on s. If the capacity\n// of s is not sufficient, a new slice is allocated, otherwise Replace\n// returns s.\nfunc (r *Replacer) Replace(s []byte) []byte {\n\treturn r.r.Replace(s)\n}\n\ntype trieNode struct {\n\tvalue    []byte\n\tpriority int\n\tprefix   []byte\n\tnext     *trieNode\n\ttable    []*trieNode\n}\n\nfunc (t *trieNode) add(key, val []byte, priority int, r *genericReplacer) {\n\tif len(key) == 0 {\n\t\tif t.priority == 0 {\n\t\t\tt.value = val\n\t\t\tt.priority = priority\n\t\t}\n\t\treturn\n\t}\n\n\tif len(t.prefix) > 0 {\n\t\t// Need to split the prefix among multiple nodes.\n\t\tvar n int // length of the longest common prefix\n\t\tfor ; n < len(t.prefix) && n < len(key); n++ {\n\t\t\tif t.prefix[n] != key[n] {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif n == len(t.prefix) {\n\t\t\tt.next.add(key[n:], val, priority, r)\n\t\t} else if n == 0 {\n\t\t\t// First byte differs, start a new lookup table here. 
Looking up\n\t\t\t// what is currently t.prefix[0] will lead to prefixNode, and\n\t\t\t// looking up key[0] will lead to keyNode.\n\t\t\tvar prefixNode *trieNode\n\t\t\tif len(t.prefix) == 1 {\n\t\t\t\tprefixNode = t.next\n\t\t\t} else {\n\t\t\t\tprefixNode = &trieNode{\n\t\t\t\t\tprefix: t.prefix[1:],\n\t\t\t\t\tnext:   t.next,\n\t\t\t\t}\n\t\t\t}\n\t\t\tkeyNode := new(trieNode)\n\t\t\tt.table = make([]*trieNode, r.tableSize)\n\t\t\tt.table[r.mapping[t.prefix[0]]] = prefixNode\n\t\t\tt.table[r.mapping[key[0]]] = keyNode\n\t\t\tt.prefix = nil\n\t\t\tt.next = nil\n\t\t\tkeyNode.add(key[1:], val, priority, r)\n\t\t} else {\n\t\t\t// Insert new node after the common section of the prefix.\n\t\t\tnext := &trieNode{\n\t\t\t\tprefix: t.prefix[n:],\n\t\t\t\tnext:   t.next,\n\t\t\t}\n\t\t\tt.prefix = t.prefix[:n]\n\t\t\tt.next = next\n\t\t\tnext.add(key[n:], val, priority, r)\n\t\t}\n\t} else if t.table != nil {\n\t\t// Insert into existing table.\n\t\tm := r.mapping[key[0]]\n\t\tif t.table[m] == nil {\n\t\t\tt.table[m] = new(trieNode)\n\t\t}\n\t\tt.table[m].add(key[1:], val, priority, r)\n\t} else {\n\t\tt.prefix = key\n\t\tt.next = new(trieNode)\n\t\tt.next.add(nil, val, priority, r)\n\t}\n}\n\nfunc (r *genericReplacer) lookup(s []byte, ignoreRoot bool) (val []byte, keylen int, found bool) {\n\t// Iterate down the trie to the end, and grab the value and keylen with\n\t// the highest priority.\n\tbestPriority := 0\n\tnode := &r.root\n\tn := 0\n\tfor node != nil {\n\t\tif node.priority > bestPriority && !(ignoreRoot && node == &r.root) {\n\t\t\tbestPriority = node.priority\n\t\t\tval = node.value\n\t\t\tkeylen = n\n\t\t\tfound = true\n\t\t}\n\n\t\tif len(s) == 0 {\n\t\t\tbreak\n\t\t}\n\t\tif node.table != nil {\n\t\t\tindex := r.mapping[s[0]]\n\t\t\tif int(index) == r.tableSize {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tnode = node.table[index]\n\t\t\ts = s[1:]\n\t\t\tn++\n\t\t} else if len(node.prefix) > 0 && bytes.HasPrefix(s, node.prefix) {\n\t\t\tn += len(node.prefix)\n\t\t\ts = 
s[len(node.prefix):]\n\t\t\tnode = node.next\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn\n}\n\n// genericReplacer is the fully generic algorithm.\n// It's used as a fallback when nothing faster can be used.\ntype genericReplacer struct {\n\troot trieNode\n\t// tableSize is the size of a trie node's lookup table. It is the number\n\t// of unique key bytes.\n\ttableSize int\n\t// mapping maps from key bytes to a dense index for trieNode.table.\n\tmapping [256]byte\n}\n\nfunc makeGenericReplacer(oldnew []string) *genericReplacer {\n\tr := new(genericReplacer)\n\t// Find each byte used, then assign them each an index.\n\tfor i := 0; i < len(oldnew); i += 2 {\n\t\tkey := oldnew[i]\n\t\tfor j := 0; j < len(key); j++ {\n\t\t\tr.mapping[key[j]] = 1\n\t\t}\n\t}\n\n\tfor _, b := range r.mapping {\n\t\tr.tableSize += int(b)\n\t}\n\n\tvar index byte\n\tfor i, b := range r.mapping {\n\t\tif b == 0 {\n\t\t\tr.mapping[i] = byte(r.tableSize)\n\t\t} else {\n\t\t\tr.mapping[i] = index\n\t\t\tindex++\n\t\t}\n\t}\n\t// Ensure root node uses a lookup table (for performance).\n\tr.root.table = make([]*trieNode, r.tableSize)\n\n\tfor i := 0; i < len(oldnew); i += 2 {\n\t\tr.root.add([]byte(oldnew[i]), []byte(oldnew[i+1]), len(oldnew)-i, r)\n\t}\n\treturn r\n}\n\nfunc (r *genericReplacer) Replace(s []byte) []byte {\n\tvar last int\n\tvar prevMatchEmpty bool\n\tdst := s[:0]\n\tgrown := false\n\tfor i := 0; i <= len(s); {\n\t\t// Fast path: s[i] is not a prefix of any pattern.\n\t\tif i != len(s) && r.root.priority == 0 {\n\t\t\tindex := int(r.mapping[s[i]])\n\t\t\tif index == r.tableSize || r.root.table[index] == nil {\n\t\t\t\ti++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// Ignore the empty match iff the previous loop found the empty match.\n\t\tval, keylen, match := r.lookup(s[i:], prevMatchEmpty)\n\t\tprevMatchEmpty = match && keylen == 0\n\t\tif match {\n\t\t\tdst = append(dst, s[last:i]...)\n\t\t\tif diff := len(val) - keylen; grown || diff < 0 {\n\t\t\t\tdst = append(dst, 
val...)\n\t\t\t\ti += keylen\n\t\t\t} else if diff <= cap(s)-len(s) {\n\t\t\t\t// The replacement is larger than the original, but can still fit in the original buffer.\n\t\t\t\tcopy(s[i+len(val):cap(dst)], s[i+keylen:])\n\t\t\t\tdst = append(dst, val...)\n\t\t\t\ts = s[:len(s)+diff]\n\t\t\t\ti += len(val)\n\t\t\t} else {\n\t\t\t\t// The output will grow larger than the original buffer.  Allocate a new one.\n\t\t\t\tgrown = true\n\t\t\t\tnewDst := make([]byte, len(dst), cap(dst)+diff)\n\t\t\t\tcopy(newDst, dst)\n\t\t\t\tdst = newDst\n\n\t\t\t\tdst = append(dst, val...)\n\t\t\t\ti += keylen\n\t\t\t}\n\t\t\tlast = i\n\t\t\tcontinue\n\t\t}\n\t\ti++\n\t}\n\tif last != len(s) {\n\t\tdst = append(dst, s[last:]...)\n\t}\n\treturn dst\n}\n\n// byteReplacer is the implementation that's used when all the \"old\"\n// and \"new\" values are single ASCII bytes.\n// The array contains replacement bytes indexed by old byte.\ntype byteReplacer [256]byte\n\nfunc (r *byteReplacer) Replace(s []byte) []byte {\n\tfor i, b := range s {\n\t\ts[i] = r[b]\n\t}\n\treturn s\n}\n"
  },
  {
    "path": "bytereplacer/bytereplacer_test.go",
    "content": "/*\nCopyright 2015 The Perkeep Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage bytereplacer\n\nimport (\n\t\"bytes\"\n\t\"strings\"\n\t\"testing\"\n)\n\nvar htmlEscaper = New(\n\t\"&\", \"&amp;\",\n\t\"<\", \"&lt;\",\n\t\">\", \"&gt;\",\n\t`\"`, \"&quot;\",\n\t\"'\", \"&apos;\",\n)\n\nvar htmlUnescaper = New(\n\t\"&amp;\", \"&\",\n\t\"&lt;\", \"<\",\n\t\"&gt;\", \">\",\n\t\"&quot;\", `\"`,\n\t\"&apos;\", \"'\",\n)\n\nvar capitalLetters = New(\"a\", \"A\", \"b\", \"B\")\n\nfunc TestReplacer(t *testing.T) {\n\ttype testCase struct {\n\t\tr       *Replacer\n\t\tin, out string\n\t}\n\tvar testCases []testCase\n\n\t// str converts 0xff to \"\\xff\". 
This isn't just string(b) since that converts to UTF-8.\n\tstr := func(b byte) string {\n\t\treturn string([]byte{b})\n\t}\n\tvar s []string\n\n\t// inc maps \"\\x00\"->\"\\x01\", ..., \"a\"->\"b\", \"b\"->\"c\", ..., \"\\xff\"->\"\\x00\".\n\ts = nil\n\tfor i := 0; i < 256; i++ {\n\t\ts = append(s, str(byte(i)), str(byte(i+1)))\n\t}\n\tinc := New(s...)\n\n\t// Test cases with 1-byte old strings, 1-byte new strings.\n\ttestCases = append(testCases,\n\t\ttestCase{capitalLetters, \"brad\", \"BrAd\"},\n\t\ttestCase{capitalLetters, strings.Repeat(\"a\", (32<<10)+123), strings.Repeat(\"A\", (32<<10)+123)},\n\t\ttestCase{capitalLetters, \"\", \"\"},\n\n\t\ttestCase{inc, \"brad\", \"csbe\"},\n\t\ttestCase{inc, \"\\x00\\xff\", \"\\x01\\x00\"},\n\t\ttestCase{inc, \"\", \"\"},\n\n\t\ttestCase{New(\"a\", \"1\", \"a\", \"2\"), \"brad\", \"br1d\"},\n\t)\n\n\t// repeat maps \"a\"->\"a\", \"b\"->\"bb\", \"c\"->\"ccc\", ...\n\ts = nil\n\tfor i := 0; i < 256; i++ {\n\t\tn := i + 1 - 'a'\n\t\tif n < 1 {\n\t\t\tn = 1\n\t\t}\n\t\ts = append(s, str(byte(i)), strings.Repeat(str(byte(i)), n))\n\t}\n\trepeat := New(s...)\n\n\t// Test cases with 1-byte old strings, variable length new strings.\n\ttestCases = append(testCases,\n\t\ttestCase{htmlEscaper, \"No changes\", \"No changes\"},\n\t\ttestCase{htmlEscaper, \"I <3 escaping & stuff\", \"I &lt;3 escaping &amp; stuff\"},\n\t\ttestCase{htmlEscaper, \"&&&\", \"&amp;&amp;&amp;\"},\n\t\ttestCase{htmlEscaper, \"\", \"\"},\n\n\t\ttestCase{repeat, \"brad\", \"bbrrrrrrrrrrrrrrrrrradddd\"},\n\t\ttestCase{repeat, \"abba\", \"abbbba\"},\n\t\ttestCase{repeat, \"\", \"\"},\n\n\t\ttestCase{New(\"a\", \"11\", \"a\", \"22\"), \"brad\", \"br11d\"},\n\t)\n\n\t// The remaining test cases have variable length old strings.\n\n\ttestCases = append(testCases,\n\t\ttestCase{htmlUnescaper, \"&amp;amp;\", \"&amp;\"},\n\t\ttestCase{htmlUnescaper, \"&lt;b&gt;HTML&apos;s neat&lt;/b&gt;\", \"<b>HTML's neat</b>\"},\n\t\ttestCase{htmlUnescaper, \"\", 
\"\"},\n\n\t\ttestCase{New(\"a\", \"1\", \"a\", \"2\", \"xxx\", \"xxx\"), \"brad\", \"br1d\"},\n\n\t\ttestCase{New(\"a\", \"1\", \"aa\", \"2\", \"aaa\", \"3\"), \"aaaa\", \"1111\"},\n\n\t\ttestCase{New(\"aaa\", \"3\", \"aa\", \"2\", \"a\", \"1\"), \"aaaa\", \"31\"},\n\t)\n\n\t// gen1 has multiple old strings of variable length. There is no\n\t// overall non-empty common prefix, but some pairwise common prefixes.\n\tgen1 := New(\n\t\t\"aaa\", \"3[aaa]\",\n\t\t\"aa\", \"2[aa]\",\n\t\t\"a\", \"1[a]\",\n\t\t\"i\", \"i\",\n\t\t\"longerst\", \"most long\",\n\t\t\"longer\", \"medium\",\n\t\t\"long\", \"short\",\n\t\t\"xx\", \"xx\",\n\t\t\"x\", \"X\",\n\t\t\"X\", \"Y\",\n\t\t\"Y\", \"Z\",\n\t)\n\ttestCases = append(testCases,\n\t\ttestCase{gen1, \"fooaaabar\", \"foo3[aaa]b1[a]r\"},\n\t\ttestCase{gen1, \"long, longerst, longer\", \"short, most long, medium\"},\n\t\ttestCase{gen1, \"xxxxx\", \"xxxxX\"},\n\t\ttestCase{gen1, \"XiX\", \"YiY\"},\n\t\ttestCase{gen1, \"\", \"\"},\n\t)\n\n\t// gen2 has multiple old strings with no pairwise common prefix.\n\tgen2 := New(\n\t\t\"roses\", \"red\",\n\t\t\"violets\", \"blue\",\n\t\t\"sugar\", \"sweet\",\n\t)\n\ttestCases = append(testCases,\n\t\ttestCase{gen2, \"roses are red, violets are blue...\", \"red are red, blue are blue...\"},\n\t\ttestCase{gen2, \"\", \"\"},\n\t)\n\n\t// gen3 has multiple old strings with an overall common prefix.\n\tgen3 := New(\n\t\t\"abracadabra\", \"poof\",\n\t\t\"abracadabrakazam\", \"splat\",\n\t\t\"abraham\", \"lincoln\",\n\t\t\"abrasion\", \"scrape\",\n\t\t\"abraham\", \"isaac\",\n\t)\n\ttestCases = append(testCases,\n\t\ttestCase{gen3, \"abracadabrakazam abraham\", \"poofkazam lincoln\"},\n\t\ttestCase{gen3, \"abrasion abracad\", \"scrape abracad\"},\n\t\ttestCase{gen3, \"abba abram abrasive\", \"abba abram abrasive\"},\n\t\ttestCase{gen3, \"\", \"\"},\n\t)\n\n\t// foo{1,2,3,4} have multiple old strings with an overall common prefix\n\t// and 1- or 2- byte extensions from the common prefix.\n\tfoo1 := 
New(\n\t\t\"foo1\", \"A\",\n\t\t\"foo2\", \"B\",\n\t\t\"foo3\", \"C\",\n\t)\n\tfoo2 := New(\n\t\t\"foo1\", \"A\",\n\t\t\"foo2\", \"B\",\n\t\t\"foo31\", \"C\",\n\t\t\"foo32\", \"D\",\n\t)\n\tfoo3 := New(\n\t\t\"foo11\", \"A\",\n\t\t\"foo12\", \"B\",\n\t\t\"foo31\", \"C\",\n\t\t\"foo32\", \"D\",\n\t)\n\tfoo4 := New(\n\t\t\"foo12\", \"B\",\n\t\t\"foo32\", \"D\",\n\t)\n\ttestCases = append(testCases,\n\t\ttestCase{foo1, \"fofoofoo12foo32oo\", \"fofooA2C2oo\"},\n\t\ttestCase{foo1, \"\", \"\"},\n\n\t\ttestCase{foo2, \"fofoofoo12foo32oo\", \"fofooA2Doo\"},\n\t\ttestCase{foo2, \"\", \"\"},\n\n\t\ttestCase{foo3, \"fofoofoo12foo32oo\", \"fofooBDoo\"},\n\t\ttestCase{foo3, \"\", \"\"},\n\n\t\ttestCase{foo4, \"fofoofoo12foo32oo\", \"fofooBDoo\"},\n\t\ttestCase{foo4, \"\", \"\"},\n\t)\n\n\t// genAll maps \"\\x00\\x01\\x02...\\xfe\\xff\" to \"[all]\", amongst other things.\n\tallBytes := make([]byte, 256)\n\tfor i := range allBytes {\n\t\tallBytes[i] = byte(i)\n\t}\n\tallString := string(allBytes)\n\tgenAll := New(\n\t\tallString, \"[all]\",\n\t\t\"\\xff\", \"[ff]\",\n\t\t\"\\x00\", \"[00]\",\n\t)\n\ttestCases = append(testCases,\n\t\ttestCase{genAll, allString, \"[all]\"},\n\t\ttestCase{genAll, \"a\\xff\" + allString + \"\\x00\", \"a[ff][all][00]\"},\n\t\ttestCase{genAll, \"\", \"\"},\n\t)\n\n\t// Test cases with empty old strings.\n\n\tblankToX1 := New(\"\", \"X\")\n\tblankToX2 := New(\"\", \"X\", \"\", \"\")\n\tblankHighPriority := New(\"\", \"X\", \"o\", \"O\")\n\tblankLowPriority := New(\"o\", \"O\", \"\", \"X\")\n\tblankNoOp1 := New(\"\", \"\")\n\tblankNoOp2 := New(\"\", \"\", \"\", \"A\")\n\tblankFoo := New(\"\", \"X\", \"foobar\", \"R\", \"foobaz\", \"Z\")\n\ttestCases = append(testCases,\n\t\ttestCase{blankToX1, \"foo\", \"XfXoXoX\"},\n\t\ttestCase{blankToX1, \"\", \"X\"},\n\n\t\ttestCase{blankToX2, \"foo\", \"XfXoXoX\"},\n\t\ttestCase{blankToX2, \"\", \"X\"},\n\n\t\ttestCase{blankHighPriority, \"oo\", \"XOXOX\"},\n\t\ttestCase{blankHighPriority, \"ii\", 
\"XiXiX\"},\n\t\ttestCase{blankHighPriority, \"oiio\", \"XOXiXiXOX\"},\n\t\ttestCase{blankHighPriority, \"iooi\", \"XiXOXOXiX\"},\n\t\ttestCase{blankHighPriority, \"\", \"X\"},\n\n\t\ttestCase{blankLowPriority, \"oo\", \"OOX\"},\n\t\ttestCase{blankLowPriority, \"ii\", \"XiXiX\"},\n\t\ttestCase{blankLowPriority, \"oiio\", \"OXiXiOX\"},\n\t\ttestCase{blankLowPriority, \"iooi\", \"XiOOXiX\"},\n\t\ttestCase{blankLowPriority, \"\", \"X\"},\n\n\t\ttestCase{blankNoOp1, \"foo\", \"foo\"},\n\t\ttestCase{blankNoOp1, \"\", \"\"},\n\n\t\ttestCase{blankNoOp2, \"foo\", \"foo\"},\n\t\ttestCase{blankNoOp2, \"\", \"\"},\n\n\t\ttestCase{blankFoo, \"foobarfoobaz\", \"XRXZX\"},\n\t\ttestCase{blankFoo, \"foobar-foobaz\", \"XRX-XZX\"},\n\t\ttestCase{blankFoo, \"\", \"X\"},\n\t)\n\n\t// single string replacer\n\n\tabcMatcher := New(\"abc\", \"[match]\")\n\n\ttestCases = append(testCases,\n\t\ttestCase{abcMatcher, \"\", \"\"},\n\t\ttestCase{abcMatcher, \"ab\", \"ab\"},\n\t\ttestCase{abcMatcher, \"abc\", \"[match]\"},\n\t\ttestCase{abcMatcher, \"abcd\", \"[match]d\"},\n\t\ttestCase{abcMatcher, \"cabcabcdabca\", \"c[match][match]d[match]a\"},\n\t)\n\n\t// Issue 6659 cases (more single string replacer)\n\n\tnoHello := New(\"Hello\", \"\")\n\ttestCases = append(testCases,\n\t\ttestCase{noHello, \"Hello\", \"\"},\n\t\ttestCase{noHello, \"Hellox\", \"x\"},\n\t\ttestCase{noHello, \"xHello\", \"x\"},\n\t\ttestCase{noHello, \"xHellox\", \"xx\"},\n\t)\n\n\t// No-arg test cases.\n\n\tnop := New()\n\ttestCases = append(testCases,\n\t\ttestCase{nop, \"abc\", \"abc\"},\n\t\ttestCase{nop, \"\", \"\"},\n\t)\n\n\t// Run the test cases.\n\n\tfor i, tc := range testCases {\n\t\t{\n\t\t\t// Replace with len(in) == cap(in)\n\t\t\tin := make([]byte, len(tc.in))\n\t\t\tcopy(in, tc.in)\n\t\t\tif s := string(tc.r.Replace(in)); s != tc.out {\n\t\t\t\tt.Errorf(\"%d. 
Replace(%q /* len == cap */) = %q, want %q\", i, tc.in, s, tc.out)\n\t\t\t}\n\t\t}\n\n\t\t{\n\t\t\t// Replace with len(in) < cap(in)\n\t\t\tin := make([]byte, len(tc.in), len(tc.in)*2)\n\t\t\tcopy(in, tc.in)\n\t\t\tif s := string(tc.r.Replace(in)); s != tc.out {\n\t\t\t\tt.Errorf(\"%d. Replace(%q /* len < cap */) = %q, want %q\", i, tc.in, s, tc.out)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc BenchmarkGenericNoMatch(b *testing.B) {\n\tstr := []byte(strings.Repeat(\"A\", 100) + strings.Repeat(\"B\", 100))\n\tgeneric := New(\"a\", \"A\", \"b\", \"B\", \"12\", \"123\") // varying lengths forces generic\n\tfor i := 0; i < b.N; i++ {\n\t\tgeneric.Replace(str)\n\t}\n}\n\nfunc BenchmarkGenericMatch1(b *testing.B) {\n\tstr := []byte(strings.Repeat(\"a\", 100) + strings.Repeat(\"b\", 100))\n\tgeneric := New(\"a\", \"A\", \"b\", \"B\", \"12\", \"123\")\n\tfor i := 0; i < b.N; i++ {\n\t\tgeneric.Replace(str)\n\t}\n}\n\nfunc BenchmarkGenericMatch2(b *testing.B) {\n\tstr := bytes.Repeat([]byte(\"It&apos;s &lt;b&gt;HTML&lt;/b&gt;!\"), 100)\n\tfor i := 0; i < b.N; i++ {\n\t\thtmlUnescaper.Replace(str)\n\t}\n}\n\nfunc benchmarkSingleString(b *testing.B, pattern, text string) {\n\tr := New(pattern, \"[match]\")\n\tbuf := make([]byte, len(text), len(text)*7)\n\tb.SetBytes(int64(len(text)))\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tcopy(buf, text)\n\t\tr.Replace(buf)\n\t}\n}\n\nfunc BenchmarkSingleMaxSkipping(b *testing.B) {\n\tbenchmarkSingleString(b, strings.Repeat(\"b\", 25), strings.Repeat(\"a\", 10000))\n}\n\nfunc BenchmarkSingleLongSuffixFail(b *testing.B) {\n\tbenchmarkSingleString(b, \"b\"+strings.Repeat(\"a\", 500), strings.Repeat(\"a\", 1002))\n}\n\nfunc BenchmarkSingleMatch(b *testing.B) {\n\tbenchmarkSingleString(b, \"abcdef\", strings.Repeat(\"abcdefghijklmno\", 1000))\n}\n\nfunc benchmarkReplacer(b *testing.B, r *Replacer, str string) {\n\tbuf := make([]byte, len(str))\n\tb.ResetTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tcopy(buf, 
str)\n\t\tr.Replace(buf)\n\t}\n}\n\nfunc BenchmarkByteByteNoMatch(b *testing.B) {\n\tbenchmarkReplacer(b, capitalLetters, strings.Repeat(\"A\", 100)+strings.Repeat(\"B\", 100))\n}\n\nfunc BenchmarkByteByteMatch(b *testing.B) {\n\tbenchmarkReplacer(b, capitalLetters, strings.Repeat(\"a\", 100)+strings.Repeat(\"b\", 100))\n}\n\nfunc BenchmarkByteStringMatch(b *testing.B) {\n\tbenchmarkReplacer(b, htmlEscaper, \"<\"+strings.Repeat(\"a\", 99)+strings.Repeat(\"b\", 99)+\">\")\n}\n\nfunc BenchmarkHTMLEscapeNew(b *testing.B) {\n\tbenchmarkReplacer(b, htmlEscaper, \"I <3 to escape HTML & other text too.\")\n}\n\nfunc BenchmarkHTMLEscapeOld(b *testing.B) {\n\tstr := \"I <3 to escape HTML & other text too.\"\n\tbuf := make([]byte, len(str))\n\tfor i := 0; i < b.N; i++ {\n\t\tcopy(buf, str)\n\t\toldHTMLEscape(buf)\n\t}\n}\n\n// The http package's old HTML escaping function in bytes form.\nfunc oldHTMLEscape(s []byte) []byte {\n\ts = bytes.Replace(s, []byte(\"&\"), []byte(\"&amp;\"), -1)\n\ts = bytes.Replace(s, []byte(\"<\"), []byte(\"&lt;\"), -1)\n\ts = bytes.Replace(s, []byte(\">\"), []byte(\"&gt;\"), -1)\n\ts = bytes.Replace(s, []byte(`\"`), []byte(\"&quot;\"), -1)\n\ts = bytes.Replace(s, []byte(\"'\"), []byte(\"&apos;\"), -1)\n\treturn s\n}\n\n// BenchmarkByteByteReplaces compares byteByteImpl against multiple Replaces.\nfunc BenchmarkByteByteReplaces(b *testing.B) {\n\tstr := strings.Repeat(\"a\", 100) + strings.Repeat(\"b\", 100)\n\tfor i := 0; i < b.N; i++ {\n\t\tbytes.Replace(bytes.Replace([]byte(str), []byte{'a'}, []byte{'A'}, -1), []byte{'b'}, []byte{'B'}, -1)\n\t}\n}\n\n// BenchmarkByteByteMap compares byteByteImpl against Map.\nfunc BenchmarkByteByteMap(b *testing.B) {\n\tstr := strings.Repeat(\"a\", 100) + strings.Repeat(\"b\", 100)\n\tfn := func(r rune) rune {\n\t\tswitch r {\n\t\tcase 'a':\n\t\t\treturn 'A'\n\t\tcase 'b':\n\t\t\treturn 'B'\n\t\t}\n\t\treturn r\n\t}\n\tfor i := 0; i < b.N; i++ {\n\t\tbytes.Map(fn, []byte(str))\n\t}\n}\n"
  },
  {
    "path": "cloud/cloudlaunch/cloudlaunch.go",
    "content": "/*\nCopyright 2015 The Perkeep Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package cloudlaunch helps binaries run themselves on The Cloud, copying\n// themselves to GCE.\npackage cloudlaunch // import \"go4.org/cloud/cloudlaunch\"\n\nimport (\n\t\"encoding/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"net/http\"\n\t\"os\"\n\t\"path\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"go4.org/cloud/google/gceutil\"\n\n\t\"cloud.google.com/go/compute/metadata\"\n\t\"cloud.google.com/go/storage\"\n\t\"golang.org/x/net/context\"\n\t\"golang.org/x/oauth2\"\n\t\"golang.org/x/oauth2/google\"\n\tcompute \"google.golang.org/api/compute/v1\"\n\t\"google.golang.org/api/googleapi\"\n\t\"google.golang.org/api/option\"\n\tstorageapi \"google.golang.org/api/storage/v1\"\n)\n\nfunc readFile(v string) string {\n\tslurp, err := ioutil.ReadFile(v)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error reading %s: %v\", v, err)\n\t}\n\treturn strings.TrimSpace(string(slurp))\n}\n\nconst baseConfig = `#cloud-config\ncoreos:\n  update:\n    group: stable\n    reboot-strategy: $REBOOT\n  units:\n    - name: $NAME.service\n      command: start\n      content: |\n        [Unit]\n        Description=$NAME service\n        After=network.target\n        \n        [Service]\n        Type=simple\n        ExecStartPre=/bin/sh -c 'mkdir -p /opt/bin && /usr/bin/curl --silent -f -o /opt/bin/$NAME $URL?$(date +%s) && chmod +x /opt/bin/$NAME'\n  
      ExecStart=/opt/bin/$NAME\n        RestartSec=10\n        Restart=always\n        StartLimitInterval=0\n        \n        [Install]\n        WantedBy=network-online.target\n`\n\n// RestartPolicy controls whether the binary automatically restarts.\ntype RestartPolicy int\n\nconst (\n\tRestartOnUpdates RestartPolicy = iota\n\tRestartNever\n\t// TODO: more graceful restarts; make systemd own listening on network sockets,\n\t// don't break connections.\n)\n\ntype Config struct {\n\t// Name is the name of a service to run.\n\t// This is the name of the systemd service (without .service)\n\t// and the name of the GCE instance.\n\tName string\n\n\t// RestartPolicy controls whether the binary automatically restarts\n\t// on updates. The zero value means automatic.\n\tRestartPolicy RestartPolicy\n\n\t// UpdateStrategy sets the CoreOS automatic update strategy, and the\n\t// associated reboots. Possible values are \"best-effort\", \"etcd-lock\",\n\t// \"reboot\", \"off\", with \"best-effort\" being the default. See\n\t// https://coreos.com/os/docs/latest/update-strategies.html\n\tUpdateStrategy string\n\n\t// BinaryBucket and BinaryObject are the GCS bucket and object\n\t// within that bucket containing the Linux binary to download\n\t// on boot and occasionally run. 
This binary must be public\n\t// (at least for now).\n\tBinaryBucket string\n\tBinaryObject string // defaults to Name\n\n\tGCEProjectID string\n\tZone         string // defaults to us-central1-f\n\tSSD          bool\n\n\tScopes []string // any additional scopes\n\n\tMachineType  string\n\tInstanceName string\n}\n\n// cloudLaunch is a launch of a Config.\ntype cloudLaunch struct {\n\t*Config\n\toauthClient    *http.Client\n\tcomputeService *compute.Service\n}\n\nfunc (c *Config) binaryURL() string {\n\treturn \"https://storage.googleapis.com/\" + c.BinaryBucket + \"/\" + c.binaryObject()\n}\n\nfunc (c *Config) instName() string       { return c.Name } // for now\nfunc (c *Config) zone() string           { return strDefault(c.Zone, \"us-central1-f\") }\nfunc (c *Config) machineType() string    { return strDefault(c.MachineType, \"g1-small\") }\nfunc (c *Config) binaryObject() string   { return strDefault(c.BinaryObject, c.Name) }\nfunc (c *Config) updateStrategy() string { return strDefault(c.UpdateStrategy, \"best-effort\") }\n\nfunc (c *Config) projectAPIURL() string {\n\treturn \"https://www.googleapis.com/compute/v1/projects/\" + c.GCEProjectID\n}\nfunc (c *Config) machineTypeURL() string {\n\treturn c.projectAPIURL() + \"/zones/\" + c.zone() + \"/machineTypes/\" + c.machineType()\n}\n\nfunc strDefault(a, b string) string {\n\tif a != \"\" {\n\t\treturn a\n\t}\n\treturn b\n}\n\nvar (\n\tdoLaunch = flag.Bool(\"cloudlaunch\", false, \"Deploy or update this binary to the cloud. 
Must be on Linux, for now.\")\n)\n\nfunc (c *Config) MaybeDeploy() {\n\tflag.Parse()\n\tif !*doLaunch {\n\t\tgo c.restartLoop()\n\t\treturn\n\t}\n\tdefer os.Exit(1) // backup, in case we return without Fatal or os.Exit later\n\n\tif runtime.GOOS != \"linux\" || runtime.GOARCH != \"amd64\" {\n\t\tlog.Fatal(\"Can only use --cloudlaunch on linux/amd64, for now.\")\n\t}\n\n\tif c.GCEProjectID == \"\" {\n\t\tlog.Fatal(\"cloudconfig.GCEProjectID is empty\")\n\t}\n\tfilename := filepath.Join(os.Getenv(\"HOME\"), \"keys\", c.GCEProjectID+\".key.json\")\n\tlog.Printf(\"Using OAuth config from JSON service file: %s\", filename)\n\tjwtConf, err := google.JWTConfigFromJSON([]byte(readFile(filename)), append([]string{\n\t\tstorageapi.DevstorageFullControlScope,\n\t\tcompute.ComputeScope,\n\t\t\"https://www.googleapis.com/auth/cloud-platform\",\n\t}, c.Scopes...)...)\n\tif err != nil {\n\t\tlog.Fatalf(\"ConfigFromJSON: %v\", err)\n\t}\n\n\tcl := &cloudLaunch{\n\t\tConfig:      c,\n\t\toauthClient: jwtConf.Client(oauth2.NoContext),\n\t}\n\tcl.computeService, _ = compute.New(cl.oauthClient)\n\n\tcl.uploadBinary()\n\tcl.createInstance()\n\tos.Exit(0)\n}\n\nfunc (c *Config) restartLoop() {\n\tif !metadata.OnGCE() {\n\t\treturn\n\t}\n\tif c.RestartPolicy == RestartNever {\n\t\treturn\n\t}\n\turl := c.binaryURL()\n\tvar lastEtag string\n\tfor {\n\t\tres, err := http.Head(url + \"?\" + fmt.Sprint(time.Now().Unix()))\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Warning: %v\", err)\n\t\t\ttime.Sleep(15 * time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tetag := res.Header.Get(\"Etag\")\n\t\tif etag == \"\" {\n\t\t\tlog.Printf(\"Warning, no ETag in response: %v\", res)\n\t\t\ttime.Sleep(15 * time.Second)\n\t\t\tcontinue\n\t\t}\n\t\tif lastEtag != \"\" && etag != lastEtag {\n\t\t\tlog.Printf(\"Binary updated; restarting.\")\n\t\t\t// TODO: more graceful restart, letting systemd own the network connections.\n\t\t\t// Then we can finish up requests here.\n\t\t\tos.Exit(0)\n\t\t}\n\t\tlastEtag = 
etag\n\t\ttime.Sleep(15 * time.Second)\n\t}\n}\n\n// uploadBinary uploads the currently-running Linux binary.\n// It crashes if it fails.\nfunc (cl *cloudLaunch) uploadBinary() {\n\tctx := context.Background()\n\tif cl.BinaryBucket == \"\" {\n\t\tlog.Fatal(\"cloudlaunch: Config.BinaryBucket is empty\")\n\t}\n\tstoClient, err := storage.NewClient(ctx, option.WithHTTPClient(cl.oauthClient))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tw := stoClient.Bucket(cl.BinaryBucket).Object(cl.binaryObject()).NewWriter(ctx)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tw.ACL = []storage.ACLRule{\n\t\t// If you don't give the owners access, the web UI seems to\n\t\t// have a bug and doesn't have access to see that it's public, so\n\t\t// won't render the \"Shared Publicly\" link. So we do that, even\n\t\t// though it's dumb and unnecessary otherwise:\n\t\t{\n\t\t\tEntity: storage.ACLEntity(\"project-owners-\" + cl.GCEProjectID),\n\t\t\tRole:   storage.RoleOwner,\n\t\t},\n\t\t// Public, so our systemd unit can get it easily:\n\t\t{\n\t\t\tEntity: storage.AllUsers,\n\t\t\tRole:   storage.RoleReader,\n\t\t},\n\t}\n\tw.CacheControl = \"no-cache\"\n\tselfPath := getSelfPath()\n\tlog.Printf(\"Uploading %q to %v\", selfPath, cl.binaryURL())\n\tf, err := os.Open(selfPath)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer f.Close()\n\tn, err := io.Copy(w, f)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif err := w.Close(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tlog.Printf(\"Uploaded %d bytes\", n)\n}\n\nfunc getSelfPath() string {\n\tif runtime.GOOS != \"linux\" {\n\t\tpanic(\"TODO\")\n\t}\n\tv, err := os.Readlink(\"/proc/self/exe\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn v\n}\n\nfunc zoneInRegion(zone, regionURL string) bool {\n\tif zone == \"\" {\n\t\tpanic(\"empty zone\")\n\t}\n\tif regionURL == \"\" {\n\t\tpanic(\"empty regionURL\")\n\t}\n\t// zone is like \"us-central1-f\"\n\t// regionURL is like 
\"https://www.googleapis.com/compute/v1/projects/camlistore-website/regions/us-central1\"\n\tregion := path.Base(regionURL) // \"us-central1\"\n\tif region == \"\" {\n\t\tpanic(\"empty region\")\n\t}\n\treturn strings.HasPrefix(zone, region)\n}\n\n// findIP finds an IP address to use, or returns the empty string if none is found.\n// It tries to find a reserved one in the same region where the name of the reserved IP\n// is \"NAME-ip\" and the IP is not in use.\nfunc (cl *cloudLaunch) findIP() string {\n\t// Try to find it by name.\n\taggAddrList, err := cl.computeService.Addresses.AggregatedList(cl.GCEProjectID).Do()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t// https://godoc.org/google.golang.org/api/compute/v1#AddressAggregatedList\n\tvar ip string\nIPLoop:\n\tfor _, asl := range aggAddrList.Items {\n\t\tfor _, addr := range asl.Addresses {\n\t\t\tlog.Printf(\"  addr: %#v\", addr)\n\t\t\tif addr.Name == cl.Name+\"-ip\" && addr.Status == \"RESERVED\" && zoneInRegion(cl.zone(), addr.Region) {\n\t\t\t\tip = addr.Address\n\t\t\t\tbreak IPLoop\n\t\t\t}\n\t\t}\n\t}\n\treturn ip\n}\n\nfunc (cl *cloudLaunch) createInstance() {\n\tinst := cl.lookupInstance()\n\tif inst != nil {\n\t\tlog.Printf(\"Instance exists; not re-creating.\")\n\t\treturn\n\t}\n\n\tlog.Printf(\"Instance doesn't exist; creating...\")\n\n\tip := cl.findIP()\n\tlog.Printf(\"Found IP: %v\", ip)\n\n\tcloudConfig := strings.NewReplacer(\n\t\t\"$NAME\", cl.Name,\n\t\t\"$URL\", cl.binaryURL(),\n\t\t\"$REBOOT\", cl.updateStrategy(),\n\t).Replace(baseConfig)\n\n\tinstance := &compute.Instance{\n\t\tName:        cl.instName(),\n\t\tDescription: cl.Name,\n\t\tMachineType: cl.machineTypeURL(),\n\t\tDisks:       []*compute.AttachedDisk{cl.instanceDisk()},\n\t\tTags: &compute.Tags{\n\t\t\tItems: []string{\"http-server\", \"https-server\"},\n\t\t},\n\t\tMetadata: &compute.Metadata{\n\t\t\tItems: []*compute.MetadataItems{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"user-data\",\n\t\t\t\t\tValue: 
googleapi.String(cloudConfig),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tNetworkInterfaces: []*compute.NetworkInterface{\n\t\t\t&compute.NetworkInterface{\n\t\t\t\tAccessConfigs: []*compute.AccessConfig{\n\t\t\t\t\t&compute.AccessConfig{\n\t\t\t\t\t\tType:  \"ONE_TO_ONE_NAT\",\n\t\t\t\t\t\tName:  \"External NAT\",\n\t\t\t\t\t\tNatIP: ip,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tNetwork: cl.projectAPIURL() + \"/global/networks/default\",\n\t\t\t},\n\t\t},\n\t\tServiceAccounts: []*compute.ServiceAccount{\n\t\t\t{\n\t\t\t\tEmail:  \"default\",\n\t\t\t\tScopes: cl.Scopes,\n\t\t\t},\n\t\t},\n\t}\n\n\tlog.Printf(\"Creating instance...\")\n\top, err := cl.computeService.Instances.Insert(cl.GCEProjectID, cl.zone(), instance).Do()\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to create instance: %v\", err)\n\t}\n\topName := op.Name\n\tlog.Printf(\"Created. Waiting on operation %v\", opName)\nOpLoop:\n\tfor {\n\t\ttime.Sleep(2 * time.Second)\n\t\top, err := cl.computeService.ZoneOperations.Get(cl.GCEProjectID, cl.zone(), opName).Do()\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Failed to get op %s: %v\", opName, err)\n\t\t}\n\t\tswitch op.Status {\n\t\tcase \"PENDING\", \"RUNNING\":\n\t\t\tlog.Printf(\"Waiting on operation %v\", opName)\n\t\t\tcontinue\n\t\tcase \"DONE\":\n\t\t\tif op.Error != nil {\n\t\t\t\tfor _, operr := range op.Error.Errors {\n\t\t\t\t\tlog.Printf(\"Error: %+v\", operr)\n\t\t\t\t}\n\t\t\t\tlog.Fatalf(\"Failed to start.\")\n\t\t\t}\n\t\t\tlog.Printf(\"Success. 
%+v\", op)\n\t\t\tbreak OpLoop\n\t\tdefault:\n\t\t\tlog.Fatalf(\"Unknown status %q: %+v\", op.Status, op)\n\t\t}\n\t}\n\n\tinst, err = cl.computeService.Instances.Get(cl.GCEProjectID, cl.zone(), cl.instName()).Do()\n\tif err != nil {\n\t\tlog.Fatalf(\"Error getting instance after creation: %v\", err)\n\t}\n\tij, _ := json.MarshalIndent(inst, \"\", \"    \")\n\tlog.Printf(\"%s\", ij)\n\tlog.Printf(\"Instance created.\")\n\tos.Exit(0)\n}\n\n// returns nil if instance doesn't exist.\nfunc (cl *cloudLaunch) lookupInstance() *compute.Instance {\n\tinst, err := cl.computeService.Instances.Get(cl.GCEProjectID, cl.zone(), cl.instName()).Do()\n\tif ae, ok := err.(*googleapi.Error); ok && ae.Code == 404 {\n\t\treturn nil\n\t} else if err != nil {\n\t\tlog.Fatalf(\"Instances.Get: %v\", err)\n\t}\n\treturn inst\n}\n\nfunc (cl *cloudLaunch) instanceDisk() *compute.AttachedDisk {\n\timageURL, err := gceutil.CoreOSImageURL(cl.oauthClient)\n\tif err != nil {\n\t\tlog.Fatalf(\"error looking up latest CoreOS stable image: %v\", err)\n\t}\n\tdiskName := cl.instName() + \"-coreos-stateless-pd\"\n\tvar diskType string\n\tif cl.SSD {\n\t\tdiskType = cl.projectAPIURL() + \"/zones/\" + cl.zone() + \"/diskTypes/pd-ssd\"\n\t}\n\treturn &compute.AttachedDisk{\n\t\tAutoDelete: true,\n\t\tBoot:       true,\n\t\tType:       \"PERSISTENT\",\n\t\tInitializeParams: &compute.AttachedDiskInitializeParams{\n\t\t\tDiskName:    diskName,\n\t\t\tSourceImage: imageURL,\n\t\t\tDiskSizeGb:  50,\n\t\t\tDiskType:    diskType,\n\t\t},\n\t}\n}\n"
  },
  {
    "path": "cloud/google/gceutil/gceutil.go",
    "content": "/*\nCopyright 2015 The Perkeep Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package gceutil provides utility functions to help with instances on\n// Google Compute Engine.\npackage gceutil // import \"go4.org/cloud/google/gceutil\"\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"net/http\"\n\t\"strings\"\n\t\"time\"\n\n\t\"google.golang.org/api/compute/v1\"\n)\n\n// CoreOSImageURL returns the URL of the latest stable CoreOS image for running\n// on Google Compute Engine.\nfunc CoreOSImageURL(cl *http.Client) (string, error) {\n\treturn osImageURL(cl, false)\n}\n\n// COSImageURL returns the URL of the latest stable Container-Optimized OS image\n// for running on Google Compute Engine.\nfunc COSImageURL(cl *http.Client) (string, error) {\n\treturn osImageURL(cl, true)\n}\n\nfunc osImageURL(cl *http.Client, cos bool) (string, error) {\n\tproject := \"coreos-cloud\"\n\tif cos {\n\t\tproject = \"cos-cloud\"\n\t}\n\tresp, err := cl.Get(\"https://www.googleapis.com/compute/v1/projects/\" + project + \"/global/images\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\ttype osImage struct {\n\t\tSelfLink          string\n\t\tCreationTimestamp time.Time\n\t\tName              string\n\t}\n\n\ttype osImageList struct {\n\t\tItems []osImage\n\t}\n\n\timageList := &osImageList{}\n\tif err := json.NewDecoder(resp.Body).Decode(imageList); err != nil {\n\t\treturn \"\", err\n\t}\n\tif imageList == nil || len(imageList.Items) == 
0 {\n\t\treturn \"\", errors.New(\"no images list in response\")\n\t}\n\n\timageURL := \"\"\n\tvar max time.Time // latest stable image creation time\n\timgPrefix := \"coreos-stable\"\n\tif cos {\n\t\timgPrefix = \"cos-stable\"\n\t}\n\tfor _, v := range imageList.Items {\n\t\tif !strings.HasPrefix(v.Name, imgPrefix) {\n\t\t\tcontinue\n\t\t}\n\t\tif v.CreationTimestamp.After(max) {\n\t\t\tmax = v.CreationTimestamp\n\t\t\timageURL = v.SelfLink\n\t\t}\n\t}\n\tif imageURL == \"\" {\n\t\tif cos {\n\t\t\treturn \"\", errors.New(\"no stable Container-Optimized OS image found\")\n\t\t}\n\t\treturn \"\", errors.New(\"no stable coreOS image found\")\n\t}\n\treturn imageURL, nil\n}\n\n// InstanceGroupAndManager contains both an InstanceGroup and\n// its InstanceGroupManager, if any.\ntype InstanceGroupAndManager struct {\n\tGroup *compute.InstanceGroup\n\n\t// Manager is the manager of the Group. It may be nil.\n\tManager *compute.InstanceGroupManager\n}\n\n// InstanceGroups returns all the instance groups in a project's zone, along\n// with their associated InstanceGroupManagers.\n// The returned map is keyed by the instance group identifier URL.\nfunc InstanceGroups(svc *compute.Service, proj, zone string) (map[string]InstanceGroupAndManager, error) {\n\tmanagerList, err := svc.InstanceGroupManagers.List(proj, zone).Do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif managerList.NextPageToken != \"\" {\n\t\treturn nil, errors.New(\"too many managers; pagination not supported\")\n\t}\n\tmanagedBy := make(map[string]*compute.InstanceGroupManager) // instance group URL -> its manager\n\tfor _, it := range managerList.Items {\n\t\tmanagedBy[it.InstanceGroup] = it\n\t}\n\tgroupList, err := svc.InstanceGroups.List(proj, zone).Do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif groupList.NextPageToken != \"\" {\n\t\treturn nil, errors.New(\"too many instance groups; pagination not supported\")\n\t}\n\tret := make(map[string]InstanceGroupAndManager)\n\tfor _, it := range 
groupList.Items {\n\t\tret[it.SelfLink] = InstanceGroupAndManager{it, managedBy[it.SelfLink]}\n\t}\n\treturn ret, nil\n}\n"
  },
  {
    "path": "cloud/google/gcsutil/storage.go",
    "content": "/*\nCopyright 2015 The Go4 Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package gcsutil provides tools for accessing Google Cloud Storage until they can be\n// completely replaced by cloud.google.com/go/storage.\npackage gcsutil // import \"go4.org/cloud/google/gcsutil\"\n\nimport (\n\t\"encoding/xml\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"strings\"\n\n\t\"cloud.google.com/go/storage\"\n\t\"go4.org/ctxutil\"\n\t\"golang.org/x/net/context\"\n)\n\nconst gsAccessURL = \"https://storage.googleapis.com\"\n\n// An Object holds the name of an object (its bucket and key) within\n// Google Cloud Storage.\ntype Object struct {\n\tBucket string\n\tKey    string\n}\n\nfunc (o *Object) valid() error {\n\tif o == nil {\n\t\treturn errors.New(\"invalid nil Object\")\n\t}\n\tif o.Bucket == \"\" {\n\t\treturn errors.New(\"missing required Bucket field in Object\")\n\t}\n\tif o.Key == \"\" {\n\t\treturn errors.New(\"missing required Key field in Object\")\n\t}\n\treturn nil\n}\n\n// A SizedObject holds the bucket, key, and size of an object.\ntype SizedObject struct {\n\tObject\n\tSize int64\n}\n\nfunc (o *Object) String() string {\n\tif o == nil {\n\t\treturn \"<nil *Object>\"\n\t}\n\treturn fmt.Sprintf(\"%v/%v\", o.Bucket, o.Key)\n}\n\nfunc (so SizedObject) String() string {\n\treturn fmt.Sprintf(\"%v/%v (%vB)\", so.Bucket, so.Key, so.Size)\n}\n\n// Makes a simple body-less google storage request\nfunc 
simpleRequest(method, url_ string) (*http.Request, error) {\n\treq, err := http.NewRequest(method, url_, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"x-goog-api-version\", \"2\")\n\treturn req, err\n}\n\n// ErrInvalidRange is used when the server has returned http.StatusRequestedRangeNotSatisfiable.\nvar ErrInvalidRange = errors.New(\"gcsutil: requested range not satisfiable\")\n\n// GetPartialObject fetches part of a Google Cloud Storage object.\n// This function relies on the ctx ctxutil.HTTPClient value being set to an OAuth2\n// authorized and authenticated HTTP client.\n// If length is negative, the rest of the object is returned.\n// It returns ErrInvalidRange if the server replies with http.StatusRequestedRangeNotSatisfiable.\n// The caller must call Close on the returned value.\nfunc GetPartialObject(ctx context.Context, obj Object, offset, length int64) (io.ReadCloser, error) {\n\tif offset < 0 {\n\t\treturn nil, errors.New(\"invalid negative offset\")\n\t}\n\tif err := obj.valid(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := simpleRequest(\"GET\", gsAccessURL+\"/\"+obj.Bucket+\"/\"+obj.Key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif length >= 0 {\n\t\treq.Header.Set(\"Range\", fmt.Sprintf(\"bytes=%d-%d\", offset, offset+length-1))\n\t} else {\n\t\treq.Header.Set(\"Range\", fmt.Sprintf(\"bytes=%d-\", offset))\n\t}\n\treq.Cancel = ctx.Done()\n\tres, err := ctxutil.Client(ctx).Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"GET (offset=%d, length=%d) failed: %v\\n\", offset, length, err)\n\t}\n\tif res.StatusCode == http.StatusNotFound {\n\t\tres.Body.Close()\n\t\treturn nil, os.ErrNotExist\n\t}\n\tif !(res.StatusCode == http.StatusPartialContent || (offset == 0 && res.StatusCode == http.StatusOK)) {\n\t\tres.Body.Close()\n\t\tif res.StatusCode == http.StatusRequestedRangeNotSatisfiable {\n\t\t\treturn nil, ErrInvalidRange\n\t\t}\n\t\treturn nil, fmt.Errorf(\"GET (offset=%d, length=%d) got failed status: 
%v\\n\", offset, length, res.Status)\n\t}\n\n\treturn res.Body, nil\n}\n\n// EnumerateObjects lists the objects in a bucket.\n// This function relies on the ctx oauth2.HTTPClient value being set to an OAuth2\n// authorized and authenticated HTTP client.\n// If after is non-empty, listing will begin with lexically greater object names.\n// If limit is non-zero, the length of the list will be limited to that number.\nfunc EnumerateObjects(ctx context.Context, bucket, after string, limit int) ([]*storage.ObjectAttrs, error) {\n\t// Build url, with query params\n\tvar params []string\n\tif after != \"\" {\n\t\tparams = append(params, \"marker=\"+url.QueryEscape(after))\n\t}\n\tif limit > 0 {\n\t\tparams = append(params, fmt.Sprintf(\"max-keys=%v\", limit))\n\t}\n\tquery := \"\"\n\tif len(params) > 0 {\n\t\tquery = \"?\" + strings.Join(params, \"&\")\n\t}\n\n\treq, err := simpleRequest(\"GET\", gsAccessURL+\"/\"+bucket+\"/\"+query)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Cancel = ctx.Done()\n\tres, err := ctxutil.Client(ctx).Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer res.Body.Close()\n\tif res.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"gcsutil: bad enumerate response code: %v\", res.Status)\n\t}\n\n\tvar xres struct {\n\t\tContents []SizedObject\n\t}\n\tif err = xml.NewDecoder(res.Body).Decode(&xres); err != nil {\n\t\treturn nil, err\n\t}\n\n\tobjAttrs := make([]*storage.ObjectAttrs, len(xres.Contents))\n\tfor k, o := range xres.Contents {\n\t\tobjAttrs[k] = &storage.ObjectAttrs{\n\t\t\tName: o.Key,\n\t\t\tSize: o.Size,\n\t\t}\n\t}\n\n\treturn objAttrs, nil\n}\n"
  },
  {
    "path": "ctxutil/ctxutil.go",
    "content": "/*\nCopyright 2015 The Go4 Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package ctxutil contains golang.org/x/net/context related utilities.\npackage ctxutil // import \"go4.org/ctxutil\"\n\nimport (\n\t\"net/http\"\n\n\t\"golang.org/x/net/context\"\n\t\"golang.org/x/oauth2\"\n)\n\n// HTTPClient is the context key to use with golang.org/x/net/context's WithValue function\n// to associate an *http.Client value with a context.\n//\n// We use the same value as the oauth2 package (which first introduced this key) rather\n// than creating a new one and forcing users to possibly set two.\nvar HTTPClient = oauth2.HTTPClient\n\n// Client returns the HTTP client to use for the provided context.\n// If ctx is non-nil and has an associated HTTP client, that client is returned.\n// Otherwise, http.DefaultClient is returned.\nfunc Client(ctx context.Context) *http.Client {\n\tif ctx != nil {\n\t\tif hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {\n\t\t\treturn hc\n\t\t}\n\t}\n\treturn http.DefaultClient\n}\n"
  },
  {
    "path": "errorutil/highlight.go",
    "content": "/*\nCopyright 2011 Google Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package errorutil helps make better error messages.\npackage errorutil // import \"go4.org/errorutil\"\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n)\n\n// HighlightBytePosition takes a reader and the location in bytes of a parse\n// error (for instance, from json.SyntaxError.Offset) and returns the line, column,\n// and pretty-printed context around the error with an arrow indicating the exact\n// position of the syntax error.\nfunc HighlightBytePosition(f io.Reader, pos int64) (line, col int, highlight string) {\n\tline = 1\n\tbr := bufio.NewReader(f)\n\tlastLine := \"\"\n\tthisLine := new(bytes.Buffer)\n\tfor n := int64(0); n < pos; n++ {\n\t\tb, err := br.ReadByte()\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\tif b == '\\n' {\n\t\t\tlastLine = thisLine.String()\n\t\t\tthisLine.Reset()\n\t\t\tline++\n\t\t\tcol = 1\n\t\t} else {\n\t\t\tcol++\n\t\t\tthisLine.WriteByte(b)\n\t\t}\n\t}\n\tif line > 1 {\n\t\thighlight += fmt.Sprintf(\"%5d: %s\\n\", line-1, lastLine)\n\t}\n\thighlight += fmt.Sprintf(\"%5d: %s\\n\", line, thisLine.String())\n\thighlight += fmt.Sprintf(\"%s^\\n\", strings.Repeat(\" \", col+5))\n\treturn\n}\n"
  },
  {
    "path": "fault/fault.go",
    "content": "/*\nCopyright 2014 The Go4 Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package fault handles fault injection for testing.\npackage fault // import \"go4.org/fault\"\n\nimport (\n\t\"errors\"\n\t\"math/rand\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n)\n\nvar fakeErr = errors.New(\"fake injected error for testing\")\n\n// An Injector reports whether fake errors should be returned.\ntype Injector struct {\n\tfailPercent int\n}\n\n// NewInjector returns a new fault injector with the given name.  The\n// environment variable \"FAULT_\" + capital(name) + \"_FAIL_PERCENT\"\n// controls the percentage of requests that fail. If undefined or\n// zero, no requests fail.\nfunc NewInjector(name string) *Injector {\n\tvar failPercent, _ = strconv.Atoi(os.Getenv(\"FAULT_\" + strings.ToUpper(name) + \"_FAIL_PERCENT\"))\n\treturn &Injector{\n\t\tfailPercent: failPercent,\n\t}\n}\n\n// ShouldFail reports whether a fake error should be returned.\nfunc (in *Injector) ShouldFail() bool {\n\treturn in.failPercent > 0 && in.failPercent > rand.Intn(100)\n}\n\n// FailErr checks ShouldFail and, if true, assigns a fake error to err\n// and returns true.\nfunc (in *Injector) FailErr(err *error) bool {\n\tif !in.ShouldFail() {\n\t\treturn false\n\t}\n\t*err = fakeErr\n\treturn true\n}\n"
  },
  {
    "path": "go.mod",
    "content": "module go4.org\n\ngo 1.24.0\n\nrequire (\n\tcloud.google.com/go/compute/metadata v0.9.0\n\tcloud.google.com/go/storage v1.56.0\n\tgithub.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd\n\tgolang.org/x/net v0.48.0\n\tgolang.org/x/oauth2 v0.34.0\n\tgolang.org/x/sys v0.40.0\n\tgoogle.golang.org/api v0.259.0\n)\n\nrequire (\n\tcel.dev/expr v0.24.0 // indirect\n\tcloud.google.com/go v0.121.6 // indirect\n\tcloud.google.com/go/auth v0.18.0 // indirect\n\tcloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect\n\tcloud.google.com/go/iam v1.5.3 // indirect\n\tcloud.google.com/go/monitoring v1.24.3 // indirect\n\tgithub.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 // indirect\n\tgithub.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 // indirect\n\tgithub.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 // indirect\n\tgithub.com/cespare/xxhash/v2 v2.3.0 // indirect\n\tgithub.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f // indirect\n\tgithub.com/envoyproxy/go-control-plane/envoy v1.35.0 // indirect\n\tgithub.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect\n\tgithub.com/felixge/httpsnoop v1.0.4 // indirect\n\tgithub.com/go-jose/go-jose/v4 v4.1.3 // indirect\n\tgithub.com/go-logr/logr v1.4.3 // indirect\n\tgithub.com/go-logr/stdr v1.2.2 // indirect\n\tgithub.com/google/s2a-go v0.1.9 // indirect\n\tgithub.com/google/uuid v1.6.0 // indirect\n\tgithub.com/googleapis/enterprise-certificate-proxy v0.3.7 // indirect\n\tgithub.com/googleapis/gax-go/v2 v2.16.0 // indirect\n\tgithub.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect\n\tgithub.com/spiffe/go-spiffe/v2 v2.6.0 // indirect\n\tgo.opentelemetry.io/auto/sdk v1.2.1 // indirect\n\tgo.opentelemetry.io/contrib/detectors/gcp v1.38.0 // indirect\n\tgo.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // 
indirect\n\tgo.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect\n\tgo.opentelemetry.io/otel v1.38.0 // indirect\n\tgo.opentelemetry.io/otel/metric v1.38.0 // indirect\n\tgo.opentelemetry.io/otel/sdk v1.38.0 // indirect\n\tgo.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect\n\tgo.opentelemetry.io/otel/trace v1.38.0 // indirect\n\tgolang.org/x/crypto v0.46.0 // indirect\n\tgolang.org/x/sync v0.19.0 // indirect\n\tgolang.org/x/text v0.32.0 // indirect\n\tgolang.org/x/time v0.14.0 // indirect\n\tgoogle.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 // indirect\n\tgoogle.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect\n\tgoogle.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b // indirect\n\tgoogle.golang.org/grpc v1.78.0 // indirect\n\tgoogle.golang.org/protobuf v1.36.11 // indirect\n)\n"
  },
  {
    "path": "go.sum",
    "content": "cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY=\ncel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw=\ncloud.google.com/go v0.121.6 h1:waZiuajrI28iAf40cWgycWNgaXPO06dupuS+sgibK6c=\ncloud.google.com/go v0.121.6/go.mod h1:coChdst4Ea5vUpiALcYKXEpR1S9ZgXbhEzzMcMR66vI=\ncloud.google.com/go/auth v0.18.0 h1:wnqy5hrv7p3k7cShwAU/Br3nzod7fxoqG+k0VZ+/Pk0=\ncloud.google.com/go/auth v0.18.0/go.mod h1:wwkPM1AgE1f2u6dG443MiWoD8C3BtOywNsUMcUTVDRo=\ncloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=\ncloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=\ncloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=\ncloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=\ncloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc=\ncloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU=\ncloud.google.com/go/logging v1.13.1 h1:O7LvmO0kGLaHY/gq8cV7T0dyp6zJhYAOtZPX4TF3QtY=\ncloud.google.com/go/logging v1.13.1/go.mod h1:XAQkfkMBxQRjQek96WLPNze7vsOmay9H5PqfsNYDqvw=\ncloud.google.com/go/longrunning v0.7.0 h1:FV0+SYF1RIj59gyoWDRi45GiYUMM3K1qO51qoboQT1E=\ncloud.google.com/go/longrunning v0.7.0/go.mod h1:ySn2yXmjbK9Ba0zsQqunhDkYi0+9rlXIwnoAf+h+TPY=\ncloud.google.com/go/monitoring v1.24.3 h1:dde+gMNc0UhPZD1Azu6at2e79bfdztVDS5lvhOdsgaE=\ncloud.google.com/go/monitoring v1.24.3/go.mod h1:nYP6W0tm3N9H/bOw8am7t62YTzZY+zUeQ+Bi6+2eonI=\ncloud.google.com/go/storage v1.56.0 h1:iixmq2Fse2tqxMbWhLWC9HfBj1qdxqAmiK8/eqtsLxI=\ncloud.google.com/go/storage v1.56.0/go.mod h1:Tpuj6t4NweCLzlNbw9Z9iwxEkrSem20AetIeH/shgVU=\ncloud.google.com/go/trace v1.11.7 h1:kDNDX8JkaAG3R2nq1lIdkb7FCSi1rCmsEtKVsty7p+U=\ncloud.google.com/go/trace v1.11.7/go.mod 
h1:TNn9d5V3fQVf6s4SCveVMIBS2LJUqo73GACmq/Tky0s=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 h1:sBEjpZlNHzK1voKq9695PJSX2o5NEXl7/OL3coiIY0c=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 h1:owcC2UnmsZycprQ5RfRgjydWhuoxg71LUfyiQdijZuM=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0/go.mod h1:ZPpqegjbE99EPKsu3iUWV22A04wzGPcAY/ziSIQEEgs=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0 h1:4LP6hvB4I5ouTbGgWtixJhgED6xdf67twf9PoY96Tbg=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0/go.mod h1:jUZ5LYlw40WMd07qxcQJD5M40aUxrfwqQX1g7zxYnrQ=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 h1:Ron4zCA/yk6U7WOBXhTJcDpsUBG9npumK6xw2auFltQ=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0/go.mod h1:cSgYe11MCNYunTnRXrKiR/tHc0eoKjICUuWpNZoVCOo=\ngithub.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=\ngithub.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=\ngithub.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f h1:Y8xYupdHxryycyPlc9Y+bSQAYZnetRJ70VMVKm5CKI0=\ngithub.com/cncf/xds/go v0.0.0-20251022180443-0feb69152e9f/go.mod h1:HlzOvOjVBOfTGSRXRyY0OiCS/3J1akRGQQpRO/7zyF4=\ngithub.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=\ngithub.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329 h1:K+fnvUM0VZ7ZFJf0n4L/BRlnsb9pL/GuDG6FqaH+PwM=\ngithub.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329/go.mod 
h1:Alz8LEClvR7xKsrq3qzoc4N0guvVNSS8KmSChGYr9hs=\ngithub.com/envoyproxy/go-control-plane/envoy v1.35.0 h1:ixjkELDE+ru6idPxcHLj8LBVc2bFP7iBytj353BoHUo=\ngithub.com/envoyproxy/go-control-plane/envoy v1.35.0/go.mod h1:09qwbGVuSWWAyN5t/b3iyVfz5+z8QWGrzkoqm/8SbEs=\ngithub.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI=\ngithub.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=\ngithub.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=\ngithub.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=\ngithub.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=\ngithub.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=\ngithub.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=\ngithub.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=\ngithub.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=\ngithub.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=\ngithub.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=\ngithub.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=\ngithub.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=\ngithub.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=\ngithub.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=\ngithub.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=\ngithub.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=\ngithub.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc=\ngithub.com/google/martian/v3 v3.3.3/go.mod 
h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0=\ngithub.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=\ngithub.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=\ngithub.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=\ngithub.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ=\ngithub.com/googleapis/enterprise-certificate-proxy v0.3.7/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=\ngithub.com/googleapis/gax-go/v2 v2.16.0 h1:iHbQmKLLZrexmb0OSsNGTeSTS0HO4YvFOG8g5E4Zd0Y=\ngithub.com/googleapis/gax-go/v2 v2.16.0/go.mod h1:o1vfQjjNZn4+dPnRdl/4ZD7S9414Y4xA+a/6Icj6l14=\ngithub.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=\ngithub.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=\ngithub.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=\ngithub.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd h1:CmH9+J6ZSsIjUK3dcGsnCnO41eRBOnY12zwkn5qVwgc=\ngithub.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk=\ngithub.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo=\ngithub.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs=\ngithub.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=\ngithub.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=\ngo.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=\ngo.opentelemetry.io/auto/sdk v1.2.1/go.mod 
h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=\ngo.opentelemetry.io/contrib/detectors/gcp v1.38.0 h1:ZoYbqX7OaA/TAikspPl3ozPI6iY6LiIY9I8cUfm+pJs=\ngo.opentelemetry.io/contrib/detectors/gcp v1.38.0/go.mod h1:SU+iU7nu5ud4oCb3LQOhIZ3nRLj6FNVrKgtflbaf2ts=\ngo.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ=\ngo.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo=\ngo.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=\ngo.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=\ngo.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=\ngo.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=\ngo.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0 h1:rixTyDGXFxRy1xzhKrotaHy3/KXdPhlWARrCgK+eqUY=\ngo.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.36.0/go.mod h1:dowW6UsM9MKbJq5JTz2AMVp3/5iW5I/TStsk8S+CfHw=\ngo.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=\ngo.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=\ngo.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=\ngo.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=\ngo.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=\ngo.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=\ngo.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=\ngo.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=\ngolang.org/x/crypto v0.46.0 
h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=\ngolang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=\ngolang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=\ngolang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=\ngolang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw=\ngolang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=\ngolang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=\ngolang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=\ngolang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=\ngolang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=\ngolang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=\ngolang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=\ngolang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=\ngolang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=\ngonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=\ngonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=\ngoogle.golang.org/api v0.259.0 h1:90TaGVIxScrh1Vn/XI2426kRpBqHwWIzVBzJsVZ5XrQ=\ngoogle.golang.org/api v0.259.0/go.mod h1:LC2ISWGWbRoyQVpxGntWwLWN/vLNxxKBK9KuJRI8Te4=\ngoogle.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 h1:GvESR9BIyHUahIb0NcTum6itIWtdoglGX+rnGxm2934=\ngoogle.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:yJ2HH4EHEDTd3JiLmhds6NkJ17ITVYOdV3m3VKOnws0=\ngoogle.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls=\ngoogle.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto=\ngoogle.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b 
h1:Mv8VFug0MP9e5vUxfBcE3vUkV6CImK3cMNMIDFjmzxU=\ngoogle.golang.org/genproto/googleapis/rpc v0.0.0-20251222181119-0a764e51fe1b/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=\ngoogle.golang.org/grpc v1.78.0 h1:K1XZG/yGDJnzMdd/uZHAkVqJE+xIDOcmdSFZkBUicNc=\ngoogle.golang.org/grpc v1.78.0/go.mod h1:I47qjTo4OKbMkjA/aOOwxDIiPSBofUtQUI5EfpWvW7U=\ngoogle.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=\ngoogle.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=\ngopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=\ngopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\n"
  },
  {
    "path": "go4test/cloudlaunch/serve_on_cloud.go",
    "content": "//go:build ignore\n\n/*\nCopyright 2016 The Go4 Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// The serve_on_cloud program deploys an HTTP server on Google Compute Engine,\n// serving from Google Cloud Storage. Its purpose is to help testing\n// go4.org/cloud/cloudlaunch and go4.org/wkfs/gcs.\npackage main\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"log\"\n\t\"net/http\"\n\t\"os\"\n\t\"path\"\n\t\"regexp\"\n\t\"time\"\n\n\t\"go4.org/cloud/cloudlaunch\"\n\t\"go4.org/wkfs\"\n\t_ \"go4.org/wkfs/gcs\"\n\n\t\"cloud.google.com/go/compute/metadata\"\n\tcompute \"google.golang.org/api/compute/v1\"\n\tstorageapi \"google.golang.org/api/storage/v1\"\n)\n\nvar httpAddr = flag.String(\"http\", \":80\", \"HTTP address\")\n\nvar gcsBucket string\n\nfunc serveHTTP(w http.ResponseWriter, r *http.Request) {\n\trc, err := wkfs.Open(path.Join(\"/gcs\", gcsBucket, r.URL.Path))\n\tif err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"could not open %v: %v\", r.URL.Path, err), 500)\n\t\treturn\n\t}\n\tdefer rc.Close()\n\thttp.ServeContent(w, r, r.URL.Path, time.Now(), rc)\n}\n\nfunc main() {\n\tif !metadata.OnGCE() {\n\t\tbucket := os.Getenv(\"GCSBUCKET\")\n\t\tif bucket == \"\" {\n\t\t\tlog.Fatal(\"You need to set the GCSBUCKET env var to specify the Google Cloud Storage bucket to serve from.\")\n\t\t}\n\t\tprojectID := os.Getenv(\"GCEPROJECTID\")\n\t\tif projectID == \"\" {\n\t\t\tlog.Fatal(\"You need to set the GCEPROJECTID env var to specify the Google Cloud project where 
the instance will run.\")\n\t\t}\n\t\t(&cloudlaunch.Config{\n\t\t\tName:         \"serveoncloud\",\n\t\t\tBinaryBucket: bucket,\n\t\t\tGCEProjectID: projectID,\n\t\t\tScopes: []string{\n\t\t\t\tstorageapi.DevstorageFullControlScope,\n\t\t\t\tcompute.ComputeScope,\n\t\t\t},\n\t\t}).MaybeDeploy()\n\t\treturn\n\t}\n\n\tflag.Parse()\n\n\tstorageURLRxp := regexp.MustCompile(`https://storage.googleapis.com/(.+?)/serveoncloud.*`)\n\tcloudConfig, err := metadata.InstanceAttributeValue(\"user-data\")\n\tif err != nil || cloudConfig == \"\" {\n\t\tlog.Fatalf(\"could not get cloud config from metadata: %v\", err)\n\t}\n\tm := storageURLRxp.FindStringSubmatch(cloudConfig)\n\tif len(m) < 2 {\n\t\tlog.Fatal(\"storage URL not found in cloud config\")\n\t}\n\tgcsBucket = m[1]\n\n\thttp.HandleFunc(\"/\", serveHTTP)\n\n\tlog.Fatal(http.ListenAndServe(*httpAddr, nil))\n}\n"
  },
  {
    "path": "jsonconfig/eval.go",
    "content": "/*\nCopyright 2011 The go4 Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage jsonconfig\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"go4.org/errorutil\"\n\t\"go4.org/wkfs\"\n)\n\ntype stringVector struct {\n\tv []string\n}\n\nfunc (v *stringVector) Push(s string) {\n\tv.v = append(v.v, s)\n}\n\nfunc (v *stringVector) Pop() {\n\tv.v = v.v[:len(v.v)-1]\n}\n\nfunc (v *stringVector) Last() string {\n\treturn v.v[len(v.v)-1]\n}\n\n// A File is the type returned by ConfigParser.Open.\ntype File interface {\n\tio.ReadSeeker\n\tio.Closer\n\tName() string\n}\n\n// ConfigParser specifies the environment for parsing a config file\n// and evaluating expressions.\ntype ConfigParser struct {\n\trootJSON Obj\n\n\ttouchedFiles map[string]bool\n\tincludeStack stringVector\n\n\t// Open optionally specifies an opener function.\n\tOpen func(filename string) (File, error)\n\n\t// IncludeDirs optionally specifies where to find the other config files which are child\n\t// objects of this config, if any. 
Even if nil, the working directory is always searched\n\t// first.\n\tIncludeDirs []string\n}\n\nfunc (c *ConfigParser) open(filename string) (File, error) {\n\tif c.Open == nil {\n\t\treturn wkfs.Open(filename)\n\t}\n\treturn c.Open(filename)\n}\n\n// Validates variable names for config _env expressions\nvar envPattern = regexp.MustCompile(`\\$\\{[A-Za-z0-9_]+\\}`)\n\n// ReadFile parses the provided path and returns the config file.\n// If path is empty, the c.Open function must be defined.\nfunc (c *ConfigParser) ReadFile(path string) (Obj, error) {\n\tif path == \"\" && c.Open == nil {\n\t\treturn nil, errors.New(\"ReadFile of empty string but Open hook not defined\")\n\t}\n\tc.touchedFiles = make(map[string]bool)\n\tvar err error\n\tc.rootJSON, err = c.recursiveReadJSON(path)\n\treturn c.rootJSON, err\n}\n\n// Decodes and evaluates a json config file, watching for include cycles.\nfunc (c *ConfigParser) recursiveReadJSON(configPath string) (decodedObject map[string]interface{}, err error) {\n\tif configPath != \"\" {\n\t\tabsConfigPath, err := filepath.Abs(configPath)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Failed to expand absolute path for %s\", configPath)\n\t\t}\n\t\tif c.touchedFiles[absConfigPath] {\n\t\t\treturn nil, fmt.Errorf(\"ConfigParser include cycle detected reading config: %v\",\n\t\t\t\tabsConfigPath)\n\t\t}\n\t\tc.touchedFiles[absConfigPath] = true\n\n\t\tc.includeStack.Push(absConfigPath)\n\t\tdefer c.includeStack.Pop()\n\t}\n\n\tvar f File\n\tif f, err = c.open(configPath); err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to open config: %v\", err)\n\t}\n\tdefer f.Close()\n\n\tdecodedObject = make(map[string]interface{})\n\tdj := json.NewDecoder(f)\n\tif err = dj.Decode(&decodedObject); err != nil {\n\t\textra := \"\"\n\t\tif serr, ok := err.(*json.SyntaxError); ok {\n\t\t\tif _, serr := f.Seek(0, os.SEEK_SET); serr != nil {\n\t\t\t\tlog.Fatalf(\"seek error: %v\", serr)\n\t\t\t}\n\t\t\tline, col, highlight := 
errorutil.HighlightBytePosition(f, serr.Offset)\n\t\t\textra = fmt.Sprintf(\":\\nError at line %d, column %d (file offset %d):\\n%s\",\n\t\t\t\tline, col, serr.Offset, highlight)\n\t\t}\n\t\treturn nil, fmt.Errorf(\"error parsing JSON object in config file %s%s\\n%v\",\n\t\t\tf.Name(), extra, err)\n\t}\n\n\tif err = c.evaluateExpressions(decodedObject, nil, false); err != nil {\n\t\treturn nil, fmt.Errorf(\"error expanding JSON config expressions in %s:\\n%v\",\n\t\t\tf.Name(), err)\n\t}\n\n\treturn decodedObject, nil\n}\n\nvar regFunc = map[string]expanderFunc{}\n\n// RegisterFunc registers a new function that may be called from JSON\n// configs using an array of the form [\"_name\", arg0, argN...].\n// The provided name must begin with an underscore.\nfunc RegisterFunc(name string, fn func(c *ConfigParser, v []interface{}) (interface{}, error)) {\n\tif len(name) < 2 || !strings.HasPrefix(name, \"_\") {\n\t\tpanic(\"illegal name\")\n\t}\n\tif _, dup := regFunc[name]; dup {\n\t\tpanic(\"duplicate registration of \" + name)\n\t}\n\tregFunc[name] = fn\n}\n\ntype expanderFunc func(c *ConfigParser, v []interface{}) (interface{}, error)\n\nfunc namedExpander(name string) (fn expanderFunc, ok bool) {\n\tswitch name {\n\tcase \"_env\":\n\t\treturn (*ConfigParser).expandEnv, true\n\tcase \"_fileobj\":\n\t\treturn (*ConfigParser).expandFile, true\n\t}\n\tfn, ok = regFunc[name]\n\treturn\n}\n\nfunc (c *ConfigParser) evalValue(v interface{}) (interface{}, error) {\n\tsl, ok := v.([]interface{})\n\tif !ok {\n\t\treturn v, nil\n\t}\n\tif name, ok := sl[0].(string); ok {\n\t\tif expander, ok := namedExpander(name); ok {\n\t\t\tnewval, err := expander(c, sl[1:])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn newval, nil\n\t\t}\n\t}\n\tfor i, oldval := range sl {\n\t\tnewval, err := c.evalValue(oldval)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tsl[i] = newval\n\t}\n\treturn v, nil\n}\n\n// CheckTypes parses m and returns an error if it 
encounters a type or value\n// that is not supported by this package.\nfunc (c *ConfigParser) CheckTypes(m map[string]interface{}) error {\n\treturn c.evaluateExpressions(m, nil, true)\n}\n\n// evaluateExpressions recursively parses m, populating it with the values\n// that are found, unless testOnly is true.\nfunc (c *ConfigParser) evaluateExpressions(m map[string]interface{}, seenKeys []string, testOnly bool) error {\n\tfor k, ei := range m {\n\t\tthisPath := append(seenKeys, k)\n\t\tswitch subval := ei.(type) {\n\t\tcase string, bool, float64, nil:\n\t\t\tcontinue\n\t\tcase []interface{}:\n\t\t\tif len(subval) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tevaled, err := c.evalValue(subval)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"%s: value error %v\", strings.Join(thisPath, \".\"), err)\n\t\t\t}\n\t\t\tif !testOnly {\n\t\t\t\tm[k] = evaled\n\t\t\t}\n\t\tcase map[string]interface{}:\n\t\t\tif err := c.evaluateExpressions(subval, thisPath, testOnly); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"%s: unhandled type %T\", strings.Join(thisPath, \".\"), ei)\n\t\t}\n\t}\n\treturn nil\n}\n\n// Permit either:\n//    [\"_env\", \"VARIABLE\"] (required to be set)\n// or [\"_env\", \"VARIABLE\", \"default_value\"]\nfunc (c *ConfigParser) expandEnv(v []interface{}) (interface{}, error) {\n\thasDefault := false\n\tdef := \"\"\n\tif len(v) < 1 || len(v) > 2 {\n\t\treturn \"\", fmt.Errorf(\"_env expansion expected 1 or 2 args, got %d\", len(v))\n\t}\n\ts, ok := v[0].(string)\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"Expected a string after _env expansion; got %#v\", v[0])\n\t}\n\tboolDefault, wantsBool := false, false\n\tif len(v) == 2 {\n\t\thasDefault = true\n\t\tswitch vdef := v[1].(type) {\n\t\tcase string:\n\t\t\tdef = vdef\n\t\tcase bool:\n\t\t\twantsBool = true\n\t\t\tboolDefault = vdef\n\t\tdefault:\n\t\t\treturn \"\", fmt.Errorf(\"Expected default value in %q _env expansion; got %#v\", s, v[1])\n\t\t}\n\t}\n\tvar err 
error\n\texpanded := envPattern.ReplaceAllStringFunc(s, func(match string) string {\n\t\tenvVar := match[2 : len(match)-1]\n\t\tval := os.Getenv(envVar)\n\t\t// Special case:\n\t\tif val == \"\" && envVar == \"USER\" && runtime.GOOS == \"windows\" {\n\t\t\tval = os.Getenv(\"USERNAME\")\n\t\t}\n\t\tif val == \"\" {\n\t\t\tif hasDefault {\n\t\t\t\treturn def\n\t\t\t}\n\t\t\terr = fmt.Errorf(\"couldn't expand environment variable %q\", envVar)\n\t\t}\n\t\treturn val\n\t})\n\tif wantsBool {\n\t\tif expanded == \"\" {\n\t\t\treturn boolDefault, nil\n\t\t}\n\t\treturn strconv.ParseBool(expanded)\n\t}\n\treturn expanded, err\n}\n\nfunc (c *ConfigParser) expandFile(v []interface{}) (exp interface{}, err error) {\n\tif len(v) != 1 {\n\t\treturn \"\", fmt.Errorf(\"_file expansion expected 1 arg, got %d\", len(v))\n\t}\n\tvar incPath string\n\tif incPath, err = c.ConfigFilePath(v[0].(string)); err != nil {\n\t\treturn \"\", fmt.Errorf(\"Included config does not exist: %v\", v[0])\n\t}\n\tif exp, err = c.recursiveReadJSON(incPath); err != nil {\n\t\treturn \"\", fmt.Errorf(\"In file included from %s:\\n%v\",\n\t\t\tc.includeStack.Last(), err)\n\t}\n\treturn exp, nil\n}\n\n// ConfigFilePath checks if configFile is found and returns a usable path to it.\n// It first checks if configFile is an absolute path, or if it's found in the\n// current working directory. If not, it then checks if configFile is in one of\n// c.IncludeDirs. 
It returns an error if configFile is absolute and could not be\n// statted, or os.ErrNotExist if configFile was not found.\nfunc (c *ConfigParser) ConfigFilePath(configFile string) (path string, err error) {\n\t// Try to open as absolute / relative to CWD\n\t_, err = os.Stat(configFile)\n\tif err != nil && filepath.IsAbs(configFile) {\n\t\treturn \"\", err\n\t}\n\tif err == nil {\n\t\treturn configFile, nil\n\t}\n\n\tfor _, d := range c.IncludeDirs {\n\t\tif _, err := os.Stat(filepath.Join(d, configFile)); err == nil {\n\t\t\treturn filepath.Join(d, configFile), nil\n\t\t}\n\t}\n\n\treturn \"\", os.ErrNotExist\n}\n"
  },
  {
    "path": "jsonconfig/jsonconfig.go",
    "content": "/*\nCopyright 2011 The go4 Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package jsonconfig defines a helper type for JSON objects to be\n// used for configuration.\npackage jsonconfig // import \"go4.org/jsonconfig\"\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n)\n\n// Obj is a JSON configuration map.\ntype Obj map[string]interface{}\n\n// ReadFile reads JSON config data from the specified open file, expanding\n// all expressions. Use *ConfigParser.ReadFile instead if you\n// need to set c.IncludeDirs.\nfunc ReadFile(configPath string) (Obj, error) {\n\tvar c ConfigParser\n\treturn c.ReadFile(configPath)\n}\n\nfunc (jc Obj) RequiredObject(key string) Obj {\n\treturn jc.obj(key, false)\n}\n\nfunc (jc Obj) OptionalObject(key string) Obj {\n\treturn jc.obj(key, true)\n}\n\nfunc (jc Obj) obj(key string, optional bool) Obj {\n\tjc.noteKnownKey(key)\n\tei, ok := jc[key]\n\tif !ok {\n\t\tif optional {\n\t\t\treturn make(Obj)\n\t\t}\n\t\tjc.appendError(fmt.Errorf(\"Missing required config key %q (object)\", key))\n\t\treturn make(Obj)\n\t}\n\tm, ok := ei.(map[string]interface{})\n\tif !ok {\n\t\tjc.appendError(fmt.Errorf(\"Expected config key %q to be an object, not %T\", key, ei))\n\t\treturn make(Obj)\n\t}\n\treturn m\n}\n\nfunc (jc Obj) RequiredString(key string) string {\n\treturn jc.string(key, nil)\n}\n\nfunc (jc Obj) OptionalString(key, def string) string {\n\treturn jc.string(key, &def)\n}\n\nfunc (jc Obj) 
string(key string, def *string) string {\n\tjc.noteKnownKey(key)\n\tei, ok := jc[key]\n\tif !ok {\n\t\tif def != nil {\n\t\t\treturn *def\n\t\t}\n\t\tjc.appendError(fmt.Errorf(\"Missing required config key %q (string)\", key))\n\t\treturn \"\"\n\t}\n\ts, ok := ei.(string)\n\tif !ok {\n\t\tjc.appendError(fmt.Errorf(\"Expected config key %q to be a string\", key))\n\t\treturn \"\"\n\t}\n\treturn s\n}\n\nfunc (jc Obj) RequiredStringOrObject(key string) interface{} {\n\treturn jc.stringOrObject(key, true)\n}\n\nfunc (jc Obj) OptionalStringOrObject(key string) interface{} {\n\treturn jc.stringOrObject(key, false)\n}\n\nfunc (jc Obj) stringOrObject(key string, required bool) interface{} {\n\tjc.noteKnownKey(key)\n\tei, ok := jc[key]\n\tif !ok {\n\t\tif !required {\n\t\t\treturn nil\n\t\t}\n\t\tjc.appendError(fmt.Errorf(\"Missing required config key %q (string or object)\", key))\n\t\treturn \"\"\n\t}\n\tif _, ok := ei.(map[string]interface{}); ok {\n\t\treturn ei\n\t}\n\tif _, ok := ei.(string); ok {\n\t\treturn ei\n\t}\n\tjc.appendError(fmt.Errorf(\"Expected config key %q to be a string or object\", key))\n\treturn \"\"\n}\n\nfunc (jc Obj) RequiredBool(key string) bool {\n\treturn jc.bool(key, nil)\n}\n\nfunc (jc Obj) OptionalBool(key string, def bool) bool {\n\treturn jc.bool(key, &def)\n}\n\nfunc (jc Obj) bool(key string, def *bool) bool {\n\tjc.noteKnownKey(key)\n\tei, ok := jc[key]\n\tif !ok {\n\t\tif def != nil {\n\t\t\treturn *def\n\t\t}\n\t\tjc.appendError(fmt.Errorf(\"Missing required config key %q (boolean)\", key))\n\t\treturn false\n\t}\n\tswitch v := ei.(type) {\n\tcase bool:\n\t\treturn v\n\tcase string:\n\t\tb, err := strconv.ParseBool(v)\n\t\tif err != nil {\n\t\t\tjc.appendError(fmt.Errorf(\"Config key %q has bad boolean format %q\", key, v))\n\t\t}\n\t\treturn b\n\tdefault:\n\t\tjc.appendError(fmt.Errorf(\"Expected config key %q to be a boolean\", key))\n\t\treturn false\n\t}\n}\n\nfunc (jc Obj) RequiredInt(key string) int {\n\treturn jc.int(key, 
nil)\n}\n\nfunc (jc Obj) OptionalInt(key string, def int) int {\n\treturn jc.int(key, &def)\n}\n\nfunc (jc Obj) int(key string, def *int) int {\n\tjc.noteKnownKey(key)\n\tei, ok := jc[key]\n\tif !ok {\n\t\tif def != nil {\n\t\t\treturn *def\n\t\t}\n\t\tjc.appendError(fmt.Errorf(\"Missing required config key %q (integer)\", key))\n\t\treturn 0\n\t}\n\tb, ok := ei.(float64)\n\tif !ok {\n\t\tjc.appendError(fmt.Errorf(\"Expected config key %q to be a number\", key))\n\t\treturn 0\n\t}\n\treturn int(b)\n}\n\nfunc (jc Obj) RequiredInt64(key string) int64 {\n\treturn jc.int64(key, nil)\n}\n\nfunc (jc Obj) OptionalInt64(key string, def int64) int64 {\n\treturn jc.int64(key, &def)\n}\n\nfunc (jc Obj) int64(key string, def *int64) int64 {\n\tjc.noteKnownKey(key)\n\tei, ok := jc[key]\n\tif !ok {\n\t\tif def != nil {\n\t\t\treturn *def\n\t\t}\n\t\tjc.appendError(fmt.Errorf(\"Missing required config key %q (integer)\", key))\n\t\treturn 0\n\t}\n\tb, ok := ei.(float64)\n\tif !ok {\n\t\tjc.appendError(fmt.Errorf(\"Expected config key %q to be a number\", key))\n\t\treturn 0\n\t}\n\treturn int64(b)\n}\n\nfunc (jc Obj) RequiredList(key string) []string {\n\treturn jc.requiredList(key, true)\n}\n\nfunc (jc Obj) OptionalList(key string) []string {\n\treturn jc.requiredList(key, false)\n}\n\nfunc (jc Obj) requiredList(key string, required bool) []string {\n\tjc.noteKnownKey(key)\n\tei, ok := jc[key]\n\tif !ok {\n\t\tif required {\n\t\t\tjc.appendError(fmt.Errorf(\"Missing required config key %q (list of strings)\", key))\n\t\t}\n\t\treturn nil\n\t}\n\teil, ok := ei.([]interface{})\n\tif !ok {\n\t\tjc.appendError(fmt.Errorf(\"Expected config key %q to be a list, not %T\", key, ei))\n\t\treturn nil\n\t}\n\tsl := make([]string, len(eil))\n\tfor i, ei := range eil {\n\t\ts, ok := ei.(string)\n\t\tif !ok {\n\t\t\tjc.appendError(fmt.Errorf(\"Expected config key %q index %d to be a string, not %T\", key, i, ei))\n\t\t\treturn nil\n\t\t}\n\t\tsl[i] = s\n\t}\n\treturn sl\n}\n\nfunc (jc Obj) 
noteKnownKey(key string) {\n\t_, ok := jc[\"_knownkeys\"]\n\tif !ok {\n\t\tjc[\"_knownkeys\"] = make(map[string]bool)\n\t}\n\tjc[\"_knownkeys\"].(map[string]bool)[key] = true\n}\n\nfunc (jc Obj) appendError(err error) {\n\tei, ok := jc[\"_errors\"]\n\tif ok {\n\t\tjc[\"_errors\"] = append(ei.([]error), err)\n\t} else {\n\t\tjc[\"_errors\"] = []error{err}\n\t}\n}\n\n// UnknownKeys returns the keys from the config that have not yet been discovered by one of the RequiredT or OptionalT calls.\nfunc (jc Obj) UnknownKeys() []string {\n\tei, ok := jc[\"_knownkeys\"]\n\tvar known map[string]bool\n\tif ok {\n\t\tknown = ei.(map[string]bool)\n\t}\n\tvar unknown []string\n\tfor k, _ := range jc {\n\t\tif ok && known[k] {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasPrefix(k, \"_\") {\n\t\t\t// Permit keys with a leading underscore as a\n\t\t\t// form of comments.\n\t\t\tcontinue\n\t\t}\n\t\tunknown = append(unknown, k)\n\t}\n\tsort.Strings(unknown)\n\treturn unknown\n}\n\nfunc (jc Obj) Validate() error {\n\tunknown := jc.UnknownKeys()\n\tfor _, k := range unknown {\n\t\tjc.appendError(fmt.Errorf(\"Unknown key %q\", k))\n\t}\n\n\tei, ok := jc[\"_errors\"]\n\tif !ok {\n\t\treturn nil\n\t}\n\terrList := ei.([]error)\n\treturn errors.Join(errList...)\n}\n"
  },
  {
    "path": "jsonconfig/jsonconfig_test.go",
    "content": "/*\nCopyright 2011 The go4 Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage jsonconfig\n\nimport (\n\t\"os\"\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc testIncludes(configFile string, t *testing.T) {\n\tvar c ConfigParser\n\tc.IncludeDirs = []string{\"testdata\"}\n\tobj, err := c.ReadFile(configFile)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ttwo := obj.RequiredObject(\"two\")\n\tif err := obj.Validate(); err != nil {\n\t\tt.Error(err)\n\t}\n\tif g, e := two.RequiredString(\"key\"), \"value\"; g != e {\n\t\tt.Errorf(\"sub object key = %q; want %q\", g, e)\n\t}\n}\n\nfunc TestIncludesCWD(t *testing.T) {\n\ttestIncludes(\"testdata/include1.json\", t)\n}\n\nfunc TestIncludesIncludeDirs(t *testing.T) {\n\ttestIncludes(\"testdata/include1bis.json\", t)\n}\n\nfunc TestIncludeLoop(t *testing.T) {\n\t_, err := ReadFile(\"testdata/loop1.json\")\n\tif err == nil {\n\t\tt.Fatal(\"expected an error about import cycles.\")\n\t}\n\tif !strings.Contains(err.Error(), \"include cycle detected\") {\n\t\tt.Fatalf(\"expected an error about import cycles; got: %v\", err)\n\t}\n}\n\nfunc TestBoolEnvs(t *testing.T) {\n\tos.Setenv(\"TEST_EMPTY\", \"\")\n\tos.Setenv(\"TEST_TRUE\", \"true\")\n\tos.Setenv(\"TEST_ONE\", \"1\")\n\tos.Setenv(\"TEST_ZERO\", \"0\")\n\tos.Setenv(\"TEST_FALSE\", \"false\")\n\tobj, err := ReadFile(\"testdata/boolenv.json\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif str := obj.RequiredString(\"emptystr\"); str != \"\" 
{\n\t\tt.Errorf(\"str = %q, want empty\", str)\n\t}\n\ttests := []struct {\n\t\tkey  string\n\t\twant bool\n\t}{\n\t\t{\"def_false\", false},\n\t\t{\"def_true\", true},\n\t\t{\"set_true_def_false\", true},\n\t\t{\"set_false_def_true\", false},\n\t\t{\"lit_true\", true},\n\t\t{\"lit_false\", false},\n\t\t{\"one\", true},\n\t\t{\"zero\", false},\n\t}\n\tfor _, tt := range tests {\n\t\tif v := obj.RequiredBool(tt.key); v != tt.want {\n\t\t\tt.Errorf(\"key %q = %v; want %v\", tt.key, v, tt.want)\n\t\t}\n\t}\n\tif err := obj.Validate(); err != nil {\n\t\tt.Error(err)\n\t}\n}\n\nfunc TestListExpansion(t *testing.T) {\n\tos.Setenv(\"TEST_BAR\", \"bar\")\n\tobj, err := ReadFile(\"testdata/listexpand.json\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ts := obj.RequiredString(\"str\")\n\tl := obj.RequiredList(\"list\")\n\tif err := obj.Validate(); err != nil {\n\t\tt.Error(err)\n\t}\n\twant := []string{\"foo\", \"bar\"}\n\tif !reflect.DeepEqual(l, want) {\n\t\tt.Errorf(\"got = %#v\\nwant = %#v\", l, want)\n\t}\n\tif s != \"bar\" {\n\t\tt.Errorf(\"str = %q, want %q\", s, \"bar\")\n\t}\n}\n"
  },
  {
    "path": "jsonconfig/testdata/boolenv.json",
    "content": "{\n \"emptystr\": [\"_env\", \"${TEST_EMPTY}\", \"\"],\n \"def_false\": [\"_env\", \"${TEST_EMPTY}\", false],\n \"def_true\": [\"_env\", \"${TEST_EMPTY}\", true],\n \"set_true_def_false\": [\"_env\", \"${TEST_TRUE}\", false],\n \"set_false_def_true\": [\"_env\", \"${TEST_FALSE}\", true],\n \"one\": [\"_env\", \"${TEST_ONE}\"],\n \"zero\": [\"_env\", \"${TEST_ZERO}\"],\n \"lit_true\": true,\n \"lit_false\": false\n}\n"
  },
  {
    "path": "jsonconfig/testdata/include1.json",
    "content": "{\n  \"two\": [\"_fileobj\", \"testdata/include2.json\"]\n}\n"
  },
  {
    "path": "jsonconfig/testdata/include1bis.json",
    "content": "{\n  \"two\": [\"_fileobj\", \"include2.json\"]\n}\n"
  },
  {
    "path": "jsonconfig/testdata/include2.json",
    "content": "{\n  \"key\": \"value\"\n}\n"
  },
  {
    "path": "jsonconfig/testdata/listexpand.json",
    "content": "{\n  \"list\": [\"foo\", [\"_env\", \"${TEST_BAR}\"]],\n  \"str\": [\"_env\", \"${TEST_BAR}\"]\n}\n"
  },
  {
    "path": "jsonconfig/testdata/loop1.json",
    "content": "{\n  \"obj\": [\"_fileobj\", \"testdata/loop2.json\"]\n}\n"
  },
  {
    "path": "jsonconfig/testdata/loop2.json",
    "content": "{\n  \"obj\": [\"_fileobj\", \"testdata/loop1.json\"]\n}\n"
  },
  {
    "path": "legal/legal.go",
    "content": "/*\nCopyright 2014 The Go4 Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package legal provides in-process storage for compiled-in licenses.\npackage legal // import \"go4.org/legal\"\n\nvar licenses []string\n\n// RegisterLicense stores the license text.\n// It doesn't check whether the text was already present.\nfunc RegisterLicense(text string) {\n\tlicenses = append(licenses, text)\n\treturn\n}\n\n// Licenses returns a slice of the licenses.\nfunc Licenses() []string {\n\treturn licenses\n}\n"
  },
  {
    "path": "legal/legal_test.go",
    "content": "/*\nCopyright 2014 The Go4 Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage legal\n\nimport (\n\t\"testing\"\n)\n\nfunc TestRegisterLicense(t *testing.T) {\n\tinitial := len(licenses)\n\tRegisterLicense(\"dummy\")\n\tif initial+1 != len(licenses) {\n\t\tt.Fatal(\"didn't add a license\")\n\t}\n}\n"
  },
  {
    "path": "lock/.gitignore",
    "content": "*~\n"
  },
  {
    "path": "lock/lock.go",
    "content": "/*\nCopyright 2013 The Go Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package lock is a file locking library.\npackage lock // import \"go4.org/lock\"\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"sync\"\n)\n\n// Lock locks the given file, creating the file if necessary. If the\n// file already exists, it must have zero size or an error is returned.\n// The lock is an exclusive lock (a write lock), but locked files\n// should neither be read from nor written to. Such files should have\n// zero size and only exist to co-ordinate ownership across processes.\n//\n// A nil Closer is returned if an error occurred. Otherwise, close that\n// Closer to release the lock.\n//\n// On Linux, FreeBSD and OSX, a lock has the same semantics as fcntl(2)'s\n// advisory locks.  
In particular, closing any other file descriptor for the\n// same file will release the lock prematurely.\n//\n// Attempting to lock a file that is already locked by the current process\n// has undefined behavior.\n//\n// On other operating systems, lock will fallback to using the presence and\n// content of a file named name + '.lock' to implement locking behavior.\nfunc Lock(name string) (io.Closer, error) {\n\tabs, err := filepath.Abs(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlockmu.Lock()\n\tdefer lockmu.Unlock()\n\tif locked[abs] {\n\t\treturn nil, fmt.Errorf(\"file %q already locked\", abs)\n\t}\n\n\tc, err := lockFn(abs)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot acquire lock: %v\", err)\n\t}\n\tlocked[abs] = true\n\treturn c, nil\n}\n\nvar lockFn = lockPortable\n\n// lockPortable is a portable version not using fcntl. Doesn't handle crashes as gracefully,\n// since it can leave stale lock files.\nfunc lockPortable(name string) (io.Closer, error) {\n\tfi, err := os.Stat(name)\n\tif err == nil && fi.Size() > 0 {\n\t\tst := portableLockStatus(name)\n\t\tswitch st {\n\t\tcase statusLocked:\n\t\t\treturn nil, fmt.Errorf(\"file %q already locked\", name)\n\t\tcase statusStale:\n\t\t\tos.Remove(name)\n\t\tcase statusInvalid:\n\t\t\treturn nil, fmt.Errorf(\"can't Lock file %q: has invalid contents\", name)\n\t\t}\n\t}\n\tf, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_EXCL, 0666)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create lock file %s %v\", name, err)\n\t}\n\tif err := json.NewEncoder(f).Encode(&pidLockMeta{OwnerPID: os.Getpid()}); err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot write owner pid: %v\", err)\n\t}\n\treturn &unlocker{\n\t\tf:        f,\n\t\tabs:      name,\n\t\tportable: true,\n\t}, nil\n}\n\ntype lockStatus int\n\nconst (\n\tstatusInvalid lockStatus = iota\n\tstatusLocked\n\tstatusUnlocked\n\tstatusStale\n)\n\ntype pidLockMeta struct {\n\tOwnerPID int\n}\n\nfunc 
portableLockStatus(path string) lockStatus {\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn statusUnlocked\n\t}\n\tdefer f.Close()\n\tvar meta pidLockMeta\n\tif json.NewDecoder(f).Decode(&meta) != nil {\n\t\treturn statusInvalid\n\t}\n\tif meta.OwnerPID == 0 {\n\t\treturn statusInvalid\n\t}\n\tp, err := os.FindProcess(meta.OwnerPID)\n\tif err != nil {\n\t\t// e.g. on Windows\n\t\treturn statusStale\n\t}\n\t// On unix, os.FindProcess always succeeds, so we have to send\n\t// it a signal to see if it's alive.\n\tif signalZero != nil {\n\t\tif p.Signal(signalZero) != nil {\n\t\t\treturn statusStale\n\t\t}\n\t}\n\treturn statusLocked\n}\n\nvar signalZero os.Signal // nil or set by lock_sigzero.go\n\nvar (\n\tlockmu sync.Mutex\n\tlocked = map[string]bool{} // abs path -> true\n)\n\ntype unlocker struct {\n\tportable bool\n\tf        *os.File\n\tabs      string\n\t// once guards the close method call.\n\tonce sync.Once\n\t// err holds the error returned by Close.\n\terr error\n}\n\nfunc (u *unlocker) Close() error {\n\tu.once.Do(u.close)\n\treturn u.err\n}\n\nfunc (u *unlocker) close() {\n\tlockmu.Lock()\n\tdefer lockmu.Unlock()\n\tdelete(locked, u.abs)\n\n\tif u.portable {\n\t\t// In the portable lock implementation, it's\n\t\t// important to close before removing because\n\t\t// Windows won't allow us to remove an open\n\t\t// file.\n\t\tif err := u.f.Close(); err != nil {\n\t\t\tu.err = err\n\t\t}\n\t\tif err := os.Remove(u.abs); err != nil {\n\t\t\t// Note that if both Close and Remove fail,\n\t\t\t// we care more about the latter than the former\n\t\t\t// so we'll return that error.\n\t\t\tu.err = err\n\t\t}\n\t\treturn\n\t}\n\t// In other implementations, it's nice for us to clean up.\n\t// If we do this, though, it needs to be before the\n\t// u.f.Close below.\n\tos.Remove(u.abs)\n\tu.err = u.f.Close()\n}\n"
  },
  {
    "path": "lock/lock_plan9.go",
    "content": "/*\nCopyright 2013 The Go Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage lock\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n)\n\nfunc init() {\n\tlockFn = lockPlan9\n}\n\nfunc lockPlan9(name string) (io.Closer, error) {\n\tfi, err := os.Stat(name)\n\tif err == nil && fi.Size() > 0 {\n\t\treturn nil, fmt.Errorf(\"can't Lock file %q: has non-zero size\", name)\n\t}\n\n\tf, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE, os.ModeExclusive|0644)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Lock Create of %s failed: %v\", name, err)\n\t}\n\n\treturn &unlocker{f: f, abs: name}, nil\n}\n"
  },
  {
    "path": "lock/lock_sigzero.go",
    "content": "//go:build linux || darwin || freebsd || openbsd || netbsd || dragonfly || solaris\n\n/*\nCopyright 2013 The Go Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage lock\n\nimport \"syscall\"\n\nfunc init() {\n\tsignalZero = syscall.Signal(0)\n}\n"
  },
  {
    "path": "lock/lock_test.go",
    "content": "/*\nCopyright 2013 The Go Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage lock\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"strconv\"\n\t\"testing\"\n)\n\nfunc TestLock(t *testing.T) {\n\ttestLock(t, false)\n}\n\nfunc TestLockPortable(t *testing.T) {\n\ttestLock(t, true)\n}\n\nfunc TestLockInChild(t *testing.T) {\n\tf := os.Getenv(\"TEST_LOCK_FILE\")\n\tif f == \"\" {\n\t\t// not child\n\t\treturn\n\t}\n\tlock := Lock\n\tif v, _ := strconv.ParseBool(os.Getenv(\"TEST_LOCK_PORTABLE\")); v {\n\t\tlock = lockPortable\n\t}\n\n\tvar lk io.Closer\n\tfor scan := bufio.NewScanner(os.Stdin); scan.Scan(); {\n\t\tvar err error\n\t\tswitch scan.Text() {\n\t\tcase \"lock\":\n\t\t\tlk, err = lock(f)\n\t\tcase \"unlock\":\n\t\t\terr = lk.Close()\n\t\t\tlk = nil\n\t\tcase \"exit\":\n\t\t\t// Simulate a crash, or at least not unlocking the lock.\n\t\t\tos.Exit(0)\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"unexpected child command %q\", scan.Text())\n\t\t}\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t} else {\n\t\t\tfmt.Println(\"\")\n\t\t}\n\t}\n}\n\nfunc testLock(t *testing.T, portable bool) {\n\tlock := Lock\n\tif portable {\n\t\tlock = lockPortable\n\t}\n\tt.Logf(\"test lock, portable %v\", portable)\n\n\ttd, err := ioutil.TempDir(\"\", \"\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.RemoveAll(td)\n\n\tpath := filepath.Join(td, \"foo.lock\")\n\n\tproc := 
newChildProc(t, path, portable)\n\tdefer proc.kill()\n\n\tt.Logf(\"First lock in child\")\n\tif err := proc.do(\"lock\"); err != nil {\n\t\tt.Fatalf(\"first lock in child process: %v\", err)\n\t}\n\n\tt.Logf(\"Crash child\")\n\tif err := proc.do(\"exit\"); err != nil {\n\t\tt.Fatalf(\"crash in child process: %v\", err)\n\t}\n\n\tproc = newChildProc(t, path, portable)\n\tdefer proc.kill()\n\n\tt.Logf(\"Locking+unlocking in child...\")\n\tif err := proc.do(\"lock\"); err != nil {\n\t\tt.Fatalf(\"lock in child process after crashing child: %v\", err)\n\t}\n\tif err := proc.do(\"unlock\"); err != nil {\n\t\tt.Fatalf(\"lock in child process after crashing child: %v\", err)\n\t}\n\n\tt.Logf(\"Locking in parent...\")\n\tlk1, err := lock(path)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tt.Logf(\"Again in parent...\")\n\t_, err = lock(path)\n\tif err == nil {\n\t\tt.Fatal(\"expected second lock to fail\")\n\t}\n\n\tt.Logf(\"Locking in child...\")\n\tif err := proc.do(\"lock\"); err == nil {\n\t\tt.Fatalf(\"expected lock in child process to fail\")\n\t}\n\n\tt.Logf(\"Unlocking lock in parent\")\n\tif err := lk1.Close(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tt.Logf(\"Trying lock again in child...\")\n\tif err := proc.do(\"lock\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := proc.do(\"unlock\"); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tlk3, err := lock(path)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tlk3.Close()\n}\n\ntype childLockCmd struct {\n\top    string\n\treply chan<- error\n}\n\ntype childProc struct {\n\tproc *os.Process\n\tc    chan childLockCmd\n}\n\nfunc (c *childProc) kill() {\n\tc.proc.Kill()\n}\n\nfunc (c *childProc) do(op string) error {\n\treply := make(chan error)\n\tc.c <- childLockCmd{\n\t\top:    op,\n\t\treply: reply,\n\t}\n\treturn <-reply\n}\n\nfunc newChildProc(t *testing.T, path string, portable bool) *childProc {\n\tcmd := exec.Command(os.Args[0], \"-test.run=LockInChild$\")\n\tcmd.Env = []string{\"TEST_LOCK_FILE=\" + path}\n\ttoChild, 
err := cmd.StdinPipe()\n\tif err != nil {\n\t\tt.Fatalf(\"cannot make pipe: %v\", err)\n\t}\n\tfromChild, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\tt.Fatalf(\"cannot make pipe: %v\", err)\n\t}\n\tcmd.Stderr = os.Stderr\n\tif portable {\n\t\tcmd.Env = append(cmd.Env, \"TEST_LOCK_PORTABLE=1\")\n\t}\n\tif err := cmd.Start(); err != nil {\n\t\tt.Fatalf(\"cannot start child: %v\", err)\n\t}\n\tcmdChan := make(chan childLockCmd)\n\tgo func() {\n\t\tdefer fromChild.Close()\n\t\tdefer toChild.Close()\n\t\tinScan := bufio.NewScanner(fromChild)\n\t\tfor c := range cmdChan {\n\t\t\tfmt.Fprintln(toChild, c.op)\n\t\t\tok := inScan.Scan()\n\t\t\tif c.op == \"exit\" {\n\t\t\t\tif ok {\n\t\t\t\t\tc.reply <- errors.New(\"child did not exit\")\n\t\t\t\t} else {\n\t\t\t\t\tcmd.Wait()\n\t\t\t\t\tc.reply <- nil\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif !ok {\n\t\t\t\tpanic(\"child exited early\")\n\t\t\t}\n\t\t\tif errText := inScan.Text(); errText != \"\" {\n\t\t\t\tc.reply <- errors.New(errText)\n\t\t\t} else {\n\t\t\t\tc.reply <- nil\n\t\t\t}\n\t\t}\n\t}()\n\treturn &childProc{\n\t\tc:    cmdChan,\n\t\tproc: cmd.Process,\n\t}\n}\n"
  },
  {
    "path": "lock/lock_unix.go",
    "content": "//go:build linux || darwin || freebsd || openbsd || netbsd || dragonfly || solaris\n\n/*\nCopyright 2013 The Go Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage lock\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\n\t\"golang.org/x/sys/unix\"\n)\n\nfunc init() {\n\tlockFn = lockFcntl\n}\n\nfunc lockFcntl(name string) (io.Closer, error) {\n\tfi, err := os.Stat(name)\n\tif err == nil && fi.Size() > 0 {\n\t\treturn nil, fmt.Errorf(\"can't Lock file %q: has non-zero size\", name)\n\t}\n\n\tf, err := os.Create(name)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Lock Create of %s failed: %v\", name, err)\n\t}\n\n\terr = unix.FcntlFlock(f.Fd(), unix.F_SETLK, &unix.Flock_t{\n\t\tType:   unix.F_WRLCK,\n\t\tWhence: int16(os.SEEK_SET),\n\t\tStart:  0,\n\t\tLen:    0, // 0 means to lock the entire file.\n\t\tPid:    0, // only used by F_GETLK\n\t})\n\n\tif err != nil {\n\t\tf.Close()\n\t\treturn nil, fmt.Errorf(\"Lock FcntlFlock of %s failed: %v\", name, err)\n\t}\n\treturn &unlocker{f: f, abs: name}, nil\n}\n"
  },
  {
    "path": "lock/lock_windows.go",
    "content": "/*\nCopyright 2013 The Go Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage lock\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"sync\"\n\n\t\"golang.org/x/sys/windows\"\n)\n\nfunc init() {\n\tlockFn = lockWindows\n}\n\ntype winUnlocker struct {\n\th   windows.Handle\n\tabs string\n\t// err holds the error returned by Close.\n\terr error\n\t// once guards the close method call.\n\tonce sync.Once\n}\n\nfunc (u *winUnlocker) Close() error {\n\tu.once.Do(u.close)\n\treturn u.err\n}\n\nfunc (u *winUnlocker) close() {\n\tlockmu.Lock()\n\tdefer lockmu.Unlock()\n\tdelete(locked, u.abs)\n\n\tu.err = windows.CloseHandle(u.h)\n}\n\nfunc lockWindows(name string) (io.Closer, error) {\n\tfi, err := os.Stat(name)\n\tif err == nil && fi.Size() > 0 {\n\t\treturn nil, fmt.Errorf(\"can't lock file %q: %s\", name, \"has non-zero size\")\n\t}\n\n\thandle, err := winCreateEphemeral(name)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creation of lock %s failed: %v\", name, err)\n\t}\n\n\treturn &winUnlocker{h: handle, abs: name}, nil\n}\n\nfunc winCreateEphemeral(name string) (windows.Handle, error) {\n\tconst (\n\t\tFILE_ATTRIBUTE_TEMPORARY  = 0x100\n\t\tFILE_FLAG_DELETE_ON_CLOSE = 0x04000000\n\t)\n\thandle, err := windows.CreateFile(windows.StringToUTF16Ptr(name), 0, 0, nil, windows.OPEN_ALWAYS, FILE_ATTRIBUTE_TEMPORARY|FILE_FLAG_DELETE_ON_CLOSE, 0)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn handle, nil\n}\n"
  },
  {
    "path": "media/heif/bmff/bmff.go",
    "content": "/*\nCopyright 2018 The go4 Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package bmff reads ISO BMFF boxes, as used by HEIF, etc.\n//\n// This is not so much as a generic BMFF reader as it is a BMFF reader\n// as needed by HEIF, though that may change in time. For now, only\n// boxes necessary for the go4.org/media/heif package have explicit\n// parsers.\n//\n// This package makes no API compatibility promises; it exists\n// primarily for use by the go4.org/media/heif package.\npackage bmff\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"strings\"\n)\n\nfunc NewReader(r io.Reader) *Reader {\n\tbr, ok := r.(*bufio.Reader)\n\tif !ok {\n\t\tbr = bufio.NewReader(r)\n\t}\n\treturn &Reader{br: bufReader{Reader: br}}\n}\n\ntype Reader struct {\n\tbr          bufReader\n\tlastBox     Box  // or nil\n\tnoMoreBoxes bool // a box with size 0 (the final box) was seen\n}\n\ntype BoxType [4]byte\n\n// Common box types.\nvar (\n\tTypeFtyp = BoxType{'f', 't', 'y', 'p'}\n\tTypeMeta = BoxType{'m', 'e', 't', 'a'}\n)\n\nfunc (t BoxType) String() string { return string(t[:]) }\n\nfunc (t BoxType) EqualString(s string) bool {\n\t// Could be cleaner, but see ohttps://github.com/golang/go/issues/24765\n\treturn len(s) == 4 && s[0] == t[0] && s[1] == t[1] && s[2] == t[2] && s[3] == t[3]\n}\n\ntype parseFunc func(b box, br *bufio.Reader) (Box, error)\n\n// Box represents a BMFF box.\ntype Box interface 
{\n\tSize() int64 // 0 means unknown (will read to end of file)\n\tType() BoxType\n\n\t// Parse parses the box, populating the fields\n\t// in the returned concrete type.\n\t//\n\t// If Parse has already been called, Parse returns nil.\n\t// If the box type is unknown, the returned error is ErrUnknownBox\n\t// and it's guaranteed that no bytes have been read from the box.\n\tParse() (Box, error)\n\n\t// Body returns the inner bytes of the box, ignoring the header.\n\t// The body may start with the 4 byte header of a \"Full Box\" if the\n\t// box's type derives from a full box. Most users will use Parse\n\t// instead.\n\t// Body will return a new reader at the beginning of the box if the\n\t// outer box has already been parsed.\n\tBody() io.Reader\n}\n\n// ErrUnknownBox is returned by Box.Parse for unrecognized box types.\nvar ErrUnknownBox = errors.New(\"heif: unknown box\")\n\ntype parserFunc func(b *box, br *bufReader) (Box, error)\n\nfunc boxType(s string) BoxType {\n\tif len(s) != 4 {\n\t\tpanic(\"bogus boxType length\")\n\t}\n\treturn BoxType{s[0], s[1], s[2], s[3]}\n}\n\nvar parsers = map[BoxType]parserFunc{\n\tboxType(\"dinf\"): parseDataInformationBox,\n\tboxType(\"dref\"): parseDataReferenceBox,\n\tboxType(\"ftyp\"): parseFileTypeBox,\n\tboxType(\"hdlr\"): parseHandlerBox,\n\tboxType(\"iinf\"): parseItemInfoBox,\n\tboxType(\"infe\"): parseItemInfoEntry,\n\tboxType(\"iloc\"): parseItemLocationBox,\n\tboxType(\"ipco\"): parseItemPropertyContainerBox,\n\tboxType(\"ipma\"): parseItemPropertyAssociation,\n\tboxType(\"iprp\"): parseItemPropertiesBox,\n\tboxType(\"irot\"): parseImageRotation,\n\tboxType(\"ispe\"): parseImageSpatialExtentsProperty,\n\tboxType(\"meta\"): parseMetaBox,\n\tboxType(\"pitm\"): parsePrimaryItemBox,\n}\n\ntype box struct {\n\tsize    int64 // 0 means unknown, will read to end of file (box container)\n\tboxType BoxType\n\tbody    io.Reader\n\tparsed  Box    // if non-nil, the Parsed result\n\tslurp   []byte // if non-nil, the contents 
slurped to memory\n}\n\nfunc (b *box) Size() int64   { return b.size }\nfunc (b *box) Type() BoxType { return b.boxType }\n\nfunc (b *box) Body() io.Reader {\n\tif b.slurp != nil {\n\t\treturn bytes.NewReader(b.slurp)\n\t}\n\treturn b.body\n}\n\nfunc (b *box) Parse() (Box, error) {\n\tif b.parsed != nil {\n\t\treturn b.parsed, nil\n\t}\n\tparser, ok := parsers[b.Type()]\n\tif !ok {\n\t\treturn nil, ErrUnknownBox\n\t}\n\tv, err := parser(b, &bufReader{Reader: bufio.NewReader(b.Body())})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tb.parsed = v\n\treturn v, nil\n}\n\ntype FullBox struct {\n\t*box\n\tVersion uint8\n\tFlags   uint32 // 24 bits\n}\n\n// ReadBox reads the next box.\n//\n// If the previously read box was not read to completion, ReadBox consumes\n// the rest of its data.\n//\n// At the end, the error is io.EOF.\nfunc (r *Reader) ReadBox() (Box, error) {\n\tif r.noMoreBoxes {\n\t\treturn nil, io.EOF\n\t}\n\tif r.lastBox != nil {\n\t\tif _, err := io.Copy(ioutil.Discard, r.lastBox.Body()); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tvar buf [8]byte\n\n\t_, err := io.ReadFull(r.br, buf[:4])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbox := &box{\n\t\tsize: int64(binary.BigEndian.Uint32(buf[:4])),\n\t}\n\n\t_, err = io.ReadFull(r.br, box.boxType[:]) // 4 more bytes\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Special cases for size:\n\tvar remain int64\n\tswitch box.size {\n\tcase 1:\n\t\t// 1 means it's actually a 64-bit size, after the type.\n\t\t_, err = io.ReadFull(r.br, buf[:8])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbox.size = int64(binary.BigEndian.Uint64(buf[:8]))\n\t\tif box.size < 0 {\n\t\t\t// Go uses int64 for sizes typically, but BMFF uses uint64.\n\t\t\t// We assume for now that nobody actually uses boxes larger\n\t\t\t// than int64.\n\t\t\treturn nil, fmt.Errorf(\"unexpectedly large box %q\", box.boxType)\n\t\t}\n\t\tremain = box.size - 2*4 - 8\n\tcase 0:\n\t\t// 0 means unknown & to read to end of file. 
No more boxes.\n\t\tr.noMoreBoxes = true\n\tdefault:\n\t\tremain = box.size - 2*4\n\t}\n\tif remain < 0 {\n\t\treturn nil, fmt.Errorf(\"Box header for %q has size %d, suggesting %d (negative) bytes remain\", box.boxType, box.size, remain)\n\t}\n\tif box.size > 0 {\n\t\tbox.body = io.LimitReader(r.br, remain)\n\t} else {\n\t\tbox.body = r.br\n\t}\n\tr.lastBox = box\n\treturn box, nil\n}\n\n// ReadAndParseBox wraps the ReadBox method, ensuring that the read box is of type typ\n// and parses successfully. It returns the parsed box.\nfunc (r *Reader) ReadAndParseBox(typ BoxType) (Box, error) {\n\tbox, err := r.ReadBox()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error reading %q box: %v\", typ, err)\n\t}\n\tif box.Type() != typ {\n\t\treturn nil, fmt.Errorf(\"error reading %q box: got box type %q instead\", typ, box.Type())\n\t}\n\tpbox, err := box.Parse()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error parsing read %q box: %v\", typ, err)\n\t}\n\treturn pbox, nil\n}\n\nfunc readFullBox(outer *box, br *bufReader) (fb FullBox, err error) {\n\tfb.box = outer\n\t// Parse FullBox header.\n\tbuf, err := br.Peek(4)\n\tif err != nil {\n\t\treturn FullBox{}, fmt.Errorf(\"failed to read 4 bytes of FullBox: %v\", err)\n\t}\n\tfb.Version = buf[0]\n\tbuf[0] = 0\n\tfb.Flags = binary.BigEndian.Uint32(buf[:4])\n\tbr.Discard(4)\n\treturn fb, nil\n}\n\ntype FileTypeBox struct {\n\t*box\n\tMajorBrand   string   // 4 bytes\n\tMinorVersion string   // 4 bytes\n\tCompatible   []string // all 4 bytes\n}\n\nfunc parseFileTypeBox(outer *box, br *bufReader) (Box, error) {\n\tbuf, err := br.Peek(8)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tft := &FileTypeBox{\n\t\tbox:          outer,\n\t\tMajorBrand:   string(buf[:4]),\n\t\tMinorVersion: string(buf[4:8]),\n\t}\n\tbr.Discard(8)\n\tfor {\n\t\tbuf, err := br.Peek(4)\n\t\tif err == io.EOF {\n\t\t\treturn ft, nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tft.Compatible = append(ft.Compatible, 
string(buf[:4]))\n\t\tbr.Discard(4)\n\t}\n}\n\ntype MetaBox struct {\n\tFullBox\n\tChildren []Box\n}\n\nfunc parseMetaBox(outer *box, br *bufReader) (Box, error) {\n\tfb, err := readFullBox(outer, br)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmb := &MetaBox{FullBox: fb}\n\treturn mb, br.parseAppendBoxes(&mb.Children)\n}\n\nfunc (br *bufReader) parseAppendBoxes(dst *[]Box) error {\n\tif br.err != nil {\n\t\treturn br.err\n\t}\n\tboxr := NewReader(br.Reader)\n\tfor {\n\t\tinner, err := boxr.ReadBox()\n\t\tif err == io.EOF {\n\t\t\treturn nil\n\t\t}\n\t\tif err != nil {\n\t\t\tbr.err = err\n\t\t\treturn err\n\t\t}\n\t\tslurp, err := ioutil.ReadAll(inner.Body())\n\t\tif err != nil {\n\t\t\tbr.err = err\n\t\t\treturn err\n\t\t}\n\t\tinner.(*box).slurp = slurp\n\t\t*dst = append(*dst, inner)\n\t}\n}\n\n// ItemInfoEntry represents an \"infe\" box.\n//\n// TODO: currently only parses Version 2 boxes.\ntype ItemInfoEntry struct {\n\tFullBox\n\n\tItemID          uint16\n\tProtectionIndex uint16\n\tItemType        string // always 4 bytes\n\n\tName string\n\n\t// If Type == \"mime\":\n\tContentType     string\n\tContentEncoding string\n\n\t// If Type == \"uri \":\n\tItemURIType string\n}\n\nfunc parseItemInfoEntry(outer *box, br *bufReader) (Box, error) {\n\tfb, err := readFullBox(outer, br)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tie := &ItemInfoEntry{FullBox: fb}\n\tif fb.Version != 2 {\n\t\treturn nil, fmt.Errorf(\"TODO: found version %d infe box. 
Only 2 is supported now.\", fb.Version)\n\t}\n\n\tie.ItemID, _ = br.readUint16()\n\tie.ProtectionIndex, _ = br.readUint16()\n\tif !br.ok() {\n\t\treturn nil, br.err\n\t}\n\tbuf, err := br.Peek(4)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tie.ItemType = string(buf[:4])\n\tie.Name, _ = br.readString()\n\n\tswitch ie.ItemType {\n\tcase \"mime\":\n\t\tie.ContentType, _ = br.readString()\n\t\tif br.anyRemain() {\n\t\t\tie.ContentEncoding, _ = br.readString()\n\t\t}\n\tcase \"uri \":\n\t\tie.ItemURIType, _ = br.readString()\n\t}\n\tif !br.ok() {\n\t\treturn nil, br.err\n\t}\n\treturn ie, nil\n}\n\n// ItemInfoBox represents an \"iinf\" box.\ntype ItemInfoBox struct {\n\tFullBox\n\tCount     uint16\n\tItemInfos []*ItemInfoEntry\n}\n\nfunc parseItemInfoBox(outer *box, br *bufReader) (Box, error) {\n\tfb, err := readFullBox(outer, br)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tib := &ItemInfoBox{FullBox: fb}\n\n\tib.Count, _ = br.readUint16()\n\n\tvar itemInfos []Box\n\tbr.parseAppendBoxes(&itemInfos)\n\tif br.ok() {\n\t\tfor _, box := range itemInfos {\n\t\t\tpb, err := box.Parse()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"error parsing ItemInfoEntry in ItemInfoBox: %v\", err)\n\t\t\t}\n\t\t\tif iie, ok := pb.(*ItemInfoEntry); ok {\n\t\t\t\tib.ItemInfos = append(ib.ItemInfos, iie)\n\t\t\t}\n\t\t}\n\t}\n\tif !br.ok() {\n\t\treturn FullBox{}, br.err\n\t}\n\treturn ib, nil\n}\n\n// bufReader adds some HEIF/BMFF-specific methods around a *bufio.Reader.\ntype bufReader struct {\n\t*bufio.Reader\n\terr error // sticky error\n}\n\n// ok reports whether all previous reads have been error-free.\nfunc (br *bufReader) ok() bool { return br.err == nil }\n\nfunc (br *bufReader) anyRemain() bool {\n\tif br.err != nil {\n\t\treturn false\n\t}\n\t_, err := br.Peek(1)\n\treturn err == nil\n}\n\nfunc (br *bufReader) readUintN(bits uint8) (uint64, error) {\n\tif br.err != nil {\n\t\treturn 0, br.err\n\t}\n\tif bits == 0 {\n\t\treturn 0, nil\n\t}\n\tnbyte := bits / 
8\n\tbuf, err := br.Peek(int(nbyte))\n\tif err != nil {\n\t\tbr.err = err\n\t\treturn 0, err\n\t}\n\tdefer br.Discard(int(nbyte))\n\tswitch bits {\n\tcase 8:\n\t\treturn uint64(buf[0]), nil\n\tcase 16:\n\t\treturn uint64(binary.BigEndian.Uint16(buf[:2])), nil\n\tcase 32:\n\t\treturn uint64(binary.BigEndian.Uint32(buf[:4])), nil\n\tcase 64:\n\t\treturn binary.BigEndian.Uint64(buf[:8]), nil\n\tdefault:\n\t\tbr.err = fmt.Errorf(\"invalid uintn read size\")\n\t\treturn 0, br.err\n\t}\n}\n\nfunc (br *bufReader) readUint8() (uint8, error) {\n\tif br.err != nil {\n\t\treturn 0, br.err\n\t}\n\tv, err := br.ReadByte()\n\tif err != nil {\n\t\tbr.err = err\n\t\treturn 0, err\n\t}\n\treturn v, nil\n}\n\nfunc (br *bufReader) readUint16() (uint16, error) {\n\tif br.err != nil {\n\t\treturn 0, br.err\n\t}\n\tbuf, err := br.Peek(2)\n\tif err != nil {\n\t\tbr.err = err\n\t\treturn 0, err\n\t}\n\tv := binary.BigEndian.Uint16(buf[:2])\n\tbr.Discard(2)\n\treturn v, nil\n}\n\nfunc (br *bufReader) readUint32() (uint32, error) {\n\tif br.err != nil {\n\t\treturn 0, br.err\n\t}\n\tbuf, err := br.Peek(4)\n\tif err != nil {\n\t\tbr.err = err\n\t\treturn 0, err\n\t}\n\tv := binary.BigEndian.Uint32(buf[:4])\n\tbr.Discard(4)\n\treturn v, nil\n}\n\nfunc (br *bufReader) readString() (string, error) {\n\tif br.err != nil {\n\t\treturn \"\", br.err\n\t}\n\ts0, err := br.ReadString(0)\n\tif err != nil {\n\t\tbr.err = err\n\t\treturn \"\", err\n\t}\n\ts := strings.TrimSuffix(s0, \"\\x00\")\n\tif len(s) == len(s0) {\n\t\terr = fmt.Errorf(\"unexpected non-null terminated string\")\n\t\tbr.err = err\n\t\treturn \"\", err\n\t}\n\treturn s, nil\n}\n\n// HEIF: ipco\ntype ItemPropertyContainerBox struct {\n\t*box\n\tProperties []Box // of ItemProperty or ItemFullProperty\n}\n\nfunc parseItemPropertyContainerBox(outer *box, br *bufReader) (Box, error) {\n\tipc := &ItemPropertyContainerBox{box: outer}\n\treturn ipc, br.parseAppendBoxes(&ipc.Properties)\n}\n\n// HEIF: iprp\ntype ItemPropertiesBox struct 
{\n\t*box\n\tPropertyContainer *ItemPropertyContainerBox\n\tAssociations      []*ItemPropertyAssociation // at least 1\n}\n\nfunc parseItemPropertiesBox(outer *box, br *bufReader) (Box, error) {\n\tip := &ItemPropertiesBox{\n\t\tbox: outer,\n\t}\n\n\tvar boxes []Box\n\terr := br.parseAppendBoxes(&boxes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(boxes) < 2 {\n\t\treturn nil, fmt.Errorf(\"expect at least 2 boxes in children; got 0\")\n\t}\n\n\tcb, err := boxes[0].Parse()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse first box, %q: %v\", boxes[0].Type(), err)\n\t}\n\n\tvar ok bool\n\tip.PropertyContainer, ok = cb.(*ItemPropertyContainerBox)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"unexpected type %T for ItemPropertieBox.PropertyContainer\", cb)\n\t}\n\n\t// Association boxes\n\tip.Associations = make([]*ItemPropertyAssociation, 0, len(boxes)-1)\n\tfor _, box := range boxes[1:] {\n\t\tboxp, err := box.Parse()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse association box: %v\", err)\n\t\t}\n\t\tipa, ok := boxp.(*ItemPropertyAssociation)\n\t\tif !ok {\n\t\t\treturn nil, fmt.Errorf(\"unexpected box %q instead of ItemPropertyAssociation\", boxp.Type())\n\t\t}\n\t\tip.Associations = append(ip.Associations, ipa)\n\t}\n\treturn ip, nil\n}\n\ntype ItemPropertyAssociation struct {\n\tFullBox\n\tEntryCount uint32\n\tEntries    []ItemPropertyAssociationItem\n}\n\n// not a box\ntype ItemProperty struct {\n\tEssential bool\n\tIndex     uint16\n}\n\n// not a box\ntype ItemPropertyAssociationItem struct {\n\tItemID            uint32\n\tAssociationsCount int            // as declared\n\tAssociations      []ItemProperty // as parsed\n}\n\nfunc parseItemPropertyAssociation(outer *box, br *bufReader) (Box, error) {\n\tfb, err := readFullBox(outer, br)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tipa := &ItemPropertyAssociation{FullBox: fb}\n\tcount, _ := br.readUint32()\n\tipa.EntryCount = count\n\n\tfor i := uint64(0); i < 
uint64(count) && br.ok(); i++ {\n\t\tvar itemID uint32\n\t\tif fb.Version < 1 {\n\t\t\titemID16, _ := br.readUint16()\n\t\t\titemID = uint32(itemID16)\n\t\t} else {\n\t\t\titemID, _ = br.readUint32()\n\t\t}\n\t\tassocCount, _ := br.readUint8()\n\t\tipai := ItemPropertyAssociationItem{\n\t\t\tItemID:            itemID,\n\t\t\tAssociationsCount: int(assocCount),\n\t\t}\n\t\tfor j := 0; j < int(assocCount) && br.ok(); j++ {\n\t\t\tfirst, _ := br.readUint8()\n\t\t\tessential := first&(1<<7) != 0\n\t\t\tfirst &^= byte(1 << 7)\n\n\t\t\tvar index uint16\n\t\t\tif fb.Flags&1 != 0 {\n\t\t\t\tsecond, _ := br.readUint8()\n\t\t\t\tindex = uint16(first)<<8 | uint16(second)\n\t\t\t} else {\n\t\t\t\tindex = uint16(first)\n\t\t\t}\n\t\t\tipai.Associations = append(ipai.Associations, ItemProperty{\n\t\t\t\tEssential: essential,\n\t\t\t\tIndex:     index,\n\t\t\t})\n\t\t}\n\t\tipa.Entries = append(ipa.Entries, ipai)\n\t}\n\tif !br.ok() {\n\t\treturn nil, br.err\n\t}\n\treturn ipa, nil\n}\n\ntype ImageSpatialExtentsProperty struct {\n\tFullBox\n\tImageWidth  uint32\n\tImageHeight uint32\n}\n\nfunc parseImageSpatialExtentsProperty(outer *box, br *bufReader) (Box, error) {\n\tfb, err := readFullBox(outer, br)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tw, err := br.readUint32()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\th, err := br.readUint32()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ImageSpatialExtentsProperty{\n\t\tFullBox:     fb,\n\t\tImageWidth:  w,\n\t\tImageHeight: h,\n\t}, nil\n}\n\ntype OffsetLength struct {\n\tOffset, Length uint64\n}\n\n// not a box\ntype ItemLocationBoxEntry struct {\n\tItemID             uint16\n\tConstructionMethod uint8 // actually uint4\n\tDataReferenceIndex uint16\n\tBaseOffset         uint64 // uint32 or uint64, depending on encoding\n\tExtentCount        uint16\n\tExtents            []OffsetLength\n}\n\n// box \"iloc\"\ntype ItemLocationBox struct {\n\tFullBox\n\n\toffsetSize, lengthSize, baseOffsetSize, indexSize uint8 // 
actually uint4\n\n\tItemCount uint16\n\tItems     []ItemLocationBoxEntry\n}\n\nfunc parseItemLocationBox(outer *box, br *bufReader) (Box, error) {\n\tfb, err := readFullBox(outer, br)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tilb := &ItemLocationBox{\n\t\tFullBox: fb,\n\t}\n\tbuf, err := br.Peek(4)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tilb.offsetSize = buf[0] >> 4\n\tilb.lengthSize = buf[0] & 15\n\tilb.baseOffsetSize = buf[1] >> 4\n\tif fb.Version > 0 { // version 1\n\t\tilb.indexSize = buf[1] & 15\n\t}\n\n\tilb.ItemCount = binary.BigEndian.Uint16(buf[2:4])\n\tbr.Discard(4)\n\n\tfor i := 0; br.ok() && i < int(ilb.ItemCount); i++ {\n\t\tvar ent ItemLocationBoxEntry\n\t\tent.ItemID, _ = br.readUint16()\n\t\tif fb.Version > 0 { // version 1\n\t\t\tcmeth, _ := br.readUint16()\n\t\t\tent.ConstructionMethod = byte(cmeth & 15)\n\t\t}\n\t\tent.DataReferenceIndex, _ = br.readUint16()\n\t\tif br.ok() && ilb.baseOffsetSize > 0 {\n\t\t\tbr.Discard(int(ilb.baseOffsetSize) / 8)\n\t\t}\n\t\tent.ExtentCount, _ = br.readUint16()\n\t\tfor j := 0; br.ok() && j < int(ent.ExtentCount); j++ {\n\t\t\tvar ol OffsetLength\n\t\t\tol.Offset, _ = br.readUintN(ilb.offsetSize * 8)\n\t\t\tol.Length, _ = br.readUintN(ilb.lengthSize * 8)\n\t\t\tif br.err != nil {\n\t\t\t\treturn nil, br.err\n\t\t\t}\n\t\t\tent.Extents = append(ent.Extents, ol)\n\t\t}\n\t\tilb.Items = append(ilb.Items, ent)\n\t}\n\tif !br.ok() {\n\t\treturn nil, br.err\n\t}\n\treturn ilb, nil\n}\n\n// a \"hdlr\" box.\ntype HandlerBox struct {\n\tFullBox\n\tHandlerType string // always 4 bytes; usually \"pict\" for iOS Camera images\n\tName        string\n}\n\nfunc parseHandlerBox(gen *box, br *bufReader) (Box, error) {\n\tfb, err := readFullBox(gen, br)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thb := &HandlerBox{\n\t\tFullBox: fb,\n\t}\n\tbuf, err := br.Peek(20)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thb.HandlerType = string(buf[4:8])\n\tbr.Discard(20)\n\n\thb.Name, _ = br.readString()\n\treturn hb, 
br.err\n}\n\n// a \"dinf\" box\ntype DataInformationBox struct {\n\t*box\n\tChildren []Box\n}\n\nfunc parseDataInformationBox(gen *box, br *bufReader) (Box, error) {\n\tdib := &DataInformationBox{box: gen}\n\treturn dib, br.parseAppendBoxes(&dib.Children)\n}\n\n// a \"dref\" box.\ntype DataReferenceBox struct {\n\tFullBox\n\tEntryCount uint32\n\tChildren   []Box\n}\n\nfunc parseDataReferenceBox(gen *box, br *bufReader) (Box, error) {\n\tfb, err := readFullBox(gen, br)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdrb := &DataReferenceBox{FullBox: fb}\n\tdrb.EntryCount, _ = br.readUint32()\n\treturn drb, br.parseAppendBoxes(&drb.Children)\n}\n\n// \"pitm\" box\ntype PrimaryItemBox struct {\n\tFullBox\n\tItemID uint16\n}\n\nfunc parsePrimaryItemBox(gen *box, br *bufReader) (Box, error) {\n\tfb, err := readFullBox(gen, br)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tpib := &PrimaryItemBox{FullBox: fb}\n\tpib.ItemID, _ = br.readUint16()\n\tif !br.ok() {\n\t\treturn nil, br.err\n\t}\n\treturn pib, nil\n}\n\n// ImageRotation is a HEIF \"irot\" rotation property.\ntype ImageRotation struct {\n\t*box\n\tAngle uint8 // 1 means 90 degrees counter-clockwise, 2 means 180 counter-clockwise\n}\n\nfunc parseImageRotation(gen *box, br *bufReader) (Box, error) {\n\tv, err := br.readUint8()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &ImageRotation{box: gen, Angle: v & 3}, nil\n}\n"
  },
  {
    "path": "media/heif/dumpheif/dumpheif.go",
    "content": "/*\nCopyright 2018 The go4 Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// The dumpheif program dumps the structure and metadata of a HEIF file.\n//\n// It exists purely for debugging the go4.org/media/heif and\n// go4.org/media/heif/bmff packages; it makes no backwards\n// compatibility promises.\npackage main\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com/rwcarlsen/goexif/exif\"\n\t\"github.com/rwcarlsen/goexif/tiff\"\n\n\t\"go4.org/media/heif\"\n\t\"go4.org/media/heif/bmff\"\n)\n\nvar (\n\texifItemID uint16\n\texifLoc    bmff.ItemLocationBoxEntry\n)\n\nfunc main() {\n\tflag.Parse()\n\tif flag.NArg() != 1 {\n\t\tfmt.Fprintf(os.Stderr, \"usage: dumpheif <file>\\n\")\n\t\tos.Exit(1)\n\t}\n\tf, err := os.Open(flag.Arg(0))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer f.Close()\n\n\thf := heif.Open(f)\n\n\tit, err := hf.PrimaryItem()\n\tif err != nil {\n\t\tlog.Fatalf(\"PrimaryItem: %v\", err)\n\t}\n\tfmt.Printf(\"primary item: %v\\n\", it.ID)\n\n\twidth, height, ok := it.SpatialExtents()\n\tif ok {\n\t\tfmt.Printf(\"spatial extents: %d x %d\\n\", width, height)\n\t}\n\tfmt.Printf(\"properties:\\n\")\n\tfor _, prop := range it.Properties {\n\t\tfmt.Printf(\"\\t%q: %#v\\n\", prop.Type(), prop)\n\t}\n\tif len(it.Properties) == 0 {\n\t\tfmt.Printf(\"\\t(no properties)\\n\")\n\t}\n\n\tif ex, err := hf.EXIF(); err == nil {\n\t\tfmt.Printf(\"EXIF dump:\\n\")\n\t\tex, 
err := exif.Decode(bytes.NewReader(ex))\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"EXIF decode: %v\", err)\n\t\t}\n\t\tex.Walk(exifWalkFunc(func(name exif.FieldName, tag *tiff.Tag) error {\n\t\t\tfmt.Printf(\"\\t%v = %v\\n\", name, tag)\n\t\t\treturn nil\n\t\t}))\n\t\tfmt.Printf(\"\\n\")\n\t}\n\n\tfmt.Printf(\"BMFF boxes:\\n\")\n\tr := bmff.NewReader(f)\n\tfor {\n\t\tbox, err := r.ReadBox()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"ReadBox: %v\", err)\n\t\t}\n\t\tdumpBox(box, 0)\n\t}\n\n}\n\ntype exifWalkFunc func(exif.FieldName, *tiff.Tag) error\n\nfunc (f exifWalkFunc) Walk(name exif.FieldName, tag *tiff.Tag) error {\n\treturn f(name, tag)\n}\n\nfunc dumpBox(box bmff.Box, depth int) {\n\tindent := strings.Repeat(\"    \", depth)\n\tfmt.Printf(\"%sBox: type %q, size %v\\n\", indent, box.Type(), box.Size())\n\n\tbox2, err := box.Parse()\n\tif err == bmff.ErrUnknownBox {\n\t\tslurp, err := ioutil.ReadAll(box.Body())\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"%sreading body: %v\", indent, err)\n\t\t}\n\t\tif len(slurp) < 5000 {\n\t\t\tfmt.Printf(\"%s- contents: %q\\n\", indent, slurp)\n\t\t} else {\n\t\t\tfmt.Printf(\"%s- contents: (... 
%d bytes, starting with %q ...)\\n\", indent, len(slurp), slurp[:100])\n\t\t}\n\t\treturn\n\t}\n\tif err != nil {\n\t\tslurp, _ := ioutil.ReadAll(box.Body())\n\t\tlog.Fatalf(\"Parse box type %q: %v; slurp: %q\", box.Type(), err, slurp)\n\t}\n\n\tswitch v := box2.(type) {\n\tcase *bmff.FileTypeBox, *bmff.HandlerBox, *bmff.PrimaryItemBox:\n\t\tfmt.Printf(\"%s- %T: %+v\\n\", indent, v, v)\n\tcase *bmff.MetaBox:\n\t\tfmt.Printf(\"%s- %T, %d children:\\n\", indent, v, len(v.Children))\n\t\tfor _, child := range v.Children {\n\t\t\tdumpBox(child, depth+1)\n\t\t}\n\tcase *bmff.ItemInfoBox:\n\t\t//slurp, _ := ioutil.ReadAll(box.Body())\n\t\t//fmt.Printf(\"%s- %T raw: %q\\n\", indent, v, slurp)\n\t\tfmt.Printf(\"%s- %T, %d children (%d in slice):\\n\", indent, v, v.Count, len(v.ItemInfos))\n\t\tfor _, child := range v.ItemInfos {\n\t\t\tdumpBox(child, depth+1)\n\t\t}\n\tcase *bmff.ItemInfoEntry:\n\t\tfmt.Printf(\"%s- %T, %+v\\n\", indent, v, v)\n\t\tif v.ItemType == \"Exif\" {\n\t\t\texifItemID = v.ItemID\n\t\t}\n\tcase *bmff.ItemPropertiesBox:\n\t\tfmt.Printf(\"%s- %T\\n\", indent, v)\n\t\tif v.PropertyContainer != nil {\n\t\t\tdumpBox(v.PropertyContainer, depth+1)\n\t\t}\n\t\tfor _, child := range v.Associations {\n\t\t\tdumpBox(child, depth+1)\n\t\t}\n\tcase *bmff.ItemPropertyAssociation:\n\t\tfmt.Printf(\"%s- %T: %d declared entries, %d parsed:\\n\", indent, v, v.EntryCount, len(v.Entries))\n\t\tfor _, ai := range v.Entries {\n\t\t\tfmt.Printf(\"%s  for Item ID %d, %d associations declared, %d parsed:\\n\", indent, ai.ItemID, ai.AssociationsCount, len(ai.Associations))\n\t\t\tfor _, ass := range ai.Associations {\n\t\t\t\tfmt.Printf(\"%s    index: %d, essential: %v\\n\", indent, ass.Index, ass.Essential)\n\t\t\t}\n\t\t}\n\tcase *bmff.DataInformationBox:\n\t\tfmt.Printf(\"%s- %T\\n\", indent, v)\n\t\tfor _, child := range v.Children {\n\t\t\tdumpBox(child, depth+1)\n\t\t}\n\tcase *bmff.DataReferenceBox:\n\t\tfmt.Printf(\"%s- %T\\n\", indent, v)\n\t\tfor _, child := range 
v.Children {\n\t\t\tdumpBox(child, depth+1)\n\t\t}\n\tcase *bmff.ItemPropertyContainerBox:\n\t\tfmt.Printf(\"%s- %T\\n\", indent, v)\n\t\tfor _, child := range v.Properties {\n\t\t\tdumpBox(child, depth+1)\n\t\t}\n\tcase *bmff.ItemLocationBox:\n\t\tfmt.Printf(\"%s- %T: %d items declared, %d parsed:\\n\", indent, v, v.ItemCount, len(v.Items))\n\t\tfor _, lbe := range v.Items {\n\t\t\tfmt.Printf(\"%s  %+v\\n\", indent, lbe)\n\t\t\tif exifItemID != 0 && lbe.ItemID == exifItemID {\n\t\t\t\texifLoc = lbe\n\t\t\t}\n\t\t}\n\n\tcase *bmff.ImageSpatialExtentsProperty:\n\t\tfmt.Printf(\"%s- %T  dimensions: %d x %d\\n\", indent, v, v.ImageWidth, v.ImageHeight)\n\tdefault:\n\t\tfmt.Printf(\"%s- gotype: %T\\n\", indent, box2)\n\t}\n\n}\n"
  },
  {
    "path": "media/heif/heif.go",
    "content": "/*\nCopyright 2018 The go4 Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package heif reads HEIF containers, as found in Apple HEIC/HEVC images.\n// This package does not decode images; it only reads the metadata.\n//\n// This package is a work in progress and makes no API compatibility\n// promises.\npackage heif\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\n\t\"go4.org/media/heif/bmff\"\n)\n\n// File represents a HEIF file.\n//\n// Methods on File should not be called concurrently.\ntype File struct {\n\tra      io.ReaderAt\n\tprimary *Item\n\n\t// Populated lazily, by getMeta:\n\tmetaErr error\n\tmeta    *BoxMeta\n}\n\n// BoxMeta contains the low-level BMFF metadata boxes.\ntype BoxMeta struct {\n\tFileType     *bmff.FileTypeBox\n\tHandler      *bmff.HandlerBox\n\tPrimaryItem  *bmff.PrimaryItemBox\n\tItemInfo     *bmff.ItemInfoBox\n\tProperties   *bmff.ItemPropertiesBox\n\tItemLocation *bmff.ItemLocationBox\n}\n\n// EXIFItemID returns the item ID of the EXIF part, or 0 if not found.\nfunc (m *BoxMeta) EXIFItemID() uint32 {\n\tif m.ItemInfo == nil {\n\t\treturn 0\n\t}\n\tfor _, ife := range m.ItemInfo.ItemInfos {\n\t\tif ife.ItemType == \"Exif\" {\n\t\t\treturn uint32(ife.ItemID)\n\t\t}\n\t}\n\treturn 0\n}\n\n// Item represents an item in a HEIF file.\ntype Item struct {\n\tf *File\n\n\tID         uint32\n\tInfo       *bmff.ItemInfoEntry\n\tLocation   *bmff.ItemLocationBoxEntry // location in file\n\tProperties 
[]bmff.Box\n}\n\n// SpatialExtents returns the item's spatial extents property values, if present,\n// not correcting from any camera rotation metadata.\nfunc (it *Item) SpatialExtents() (width, height int, ok bool) {\n\tfor _, p := range it.Properties {\n\t\tif p, ok := p.(*bmff.ImageSpatialExtentsProperty); ok {\n\t\t\treturn int(p.ImageWidth), int(p.ImageHeight), true\n\t\t}\n\t}\n\treturn\n}\n\n// Rotations returns the number of 90 degree rotations counter-clockwise that this\n// image should be rendered at, in the range [0,3].\nfunc (it *Item) Rotations() int {\n\tfor _, p := range it.Properties {\n\t\tif p, ok := p.(*bmff.ImageRotation); ok {\n\t\t\treturn int(p.Angle)\n\t\t}\n\t}\n\treturn 0\n}\n\n// VisualDimensions returns the item's width and height after correcting\n// for any rotations.\nfunc (it *Item) VisualDimensions() (width, height int, ok bool) {\n\twidth, height, ok = it.SpatialExtents()\n\tfor i := 0; i < it.Rotations(); i++ {\n\t\twidth, height = height, width\n\t}\n\treturn\n}\n\n// TODO: add HEIF imir (mirroring) accessor, like Image.SpatialExtents.\n\n// Open returns a handle to access a HEIF file.\nfunc Open(f io.ReaderAt) *File {\n\treturn &File{ra: f}\n}\n\n// ErrNoEXIF is returned by File.EXIF when a file does not contain an EXIF item.\nvar ErrNoEXIF = errors.New(\"heif: no EXIF found\")\n\n// ErrUnknownItem is returned by File.ItemByID for unknown items.\nvar ErrUnknownItem = errors.New(\"heif: unknown item\")\n\n// EXIF returns the raw EXIF data from the file.\n// The error is ErrNoEXIF if the file did not contain EXIF.\n//\n// The raw EXIF data can be parsed by the\n// github.com/rwcarlsen/goexif/exif package's Decode function.\nfunc (f *File) EXIF() ([]byte, error) {\n\tmeta, err := f.getMeta()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\texifID := meta.EXIFItemID()\n\tif exifID == 0 {\n\t\treturn nil, ErrNoEXIF\n\t}\n\tit, err := f.ItemByID(exifID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif it.Location == nil 
{\n\t\treturn nil, errors.New(\"heif: file said it contained EXIF, but didn't say where\")\n\t}\n\tif n := len(it.Location.Extents); n != 1 {\n\t\treturn nil, fmt.Errorf(\"heif: expected 1 EXIF section, saw %d\", n)\n\t}\n\toffLen := it.Location.Extents[0]\n\tconst maxSize = 20 << 10 // 20MB of EXIF seems excessive; cap it for sanity\n\tif offLen.Length > maxSize {\n\t\treturn nil, fmt.Errorf(\"heif: declared EXIF size %d exceeds threshold of %d bytes\", offLen.Length, maxSize)\n\t}\n\tbuf := make([]byte, offLen.Length-4)\n\tn, err := f.ra.ReadAt(buf, int64(offLen.Offset)+4) // TODO: why 4? did I miss something?\n\tif err != nil {\n\t\tlog.Printf(\"Read %d bytes + %v: %q\", n, err, buf)\n\t\treturn nil, err\n\t}\n\treturn buf, nil\n}\n\nfunc (f *File) setMetaErr(err error) error {\n\tif f.metaErr != nil {\n\t\tf.metaErr = err\n\t}\n\treturn err\n}\n\nfunc (f *File) getMeta() (*BoxMeta, error) {\n\tif f.metaErr != nil {\n\t\treturn nil, f.metaErr\n\t}\n\tif f.meta != nil {\n\t\treturn f.meta, nil\n\t}\n\tconst assumedMaxSize = 5 << 40 // arbitrary\n\tsr := io.NewSectionReader(f.ra, 0, assumedMaxSize)\n\tbmr := bmff.NewReader(sr)\n\n\tmeta := &BoxMeta{}\n\n\tpbox, err := bmr.ReadAndParseBox(bmff.TypeFtyp)\n\tif err != nil {\n\t\treturn nil, f.setMetaErr(err)\n\t}\n\tmeta.FileType = pbox.(*bmff.FileTypeBox)\n\n\tpbox, err = bmr.ReadAndParseBox(bmff.TypeMeta)\n\tif err != nil {\n\t\treturn nil, f.setMetaErr(err)\n\t}\n\tmetabox := pbox.(*bmff.MetaBox)\n\n\tfor _, box := range metabox.Children {\n\t\tboxp, err := box.Parse()\n\t\tif err == bmff.ErrUnknownBox {\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, f.setMetaErr(err)\n\t\t}\n\t\tswitch v := boxp.(type) {\n\t\tcase *bmff.HandlerBox:\n\t\t\tmeta.Handler = v\n\t\tcase *bmff.PrimaryItemBox:\n\t\t\tmeta.PrimaryItem = v\n\t\tcase *bmff.ItemInfoBox:\n\t\t\tmeta.ItemInfo = v\n\t\tcase *bmff.ItemPropertiesBox:\n\t\t\tmeta.Properties = v\n\t\tcase *bmff.ItemLocationBox:\n\t\t\tmeta.ItemLocation = 
v\n\t\t}\n\t}\n\n\tf.meta = meta\n\treturn f.meta, nil\n}\n\n// PrimaryItem returns the HEIF file's primary item.\nfunc (f *File) PrimaryItem() (*Item, error) {\n\tmeta, err := f.getMeta()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif meta.PrimaryItem == nil {\n\t\treturn nil, errors.New(\"heif: HEIF file lacks primary item box\")\n\t}\n\treturn f.ItemByID(uint32(meta.PrimaryItem.ItemID))\n}\n\n// ItemByID by returns the file's Item of a given ID.\n// If the ID is known, the returned error is ErrUnknownItem.\nfunc (f *File) ItemByID(id uint32) (*Item, error) {\n\tmeta, err := f.getMeta()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tit := &Item{\n\t\tf:  f,\n\t\tID: id,\n\t}\n\tif meta.ItemLocation != nil {\n\t\tfor _, ilbe := range meta.ItemLocation.Items {\n\t\t\tif uint32(ilbe.ItemID) == id {\n\t\t\t\tshallowCopy := ilbe\n\t\t\t\tit.Location = &shallowCopy\n\t\t\t}\n\t\t}\n\t}\n\tif meta.ItemInfo != nil {\n\t\tfor _, iie := range meta.ItemInfo.ItemInfos {\n\t\t\tif uint32(iie.ItemID) == id {\n\t\t\t\tit.Info = iie\n\t\t\t}\n\t\t}\n\t}\n\tif it.Info == nil {\n\t\treturn nil, ErrUnknownItem\n\t}\n\tif meta.Properties != nil {\n\t\tallProps := meta.Properties.PropertyContainer.Properties\n\t\tfor _, ipa := range meta.Properties.Associations {\n\t\t\t// TODO: I've never seen a file with more than\n\t\t\t// top-level ItemPropertyAssociation box, but\n\t\t\t// apparently they can exist with different\n\t\t\t// versions/flags. 
For now we just merge them\n\t\t\t// all together, but that's not really right.\n\t\t\t// So for now, just bail once a previous loop\n\t\t\t// found anything.\n\t\t\tif len(it.Properties) > 0 {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tfor _, ipai := range ipa.Entries {\n\t\t\t\tif ipai.ItemID != id {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfor _, ass := range ipai.Associations {\n\t\t\t\t\tif ass.Index != 0 && int(ass.Index) <= len(allProps) {\n\t\t\t\t\t\tbox := allProps[ass.Index-1]\n\t\t\t\t\t\tboxp, err := box.Parse()\n\t\t\t\t\t\tif err == nil {\n\t\t\t\t\t\t\tbox = boxp\n\t\t\t\t\t\t}\n\t\t\t\t\t\tit.Properties = append(it.Properties, box)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn it, nil\n}\n"
  },
  {
    "path": "media/heif/heif_test.go",
    "content": "package heif\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/rwcarlsen/goexif/exif\"\n\t\"github.com/rwcarlsen/goexif/tiff\"\n)\n\nfunc TestAll(t *testing.T) {\n\tf, err := os.Open(\"testdata/park.heic\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer f.Close()\n\th := Open(f)\n\n\t// meta\n\t_, err = h.getMeta()\n\tif err != nil {\n\t\tt.Fatalf(\"getMeta: %v\", err)\n\t}\n\n\tit, err := h.PrimaryItem()\n\tif err != nil {\n\t\tt.Fatalf(\"PrimaryItem: %v\", err)\n\t}\n\tif want := uint32(49); it.ID != want {\n\t\tt.Errorf(\"PrimaryIem ID = %v; want %v\", it.ID, want)\n\t}\n\tif it.Location == nil {\n\t\tt.Errorf(\"Item.Location is nil\")\n\t}\n\tif it.Info == nil {\n\t\tt.Errorf(\"Item.Info is nil\")\n\t}\n\tif len(it.Properties) == 0 {\n\t\tt.Errorf(\"Item.Properties is empty\")\n\t}\n\tfor _, prop := range it.Properties {\n\t\tt.Logf(\"  property: %q, %#v\", prop.Type(), prop)\n\t}\n\tif w, h, ok := it.SpatialExtents(); !ok || w == 0 || h == 0 {\n\t\tt.Errorf(\"no spatial extents found\")\n\t} else {\n\t\tt.Logf(\"dimensions: %v x %v\", w, h)\n\t}\n\n\t// exif\n\texbuf, err := h.EXIF()\n\tif err != nil {\n\t\tt.Errorf(\"EXIF: %v\", err)\n\t} else {\n\t\tconst magic = \"Exif\\x00\\x00\"\n\t\tif !bytes.HasPrefix(exbuf, []byte(magic)) {\n\t\t\tt.Errorf(\"Exif buffer doesn't start with %q: got %q\", magic, exbuf)\n\t\t}\n\t\tx, err := exif.Decode(bytes.NewReader(exbuf))\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"EXIF decode: %v\", err)\n\t\t}\n\t\tgot := map[string]string{}\n\t\tif err := x.Walk(walkFunc(func(name exif.FieldName, tag *tiff.Tag) error {\n\t\t\tgot[fmt.Sprint(name)] = fmt.Sprint(tag)\n\t\t\treturn nil\n\t\t})); err != nil {\n\t\t\tt.Fatalf(\"EXIF walk: %v\", err)\n\t\t}\n\t\tif g, w := len(got), 56; g < w {\n\t\t\tt.Errorf(\"saw %v EXIF tags; want at least %v\", g, w)\n\t\t}\n\t\tif g, w := got[\"GPSLongitude\"], `[\"122/1\",\"21/1\",\"3776/100\"]`; g != w {\n\t\t\tt.Errorf(\"GPSLongitude = %#q; want %#q\", 
g, w)\n\t\t}\n\n\t}\n}\n\nfunc TestRotations(t *testing.T) {\n\tf, err := os.Open(\"testdata/rotate.heic\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer f.Close()\n\th := Open(f)\n\tit, err := h.PrimaryItem()\n\tif err != nil {\n\t\tt.Fatalf(\"PrimaryItem: %v\", err)\n\t}\n\tif r := it.Rotations(); r != 3 {\n\t\tt.Errorf(\"Rotations = %v; want %v\", r, 3)\n\t}\n\tsw, sh, ok := it.SpatialExtents()\n\tif !ok {\n\t\tt.Fatalf(\"expected spatial extents\")\n\t}\n\tvw, vh, ok := it.VisualDimensions()\n\tif !ok {\n\t\tt.Fatalf(\"expected visual dimensions\")\n\t}\n\tif vw != sh || vh != sw {\n\t\tt.Errorf(\"visual dimensions = %v, %v; want %v, %v\", vw, vh, sh, sw)\n\t}\n}\n\ntype walkFunc func(exif.FieldName, *tiff.Tag) error\n\nfunc (f walkFunc) Walk(name exif.FieldName, tag *tiff.Tag) error {\n\treturn f(name, tag)\n}\n"
  },
  {
    "path": "must/must.go",
    "content": "/*\nCopyright 2019 The Perkeep Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package must contains helpers that panic on failure.\npackage must // import \"go4.org/must\"\n\nimport \"io\"\n\n// Close calls c.Close and panics if it returns an error.\n// The panic value is the return value from Close.\nfunc Close(c io.Closer) {\n\tif err := c.Close(); err != nil {\n\t\tpanic(err)\n\t}\n}\n\n// Do runs fn and panics if it returns an error.\n// The panic value is the return value from fn.\nfunc Do(fn func() error) {\n\tif err := fn(); err != nil {\n\t\tpanic(err)\n\t}\n}\n"
  },
  {
    "path": "net/throttle/throttle.go",
    "content": "/*\nCopyright 2012 Google Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package throttle provides a net.Listener that returns\n// artificially-delayed connections for testing real-world\n// connectivity.\npackage throttle // import \"go4.org/net/throttle\"\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst unitSize = 1400 // read/write chunk size. ~MTU size.\n\ntype Rate struct {\n\tKBps    int // or 0, to not rate-limit bandwidth\n\tLatency time.Duration\n}\n\n// byteTime returns the time required for n bytes.\nfunc (r Rate) byteTime(n int) time.Duration {\n\tif r.KBps == 0 {\n\t\treturn 0\n\t}\n\treturn time.Duration(float64(n)/1024/float64(r.KBps)) * time.Second\n}\n\ntype Listener struct {\n\tnet.Listener\n\tDown Rate // server Writes to Client\n\tUp   Rate // server Reads from client\n}\n\nfunc (ln *Listener) Accept() (net.Conn, error) {\n\tc, err := ln.Listener.Accept()\n\ttime.Sleep(ln.Up.Latency)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ttc := &conn{Conn: c, Down: ln.Down, Up: ln.Up}\n\ttc.start()\n\treturn tc, nil\n}\n\ntype nErr struct {\n\tn   int\n\terr error\n}\n\ntype writeReq struct {\n\twriteAt time.Time\n\tp       []byte\n\tresc    chan nErr\n}\n\ntype conn struct {\n\tnet.Conn\n\tDown Rate // for reads\n\tUp   Rate // for writes\n\n\twchan     chan writeReq\n\tcloseOnce sync.Once\n\tcloseErr  error\n}\n\nfunc (c *conn) start() {\n\tc.wchan = make(chan writeReq, 1024)\n\tgo c.writeLoop()\n}\n\nfunc (c 
*conn) writeLoop() {\n\tfor req := range c.wchan {\n\t\ttime.Sleep(req.writeAt.Sub(time.Now()))\n\t\tvar res nErr\n\t\tfor len(req.p) > 0 && res.err == nil {\n\t\t\twritep := req.p\n\t\t\tif len(writep) > unitSize {\n\t\t\t\twritep = writep[:unitSize]\n\t\t\t}\n\t\t\tn, err := c.Conn.Write(writep)\n\t\t\ttime.Sleep(c.Up.byteTime(len(writep)))\n\t\t\tres.n += n\n\t\t\tres.err = err\n\t\t\treq.p = req.p[n:]\n\t\t}\n\t\treq.resc <- res\n\t}\n}\n\nfunc (c *conn) Close() error {\n\tc.closeOnce.Do(func() {\n\t\terr := c.Conn.Close()\n\t\tclose(c.wchan)\n\t\tc.closeErr = err\n\t})\n\treturn c.closeErr\n}\n\nfunc (c *conn) Write(p []byte) (n int, err error) {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\tn = 0\n\t\t\terr = fmt.Errorf(\"%v\", err)\n\t\t\treturn\n\t\t}\n\t}()\n\tresc := make(chan nErr, 1)\n\tc.wchan <- writeReq{time.Now().Add(c.Up.Latency), p, resc}\n\tres := <-resc\n\treturn res.n, res.err\n}\n\nfunc (c *conn) Read(p []byte) (n int, err error) {\n\tconst max = 1024\n\tif len(p) > max {\n\t\tp = p[:max]\n\t}\n\tn, err = c.Conn.Read(p)\n\ttime.Sleep(c.Down.byteTime(n))\n\treturn\n}\n"
  },
  {
    "path": "oauthutil/oauth.go",
    "content": "/*\nCopyright 2015 The Perkeep Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package oauthutil contains OAuth 2 related utilities.\npackage oauthutil // import \"go4.org/oauthutil\"\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"go4.org/wkfs\"\n\t\"golang.org/x/oauth2\"\n)\n\n// TitleBarRedirectURL is the OAuth2 redirect URL to use when the authorization\n// code should be returned in the title bar of the browser, with the page text\n// prompting the user to copy the code and paste it in the application.\nconst TitleBarRedirectURL = \"urn:ietf:wg:oauth:2.0:oob\"\n\n// ErrNoAuthCode is returned when Token() has not found any valid cached token\n// and TokenSource does not have an AuthCode for getting a new token.\nvar ErrNoAuthCode = errors.New(\"oauthutil: unspecified TokenSource.AuthCode\")\n\n// TokenSource is an implementation of oauth2.TokenSource. It uses CacheFile to store and\n// reuse the acquired token, and AuthCode to provide the authorization code that will be\n// exchanged for a token otherwise.\ntype TokenSource struct {\n\tConfig *oauth2.Config\n\n\t// CacheFile is where the token will be stored JSON-encoded. Any call to Token\n\t// first tries to read a valid token from CacheFile.\n\tCacheFile string\n\n\t// AuthCode provides the authorization code that Token will exchange for a token.\n\t// It usually is a way to prompt the user for the code. 
If CacheFile does not provide\n\t// a token and AuthCode is nil, Token returns ErrNoAuthCode.\n\tAuthCode func() string\n}\n\nvar errExpiredToken = errors.New(\"expired token\")\n\n// cachedToken returns the token saved in cacheFile. It specifically returns\n// errExpiredToken if the token is expired.\nfunc cachedToken(cacheFile string) (*oauth2.Token, error) {\n\ttok := new(oauth2.Token)\n\ttokenData, err := wkfs.ReadFile(cacheFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err = json.Unmarshal(tokenData, tok); err != nil {\n\t\treturn nil, err\n\t}\n\tif !tok.Valid() {\n\t\tif tok != nil && time.Now().After(tok.Expiry) {\n\t\t\treturn nil, errExpiredToken\n\t\t}\n\t\treturn nil, errors.New(\"invalid token\")\n\t}\n\treturn tok, nil\n}\n\n// Token first tries to find a valid token in CacheFile, and otherwise uses\n// Config and AuthCode to fetch a new token. This new token is saved in CacheFile\n// (if not blank). If CacheFile did not provide a token and AuthCode is nil,\n// ErrNoAuthCode is returned.\nfunc (src TokenSource) Token() (*oauth2.Token, error) {\n\tvar tok *oauth2.Token\n\tvar err error\n\tif src.CacheFile != \"\" {\n\t\ttok, err = cachedToken(src.CacheFile)\n\t\tif err == nil {\n\t\t\treturn tok, nil\n\t\t}\n\t\tif err != errExpiredToken {\n\t\t\tfmt.Printf(\"Error getting token from %s: %v\\n\", src.CacheFile, err)\n\t\t}\n\t}\n\tif src.AuthCode == nil {\n\t\treturn nil, ErrNoAuthCode\n\t}\n\ttok, err = src.Config.Exchange(oauth2.NoContext, src.AuthCode())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not exchange auth code for a token: %v\", err)\n\t}\n\tif src.CacheFile == \"\" {\n\t\treturn tok, nil\n\t}\n\ttokenData, err := json.Marshal(&tok)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"could not encode token as json: %v\", err)\n\t}\n\tif err := wkfs.WriteFile(src.CacheFile, tokenData, 0600); err != nil {\n\t\treturn nil, fmt.Errorf(\"could not cache token in %v: %v\", src.CacheFile, err)\n\t}\n\treturn tok, nil\n}\n\n// 
NewRefreshTokenSource returns a token source that obtains its initial token\n// based on the provided config and the refresh token.\nfunc NewRefreshTokenSource(config *oauth2.Config, refreshToken string) oauth2.TokenSource {\n\tvar noInitialToken *oauth2.Token = nil\n\treturn oauth2.ReuseTokenSource(noInitialToken, config.TokenSource(\n\t\toauth2.NoContext, // TODO: maybe accept a context later.\n\t\t&oauth2.Token{RefreshToken: refreshToken},\n\t))\n}\n"
  },
  {
    "path": "osutil/osutil.go",
    "content": "/*\nCopyright 2015 The go4 Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package osutil contains os level functions.\npackage osutil // import \"go4.org/osutil\"\n\nimport \"os\"\n\n// Executable returns [os.Executable]. This function predates the Go standard\n// library's os.Executable and is retained here for compatibility.\n//\n// Deprecated: use os.Executable directly instead.\nfunc Executable() (string, error) {\n\treturn os.Executable()\n}\n"
  },
  {
    "path": "readerutil/bufreaderat.go",
    "content": "/*\nCopyright 2018 The go4 Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage readerutil\n\nimport \"io\"\n\n// NewBufferingReaderAt returns an io.ReaderAt that reads from r as\n// necessary and keeps a copy of all data read in memory.\nfunc NewBufferingReaderAt(r io.Reader) io.ReaderAt {\n\treturn &bufReaderAt{r: r}\n}\n\ntype bufReaderAt struct {\n\tr   io.Reader\n\tbuf []byte\n}\n\nfunc (br *bufReaderAt) ReadAt(p []byte, off int64) (n int, err error) {\n\tendOff := off + int64(len(p))\n\tneed := endOff - int64(len(br.buf))\n\tif need > 0 {\n\t\tbuf := make([]byte, need)\n\t\tvar rn int\n\t\trn, err = io.ReadFull(br.r, buf)\n\t\tbr.buf = append(br.buf, buf[:rn]...)\n\t}\n\tif int64(len(br.buf)) >= off {\n\t\tn = copy(p, br.buf[off:])\n\t}\n\tif n == len(p) {\n\t\terr = nil\n\t}\n\treturn\n}\n"
  },
  {
    "path": "readerutil/bufreaderat_test.go",
    "content": "/*\nCopyright 2018 The go4 Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage readerutil\n\nimport \"testing\"\n\ntype trackingReader struct {\n\toff       int\n\treads     int\n\treadBytes int\n}\n\nfunc (t *trackingReader) Read(p []byte) (n int, err error) {\n\tt.reads++\n\tt.readBytes += len(p)\n\tfor len(p) > 0 {\n\t\tp[0] = '0' + byte(t.off%10)\n\t\tt.off++\n\t\tp = p[1:]\n\t\tn++\n\t}\n\treturn\n\n}\n\nfunc TestBufferingReaderAt(t *testing.T) {\n\ttr := new(trackingReader)\n\tra := NewBufferingReaderAt(tr)\n\tfor i, tt := range []struct {\n\t\toff           int64\n\t\twant          string\n\t\twantReads     int\n\t\twantReadBytes int\n\t}{\n\t\t{off: 0, want: \"0123456789\", wantReads: 1, wantReadBytes: 10},\n\t\t{off: 5, want: \"56789\", wantReads: 1, wantReadBytes: 10},      // already buffered\n\t\t{off: 6, want: \"67890\", wantReads: 2, wantReadBytes: 11},      // need 1 more byte\n\t\t{off: 0, want: \"0123456789\", wantReads: 2, wantReadBytes: 11}, // already buffered\n\t} {\n\t\tgot := make([]byte, len(tt.want))\n\t\tn, err := ra.ReadAt(got, tt.off)\n\t\tif err != nil || n != len(tt.want) {\n\t\t\tt.Errorf(\"step %d: ReadAt = %v, %v; want %v, %v\", i, n, err, len(tt.want), nil)\n\t\t\tcontinue\n\t\t}\n\t\tif string(got) != tt.want {\n\t\t\tt.Errorf(\"step %d: ReadAt read %q; want %q\", i, got, tt.want)\n\t\t}\n\t\tif tr.reads != tt.wantReads {\n\t\t\tt.Errorf(\"step %d: num reads = %d; want %d\", i, tr.reads, 
tt.wantReads)\n\t\t}\n\t\tif tr.readBytes != tt.wantReadBytes {\n\t\t\tt.Errorf(\"step %d: read bytes = %d; want %d\", i, tr.reads, tt.wantReads)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "readerutil/countingreader.go",
    "content": "/*\nCopyright 2011 The Go4 Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage readerutil\n\nimport \"io\"\n\n// CountingReader wraps a Reader, incrementing N by the number of\n// bytes read. No locking is performed.\ntype CountingReader struct {\n\tReader io.Reader\n\tN      *int64\n}\n\nfunc (cr CountingReader) Read(p []byte) (n int, err error) {\n\tn, err = cr.Reader.Read(p)\n\t*cr.N += int64(n)\n\treturn\n}\n"
  },
  {
    "path": "readerutil/fakeseeker.go",
    "content": "/*\nCopyright 2014 The Perkeep Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage readerutil\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n)\n\n// fakeSeeker can seek to the ends but any read not at the current\n// position will fail.\ntype fakeSeeker struct {\n\tr    io.Reader\n\tsize int64\n\n\tfakePos int64\n\trealPos int64\n}\n\n// NewFakeSeeker returns a ReadSeeker that can pretend to Seek (based\n// on the provided total size of the reader's content), but any reads\n// will fail if the fake seek position doesn't match reality.\nfunc NewFakeSeeker(r io.Reader, size int64) io.ReadSeeker {\n\treturn &fakeSeeker{r: r, size: size}\n}\n\nfunc (fs *fakeSeeker) Seek(offset int64, whence int) (int64, error) {\n\tvar newo int64\n\tswitch whence {\n\tdefault:\n\t\treturn 0, errors.New(\"invalid whence\")\n\tcase os.SEEK_SET:\n\t\tnewo = offset\n\tcase os.SEEK_CUR:\n\t\tnewo = fs.fakePos + offset\n\tcase os.SEEK_END:\n\t\tnewo = fs.size + offset\n\t}\n\tif newo < 0 {\n\t\treturn 0, errors.New(\"negative seek\")\n\t}\n\tfs.fakePos = newo\n\treturn newo, nil\n}\n\nfunc (fs *fakeSeeker) Read(p []byte) (n int, err error) {\n\tif fs.fakePos != fs.realPos {\n\t\treturn 0, fmt.Errorf(\"attempt to read from fake seek offset %d; real offset is %d\", fs.fakePos, fs.realPos)\n\t}\n\tn, err = fs.r.Read(p)\n\tfs.fakePos += int64(n)\n\tfs.realPos += int64(n)\n\treturn\n}\n"
  },
  {
    "path": "readerutil/fakeseeker_test.go",
    "content": "/*\nCopyright 2014 The Perkeep Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage readerutil\n\nimport (\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestFakeSeeker(t *testing.T) {\n\trs := NewFakeSeeker(strings.NewReader(\"foobar\"), 6)\n\tif pos, err := rs.Seek(0, os.SEEK_END); err != nil || pos != 6 {\n\t\tt.Fatalf(\"SEEK_END = %d, %v; want 6, nil\", pos, err)\n\t}\n\tif pos, err := rs.Seek(0, os.SEEK_CUR); err != nil || pos != 6 {\n\t\tt.Fatalf(\"SEEK_CUR = %d, %v; want 6, nil\", pos, err)\n\t}\n\tif pos, err := rs.Seek(0, os.SEEK_SET); err != nil || pos != 0 {\n\t\tt.Fatalf(\"SEEK_SET = %d, %v; want 0, nil\", pos, err)\n\t}\n\n\tbuf := make([]byte, 3)\n\tif n, err := rs.Read(buf); n != 3 || err != nil || string(buf) != \"foo\" {\n\t\tt.Fatalf(\"First read = %d, %v (buf = %q); want foo\", n, err, buf)\n\t}\n\tif pos, err := rs.Seek(0, os.SEEK_CUR); err != nil || pos != 3 {\n\t\tt.Fatalf(\"Seek cur pos after first read = %d, %v; want 3, nil\", pos, err)\n\t}\n\tif n, err := rs.Read(buf); n != 3 || err != nil || string(buf) != \"bar\" {\n\t\tt.Fatalf(\"Second read = %d, %v (buf = %q); want foo\", n, err, buf)\n\t}\n\n\tif pos, err := rs.Seek(1, os.SEEK_SET); err != nil || pos != 1 {\n\t\tt.Fatalf(\"SEEK_SET = %d, %v; want 1, nil\", pos, err)\n\t}\n\tconst msg = \"attempt to read from fake seek offset\"\n\tif _, err := rs.Read(buf); err == nil || !strings.Contains(err.Error(), msg) {\n\t\tt.Fatalf(\"bogus Read after seek = %v; want 
something containing %q\", err, msg)\n\t}\n}\n"
  },
  {
    "path": "readerutil/multireaderat.go",
    "content": "/*\nCopyright 2016 The go4 Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage readerutil\n\nimport (\n\t\"io\"\n\t\"sort\"\n)\n\n// NewMultiReaderAt is like io.MultiReader but produces a ReaderAt\n// (and Size), instead of just a reader.\nfunc NewMultiReaderAt(parts ...SizeReaderAt) SizeReaderAt {\n\tm := &multiRA{\n\t\tparts: make([]offsetAndSource, 0, len(parts)),\n\t}\n\tvar off int64\n\tfor _, p := range parts {\n\t\tm.parts = append(m.parts, offsetAndSource{off, p})\n\t\toff += p.Size()\n\t}\n\tm.size = off\n\treturn m\n}\n\ntype offsetAndSource struct {\n\toff int64\n\tSizeReaderAt\n}\n\ntype multiRA struct {\n\tparts []offsetAndSource\n\tsize  int64\n}\n\nfunc (m *multiRA) Size() int64 { return m.size }\n\nfunc (m *multiRA) ReadAt(p []byte, off int64) (n int, err error) {\n\twantN := len(p)\n\n\t// Skip past the requested offset.\n\tskipParts := sort.Search(len(m.parts), func(i int) bool {\n\t\t// This function returns whether parts[i] will\n\t\t// contribute any bytes to our output.\n\t\tpart := m.parts[i]\n\t\treturn part.off+part.Size() > off\n\t})\n\tparts := m.parts[skipParts:]\n\n\t// How far to skip in the first part.\n\tneedSkip := off\n\tif len(parts) > 0 {\n\t\tneedSkip -= parts[0].off\n\t}\n\n\tfor len(parts) > 0 && len(p) > 0 {\n\t\treadP := p\n\t\tpartSize := parts[0].Size()\n\t\tif int64(len(readP)) > partSize-needSkip {\n\t\t\treadP = readP[:partSize-needSkip]\n\t\t}\n\t\tpn, err0 := parts[0].ReadAt(readP, needSkip)\n\t\tif 
err0 != nil {\n\t\t\treturn n, err0\n\t\t}\n\t\tn += pn\n\t\tp = p[pn:]\n\t\tif int64(pn)+needSkip == partSize {\n\t\t\tparts = parts[1:]\n\t\t}\n\t\tneedSkip = 0\n\t}\n\n\tif n != wantN {\n\t\terr = io.ErrUnexpectedEOF\n\t}\n\treturn\n}\n\n// ZeroSizeReaderAt returns a SizeReaderAt that's size bytes\n// of all zeros.\nfunc ZeroSizeReaderAt(size int64) SizeReaderAt {\n\treturn allZeros{n: size}\n}\n\ntype allZeros struct{ n int64 }\n\nfunc (a allZeros) ReadAt(p []byte, off int64) (n int, err error) {\n\tfor i := range p {\n\t\tp[i] = 0\n\t}\n\treturn len(p), nil\n}\n\nfunc (a allZeros) Size() int64 { return a.n }\n"
  },
  {
    "path": "readerutil/multireaderat_test.go",
    "content": "/*\nCopyright 2016 The Go4 Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage readerutil\n\nimport (\n\t\"io\"\n\t\"io/ioutil\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestMultiReaderAt(t *testing.T) {\n\tsra := NewMultiReaderAt(\n\t\tio.NewSectionReader(strings.NewReader(\"xaaax\"), 1, 3),\n\t\tio.NewSectionReader(strings.NewReader(\"xxbbbbxx\"), 2, 3),\n\t\tio.NewSectionReader(strings.NewReader(\"cccx\"), 0, 3),\n\t)\n\tif sra.Size() != 9 {\n\t\tt.Fatalf(\"Size = %d; want 9\", sra.Size())\n\t}\n\tconst full = \"aaabbbccc\"\n\tfor start := 0; start < len(full); start++ {\n\t\tfor end := start; end < len(full); end++ {\n\t\t\twant := full[start:end]\n\t\t\tgot, err := ioutil.ReadAll(io.NewSectionReader(sra, int64(start), int64(end-start)))\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tif string(got) != want {\n\t\t\t\tt.Errorf(\"for start=%d, end=%d: ReadAll = %q; want %q\", start, end, got, want)\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "readerutil/readersize.go",
    "content": "/*\nCopyright 2012 The Go4 Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package readerutil provides and operates on io.Readers.\npackage readerutil // import \"go4.org/readerutil\"\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n)\n\n// Size tries to determine the length of r. If r is an io.Seeker, Size may seek\n// to guess the length.\nfunc Size(r io.Reader) (size int64, ok bool) {\n\tswitch rt := r.(type) {\n\tcase *bytes.Buffer:\n\t\treturn int64(rt.Len()), true\n\tcase *bytes.Reader:\n\t\treturn int64(rt.Len()), true\n\tcase *strings.Reader:\n\t\treturn int64(rt.Len()), true\n\tcase io.Seeker:\n\t\tpos, err := rt.Seek(0, os.SEEK_CUR)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tend, err := rt.Seek(0, os.SEEK_END)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tsize = end - pos\n\t\tpos1, err := rt.Seek(pos, os.SEEK_SET)\n\t\tif err != nil || pos1 != pos {\n\t\t\tmsg := \"failed to restore seek position\"\n\t\t\tif err != nil {\n\t\t\t\tmsg += \": \" + err.Error()\n\t\t\t}\n\t\t\tpanic(msg)\n\t\t}\n\t\treturn size, true\n\t}\n\treturn 0, false\n}\n"
  },
  {
    "path": "readerutil/readersize_test.go",
    "content": "/*\nCopyright 2012 The Go4 Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage readerutil\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"testing\"\n)\n\nconst text = \"HelloWorld\"\n\ntype testSrc struct {\n\tname string\n\tsrc  io.Reader\n\twant int64\n}\n\nfunc (tsrc *testSrc) run(t *testing.T) {\n\tn, ok := Size(tsrc.src)\n\tif !ok {\n\t\tt.Fatalf(\"failed to read size for %q\", tsrc.name)\n\t}\n\tif n != tsrc.want {\n\t\tt.Fatalf(\"wanted %v, got %v\", tsrc.want, n)\n\t}\n}\n\nfunc TestBytesBuffer(t *testing.T) {\n\tbuf := bytes.NewBuffer([]byte(text))\n\ttsrc := &testSrc{\"buffer\", buf, int64(len(text))}\n\ttsrc.run(t)\n}\n\nfunc TestSeeker(t *testing.T) {\n\tf, err := ioutil.TempFile(\"\", \"camliTestReaderSize\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.Remove(f.Name())\n\tdefer f.Close()\n\tsize, err := f.Write([]byte(text))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tpos, err := f.Seek(5, 0)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\ttsrc := &testSrc{\"seeker\", f, int64(size) - pos}\n\ttsrc.run(t)\n}\n"
  },
  {
    "path": "readerutil/readerutil.go",
    "content": "/*\nCopyright 2016 The go4 Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package readerutil contains io.Reader types.\npackage readerutil // import \"go4.org/readerutil\"\n\nimport (\n\t\"expvar\"\n\t\"io\"\n)\n\n// A SizeReaderAt is a ReaderAt with a Size method.\n//\n// An io.SectionReader implements SizeReaderAt.\ntype SizeReaderAt interface {\n\tSize() int64\n\tio.ReaderAt\n}\n\n// A ReadSeekCloser can Read, Seek, and Close.\ntype ReadSeekCloser interface {\n\tio.Reader\n\tio.Seeker\n\tio.Closer\n}\n\ntype ReaderAtCloser interface {\n\tio.ReaderAt\n\tio.Closer\n}\n\n// TODO(wathiede): make sure all the stat readers work with code that\n// type asserts ReadFrom/WriteTo.\n\ntype varStatReader struct {\n\t*expvar.Int\n\tr io.Reader\n}\n\n// NewStatsReader returns an io.Reader that will have the number of bytes\n// read from r added to v.\nfunc NewStatsReader(v *expvar.Int, r io.Reader) io.Reader {\n\treturn &varStatReader{v, r}\n}\n\nfunc (v *varStatReader) Read(p []byte) (int, error) {\n\tn, err := v.r.Read(p)\n\tv.Int.Add(int64(n))\n\treturn n, err\n}\n\ntype varStatReadSeeker struct {\n\t*expvar.Int\n\trs io.ReadSeeker\n}\n\n// NewStatsReadSeeker returns an io.ReadSeeker that will have the number of bytes\n// read from rs added to v.\nfunc NewStatsReadSeeker(v *expvar.Int, rs io.ReadSeeker) io.ReadSeeker {\n\treturn &varStatReadSeeker{v, rs}\n}\n\nfunc (v *varStatReadSeeker) Read(p []byte) (int, error) {\n\tn, err := 
v.rs.Read(p)\n\tv.Int.Add(int64(n))\n\treturn n, err\n}\n\nfunc (v *varStatReadSeeker) Seek(offset int64, whence int) (int64, error) {\n\treturn v.rs.Seek(offset, whence)\n}\n"
  },
  {
    "path": "readerutil/readerutil_test.go",
    "content": "/*\nCopyright 2016 The Go4 Authors.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage readerutil\n\nimport (\n\t\"expvar\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"strings\"\n)\n\nfunc ExampleNewStatsReader() {\n\tvar (\n\t\t// r is the io.Reader we'd like to count read from.\n\t\tr  = strings.NewReader(\"Hello world\")\n\t\tv  = expvar.NewInt(\"read-bytes\")\n\t\tsw = NewStatsReader(v, r)\n\t)\n\t// Read from the wrapped io.Reader, StatReader will count the bytes.\n\tio.Copy(ioutil.Discard, sw)\n\tfmt.Printf(\"Read %s bytes\\n\", v.String())\n\t// Output: Read 11 bytes\n}\n"
  },
  {
    "path": "readerutil/singlereader/opener.go",
    "content": "/*\nCopyright 2013 The Go4 Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// package singlereader provides Open and Close operations, reusing existing\n// file descriptors when possible.\npackage singlereader // import \"go4.org/readerutil/singlereader\"\n\nimport (\n\t\"sync\"\n\n\t\"go4.org/readerutil\"\n\t\"go4.org/syncutil/singleflight\"\n\t\"go4.org/wkfs\"\n)\n\nvar (\n\topenerGroup singleflight.Group\n\n\topenFileMu sync.Mutex // guards openFiles\n\topenFiles  = make(map[string]*openFile)\n)\n\ntype openFile struct {\n\twkfs.File\n\tpath     string // map key of openFiles\n\trefCount int\n}\n\ntype openFileHandle struct {\n\tclosed bool\n\t*openFile\n}\n\nfunc (f *openFileHandle) Close() error {\n\topenFileMu.Lock()\n\tif f.closed {\n\t\topenFileMu.Unlock()\n\t\treturn nil\n\t}\n\tf.closed = true\n\tf.refCount--\n\tif f.refCount < 0 {\n\t\tpanic(\"unexpected negative refcount\")\n\t}\n\tzero := f.refCount == 0\n\tif zero {\n\t\tdelete(openFiles, f.path)\n\t}\n\topenFileMu.Unlock()\n\tif !zero {\n\t\treturn nil\n\t}\n\treturn f.openFile.File.Close()\n}\n\n// Open opens the given file path for reading, reusing existing file descriptors\n// when possible.\nfunc Open(path string) (readerutil.ReaderAtCloser, error) {\n\topenFileMu.Lock()\n\tof := openFiles[path]\n\tif of != nil {\n\t\tof.refCount++\n\t\topenFileMu.Unlock()\n\t\treturn &openFileHandle{false, of}, nil\n\t}\n\topenFileMu.Unlock() // release the lock while we call os.Open\n\n\twinner 
:= false // this goroutine made it into Do's func\n\n\t// Returns an *openFile\n\tresi, err := openerGroup.Do(path, func() (interface{}, error) {\n\t\twinner = true\n\t\tf, err := wkfs.Open(path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tof := &openFile{\n\t\t\tFile:     f,\n\t\t\tpath:     path,\n\t\t\trefCount: 1,\n\t\t}\n\t\topenFileMu.Lock()\n\t\topenFiles[path] = of\n\t\topenFileMu.Unlock()\n\t\treturn of, nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tof = resi.(*openFile)\n\n\t// If our os.Open was dup-suppressed, we have to increment our\n\t// reference count.\n\tif !winner {\n\t\topenFileMu.Lock()\n\t\tif of.refCount == 0 {\n\t\t\t// Winner already closed it. Try again (rare).\n\t\t\topenFileMu.Unlock()\n\t\t\treturn Open(path)\n\t\t}\n\t\tof.refCount++\n\t\topenFileMu.Unlock()\n\t}\n\treturn &openFileHandle{false, of}, nil\n}\n"
  },
  {
    "path": "readerutil/singlereader/opener_test.go",
    "content": "/*\nCopyright 2013 The Go4 Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage singlereader\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"runtime\"\n\t\"testing\"\n)\n\nfunc TestOpenSingle(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping in short mode\")\n\t}\n\tdefer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))\n\tf, err := ioutil.TempFile(\"\", \"foo\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer os.Remove(f.Name())\n\tcontents := []byte(\"Some file contents\")\n\tif _, err := f.Write(contents); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tf.Close()\n\n\tconst j = 4\n\terrc := make(chan error, j)\n\tfor i := 1; i < j; i++ {\n\t\tgo func() {\n\t\t\tbuf := make([]byte, len(contents))\n\t\t\tfor i := 0; i < 400; i++ {\n\t\t\t\trac, err := Open(f.Name())\n\t\t\t\tif err != nil {\n\t\t\t\t\terrc <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tn, err := rac.ReadAt(buf, 0)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrc <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif n != len(contents) || !bytes.Equal(buf, contents) {\n\t\t\t\t\terrc <- fmt.Errorf(\"read %d, %q; want %d, %q\", n, buf, len(contents), contents)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tif err := rac.Close(); err != nil {\n\t\t\t\t\terrc <- err\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\terrc <- nil\n\t\t}()\n\t}\n\tfor i := 1; i < j; i++ {\n\t\tif err := <-errc; err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "reflectutil/swapper.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package reflectutil contains a legacy Swapper function\n// that has since moved to the Go standard library.\npackage reflectutil\n\nimport \"reflect\"\n\n// Swapper returns a function which swaps the elements in slice.\n// Swapper panics if the provided interface is not a slice.\n//\n// Its goal is to work safely and efficiently for all versions and\n// variants of Go: pre-Go1.5, Go1.5+, safe, unsafe, App Engine,\n// GopherJS, etc.\n//\n// Deprecated: this moved to the Go standard library. Use\n// reflect.Swapper in Go 1.8+ instead.\nfunc Swapper(slice any) func(i, j int) {\n\treturn reflect.Swapper(slice)\n}\n"
  },
  {
    "path": "rollsum/rollsum.go",
    "content": "/*\nCopyright 2011 The Perkeep Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package rollsum implements rolling checksums similar to apenwarr's bup, which\n// is similar to librsync.\n//\n// The bup project is at https://github.com/apenwarr/bup and its splitting in\n// particular is at https://github.com/apenwarr/bup/blob/master/lib/bup/bupsplit.c\npackage rollsum // import \"go4.org/rollsum\"\n\nimport (\n\t\"math/bits\"\n)\n\nconst windowSize = 64 // Roll assumes windowSize is a power of 2\nconst charOffset = 31\n\nconst blobBits = 13\nconst blobSize = 1 << blobBits // 8k\n\ntype RollSum struct {\n\ts1, s2 uint32\n\twindow [windowSize]uint8\n\twofs   int\n}\n\nfunc New() *RollSum {\n\treturn &RollSum{\n\t\ts1: windowSize * charOffset,\n\t\ts2: windowSize * (windowSize - 1) * charOffset,\n\t}\n}\n\nfunc (rs *RollSum) add(drop, add uint32) {\n\ts1 := rs.s1 + add - drop\n\trs.s1 = s1\n\trs.s2 += s1 - uint32(windowSize)*(drop+charOffset)\n}\n\n// Roll adds ch to the rolling sum.\nfunc (rs *RollSum) Roll(ch byte) {\n\twp := &rs.window[rs.wofs]\n\trs.add(uint32(*wp), uint32(ch))\n\t*wp = ch\n\trs.wofs = (rs.wofs + 1) & (windowSize - 1)\n}\n\n// OnSplit reports whether at least 13 consecutive trailing bits of\n// the current checksum are set the same way.\nfunc (rs *RollSum) OnSplit() bool {\n\treturn (rs.s2 & (blobSize - 1)) == ((^0) & (blobSize - 1))\n}\n\n// OnSplitWithBits reports whether at least n consecutive trailing bits\n// of the current 
checksum are set the same way.\nfunc (rs *RollSum) OnSplitWithBits(n uint32) bool {\n\tmask := (uint32(1) << n) - 1\n\treturn rs.s2&mask == (^uint32(0))&mask\n}\n\nfunc (rs *RollSum) Bits() int {\n\trsum := rs.Digest() >> (blobBits + 1)\n\treturn blobBits + bits.TrailingZeros32(^rsum)\n}\n\nfunc (rs *RollSum) Digest() uint32 {\n\treturn (rs.s1 << 16) | (rs.s2 & 0xffff)\n}\n"
  },
  {
    "path": "rollsum/rollsum_test.go",
    "content": "/*\nCopyright 2011 The Perkeep Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage rollsum\n\nimport (\n\t\"math/rand\"\n\t\"testing\"\n)\n\nfunc TestSum(t *testing.T) {\n\tvar buf [100000]uint8\n\trnd := rand.New(rand.NewSource(4))\n\tfor i := range buf {\n\t\tbuf[i] = uint8(rnd.Intn(256))\n\t}\n\n\troll := func(offset, len int) *RollSum {\n\t\trs := New()\n\t\tfor count := offset; count < len; count++ {\n\t\t\trs.Roll(buf[count])\n\t\t}\n\t\treturn rs\n\t}\n\n\tsum := func(offset, len int) uint32 {\n\t\trs := roll(offset, len)\n\t\treturn rs.Digest()\n\t}\n\n\tsum1a := sum(0, len(buf))\n\tsum1b := sum(1, len(buf))\n\tsum2a := sum(len(buf)-windowSize*5/2, len(buf)-windowSize)\n\tsum2b := sum(0, len(buf)-windowSize)\n\tsum3a := sum(0, windowSize+3)\n\tsum3b := sum(3, windowSize+3)\n\n\tif sum1a != sum1b {\n\t\tt.Errorf(\"sum1a=%d sum1b=%d\", sum1a, sum1b)\n\t}\n\tif sum2a != sum2b {\n\t\tt.Errorf(\"sum2a=%d sum2b=%d\", sum2a, sum2b)\n\t}\n\tif sum3a != sum3b {\n\t\tt.Errorf(\"sum3a=%d sum3b=%d\", sum3a, sum3b)\n\t}\n\n\tend := 500\n\trs := roll(0, windowSize)\n\tfor i := 0; i < end; i++ {\n\t\tsumRoll := rs.Digest()\n\t\tnewRoll := roll(i, i+windowSize).Digest()\n\n\t\tif sumRoll != newRoll {\n\t\t\tt.Errorf(\"Error: i=%d, buf[i]=%d, sumRoll=%d, newRoll=%d\\n\", i, buf[i], sumRoll, newRoll)\n\t\t}\n\n\t\trs.Roll(buf[i+windowSize])\n\t}\n}\n\nfunc BenchmarkRollsum(b *testing.B) {\n\tconst bufSize = 5 << 20\n\tbuf := make([]byte, bufSize)\n\tfor i 
:= range buf {\n\t\tbuf[i] = byte(rand.Int63())\n\t}\n\n\tb.ResetTimer()\n\trs := New()\n\tsplits := 0\n\tfor i := 0; i < b.N; i++ {\n\t\tsplits = 0\n\t\tfor _, b := range buf {\n\t\t\trs.Roll(b)\n\t\t\tif rs.OnSplit() {\n\t\t\t\t_ = rs.Bits()\n\t\t\t\tsplits++\n\t\t\t}\n\t\t}\n\t}\n\tb.SetBytes(bufSize)\n\tb.Logf(\"num splits = %d; every %d bytes\", splits, int(float64(bufSize)/float64(splits)))\n}\n"
  },
  {
    "path": "sort/example_interface_test.go",
    "content": "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage sort_test\n\nimport (\n\t\"fmt\"\n\n\t\"go4.org/sort\"\n)\n\ntype Person struct {\n\tName string\n\tAge  int\n}\n\nfunc (p Person) String() string {\n\treturn fmt.Sprintf(\"%s: %d\", p.Name, p.Age)\n}\n\n// ByAge implements sort.Interface for []Person based on\n// the Age field.\ntype ByAge []Person\n\nfunc (a ByAge) Len() int           { return len(a) }\nfunc (a ByAge) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }\nfunc (a ByAge) Less(i, j int) bool { return a[i].Age < a[j].Age }\n\nfunc ExampleSort() {\n\tpeople := []Person{\n\t\t{\"Bob\", 31},\n\t\t{\"John\", 42},\n\t\t{\"Michael\", 17},\n\t\t{\"Jenny\", 26},\n\t}\n\n\tfmt.Println(people)\n\tsort.Sort(ByAge(people))\n\tfmt.Println(people)\n\n\t// Output:\n\t// [Bob: 31 John: 42 Michael: 17 Jenny: 26]\n\t// [Michael: 17 Jenny: 26 Bob: 31 John: 42]\n}\n"
  },
  {
    "path": "sort/example_keys_test.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage sort_test\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n)\n\n// A couple of type definitions to make the units clear.\ntype earthMass float64\ntype au float64\n\n// A Planet defines the properties of a solar system object.\ntype Planet struct {\n\tname     string\n\tmass     earthMass\n\tdistance au\n}\n\n// By is the type of a \"less\" function that defines the ordering of its Planet arguments.\ntype By func(p1, p2 *Planet) bool\n\n// Sort is a method on the function type, By, that sorts the argument slice according to the function.\nfunc (by By) Sort(planets []Planet) {\n\tps := &planetSorter{\n\t\tplanets: planets,\n\t\tby:      by, // The Sort method's receiver is the function (closure) that defines the sort order.\n\t}\n\tsort.Sort(ps)\n}\n\n// planetSorter joins a By function and a slice of Planets to be sorted.\ntype planetSorter struct {\n\tplanets []Planet\n\tby      func(p1, p2 *Planet) bool // Closure used in the Less method.\n}\n\n// Len is part of sort.Interface.\nfunc (s *planetSorter) Len() int {\n\treturn len(s.planets)\n}\n\n// Swap is part of sort.Interface.\nfunc (s *planetSorter) Swap(i, j int) {\n\ts.planets[i], s.planets[j] = s.planets[j], s.planets[i]\n}\n\n// Less is part of sort.Interface. 
It is implemented by calling the \"by\" closure in the sorter.\nfunc (s *planetSorter) Less(i, j int) bool {\n\treturn s.by(&s.planets[i], &s.planets[j])\n}\n\nvar planets = []Planet{\n\t{\"Mercury\", 0.055, 0.4},\n\t{\"Venus\", 0.815, 0.7},\n\t{\"Earth\", 1.0, 1.0},\n\t{\"Mars\", 0.107, 1.5},\n}\n\n// ExampleSortKeys demonstrates a technique for sorting a struct type using programmable sort criteria.\nfunc Example_sortKeys() {\n\t// Closures that order the Planet structure.\n\tname := func(p1, p2 *Planet) bool {\n\t\treturn p1.name < p2.name\n\t}\n\tmass := func(p1, p2 *Planet) bool {\n\t\treturn p1.mass < p2.mass\n\t}\n\tdistance := func(p1, p2 *Planet) bool {\n\t\treturn p1.distance < p2.distance\n\t}\n\tdecreasingDistance := func(p1, p2 *Planet) bool {\n\t\treturn !distance(p1, p2)\n\t}\n\n\t// Sort the planets by the various criteria.\n\tBy(name).Sort(planets)\n\tfmt.Println(\"By name:\", planets)\n\n\tBy(mass).Sort(planets)\n\tfmt.Println(\"By mass:\", planets)\n\n\tBy(distance).Sort(planets)\n\tfmt.Println(\"By distance:\", planets)\n\n\tBy(decreasingDistance).Sort(planets)\n\tfmt.Println(\"By decreasing distance:\", planets)\n\n\t// Output: By name: [{Earth 1 1} {Mars 0.107 1.5} {Mercury 0.055 0.4} {Venus 0.815 0.7}]\n\t// By mass: [{Mercury 0.055 0.4} {Mars 0.107 1.5} {Venus 0.815 0.7} {Earth 1 1}]\n\t// By distance: [{Mercury 0.055 0.4} {Venus 0.815 0.7} {Earth 1 1} {Mars 0.107 1.5}]\n\t// By decreasing distance: [{Mars 0.107 1.5} {Earth 1 1} {Venus 0.815 0.7} {Mercury 0.055 0.4}]\n}\n"
  },
  {
    "path": "sort/example_multi_test.go",
    "content": "// Copyright 2013 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage sort_test\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n)\n\n// A Change is a record of source code changes, recording user, language, and delta size.\ntype Change struct {\n\tuser     string\n\tlanguage string\n\tlines    int\n}\n\ntype lessFunc func(p1, p2 *Change) bool\n\n// multiSorter implements the Sort interface, sorting the changes within.\ntype multiSorter struct {\n\tchanges []Change\n\tless    []lessFunc\n}\n\n// Sort sorts the argument slice according to the less functions passed to OrderedBy.\nfunc (ms *multiSorter) Sort(changes []Change) {\n\tms.changes = changes\n\tsort.Stable(ms)\n}\n\n// OrderedBy returns a Sorter that sorts using the less functions, in order.\n// Call its Sort method to sort the data.\nfunc OrderedBy(less ...lessFunc) *multiSorter {\n\treturn &multiSorter{\n\t\tless: less,\n\t}\n}\n\n// Len is part of sort.Interface.\nfunc (ms *multiSorter) Len() int {\n\treturn len(ms.changes)\n}\n\n// Swap is part of sort.Interface.\nfunc (ms *multiSorter) Swap(i, j int) {\n\tms.changes[i], ms.changes[j] = ms.changes[j], ms.changes[i]\n}\n\n// Less is part of sort.Interface. It is implemented by looping along the\n// less functions until it finds a comparison that is either Less or\n// !Less. Note that it can call the less functions twice per call. 
We\n// could change the functions to return -1, 0, 1 and reduce the\n// number of calls for greater efficiency: an exercise for the reader.\nfunc (ms *multiSorter) Less(i, j int) bool {\n\tp, q := &ms.changes[i], &ms.changes[j]\n\t// Try all but the last comparison.\n\tvar k int\n\tfor k = 0; k < len(ms.less)-1; k++ {\n\t\tless := ms.less[k]\n\t\tswitch {\n\t\tcase less(p, q):\n\t\t\t// p < q, so we have a decision.\n\t\t\treturn true\n\t\tcase less(q, p):\n\t\t\t// p > q, so we have a decision.\n\t\t\treturn false\n\t\t}\n\t\t// p == q; try the next comparison.\n\t}\n\t// All comparisons to here said \"equal\", so just return whatever\n\t// the final comparison reports.\n\treturn ms.less[k](p, q)\n}\n\nvar changes = []Change{\n\t{\"gri\", \"Go\", 100},\n\t{\"ken\", \"C\", 150},\n\t{\"glenda\", \"Go\", 200},\n\t{\"rsc\", \"Go\", 200},\n\t{\"r\", \"Go\", 100},\n\t{\"ken\", \"Go\", 200},\n\t{\"dmr\", \"C\", 100},\n\t{\"r\", \"C\", 150},\n\t{\"gri\", \"Smalltalk\", 80},\n}\n\n// ExampleMultiKeys demonstrates a technique for sorting a struct type using different\n// sets of multiple fields in the comparison. 
We chain together \"Less\" functions, each of\n// which compares a single field.\nfunc Example_sortMultiKeys() {\n\t// Closures that order the Change structure.\n\tuser := func(c1, c2 *Change) bool {\n\t\treturn c1.user < c2.user\n\t}\n\tlanguage := func(c1, c2 *Change) bool {\n\t\treturn c1.language < c2.language\n\t}\n\tincreasingLines := func(c1, c2 *Change) bool {\n\t\treturn c1.lines < c2.lines\n\t}\n\tdecreasingLines := func(c1, c2 *Change) bool {\n\t\treturn c1.lines > c2.lines // Note: > orders downwards.\n\t}\n\n\t// Simple use: Sort by user.\n\tOrderedBy(user).Sort(changes)\n\tfmt.Println(\"By user:\", changes)\n\n\t// More examples.\n\tOrderedBy(user, increasingLines).Sort(changes)\n\tfmt.Println(\"By user,<lines:\", changes)\n\n\tOrderedBy(user, decreasingLines).Sort(changes)\n\tfmt.Println(\"By user,>lines:\", changes)\n\n\tOrderedBy(language, increasingLines).Sort(changes)\n\tfmt.Println(\"By language,<lines:\", changes)\n\n\tOrderedBy(language, increasingLines, user).Sort(changes)\n\tfmt.Println(\"By language,<lines,user:\", changes)\n\n\t// Output:\n\t// By user: [{dmr C 100} {glenda Go 200} {gri Go 100} {gri Smalltalk 80} {ken C 150} {ken Go 200} {r Go 100} {r C 150} {rsc Go 200}]\n\t// By user,<lines: [{dmr C 100} {glenda Go 200} {gri Smalltalk 80} {gri Go 100} {ken C 150} {ken Go 200} {r Go 100} {r C 150} {rsc Go 200}]\n\t// By user,>lines: [{dmr C 100} {glenda Go 200} {gri Go 100} {gri Smalltalk 80} {ken Go 200} {ken C 150} {r C 150} {r Go 100} {rsc Go 200}]\n\t// By language,<lines: [{dmr C 100} {ken C 150} {r C 150} {gri Go 100} {r Go 100} {glenda Go 200} {ken Go 200} {rsc Go 200} {gri Smalltalk 80}]\n\t// By language,<lines,user: [{dmr C 100} {ken C 150} {r C 150} {gri Go 100} {r Go 100} {glenda Go 200} {ken Go 200} {rsc Go 200} {gri Smalltalk 80}]\n}\n"
  },
  {
    "path": "sort/example_slice_test.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage sort_test\n\nimport (\n\t\"fmt\"\n\n\t\"go4.org/sort\"\n)\n\nfunc Example() {\n\tpeople := []Person{\n\t\t{Name: \"Bob\", Age: 31},\n\t\t{Name: \"John\", Age: 42},\n\t\t{Name: \"Michael\", Age: 17},\n\t\t{Name: \"Jenny\", Age: 26},\n\t}\n\n\tfmt.Println(people)\n\tsort.Slice(people, func(i, j int) bool { return people[i].Age < people[j].Age })\n\tfmt.Println(people)\n\n\t// Output:\n\t// [Bob: 31 John: 42 Michael: 17 Jenny: 26]\n\t// [Michael: 17 Jenny: 26 Bob: 31 John: 42]\n}\n\nfunc ExampleSlice() {\n\tpeople := []Person{\n\t\t{Name: \"Bob\", Age: 31},\n\t\t{Name: \"John\", Age: 42},\n\t\t{Name: \"Michael\", Age: 17},\n\t\t{Name: \"Jenny\", Age: 26},\n\t}\n\n\tfmt.Println(people)\n\tsort.Slice(people, func(i, j int) bool { return people[i].Age < people[j].Age })\n\tfmt.Println(people)\n\n\t// Output:\n\t// [Bob: 31 John: 42 Michael: 17 Jenny: 26]\n\t// [Michael: 17 Jenny: 26 Bob: 31 John: 42]\n}\n"
  },
  {
    "path": "sort/example_test.go",
    "content": "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage sort_test\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n)\n\nfunc ExampleInts() {\n\ts := []int{5, 2, 6, 3, 1, 4} // unsorted\n\tsort.Ints(s)\n\tfmt.Println(s)\n\t// Output: [1 2 3 4 5 6]\n}\n\nfunc ExampleReverse() {\n\ts := []int{5, 2, 6, 3, 1, 4} // unsorted\n\tsort.Sort(sort.Reverse(sort.IntSlice(s)))\n\tfmt.Println(s)\n\t// Output: [6 5 4 3 2 1]\n}\n"
  },
  {
    "path": "sort/example_wrapper_test.go",
    "content": "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage sort_test\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n)\n\ntype Grams int\n\nfunc (g Grams) String() string { return fmt.Sprintf(\"%dg\", int(g)) }\n\ntype Organ struct {\n\tName   string\n\tWeight Grams\n}\n\ntype Organs []*Organ\n\nfunc (s Organs) Len() int      { return len(s) }\nfunc (s Organs) Swap(i, j int) { s[i], s[j] = s[j], s[i] }\n\n// ByName implements sort.Interface by providing Less and using the Len and\n// Swap methods of the embedded Organs value.\ntype ByName struct{ Organs }\n\nfunc (s ByName) Less(i, j int) bool { return s.Organs[i].Name < s.Organs[j].Name }\n\n// ByWeight implements sort.Interface by providing Less and using the Len and\n// Swap methods of the embedded Organs value.\ntype ByWeight struct{ Organs }\n\nfunc (s ByWeight) Less(i, j int) bool { return s.Organs[i].Weight < s.Organs[j].Weight }\n\nfunc Example_sortWrapper() {\n\ts := []*Organ{\n\t\t{\"brain\", 1340},\n\t\t{\"heart\", 290},\n\t\t{\"liver\", 1494},\n\t\t{\"pancreas\", 131},\n\t\t{\"prostate\", 62},\n\t\t{\"spleen\", 162},\n\t}\n\n\tsort.Sort(ByWeight{s})\n\tfmt.Println(\"Organs by weight:\")\n\tprintOrgans(s)\n\n\tsort.Sort(ByName{s})\n\tfmt.Println(\"Organs by name:\")\n\tprintOrgans(s)\n\n\t// Output:\n\t// Organs by weight:\n\t// prostate (62g)\n\t// pancreas (131g)\n\t// spleen   (162g)\n\t// heart    (290g)\n\t// brain    (1340g)\n\t// liver    (1494g)\n\t// Organs by name:\n\t// brain    (1340g)\n\t// heart    (290g)\n\t// liver    (1494g)\n\t// pancreas (131g)\n\t// prostate (62g)\n\t// spleen   (162g)\n}\n\nfunc printOrgans(s []*Organ) {\n\tfor _, o := range s {\n\t\tfmt.Printf(\"%-8s (%v)\\n\", o.Name, o.Weight)\n\t}\n}\n"
  },
  {
    "path": "sort/export_test.go",
    "content": "// Copyright 2011 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage sort\n\nfunc Heapsort(data Interface) {\n\theapSort(data, 0, data.Len())\n}\n"
  },
  {
    "path": "sort/genzfunc.go",
    "content": "// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n//go:build ignore\n\n// This program is run via \"go generate\" (via a directive in sort.go)\n// to generate zfuncversion.go.\n//\n// It copies sort.go to zfuncversion.go, only retaining funcs which\n// take a \"data Interface\" parameter, and renaming each to have a\n// \"_func\" suffix and taking a \"data lessSwap\" instead. It then rewrites\n// each internal function call to the appropriate _func variants.\n\npackage main\n\nimport (\n\t\"bytes\"\n\t\"go/ast\"\n\t\"go/format\"\n\t\"go/parser\"\n\t\"go/token\"\n\t\"io/ioutil\"\n\t\"log\"\n\t\"regexp\"\n)\n\nvar fset = token.NewFileSet()\n\nfunc main() {\n\taf, err := parser.ParseFile(fset, \"sort.go\", nil, 0)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\taf.Doc = nil\n\taf.Imports = nil\n\taf.Comments = nil\n\n\tvar newDecl []ast.Decl\n\tfor _, d := range af.Decls {\n\t\tfd, ok := d.(*ast.FuncDecl)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tif fd.Recv != nil || fd.Name.IsExported() {\n\t\t\tcontinue\n\t\t}\n\t\ttyp := fd.Type\n\t\tif len(typ.Params.List) < 1 {\n\t\t\tcontinue\n\t\t}\n\t\targ0 := typ.Params.List[0]\n\t\targ0Name := arg0.Names[0].Name\n\t\targ0Type := arg0.Type.(*ast.Ident)\n\t\tif arg0Name != \"data\" || arg0Type.Name != \"Interface\" {\n\t\t\tcontinue\n\t\t}\n\t\targ0Type.Name = \"lessSwap\"\n\n\t\tnewDecl = append(newDecl, fd)\n\t}\n\taf.Decls = newDecl\n\tast.Walk(visitFunc(rewriteCalls), af)\n\n\tvar out bytes.Buffer\n\tif err := format.Node(&out, fset, af); err != nil {\n\t\tlog.Fatalf(\"format.Node: %v\", err)\n\t}\n\n\t// Get rid of blank lines after removal of comments.\n\tsrc := regexp.MustCompile(`\\n{2,}`).ReplaceAll(out.Bytes(), []byte(\"\\n\"))\n\n\t// Add comments to each func, for the lost reader.\n\t// This is so much easier than adding comments via the AST\n\t// and trying to get position info 
correct.\n\tsrc = regexp.MustCompile(`(?m)^func (\\w+)`).ReplaceAll(src, []byte(\"\\n// Auto-generated variant of sort.go:$1\\nfunc ${1}_func\"))\n\n\t// Final gofmt.\n\tsrc, err = format.Source(src)\n\tif err != nil {\n\t\tlog.Fatalf(\"format.Source: %v on\\n%s\", err, src)\n\t}\n\n\tout.Reset()\n\tout.WriteString(`// DO NOT EDIT; AUTO-GENERATED from sort.go using genzfunc.go\n\n// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n`)\n\tout.Write(src)\n\n\tconst target = \"zfuncversion.go\"\n\tif err := ioutil.WriteFile(target, out.Bytes(), 0644); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\ntype visitFunc func(ast.Node) ast.Visitor\n\nfunc (f visitFunc) Visit(n ast.Node) ast.Visitor { return f(n) }\n\nfunc rewriteCalls(n ast.Node) ast.Visitor {\n\tce, ok := n.(*ast.CallExpr)\n\tif ok {\n\t\trewriteCall(ce)\n\t}\n\treturn visitFunc(rewriteCalls)\n}\n\nfunc rewriteCall(ce *ast.CallExpr) {\n\tident, ok := ce.Fun.(*ast.Ident)\n\tif !ok {\n\t\t// e.g. skip SelectorExpr (data.Less(..) calls)\n\t\treturn\n\t}\n\tif len(ce.Args) < 1 {\n\t\treturn\n\t}\n\tident.Name += \"_func\"\n}\n"
  },
  {
    "path": "sort/search.go",
    "content": "// Copyright 2010 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// This file implements binary search.\n\npackage sort\n\n// Search uses binary search to find and return the smallest index i\n// in [0, n) at which f(i) is true, assuming that on the range [0, n),\n// f(i) == true implies f(i+1) == true. That is, Search requires that\n// f is false for some (possibly empty) prefix of the input range [0, n)\n// and then true for the (possibly empty) remainder; Search returns\n// the first true index. If there is no such index, Search returns n.\n// (Note that the \"not found\" return value is not -1 as in, for instance,\n// strings.Index.)\n// Search calls f(i) only for i in the range [0, n).\n//\n// A common use of Search is to find the index i for a value x in\n// a sorted, indexable data structure such as an array or slice.\n// In this case, the argument f, typically a closure, captures the value\n// to be searched for, and how the data structure is indexed and\n// ordered.\n//\n// For instance, given a slice data sorted in ascending order,\n// the call Search(len(data), func(i int) bool { return data[i] >= 23 })\n// returns the smallest index i such that data[i] >= 23.  
If the caller\n// wants to find whether 23 is in the slice, it must test data[i] == 23\n// separately.\n//\n// Searching data sorted in descending order would use the <=\n// operator instead of the >= operator.\n//\n// To complete the example above, the following code tries to find the value\n// x in an integer slice data sorted in ascending order:\n//\n//\tx := 23\n//\ti := sort.Search(len(data), func(i int) bool { return data[i] >= x })\n//\tif i < len(data) && data[i] == x {\n//\t\t// x is present at data[i]\n//\t} else {\n//\t\t// x is not present in data,\n//\t\t// but i is the index where it would be inserted.\n//\t}\n//\n// As a more whimsical example, this program guesses your number:\n//\n//\tfunc GuessingGame() {\n//\t\tvar s string\n//\t\tfmt.Printf(\"Pick an integer from 0 to 100.\\n\")\n//\t\tanswer := sort.Search(100, func(i int) bool {\n//\t\t\tfmt.Printf(\"Is your number <= %d? \", i)\n//\t\t\tfmt.Scanf(\"%s\", &s)\n//\t\t\treturn s != \"\" && s[0] == 'y'\n//\t\t})\n//\t\tfmt.Printf(\"Your number is %d.\\n\", answer)\n//\t}\n//\nfunc Search(n int, f func(int) bool) int {\n\t// Define f(-1) == false and f(n) == true.\n\t// Invariant: f(i-1) == false, f(j) == true.\n\ti, j := 0, n\n\tfor i < j {\n\t\th := i + (j-i)/2 // avoid overflow when computing h\n\t\t// i ≤ h < j\n\t\tif !f(h) {\n\t\t\ti = h + 1 // preserves f(i-1) == false\n\t\t} else {\n\t\t\tj = h // preserves f(j) == true\n\t\t}\n\t}\n\t// i == j, f(i-1) == false, and f(j) (= f(i)) == true  =>  answer is i.\n\treturn i\n}\n\n// Convenience wrappers for common cases.\n\n// SearchInts searches for x in a sorted slice of ints and returns the index\n// as specified by Search. 
The return value is the index to insert x if x is\n// not present (it could be len(a)).\n// The slice must be sorted in ascending order.\n//\nfunc SearchInts(a []int, x int) int {\n\treturn Search(len(a), func(i int) bool { return a[i] >= x })\n}\n\n// SearchFloat64s searches for x in a sorted slice of float64s and returns the index\n// as specified by Search. The return value is the index to insert x if x is not\n// present (it could be len(a)).\n// The slice must be sorted in ascending order.\n//\nfunc SearchFloat64s(a []float64, x float64) int {\n\treturn Search(len(a), func(i int) bool { return a[i] >= x })\n}\n\n// SearchStrings searches for x in a sorted slice of strings and returns the index\n// as specified by Search. The return value is the index to insert x if x is not\n// present (it could be len(a)).\n// The slice must be sorted in ascending order.\n//\nfunc SearchStrings(a []string, x string) int {\n\treturn Search(len(a), func(i int) bool { return a[i] >= x })\n}\n\n// Search returns the result of applying SearchInts to the receiver and x.\nfunc (p IntSlice) Search(x int) int { return SearchInts(p, x) }\n\n// Search returns the result of applying SearchFloat64s to the receiver and x.\nfunc (p Float64Slice) Search(x float64) int { return SearchFloat64s(p, x) }\n\n// Search returns the result of applying SearchStrings to the receiver and x.\nfunc (p StringSlice) Search(x string) int { return SearchStrings(p, x) }\n"
  },
  {
    "path": "sort/search_test.go",
    "content": "// Copyright 2010 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage sort_test\n\nimport (\n\t\"runtime\"\n\t. \"sort\"\n\t\"testing\"\n)\n\nfunc f(a []int, x int) func(int) bool {\n\treturn func(i int) bool {\n\t\treturn a[i] >= x\n\t}\n}\n\nvar data = []int{0: -10, 1: -5, 2: 0, 3: 1, 4: 2, 5: 3, 6: 5, 7: 7, 8: 11, 9: 100, 10: 100, 11: 100, 12: 1000, 13: 10000}\n\nvar tests = []struct {\n\tname string\n\tn    int\n\tf    func(int) bool\n\ti    int\n}{\n\t{\"empty\", 0, nil, 0},\n\t{\"1 1\", 1, func(i int) bool { return i >= 1 }, 1},\n\t{\"1 true\", 1, func(i int) bool { return true }, 0},\n\t{\"1 false\", 1, func(i int) bool { return false }, 1},\n\t{\"1e9 991\", 1e9, func(i int) bool { return i >= 991 }, 991},\n\t{\"1e9 true\", 1e9, func(i int) bool { return true }, 0},\n\t{\"1e9 false\", 1e9, func(i int) bool { return false }, 1e9},\n\t{\"data -20\", len(data), f(data, -20), 0},\n\t{\"data -10\", len(data), f(data, -10), 0},\n\t{\"data -9\", len(data), f(data, -9), 1},\n\t{\"data -6\", len(data), f(data, -6), 1},\n\t{\"data -5\", len(data), f(data, -5), 1},\n\t{\"data 3\", len(data), f(data, 3), 5},\n\t{\"data 11\", len(data), f(data, 11), 8},\n\t{\"data 99\", len(data), f(data, 99), 9},\n\t{\"data 100\", len(data), f(data, 100), 9},\n\t{\"data 101\", len(data), f(data, 101), 12},\n\t{\"data 10000\", len(data), f(data, 10000), 13},\n\t{\"data 10001\", len(data), f(data, 10001), 14},\n\t{\"descending a\", 7, func(i int) bool { return []int{99, 99, 59, 42, 7, 0, -1, -1}[i] <= 7 }, 4},\n\t{\"descending 7\", 1e9, func(i int) bool { return 1e9-i <= 7 }, 1e9 - 7},\n\t{\"overflow\", 2e9, func(i int) bool { return false }, 2e9},\n}\n\nfunc TestSearch(t *testing.T) {\n\tfor _, e := range tests {\n\t\ti := Search(e.n, e.f)\n\t\tif i != e.i {\n\t\t\tt.Errorf(\"%s: expected index %d; got %d\", e.name, e.i, i)\n\t\t}\n\t}\n}\n\n// log2 computes the binary 
logarithm of x, rounded up to the next integer.\n// (log2(0) == 0, log2(1) == 0, log2(2) == 1, log2(3) == 2, etc.)\n//\nfunc log2(x int) int {\n\tn := 0\n\tfor p := 1; p < x; p += p {\n\t\t// p == 2**n\n\t\tn++\n\t}\n\t// p/2 < x <= p == 2**n\n\treturn n\n}\n\nfunc TestSearchEfficiency(t *testing.T) {\n\tn := 100\n\tstep := 1\n\tfor exp := 2; exp < 10; exp++ {\n\t\t// n == 10**exp\n\t\t// step == 10**(exp-2)\n\t\tmax := log2(n)\n\t\tfor x := 0; x < n; x += step {\n\t\t\tcount := 0\n\t\t\ti := Search(n, func(i int) bool { count++; return i >= x })\n\t\t\tif i != x {\n\t\t\t\tt.Errorf(\"n = %d: expected index %d; got %d\", n, x, i)\n\t\t\t}\n\t\t\tif count > max {\n\t\t\t\tt.Errorf(\"n = %d, x = %d: expected <= %d calls; got %d\", n, x, max, count)\n\t\t\t}\n\t\t}\n\t\tn *= 10\n\t\tstep *= 10\n\t}\n}\n\n// Smoke tests for convenience wrappers - not comprehensive.\n\nvar fdata = []float64{0: -3.14, 1: 0, 2: 1, 3: 2, 4: 1000.7}\nvar sdata = []string{0: \"f\", 1: \"foo\", 2: \"foobar\", 3: \"x\"}\n\nvar wrappertests = []struct {\n\tname   string\n\tresult int\n\ti      int\n}{\n\t{\"SearchInts\", SearchInts(data, 11), 8},\n\t{\"SearchFloat64s\", SearchFloat64s(fdata, 2.1), 4},\n\t{\"SearchStrings\", SearchStrings(sdata, \"\"), 0},\n\t{\"IntSlice.Search\", IntSlice(data).Search(0), 2},\n\t{\"Float64Slice.Search\", Float64Slice(fdata).Search(2.0), 3},\n\t{\"StringSlice.Search\", StringSlice(sdata).Search(\"x\"), 3},\n}\n\nfunc TestSearchWrappers(t *testing.T) {\n\tfor _, e := range wrappertests {\n\t\tif e.result != e.i {\n\t\t\tt.Errorf(\"%s: expected index %d; got %d\", e.name, e.i, e.result)\n\t\t}\n\t}\n}\n\nfunc runSearchWrappers() {\n\tSearchInts(data, 11)\n\tSearchFloat64s(fdata, 2.1)\n\tSearchStrings(sdata, \"\")\n\tIntSlice(data).Search(0)\n\tFloat64Slice(fdata).Search(2.0)\n\tStringSlice(sdata).Search(\"x\")\n}\n\nfunc TestSearchWrappersDontAlloc(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipping malloc count in short mode\")\n\t}\n\tif 
runtime.GOMAXPROCS(0) > 1 {\n\t\tt.Skip(\"skipping; GOMAXPROCS>1\")\n\t}\n\tallocs := testing.AllocsPerRun(100, runSearchWrappers)\n\tif allocs != 0 {\n\t\tt.Errorf(\"expected no allocs for runSearchWrappers, got %v\", allocs)\n\t}\n}\n\nfunc BenchmarkSearchWrappers(b *testing.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\trunSearchWrappers()\n\t}\n}\n\n// Abstract exhaustive test: all sizes up to 100,\n// all possible return values. If there are any small\n// corner cases, this test exercises them.\nfunc TestSearchExhaustive(t *testing.T) {\n\tfor size := 0; size <= 100; size++ {\n\t\tfor targ := 0; targ <= size; targ++ {\n\t\t\ti := Search(size, func(i int) bool { return i >= targ })\n\t\t\tif i != targ {\n\t\t\t\tt.Errorf(\"Search(%d, %d) = %d\", size, targ, i)\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "sort/sort.go",
    "content": "// Copyright 2009 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n//go:generate go run genzfunc.go\n\n// Package sort provides primitives for sorting slices and user-defined\n// collections.\n//\n// This is a copy of the Go standard library's sort package with the\n// addition of some helpers for sorting slices and using func literals\n// to sort, rather than having to create a sorter type. See the\n// additional MakeInterface, SliceSorter, and Slice functions.\n// Discussion of moving such helpers into the standard library is\n// at:\n//\n//     https://golang.org/issue/16721\n//\n// Per Go's \"no +1 policy\", please only leave a comment on that issue\n// if you have something unique to add. Use Github's emoji reactions\n// otherwise.\npackage sort\n\nimport (\n\t\"reflect\"\n\n\t\"go4.org/reflectutil\"\n)\n\n// A type, typically a collection, that satisfies sort.Interface can be\n// sorted by the routines in this package. 
The methods require that the\n// elements of the collection be enumerated by an integer index.\ntype Interface interface {\n\t// Len is the number of elements in the collection.\n\tLen() int\n\t// Less reports whether the element with\n\t// index i should sort before the element with index j.\n\tLess(i, j int) bool\n\t// Swap swaps the elements with indexes i and j.\n\tSwap(i, j int)\n}\n\n// lessSwap is a pair of Less and Swap function for use with the\n// auto-generated func-optimized variant of sort.go in\n// zfuncversion.go.\ntype lessSwap struct {\n\tLess func(i, j int) bool\n\tSwap func(i, j int)\n}\n\n// MakeInterface returns a sort Interface using the provided length\n// and pair of swap and less functions.\nfunc MakeInterface(length int, swap func(i, j int), less func(i, j int) bool) Interface {\n\treturn &funcs{length, lessSwap{less, swap}}\n}\n\n// SliceSorter returns a sort.Interface to sort the provided slice\n// using the provided less function.\n// If the provided interface is not a slice, the function panics.\nfunc SliceSorter(slice interface{}, less func(i, j int) bool) Interface {\n\treturn MakeInterface(reflect.ValueOf(slice).Len(), reflectutil.Swapper(slice), less)\n}\n\n// Slice sorts the provided slice using less.\n// If the provided interface is not a slice, the function panics.\n// The sort is not stable. 
For a stable sort, use sort.Stable with sort.SliceSorter.\nfunc Slice(slice interface{}, less func(i, j int) bool) {\n\tSort(SliceSorter(slice, less))\n}\n\n// funcs implements Interface, but is recognized by Sort and Stable\n// which use its lessSwap field with the non-interface sorting\n// routines in zfuncversion.go.\ntype funcs struct {\n\tlength int\n\tlessSwap\n}\n\nfunc (f *funcs) Len() int           { return f.length }\nfunc (f *funcs) Swap(i, j int)      { f.lessSwap.Swap(i, j) }\nfunc (f *funcs) Less(i, j int) bool { return f.lessSwap.Less(i, j) }\n\n// Insertion sort\nfunc insertionSort(data Interface, a, b int) {\n\tfor i := a + 1; i < b; i++ {\n\t\tfor j := i; j > a && data.Less(j, j-1); j-- {\n\t\t\tdata.Swap(j, j-1)\n\t\t}\n\t}\n}\n\n// siftDown implements the heap property on data[lo, hi).\n// first is an offset into the array where the root of the heap lies.\nfunc siftDown(data Interface, lo, hi, first int) {\n\troot := lo\n\tfor {\n\t\tchild := 2*root + 1\n\t\tif child >= hi {\n\t\t\tbreak\n\t\t}\n\t\tif child+1 < hi && data.Less(first+child, first+child+1) {\n\t\t\tchild++\n\t\t}\n\t\tif !data.Less(first+root, first+child) {\n\t\t\treturn\n\t\t}\n\t\tdata.Swap(first+root, first+child)\n\t\troot = child\n\t}\n}\n\nfunc heapSort(data Interface, a, b int) {\n\tfirst := a\n\tlo := 0\n\thi := b - a\n\n\t// Build heap with greatest element at top.\n\tfor i := (hi - 1) / 2; i >= 0; i-- {\n\t\tsiftDown(data, i, hi, first)\n\t}\n\n\t// Pop elements, largest first, into end of data.\n\tfor i := hi - 1; i >= 0; i-- {\n\t\tdata.Swap(first, first+i)\n\t\tsiftDown(data, lo, i, first)\n\t}\n}\n\n// Quicksort, loosely following Bentley and McIlroy,\n// ``Engineering a Sort Function,'' SP&E November 1993.\n\n// medianOfThree moves the median of the three values data[m0], data[m1], data[m2] into data[m1].\nfunc medianOfThree(data Interface, m1, m0, m2 int) {\n\t// sort 3 elements\n\tif data.Less(m1, m0) {\n\t\tdata.Swap(m1, m0)\n\t}\n\t// data[m0] <= 
data[m1]\n\tif data.Less(m2, m1) {\n\t\tdata.Swap(m2, m1)\n\t\t// data[m0] <= data[m2] && data[m1] < data[m2]\n\t\tif data.Less(m1, m0) {\n\t\t\tdata.Swap(m1, m0)\n\t\t}\n\t}\n\t// now data[m0] <= data[m1] <= data[m2]\n}\n\nfunc swapRange(data Interface, a, b, n int) {\n\tfor i := 0; i < n; i++ {\n\t\tdata.Swap(a+i, b+i)\n\t}\n}\n\nfunc doPivot(data Interface, lo, hi int) (midlo, midhi int) {\n\tm := lo + (hi-lo)/2 // Written like this to avoid integer overflow.\n\tif hi-lo > 40 {\n\t\t// Tukey's ``Ninther,'' median of three medians of three.\n\t\ts := (hi - lo) / 8\n\t\tmedianOfThree(data, lo, lo+s, lo+2*s)\n\t\tmedianOfThree(data, m, m-s, m+s)\n\t\tmedianOfThree(data, hi-1, hi-1-s, hi-1-2*s)\n\t}\n\tmedianOfThree(data, lo, m, hi-1)\n\n\t// Invariants are:\n\t//\tdata[lo] = pivot (set up by ChoosePivot)\n\t//\tdata[lo < i < a] < pivot\n\t//\tdata[a <= i < b] <= pivot\n\t//\tdata[b <= i < c] unexamined\n\t//\tdata[c <= i < hi-1] > pivot\n\t//\tdata[hi-1] >= pivot\n\tpivot := lo\n\ta, c := lo+1, hi-1\n\n\tfor ; a < c && data.Less(a, pivot); a++ {\n\t}\n\tb := a\n\tfor {\n\t\tfor ; b < c && !data.Less(pivot, b); b++ { // data[b] <= pivot\n\t\t}\n\t\tfor ; b < c && data.Less(pivot, c-1); c-- { // data[c-1] > pivot\n\t\t}\n\t\tif b >= c {\n\t\t\tbreak\n\t\t}\n\t\t// data[b] > pivot; data[c-1] <= pivot\n\t\tdata.Swap(b, c-1)\n\t\tb++\n\t\tc--\n\t}\n\t// If hi-c<3 then there are duplicates (by property of median of nine).\n\t// Let be a bit more conservative, and set border to 5.\n\tprotect := hi-c < 5\n\tif !protect && hi-c < (hi-lo)/4 {\n\t\t// Lets test some points for equality to pivot\n\t\tdups := 0\n\t\tif !data.Less(pivot, hi-1) { // data[hi-1] = pivot\n\t\t\tdata.Swap(c, hi-1)\n\t\t\tc++\n\t\t\tdups++\n\t\t}\n\t\tif !data.Less(b-1, pivot) { // data[b-1] = pivot\n\t\t\tb--\n\t\t\tdups++\n\t\t}\n\t\t// m-lo = (hi-lo)/2 > 6\n\t\t// b-lo > (hi-lo)*3/4-1 > 8\n\t\t// ==> m < b ==> data[m] <= pivot\n\t\tif !data.Less(m, pivot) { // data[m] = pivot\n\t\t\tdata.Swap(m, 
b-1)\n\t\t\tb--\n\t\t\tdups++\n\t\t}\n\t\t// if at least 2 points are equal to pivot, assume skewed distribution\n\t\tprotect = dups > 1\n\t}\n\tif protect {\n\t\t// Protect against a lot of duplicates\n\t\t// Add invariant:\n\t\t//\tdata[a <= i < b] unexamined\n\t\t//\tdata[b <= i < c] = pivot\n\t\tfor {\n\t\t\tfor ; a < b && !data.Less(b-1, pivot); b-- { // data[b] == pivot\n\t\t\t}\n\t\t\tfor ; a < b && data.Less(a, pivot); a++ { // data[a] < pivot\n\t\t\t}\n\t\t\tif a >= b {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// data[a] == pivot; data[b-1] < pivot\n\t\t\tdata.Swap(a, b-1)\n\t\t\ta++\n\t\t\tb--\n\t\t}\n\t}\n\t// Swap pivot into middle\n\tdata.Swap(pivot, b-1)\n\treturn b - 1, c\n}\n\nfunc quickSort(data Interface, a, b, maxDepth int) {\n\tfor b-a > 12 { // Use ShellSort for slices <= 12 elements\n\t\tif maxDepth == 0 {\n\t\t\theapSort(data, a, b)\n\t\t\treturn\n\t\t}\n\t\tmaxDepth--\n\t\tmlo, mhi := doPivot(data, a, b)\n\t\t// Avoiding recursion on the larger subproblem guarantees\n\t\t// a stack depth of at most lg(b-a).\n\t\tif mlo-a < b-mhi {\n\t\t\tquickSort(data, a, mlo, maxDepth)\n\t\t\ta = mhi // i.e., quickSort(data, mhi, b)\n\t\t} else {\n\t\t\tquickSort(data, mhi, b, maxDepth)\n\t\t\tb = mlo // i.e., quickSort(data, a, mlo)\n\t\t}\n\t}\n\tif b-a > 1 {\n\t\t// Do ShellSort pass with gap 6\n\t\t// It could be written in this simplified form cause b-a <= 12\n\t\tfor i := a + 6; i < b; i++ {\n\t\t\tif data.Less(i, i-6) {\n\t\t\t\tdata.Swap(i, i-6)\n\t\t\t}\n\t\t}\n\t\tinsertionSort(data, a, b)\n\t}\n}\n\n// Sort sorts data.\n//\n// It makes one call to data.Len to determine n, and O(n*log(n)) calls to\n// data.Less and data.Swap. 
The sort is not guaranteed to be stable.\n//\n// To sort slices without creating a type, see Slice.\nfunc Sort(data Interface) {\n\tn := data.Len()\n\tif fs, ok := data.(*funcs); ok {\n\t\tquickSort_func(fs.lessSwap, 0, n, maxDepth(n))\n\t} else {\n\t\tquickSort(data, 0, n, maxDepth(n))\n\t}\n}\n\n// With sorts data given the provided length, swap, and less\n// functions.\n// The sort is not guaranteed to be stable.\nfunc With(length int, swap func(i, j int), less func(i, j int) bool) {\n\tquickSort_func(lessSwap{less, swap}, 0, length, maxDepth(length))\n}\n\n// maxDepth returns a threshold at which quicksort should switch\n// to heapsort. It returns 2*ceil(lg(n+1)).\nfunc maxDepth(n int) int {\n\tvar depth int\n\tfor i := n; i > 0; i >>= 1 {\n\t\tdepth++\n\t}\n\treturn depth * 2\n}\n\ntype reverse struct {\n\t// This embedded Interface permits Reverse to use the methods of\n\t// another Interface implementation.\n\tInterface\n}\n\n// Less returns the opposite of the embedded implementation's Less method.\nfunc (r reverse) Less(i, j int) bool {\n\treturn r.Interface.Less(j, i)\n}\n\n// Reverse returns the reverse order for data.\nfunc Reverse(data Interface) Interface {\n\treturn &reverse{data}\n}\n\n// IsSorted reports whether data is sorted.\nfunc IsSorted(data Interface) bool {\n\tn := data.Len()\n\tfor i := n - 1; i > 0; i-- {\n\t\tif data.Less(i, i-1) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// Convenience types for common cases\n\n// IntSlice attaches the methods of Interface to []int, sorting in increasing order.\ntype IntSlice []int\n\nfunc (p IntSlice) Len() int           { return len(p) }\nfunc (p IntSlice) Less(i, j int) bool { return p[i] < p[j] }\nfunc (p IntSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }\n\n// Sort is a convenience method.\nfunc (p IntSlice) Sort() { Sort(p) }\n\n// Float64Slice attaches the methods of Interface to []float64, sorting in increasing order.\ntype Float64Slice []float64\n\nfunc (p Float64Slice) Len() 
int           { return len(p) }\nfunc (p Float64Slice) Less(i, j int) bool { return p[i] < p[j] || isNaN(p[i]) && !isNaN(p[j]) }\nfunc (p Float64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }\n\n// isNaN is a copy of math.IsNaN to avoid a dependency on the math package.\nfunc isNaN(f float64) bool {\n\treturn f != f\n}\n\n// Sort is a convenience method.\nfunc (p Float64Slice) Sort() { Sort(p) }\n\n// StringSlice attaches the methods of Interface to []string, sorting in increasing order.\ntype StringSlice []string\n\nfunc (p StringSlice) Len() int           { return len(p) }\nfunc (p StringSlice) Less(i, j int) bool { return p[i] < p[j] }\nfunc (p StringSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }\n\n// Sort is a convenience method.\nfunc (p StringSlice) Sort() { Sort(p) }\n\n// Convenience wrappers for common cases\n\n// Ints sorts a slice of ints in increasing order.\nfunc Ints(a []int) { Sort(IntSlice(a)) }\n\n// Float64s sorts a slice of float64s in increasing order.\nfunc Float64s(a []float64) { Sort(Float64Slice(a)) }\n\n// Strings sorts a slice of strings in increasing order.\nfunc Strings(a []string) { Sort(StringSlice(a)) }\n\n// IntsAreSorted tests whether a slice of ints is sorted in increasing order.\nfunc IntsAreSorted(a []int) bool { return IsSorted(IntSlice(a)) }\n\n// Float64sAreSorted tests whether a slice of float64s is sorted in increasing order.\nfunc Float64sAreSorted(a []float64) bool { return IsSorted(Float64Slice(a)) }\n\n// StringsAreSorted tests whether a slice of strings is sorted in increasing order.\nfunc StringsAreSorted(a []string) bool { return IsSorted(StringSlice(a)) }\n\n// Notes on stable sorting:\n// The used algorithms are simple and provable correct on all input and use\n// only logarithmic additional stack space. 
They perform well if compared\n// experimentally to other stable in-place sorting algorithms.\n//\n// Remarks on other algorithms evaluated:\n//  - GCC's 4.6.3 stable_sort with merge_without_buffer from libstdc++:\n//    Not faster.\n//  - GCC's __rotate for block rotations: Not faster.\n//  - \"Practical in-place mergesort\" from  Jyrki Katajainen, Tomi A. Pasanen\n//    and Jukka Teuhola; Nordic Journal of Computing 3,1 (1996), 27-40:\n//    The given algorithms are in-place, number of Swap and Assignments\n//    grow as n log n but the algorithm is not stable.\n//  - \"Fast Stable In-Place Sorting with O(n) Data Moves\" J.I. Munro and\n//    V. Raman in Algorithmica (1996) 16, 115-160:\n//    This algorithm either needs additional 2n bits or works only if there\n//    are enough different elements available to encode some permutations\n//    which have to be undone later (so not stable on any input).\n//  - All the optimal in-place sorting/merging algorithms I found are either\n//    unstable or rely on enough different elements in each step to encode the\n//    performed block rearrangements. 
See also \"In-Place Merging Algorithms\",\n//    Denham Coates-Evely, Department of Computer Science, Kings College,\n//    January 2004 and the references in there.\n//  - Often \"optimal\" algorithms are optimal in the number of assignments\n//    but Interface has only Swap as operation.\n\n// Stable sorts data while keeping the original order of equal elements.\n//\n// It makes one call to data.Len to determine n, O(n*log(n)) calls to\n// data.Less and O(n*log(n)*log(n)) calls to data.Swap.\nfunc Stable(data Interface) {\n\tif fs, ok := data.(*funcs); ok {\n\t\tstable_func(fs.lessSwap, fs.length)\n\t} else {\n\t\tstable(data, data.Len())\n\t}\n}\n\nfunc stable(data Interface, n int) {\n\tblockSize := 20 // must be > 0\n\ta, b := 0, blockSize\n\tfor b <= n {\n\t\tinsertionSort(data, a, b)\n\t\ta = b\n\t\tb += blockSize\n\t}\n\tinsertionSort(data, a, n)\n\n\tfor blockSize < n {\n\t\ta, b = 0, 2*blockSize\n\t\tfor b <= n {\n\t\t\tsymMerge(data, a, a+blockSize, b)\n\t\t\ta = b\n\t\t\tb += 2 * blockSize\n\t\t}\n\t\tif m := a + blockSize; m < n {\n\t\t\tsymMerge(data, a, m, n)\n\t\t}\n\t\tblockSize *= 2\n\t}\n}\n\n// SymMerge merges the two sorted subsequences data[a:m] and data[m:b] using\n// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, \"Stable Minimum\n// Storage Merging by Symmetric Comparisons\", in Susanne Albers and Tomasz\n// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in\n// Computer Science, pages 714-723. Springer, 2004.\n//\n// Let M = m-a and N = b-n. Wolog M < N.\n// The recursion depth is bound by ceil(log(N+M)).\n// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.\n// The algorithm needs O((M+N)*log(M)) calls to data.Swap.\n//\n// The paper gives O((M+N)*log(M)) as the number of assignments assuming a\n// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. 
The argumentation\n// in the paper carries through for Swap operations, especially as the block\n// swapping rotate uses only O(M+N) Swaps.\n//\n// symMerge assumes non-degenerate arguments: a < m && m < b.\n// Having the caller check this condition eliminates many leaf recursion calls,\n// which improves performance.\nfunc symMerge(data Interface, a, m, b int) {\n\t// Avoid unnecessary recursions of symMerge\n\t// by direct insertion of data[a] into data[m:b]\n\t// if data[a:m] only contains one element.\n\tif m-a == 1 {\n\t\t// Use binary search to find the lowest index i\n\t\t// such that data[i] >= data[a] for m <= i < b.\n\t\t// Exit the search loop with i == b in case no such index exists.\n\t\ti := m\n\t\tj := b\n\t\tfor i < j {\n\t\t\th := i + (j-i)/2\n\t\t\tif data.Less(h, a) {\n\t\t\t\ti = h + 1\n\t\t\t} else {\n\t\t\t\tj = h\n\t\t\t}\n\t\t}\n\t\t// Swap values until data[a] reaches the position before i.\n\t\tfor k := a; k < i-1; k++ {\n\t\t\tdata.Swap(k, k+1)\n\t\t}\n\t\treturn\n\t}\n\n\t// Avoid unnecessary recursions of symMerge\n\t// by direct insertion of data[m] into data[a:m]\n\t// if data[m:b] only contains one element.\n\tif b-m == 1 {\n\t\t// Use binary search to find the lowest index i\n\t\t// such that data[i] > data[m] for a <= i < m.\n\t\t// Exit the search loop with i == m in case no such index exists.\n\t\ti := a\n\t\tj := m\n\t\tfor i < j {\n\t\t\th := i + (j-i)/2\n\t\t\tif !data.Less(m, h) {\n\t\t\t\ti = h + 1\n\t\t\t} else {\n\t\t\t\tj = h\n\t\t\t}\n\t\t}\n\t\t// Swap values until data[m] reaches the position i.\n\t\tfor k := m; k > i; k-- {\n\t\t\tdata.Swap(k, k-1)\n\t\t}\n\t\treturn\n\t}\n\n\tmid := a + (b-a)/2\n\tn := mid + m\n\tvar start, r int\n\tif m > mid {\n\t\tstart = n - b\n\t\tr = mid\n\t} else {\n\t\tstart = a\n\t\tr = m\n\t}\n\tp := n - 1\n\n\tfor start < r {\n\t\tc := start + (r-start)/2\n\t\tif !data.Less(p-c, c) {\n\t\t\tstart = c + 1\n\t\t} else {\n\t\t\tr = c\n\t\t}\n\t}\n\n\tend := n - start\n\tif start < m && m < 
end {\n\t\trotate(data, start, m, end)\n\t}\n\tif a < start && start < mid {\n\t\tsymMerge(data, a, start, mid)\n\t}\n\tif mid < end && end < b {\n\t\tsymMerge(data, mid, end, b)\n\t}\n}\n\n// Rotate two consecutives blocks u = data[a:m] and v = data[m:b] in data:\n// Data of the form 'x u v y' is changed to 'x v u y'.\n// Rotate performs at most b-a many calls to data.Swap.\n// Rotate assumes non-degenerate arguments: a < m && m < b.\nfunc rotate(data Interface, a, m, b int) {\n\ti := m - a\n\tj := b - m\n\n\tfor i != j {\n\t\tif i > j {\n\t\t\tswapRange(data, m-i, m, j)\n\t\t\ti -= j\n\t\t} else {\n\t\t\tswapRange(data, m-i, m+j-i, i)\n\t\t\tj -= i\n\t\t}\n\t}\n\t// i == j\n\tswapRange(data, m-i, m, i)\n}\n\n/*\nComplexity of Stable Sorting\n\n\nComplexity of block swapping rotation\n\nEach Swap puts one new element into its correct, final position.\nElements which reach their final position are no longer moved.\nThus block swapping rotation needs |u|+|v| calls to Swaps.\nThis is best possible as each element might need a move.\n\nPay attention when comparing to other optimal algorithms which\ntypically count the number of assignments instead of swaps:\nE.g. the optimal algorithm of Dudzinski and Dydek for in-place\nrotations uses O(u + v + gcd(u,v)) assignments which is\nbetter than our O(3 * (u+v)) as gcd(u,v) <= u.\n\n\nStable sorting by SymMerge and BlockSwap rotations\n\nSymMerg complexity for same size input M = N:\nCalls to Less:  O(M*log(N/M+1)) = O(N*log(2)) = O(N)\nCalls to Swap:  O((M+N)*log(M)) = O(2*N*log(N)) = O(N*log(N))\n\n(The following argument does not fuzz over a missing -1 or\nother stuff which does not impact the final result).\n\nLet n = data.Len(). 
Assume n = 2^k.\n\nPlain merge sort performs log(n) = k iterations.\nOn iteration i the algorithm merges 2^(k-i) blocks, each of size 2^i.\n\nThus iteration i of merge sort performs:\nCalls to Less  O(2^(k-i) * 2^i) = O(2^k) = O(2^log(n)) = O(n)\nCalls to Swap  O(2^(k-i) * 2^i * log(2^i)) = O(2^k * i) = O(n*i)\n\nIn total k = log(n) iterations are performed; so in total:\nCalls to Less O(log(n) * n)\nCalls to Swap O(n + 2*n + 3*n + ... + (k-1)*n + k*n)\n   = O((k/2) * k * n) = O(n * k^2) = O(n * log^2(n))\n\n\nAbove results should generalize to arbitrary n = 2^k + p\nand should not be influenced by the initial insertion sort phase:\nInsertion sort is O(n^2) on Swap and Less, thus O(bs^2) per block of\nsize bs at n/bs blocks:  O(bs*n) Swaps and Less during insertion sort.\nMerge sort iterations start at i = log(bs). With t = log(bs) constant:\nCalls to Less O((log(n)-t) * n + bs*n) = O(log(n)*n + (bs-t)*n)\n   = O(n * log(n))\nCalls to Swap O(n * log^2(n) - (t^2+t)/2*n) = O(n * log^2(n))\n\n*/\n"
  },
  {
    "path": "sort/sort_test.go",
    "content": "// Copyright 2009 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage sort_test\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"math/rand\"\n\t\"reflect\"\n\t\"strconv\"\n\t\"testing\"\n\n\t\"go4.org/reflectutil\"\n\t. \"go4.org/sort\"\n)\n\nvar ints = [...]int{74, 59, 238, -784, 9845, 959, 905, 0, 0, 42, 7586, -5467984, 7586}\nvar float64s = [...]float64{74.3, 59.0, math.Inf(1), 238.2, -784.0, 2.3, math.NaN(), math.NaN(), math.Inf(-1), 9845.768, -959.7485, 905, 7.8, 7.8}\nvar strings = [...]string{\"\", \"Hello\", \"foo\", \"bar\", \"foo\", \"f00\", \"%*&^*&^&\", \"***\"}\n\nfunc TestSlice(t *testing.T) {\n\ts := []int{5, 4, 3, 2, 1}\n\twant := []int{1, 2, 3, 4, 5}\n\tSlice(s, func(i, j int) bool { return s[i] < s[j] })\n\tif !reflect.DeepEqual(s, want) {\n\t\tt.Errorf(\"sorted = %v; want %v\", s, want)\n\t}\n}\n\nfunc TestSortIntSlice(t *testing.T) {\n\tdata := ints\n\ta := IntSlice(data[0:])\n\tSort(a)\n\tif !IsSorted(a) {\n\t\tt.Errorf(\"sorted %v\", ints)\n\t\tt.Errorf(\"   got %v\", data)\n\t}\n}\n\nfunc TestSortFloat64Slice(t *testing.T) {\n\tdata := float64s\n\ta := Float64Slice(data[0:])\n\tSort(a)\n\tif !IsSorted(a) {\n\t\tt.Errorf(\"sorted %v\", float64s)\n\t\tt.Errorf(\"   got %v\", data)\n\t}\n}\n\nfunc TestSortStringSlice(t *testing.T) {\n\tdata := strings\n\ta := StringSlice(data[0:])\n\tSort(a)\n\tif !IsSorted(a) {\n\t\tt.Errorf(\"sorted %v\", strings)\n\t\tt.Errorf(\"   got %v\", data)\n\t}\n}\n\nfunc TestInts(t *testing.T) {\n\tdata := ints\n\tInts(data[0:])\n\tif !IntsAreSorted(data[0:]) {\n\t\tt.Errorf(\"sorted %v\", ints)\n\t\tt.Errorf(\"   got %v\", data)\n\t}\n}\n\nfunc TestFloat64s(t *testing.T) {\n\tdata := float64s\n\tFloat64s(data[0:])\n\tif !Float64sAreSorted(data[0:]) {\n\t\tt.Errorf(\"sorted %v\", float64s)\n\t\tt.Errorf(\"   got %v\", data)\n\t}\n}\n\nfunc TestStrings(t *testing.T) {\n\tdata := 
strings\n\tStrings(data[0:])\n\tif !StringsAreSorted(data[0:]) {\n\t\tt.Errorf(\"sorted %v\", strings)\n\t\tt.Errorf(\"   got %v\", data)\n\t}\n}\n\nfunc TestStringsWithSwapper(t *testing.T) {\n\tdata := strings\n\tWith(len(data), reflectutil.Swapper(data[:]), func(i, j int) bool {\n\t\treturn data[i] < data[j]\n\t})\n\tif !StringsAreSorted(data[:]) {\n\t\tt.Errorf(\"sorted %v\", strings)\n\t\tt.Errorf(\"   got %v\", data)\n\t}\n}\n\nfunc TestSortLarge_Random(t *testing.T) {\n\tn := 1000000\n\tif testing.Short() {\n\t\tn /= 100\n\t}\n\tdata := make([]int, n)\n\tfor i := 0; i < len(data); i++ {\n\t\tdata[i] = rand.Intn(100)\n\t}\n\tif IntsAreSorted(data) {\n\t\tt.Fatalf(\"terrible rand.rand\")\n\t}\n\tInts(data)\n\tif !IntsAreSorted(data) {\n\t\tt.Errorf(\"sort didn't sort - 1M ints\")\n\t}\n}\n\nfunc TestReverseSortIntSlice(t *testing.T) {\n\tdata := ints\n\tdata1 := ints\n\ta := IntSlice(data[0:])\n\tSort(a)\n\tr := IntSlice(data1[0:])\n\tSort(Reverse(r))\n\tfor i := 0; i < len(data); i++ {\n\t\tif a[i] != r[len(data)-1-i] {\n\t\t\tt.Errorf(\"reverse sort didn't sort\")\n\t\t}\n\t\tif i > len(data)/2 {\n\t\t\tbreak\n\t\t}\n\t}\n}\n\ntype nonDeterministicTestingData struct {\n\tr *rand.Rand\n}\n\nfunc (t *nonDeterministicTestingData) Len() int {\n\treturn 500\n}\nfunc (t *nonDeterministicTestingData) Less(i, j int) bool {\n\tif i < 0 || j < 0 || i >= t.Len() || j >= t.Len() {\n\t\tpanic(\"nondeterministic comparison out of bounds\")\n\t}\n\treturn t.r.Float32() < 0.5\n}\nfunc (t *nonDeterministicTestingData) Swap(i, j int) {\n\tif i < 0 || j < 0 || i >= t.Len() || j >= t.Len() {\n\t\tpanic(\"nondeterministic comparison out of bounds\")\n\t}\n}\n\nfunc TestNonDeterministicComparison(t *testing.T) {\n\t// Ensure that sort.Sort does not panic when Less returns inconsistent results.\n\t// See https://golang.org/issue/14377.\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tt.Error(r)\n\t\t}\n\t}()\n\n\ttd := &nonDeterministicTestingData{\n\t\tr: 
rand.New(rand.NewSource(0)),\n\t}\n\n\tfor i := 0; i < 10; i++ {\n\t\tSort(td)\n\t}\n}\n\nfunc BenchmarkSortString1K(b *testing.B) {\n\tb.StopTimer()\n\tunsorted := make([]string, 1<<10)\n\tfor i := range unsorted {\n\t\tunsorted[i] = strconv.Itoa(i ^ 0x2cc)\n\t}\n\tdata := make([]string, len(unsorted))\n\n\tfor i := 0; i < b.N; i++ {\n\t\tcopy(data, unsorted)\n\t\tb.StartTimer()\n\t\tStrings(data)\n\t\tb.StopTimer()\n\t}\n}\n\nfunc BenchmarkSortString1K_With(b *testing.B) {\n\tb.StopTimer()\n\tunsorted := make([]string, 1<<10)\n\tfor i := range unsorted {\n\t\tunsorted[i] = strconv.Itoa(i ^ 0x2cc)\n\t}\n\tdata := make([]string, len(unsorted))\n\n\tfor i := 0; i < b.N; i++ {\n\t\tcopy(data, unsorted)\n\t\tb.StartTimer()\n\t\tWith(len(data),\n\t\t\tfunc(i, j int) { data[i], data[j] = data[j], data[i] },\n\t\t\tfunc(i, j int) bool {\n\t\t\t\treturn data[i] < data[j]\n\t\t\t})\n\t\tb.StopTimer()\n\t}\n}\n\nfunc BenchmarkSortString1K_WithSwapper(b *testing.B) {\n\tb.StopTimer()\n\tunsorted := make([]string, 1<<10)\n\tfor i := range unsorted {\n\t\tunsorted[i] = strconv.Itoa(i ^ 0x2cc)\n\t}\n\tdata := make([]string, len(unsorted))\n\n\tfor i := 0; i < b.N; i++ {\n\t\tcopy(data, unsorted)\n\t\tb.StartTimer()\n\t\tWith(len(data), reflectutil.Swapper(data), func(i, j int) bool {\n\t\t\treturn data[i] < data[j]\n\t\t})\n\t\tb.StopTimer()\n\t}\n}\n\nfunc BenchmarkStableString1K(b *testing.B) {\n\tb.StopTimer()\n\tunsorted := make([]string, 1<<10)\n\tfor i := range unsorted {\n\t\tunsorted[i] = strconv.Itoa(i ^ 0x2cc)\n\t}\n\tdata := make([]string, len(unsorted))\n\n\tfor i := 0; i < b.N; i++ {\n\t\tcopy(data, unsorted)\n\t\tb.StartTimer()\n\t\tStable(StringSlice(data))\n\t\tb.StopTimer()\n\t}\n}\n\nfunc BenchmarkSortInt1K(b *testing.B) {\n\tb.StopTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tdata := make([]int, 1<<10)\n\t\tfor i := 0; i < len(data); i++ {\n\t\t\tdata[i] = i ^ 0x2cc\n\t\t}\n\t\tb.StartTimer()\n\t\tInts(data)\n\t\tb.StopTimer()\n\t}\n}\n\nfunc 
BenchmarkStableInt1K(b *testing.B) {\n\tb.StopTimer()\n\tunsorted := make([]int, 1<<10)\n\tfor i := range unsorted {\n\t\tunsorted[i] = i ^ 0x2cc\n\t}\n\tdata := make([]int, len(unsorted))\n\tfor i := 0; i < b.N; i++ {\n\t\tcopy(data, unsorted)\n\t\tb.StartTimer()\n\t\tStable(IntSlice(data))\n\t\tb.StopTimer()\n\t}\n}\n\nfunc BenchmarkStableInt1K_With(b *testing.B) {\n\tb.StopTimer()\n\tunsorted := make([]int, 1<<10)\n\tfor i := range unsorted {\n\t\tunsorted[i] = i ^ 0x2cc\n\t}\n\tdata := make([]int, len(unsorted))\n\tfor i := 0; i < b.N; i++ {\n\t\tcopy(data, unsorted)\n\t\tb.StartTimer()\n\t\tStable(MakeInterface(\n\t\t\tlen(data),\n\t\t\tfunc(i, j int) { data[i], data[j] = data[j], data[i] },\n\t\t\tfunc(i, j int) bool { return data[i] < data[j] },\n\t\t))\n\t\tb.StopTimer()\n\t}\n}\n\nfunc BenchmarkStableInt1K_WithSwapper(b *testing.B) {\n\tb.StopTimer()\n\tunsorted := make([]int, 1<<10)\n\tfor i := range unsorted {\n\t\tunsorted[i] = i ^ 0x2cc\n\t}\n\tdata := make([]int, len(unsorted))\n\tfor i := 0; i < b.N; i++ {\n\t\tcopy(data, unsorted)\n\t\tb.StartTimer()\n\t\tStable(MakeInterface(len(data), reflectutil.Swapper(data), func(i, j int) bool {\n\t\t\treturn data[i] < data[j]\n\t\t}))\n\t\tb.StopTimer()\n\t}\n}\n\nfunc BenchmarkSortInt64K(b *testing.B) {\n\tb.StopTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tdata := make([]int, 1<<16)\n\t\tfor i := 0; i < len(data); i++ {\n\t\t\tdata[i] = i ^ 0xcccc\n\t\t}\n\t\tb.StartTimer()\n\t\tInts(data)\n\t\tb.StopTimer()\n\t}\n}\n\nfunc BenchmarkStableInt64K(b *testing.B) {\n\tb.StopTimer()\n\tfor i := 0; i < b.N; i++ {\n\t\tdata := make([]int, 1<<16)\n\t\tfor i := 0; i < len(data); i++ {\n\t\t\tdata[i] = i ^ 0xcccc\n\t\t}\n\t\tb.StartTimer()\n\t\tStable(IntSlice(data))\n\t\tb.StopTimer()\n\t}\n}\n\nconst (\n\t_Sawtooth = iota\n\t_Rand\n\t_Stagger\n\t_Plateau\n\t_Shuffle\n\t_NDist\n)\n\nconst (\n\t_Copy = iota\n\t_Reverse\n\t_ReverseFirstHalf\n\t_ReverseSecondHalf\n\t_Sorted\n\t_Dither\n\t_NMode\n)\n\ntype testingData 
struct {\n\tdesc        string\n\tt           *testing.T\n\tdata        []int\n\tmaxswap     int // number of swaps allowed\n\tncmp, nswap int\n}\n\nfunc (d *testingData) Len() int { return len(d.data) }\nfunc (d *testingData) Less(i, j int) bool {\n\td.ncmp++\n\treturn d.data[i] < d.data[j]\n}\nfunc (d *testingData) Swap(i, j int) {\n\tif d.nswap >= d.maxswap {\n\t\td.t.Errorf(\"%s: used %d swaps sorting slice of %d\", d.desc, d.nswap, len(d.data))\n\t\td.t.FailNow()\n\t}\n\td.nswap++\n\td.data[i], d.data[j] = d.data[j], d.data[i]\n}\n\nfunc min(a, b int) int {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}\n\nfunc lg(n int) int {\n\ti := 0\n\tfor 1<<uint(i) < n {\n\t\ti++\n\t}\n\treturn i\n}\n\nfunc testBentleyMcIlroy(t *testing.T, sort func(Interface), maxswap func(int) int) {\n\tsizes := []int{100, 1023, 1024, 1025}\n\tif testing.Short() {\n\t\tsizes = []int{100, 127, 128, 129}\n\t}\n\tdists := []string{\"sawtooth\", \"rand\", \"stagger\", \"plateau\", \"shuffle\"}\n\tmodes := []string{\"copy\", \"reverse\", \"reverse1\", \"reverse2\", \"sort\", \"dither\"}\n\tvar tmp1, tmp2 [1025]int\n\tfor _, n := range sizes {\n\t\tfor m := 1; m < 2*n; m *= 2 {\n\t\t\tfor dist := 0; dist < _NDist; dist++ {\n\t\t\t\tj := 0\n\t\t\t\tk := 1\n\t\t\t\tdata := tmp1[0:n]\n\t\t\t\tfor i := 0; i < n; i++ {\n\t\t\t\t\tswitch dist {\n\t\t\t\t\tcase _Sawtooth:\n\t\t\t\t\t\tdata[i] = i % m\n\t\t\t\t\tcase _Rand:\n\t\t\t\t\t\tdata[i] = rand.Intn(m)\n\t\t\t\t\tcase _Stagger:\n\t\t\t\t\t\tdata[i] = (i*m + i) % n\n\t\t\t\t\tcase _Plateau:\n\t\t\t\t\t\tdata[i] = min(i, m)\n\t\t\t\t\tcase _Shuffle:\n\t\t\t\t\t\tif rand.Intn(m) != 0 {\n\t\t\t\t\t\t\tj += 2\n\t\t\t\t\t\t\tdata[i] = j\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tk += 2\n\t\t\t\t\t\t\tdata[i] = k\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tmdata := tmp2[0:n]\n\t\t\t\tfor mode := 0; mode < _NMode; mode++ {\n\t\t\t\t\tswitch mode {\n\t\t\t\t\tcase _Copy:\n\t\t\t\t\t\tfor i := 0; i < n; i++ {\n\t\t\t\t\t\t\tmdata[i] = 
data[i]\n\t\t\t\t\t\t}\n\t\t\t\t\tcase _Reverse:\n\t\t\t\t\t\tfor i := 0; i < n; i++ {\n\t\t\t\t\t\t\tmdata[i] = data[n-i-1]\n\t\t\t\t\t\t}\n\t\t\t\t\tcase _ReverseFirstHalf:\n\t\t\t\t\t\tfor i := 0; i < n/2; i++ {\n\t\t\t\t\t\t\tmdata[i] = data[n/2-i-1]\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfor i := n / 2; i < n; i++ {\n\t\t\t\t\t\t\tmdata[i] = data[i]\n\t\t\t\t\t\t}\n\t\t\t\t\tcase _ReverseSecondHalf:\n\t\t\t\t\t\tfor i := 0; i < n/2; i++ {\n\t\t\t\t\t\t\tmdata[i] = data[i]\n\t\t\t\t\t\t}\n\t\t\t\t\t\tfor i := n / 2; i < n; i++ {\n\t\t\t\t\t\t\tmdata[i] = data[n-(i-n/2)-1]\n\t\t\t\t\t\t}\n\t\t\t\t\tcase _Sorted:\n\t\t\t\t\t\tfor i := 0; i < n; i++ {\n\t\t\t\t\t\t\tmdata[i] = data[i]\n\t\t\t\t\t\t}\n\t\t\t\t\t\t// Ints is known to be correct\n\t\t\t\t\t\t// because mode Sort runs after mode _Copy.\n\t\t\t\t\t\tInts(mdata)\n\t\t\t\t\tcase _Dither:\n\t\t\t\t\t\tfor i := 0; i < n; i++ {\n\t\t\t\t\t\t\tmdata[i] = data[i] + i%5\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tdesc := fmt.Sprintf(\"n=%d m=%d dist=%s mode=%s\", n, m, dists[dist], modes[mode])\n\t\t\t\t\td := &testingData{desc: desc, t: t, data: mdata[0:n], maxswap: maxswap(n)}\n\t\t\t\t\tsort(d)\n\t\t\t\t\t// Uncomment if you are trying to improve the number of compares/swaps.\n\t\t\t\t\t//t.Logf(\"%s: ncmp=%d, nswp=%d\", desc, d.ncmp, d.nswap)\n\n\t\t\t\t\t// If we were testing C qsort, we'd have to make a copy\n\t\t\t\t\t// of the slice and sort it ourselves and then compare\n\t\t\t\t\t// x against it, to ensure that qsort was only permuting\n\t\t\t\t\t// the data, not (for example) overwriting it with zeros.\n\t\t\t\t\t//\n\t\t\t\t\t// In go, we don't have to be so paranoid: since the only\n\t\t\t\t\t// mutating method Sort can call is TestingData.swap,\n\t\t\t\t\t// it suffices here just to check that the final slice is sorted.\n\t\t\t\t\tif !IntsAreSorted(mdata) {\n\t\t\t\t\t\tt.Errorf(\"%s: ints not sorted\", desc)\n\t\t\t\t\t\tt.Errorf(\"\\t%v\", 
mdata)\n\t\t\t\t\t\tt.FailNow()\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestSortBM(t *testing.T) {\n\ttestBentleyMcIlroy(t, Sort, func(n int) int { return n * lg(n) * 12 / 10 })\n}\n\nfunc TestHeapsortBM(t *testing.T) {\n\ttestBentleyMcIlroy(t, Heapsort, func(n int) int { return n * lg(n) * 12 / 10 })\n}\n\nfunc TestStableBM(t *testing.T) {\n\ttestBentleyMcIlroy(t, Stable, func(n int) int { return n * lg(n) * lg(n) / 3 })\n}\n\n// This is based on the \"antiquicksort\" implementation by M. Douglas McIlroy.\n// See http://www.cs.dartmouth.edu/~doug/mdmspe.pdf for more info.\ntype adversaryTestingData struct {\n\tdata      []int\n\tkeys      map[int]int\n\tcandidate int\n}\n\nfunc (d *adversaryTestingData) Len() int { return len(d.data) }\n\nfunc (d *adversaryTestingData) Less(i, j int) bool {\n\tif _, present := d.keys[i]; !present {\n\t\tif _, present := d.keys[j]; !present {\n\t\t\tif i == d.candidate {\n\t\t\t\td.keys[i] = len(d.keys)\n\t\t\t} else {\n\t\t\t\td.keys[j] = len(d.keys)\n\t\t\t}\n\t\t}\n\t}\n\n\tif _, present := d.keys[i]; !present {\n\t\td.candidate = i\n\t\treturn false\n\t}\n\tif _, present := d.keys[j]; !present {\n\t\td.candidate = j\n\t\treturn true\n\t}\n\n\treturn d.keys[i] >= d.keys[j]\n}\n\nfunc (d *adversaryTestingData) Swap(i, j int) {\n\td.data[i], d.data[j] = d.data[j], d.data[i]\n}\n\nfunc TestAdversary(t *testing.T) {\n\tconst size = 100\n\tdata := make([]int, size)\n\tfor i := 0; i < size; i++ {\n\t\tdata[i] = i\n\t}\n\n\td := &adversaryTestingData{data, make(map[int]int), 0}\n\tSort(d) // This should degenerate to heapsort.\n}\n\nfunc TestStableInts(t *testing.T) {\n\tdata := ints\n\tStable(IntSlice(data[0:]))\n\tif !IntsAreSorted(data[0:]) {\n\t\tt.Errorf(\"nsorted %v\\n   got %v\", ints, data)\n\t}\n}\n\ntype intPairs []struct {\n\ta, b int\n}\n\n// IntPairs compare on a only.\nfunc (d intPairs) Len() int           { return len(d) }\nfunc (d intPairs) Less(i, j int) bool { return d[i].a < d[j].a }\nfunc (d 
intPairs) Swap(i, j int)      { d[i], d[j] = d[j], d[i] }\n\n// Record initial order in B.\nfunc (d intPairs) initB() {\n\tfor i := range d {\n\t\td[i].b = i\n\t}\n}\n\n// InOrder checks if a-equal elements were not reordered.\nfunc (d intPairs) inOrder() bool {\n\tlastA, lastB := -1, 0\n\tfor i := 0; i < len(d); i++ {\n\t\tif lastA != d[i].a {\n\t\t\tlastA = d[i].a\n\t\t\tlastB = d[i].b\n\t\t\tcontinue\n\t\t}\n\t\tif d[i].b <= lastB {\n\t\t\treturn false\n\t\t}\n\t\tlastB = d[i].b\n\t}\n\treturn true\n}\n\nfunc TestStability(t *testing.T) {\n\tn, m := 100000, 1000\n\tif testing.Short() {\n\t\tn, m = 1000, 100\n\t}\n\tdata := make(intPairs, n)\n\n\t// random distribution\n\tfor i := 0; i < len(data); i++ {\n\t\tdata[i].a = rand.Intn(m)\n\t}\n\tif IsSorted(data) {\n\t\tt.Fatalf(\"terrible rand.rand\")\n\t}\n\tdata.initB()\n\tStable(data)\n\tif !IsSorted(data) {\n\t\tt.Errorf(\"Stable didn't sort %d ints\", n)\n\t}\n\tif !data.inOrder() {\n\t\tt.Errorf(\"Stable wasn't stable on %d ints\", n)\n\t}\n\n\t// already sorted\n\tdata.initB()\n\tStable(data)\n\tif !IsSorted(data) {\n\t\tt.Errorf(\"Stable shuffled sorted %d ints (order)\", n)\n\t}\n\tif !data.inOrder() {\n\t\tt.Errorf(\"Stable shuffled sorted %d ints (stability)\", n)\n\t}\n\n\t// sorted reversed\n\tfor i := 0; i < len(data); i++ {\n\t\tdata[i].a = len(data) - i\n\t}\n\tdata.initB()\n\tStable(data)\n\tif !IsSorted(data) {\n\t\tt.Errorf(\"Stable didn't sort %d ints\", n)\n\t}\n\tif !data.inOrder() {\n\t\tt.Errorf(\"Stable wasn't stable on %d ints\", n)\n\t}\n}\n\nvar countOpsSizes = []int{1e2, 3e2, 1e3, 3e3, 1e4, 3e4, 1e5, 3e5, 1e6}\n\nfunc countOps(t *testing.T, algo func(Interface), name string) {\n\tsizes := countOpsSizes\n\tif testing.Short() {\n\t\tsizes = sizes[:5]\n\t}\n\tif !testing.Verbose() {\n\t\tt.Skip(\"Counting skipped as non-verbose mode.\")\n\t}\n\tfor _, n := range sizes {\n\t\ttd := testingData{\n\t\t\tdesc:    name,\n\t\t\tt:       t,\n\t\t\tdata:    make([]int, n),\n\t\t\tmaxswap: 1<<31 - 
1,\n\t\t}\n\t\tfor i := 0; i < n; i++ {\n\t\t\ttd.data[i] = rand.Intn(n / 5)\n\t\t}\n\t\talgo(&td)\n\t\tt.Logf(\"%s %8d elements: %11d Swap, %10d Less\", name, n, td.nswap, td.ncmp)\n\t}\n}\n\nfunc TestCountStableOps(t *testing.T) { countOps(t, Stable, \"Stable\") }\nfunc TestCountSortOps(t *testing.T)   { countOps(t, Sort, \"Sort  \") }\n\nfunc bench(b *testing.B, size int, algo func(Interface), name string) {\n\tb.StopTimer()\n\tdata := make(intPairs, size)\n\tx := ^uint32(0)\n\tfor i := 0; i < b.N; i++ {\n\t\tfor n := size - 3; n <= size+3; n++ {\n\t\t\tfor i := 0; i < len(data); i++ {\n\t\t\t\tx += x\n\t\t\t\tx ^= 1\n\t\t\t\tif int32(x) < 0 {\n\t\t\t\t\tx ^= 0x88888eef\n\t\t\t\t}\n\t\t\t\tdata[i].a = int(x % uint32(n/5))\n\t\t\t}\n\t\t\tdata.initB()\n\t\t\tb.StartTimer()\n\t\t\talgo(data)\n\t\t\tb.StopTimer()\n\t\t\tif !IsSorted(data) {\n\t\t\t\tb.Errorf(\"%s did not sort %d ints\", name, n)\n\t\t\t}\n\t\t\tif name == \"Stable\" && !data.inOrder() {\n\t\t\t\tb.Errorf(\"%s unstable on %d ints\", name, n)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc BenchmarkSort1e2(b *testing.B)   { bench(b, 1e2, Sort, \"Sort\") }\nfunc BenchmarkStable1e2(b *testing.B) { bench(b, 1e2, Stable, \"Stable\") }\nfunc BenchmarkSort1e4(b *testing.B)   { bench(b, 1e4, Sort, \"Sort\") }\nfunc BenchmarkStable1e4(b *testing.B) { bench(b, 1e4, Stable, \"Stable\") }\nfunc BenchmarkSort1e6(b *testing.B)   { bench(b, 1e6, Sort, \"Sort\") }\nfunc BenchmarkStable1e6(b *testing.B) { bench(b, 1e6, Stable, \"Stable\") }\n"
  },
  {
    "path": "sort/zfuncversion.go",
    "content": "// DO NOT EDIT; AUTO-GENERATED from sort.go using genzfunc.go\n\n// Copyright 2016 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\npackage sort\n\n// Auto-generated variant of sort.go:insertionSort\nfunc insertionSort_func(data lessSwap, a, b int) {\n\tfor i := a + 1; i < b; i++ {\n\t\tfor j := i; j > a && data.Less(j, j-1); j-- {\n\t\t\tdata.Swap(j, j-1)\n\t\t}\n\t}\n}\n\n// Auto-generated variant of sort.go:siftDown\nfunc siftDown_func(data lessSwap, lo, hi, first int) {\n\troot := lo\n\tfor {\n\t\tchild := 2*root + 1\n\t\tif child >= hi {\n\t\t\tbreak\n\t\t}\n\t\tif child+1 < hi && data.Less(first+child, first+child+1) {\n\t\t\tchild++\n\t\t}\n\t\tif !data.Less(first+root, first+child) {\n\t\t\treturn\n\t\t}\n\t\tdata.Swap(first+root, first+child)\n\t\troot = child\n\t}\n}\n\n// Auto-generated variant of sort.go:heapSort\nfunc heapSort_func(data lessSwap, a, b int) {\n\tfirst := a\n\tlo := 0\n\thi := b - a\n\tfor i := (hi - 1) / 2; i >= 0; i-- {\n\t\tsiftDown_func(data, i, hi, first)\n\t}\n\tfor i := hi - 1; i >= 0; i-- {\n\t\tdata.Swap(first, first+i)\n\t\tsiftDown_func(data, lo, i, first)\n\t}\n}\n\n// Auto-generated variant of sort.go:medianOfThree\nfunc medianOfThree_func(data lessSwap, m1, m0, m2 int) {\n\tif data.Less(m1, m0) {\n\t\tdata.Swap(m1, m0)\n\t}\n\tif data.Less(m2, m1) {\n\t\tdata.Swap(m2, m1)\n\t\tif data.Less(m1, m0) {\n\t\t\tdata.Swap(m1, m0)\n\t\t}\n\t}\n}\n\n// Auto-generated variant of sort.go:swapRange\nfunc swapRange_func(data lessSwap, a, b, n int) {\n\tfor i := 0; i < n; i++ {\n\t\tdata.Swap(a+i, b+i)\n\t}\n}\n\n// Auto-generated variant of sort.go:doPivot\nfunc doPivot_func(data lessSwap, lo, hi int) (midlo, midhi int) {\n\tm := lo + (hi-lo)/2\n\tif hi-lo > 40 {\n\t\ts := (hi - lo) / 8\n\t\tmedianOfThree_func(data, lo, lo+s, lo+2*s)\n\t\tmedianOfThree_func(data, m, m-s, m+s)\n\t\tmedianOfThree_func(data, hi-1, hi-1-s, 
hi-1-2*s)\n\t}\n\tmedianOfThree_func(data, lo, m, hi-1)\n\tpivot := lo\n\ta, c := lo+1, hi-1\n\tfor ; a < c && data.Less(a, pivot); a++ {\n\t}\n\tb := a\n\tfor {\n\t\tfor ; b < c && !data.Less(pivot, b); b++ {\n\t\t}\n\t\tfor ; b < c && data.Less(pivot, c-1); c-- {\n\t\t}\n\t\tif b >= c {\n\t\t\tbreak\n\t\t}\n\t\tdata.Swap(b, c-1)\n\t\tb++\n\t\tc--\n\t}\n\tprotect := hi-c < 5\n\tif !protect && hi-c < (hi-lo)/4 {\n\t\tdups := 0\n\t\tif !data.Less(pivot, hi-1) {\n\t\t\tdata.Swap(c, hi-1)\n\t\t\tc++\n\t\t\tdups++\n\t\t}\n\t\tif !data.Less(b-1, pivot) {\n\t\t\tb--\n\t\t\tdups++\n\t\t}\n\t\tif !data.Less(m, pivot) {\n\t\t\tdata.Swap(m, b-1)\n\t\t\tb--\n\t\t\tdups++\n\t\t}\n\t\tprotect = dups > 1\n\t}\n\tif protect {\n\t\tfor {\n\t\t\tfor ; a < b && !data.Less(b-1, pivot); b-- {\n\t\t\t}\n\t\t\tfor ; a < b && data.Less(a, pivot); a++ {\n\t\t\t}\n\t\t\tif a >= b {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tdata.Swap(a, b-1)\n\t\t\ta++\n\t\t\tb--\n\t\t}\n\t}\n\tdata.Swap(pivot, b-1)\n\treturn b - 1, c\n}\n\n// Auto-generated variant of sort.go:quickSort\nfunc quickSort_func(data lessSwap, a, b, maxDepth int) {\n\tfor b-a > 12 {\n\t\tif maxDepth == 0 {\n\t\t\theapSort_func(data, a, b)\n\t\t\treturn\n\t\t}\n\t\tmaxDepth--\n\t\tmlo, mhi := doPivot_func(data, a, b)\n\t\tif mlo-a < b-mhi {\n\t\t\tquickSort_func(data, a, mlo, maxDepth)\n\t\t\ta = mhi\n\t\t} else {\n\t\t\tquickSort_func(data, mhi, b, maxDepth)\n\t\t\tb = mlo\n\t\t}\n\t}\n\tif b-a > 1 {\n\t\tfor i := a + 6; i < b; i++ {\n\t\t\tif data.Less(i, i-6) {\n\t\t\t\tdata.Swap(i, i-6)\n\t\t\t}\n\t\t}\n\t\tinsertionSort_func(data, a, b)\n\t}\n}\n\n// Auto-generated variant of sort.go:stable\nfunc stable_func(data lessSwap, n int) {\n\tblockSize := 20\n\ta, b := 0, blockSize\n\tfor b <= n {\n\t\tinsertionSort_func(data, a, b)\n\t\ta = b\n\t\tb += blockSize\n\t}\n\tinsertionSort_func(data, a, n)\n\tfor blockSize < n {\n\t\ta, b = 0, 2*blockSize\n\t\tfor b <= n {\n\t\t\tsymMerge_func(data, a, a+blockSize, b)\n\t\t\ta = b\n\t\t\tb += 2 * 
blockSize\n\t\t}\n\t\tif m := a + blockSize; m < n {\n\t\t\tsymMerge_func(data, a, m, n)\n\t\t}\n\t\tblockSize *= 2\n\t}\n}\n\n// Auto-generated variant of sort.go:symMerge\nfunc symMerge_func(data lessSwap, a, m, b int) {\n\tif m-a == 1 {\n\t\ti := m\n\t\tj := b\n\t\tfor i < j {\n\t\t\th := i + (j-i)/2\n\t\t\tif data.Less(h, a) {\n\t\t\t\ti = h + 1\n\t\t\t} else {\n\t\t\t\tj = h\n\t\t\t}\n\t\t}\n\t\tfor k := a; k < i-1; k++ {\n\t\t\tdata.Swap(k, k+1)\n\t\t}\n\t\treturn\n\t}\n\tif b-m == 1 {\n\t\ti := a\n\t\tj := m\n\t\tfor i < j {\n\t\t\th := i + (j-i)/2\n\t\t\tif !data.Less(m, h) {\n\t\t\t\ti = h + 1\n\t\t\t} else {\n\t\t\t\tj = h\n\t\t\t}\n\t\t}\n\t\tfor k := m; k > i; k-- {\n\t\t\tdata.Swap(k, k-1)\n\t\t}\n\t\treturn\n\t}\n\tmid := a + (b-a)/2\n\tn := mid + m\n\tvar start, r int\n\tif m > mid {\n\t\tstart = n - b\n\t\tr = mid\n\t} else {\n\t\tstart = a\n\t\tr = m\n\t}\n\tp := n - 1\n\tfor start < r {\n\t\tc := start + (r-start)/2\n\t\tif !data.Less(p-c, c) {\n\t\t\tstart = c + 1\n\t\t} else {\n\t\t\tr = c\n\t\t}\n\t}\n\tend := n - start\n\tif start < m && m < end {\n\t\trotate_func(data, start, m, end)\n\t}\n\tif a < start && start < mid {\n\t\tsymMerge_func(data, a, start, mid)\n\t}\n\tif mid < end && end < b {\n\t\tsymMerge_func(data, mid, end, b)\n\t}\n}\n\n// Auto-generated variant of sort.go:rotate\nfunc rotate_func(data lessSwap, a, m, b int) {\n\ti := m - a\n\tj := b - m\n\tfor i != j {\n\t\tif i > j {\n\t\t\tswapRange_func(data, m-i, m, j)\n\t\t\ti -= j\n\t\t} else {\n\t\t\tswapRange_func(data, m-i, m+j-i, i)\n\t\t\tj -= i\n\t\t}\n\t}\n\tswapRange_func(data, m-i, m, i)\n}\n"
  },
  {
    "path": "strutil/intern.go",
    "content": "/*\nCopyright 2013 The Perkeep Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage strutil\n\nvar internStr = map[string]string{}\n\n// RegisterCommonString adds common strings to the interned string\n// table.  This should be called during init from the main\n// goroutine, not later at runtime.\nfunc RegisterCommonString(s ...string) {\n\tfor _, v := range s {\n\t\tinternStr[v] = v\n\t}\n}\n\n// StringFromBytes returns string(v), minimizing copies for common values of v\n// as previously registered with RegisterCommonString.\nfunc StringFromBytes(v []byte) string {\n\t// In Go 1.3, this string conversion in the map lookup does not allocate\n\t// to make a new string. We depend on Go 1.3, so this is always free:\n\tif s, ok := internStr[string(v)]; ok {\n\t\treturn s\n\t}\n\treturn string(v)\n}\n"
  },
  {
    "path": "strutil/strconv.go",
    "content": "/*\nCopyright 2013 The Perkeep Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage strutil\n\nimport (\n\t\"errors\"\n\t\"strconv\"\n)\n\n// ParseUintBytes is like strconv.ParseUint, but using a []byte.\nfunc ParseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {\n\tvar cutoff, maxVal uint64\n\n\tif bitSize == 0 {\n\t\tbitSize = int(strconv.IntSize)\n\t}\n\n\ts0 := s\n\tswitch {\n\tcase len(s) < 1:\n\t\terr = strconv.ErrSyntax\n\t\tgoto Error\n\n\tcase 2 <= base && base <= 36:\n\t\t// valid base; nothing to do\n\n\tcase base == 0:\n\t\t// Look for octal, hex prefix.\n\t\tswitch {\n\t\tcase s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):\n\t\t\tbase = 16\n\t\t\ts = s[2:]\n\t\t\tif len(s) < 1 {\n\t\t\t\terr = strconv.ErrSyntax\n\t\t\t\tgoto Error\n\t\t\t}\n\t\tcase s[0] == '0':\n\t\t\tbase = 8\n\t\tdefault:\n\t\t\tbase = 10\n\t\t}\n\n\tdefault:\n\t\terr = errors.New(\"invalid base \" + strconv.Itoa(base))\n\t\tgoto Error\n\t}\n\n\tn = 0\n\tcutoff = cutoff64(base)\n\tmaxVal = 1<<uint(bitSize) - 1\n\n\tfor i := 0; i < len(s); i++ {\n\t\tvar v byte\n\t\td := s[i]\n\t\tswitch {\n\t\tcase '0' <= d && d <= '9':\n\t\t\tv = d - '0'\n\t\tcase 'a' <= d && d <= 'z':\n\t\t\tv = d - 'a' + 10\n\t\tcase 'A' <= d && d <= 'Z':\n\t\t\tv = d - 'A' + 10\n\t\tdefault:\n\t\t\tn = 0\n\t\t\terr = strconv.ErrSyntax\n\t\t\tgoto Error\n\t\t}\n\t\tif int(v) >= base {\n\t\t\tn = 0\n\t\t\terr = strconv.ErrSyntax\n\t\t\tgoto Error\n\t\t}\n\n\t\tif n 
>= cutoff {\n\t\t\t// n*base overflows\n\t\t\tn = 1<<64 - 1\n\t\t\terr = strconv.ErrRange\n\t\t\tgoto Error\n\t\t}\n\t\tn *= uint64(base)\n\n\t\tn1 := n + uint64(v)\n\t\tif n1 < n || n1 > maxVal {\n\t\t\t// n+v overflows\n\t\t\tn = 1<<64 - 1\n\t\t\terr = strconv.ErrRange\n\t\t\tgoto Error\n\t\t}\n\t\tn = n1\n\t}\n\n\treturn n, nil\n\nError:\n\treturn n, &strconv.NumError{Func: \"ParseUint\", Num: string(s0), Err: err}\n}\n\n// Return the first number n such that n*base >= 1<<64.\nfunc cutoff64(base int) uint64 {\n\tif base < 2 {\n\t\treturn 0\n\t}\n\treturn (1<<64-1)/uint64(base) + 1\n}\n"
  },
  {
    "path": "strutil/strutil.go",
    "content": "/*\nCopyright 2013 The Perkeep Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package strutil contains string and byte processing functions.\npackage strutil // import \"go4.org/strutil\"\n\nimport (\n\t\"strings\"\n\t\"unicode\"\n\t\"unicode/utf8\"\n)\n\n// Fork of Go's implementation in pkg/strings/strings.go:\n// Generic split: splits after each instance of sep,\n// including sepSave bytes of sep in the subarrays.\nfunc genSplit(dst []string, s, sep string, sepSave, n int) []string {\n\tif n == 0 {\n\t\treturn nil\n\t}\n\tif sep == \"\" {\n\t\tpanic(\"sep is empty\")\n\t}\n\tif n < 0 {\n\t\tn = strings.Count(s, sep) + 1\n\t}\n\tc := sep[0]\n\tstart := 0\n\tna := 0\n\tfor i := 0; i+len(sep) <= len(s) && na+1 < n; i++ {\n\t\tif s[i] == c && (len(sep) == 1 || s[i:i+len(sep)] == sep) {\n\t\t\tdst = append(dst, s[start:i+sepSave])\n\t\t\tna++\n\t\t\tstart = i + len(sep)\n\t\t\ti += len(sep) - 1\n\t\t}\n\t}\n\tdst = append(dst, s[start:])\n\treturn dst\n}\n\n// AppendSplitN is like strings.SplitN but appends to and returns dst.\n// Unlike strings.SplitN, an empty separator is not supported.\n// The count n determines the number of substrings to return:\n//   n > 0: at most n substrings; the last substring will be the unsplit remainder.\n//   n == 0: the result is nil (zero substrings)\n//   n < 0: all substrings\nfunc AppendSplitN(dst []string, s, sep string, n int) []string {\n\treturn genSplit(dst, s, sep, 0, n)\n}\n\n// equalFoldRune compares a 
and b runes whether they fold equally.\n//\n// The code comes from strings.EqualFold, but shortened to only one rune.\nfunc equalFoldRune(sr, tr rune) bool {\n\tif sr == tr {\n\t\treturn true\n\t}\n\t// Make sr < tr to simplify what follows.\n\tif tr < sr {\n\t\tsr, tr = tr, sr\n\t}\n\t// Fast check for ASCII.\n\tif tr < utf8.RuneSelf && 'A' <= sr && sr <= 'Z' {\n\t\t// ASCII, and sr is upper case.  tr must be lower case.\n\t\tif tr == sr+'a'-'A' {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\t// General case.  SimpleFold(x) returns the next equivalent rune > x\n\t// or wraps around to smaller values.\n\tr := unicode.SimpleFold(sr)\n\tfor r != sr && r < tr {\n\t\tr = unicode.SimpleFold(r)\n\t}\n\tif r == tr {\n\t\treturn true\n\t}\n\treturn false\n}\n\n// HasPrefixFold is like strings.HasPrefix but uses Unicode case-folding,\n// matching case insensitively.\nfunc HasPrefixFold(s, prefix string) bool {\n\tif prefix == \"\" {\n\t\treturn true\n\t}\n\tfor _, pr := range prefix {\n\t\tif s == \"\" {\n\t\t\treturn false\n\t\t}\n\t\t// step with s, too\n\t\tsr, size := utf8.DecodeRuneInString(s)\n\t\tif sr == utf8.RuneError {\n\t\t\treturn false\n\t\t}\n\t\ts = s[size:]\n\t\tif !equalFoldRune(sr, pr) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\n// HasSuffixFold is like strings.HasSuffix but uses Unicode case-folding,\n// matching case insensitively.\nfunc HasSuffixFold(s, suffix string) bool {\n\tif suffix == \"\" {\n\t\treturn true\n\t}\n\t// count the runes and bytes in s, but only till rune count of suffix\n\tbo, so := len(s), len(suffix)\n\tfor bo > 0 && so > 0 {\n\t\tr, size := utf8.DecodeLastRuneInString(s[:bo])\n\t\tif r == utf8.RuneError {\n\t\t\treturn false\n\t\t}\n\t\tbo -= size\n\n\t\tsr, size := utf8.DecodeLastRuneInString(suffix[:so])\n\t\tif sr == utf8.RuneError {\n\t\t\treturn false\n\t\t}\n\t\tso -= size\n\n\t\tif !equalFoldRune(r, sr) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn so == 0\n}\n\n// ContainsFold is like strings.Contains but 
uses Unicode case-folding.\nfunc ContainsFold(s, substr string) bool {\n\tif substr == \"\" {\n\t\treturn true\n\t}\n\tif s == \"\" {\n\t\treturn false\n\t}\n\tfirstRune := rune(substr[0])\n\tif firstRune >= utf8.RuneSelf {\n\t\tfirstRune, _ = utf8.DecodeRuneInString(substr)\n\t}\n\tfor i, rune := range s {\n\t\tif equalFoldRune(rune, firstRune) && HasPrefixFold(s[i:], substr) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// IsPlausibleJSON reports whether s likely contains a JSON object, without\n// actually parsing it. It's meant to be a light heuristic.\nfunc IsPlausibleJSON(s string) bool {\n\treturn startsWithOpenBrace(s) && endsWithCloseBrace(s)\n}\n\nfunc isASCIIWhite(b byte) bool { return b == ' ' || b == '\\n' || b == '\\r' || b == '\\t' }\n\nfunc startsWithOpenBrace(s string) bool {\n\tfor len(s) > 0 {\n\t\tswitch {\n\t\tcase s[0] == '{':\n\t\t\treturn true\n\t\tcase isASCIIWhite(s[0]):\n\t\t\ts = s[1:]\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n\treturn false\n}\n\nfunc endsWithCloseBrace(s string) bool {\n\tfor len(s) > 0 {\n\t\tlast := len(s) - 1\n\t\tswitch {\n\t\tcase s[last] == '}':\n\t\t\treturn true\n\t\tcase isASCIIWhite(s[last]):\n\t\t\ts = s[:last]\n\t\tdefault:\n\t\t\treturn false\n\t\t}\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "strutil/strutil_test.go",
    "content": "/*\nCopyright 2013 The Perkeep Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage strutil\n\nimport (\n\t\"reflect\"\n\t\"strings\"\n\t\"testing\"\n)\n\nfunc TestAppendSplitN(t *testing.T) {\n\tvar got []string\n\ttests := []struct {\n\t\ts, sep string\n\t\tn      int\n\t}{\n\t\t{\"foo\", \"|\", 1},\n\t\t{\"foo\", \"|\", -1},\n\t\t{\"foo|bar\", \"|\", 1},\n\t\t{\"foo|bar\", \"|\", -1},\n\t\t{\"foo|bar|\", \"|\", 2},\n\t\t{\"foo|bar|\", \"|\", -1},\n\t\t{\"foo|bar|baz\", \"|\", 1},\n\t\t{\"foo|bar|baz\", \"|\", 2},\n\t\t{\"foo|bar|baz\", \"|\", 3},\n\t\t{\"foo|bar|baz\", \"|\", -1},\n\t}\n\tfor _, tt := range tests {\n\t\twant := strings.SplitN(tt.s, tt.sep, tt.n)\n\t\tgot = AppendSplitN(got[:0], tt.s, tt.sep, tt.n)\n\t\tif !reflect.DeepEqual(want, got) {\n\t\t\tt.Errorf(\"AppendSplitN(%q, %q, %d) = %q; want %q\",\n\t\t\t\ttt.s, tt.sep, tt.n, got, want)\n\t\t}\n\t}\n}\n\nfunc TestStringFromBytes(t *testing.T) {\n\tfor _, s := range []string{\"foo\", \"permanode\", \"file\", \"zzzz\"} {\n\t\tgot := StringFromBytes([]byte(s))\n\t\tif got != s {\n\t\t\tt.Errorf(\"StringFromBytes(%q) didn't round-trip; got %q instead\", s, got)\n\t\t}\n\t}\n}\n\nfunc TestHasPrefixFold(t *testing.T) {\n\ttests := []struct {\n\t\ts, prefix string\n\t\tresult    bool\n\t}{\n\t\t{\"camli\", \"CAML\", true},\n\t\t{\"CAMLI\", \"caml\", true},\n\t\t{\"cam\", \"Cam\", true},\n\t\t{\"camli\", \"car\", false},\n\t\t{\"caml\", \"camli\", false},\n\t\t{\"Hello, 世界 dasdsa\", 
\"HeLlO, 世界\", true},\n\t\t{\"Hello, 世界\", \"HeLlO, 世界-\", false},\n\n\t\t{\"kelvin\", \"\\u212A\" + \"elvin\", true}, // \"\\u212A\" is the Kelvin temperature sign\n\t\t{\"Kelvin\", \"\\u212A\" + \"elvin\", true},\n\t\t{\"kelvin\", \"\\u212A\" + \"el\", true},\n\t\t{\"Kelvin\", \"\\u212A\" + \"el\", true},\n\t\t{\"\\u212A\" + \"elvin\", \"Kelvin\", true},\n\t\t{\"\\u212A\" + \"elvin\", \"kelvin\", true},\n\t\t{\"\\u212A\" + \"elvin\", \"Kel\", true},\n\t\t{\"\\u212A\" + \"elvin\", \"kel\", true},\n\t}\n\tfor _, tt := range tests {\n\t\tr := HasPrefixFold(tt.s, tt.prefix)\n\t\tif r != tt.result {\n\t\t\tt.Errorf(\"HasPrefixFold(%q, %q) returned %v\", tt.s, tt.prefix, r)\n\t\t}\n\t}\n}\n\nfunc TestHasSuffixFold(t *testing.T) {\n\ttests := []struct {\n\t\ts, suffix string\n\t\tresult    bool\n\t}{\n\t\t{\"camli\", \"AMLI\", true},\n\t\t{\"CAMLI\", \"amli\", true},\n\t\t{\"mli\", \"MLI\", true},\n\t\t{\"camli\", \"ali\", false},\n\t\t{\"amli\", \"camli\", false},\n\t\t{\"asas Hello, 世界\", \"HeLlO, 世界\", true},\n\t\t{\"Hello, 世界\", \"HeLlO, 世界-\", false},\n\t\t{\"KkkkKKkelvin\", \"\\u212A\" + \"elvin\", true}, // \"\\u212A\" is the Kelvin temperature sign\n\n\t\t{\"kelvin\", \"\\u212A\" + \"elvin\", true}, // \"\\u212A\" is the Kelvin temperature sign\n\t\t{\"Kelvin\", \"\\u212A\" + \"elvin\", true},\n\t\t{\"\\u212A\" + \"elvin\", \"Kelvin\", true},\n\t\t{\"\\u212A\" + \"elvin\", \"kelvin\", true},\n\t\t{\"\\u212A\" + \"elvin\", \"vin\", true},\n\t\t{\"\\u212A\" + \"elvin\", \"viN\", true},\n\t}\n\tfor _, tt := range tests {\n\t\tr := HasSuffixFold(tt.s, tt.suffix)\n\t\tif r != tt.result {\n\t\t\tt.Errorf(\"HasSuffixFold(%q, %q) returned %v\", tt.s, tt.suffix, r)\n\t\t}\n\t}\n}\n\nfunc TestContainsFold(t *testing.T) {\n\t// TODO: more tests, more languages.\n\ttests := []struct {\n\t\ts, substr string\n\t\tresult    bool\n\t}{\n\t\t{\"camli\", \"CAML\", true},\n\t\t{\"CAMLI\", \"caml\", true},\n\t\t{\"cam\", \"Cam\", true},\n\t\t{\"мир\", \"ми\", true},\n\t\t{\"МИP\", 
\"ми\", true},\n\t\t{\"КАМЛИЙСТОР\", \"камлийс\", true},\n\t\t{\"КаМлИйСтОр\", \"КаМлИйС\", true},\n\t\t{\"camli\", \"car\", false},\n\t\t{\"caml\", \"camli\", false},\n\n\t\t{\"camli\", \"AMLI\", true},\n\t\t{\"CAMLI\", \"amli\", true},\n\t\t{\"mli\", \"MLI\", true},\n\t\t{\"мир\", \"ир\", true},\n\t\t{\"МИP\", \"ми\", true},\n\t\t{\"КАМЛИЙСТОР\", \"лийстор\", true},\n\t\t{\"КаМлИйСтОр\", \"лИйСтОр\", true},\n\t\t{\"мир\", \"р\", true},\n\t\t{\"camli\", \"ali\", false},\n\t\t{\"amli\", \"camli\", false},\n\n\t\t{\"МИP\", \"и\", true},\n\t\t{\"мир\", \"и\", true},\n\t\t{\"КАМЛИЙСТОР\", \"лийс\", true},\n\t\t{\"КаМлИйСтОр\", \"лИйС\", true},\n\n\t\t{\"árvíztűrő tükörfúrógép\", \"árvíztŰrŐ\", true},\n\t\t{\"I love ☕\", \"i love ☕\", true},\n\n\t\t{\"k\", \"\\u212A\", true}, // \"\\u212A\" is the Kelvin temperature sign\n\t\t{\"\\u212A\" + \"elvin\", \"k\", true},\n\t\t{\"kelvin\", \"\\u212A\" + \"elvin\", true},\n\t\t{\"Kelvin\", \"\\u212A\" + \"elvin\", true},\n\t\t{\"\\u212A\" + \"elvin\", \"Kelvin\", true},\n\t\t{\"\\u212A\" + \"elvin\", \"kelvin\", true},\n\t\t{\"273.15 kelvin\", \"\\u212A\" + \"elvin\", true},\n\t\t{\"273.15 Kelvin\", \"\\u212A\" + \"elvin\", true},\n\t\t{\"273.15 \\u212A\" + \"elvin\", \"Kelvin\", true},\n\t\t{\"273.15 \\u212A\" + \"elvin\", \"kelvin\", true},\n\t}\n\tfor _, tt := range tests {\n\t\tr := ContainsFold(tt.s, tt.substr)\n\t\tif r != tt.result {\n\t\t\tt.Errorf(\"ContainsFold(%q, %q) returned %v\", tt.s, tt.substr, r)\n\t\t}\n\t}\n}\n\nfunc TestIsPlausibleJSON(t *testing.T) {\n\ttests := []struct {\n\t\tin   string\n\t\twant bool\n\t}{\n\t\t{\"{}\", true},\n\t\t{\" {}\", true},\n\t\t{\"{} \", true},\n\t\t{\"\\n\\r\\t {}\\t \\r \\n\", true},\n\n\t\t{\"\\n\\r\\t {x\\t \\r \\n\", false},\n\t\t{\"{x\", false},\n\t\t{\"x}\", false},\n\t\t{\"x\", false},\n\t\t{\"\", false},\n\t}\n\tfor _, tt := range tests {\n\t\tgot := IsPlausibleJSON(tt.in)\n\t\tif got != tt.want {\n\t\t\tt.Errorf(\"IsPlausibleJSON(%q) = %v; want %v\", tt.in, got, 
tt.want)\n\t\t}\n\t}\n}\n\nfunc BenchmarkHasSuffixFoldToLower(tb *testing.B) {\n\ta, b := \"camlik\", \"AMLI\\u212A\"\n\tfor i := 0; i < tb.N; i++ {\n\t\tif !strings.HasSuffix(strings.ToLower(a), strings.ToLower(b)) {\n\t\t\ttb.Fatalf(\"%q should have the same suffix as %q\", a, b)\n\t\t}\n\t}\n}\nfunc BenchmarkHasSuffixFold(tb *testing.B) {\n\ta, b := \"camlik\", \"AMLI\\u212A\"\n\tfor i := 0; i < tb.N; i++ {\n\t\tif !HasSuffixFold(a, b) {\n\t\t\ttb.Fatalf(\"%q should have the same suffix as %q\", a, b)\n\t\t}\n\t}\n}\n\nfunc BenchmarkHasPrefixFoldToLower(tb *testing.B) {\n\ta, b := \"kamlistore\", \"\\u212AAMLI\"\n\tfor i := 0; i < tb.N; i++ {\n\t\tif !strings.HasPrefix(strings.ToLower(a), strings.ToLower(b)) {\n\t\t\ttb.Fatalf(\"%q should have the same suffix as %q\", a, b)\n\t\t}\n\t}\n}\nfunc BenchmarkHasPrefixFold(tb *testing.B) {\n\ta, b := \"kamlistore\", \"\\u212AAMLI\"\n\tfor i := 0; i < tb.N; i++ {\n\t\tif !HasPrefixFold(a, b) {\n\t\t\ttb.Fatalf(\"%q should have the same suffix as %q\", a, b)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "syncutil/gate.go",
    "content": "/*\nCopyright 2013 Google Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage syncutil\n\n// A Gate limits concurrency.\ntype Gate struct {\n\tc chan struct{}\n}\n\n// NewGate returns a new gate that will only permit max operations at once.\nfunc NewGate(max int) *Gate {\n\treturn &Gate{make(chan struct{}, max)}\n}\n\n// Start starts an operation, blocking until the gate has room.\nfunc (g *Gate) Start() {\n\tg.c <- struct{}{}\n}\n\n// Done finishes an operation.\nfunc (g *Gate) Done() {\n\tselect {\n\tcase <-g.c:\n\tdefault:\n\t\tpanic(\"Done called more than Start\")\n\t}\n}\n"
  },
  {
    "path": "syncutil/group.go",
    "content": "/*\nCopyright 2013 Google Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage syncutil\n\nimport \"sync\"\n\n// A Group is like a sync.WaitGroup and coordinates doing\n// multiple things at once. Its zero value is ready to use.\ntype Group struct {\n\twg   sync.WaitGroup\n\tmu   sync.Mutex // guards errs\n\terrs []error\n}\n\n// Go runs fn in its own goroutine, but does not wait for it to complete.\n// Call Err or Errs to wait for all the goroutines to complete.\nfunc (g *Group) Go(fn func() error) {\n\tg.wg.Add(1)\n\tgo func() {\n\t\tdefer g.wg.Done()\n\t\terr := fn()\n\t\tif err != nil {\n\t\t\tg.mu.Lock()\n\t\t\tdefer g.mu.Unlock()\n\t\t\tg.errs = append(g.errs, err)\n\t\t}\n\t}()\n}\n\n// Wait waits for all the previous calls to Go to complete.\nfunc (g *Group) Wait() {\n\tg.wg.Wait()\n}\n\n// Err waits for all previous calls to Go to complete and returns the\n// first non-nil error, or nil.\nfunc (g *Group) Err() error {\n\tg.wg.Wait()\n\tif len(g.errs) > 0 {\n\t\treturn g.errs[0]\n\t}\n\treturn nil\n}\n\n// Errs waits for all previous calls to Go to complete and returns\n// all non-nil errors.\nfunc (g *Group) Errs() []error {\n\tg.wg.Wait()\n\treturn g.errs\n}\n"
  },
  {
    "path": "syncutil/once.go",
    "content": "/*\nCopyright 2014 The Perkeep Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage syncutil\n\nimport (\n\t\"sync\"\n\t\"sync/atomic\"\n)\n\n// A Once will perform a successful action exactly once.\n//\n// Unlike a sync.Once, this Once's func returns an error\n// and is re-armed on failure.\ntype Once struct {\n\tm    sync.Mutex\n\tdone uint32\n}\n\n// Do calls the function f if and only if Do has not been invoked\n// without error for this instance of Once.  In other words, given\n// \tvar once Once\n// if once.Do(f) is called multiple times, only the first call will\n// invoke f, even if f has a different value in each invocation unless\n// f returns an error.  A new instance of Once is required for each\n// function to execute.\n//\n// Do is intended for initialization that must be run exactly once.  Since f\n// is niladic, it may be necessary to use a function literal to capture the\n// arguments to a function to be invoked by Do:\n// \terr := config.once.Do(func() error { return config.init(filename) })\nfunc (o *Once) Do(f func() error) error {\n\tif atomic.LoadUint32(&o.done) == 1 {\n\t\treturn nil\n\t}\n\t// Slow-path.\n\to.m.Lock()\n\tdefer o.m.Unlock()\n\tvar err error\n\tif o.done == 0 {\n\t\terr = f()\n\t\tif err == nil {\n\t\t\tatomic.StoreUint32(&o.done, 1)\n\t\t}\n\t}\n\treturn err\n}\n"
  },
  {
    "path": "syncutil/once_test.go",
    "content": "package syncutil\n\nimport (\n\t\"errors\"\n\t\"testing\"\n)\n\nfunc TestOnce(t *testing.T) {\n\ttimesRan := 0\n\tf := func() error {\n\t\ttimesRan++\n\t\treturn nil\n\t}\n\n\tonce := Once{}\n\tgrp := Group{}\n\n\tfor i := 0; i < 10; i++ {\n\t\tgrp.Go(func() error { return once.Do(f) })\n\t}\n\n\tif grp.Err() != nil {\n\t\tt.Errorf(\"Expected no errors, got %v\", grp.Err())\n\t}\n\n\tif timesRan != 1 {\n\t\tt.Errorf(\"Expected to run one time, ran %d\", timesRan)\n\t}\n}\n\n// TestOnceErroring verifies we retry on every error, but stop after\n// the first success.\nfunc TestOnceErroring(t *testing.T) {\n\ttimesRan := 0\n\tf := func() error {\n\t\ttimesRan++\n\t\tif timesRan < 3 {\n\t\t\treturn errors.New(\"retry\")\n\t\t}\n\t\treturn nil\n\t}\n\n\tonce := Once{}\n\tgrp := Group{}\n\n\tfor i := 0; i < 10; i++ {\n\t\tgrp.Go(func() error { return once.Do(f) })\n\t}\n\n\tif len(grp.Errs()) != 2 {\n\t\tt.Errorf(\"Expected two errors, got %d\", len(grp.Errs()))\n\t}\n\n\tif timesRan != 3 {\n\t\tt.Errorf(\"Expected to run two times, ran %d\", timesRan)\n\t}\n}\n"
  },
  {
    "path": "syncutil/sem.go",
    "content": "package syncutil\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"sync\"\n)\n\ntype debugT bool\n\nvar debug = debugT(false)\n\nfunc (d debugT) Printf(format string, args ...interface{}) {\n\tif bool(d) {\n\t\tlog.Printf(format, args...)\n\t}\n}\n\n// Sem implements a semaphore that can have multiple units acquired/released\n// at a time.\ntype Sem struct {\n\tc         *sync.Cond // Protects size\n\tmax, free int64\n}\n\n// NewSem creates a semaphore with max units available for acquisition.\nfunc NewSem(max int64) *Sem {\n\treturn &Sem{\n\t\tc:    sync.NewCond(new(sync.Mutex)),\n\t\tfree: max,\n\t\tmax:  max,\n\t}\n}\n\n// Acquire will deduct n units from the semaphore.  If the deduction would\n// result in the available units falling below zero, the call will block until\n// another go routine returns units via a call to Release.  If more units are\n// requested than the semaphore is configured to hold, error will be non-nil.\nfunc (s *Sem) Acquire(n int64) error {\n\tif n > s.max {\n\t\treturn fmt.Errorf(\"sem: attempt to acquire more units than semaphore size %d > %d\", n, s.max)\n\t}\n\ts.c.L.Lock()\n\tdefer s.c.L.Unlock()\n\tfor {\n\t\tdebug.Printf(\"Acquire check max %d free %d, n %d\", s.max, s.free, n)\n\t\tif s.free >= n {\n\t\t\ts.free -= n\n\t\t\treturn nil\n\t\t}\n\t\tdebug.Printf(\"Acquire Wait max %d free %d, n %d\", s.max, s.free, n)\n\t\ts.c.Wait()\n\t}\n}\n\n// Release will return n units to the semaphore and notify any currently\n// blocking Acquire calls.\nfunc (s *Sem) Release(n int64) {\n\ts.c.L.Lock()\n\tdefer s.c.L.Unlock()\n\tdebug.Printf(\"Release max %d free %d, n %d\", s.max, s.free, n)\n\ts.free += n\n\ts.c.Broadcast()\n}\n"
  },
  {
    "path": "syncutil/sem_test.go",
    "content": "package syncutil_test\n\nimport (\n\t\"testing\"\n\n\t\"go4.org/syncutil\"\n)\n\nfunc TestSem(t *testing.T) {\n\ts := syncutil.NewSem(5)\n\n\tif err := s.Acquire(2); err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif err := s.Acquire(2); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tgo func() {\n\t\ts.Release(2)\n\t\ts.Release(2)\n\t}()\n\tif err := s.Acquire(5); err != nil {\n\t\tt.Fatal(err)\n\t}\n}\n\nfunc TestSemErr(t *testing.T) {\n\ts := syncutil.NewSem(5)\n\tif err := s.Acquire(6); err == nil {\n\t\tt.Fatal(\"Didn't get expected error for large acquire.\")\n\t}\n}\n"
  },
  {
    "path": "syncutil/singleflight/singleflight.go",
    "content": "/*\nCopyright 2013 Google Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package singleflight provides a duplicate function call suppression\n// mechanism.\npackage singleflight // import \"go4.org/syncutil/singleflight\"\n\nimport \"sync\"\n\n// call is an in-flight or completed Do call\ntype call struct {\n\twg  sync.WaitGroup\n\tval interface{}\n\terr error\n}\n\n// Group represents a class of work and forms a namespace in which\n// units of work can be executed with duplicate suppression.\ntype Group struct {\n\tmu sync.Mutex       // protects m\n\tm  map[string]*call // lazily initialized\n}\n\n// Do executes and returns the results of the given function, making\n// sure that only one execution is in-flight for a given key at a\n// time. If a duplicate comes in, the duplicate caller waits for the\n// original to complete and receives the same results.\nfunc (g *Group) Do(key string, fn func() (interface{}, error)) (interface{}, error) {\n\tg.mu.Lock()\n\tif g.m == nil {\n\t\tg.m = make(map[string]*call)\n\t}\n\tif c, ok := g.m[key]; ok {\n\t\tg.mu.Unlock()\n\t\tc.wg.Wait()\n\t\treturn c.val, c.err\n\t}\n\tc := new(call)\n\tc.wg.Add(1)\n\tg.m[key] = c\n\tg.mu.Unlock()\n\n\tc.val, c.err = fn()\n\tc.wg.Done()\n\n\tg.mu.Lock()\n\tdelete(g.m, key)\n\tg.mu.Unlock()\n\n\treturn c.val, c.err\n}\n"
  },
  {
    "path": "syncutil/singleflight/singleflight_test.go",
    "content": "/*\nCopyright 2013 Google Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage singleflight\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestDo(t *testing.T) {\n\tvar g Group\n\tv, err := g.Do(\"key\", func() (interface{}, error) {\n\t\treturn \"bar\", nil\n\t})\n\tif got, want := fmt.Sprintf(\"%v (%T)\", v, v), \"bar (string)\"; got != want {\n\t\tt.Errorf(\"Do = %v; want %v\", got, want)\n\t}\n\tif err != nil {\n\t\tt.Errorf(\"Do error = %v\", err)\n\t}\n}\n\nfunc TestDoErr(t *testing.T) {\n\tvar g Group\n\tsomeErr := errors.New(\"Some error\")\n\tv, err := g.Do(\"key\", func() (interface{}, error) {\n\t\treturn nil, someErr\n\t})\n\tif err != someErr {\n\t\tt.Errorf(\"Do error = %v; want someErr %v\", err, someErr)\n\t}\n\tif v != nil {\n\t\tt.Errorf(\"unexpected non-nil value %#v\", v)\n\t}\n}\n\nfunc TestDoDupSuppress(t *testing.T) {\n\tvar g Group\n\tc := make(chan string)\n\tvar calls int32\n\tfn := func() (interface{}, error) {\n\t\tatomic.AddInt32(&calls, 1)\n\t\treturn <-c, nil\n\t}\n\n\tconst n = 10\n\tvar wg sync.WaitGroup\n\tfor i := 0; i < n; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tv, err := g.Do(\"key\", fn)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"Do error: %v\", err)\n\t\t\t}\n\t\t\tif v.(string) != \"bar\" {\n\t\t\t\tt.Errorf(\"got %q; want %q\", v, \"bar\")\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t}\n\ttime.Sleep(100 * time.Millisecond) // let goroutines above 
block\n\tc <- \"bar\"\n\twg.Wait()\n\tif got := atomic.LoadInt32(&calls); got != 1 {\n\t\tt.Errorf(\"number of calls = %d; want 1\", got)\n\t}\n}\n"
  },
  {
    "path": "syncutil/syncdebug/syncdebug.go",
    "content": "/*\nCopyright 2013 The Perkeep Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package syncdebug contains facilities for debugging synchronization\n// problems.\npackage syncdebug // import \"go4.org/syncutil/syncdebug\"\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"log\"\n\t\"runtime\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"go4.org/strutil\"\n)\n\n// RWMutexTracker is a sync.RWMutex that tracks who owns the current\n// exclusive lock. It's used for debugging deadlocks.\ntype RWMutexTracker struct {\n\tmu sync.RWMutex\n\n\t// Atomic counters for number waiting and having read and write locks.\n\tnwaitr int32\n\tnwaitw int32\n\tnhaver int32\n\tnhavew int32 // should always be 0 or 1\n\n\tlogOnce sync.Once\n\n\thmu    sync.Mutex\n\tholder []byte\n\tholdr  map[int64]bool // goroutines holding read lock\n}\n\nconst stackBufSize = 16 << 20\n\nvar stackBuf = make(chan []byte, 8)\n\nfunc getBuf() []byte {\n\tselect {\n\tcase b := <-stackBuf:\n\t\treturn b[:stackBufSize]\n\tdefault:\n\t\treturn make([]byte, stackBufSize)\n\t}\n}\n\nfunc putBuf(b []byte) {\n\tselect {\n\tcase stackBuf <- b:\n\tdefault:\n\t}\n}\n\nvar goroutineSpace = []byte(\"goroutine \")\n\n// GoroutineID returns the current goroutine's ID.\n// Use of this function is almost always a terrible idea.\n// It is also very slow.\n// GoroutineID is intended only for debugging.\n// In particular, it is used by syncutil.\nfunc GoroutineID() int64 {\n\tb := getBuf()\n\tdefer putBuf(b)\n\tb 
= b[:runtime.Stack(b, false)]\n\t// Parse the 4707 out of \"goroutine 4707 [\"\n\tb = bytes.TrimPrefix(b, goroutineSpace)\n\ti := bytes.IndexByte(b, ' ')\n\tif i < 0 {\n\t\tpanic(fmt.Sprintf(\"No space found in %q\", b))\n\t}\n\tb = b[:i]\n\tn, err := strutil.ParseUintBytes(b, 10, 64)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to parse goroutine ID out of %q: %v\", b, err))\n\t}\n\treturn int64(n)\n}\n\nfunc (m *RWMutexTracker) startLogger() {\n\tgo func() {\n\t\tvar buf bytes.Buffer\n\t\tfor {\n\t\t\ttime.Sleep(1 * time.Second)\n\t\t\tbuf.Reset()\n\t\t\tm.hmu.Lock()\n\t\t\tfor gid := range m.holdr {\n\t\t\t\tfmt.Fprintf(&buf, \" [%d]\", gid)\n\t\t\t}\n\t\t\tm.hmu.Unlock()\n\t\t\tlog.Printf(\"Mutex %p: waitW %d haveW %d   waitR %d haveR %d %s\",\n\t\t\t\tm,\n\t\t\t\tatomic.LoadInt32(&m.nwaitw),\n\t\t\t\tatomic.LoadInt32(&m.nhavew),\n\t\t\t\tatomic.LoadInt32(&m.nwaitr),\n\t\t\t\tatomic.LoadInt32(&m.nhaver), buf.Bytes())\n\t\t}\n\t}()\n}\n\nfunc (m *RWMutexTracker) Lock() {\n\tm.logOnce.Do(m.startLogger)\n\tatomic.AddInt32(&m.nwaitw, 1)\n\tm.mu.Lock()\n\tatomic.AddInt32(&m.nwaitw, -1)\n\tatomic.AddInt32(&m.nhavew, 1)\n\n\tm.hmu.Lock()\n\tdefer m.hmu.Unlock()\n\tif len(m.holder) == 0 {\n\t\tm.holder = make([]byte, stackBufSize)\n\t}\n\tm.holder = m.holder[:runtime.Stack(m.holder[:stackBufSize], false)]\n\tlog.Printf(\"Lock at %s\", string(m.holder))\n}\n\nfunc (m *RWMutexTracker) Unlock() {\n\tm.hmu.Lock()\n\tm.holder = nil\n\tm.hmu.Unlock()\n\n\tatomic.AddInt32(&m.nhavew, -1)\n\tm.mu.Unlock()\n}\n\nfunc (m *RWMutexTracker) RLock() {\n\tm.logOnce.Do(m.startLogger)\n\tatomic.AddInt32(&m.nwaitr, 1)\n\n\t// Catch read-write-read lock. See if somebody (us? via\n\t// another goroutine?) 
already has a read lock, and then\n\t// somebody else is waiting to write, meaning our second read\n\t// will deadlock.\n\tif atomic.LoadInt32(&m.nhaver) > 0 && atomic.LoadInt32(&m.nwaitw) > 0 {\n\t\tbuf := getBuf()\n\t\tbuf = buf[:runtime.Stack(buf, false)]\n\t\tlog.Printf(\"Potential R-W-R deadlock at: %s\", buf)\n\t\tputBuf(buf)\n\t}\n\n\tm.mu.RLock()\n\tatomic.AddInt32(&m.nwaitr, -1)\n\tatomic.AddInt32(&m.nhaver, 1)\n\n\tgid := GoroutineID()\n\tm.hmu.Lock()\n\tdefer m.hmu.Unlock()\n\tif m.holdr == nil {\n\t\tm.holdr = make(map[int64]bool)\n\t}\n\tif m.holdr[gid] {\n\t\tbuf := getBuf()\n\t\tbuf = buf[:runtime.Stack(buf, false)]\n\t\tlog.Fatalf(\"Recursive call to RLock: %s\", buf)\n\t}\n\tm.holdr[gid] = true\n}\n\nfunc stack() []byte {\n\tbuf := make([]byte, 1024)\n\treturn buf[:runtime.Stack(buf, false)]\n}\n\nfunc (m *RWMutexTracker) RUnlock() {\n\tatomic.AddInt32(&m.nhaver, -1)\n\n\tgid := GoroutineID()\n\tm.hmu.Lock()\n\tdelete(m.holdr, gid)\n\tm.hmu.Unlock()\n\n\tm.mu.RUnlock()\n}\n\n// Holder returns the stack trace of the current exclusive lock holder's stack\n// when it acquired the lock (with Lock). It returns the empty string if the lock\n// is not currently held.\nfunc (m *RWMutexTracker) Holder() string {\n\tm.hmu.Lock()\n\tdefer m.hmu.Unlock()\n\treturn string(m.holder)\n}\n"
  },
  {
    "path": "syncutil/syncdebug/syncdebug_test.go",
    "content": "/*\nCopyright 2013 The Perkeep Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage syncdebug\n\nimport \"testing\"\n\nfunc TestGoroutineID(t *testing.T) {\n\tc := make(chan int64, 2)\n\tc <- GoroutineID()\n\tgo func() {\n\t\tc <- GoroutineID()\n\t}()\n\tif a, b := <-c, <-c; a == b {\n\t\tt.Errorf(\"both goroutine IDs were %d; expected different\", a)\n\t}\n}\n"
  },
  {
    "path": "syncutil/syncutil.go",
    "content": "/*\nCopyright 2014 The Perkeep Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package syncutil provides various synchronization utilities.\npackage syncutil // import \"go4.org/syncutil\"\n"
  },
  {
    "path": "testing/functest/functest.go",
    "content": "/*\nCopyright 2016 The go4.org Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package functest contains utilities to ease writing table-driven\n// tests for pure functions and method.\n//\n// Example:\n//\n//\tfunc square(v int) int { return v * v }\n//\n//\tfunc TestFunc(t *testing.T) {\n//\t\tf := functest.New(square)\n//\t\tf.Test(t,\n//\t\t\tf.In(0).Want(0),\n//\t\t\tf.In(1).Want(1),\n//\t\t\tf.In(2).Want(4),\n//\t\t\tf.In(3).Want(9),\n//\t\t)\n//\t}\n//\n// It can test whether things panic:\n//\n//\tf := functest.New(condPanic)\n//\tf.Test(t,\n//\t\tf.In(false, nil),\n//\t\tf.In(true, \"boom\").Check(func(res functest.Result) error {\n//\t\t\tif res.Panic != \"boom\" {\n//\t\t\t\treturn fmt.Errorf(\"panic = %v; want boom\", res.Panic)\n//\t\t\t}\n//\t\t\treturn nil\n//\t\t}),\n//\t\tf.In(true, nil).Check(func(res functest.Result) error {\n//\t\t\tif res.Panic != nil || res.Paniked {\n//\t\t\t\treturn fmt.Errorf(\"expected panic with nil value, got: %+v\", res)\n//\t\t\t}\n//\t\t\treturn nil\n//\t\t}),\n//\t)\n//\n// If a test fails, functest does its best to format a useful error message. 
You can also\n// name test cases:\n//\n//\t\tf := functest.New(square)\n//\t\tf.Test(t,\n//\t\t\tf.In(0).Want(0),\n//\t\t\tf.In(1).Want(111),\n//\t\t\tf.In(2).Want(4),\n//\t\t\tf.Case(\"three\").In(3).Want(999),\n//\t\t)\n//\n// Which would fail like:\n//\n//\t--- FAIL: TestSquare (0.00s)\n//\tfunctest.go:304: square(1) = 1; want 111\n//\tfunctest.go:304: three: square(3) = 9; want 999\n//\tFAIL\n//\npackage functest\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n)\n\n// Func is a wrapper around a func to test.\n// It must be created with New.\ntype Func struct {\n\t// Name is the name of the function to use in error messages.\n\t// In most cases it is initialized by New, unless the function\n\t// being tested is an anonymous function.\n\tName string\n\n\tf  interface{}   // the func\n\tfv reflect.Value // of f\n}\n\nvar removePunc = strings.NewReplacer(\"(\", \"\", \")\", \"\", \"*\", \"\")\n\n// New wraps a function for testing.\n// The provided value f must be a function or method.\nfunc New(f interface{}) *Func {\n\tfv := reflect.ValueOf(f)\n\tif fv.Kind() != reflect.Func {\n\t\tpanic(\"argument to New must be a func\")\n\t}\n\tvar name string\n\trf := runtime.FuncForPC(fv.Pointer())\n\tif rf != nil {\n\t\tname = rf.Name()\n\t\tif methType := strings.LastIndex(name, \".(\"); methType != -1 {\n\t\t\tname = removePunc.Replace(name[methType+2:])\n\t\t} else if lastDot := strings.LastIndex(name, \".\"); lastDot != -1 {\n\t\t\tname = name[lastDot+1:]\n\t\t\tif strings.HasPrefix(name, \"func\") {\n\t\t\t\t// Looks like some anonymous function. 
Prefer naming it \"f\".\n\t\t\t\tname = \"f\"\n\t\t\t}\n\t\t}\n\t} else {\n\t\tname = \"f\"\n\t}\n\n\treturn &Func{\n\t\tf:    f,\n\t\tfv:   fv,\n\t\tName: name,\n\t}\n}\n\n// Result is the result of a function call, for use with Check.\ntype Result struct {\n\t// Result is the return value(s) of the function.\n\tResult []interface{}\n\n\t// Panic is the panic value of the function.\n\tPanic interface{}\n\n\t// Panicked is whether the function panicked.\n\t// It can be used to determine whether a function\n\t// called panic(nil).\n\tPanicked bool\n}\n\n// Case is a test case to run.\n//\n// Test cases can be either named or unnamed, depending on how they're\n// created. Naming cases is optional; all failure messages aim to\n// have useful output and include the input to the function.\n//\n// Unless the function's arity is zero, all cases should have their input\n// set with In.\n//\n// The case's expected output can be set with Want and/or Check.\ntype Case struct {\n\tf        *Func\n\tin       []interface{}\n\tname     string        // optional\n\twant     []interface{} // non-nil if we check args\n\tcheckRes []func(Result) error\n}\n\n// Case returns a new named case. It should be modified before use.\nfunc (f *Func) Case(name string) *Case {\n\treturn &Case{f: f, name: name}\n}\n\n// In returns a new unnamed test case. 
It will be identified by its arguments\n// only.\nfunc (f *Func) In(args ...interface{}) *Case {\n\treturn &Case{f: f, in: args}\n}\n\n// In sets the arguments of c used to call f.\nfunc (c *Case) In(args ...interface{}) *Case {\n\tc.in = args\n\treturn c\n}\n\n// Want sets the expected result values of the test case.\n// Want modifies and returns c.\n// Callers may use both Want and Check.\nfunc (c *Case) Want(result ...interface{}) *Case {\n\tif c.want != nil {\n\t\tpanic(\"duplicate Want declared on functest.Case\")\n\t}\n\tc.want = result\n\tnumOut := c.f.fv.Type().NumOut()\n\tif len(result) != numOut {\n\t\t// TODO: let caller provide only interesting result values, or\n\t\t// provide matchers.\n\t\tpanic(fmt.Sprintf(\"Want called with %d values; function returns %d values\", len(result), numOut))\n\t}\n\treturn c\n}\n\n// Check adds a function to check the result of the case's function\n// call. It is a low-level function when Want is insufficient.\n// For instance, it allows checking whether a function panics.\n// If no checker functions are registered, function panics are considered\n// a test failure.\n//\n// Check modifies and returns c.\n// Callers may use both Want and Check, and may use Check multiple times.\nfunc (c *Case) Check(checker func(Result) error) *Case {\n\tc.checkRes = append(c.checkRes, checker)\n\treturn c\n}\n\n// Test runs the provided test cases against f.\n// If any test cases fail, t.Errorf is called.\nfunc (f *Func) Test(t testing.TB, cases ...*Case) {\n\tfor _, tc := range cases {\n\t\tf.testCase(t, tc)\n\t}\n}\n\nfunc (f *Func) checkCall(in []reflect.Value) (out []reflect.Value, didPanic bool, panicValue interface{}) {\n\tdefer func() { panicValue = recover() }()\n\tdidPanic = true\n\tout = f.fv.Call(in)\n\tdidPanic = false\n\treturn\n}\n\nvar nilEmptyInterface = reflect.Zero(reflect.TypeOf((*interface{})(nil)).Elem())\n\nfunc (f *Func) testCase(t testing.TB, c *Case) {\n\t// Non-variadic:\n\tft := f.fv.Type()\n\tinReg := 
ft.NumIn()\n\tif ft.IsVariadic() {\n\t\tinReg--\n\t\tif len(c.in) < inReg {\n\t\t\tc.errorf(t, \": input has %d arguments; func requires at least %d\", len(c.in), inReg)\n\t\t\treturn\n\t\t}\n\t} else if len(c.in) != ft.NumIn() {\n\t\tc.errorf(t, \": input has %d arguments; func takes %d\", len(c.in), ft.NumIn())\n\t\treturn\n\t}\n\n\tinv := make([]reflect.Value, len(c.in))\n\tfor i, v := range c.in {\n\t\tif v == nil {\n\t\t\tinv[i] = nilEmptyInterface\n\t\t} else {\n\t\t\tinv[i] = reflect.ValueOf(v)\n\t\t}\n\t}\n\tgot, didPanic, panicValue := f.checkCall(inv)\n\n\tvar goti []interface{}\n\tif !didPanic {\n\t\tgoti = make([]interface{}, len(got))\n\t\tfor i, rv := range got {\n\t\t\tgoti[i] = rv.Interface()\n\t\t}\n\t}\n\n\tif c.want != nil {\n\t\tif !reflect.DeepEqual(goti, c.want) {\n\t\t\tc.errorf(t, \" = %v; want %v\", formatRes(goti), formatRes(c.want))\n\t\t}\n\t}\n\tfor _, checkRes := range c.checkRes {\n\t\terr := checkRes(Result{\n\t\t\tResult:   goti,\n\t\t\tPanic:    panicValue,\n\t\t\tPanicked: didPanic,\n\t\t})\n\t\tif err != nil {\n\t\t\tc.errorf(t, \": %v\", err)\n\t\t}\n\t}\n\tif didPanic && (c.checkRes == nil) {\n\t\tc.errorf(t, \": panicked with %v\", panicValue)\n\t}\n}\n\nfunc formatRes(res []interface{}) string {\n\tvar buf bytes.Buffer\n\tif len(res) != 1 {\n\t\tbuf.WriteByte('(')\n\t}\n\tformatValues(&buf, res)\n\tif len(res) != 1 {\n\t\tbuf.WriteByte(')')\n\t}\n\treturn buf.String()\n}\n\nfunc formatValues(buf *bytes.Buffer, vals []interface{}) {\n\tfor i, v := range vals {\n\t\tif i != 0 {\n\t\t\tbuf.WriteString(\", \")\n\t\t}\n\t\tfmt.Fprintf(buf, \"%#v\", v)\n\t}\n}\n\nfunc (c *Case) errorf(t testing.TB, format string, args ...interface{}) {\n\tvar buf bytes.Buffer\n\tif c.name != \"\" {\n\t\tfmt.Fprintf(&buf, \"%s: \", c.name)\n\t}\n\tbuf.WriteString(c.f.Name)\n\tbuf.WriteString(\"(\")\n\tformatValues(&buf, c.in)\n\tbuf.WriteString(\")\")\n\tfmt.Fprintf(&buf, format, args...)\n\tt.Errorf(\"%s\", buf.Bytes())\n}\n"
  },
  {
    "path": "testing/functest/functest_test.go",
    "content": "package functest\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"testing\"\n)\n\n// trec is a testing.TB which logs Errorf calls to buf\ntype trec struct {\n\ttesting.TB // crash on unimplemented methods\n\tbuf        bytes.Buffer\n}\n\nfunc (t *trec) Errorf(format string, args ...interface{}) {\n\tt.buf.WriteString(\"ERR: \")\n\tfmt.Fprintf(&t.buf, format, args...)\n\tt.buf.WriteByte('\\n')\n}\n\nfunc (t *trec) Logf(format string, args ...interface{}) {\n\tt.buf.WriteString(\"LOG: \")\n\tfmt.Fprintf(&t.buf, format, args...)\n\tt.buf.WriteByte('\\n')\n}\n\nfunc (t *trec) String() string { return t.buf.String() }\n\nfunc add(a, b int) int { return a + b }\n\nfunc TestBasic(t *testing.T) {\n\tf := New(add)\n\ttrec := new(trec)\n\tf.Test(trec,\n\t\tf.In(1, 2).Want(3),\n\t\tf.In(5, 6).Want(100),\n\t\tf.Case(\"also wrong\").In(5, 6).Want(101),\n\t)\n\twant := `ERR: add(5, 6) = 11; want 100\nERR: also wrong: add(5, 6) = 11; want 101\n`\n\tif got := trec.String(); got != want {\n\t\tt.Errorf(\"Output mismatch.\\nGot:\\n%v\\nWant:\\n%v\\n\", got, want)\n\t}\n}\n\nfunc TestBasic_Strings(t *testing.T) {\n\tconcat := func(a, b string) string { return a + b }\n\tf := New(concat)\n\tf.Name = \"concat\"\n\ttrec := new(trec)\n\tf.Test(trec,\n\t\tf.In(\"a\", \"b\").Want(\"ab\"),\n\t\tf.In(\"a\", \"b\\x00\").Want(\"ab\"),\n\t)\n\twant := `ERR: concat(\"a\", \"b\\x00\") = \"ab\\x00\"; want \"ab\"\n`\n\tif got := trec.String(); got != want {\n\t\tt.Errorf(\"Output mismatch.\\nGot:\\n%v\\nWant:\\n%v\\n\", got, want)\n\t}\n}\n\nfunc TestVariadic(t *testing.T) {\n\tsumVar := func(vals ...int) (sum int) {\n\t\tfor _, v := range vals {\n\t\t\tsum += v\n\t\t}\n\t\treturn\n\t}\n\n\tf := New(sumVar)\n\tf.Name = \"sumVar\"\n\ttrec := new(trec)\n\tf.Test(trec,\n\t\tf.In().Want(0),\n\t\tf.In().Want(100),\n\t\tf.In(1).Want(1),\n\t\tf.In(1).Want(100),\n\t\tf.In(1, 2).Want(3),\n\t\tf.In(1, 2, 3).Want(6),\n\t\tf.In(1, 2, 3).Want(100),\n\t)\n\twant := `ERR: sumVar() = 0; want 100\nERR: 
sumVar(1) = 1; want 100\nERR: sumVar(1, 2, 3) = 6; want 100\n`\n\tif got := trec.String(); got != want {\n\t\tt.Errorf(\"Output mismatch.\\nGot:\\n%v\\nWant:\\n%v\\n\", got, want)\n\t}\n}\n\nfunc condPanic(doPanic bool, panicValue interface{}) {\n\tif doPanic {\n\t\tpanic(panicValue)\n\t}\n}\n\nfunc TestPanic(t *testing.T) {\n\tf := New(condPanic)\n\tf.Name = \"condPanic\"\n\ttrec := new(trec)\n\tf.Test(trec,\n\t\tf.In(false, nil),\n\t\tf.In(true, \"boom\").Check(func(res Result) error {\n\t\t\ttrec.Logf(\"Got res: %+v\", res)\n\t\t\tif res.Panic != \"boom\" {\n\t\t\t\treturn fmt.Errorf(\"panic = %v; want boom\", res.Panic)\n\t\t\t}\n\t\t\treturn nil\n\t\t}),\n\t\tf.Case(\"panic with nil\").In(true, nil),\n\t)\n\twant := `LOG: Got res: {Result:[] Panic:boom Panicked:true}\nERR: panic with nil: condPanic(true, <nil>): panicked with panic called with nil argument\n`\n\tif got := trec.String(); got != want {\n\t\tt.Errorf(\"Output mismatch.\\nGot:\\n%v\\nWant:\\n%v\\n\", got, want)\n\t}\n}\n\nfunc TestName_AutoFunc(t *testing.T) {\n\ttestName(t, New(add), \"add\")\n}\n\ntype SomeType struct{}\n\nfunc (t *SomeType) SomeMethod(int) int { return 123 }\n\nfunc TestName_AutoMethod(t *testing.T) {\n\ttestName(t, New((*SomeType).SomeMethod), \"SomeType.SomeMethod\")\n}\n\nfunc testName(t *testing.T, f *Func, want string) {\n\tif f.Name != want {\n\t\tt.Errorf(\"name = %q; want %q\", f.Name, want)\n\t}\n}\n"
  },
  {
    "path": "types/types.go",
    "content": "/*\nCopyright 2013 Google Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package types provides various common types.\npackage types // import \"go4.org/types\"\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nvar null_b = []byte(\"null\")\n\n// NopCloser is an io.Closer that does nothing.\nvar NopCloser io.Closer = CloseFunc(func() error { return nil })\n\n// EmptyBody is a ReadCloser that returns EOF on Read and does nothing\n// on Close.\nvar EmptyBody io.ReadCloser = ioutil.NopCloser(strings.NewReader(\"\"))\n\n// Time3339 is a time.Time which encodes to and from JSON\n// as an RFC 3339 time in UTC.\ntype Time3339 time.Time\n\nvar (\n\t_ json.Marshaler   = Time3339{}\n\t_ json.Unmarshaler = (*Time3339)(nil)\n)\n\nfunc (t Time3339) String() string {\n\treturn time.Time(t).UTC().Format(time.RFC3339Nano)\n}\n\nfunc (t Time3339) MarshalJSON() ([]byte, error) {\n\tif t.Time().IsZero() {\n\t\treturn null_b, nil\n\t}\n\treturn json.Marshal(t.String())\n}\n\nfunc (t *Time3339) UnmarshalJSON(b []byte) error {\n\tif bytes.Equal(b, null_b) {\n\t\t*t = Time3339{}\n\t\treturn nil\n\t}\n\tif len(b) < 2 || b[0] != '\"' || b[len(b)-1] != '\"' {\n\t\treturn fmt.Errorf(\"types: failed to unmarshal non-string value %q as an RFC 3339 time\", b)\n\t}\n\ts := string(b[1 : len(b)-1])\n\tif s == \"\" {\n\t\t*t = Time3339{}\n\t\treturn nil\n\t}\n\ttm, err := time.Parse(time.RFC3339Nano, 
s)\n\tif err != nil {\n\t\tif strings.HasPrefix(s, \"0000-00-00T00:00:00\") {\n\t\t\t*t = Time3339{}\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\t*t = Time3339(tm)\n\treturn nil\n}\n\n// ParseTime3339OrZero parses a string in RFC3339 format. If it's invalid,\n// the zero time value is returned instead.\nfunc ParseTime3339OrZero(v string) Time3339 {\n\tt, err := time.Parse(time.RFC3339Nano, v)\n\tif err != nil {\n\t\treturn Time3339{}\n\t}\n\treturn Time3339(t)\n}\n\nfunc ParseTime3339OrNil(v string) *Time3339 {\n\tt, err := time.Parse(time.RFC3339Nano, v)\n\tif err != nil {\n\t\treturn nil\n\t}\n\ttm := Time3339(t)\n\treturn &tm\n}\n\n// Time returns the time as a time.Time with slightly less stutter\n// than a manual conversion.\nfunc (t Time3339) Time() time.Time {\n\treturn time.Time(t)\n}\n\n// IsAnyZero returns whether the time is Go zero or Unix zero.\nfunc (t *Time3339) IsAnyZero() bool {\n\treturn t == nil || time.Time(*t).IsZero() || time.Time(*t).Unix() == 0\n}\n\n// ByTime sorts times.\ntype ByTime []time.Time\n\nfunc (s ByTime) Len() int           { return len(s) }\nfunc (s ByTime) Less(i, j int) bool { return s[i].Before(s[j]) }\nfunc (s ByTime) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }\n\n// NewOnceCloser returns a Closer wrapping c which only calls Close on c\n// once. Subsequent calls to Close return nil.\nfunc NewOnceCloser(c io.Closer) io.Closer {\n\treturn &onceCloser{c: c}\n}\n\ntype onceCloser struct {\n\tmu sync.Mutex\n\tc  io.Closer\n}\n\nfunc (c *onceCloser) Close() error {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tif c.c == nil {\n\t\treturn nil\n\t}\n\terr := c.c.Close()\n\tc.c = nil\n\treturn err\n}\n\n// CloseFunc implements io.Closer with a function.\ntype CloseFunc func() error\n\nfunc (fn CloseFunc) Close() error { return fn() }\n"
  },
  {
    "path": "types/types_test.go",
    "content": "/*\nCopyright 2013 Google Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage types\n\nimport (\n\t\"encoding/json\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n)\n\nfunc TestTime3339(t *testing.T) {\n\ttm := time.Unix(123, 456)\n\tt3 := Time3339(tm)\n\ttype O struct {\n\t\tSomeTime Time3339 `json:\"someTime\"`\n\t}\n\to := &O{SomeTime: t3}\n\tgot, err := json.Marshal(o)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tgoodEnc := \"{\\\"someTime\\\":\\\"1970-01-01T00:02:03.000000456Z\\\"}\"\n\tif string(got) != goodEnc {\n\t\tt.Errorf(\"Encoding wrong.\\n Got: %q\\nWant: %q\", got, goodEnc)\n\t}\n\togot := &O{}\n\terr = json.Unmarshal([]byte(goodEnc), ogot)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif !tm.Equal(ogot.SomeTime.Time()) {\n\t\tt.Errorf(\"Unmarshal got time %v; want %v\", ogot.SomeTime.Time(), tm)\n\t}\n}\n\nfunc TestTime3339_Marshal(t *testing.T) {\n\ttests := []struct {\n\t\tin   time.Time\n\t\twant string\n\t}{\n\t\t{time.Time{}, \"null\"},\n\t\t{time.Unix(1, 0), `\"1970-01-01T00:00:01Z\"`},\n\t}\n\tfor i, tt := range tests {\n\t\tgot, err := Time3339(tt.in).MarshalJSON()\n\t\tif err != nil {\n\t\t\tt.Errorf(\"%d. marshal(%v) got error: %v\", i, tt.in, err)\n\t\t\tcontinue\n\t\t}\n\t\tif string(got) != tt.want {\n\t\t\tt.Errorf(\"%d. 
marshal(%v) = %q; want %q\", i, tt.in, got, tt.want)\n\t\t}\n\t}\n}\n\nfunc TestTime3339_empty(t *testing.T) {\n\ttests := []struct {\n\t\tenc string\n\t\tz   bool\n\t}{\n\t\t{enc: \"null\", z: true},\n\t\t{enc: `\"\"`, z: true},\n\t\t{enc: \"0000-00-00T00:00:00Z\", z: true},\n\t\t{enc: \"0001-01-01T00:00:00Z\", z: true},\n\t\t{enc: \"1970-01-01T00:00:00Z\", z: true},\n\t\t{enc: \"2001-02-03T04:05:06Z\", z: false},\n\t\t{enc: \"2001-02-03T04:05:06+06:00\", z: false},\n\t\t{enc: \"2001-02-03T04:05:06-06:00\", z: false},\n\t\t{enc: \"2001-02-03T04:05:06.123456789Z\", z: false},\n\t\t{enc: \"2001-02-03T04:05:06.123456789+06:00\", z: false},\n\t\t{enc: \"2001-02-03T04:05:06.123456789-06:00\", z: false},\n\t}\n\tfor _, tt := range tests {\n\t\tvar tm Time3339\n\t\tenc := tt.enc\n\t\tif strings.Contains(enc, \"T\") {\n\t\t\tenc = \"\\\"\" + enc + \"\\\"\"\n\t\t}\n\t\terr := json.Unmarshal([]byte(enc), &tm)\n\t\tif err != nil {\n\t\t\tt.Errorf(\"unmarshal %q = %v\", enc, err)\n\t\t}\n\t\tif tm.IsAnyZero() != tt.z {\n\t\t\tt.Errorf(\"unmarshal %q = %v (%d), %v; zero=%v; want %v\", tt.enc, tm.Time(), tm.Time().Unix(), err,\n\t\t\t\t!tt.z, tt.z)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "wkfs/gcs/gcs.go",
    "content": "/*\nCopyright 2014 The Perkeep Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package gcs registers a Google Cloud Storage filesystem at the\n// well-known /gcs/ filesystem path if the current machine is running\n// on Google Compute Engine.\n//\n// It was initially only meant for small files, and as such, it can only\n// read files smaller than 1MB for now.\npackage gcs // import \"go4.org/wkfs/gcs\"\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\t\"cloud.google.com/go/compute/metadata\"\n\t\"cloud.google.com/go/storage\"\n\t\"go4.org/wkfs\"\n\t\"golang.org/x/net/context\"\n\t\"golang.org/x/oauth2\"\n\t\"golang.org/x/oauth2/google\"\n\t\"google.golang.org/api/option\"\n)\n\n// Max size for all files read, because we use a bytes.Reader as our file\n// reader, instead of storage.NewReader. This is because we get all wkfs.File\n// methods for free by embedding a bytes.Reader. 
This filesystem was only supposed\n// to be for configuration data only, so this is ok for now.\nconst maxSize = 1 << 20\n\nfunc init() {\n\tif !metadata.OnGCE() {\n\t\treturn\n\t}\n\thc, err := google.DefaultClient(oauth2.NoContext)\n\tif err != nil {\n\t\tregisterBrokenFS(fmt.Errorf(\"could not get http client for context: %v\", err))\n\t\treturn\n\t}\n\tctx := context.Background()\n\tsc, err := storage.NewClient(ctx, option.WithHTTPClient(hc))\n\tif err != nil {\n\t\tregisterBrokenFS(fmt.Errorf(\"could not get cloud storage client: %v\", err))\n\t\treturn\n\t}\n\twkfs.RegisterFS(\"/gcs/\", &gcsFS{\n\t\tctx: ctx,\n\t\tsc:  sc,\n\t})\n}\n\ntype gcsFS struct {\n\tctx context.Context\n\tsc  *storage.Client\n\terr error // sticky error\n}\n\nfunc registerBrokenFS(err error) {\n\twkfs.RegisterFS(\"/gcs/\", &gcsFS{\n\t\terr: err,\n\t})\n}\n\nfunc (fs *gcsFS) parseName(name string) (bucket, fileName string, err error) {\n\tif fs.err != nil {\n\t\treturn \"\", \"\", fs.err\n\t}\n\tname = strings.TrimPrefix(name, \"/gcs/\")\n\ti := strings.Index(name, \"/\")\n\tif i < 0 {\n\t\treturn name, \"\", nil\n\t}\n\treturn name[:i], name[i+1:], nil\n}\n\n// Open opens the named file for reading. 
It returns an error if the file size\n// is larger than 1 << 20.\nfunc (fs *gcsFS) Open(name string) (wkfs.File, error) {\n\tbucket, fileName, err := fs.parseName(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tobj := fs.sc.Bucket(bucket).Object(fileName)\n\tattrs, err := obj.Attrs(fs.ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsize := attrs.Size\n\tif size > maxSize {\n\t\treturn nil, fmt.Errorf(\"file %s too large (%d bytes) for /gcs/ filesystem\", name, size)\n\t}\n\trc, err := obj.NewReader(fs.ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rc.Close()\n\n\tslurp, err := ioutil.ReadAll(io.LimitReader(rc, size))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &file{\n\t\tname:   name,\n\t\tReader: bytes.NewReader(slurp),\n\t}, nil\n}\n\nfunc (fs *gcsFS) Stat(name string) (os.FileInfo, error) { return fs.Lstat(name) }\nfunc (fs *gcsFS) Lstat(name string) (os.FileInfo, error) {\n\tbucket, fileName, err := fs.parseName(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tattrs, err := fs.sc.Bucket(bucket).Object(fileName).Attrs(fs.ctx)\n\tif err == storage.ErrObjectNotExist {\n\t\treturn nil, os.ErrNotExist\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &statInfo{\n\t\tname: attrs.Name,\n\t\tsize: attrs.Size,\n\t}, nil\n}\n\nfunc (fs *gcsFS) MkdirAll(path string, perm os.FileMode) error { return nil }\n\nfunc (fs *gcsFS) OpenFile(name string, flag int, perm os.FileMode) (wkfs.FileWriter, error) {\n\tbucket, fileName, err := fs.parseName(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tswitch flag {\n\tcase os.O_WRONLY | os.O_CREATE | os.O_EXCL:\n\tcase os.O_WRONLY | os.O_CREATE | os.O_TRUNC:\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"Unsupported OpenFlag flag mode %d on Google Cloud Storage\", flag)\n\t}\n\tif flag&os.O_EXCL != 0 {\n\t\tif _, err := fs.Stat(name); err == nil {\n\t\t\treturn nil, os.ErrExist\n\t\t}\n\t}\n\t// TODO(mpl): consider adding perm to the object's ObjectAttrs.Metadata\n\treturn 
fs.sc.Bucket(bucket).Object(fileName).NewWriter(fs.ctx), nil\n}\n\nfunc (fs *gcsFS) Remove(name string) error {\n\tbucket, fileName, err := fs.parseName(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn fs.sc.Bucket(bucket).Object(fileName).Delete(fs.ctx)\n}\n\ntype statInfo struct {\n\tname    string\n\tsize    int64\n\tisDir   bool\n\tmodtime time.Time\n}\n\nfunc (si *statInfo) IsDir() bool        { return si.isDir }\nfunc (si *statInfo) ModTime() time.Time { return si.modtime }\nfunc (si *statInfo) Mode() os.FileMode  { return 0644 }\nfunc (si *statInfo) Name() string       { return path.Base(si.name) }\nfunc (si *statInfo) Size() int64        { return si.size }\nfunc (si *statInfo) Sys() interface{}   { return nil }\n\ntype file struct {\n\tname string\n\t*bytes.Reader\n}\n\nfunc (*file) Close() error   { return nil }\nfunc (f *file) Name() string { return path.Base(f.name) }\nfunc (f *file) Stat() (os.FileInfo, error) {\n\tpanic(\"Stat not implemented on /gcs/ files yet\")\n}\n"
  },
  {
    "path": "wkfs/gcs/gcs_test.go",
    "content": "/*\nCopyright 2015 The Perkeep Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage gcs\n\nimport (\n\t\"bytes\"\n\t\"flag\"\n\t\"io\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"cloud.google.com/go/compute/metadata\"\n\t\"cloud.google.com/go/storage\"\n\t\"go4.org/wkfs\"\n\t\"golang.org/x/net/context\"\n\t\"google.golang.org/api/iterator\"\n)\n\nvar flagBucket = flag.String(\"bucket\", \"\", \"Google Cloud Storage bucket where to run the tests. It should be empty.\")\n\nfunc TestWriteRead(t *testing.T) {\n\tif !metadata.OnGCE() {\n\t\tt.Skipf(\"Not testing on GCE\")\n\t}\n\tif *flagBucket == \"\" {\n\t\tt.Skipf(\"No bucket specified\")\n\t}\n\tctx := context.Background()\n\tcl, err := storage.NewClient(ctx)\n\tit := cl.Bucket(*flagBucket).Objects(ctx, nil)\n\tif _, err := it.Next(); err != iterator.Done {\n\t\tif err == nil {\n\t\t\tt.Fatalf(\"Bucket %v is not empty, aborting test.\", *flagBucket)\n\t\t}\n\t\tt.Fatalf(\"unexpected bucket iteration error: %v\", err)\n\t}\n\n\t// Write to camli-gcs_test.txt\n\tfilename := \"camli-gcs_test.txt\"\n\tgcsPath := \"/gcs/\" + *flagBucket + \"/\" + filename\n\tf, err := wkfs.Create(gcsPath)\n\tif err != nil {\n\t\tt.Fatalf(\"error creating %v: %v\", gcsPath, err)\n\t}\n\tdefer func() {\n\t\tif err := wkfs.Remove(gcsPath); err != nil {\n\t\t\tt.Fatalf(\"error while cleaning up %v: %v\", gcsPath, err)\n\t\t}\n\t}()\n\n\tdata := \"Hello World\"\n\tif _, err := io.Copy(f, strings.NewReader(data)); err != nil 
{\n\t\tt.Fatalf(\"error writing to %v: %v\", gcsPath, err)\n\t}\n\tif err := f.Close(); err != nil {\n\t\tt.Fatalf(\"error closing %v: %v\", gcsPath, err)\n\t}\n\n\t// Read back from camli-gcs_test.txt\n\tg, err := wkfs.Open(gcsPath)\n\tif err != nil {\n\t\tt.Fatalf(\"error opening %v: %v\", gcsPath, err)\n\t}\n\tdefer g.Close()\n\tvar buf bytes.Buffer\n\tif _, err := io.Copy(&buf, g); err != nil {\n\t\tt.Fatalf(\"error reading %v: %v\", gcsPath, err)\n\t}\n\tif buf.String() != data {\n\t\tt.Fatalf(\"error with %v contents: got %v, wanted %v\", gcsPath, buf.String(), data)\n\t}\n}\n"
  },
  {
    "path": "wkfs/wkfs.go",
    "content": "/*\nCopyright 2014 The Perkeep Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package wkfs implements the pluggable \"well-known filesystem\" abstraction layer.\n//\n// Instead of accessing files directly through the operating system\n// using os.Open or os.Stat, code should use wkfs.Open or wkfs.Stat,\n// which first try to intercept paths at well-known top-level\n// directories representing previously-registered mount types,\n// otherwise fall through to the operating system paths.\n//\n// Example of top-level well-known directories that might be\n// registered include /gcs/bucket/object for Google Cloud Storage or\n// /s3/bucket/object for AWS S3.\npackage wkfs // import \"go4.org/wkfs\"\n\nimport (\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"strings\"\n)\n\ntype File interface {\n\tio.Reader\n\tio.ReaderAt\n\tio.Closer\n\tio.Seeker\n\tName() string\n\tStat() (os.FileInfo, error)\n}\n\ntype FileWriter interface {\n\tio.Writer\n\tio.Closer\n}\n\nfunc Open(name string) (File, error)               { return fs(name).Open(name) }\nfunc Stat(name string) (os.FileInfo, error)        { return fs(name).Stat(name) }\nfunc Lstat(name string) (os.FileInfo, error)       { return fs(name).Lstat(name) }\nfunc MkdirAll(path string, perm os.FileMode) error { return fs(path).MkdirAll(path, perm) }\nfunc OpenFile(name string, flag int, perm os.FileMode) (FileWriter, error) {\n\treturn fs(name).OpenFile(name, flag, perm)\n}\nfunc Remove(name string) error { return 
fs(name).Remove(name) }\nfunc Create(name string) (FileWriter, error) {\n\t// like os.Create but WRONLY instead of RDWR because we don't\n\t// expose a Reader here.\n\treturn OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)\n}\n\nfunc fs(name string) FileSystem {\n\tfor pfx, fs := range wkFS {\n\t\tif strings.HasPrefix(name, pfx) {\n\t\t\treturn fs\n\t\t}\n\t}\n\treturn osFS{}\n}\n\ntype osFS struct{}\n\nfunc (osFS) Open(name string) (File, error)               { return os.Open(name) }\nfunc (osFS) Stat(name string) (os.FileInfo, error)        { return os.Stat(name) }\nfunc (osFS) Lstat(name string) (os.FileInfo, error)       { return os.Lstat(name) }\nfunc (osFS) MkdirAll(path string, perm os.FileMode) error { return os.MkdirAll(path, perm) }\nfunc (osFS) OpenFile(name string, flag int, perm os.FileMode) (FileWriter, error) {\n\treturn os.OpenFile(name, flag, perm)\n}\nfunc (osFS) Remove(name string) error { return os.Remove(name) }\n\ntype FileSystem interface {\n\tOpen(name string) (File, error)\n\tOpenFile(name string, flag int, perm os.FileMode) (FileWriter, error)\n\tStat(name string) (os.FileInfo, error)\n\tLstat(name string) (os.FileInfo, error)\n\tMkdirAll(path string, perm os.FileMode) error\n\tRemove(name string) error\n}\n\n// well-known filesystems\nvar wkFS = map[string]FileSystem{}\n\n// RegisterFS registers a well-known filesystem. 
It intercepts\n// anything beginning with prefix (which must start and end with a\n// forward slash) and forwards it to fs.\nfunc RegisterFS(prefix string, fs FileSystem) {\n\tif !strings.HasPrefix(prefix, \"/\") || !strings.HasSuffix(prefix, \"/\") {\n\t\tpanic(\"bogus prefix: \" + prefix)\n\t}\n\tif _, dup := wkFS[prefix]; dup {\n\t\tpanic(\"duplication registration of \" + prefix)\n\t}\n\twkFS[prefix] = fs\n}\n\n// WriteFile writes data to a file named by filename.\n// If the file does not exist, WriteFile creates it with permissions perm;\n// otherwise WriteFile truncates it before writing.\nfunc WriteFile(filename string, data []byte, perm os.FileMode) error {\n\tf, err := OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)\n\tif err != nil {\n\t\treturn err\n\t}\n\tn, err := f.Write(data)\n\tif err == nil && n < len(data) {\n\t\terr = io.ErrShortWrite\n\t}\n\tif err1 := f.Close(); err == nil {\n\t\terr = err1\n\t}\n\treturn err\n}\n\nfunc ReadFile(filename string) ([]byte, error) {\n\tf, err := Open(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer f.Close()\n\treturn ioutil.ReadAll(f)\n}\n"
  },
  {
    "path": "writerutil/writerutil.go",
    "content": "/*\nCopyright 2016 The go4 Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package writerutil contains io.Writer types.\npackage writerutil // import \"go4.org/writerutil\"\n\nimport (\n\t\"bytes\"\n\t\"strconv\"\n)\n\n// PrefixSuffixSaver is an io.Writer which retains the first N bytes\n// and the last N bytes written to it. The Bytes method reconstructs\n// it with a pretty error message.\n// It is copied from os/exec/exec.go of the Go stdlib.\ntype PrefixSuffixSaver struct {\n\tN         int // max size of prefix or suffix\n\tprefix    []byte\n\tsuffix    []byte // ring buffer once len(suffix) == N\n\tsuffixOff int    // offset to write into suffix\n\tskipped   int64\n\n\t// TODO(bradfitz): we could keep one large []byte and use part of it for\n\t// the prefix, reserve space for the '... Omitting N bytes ...' message,\n\t// then the ring buffer suffix, and just rearrange the ring buffer\n\t// suffix when Bytes() is called, but it doesn't seem worth it for\n\t// now just for error messages. It's only ~64KB anyway.\n}\n\nfunc (w *PrefixSuffixSaver) Write(p []byte) (n int, err error) {\n\tlenp := len(p)\n\tp = w.fill(&w.prefix, p)\n\n\t// Only keep the last w.N bytes of suffix data.\n\tif overage := len(p) - w.N; overage > 0 {\n\t\tp = p[overage:]\n\t\tw.skipped += int64(overage)\n\t}\n\tp = w.fill(&w.suffix, p)\n\n\t// w.suffix is full now if p is non-empty. 
Overwrite it in a circle.\n\tfor len(p) > 0 { // 0, 1, or 2 iterations.\n\t\tn := copy(w.suffix[w.suffixOff:], p)\n\t\tp = p[n:]\n\t\tw.skipped += int64(n)\n\t\tw.suffixOff += n\n\t\tif w.suffixOff == w.N {\n\t\t\tw.suffixOff = 0\n\t\t}\n\t}\n\treturn lenp, nil\n}\n\n// fill appends up to len(p) bytes of p to *dst, such that *dst does not\n// grow larger than w.N. It returns the un-appended suffix of p.\nfunc (w *PrefixSuffixSaver) fill(dst *[]byte, p []byte) (pRemain []byte) {\n\tif remain := w.N - len(*dst); remain > 0 {\n\t\tadd := minInt(len(p), remain)\n\t\t*dst = append(*dst, p[:add]...)\n\t\tp = p[add:]\n\t}\n\treturn p\n}\n\n// Bytes returns a slice of the bytes, or a copy of the bytes, retained by w.\n// If more bytes than could be retained were written to w, it returns a\n// concatenation of the N first bytes, a message for how many bytes were dropped,\n// and the N last bytes.\nfunc (w *PrefixSuffixSaver) Bytes() []byte {\n\tif w.suffix == nil {\n\t\treturn w.prefix\n\t}\n\tif w.skipped == 0 {\n\t\treturn append(w.prefix, w.suffix...)\n\t}\n\tvar buf bytes.Buffer\n\tbuf.Grow(len(w.prefix) + len(w.suffix) + 50)\n\tbuf.Write(w.prefix)\n\tbuf.WriteString(\"\\n... omitting \")\n\tbuf.WriteString(strconv.FormatInt(w.skipped, 10))\n\tbuf.WriteString(\" bytes ...\\n\")\n\tbuf.Write(w.suffix[w.suffixOff:])\n\tbuf.Write(w.suffix[:w.suffixOff])\n\treturn buf.Bytes()\n}\n\nfunc minInt(a, b int) int {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}\n"
  },
  {
    "path": "writerutil/writerutil_test.go",
    "content": "/*\nCopyright 2016 The go4 Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage writerutil\n\nimport (\n\t\"io\"\n\t\"testing\"\n)\n\nfunc TestPrefixSuffixSaver(t *testing.T) {\n\ttests := []struct {\n\t\tN      int\n\t\twrites []string\n\t\twant   string\n\t}{\n\t\t{\n\t\t\tN:      2,\n\t\t\twrites: nil,\n\t\t\twant:   \"\",\n\t\t},\n\t\t{\n\t\t\tN:      2,\n\t\t\twrites: []string{\"a\"},\n\t\t\twant:   \"a\",\n\t\t},\n\t\t{\n\t\t\tN:      2,\n\t\t\twrites: []string{\"abc\", \"d\"},\n\t\t\twant:   \"abcd\",\n\t\t},\n\t\t{\n\t\t\tN:      2,\n\t\t\twrites: []string{\"abc\", \"d\", \"e\"},\n\t\t\twant:   \"ab\\n... omitting 1 bytes ...\\nde\",\n\t\t},\n\t\t{\n\t\t\tN:      2,\n\t\t\twrites: []string{\"ab______________________yz\"},\n\t\t\twant:   \"ab\\n... omitting 22 bytes ...\\nyz\",\n\t\t},\n\t\t{\n\t\t\tN:      2,\n\t\t\twrites: []string{\"ab_______________________y\", \"z\"},\n\t\t\twant:   \"ab\\n... omitting 23 bytes ...\\nyz\",\n\t\t},\n\t}\n\tfor i, tt := range tests {\n\t\tw := &PrefixSuffixSaver{N: tt.N}\n\t\tfor _, s := range tt.writes {\n\t\t\tn, err := io.WriteString(w, s)\n\t\t\tif err != nil || n != len(s) {\n\t\t\t\tt.Errorf(\"%d. WriteString(%q) = %v, %v; want %v, %v\", i, s, n, err, len(s), nil)\n\t\t\t}\n\t\t}\n\t\tif got := string(w.Bytes()); got != tt.want {\n\t\t\tt.Errorf(\"%d. Bytes = %q; want %q\", i, got, tt.want)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "xdgdir/example_test.go",
    "content": "/*\nCopyright 2017 The go4 Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage xdgdir_test\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"go4.org/xdgdir\"\n)\n\nfunc Example() {\n\t// Print the absolute path of the current user's XDG_CONFIG_DIR.\n\tfmt.Println(xdgdir.Config.Path())\n\n\t// Read a file from $XDG_CONFIG_DIR/myconfig.json.\n\t// This will search for a file named \"myconfig.json\" inside\n\t// $XDG_CONFIG_DIR and then each entry inside $XDG_CONFIG_DIRS.\n\t// It opens and returns the first file it finds, or returns an error.\n\tif f, err := xdgdir.Data.Create(\"myconfig.json\"); err == nil {\n\t\tfmt.Fprintln(f, \"Hello, World!\")\n\t\tif err := f.Close(); err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t}\n\t} else {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t}\n\n\t// Write a file to $XDG_DATA_DIR/myapp/foo.txt\n\tif f, err := xdgdir.Data.Create(\"myapp/foo.txt\"); err == nil {\n\t\tfmt.Fprintln(f, \"Hello, World!\")\n\t\tf.Close()\n\t} else {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t}\n}\n"
  },
  {
    "path": "xdgdir/xdgdir.go",
    "content": "/*\nCopyright 2017 The go4 Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\n// Package xdgdir implements the Free Desktop Base Directory\n// specification for locating directories.\n//\n// The specification is at\n// http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html\npackage xdgdir // import \"go4.org/xdgdir\"\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"os/user\"\n\t\"path/filepath\"\n\t\"syscall\"\n)\n\n// Directories defined by the specification.\nvar (\n\tData    Dir\n\tConfig  Dir\n\tCache   Dir\n\tRuntime Dir\n)\n\nfunc init() {\n\t// Placed in init for the sake of readable docs.\n\tData = Dir{\n\t\tenv:          \"XDG_DATA_HOME\",\n\t\tdirsEnv:      \"XDG_DATA_DIRS\",\n\t\tfallback:     \".local/share\",\n\t\tdirsFallback: []string{\"/usr/local/share\", \"/usr/share\"},\n\t}\n\tConfig = Dir{\n\t\tenv:          \"XDG_CONFIG_HOME\",\n\t\tdirsEnv:      \"XDG_CONFIG_DIRS\",\n\t\tfallback:     \".config\",\n\t\tdirsFallback: []string{\"/etc/xdg\"},\n\t}\n\tCache = Dir{\n\t\tenv:      \"XDG_CACHE_HOME\",\n\t\tfallback: \".cache\",\n\t}\n\tRuntime = Dir{\n\t\tenv:       \"XDG_RUNTIME_DIR\",\n\t\tuserOwned: true,\n\t}\n}\n\n// A Dir is a logical base directory along with additional search\n// directories.\ntype Dir struct {\n\t// env is the name of the environment variable for the base directory\n\t// relative to which files should be written.\n\tenv string\n\n\t// dirsEnv is the name of the environment variable 
containing\n\t// preference-ordered base directories to search for files.\n\tdirsEnv string\n\n\t// fallback is the home-relative path to use if the variable named by\n\t// env is not set.\n\tfallback string\n\n\t// dirsFallback is the list of paths to use if the variable named by\n\t// dirsEnv is not set.\n\tdirsFallback []string\n\n\t// If userOwned is true, then for the directory to be considered\n\t// valid, it must be owned by the user with the mode 700.  This is\n\t// only used for XDG_RUNTIME_DIR.\n\tuserOwned bool\n}\n\n// String returns the name of the primary environment variable for the\n// directory.\nfunc (d Dir) String() string {\n\tif d.env == \"\" {\n\t\tpanic(\"xdgdir.Dir.String() on zero Dir\")\n\t}\n\treturn d.env\n}\n\n// Path returns the absolute path of the primary directory, or an empty\n// string if there's no suitable directory present.  This is the path\n// that should be used for writing files.\nfunc (d Dir) Path() string {\n\tif d.env == \"\" {\n\t\tpanic(\"xdgdir.Dir.Path() on zero Dir\")\n\t}\n\tp := d.path()\n\tif p != \"\" && d.userOwned {\n\t\tinfo, err := os.Stat(p)\n\t\tif err != nil {\n\t\t\treturn \"\"\n\t\t}\n\t\tif !info.IsDir() || info.Mode().Perm() != 0700 {\n\t\t\treturn \"\"\n\t\t}\n\t\tst, ok := info.Sys().(*syscall.Stat_t)\n\t\tif !ok || int(st.Uid) != geteuid() {\n\t\t\treturn \"\"\n\t\t}\n\t}\n\treturn p\n}\n\nfunc (d Dir) path() string {\n\tif e := getenv(d.env); isValidPath(e) {\n\t\treturn e\n\t}\n\tif d.fallback == \"\" {\n\t\treturn \"\"\n\t}\n\thome := findHome()\n\tif home == \"\" {\n\t\treturn \"\"\n\t}\n\tp := filepath.Join(home, d.fallback)\n\tif !isValidPath(p) {\n\t\treturn \"\"\n\t}\n\treturn p\n}\n\n// SearchPaths returns the list of paths (in descending order of\n// preference) to search for files.\nfunc (d Dir) SearchPaths() []string {\n\tif d.env == \"\" {\n\t\tpanic(\"xdgdir.Dir.SearchPaths() on zero Dir\")\n\t}\n\tvar paths []string\n\tif p := d.Path(); p != \"\" {\n\t\tpaths = append(paths, 
p)\n\t}\n\tif d.dirsEnv == \"\" {\n\t\treturn paths\n\t}\n\te := getenv(d.dirsEnv)\n\tif e == \"\" {\n\t\tpaths = append(paths, d.dirsFallback...)\n\t\treturn paths\n\t}\n\tepaths := filepath.SplitList(e)\n\tn := 0\n\tfor _, p := range epaths {\n\t\tif isValidPath(p) {\n\t\t\tepaths[n] = p\n\t\t\tn++\n\t\t}\n\t}\n\tpaths = append(paths, epaths[:n]...)\n\treturn paths\n}\n\n// Open opens the named file inside the directory for reading.  If the\n// directory has multiple search paths, each path is checked in order\n// for the file and the first one found is opened.\nfunc (d Dir) Open(name string) (*os.File, error) {\n\tif d.env == \"\" {\n\t\treturn nil, errors.New(\"xdgdir: Open on zero Dir\")\n\t}\n\tpaths := d.SearchPaths()\n\tif len(paths) == 0 {\n\t\treturn nil, fmt.Errorf(\"xdgdir: open %s: %s is invalid or not set\", name, d.env)\n\t}\n\tvar firstErr error\n\tfor _, p := range paths {\n\t\tf, err := os.Open(filepath.Join(p, name))\n\t\tif err == nil {\n\t\t\treturn f, nil\n\t\t} else if !os.IsNotExist(err) {\n\t\t\tfirstErr = err\n\t\t}\n\t}\n\tif firstErr != nil {\n\t\treturn nil, firstErr\n\t}\n\treturn nil, &os.PathError{\n\t\tOp:   \"Open\",\n\t\tPath: filepath.Join(\"$\"+d.env, name),\n\t\tErr:  os.ErrNotExist,\n\t}\n}\n\n// Create creates the named file inside the directory mode 0666 (before\n// umask), truncating it if it already exists.  
Parent directories of\n// the file will be created with mode 0700.\nfunc (d Dir) Create(name string) (*os.File, error) {\n\tif d.env == \"\" {\n\t\treturn nil, errors.New(\"xdgdir: Create on zero Dir\")\n\t}\n\tp := d.Path()\n\tif p == \"\" {\n\t\treturn nil, fmt.Errorf(\"xdgdir: create %s: %s is invalid or not set\", name, d.env)\n\t}\n\tfp := filepath.Join(p, name)\n\tif err := os.MkdirAll(filepath.Dir(fp), 0700); err != nil {\n\t\treturn nil, err\n\t}\n\treturn os.Create(fp)\n}\n\nfunc isValidPath(path string) bool {\n\treturn path != \"\" && filepath.IsAbs(path)\n}\n\n// findHome returns the user's home directory or the empty string if it\n// can't be found.  It can be faked for testing.\nvar findHome = func() string {\n\tif h := getenv(\"HOME\"); h != \"\" {\n\t\treturn h\n\t}\n\tu, err := user.Current()\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn u.HomeDir\n}\n\n// getenv retrieves an environment variable.  It can be faked for testing.\nvar getenv = os.Getenv\n\n// geteuid retrieves the effective user ID of the process.  It can be faked for testing.\nvar geteuid = os.Geteuid\n"
  },
  {
    "path": "xdgdir/xdgdir_test.go",
    "content": "/*\nCopyright 2017 The go4 Authors\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n     http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage xdgdir\n\nimport (\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n)\n\nfunc TestDir_Path(t *testing.T) {\n\ttd := newTempDir(t)\n\tdefer td.cleanup()\n\tallopenDir := td.mkdir(\"allopen\", 0777)\n\treadonlyDir := td.mkdir(\"readonly\", 0400)\n\tsecureDir := td.mkdir(\"secure\", 0700)\n\n\ttests := []struct {\n\t\tdir     Dir\n\t\tenv     env\n\t\tpath    string\n\t\tgeteuid func() int\n\t}{\n\t\t{\n\t\t\tdir:  Data,\n\t\t\tenv:  env{\"HOME\": \"/xHOMEx/me\", \"XDG_DATA_HOME\": \"/foo/data\"},\n\t\t\tpath: \"/foo/data\",\n\t\t},\n\t\t{\n\t\t\tdir:  Data,\n\t\t\tenv:  env{\"HOME\": \"/xHOMEx/me\"},\n\t\t\tpath: \"/xHOMEx/me/.local/share\",\n\t\t},\n\t\t{\n\t\t\tdir:  Data,\n\t\t\tenv:  env{\"HOME\": \"/xHOMEx/me\", \"XDG_DATA_HOME\": \"relative/path\"},\n\t\t\tpath: \"/xHOMEx/me/.local/share\",\n\t\t},\n\t\t{\n\t\t\tdir:  Data,\n\t\t\tenv:  env{},\n\t\t\tpath: \"\",\n\t\t},\n\t\t{\n\t\t\tdir:  Data,\n\t\t\tenv:  env{\"HOME\": \"relative/path\"},\n\t\t\tpath: \"\",\n\t\t},\n\t\t{\n\t\t\tdir:  Config,\n\t\t\tenv:  env{\"HOME\": \"/xHOMEx/me\", \"XDG_CONFIG_HOME\": \"/foo/config\"},\n\t\t\tpath: \"/foo/config\",\n\t\t},\n\t\t{\n\t\t\tdir:  Config,\n\t\t\tenv:  env{\"HOME\": \"/xHOMEx/me\"},\n\t\t\tpath: \"/xHOMEx/me/.config\",\n\t\t},\n\t\t{\n\t\t\tdir:  Config,\n\t\t\tenv:  env{\"HOME\": \"/xHOMEx/me\", \"XDG_CONFIG_HOME\": \"relative/path\"},\n\t\t\tpath: 
\"/xHOMEx/me/.config\",\n\t\t},\n\t\t{\n\t\t\tdir:  Config,\n\t\t\tenv:  env{},\n\t\t\tpath: \"\",\n\t\t},\n\t\t{\n\t\t\tdir:  Cache,\n\t\t\tenv:  env{\"HOME\": \"/xHOMEx/me\", \"XDG_CACHE_HOME\": \"/foo/cache\"},\n\t\t\tpath: \"/foo/cache\",\n\t\t},\n\t\t{\n\t\t\tdir:  Cache,\n\t\t\tenv:  env{\"HOME\": \"/xHOMEx/me\"},\n\t\t\tpath: \"/xHOMEx/me/.cache\",\n\t\t},\n\t\t{\n\t\t\tdir:  Cache,\n\t\t\tenv:  env{\"HOME\": \"/xHOMEx/me\", \"XDG_CACHE_HOME\": \"relative/path\"},\n\t\t\tpath: \"/xHOMEx/me/.cache\",\n\t\t},\n\t\t{\n\t\t\tdir:  Cache,\n\t\t\tenv:  env{},\n\t\t\tpath: \"\",\n\t\t},\n\t\t{\n\t\t\tdir:  Runtime,\n\t\t\tenv:  env{\"XDG_RUNTIME_DIR\": secureDir},\n\t\t\tpath: secureDir,\n\t\t},\n\t\t{\n\t\t\tdir:     Runtime,\n\t\t\tenv:     env{\"XDG_RUNTIME_DIR\": secureDir},\n\t\t\tgeteuid: func() int { return os.Geteuid() + 1 },\n\t\t\tpath:    \"\",\n\t\t},\n\t\t{\n\t\t\tdir:  Runtime,\n\t\t\tenv:  env{\"XDG_RUNTIME_DIR\": readonlyDir},\n\t\t\tpath: \"\",\n\t\t},\n\t\t{\n\t\t\tdir:  Runtime,\n\t\t\tenv:  env{\"XDG_RUNTIME_DIR\": allopenDir},\n\t\t\tpath: \"\",\n\t\t},\n\t\t{\n\t\t\tdir:  Runtime,\n\t\t\tenv:  env{\"HOME\": secureDir},\n\t\t\tpath: \"\",\n\t\t},\n\t\t{\n\t\t\tdir:  Runtime,\n\t\t\tenv:  env{},\n\t\t\tpath: \"\",\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\ttest.env.set()\n\t\tif test.geteuid != nil {\n\t\t\tgeteuid = test.geteuid\n\t\t} else {\n\t\t\tgeteuid = os.Geteuid\n\t\t}\n\t\tif path := test.dir.Path(); path != test.path {\n\t\t\tvar euidMod string\n\t\t\tif test.geteuid != nil {\n\t\t\t\teuidMod = \" (euid modified)\"\n\t\t\t}\n\t\t\tt.Errorf(\"In environment %v%s, %v.Path() = %q; want %q\", test.env, euidMod, test.dir, path, test.path)\n\t\t}\n\t}\n}\n\nfunc TestDir_SearchPaths(t *testing.T) {\n\ttd := newTempDir(t)\n\tdefer td.cleanup()\n\tallopenDir := td.mkdir(\"allopen\", 0777)\n\tsecureDir := td.mkdir(\"secure\", 0700)\n\n\ttests := []struct {\n\t\tdir   Dir\n\t\tenv   env\n\t\tpaths []string\n\t}{\n\t\t{\n\t\t\tdir:   
Data,\n\t\t\tenv:   env{},\n\t\t\tpaths: []string{\"/usr/local/share\", \"/usr/share\"},\n\t\t},\n\t\t{\n\t\t\tdir:   Data,\n\t\t\tenv:   env{\"HOME\": \"/xHOMEx/me\"},\n\t\t\tpaths: []string{\"/xHOMEx/me/.local/share\", \"/usr/local/share\", \"/usr/share\"},\n\t\t},\n\t\t{\n\t\t\tdir:   Data,\n\t\t\tenv:   env{\"HOME\": \"/xHOMEx/me\", \"XDG_DATA_HOME\": \"/foo/data\"},\n\t\t\tpaths: []string{\"/foo/data\", \"/usr/local/share\", \"/usr/share\"},\n\t\t},\n\t\t{\n\t\t\tdir:   Data,\n\t\t\tenv:   env{\"XDG_DATA_HOME\": \"/foo/data\", \"XDG_DATA_DIRS\": \"/mybacon/data\"},\n\t\t\tpaths: []string{\"/foo/data\", \"/mybacon/data\"},\n\t\t},\n\t\t{\n\t\t\tdir:   Data,\n\t\t\tenv:   env{\"XDG_DATA_HOME\": \"/foo/data\", \"XDG_DATA_DIRS\": \"/mybacon/data:/eggs/data\"},\n\t\t\tpaths: []string{\"/foo/data\", \"/mybacon/data\", \"/eggs/data\"},\n\t\t},\n\t\t{\n\t\t\tdir:   Data,\n\t\t\tenv:   env{\"XDG_DATA_HOME\": \"/foo/data\", \"XDG_DATA_DIRS\": \"/mybacon/data:/eggs/data:/woka/woka\"},\n\t\t\tpaths: []string{\"/foo/data\", \"/mybacon/data\", \"/eggs/data\", \"/woka/woka\"},\n\t\t},\n\t\t{\n\t\t\tdir:   Data,\n\t\t\tenv:   env{\"XDG_DATA_HOME\": \"/foo/data\", \"XDG_DATA_DIRS\": \"/mybacon/data:relative/path:/woka/woka\"},\n\t\t\tpaths: []string{\"/foo/data\", \"/mybacon/data\", \"/woka/woka\"},\n\t\t},\n\t\t{\n\t\t\tdir:   Data,\n\t\t\tenv:   env{\"XDG_DATA_HOME\": \"relative/path\", \"XDG_DATA_DIRS\": \"/mybacon/data:relative/path:/woka/woka\"},\n\t\t\tpaths: []string{\"/mybacon/data\", \"/woka/woka\"},\n\t\t},\n\t\t{\n\t\t\tdir:   Data,\n\t\t\tenv:   env{\"XDG_DATA_DIRS\": \"/mybacon/data:/eggs/data:/woka/woka\"},\n\t\t\tpaths: []string{\"/mybacon/data\", \"/eggs/data\", \"/woka/woka\"},\n\t\t},\n\t\t{\n\t\t\tdir:   Config,\n\t\t\tenv:   env{\"XDG_CONFIG_HOME\": \"/foo/config\", \"XDG_CONFIG_DIRS\": \"/mybacon/config:/eggs/config:/woka/woka\"},\n\t\t\tpaths: []string{\"/foo/config\", \"/mybacon/config\", \"/eggs/config\", \"/woka/woka\"},\n\t\t},\n\t\t{\n\t\t\t// Cache 
only has primary dir\n\t\t\tdir:   Cache,\n\t\t\tenv:   env{\"XDG_CACHE_HOME\": \"/foo/cache\", \"XDG_CACHE_DIRS\": \"/mybacon/config:/eggs/config:/woka/woka\"},\n\t\t\tpaths: []string{\"/foo/cache\"},\n\t\t},\n\t\t{\n\t\t\tdir:   Runtime,\n\t\t\tenv:   env{\"XDG_RUNTIME_DIR\": secureDir},\n\t\t\tpaths: []string{secureDir},\n\t\t},\n\t\t{\n\t\t\tdir:   Runtime,\n\t\t\tenv:   env{\"XDG_RUNTIME_DIR\": allopenDir},\n\t\t\tpaths: []string{},\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\ttest.env.set()\n\t\tpaths := test.dir.SearchPaths()\n\t\tif !stringsEqual(paths, test.paths) {\n\t\t\tt.Errorf(\"In environment %v, %v.SearchPaths() = %q; want %q\", test.env, test.dir, paths, test.paths)\n\t\t}\n\t}\n}\n\nfunc TestDir_Open(t *testing.T) {\n\ttd := newTempDir(t)\n\tdefer td.cleanup()\n\tjunkDir := td.mkdir(\"junk\", 0777)\n\tdir1 := td.mkdir(\"dir1\", 0777)\n\tdir2 := td.mkdir(\"dir2\", 0777)\n\tdir3 := td.mkdir(\"dir3\", 0777)\n\ttd.newFile(\"dir1/foo.txt\", \"foo\")\n\ttd.newFile(\"dir1/multiple.txt\", \"1\")\n\ttd.newFile(\"dir2/bar.txt\", \"bar\")\n\ttd.newFile(\"dir2/only2_3.txt\", \"this is 2\")\n\ttd.newFile(\"dir2/multiple.txt\", \"2\")\n\ttd.newFile(\"dir3/multiple.txt\", \"3\")\n\ttd.newFile(\"dir3/only2_3.txt\", \"this is 3\")\n\n\ttests := []struct {\n\t\tdir  Dir\n\t\tenv  env\n\t\tname string\n\n\t\tpath string\n\t\terr  bool\n\t}{\n\t\t{\n\t\t\tdir:  Data,\n\t\t\tenv:  env{},\n\t\t\tname: \"foo.txt\",\n\t\t\terr:  true,\n\t\t},\n\t\t{\n\t\t\tdir:  Data,\n\t\t\tenv:  env{\"XDG_DATA_HOME\": dir1, \"XDG_DATA_DIRS\": junkDir},\n\t\t\tname: \"foo.txt\",\n\t\t\tpath: filepath.Join(dir1, \"foo.txt\"),\n\t\t},\n\t\t{\n\t\t\tdir:  Data,\n\t\t\tenv:  env{\"XDG_DATA_HOME\": junkDir, \"XDG_DATA_DIRS\": junkDir},\n\t\t\tname: \"foo.txt\",\n\t\t\terr:  true,\n\t\t},\n\t\t{\n\t\t\tdir:  Data,\n\t\t\tenv:  env{\"XDG_DATA_HOME\": dir1, \"XDG_DATA_DIRS\": dir2},\n\t\t\tname: \"foo.txt\",\n\t\t\tpath: filepath.Join(dir1, \"foo.txt\"),\n\t\t},\n\t\t{\n\t\t\tdir:  
Data,\n\t\t\tenv:  env{\"XDG_DATA_HOME\": dir1, \"XDG_DATA_DIRS\": dir2},\n\t\t\tname: \"bar.txt\",\n\t\t\tpath: filepath.Join(dir2, \"bar.txt\"),\n\t\t},\n\t\t{\n\t\t\tdir:  Data,\n\t\t\tenv:  env{\"XDG_DATA_HOME\": dir1, \"XDG_DATA_DIRS\": dir2 + \":\" + dir3},\n\t\t\tname: \"NOTREAL.txt\",\n\t\t\terr:  true,\n\t\t},\n\t\t{\n\t\t\tdir:  Data,\n\t\t\tenv:  env{\"XDG_DATA_HOME\": dir1, \"XDG_DATA_DIRS\": dir2 + \":\" + dir3},\n\t\t\tname: \"foo.txt\",\n\t\t\tpath: filepath.Join(dir1, \"foo.txt\"),\n\t\t},\n\t\t{\n\t\t\tdir:  Data,\n\t\t\tenv:  env{\"XDG_DATA_HOME\": dir1, \"XDG_DATA_DIRS\": dir2 + \":\" + dir3},\n\t\t\tname: \"bar.txt\",\n\t\t\tpath: filepath.Join(dir2, \"bar.txt\"),\n\t\t},\n\t\t{\n\t\t\tdir:  Data,\n\t\t\tenv:  env{\"XDG_DATA_HOME\": dir1, \"XDG_DATA_DIRS\": dir2 + \":\" + dir3},\n\t\t\tname: \"multiple.txt\",\n\t\t\tpath: filepath.Join(dir1, \"multiple.txt\"),\n\t\t},\n\t\t{\n\t\t\tdir:  Data,\n\t\t\tenv:  env{\"XDG_DATA_HOME\": dir1, \"XDG_DATA_DIRS\": dir2 + \":\" + dir3},\n\t\t\tname: \"only2_3.txt\",\n\t\t\tpath: filepath.Join(dir2, \"only2_3.txt\"),\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\ttest.env.set()\n\t\tf, err := test.dir.Open(test.name)\n\t\tswitch {\n\t\tcase err == nil && test.err:\n\t\t\tt.Errorf(\"In environment %v, %v.Open(%q) succeeded; want error\", test.env, test.dir, test.name)\n\t\tcase err == nil && !test.err && f.Name() != test.path:\n\t\t\tt.Errorf(\"In environment %v, %v.Open(%q).Name() = %q; want %q\", test.env, test.dir, test.name, f.Name(), test.path)\n\t\tcase err != nil && !test.err:\n\t\t\tt.Errorf(\"In environment %v, %v.Open(%q) error: %v\", test.env, test.dir, test.name, err)\n\t\t}\n\t\tif f != nil {\n\t\t\tf.Close()\n\t\t}\n\t}\n}\n\nfunc TestDir_Create(t *testing.T) {\n\ttd := newTempDir(t)\n\tdefer td.cleanup()\n\tjunkDir := td.mkdir(\"junk\", 0777)\n\tdataDir := td.mkdir(\"data\", 0777)\n\n\ttests := []struct {\n\t\tdir  Dir\n\t\tenv  env\n\t\tname string\n\n\t\tpath       string\n\t\terr        
bool\n\t\tpermChecks []permCheck\n\t}{\n\t\t{\n\t\t\tdir:  Data,\n\t\t\tenv:  env{\"XDG_DATA_HOME\": dataDir, \"XDG_DATA_DIRS\": junkDir},\n\t\t\tname: \"foo01\",\n\t\t\tpath: filepath.Join(dataDir, \"foo01\"),\n\t\t},\n\t\t{\n\t\t\tdir:  Data,\n\t\t\tenv:  env{},\n\t\t\tname: \"foo02\",\n\t\t\terr:  true,\n\t\t},\n\t\t{\n\t\t\tdir:  Data,\n\t\t\tenv:  env{\"XDG_DATA_HOME\": dataDir, \"XDG_DATA_DIRS\": junkDir},\n\t\t\tname: filepath.Join(\"foo03\", \"bar\"),\n\t\t\tpath: filepath.Join(dataDir, \"foo03\", \"bar\"),\n\t\t\tpermChecks: []permCheck{\n\t\t\t\t{filepath.Join(dataDir, \"foo03\"), 0700},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdir:  Data,\n\t\t\tenv:  env{\"XDG_DATA_HOME\": filepath.Join(td.dir, \"NOTREAL\"), \"XDG_DATA_DIRS\": junkDir},\n\t\t\tname: filepath.Join(\"foo04\", \"bar\"),\n\t\t\tpath: filepath.Join(td.dir, \"NOTREAL\", \"foo04\", \"bar\"),\n\t\t\tpermChecks: []permCheck{\n\t\t\t\t{filepath.Join(td.dir, \"NOTREAL\"), 0700},\n\t\t\t\t{filepath.Join(td.dir, \"NOTREAL\", \"foo04\"), 0700},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\ttest.env.set()\n\t\tf, err := test.dir.Create(test.name)\n\t\tswitch {\n\t\tcase err == nil && test.err:\n\t\t\tt.Errorf(\"In environment %v, %v.Create(%q) succeeded; want error\", test.env, test.dir, test.name)\n\t\tcase err == nil && !test.err && f.Name() != test.path:\n\t\t\tt.Errorf(\"In environment %v, %v.Create(%q).Name() = %q; want %q\", test.env, test.dir, test.name, f.Name(), test.path)\n\t\tcase err != nil && !test.err:\n\t\t\tt.Errorf(\"In environment %v, %v.Create(%q) error: %v\", test.env, test.dir, test.name, err)\n\t\t}\n\t\tif f != nil {\n\t\t\tf.Close()\n\t\t}\n\t\tfor _, pc := range test.permChecks {\n\t\t\tinfo, err := os.Stat(pc.name)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"In environment %v, %v.Create(%q): stat %s error: %v\", test.env, test.dir, test.name, pc.name, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif perm := info.Mode().Perm(); perm != pc.perm {\n\t\t\t\tt.Errorf(\"In environment 
%v, %v.Create(%q): %s has permission %v; want %v\", test.env, test.dir, test.name, pc.name, perm, pc.perm)\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc stringsEqual(a, b []string) bool {\n\tif len(a) != len(b) {\n\t\treturn false\n\t}\n\tfor i := range a {\n\t\tif a[i] != b[i] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\ntype tempDir struct {\n\tt   *testing.T\n\tdir string\n}\n\nfunc newTempDir(t *testing.T) *tempDir {\n\ttd := &tempDir{t: t}\n\tvar err error\n\ttd.dir, err = ioutil.TempDir(\"\", \"xdgdir_test\")\n\tif err != nil {\n\t\tt.Fatal(\"making temp dir:\", err)\n\t}\n\treturn td\n}\n\n// newFile creates a file and returns its path.\nfunc (td *tempDir) newFile(name string, data string) string {\n\tpath := filepath.Join(td.dir, name)\n\tf, err := os.Create(path)\n\tif err != nil {\n\t\ttd.t.Fatalf(\"newFile(%q, %q) error: %v\", name, data, err)\n\t}\n\t_, werr := f.Write([]byte(data))\n\tcerr := f.Close()\n\tif werr != nil {\n\t\ttd.t.Errorf(\"newFile(%q, %q) write error: %v\", name, data, err)\n\t}\n\tif cerr != nil {\n\t\ttd.t.Errorf(\"newFile(%q, %q) close error: %v\", name, data, err)\n\t}\n\tif werr != nil || cerr != nil {\n\t\ttd.t.FailNow()\n\t}\n\treturn path\n}\n\n// mkdir creates a directory and returns its path.\nfunc (td *tempDir) mkdir(name string, perm os.FileMode) string {\n\tpath := filepath.Join(td.dir, name)\n\terr := os.Mkdir(path, perm)\n\tif err != nil {\n\t\ttd.t.Fatal(err)\n\t}\n\treturn path\n}\n\nfunc (td *tempDir) cleanup() {\n\terr := os.RemoveAll(td.dir)\n\tif err != nil {\n\t\ttd.t.Log(\"failed to clean up temp dir:\", err)\n\t}\n}\n\ntype permCheck struct {\n\tname string\n\tperm os.FileMode\n}\n\ntype env map[string]string\n\nfunc (e env) set() {\n\tgetenv = func(key string) string {\n\t\treturn e[key]\n\t}\n\tfindHome = func() string {\n\t\treturn e[\"HOME\"]\n\t}\n}\n"
  },
  {
    "path": "ziputil/ziputil.go",
    "content": "// Copyright 2026 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE file.\n\n// Package ziputil is a fork of parts of the Go standard library's archive/zip\n// package, exposing details of the zip file format that standard library does\n// not make available.\n//\n// In particular, it enables reading both the Table of Contents (the list of all\n// files in a zip) and individual files out of a zip file from an HTTP server\n// using the minimum number and size of HTTP requests. A naive implementation\n// would implement an io.ReaderAt in terms of HTTP Range requests, but\n// archive/zip makes way too many ReadAt calls for that to be efficient. A\n// slightly less naive version would then implement a ReaderAt that does\n// page-aligned chunking and caching. That's better, but still not good. This\n// package permits doing 1-2 range requests to get the TOC, and then exactly one\n// streaming HTTP request per random file downloaded, sized correctly.\npackage ziputil\n\nimport (\n\t\"archive/zip\"\n\t\"bufio\"\n\t\"bytes\"\n\t\"compress/flate\"\n\t\"encoding/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"hash\"\n\t\"hash/crc32\"\n\t\"io\"\n\t\"sync\"\n\t\"time\"\n\t\"unicode/utf8\"\n\n\t\"go4.org/readerutil\"\n)\n\nvar (\n\terrFormat = zip.ErrFormat\n)\n\n// Compression methods.\nconst (\n\tmethodStore   uint16 = 0 // no compression\n\tmethodDeflate uint16 = 8 // DEFLATE compressed\n)\n\nconst (\n\tfileHeaderSignature      = 0x04034b50\n\tdirectoryHeaderSignature = 0x02014b50\n\tdirectoryEndSignature    = 0x06054b50\n\tdirectory64LocSignature  = 0x07064b50\n\tdirectory64EndSignature  = 0x06064b50\n\tdataDescriptorSignature  = 0x08074b50 // de-facto standard; required by OS X Finder\n\tfileHeaderLen            = 30         // + filename + extra\n\tdirectoryHeaderLen       = 46         // + filename + extra + comment\n\tdirectoryEndLen          = 22         // + 
comment\n\tdataDescriptorLen        = 16         // four uint32: descriptor signature, crc32, compressed size, size\n\tdataDescriptor64Len      = 24         // two uint32: signature, crc32 | two uint64: compressed size, size\n\tdirectory64LocLen        = 20         //\n\tdirectory64EndLen        = 56         // + extra\n\n\t// Constants for the first byte in CreatorVersion.\n\tcreatorFAT    = 0\n\tcreatorUnix   = 3\n\tcreatorNTFS   = 11\n\tcreatorVFAT   = 14\n\tcreatorMacOSX = 19\n\n\t// Version numbers.\n\tzipVersion20 = 20 // 2.0\n\tzipVersion45 = 45 // 4.5 (reads and writes zip64 archives)\n\n\t// Limits for non zip64 files.\n\tuint16max = (1 << 16) - 1\n\tuint32max = (1 << 32) - 1\n\n\t// Extra header IDs.\n\t//\n\t// IDs 0..31 are reserved for official use by PKWARE.\n\t// IDs above that range are defined by third-party vendors.\n\t// Since ZIP lacked high precision timestamps (nor an official specification\n\t// of the timezone used for the date fields), many competing extra fields\n\t// have been invented. Pervasive use effectively makes them \"official\".\n\t//\n\t// See http://mdfs.net/Docs/Comp/Archiving/Zip/ExtraField\n\tzip64ExtraID       = 0x0001 // Zip64 extended information\n\tntfsExtraID        = 0x000a // NTFS\n\tunixExtraID        = 0x000d // UNIX\n\textTimeExtraID     = 0x5455 // Extended timestamp\n\tinfoZipUnixExtraID = 0x5855 // Info-ZIP Unix extension\n)\n\n// ZipTOCSize reports, as a function of the total zip file size and a small\n// suffix of the file, the total suffix length needed to be able to read the\n// zip's full Table of Contents (TOC).\n//\n// A valid zip file's footer (its \"End of Central Directory\") is usually in the\n// final 22 bytes of the file, so a 22 byte zipFooter if often but not always\n// sufficient. The Go standard library does a 1 KiB suffix read, followed by a\n// 65 KiB read, and looks no further.\n//\n// The function reports ok=false if it couldn't find a valid EOCD in the footer\n// buffer provided. 
Callers can either pass in 65 KiB to start, or first try 22\n// bytes followed by 1 KiB, followed by 65 KiB. The provided buffer can be any\n// size, including the full size of the file.\n//\n// When the footer is found, the function reports ok=true and the total suffix\n// size needed to read the zip's table of contents. That size includes the\n// length of the footer itself.\nfunc ZipTOCSize(size int64, zipFooter []byte) (tocSize int64, ok bool) {\n\tnumFakeZeros := size - int64(len(zipFooter))\n\tif numFakeZeros < 0 {\n\t\t// Invalid arguments to function.\n\t\treturn 0, false\n\t}\n\tra := readerutil.NewMultiReaderAt(\n\t\treaderutil.ZeroSizeReaderAt(numFakeZeros),\n\t\tbytes.NewReader(zipFooter),\n\t)\n\tend, baseOffset, err := readDirectoryEnd(ra, size)\n\tif err != nil {\n\t\treturn 0, false\n\t}\n\ttocStartOff := baseOffset + int64(end.directoryOffset)\n\ttocSize = size - tocStartOff\n\tif tocSize < 0 || tocSize > size {\n\t\t// Something went wrong above.\n\t\treturn 0, false\n\t}\n\treturn tocSize, true\n}\n\n// Reader is the result of [ParseTOC].\ntype Reader struct {\n\t// File are the files in the zip file.\n\tFile []*FileHeader\n\n\t// BaseOffset is where in the zip file the zip contents\n\t// begin. 
This is often zero, but some zip files have\n\t// prefixes (such as shell scripts).\n\tBaseOffset int64\n\n\t// Comment is the optional comment from the end of a zip file.\n\tComment string\n}\n\n// ParseTOC parses the table of contents from a zip file with\n// the provided size.\nfunc ParseTOC(size int64, toc []byte) (*Reader, error) {\n\tnumFakeZeros := size - int64(len(toc))\n\tif numFakeZeros < 0 {\n\t\treturn nil, errors.New(\"invalid arguments to ParseTOC\")\n\t}\n\tra := readerutil.NewMultiReaderAt(\n\t\treaderutil.ZeroSizeReaderAt(numFakeZeros),\n\t\tbytes.NewReader(toc),\n\t)\n\tend, baseOffset, err := readDirectoryEnd(ra, size)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tr := new(Reader)\n\tr.BaseOffset = baseOffset\n\tr.Comment = end.comment\n\n\trs := io.NewSectionReader(ra, 0, size)\n\tif _, err = rs.Seek(r.BaseOffset+int64(end.directoryOffset), io.SeekStart); err != nil {\n\t\treturn nil, err\n\t}\n\tbuf := bufio.NewReader(rs)\n\tfor {\n\t\tf := &FileHeader{Reader: r}\n\t\terr = readDirectoryHeader(f, buf)\n\t\tif err == zip.ErrFormat || err == io.ErrUnexpectedEOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tf.HeaderOffset += r.BaseOffset\n\t\tr.File = append(r.File, f)\n\t}\n\tif uint16(len(r.File)) != uint16(end.directoryRecords) { // only compare 16 bits here\n\t\t// Return the readDirectoryHeader error if we read\n\t\t// the wrong number of directory entries.\n\t\treturn nil, fmt.Errorf(\"zip: wrong number of directory entries: got %d, want %d (mod uint16)\", len(r.File), end.directoryRecords)\n\t}\n\treturn r, nil\n}\n\nfunc readDirectoryEnd(r io.ReaderAt, size int64) (dir *directoryEnd, baseOffset int64, err error) {\n\t// look for directoryEndSignature in the last 1k, then in the last 65k\n\tvar buf []byte\n\tvar directoryEndOffset int64\n\tfor i, bLen := range []int64{1024, 65 * 1024} {\n\t\tif bLen > size {\n\t\t\tbLen = size\n\t\t}\n\t\tbuf = make([]byte, int(bLen))\n\t\tif _, err := r.ReadAt(buf, 
size-bLen); err != nil && err != io.EOF {\n\t\t\treturn nil, 0, err\n\t\t}\n\t\tif p := findSignatureInBlock(buf); p >= 0 {\n\t\t\tbuf = buf[p:]\n\t\t\tdirectoryEndOffset = size - bLen + int64(p)\n\t\t\tbreak\n\t\t}\n\t\tif i == 1 || bLen == size {\n\t\t\treturn nil, 0, errFormat\n\t\t}\n\t}\n\n\t// read header into struct\n\tb := readBuf(buf[4:]) // skip signature\n\td := &directoryEnd{\n\t\tdiskNbr:            uint32(b.uint16()),\n\t\tdirDiskNbr:         uint32(b.uint16()),\n\t\tdirRecordsThisDisk: uint64(b.uint16()),\n\t\tdirectoryRecords:   uint64(b.uint16()),\n\t\tdirectorySize:      uint64(b.uint32()),\n\t\tdirectoryOffset:    uint64(b.uint32()),\n\t\tcommentLen:         b.uint16(),\n\t}\n\tl := int(d.commentLen)\n\tif l > len(b) {\n\t\treturn nil, 0, errors.New(\"zip: invalid comment length\")\n\t}\n\td.comment = string(b[:l])\n\n\t// These values mean that the file can be a zip64 file\n\tif d.directoryRecords == 0xffff || d.directorySize == 0xffff || d.directoryOffset == 0xffffffff {\n\t\tp, err := findDirectory64End(r, directoryEndOffset)\n\t\tif err == nil && p >= 0 {\n\t\t\tdirectoryEndOffset = p\n\t\t\terr = readDirectory64End(r, p, d)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, 0, err\n\t\t}\n\t}\n\n\tmaxInt64 := uint64(1<<63 - 1)\n\tif d.directorySize > maxInt64 || d.directoryOffset > maxInt64 {\n\t\treturn nil, 0, errFormat\n\t}\n\n\tbaseOffset = directoryEndOffset - int64(d.directorySize) - int64(d.directoryOffset)\n\n\t// Make sure directoryOffset points to somewhere in our file.\n\tif o := baseOffset + int64(d.directoryOffset); o < 0 || o >= size {\n\t\treturn nil, 0, errFormat\n\t}\n\n\t// If the directory end data tells us to use a non-zero baseOffset,\n\t// but we would find a valid directory entry if we assume that the\n\t// baseOffset is 0, then just use a baseOffset of 0.\n\t// We've seen files in which the directory end data gives us\n\t// an incorrect baseOffset.\n\tif baseOffset > 0 {\n\t\toff := int64(d.directoryOffset)\n\t\trs := 
io.NewSectionReader(r, off, size-off)\n\t\tif readDirectoryHeader(new(FileHeader), rs) == nil {\n\t\t\tbaseOffset = 0\n\t\t}\n\t}\n\n\treturn d, baseOffset, nil\n}\n\n// FileHeader describes a file within a ZIP file.\n// See the [ZIP specification] for details.\n//\n// [ZIP specification]: https://support.pkware.com/pkzip/appnote\ntype FileHeader struct {\n\t// Name is the name of the file.\n\t//\n\t// It must be a relative path, not start with a drive letter (such as \"C:\"),\n\t// and must use forward slashes instead of back slashes. A trailing slash\n\t// indicates that this file is a directory and should have no data.\n\tName string\n\n\t// Comment is any arbitrary user-defined string shorter than 64KiB.\n\tComment string\n\n\t// NonUTF8 indicates that Name and Comment are not encoded in UTF-8.\n\t//\n\t// By specification, the only other encoding permitted should be CP-437,\n\t// but historically many ZIP readers interpret Name and Comment as whatever\n\t// the system's local character encoding happens to be.\n\t//\n\t// This flag should only be set if the user intends to encode a non-portable\n\t// ZIP file for a specific localized region. Otherwise, the Writer\n\t// automatically sets the ZIP format's UTF-8 flag for valid UTF-8 strings.\n\tNonUTF8 bool\n\n\tCreatorVersion uint16\n\tReaderVersion  uint16\n\tFlags          uint16\n\n\t// Method is the compression method. If zero, Store is used.\n\tMethod uint16\n\n\t// Modified is the modified time of the file.\n\t//\n\t// When reading, an extended timestamp is preferred over the legacy MS-DOS\n\t// date field, and the offset between the times is used as the timezone.\n\t// If only the MS-DOS date is present, the timezone is assumed to be UTC.\n\t//\n\t// When writing, an extended timestamp (which is timezone-agnostic) is\n\t// always emitted. 
The legacy MS-DOS date field is encoded according to the\n\t// location of the Modified time.\n\tModified time.Time\n\n\t// ModifiedTime is an MS-DOS-encoded time.\n\t//\n\t// Deprecated: Use Modified instead.\n\tModifiedTime uint16\n\n\t// ModifiedDate is an MS-DOS-encoded date.\n\t//\n\t// Deprecated: Use Modified instead.\n\tModifiedDate uint16\n\n\t// CRC32 is the CRC32 checksum of the file content.\n\tCRC32 uint32\n\n\t// CompressedSize is the compressed size of the file in bytes.\n\t// If either the uncompressed or compressed size of the file\n\t// does not fit in 32 bits, CompressedSize is set to ^uint32(0).\n\t//\n\t// Deprecated: Use CompressedSize64 instead.\n\tCompressedSize uint32\n\n\t// UncompressedSize is the uncompressed size of the file in bytes.\n\t// If either the uncompressed or compressed size of the file\n\t// does not fit in 32 bits, UncompressedSize is set to ^uint32(0).\n\t//\n\t// Deprecated: Use UncompressedSize64 instead.\n\tUncompressedSize uint32\n\n\t// CompressedSize64 is the compressed size of the file in bytes.\n\tCompressedSize64 uint64\n\n\t// UncompressedSize64 is the uncompressed size of the file in bytes.\n\tUncompressedSize64 uint64\n\n\tExtra         []byte\n\tExternalAttrs uint32 // Meaning depends on CreatorVersion\n\n\t// Reader is the Reader which parsed this header.\n\tReader *Reader\n\n\t// HeaderOffset is where in the zip file the local file header\n\t// for this file is located.\n\tHeaderOffset int64\n}\n\n// readDirectoryHeader attempts to read a directory header from r.\n// It returns io.ErrUnexpectedEOF if it cannot read a complete header,\n// and ErrFormat if it doesn't find a valid header signature.\nfunc readDirectoryHeader(f *FileHeader, r io.Reader) error {\n\tvar buf [directoryHeaderLen]byte\n\tif _, err := io.ReadFull(r, buf[:]); err != nil {\n\t\treturn err\n\t}\n\tb := readBuf(buf[:])\n\tif sig := b.uint32(); sig != directoryHeaderSignature {\n\t\treturn errFormat\n\t}\n\tf.CreatorVersion = 
b.uint16()\n\tf.ReaderVersion = b.uint16()\n\tf.Flags = b.uint16()\n\tf.Method = b.uint16()\n\tf.ModifiedTime = b.uint16()\n\tf.ModifiedDate = b.uint16()\n\tf.CRC32 = b.uint32()\n\tf.CompressedSize = b.uint32()\n\tf.UncompressedSize = b.uint32()\n\tf.CompressedSize64 = uint64(f.CompressedSize)\n\tf.UncompressedSize64 = uint64(f.UncompressedSize)\n\tfilenameLen := int(b.uint16())\n\textraLen := int(b.uint16())\n\tcommentLen := int(b.uint16())\n\tb = b[4:] // skipped start disk number and internal attributes (2x uint16)\n\tf.ExternalAttrs = b.uint32()\n\tf.HeaderOffset = int64(b.uint32())\n\td := make([]byte, filenameLen+extraLen+commentLen)\n\tif _, err := io.ReadFull(r, d); err != nil {\n\t\treturn err\n\t}\n\tf.Name = string(d[:filenameLen])\n\tf.Extra = d[filenameLen : filenameLen+extraLen]\n\tf.Comment = string(d[filenameLen+extraLen:])\n\n\t// Determine the character encoding.\n\tutf8Valid1, utf8Require1 := detectUTF8(f.Name)\n\tutf8Valid2, utf8Require2 := detectUTF8(f.Comment)\n\tswitch {\n\tcase !utf8Valid1 || !utf8Valid2:\n\t\t// Name and Comment definitely not UTF-8.\n\t\tf.NonUTF8 = true\n\tcase !utf8Require1 && !utf8Require2:\n\t\t// Name and Comment use only single-byte runes that overlap with UTF-8.\n\t\tf.NonUTF8 = false\n\tdefault:\n\t\t// Might be UTF-8, might be some other encoding; preserve existing flag.\n\t\t// Some ZIP writers use UTF-8 encoding without setting the UTF-8 flag.\n\t\t// Since it is impossible to always distinguish valid UTF-8 from some\n\t\t// other encoding (e.g., GBK or Shift-JIS), we trust the flag.\n\t\tf.NonUTF8 = f.Flags&0x800 == 0\n\t}\n\n\tneedUSize := f.UncompressedSize == ^uint32(0)\n\tneedCSize := f.CompressedSize == ^uint32(0)\n\tneedHeaderOffset := f.HeaderOffset == int64(^uint32(0))\n\n\t// Best effort to find what we need.\n\t// Other zip authors might not even follow the basic format,\n\t// and we'll just ignore the Extra content in that case.\n\tvar modified time.Time\nparseExtras:\n\tfor extra := 
readBuf(f.Extra); len(extra) >= 4; { // need at least tag and size\n\t\tfieldTag := extra.uint16()\n\t\tfieldSize := int(extra.uint16())\n\t\tif len(extra) < fieldSize {\n\t\t\tbreak\n\t\t}\n\t\tfieldBuf := extra.sub(fieldSize)\n\n\t\tswitch fieldTag {\n\t\tcase zip64ExtraID:\n\n\t\t\t// update directory values from the zip64 extra block.\n\t\t\t// They should only be consulted if the sizes read earlier\n\t\t\t// are maxed out.\n\t\t\t// See golang.org/issue/13367.\n\t\t\tif needUSize {\n\t\t\t\tneedUSize = false\n\t\t\t\tif len(fieldBuf) < 8 {\n\t\t\t\t\treturn errFormat\n\t\t\t\t}\n\t\t\t\tf.UncompressedSize64 = fieldBuf.uint64()\n\t\t\t}\n\t\t\tif needCSize {\n\t\t\t\tneedCSize = false\n\t\t\t\tif len(fieldBuf) < 8 {\n\t\t\t\t\treturn errFormat\n\t\t\t\t}\n\t\t\t\tf.CompressedSize64 = fieldBuf.uint64()\n\t\t\t}\n\t\t\tif needHeaderOffset {\n\t\t\t\tneedHeaderOffset = false\n\t\t\t\tif len(fieldBuf) < 8 {\n\t\t\t\t\treturn errFormat\n\t\t\t\t}\n\t\t\t\tf.HeaderOffset = int64(fieldBuf.uint64())\n\t\t\t}\n\t\tcase ntfsExtraID:\n\t\t\tif len(fieldBuf) < 4 {\n\t\t\t\tcontinue parseExtras\n\t\t\t}\n\t\t\tfieldBuf.uint32()        // reserved (ignored)\n\t\t\tfor len(fieldBuf) >= 4 { // need at least tag and size\n\t\t\t\tattrTag := fieldBuf.uint16()\n\t\t\t\tattrSize := int(fieldBuf.uint16())\n\t\t\t\tif len(fieldBuf) < attrSize {\n\t\t\t\t\tcontinue parseExtras\n\t\t\t\t}\n\t\t\t\tattrBuf := fieldBuf.sub(attrSize)\n\t\t\t\tif attrTag != 1 || attrSize != 24 {\n\t\t\t\t\tcontinue // Ignore irrelevant attributes\n\t\t\t\t}\n\n\t\t\t\tconst ticksPerSecond = 1e7    // Windows timestamp resolution\n\t\t\t\tts := int64(attrBuf.uint64()) // ModTime since Windows epoch\n\t\t\t\tsecs := ts / ticksPerSecond\n\t\t\t\tnsecs := (1e9 / ticksPerSecond) * (ts % ticksPerSecond)\n\t\t\t\tepoch := time.Date(1601, time.January, 1, 0, 0, 0, 0, time.UTC)\n\t\t\t\tmodified = time.Unix(epoch.Unix()+secs, nsecs)\n\t\t\t}\n\t\tcase unixExtraID, infoZipUnixExtraID:\n\t\t\tif len(fieldBuf) < 8 
{\n\t\t\t\tcontinue parseExtras\n\t\t\t}\n\t\t\tfieldBuf.uint32()              // AcTime (ignored)\n\t\t\tts := int64(fieldBuf.uint32()) // ModTime since Unix epoch\n\t\t\tmodified = time.Unix(ts, 0)\n\t\tcase extTimeExtraID:\n\t\t\tif len(fieldBuf) < 5 || fieldBuf.uint8()&1 == 0 {\n\t\t\t\tcontinue parseExtras\n\t\t\t}\n\t\t\tts := int64(fieldBuf.uint32()) // ModTime since Unix epoch\n\t\t\tmodified = time.Unix(ts, 0)\n\t\t}\n\t}\n\n\tmsdosModified := msDosTimeToTime(f.ModifiedDate, f.ModifiedTime)\n\tf.Modified = msdosModified\n\tif !modified.IsZero() {\n\t\tf.Modified = modified.UTC()\n\n\t\t// If legacy MS-DOS timestamps are set, we can use the delta between\n\t\t// the legacy and extended versions to estimate timezone offset.\n\t\t//\n\t\t// A non-UTC timezone is always used (even if offset is zero).\n\t\t// Thus, FileHeader.Modified.Location() == time.UTC is useful for\n\t\t// determining whether extended timestamps are present.\n\t\t// This is necessary for users that need to do additional time\n\t\t// calculations when dealing with legacy ZIP formats.\n\t\tif f.ModifiedTime != 0 || f.ModifiedDate != 0 {\n\t\t\tf.Modified = modified.In(timeZone(msdosModified.Sub(modified)))\n\t\t}\n\t}\n\n\t// Assume that uncompressed size 2³²-1 could plausibly happen in\n\t// an old zip32 file that was sharding inputs into the largest chunks\n\t// possible (or is just malicious; search the web for 42.zip).\n\t// If needUSize is true still, it means we didn't see a zip64 extension.\n\t// As long as the compressed size is not also 2³²-1 (implausible)\n\t// and the header is not also 2³²-1 (equally implausible),\n\t// accept the uncompressed size 2³²-1 as valid.\n\t// If nothing else, this keeps archive/zip working with 42.zip.\n\t_ = needUSize\n\n\tif needCSize || needHeaderOffset {\n\t\treturn errFormat\n\t}\n\n\treturn nil\n}\n\n// findDirectory64End tries to read the zip64 locator just before the\n// directory end and returns the offset of the zip64 directory end if\n// 
found.\nfunc findDirectory64End(r io.ReaderAt, directoryEndOffset int64) (int64, error) {\n\tlocOffset := directoryEndOffset - directory64LocLen\n\tif locOffset < 0 {\n\t\treturn -1, nil // no need to look for a header outside the file\n\t}\n\tbuf := make([]byte, directory64LocLen)\n\tif _, err := r.ReadAt(buf, locOffset); err != nil {\n\t\treturn -1, err\n\t}\n\tb := readBuf(buf)\n\tif sig := b.uint32(); sig != directory64LocSignature {\n\t\treturn -1, nil\n\t}\n\tif b.uint32() != 0 { // number of the disk with the start of the zip64 end of central directory\n\t\treturn -1, nil // the file is not a valid zip64-file\n\t}\n\tp := b.uint64()      // relative offset of the zip64 end of central directory record\n\tif b.uint32() != 1 { // total number of disks\n\t\treturn -1, nil // the file is not a valid zip64-file\n\t}\n\treturn int64(p), nil\n}\n\n// readDirectory64End reads the zip64 directory end and updates the\n// directory end with the zip64 directory end values.\nfunc readDirectory64End(r io.ReaderAt, offset int64, d *directoryEnd) (err error) {\n\tbuf := make([]byte, directory64EndLen)\n\tif _, err := r.ReadAt(buf, offset); err != nil {\n\t\treturn err\n\t}\n\n\tb := readBuf(buf)\n\tif sig := b.uint32(); sig != directory64EndSignature {\n\t\treturn errFormat\n\t}\n\n\tb = b[12:]                        // skip dir size, version and version needed (uint64 + 2x uint16)\n\td.diskNbr = b.uint32()            // number of this disk\n\td.dirDiskNbr = b.uint32()         // number of the disk with the start of the central directory\n\td.dirRecordsThisDisk = b.uint64() // total number of entries in the central directory on this disk\n\td.directoryRecords = b.uint64()   // total number of entries in the central directory\n\td.directorySize = b.uint64()      // size of the central directory\n\td.directoryOffset = b.uint64()    // offset of start of central directory with respect to the starting disk number\n\n\treturn nil\n}\n\nfunc findSignatureInBlock(b []byte) int 
{\n\tfor i := len(b) - directoryEndLen; i >= 0; i-- {\n\t\t// defined from directoryEndSignature in struct.go\n\t\tif b[i] == 'P' && b[i+1] == 'K' && b[i+2] == 0x05 && b[i+3] == 0x06 {\n\t\t\t// n is length of comment\n\t\t\tn := int(b[i+directoryEndLen-2]) | int(b[i+directoryEndLen-1])<<8\n\t\t\tif n+directoryEndLen+i > len(b) {\n\t\t\t\t// Truncated comment.\n\t\t\t\t// Some parsers (such as Info-ZIP) ignore the truncated comment\n\t\t\t\t// rather than treating it as a hard error.\n\t\t\t\treturn -1\n\t\t\t}\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}\n\ntype readBuf []byte\n\nfunc (b *readBuf) uint8() uint8 {\n\tv := (*b)[0]\n\t*b = (*b)[1:]\n\treturn v\n}\n\nfunc (b *readBuf) uint16() uint16 {\n\tv := binary.LittleEndian.Uint16(*b)\n\t*b = (*b)[2:]\n\treturn v\n}\n\nfunc (b *readBuf) uint32() uint32 {\n\tv := binary.LittleEndian.Uint32(*b)\n\t*b = (*b)[4:]\n\treturn v\n}\n\nfunc (b *readBuf) uint64() uint64 {\n\tv := binary.LittleEndian.Uint64(*b)\n\t*b = (*b)[8:]\n\treturn v\n}\n\nfunc (b *readBuf) sub(n int) readBuf {\n\tb2 := (*b)[:n]\n\t*b = (*b)[n:]\n\treturn b2\n}\n\ntype directoryEnd struct {\n\tdiskNbr            uint32 // unused\n\tdirDiskNbr         uint32 // unused\n\tdirRecordsThisDisk uint64 // unused\n\tdirectoryRecords   uint64\n\tdirectorySize      uint64\n\tdirectoryOffset    uint64 // relative to file\n\tcommentLen         uint16\n\tcomment            string\n}\n\n// msDosTimeToTime converts an MS-DOS date and time into a time.Time.\n// The resolution is 2s.\n// See: https://learn.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-dosdatetimetofiletime\nfunc msDosTimeToTime(dosDate, dosTime uint16) time.Time {\n\treturn time.Date(\n\t\t// date bits 0-4: day of month; 5-8: month; 9-15: years since 1980\n\t\tint(dosDate>>9+1980),\n\t\ttime.Month(dosDate>>5&0xf),\n\t\tint(dosDate&0x1f),\n\n\t\t// time bits 0-4: second/2; 5-10: minute; 11-15: hour\n\t\tint(dosTime>>11),\n\t\tint(dosTime>>5&0x3f),\n\t\tint(dosTime&0x1f*2),\n\t\t0, // 
nanoseconds\n\n\t\ttime.UTC,\n\t)\n}\n\n// detectUTF8 reports whether s is a valid UTF-8 string, and whether the string\n// must be considered UTF-8 encoding (i.e., not compatible with CP-437, ASCII,\n// or any other common encoding).\nfunc detectUTF8(s string) (valid, require bool) {\n\tfor i := 0; i < len(s); {\n\t\tr, size := utf8.DecodeRuneInString(s[i:])\n\t\ti += size\n\t\t// Officially, ZIP uses CP-437, but many readers use the system's\n\t\t// local character encoding. Most encoding are compatible with a large\n\t\t// subset of CP-437, which itself is ASCII-like.\n\t\t//\n\t\t// Forbid 0x7e and 0x5c since EUC-KR and Shift-JIS replace those\n\t\t// characters with localized currency and overline characters.\n\t\tif r < 0x20 || r > 0x7d || r == 0x5c {\n\t\t\tif !utf8.ValidRune(r) || (r == utf8.RuneError && size == 1) {\n\t\t\t\treturn false, false\n\t\t\t}\n\t\t\trequire = true\n\t\t}\n\t}\n\treturn true, require\n}\n\n// timeZone returns a *time.Location based on the provided offset.\n// If the offset is non-sensible, then this uses an offset of zero.\nfunc timeZone(offset time.Duration) *time.Location {\n\tconst (\n\t\tminOffset   = -12 * time.Hour  // E.g., Baker island at -12:00\n\t\tmaxOffset   = +14 * time.Hour  // E.g., Line island at +14:00\n\t\toffsetAlias = 15 * time.Minute // E.g., Nepal at +5:45\n\t)\n\toffset = offset.Round(offsetAlias)\n\tif offset < minOffset || maxOffset < offset {\n\t\toffset = 0\n\t}\n\treturn time.FixedZone(\"\", int(offset/time.Second))\n}\n\n// openReadSlop is how many extra bytes OpenWithReader should add to its size\n// estimation to account for possible differences between the expected and\n// actual size of the underlying reader, since the fileHeader length can't be\n// known exactly.\nconst openReadSlop = 512\n\n// OpenWithReader opens a zip.File using an alterate reader for the compressed\n// data.\n//\n// For example, you can use this (along with [ZipTOCSize]), to do one or two\n// HTTP Range requests against a 
remote zip file to first read its TOC, then use\n// this function to read individual files within the zip without downloading the\n// entire zip file. The getRawReader function would then do an HTTP Range\n// request for the given offset and size.\n//\n// Callers are responsible for closing the returned io.ReadCloser, which then\n// also closes the rawReader returned by getRawReader.\nfunc OpenWithReader(h *FileHeader, getRawReader func(offsize, size int64) (rawReader io.ReadCloser, err error)) (io.ReadCloser, error) {\n\tvar decomp zip.Decompressor\n\tswitch h.Method {\n\tcase zip.Store:\n\t\tdecomp = io.NopCloser\n\tcase zip.Deflate:\n\t\tdecomp = newFlateReader\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported storage method %d\", h.Method)\n\t}\n\n\tcompSize := int64(h.CompressedSize64)\n\tdescSize := int64(0)\n\tif h.hasDataDesc() {\n\t\tdescSize = dataDescriptorLen\n\t}\n\treadSize := compSize + descSize\n\n\t// Pad length for fileHeader, filename, extras, and a bit of slop space, in\n\t// case the local file header's name/extras are longer (very unlikely)\n\treadSize += fileHeaderLen\n\tfileHeaderVariableLen := int64(len(h.Name)) + int64(len(h.Extra)) + openReadSlop\n\treadSize += fileHeaderVariableLen\n\n\trawReader, err := getRawReader(h.HeaderOffset, readSize)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"opening raw reader: %w\", err)\n\t}\n\n\t// Skip over the local file header to get to the body.\n\tif variableLen, err := skipHeader(rawReader); err != nil {\n\t\trawReader.Close()\n\t\treturn nil, err\n\t} else if variableLen > fileHeaderVariableLen {\n\t\trawReader.Close()\n\t\treturn nil, fmt.Errorf(\"file header length of %d was greater than expected %d\", variableLen, fileHeaderVariableLen)\n\t}\n\n\tvar desr io.Reader // or nil if none\n\tif h.hasDataDesc() {\n\t\tdesr = rawReader\n\t}\n\n\tdecompressReader := decomp(io.LimitReader(rawReader, compSize))\n\treturn &checksumReader{\n\t\trawRC:      rawReader,\n\t\tbodyReader: 
decompressReader,\n\t\thash:       crc32.NewIEEE(),\n\t\tf:          h,\n\t\tdesr:       desr,\n\t}, nil\n}\n\nvar flateReaderPool sync.Pool\n\nfunc newFlateReader(r io.Reader) io.ReadCloser {\n\tfr, ok := flateReaderPool.Get().(io.ReadCloser)\n\tif ok {\n\t\tfr.(flate.Resetter).Reset(r, nil)\n\t} else {\n\t\tfr = flate.NewReader(r)\n\t}\n\treturn &pooledFlateReader{fr: fr}\n}\n\ntype pooledFlateReader struct {\n\tmu sync.Mutex // guards Close and Read\n\tfr io.ReadCloser\n}\n\nfunc (r *pooledFlateReader) Read(p []byte) (n int, err error) {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\tif r.fr == nil {\n\t\treturn 0, errors.New(\"Read after Close\")\n\t}\n\treturn r.fr.Read(p)\n}\n\nfunc (r *pooledFlateReader) Close() error {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\tvar err error\n\tif r.fr != nil {\n\t\terr = r.fr.Close()\n\t\tflateReaderPool.Put(r.fr)\n\t\tr.fr = nil\n\t}\n\treturn err\n}\n\nfunc (h *FileHeader) hasDataDesc() bool {\n\treturn h.Flags&0x8 != 0\n}\n\n// skipHeader skips over the local file header and returns the length of\n// the variable length fields (filename and extra) that were skipped,\n// not including the fixed 30 byte header.\nfunc skipHeader(r io.Reader) (variableLen int64, err error) {\n\tvar buf [fileHeaderLen]byte\n\tif _, err := io.ReadFull(r, buf[:]); err != nil {\n\t\treturn 0, err\n\t}\n\tb := readBuf(buf[:])\n\tif sig := b.uint32(); sig != fileHeaderSignature {\n\t\treturn 0, zip.ErrFormat\n\t}\n\tb = b[22:] // skip over most of the header\n\tfilenameLen := b.uint16()\n\textraLen := b.uint16()\n\tvariableLen = int64(filenameLen) + int64(extraLen)\n\tif _, err := io.CopyN(io.Discard, r, variableLen); err != nil {\n\t\treturn 0, err\n\t}\n\treturn variableLen, nil\n}\n\ntype checksumReader struct {\n\tbodyReader io.ReadCloser\n\trawRC      io.ReadCloser // the getRawReader result from OpenReader (e.g. 
http.Response.Body)\n\thash       hash.Hash32\n\tnread      uint64    // number of bytes read so far\n\tdesr       io.Reader // if non-nil, where to read data descriptor\n\tf          *FileHeader\n\terr        error // sticky error\n}\n\nfunc (r *checksumReader) setErr(err error) error {\n\tif r.err != nil {\n\t\treturn r.err\n\t}\n\tif err == nil {\n\t\treturn nil\n\t}\n\tr.err = err\n\tr.rawRC.Close()\n\treturn err\n}\n\nfunc (r *checksumReader) Read(b []byte) (n int, err error) {\n\tif r.err != nil {\n\t\treturn 0, r.err\n\t}\n\tn, err = r.bodyReader.Read(b)\n\tr.hash.Write(b[:n])\n\tr.nread += uint64(n)\n\tif r.nread > r.f.UncompressedSize64 {\n\t\treturn 0, r.setErr(zip.ErrFormat)\n\t}\n\tif err == nil {\n\t\treturn\n\t}\n\tif err == io.EOF {\n\t\tif r.nread != r.f.UncompressedSize64 {\n\t\t\treturn 0, r.setErr(io.ErrUnexpectedEOF)\n\t\t}\n\t\tif r.desr != nil {\n\t\t\tif err1 := readDataDescriptor(r.desr, r.f); err1 != nil {\n\t\t\t\tif err1 == io.EOF {\n\t\t\t\t\terr = io.ErrUnexpectedEOF\n\t\t\t\t} else {\n\t\t\t\t\terr = err1\n\t\t\t\t}\n\t\t\t} else if r.hash.Sum32() != r.f.CRC32 {\n\t\t\t\terr = zip.ErrChecksum\n\t\t\t}\n\t\t} else {\n\t\t\t// If there's not a data descriptor, we still compare\n\t\t\t// the CRC32 of what we've read against the file header\n\t\t\t// or TOC's CRC32, if it seems like it was set.\n\t\t\tif r.f.CRC32 != 0 && r.hash.Sum32() != r.f.CRC32 {\n\t\t\t\terr = zip.ErrChecksum\n\t\t\t}\n\t\t}\n\t}\n\treturn n, r.setErr(err)\n}\n\nfunc (r *checksumReader) Close() error {\n\t// In case the rawRC was an http.Response.Body, make a best effort attempt\n\t// to read the final [openReadSlop] bytes we might not've needed. 
This\n\t// increases the chance that the HTTP connection can be reused, at least\n\t// with HTTP/1.1, which doesn't really help my motivating example (Google\n\t// Takeout + Google Drive, which are HTTP/2+, where this optimization\n\t// doesn't matter)\n\tio.CopyN(io.Discard, r.rawRC, openReadSlop)\n\tr.rawRC.Close()\n\treturn r.bodyReader.Close()\n}\n\nfunc readDataDescriptor(r io.Reader, f *FileHeader) error {\n\tvar buf [dataDescriptorLen]byte\n\t// The spec says: \"Although not originally assigned a\n\t// signature, the value 0x08074b50 has commonly been adopted\n\t// as a signature value for the data descriptor record.\n\t// Implementers should be aware that ZIP files may be\n\t// encountered with or without this signature marking data\n\t// descriptors and should account for either case when reading\n\t// ZIP files to ensure compatibility.\"\n\t//\n\t// dataDescriptorLen includes the size of the signature but\n\t// first read just those 4 bytes to see if it exists.\n\tif _, err := io.ReadFull(r, buf[:4]); err != nil {\n\t\treturn err\n\t}\n\toff := 0\n\tmaybeSig := readBuf(buf[:4])\n\tif maybeSig.uint32() != dataDescriptorSignature {\n\t\t// No data descriptor signature. Keep these four\n\t\t// bytes.\n\t\toff += 4\n\t}\n\tif _, err := io.ReadFull(r, buf[off:12]); err != nil {\n\t\treturn err\n\t}\n\tb := readBuf(buf[:12])\n\tif b.uint32() != f.CRC32 {\n\t\treturn zip.ErrChecksum\n\t}\n\n\t// The two sizes that follow here can be either 32 bits or 64 bits\n\t// but the spec is not very clear on this and different\n\t// interpretations have been made causing incompatibilities. We\n\t// already have the sizes from the central directory so we can\n\t// just ignore these.\n\n\treturn nil\n}\n"
  },
  {
    "path": "ziputil/ziputil_test.go",
    "content": "package ziputil\n\nimport (\n\t\"archive/zip\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"reflect\"\n\t\"strings\"\n\t\"sync/atomic\"\n\t\"testing\"\n\n\t\"go4.org/readerutil\"\n)\n\ntype testZip struct {\n\tname      string\n\tdata      []byte\n\tfileNames []string\n}\n\nfunc (tz *testZip) Len() int    { return len(tz.data) }\nfunc (tz *testZip) Size() int64 { return int64(len(tz.data)) }\n\nfunc mkZip(t testing.TB, name string, numFiles int) *testZip {\n\ttz := &testZip{name: name}\n\n\t// Create a zip file in memory with numFiles files.\n\t// Each file is named \"fileN.txt\" and contains \"This is file N\".\n\tvar buf bytes.Buffer\n\tzw := zip.NewWriter(&buf)\n\tfor i := 0; i < numFiles; i++ {\n\t\tfileName := fmt.Sprintf(\"file%d.txt\", i)\n\t\ttz.fileNames = append(tz.fileNames, fileName)\n\t\tfw, _ := zw.Create(fileName)\n\t\tfw.Write([]byte(fmt.Sprintf(\"This is file %d\", i)))\n\t}\n\tzw.Close()\n\tt.Logf(\"created zip with %d files; size=%d bytes\", numFiles, buf.Len())\n\n\ttz.data = buf.Bytes()\n\treturn tz\n}\n\nfunc TestZipTOCSize(t *testing.T) {\n\tzipSmall := mkZip(t, \"small\", 1)\n\tzipMed := mkZip(t, \"med\", 100)\n\tzipLarge := mkZip(t, \"large\", 10000)\n\n\ttests := []struct {\n\t\tname   string\n\t\tzip    *testZip\n\t\tfooter int   // footer length of zip to pass\n\t\twant   int64 // or -1 for wanting ok=false\n\t}{\n\t\t{\n\t\t\tname:   \"smallzip-21\",\n\t\t\tzip:    zipSmall,\n\t\t\tfooter: 21,\n\t\t\twant:   -1,\n\t\t},\n\t\t{\n\t\t\tname:   \"smallzip-22\",\n\t\t\tzip:    zipSmall,\n\t\t\tfooter: 22,\n\t\t\twant:   77,\n\t\t},\n\t\t{\n\t\t\tname:   \"smallzip-all\",\n\t\t\tzip:    zipSmall,\n\t\t\tfooter: len(zipSmall.data),\n\t\t\twant:   77,\n\t\t},\n\t\t{\n\t\t\tname:   \"medzip-1024\",\n\t\t\tzip:    zipMed,\n\t\t\tfooter: 1024,\n\t\t\twant:   5612,\n\t\t},\n\t\t{\n\t\t\tname:   \"largezip-21\",\n\t\t\tzip:    zipLarge,\n\t\t\tfooter: 21,\n\t\t\twant:   -1,\n\t\t},\n\t\t{\n\t\t\tname:   
\"largezip-22\",\n\t\t\tzip:    zipLarge,\n\t\t\tfooter: 22,\n\t\t\twant:   578912,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tif tt.footer > tt.zip.Len() {\n\t\t\t\tpanic(\"bad test\")\n\t\t\t}\n\t\t\tfooter := tt.zip.data[len(tt.zip.data)-tt.footer:]\n\t\t\ttocSize, ok := ZipTOCSize(int64(len(tt.zip.data)), footer)\n\t\t\tgot := tocSize\n\t\t\tif !ok {\n\t\t\t\tgot = -1\n\t\t\t} else {\n\t\t\t\tif got >= tt.zip.Size() {\n\t\t\t\t\tt.Errorf(\"unexpected tocSize %d is >= file size %d\", got, tt.zip.Size())\n\t\t\t\t}\n\t\t\t}\n\t\t\tif got != tt.want {\n\t\t\t\tif tt.want == -1 {\n\t\t\t\t\tt.Fatalf(\"got tocSize = %d; want ok=false\", tocSize)\n\t\t\t\t}\n\t\t\t\tif !ok {\n\t\t\t\t\tt.Fatalf(\"got ok=false; want tocSize=%d\", tt.want)\n\t\t\t\t}\n\t\t\t\tt.Fatalf(\"got tocSize=%d; want %d\", tocSize, tt.want)\n\t\t\t}\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// Verify that archive/zip will read the TOC, with fake zero bytes\n\t\t\t// for the rest of the file.\n\t\t\tdataLen := tt.zip.Size() - tocSize\n\t\t\tra := readerutil.NewMultiReaderAt(\n\t\t\t\treaderutil.ZeroSizeReaderAt(dataLen),\n\t\t\t\tbytes.NewReader(tt.zip.data[tt.zip.Size()-tocSize:]),\n\t\t\t)\n\t\t\tt.Logf(\"total size = %d (tocSize = %d; dataLen = %d)\", tt.zip.Size(), tocSize, dataLen)\n\t\t\tzr, err := zip.NewReader(ra, tt.zip.Size())\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"zip.NewReader error: %v\", err)\n\t\t\t}\n\t\t\tvar gotNames []string\n\t\t\tfor _, f := range zr.File {\n\t\t\t\tgotNames = append(gotNames, f.Name)\n\t\t\t}\n\t\t\tif !reflect.DeepEqual(gotNames, tt.zip.fileNames) {\n\t\t\t\tt.Fatalf(\"got file names = %q; want %q\", gotNames, tt.zip.fileNames)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc goTestZips(t testing.TB) (baseNames []string) {\n\tfe, err := os.ReadDir(\"testdata\") // from Go's archive/zip testdata\n\tif err != nil {\n\t\tt.Fatalf(\"os.ReadDir testdata: %v\", err)\n\t}\n\tfor _, de := range fe {\n\t\tif 
!strings.HasSuffix(de.Name(), \".zip\") {\n\t\t\tcontinue\n\t\t}\n\t\tbaseNames = append(baseNames, de.Name())\n\t}\n\treturn\n}\n\nfunc TestGoTestZips(t *testing.T) {\n\tfor _, zipBase := range goTestZips(t) {\n\t\tzipData, err := os.ReadFile(filepath.Join(\"testdata\", zipBase))\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\n\t\twantZF, err := zip.NewReader(bytes.NewReader(zipData), int64(len(zipData)))\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"zip.NewReader on %s: %v\", zipBase, err)\n\t\t}\n\n\t\tt.Run(zipBase, func(t *testing.T) {\n\t\t\ttocSize, ok := ZipTOCSize(int64(len(zipData)), zipData)\n\t\t\tif !ok {\n\t\t\t\tt.Fatalf(\"ZipTOCSize failed\")\n\t\t\t}\n\t\t\tdataLen := int64(len(zipData)) - int64(tocSize)\n\t\t\tra := readerutil.NewMultiReaderAt(\n\t\t\t\treaderutil.ZeroSizeReaderAt(dataLen),\n\t\t\t\tbytes.NewReader(zipData[len(zipData)-int(tocSize):]),\n\t\t\t)\n\t\t\tgotZF, err := zip.NewReader(ra, int64(len(zipData)))\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"zip.NewReader error: %v\", err)\n\t\t\t}\n\n\t\t\tif len(gotZF.File) != len(wantZF.File) {\n\t\t\t\tt.Fatalf(\"got %d files; want %d files\", len(gotZF.File), len(wantZF.File))\n\t\t\t}\n\t\t\tfor i := range gotZF.File {\n\t\t\t\tgot, want := gotZF.File[i].FileHeader, wantZF.File[i].FileHeader\n\t\t\t\tif !reflect.DeepEqual(got, want) {\n\t\t\t\t\tt.Errorf(\"file %d: got header %+v; want %+v\", i, got, want)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestOpenWithReader(t *testing.T) {\n\tfor _, zipBase := range goTestZips(t) {\n\t\tt.Run(zipBase, func(t *testing.T) {\n\n\t\t\tzipData, err := os.ReadFile(filepath.Join(\"testdata\", zipBase))\n\t\t\tif err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\tzipSize := int64(len(zipData))\n\n\t\t\tzf, err := zip.NewReader(bytes.NewReader(zipData), zipSize)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"zip.OpenReader on %s: %v\", zipBase, err)\n\t\t\t}\n\n\t\t\tur, err := ParseTOC(zipSize, zipData)\n\t\t\tif err != nil {\n\t\t\t\tt.Fatalf(\"ParseTOC: 
%v\", err)\n\t\t\t}\n\t\t\tif len(ur.File) != len(zf.File) {\n\t\t\t\tt.Fatalf(\"ParseTOC got %d files; want %d files\", len(ur.File), len(zf.File))\n\t\t\t}\n\n\t\t\tfor i, zf := range zf.File {\n\t\t\t\tt.Run(fmt.Sprint(i), func(t *testing.T) {\n\t\t\t\t\trc, err := zf.Open()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tt.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t\tdefer rc.Close()\n\t\t\t\t\twant, err := io.ReadAll(rc)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tt.Fatalf(\"ReadAll: %v\", err)\n\t\t\t\t\t}\n\n\t\t\t\t\tfh := ur.File[i]\n\t\t\t\t\tvar sawClose atomic.Bool // was our closeTracker's Close called?\n\t\t\t\t\trc, err = OpenWithReader(fh, func(off, size int64) (io.ReadCloser, error) {\n\t\t\t\t\t\treturn &closeTracker{\n\t\t\t\t\t\t\tsawClose: &sawClose,\n\t\t\t\t\t\t\tReader:   io.NewSectionReader(bytes.NewReader(zipData), off, size),\n\t\t\t\t\t\t}, nil\n\t\t\t\t\t})\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tt.Fatal(err)\n\t\t\t\t\t}\n\t\t\t\t\tgot, err := io.ReadAll(rc)\n\t\t\t\t\trc.Close()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tt.Fatalf(\"ReadAll: %v\", err)\n\t\t\t\t\t}\n\t\t\t\t\tif !bytes.Equal(got, want) {\n\t\t\t\t\t\tt.Errorf(\"file %d: contents mismatch\", i)\n\t\t\t\t\t}\n\t\t\t\t\tif !sawClose.Load() {\n\t\t\t\t\t\tt.Errorf(\"didn't see Close\")\n\t\t\t\t\t}\n\t\t\t\t\tif !t.Failed() {\n\t\t\t\t\t\tt.Logf(\"pass; %d bytes, desc=%v\", len(got), fh.hasDataDesc())\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\ntype closeTracker struct {\n\tio.Reader\n\tsawClose *atomic.Bool\n}\n\nfunc (c *closeTracker) Close() error {\n\tc.sawClose.Store(true)\n\treturn nil\n}\n"
  }
]