[
  {
    "path": ".gitignore",
    "content": "# Binaries for programs and plugins\n*.exe\n*.exe~\n*.dll\n*.so\n*.dylib\n\n# Test binary, built with `go test -c`\n*.test\n\n# Output of the go coverage tool, specifically when used with LiteIDE\n*.out\n\n# GoLand\n.idea/\n\n# Dependency directories (remove the comment below to include it)\n# vendor/\n\n"
  },
  {
    "path": "ISSUE_TEMPLATE.md",
    "content": "## Issues tracked in Launchpad\n\nPlease file an issue against https://bugs.launchpad.net/juju/+filebug\n"
  },
  {
    "path": "LICENSE",
    "content": "All files in this repository are licensed as follows. If you contribute\nto this repository, it is assumed that you license your contribution\nunder the same license unless you state otherwise.\n\nAll files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.\n\nThis software is licensed under the LGPLv3, included below.\n\nAs a special exception to the GNU Lesser General Public License version 3\n(\"LGPL3\"), the copyright holders of this Library give you permission to\nconvey to a third party a Combined Work that links statically or dynamically\nto this Library without providing any Minimal Corresponding Source or\nMinimal Application Code as set out in 4d or providing the installation\ninformation set out in section 4e, provided that you comply with the other\nprovisions of LGPL3 and provided that you meet, for the Application the\nterms and conditions of the license(s) which apply to the Application.\n\nExcept as stated in this special exception, the provisions of LGPL3 will\ncontinue to comply in full to this Library. If you modify this Library, you\nmay apply this exception to your version of this Library, but you are not\nobliged to do so. If you do not wish to do so, delete this exception\nstatement from your version. This exception does not (and cannot) modify any\nlicense terms which apply to the Application, with which you must still\ncomply.\n\n\n                   GNU LESSER GENERAL PUBLIC LICENSE\n                       Version 3, 29 June 2007\n\n Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>\n Everyone is permitted to copy and distribute verbatim copies\n of this license document, but changing it is not allowed.\n\n\n  This version of the GNU Lesser General Public License incorporates\nthe terms and conditions of version 3 of the GNU General Public\nLicense, supplemented by the additional permissions listed below.\n\n  0. 
Additional Definitions.\n\n  As used herein, \"this License\" refers to version 3 of the GNU Lesser\nGeneral Public License, and the \"GNU GPL\" refers to version 3 of the GNU\nGeneral Public License.\n\n  \"The Library\" refers to a covered work governed by this License,\nother than an Application or a Combined Work as defined below.\n\n  An \"Application\" is any work that makes use of an interface provided\nby the Library, but which is not otherwise based on the Library.\nDefining a subclass of a class defined by the Library is deemed a mode\nof using an interface provided by the Library.\n\n  A \"Combined Work\" is a work produced by combining or linking an\nApplication with the Library.  The particular version of the Library\nwith which the Combined Work was made is also called the \"Linked\nVersion\".\n\n  The \"Minimal Corresponding Source\" for a Combined Work means the\nCorresponding Source for the Combined Work, excluding any source code\nfor portions of the Combined Work that, considered in isolation, are\nbased on the Application, and not on the Linked Version.\n\n  The \"Corresponding Application Code\" for a Combined Work means the\nobject code and/or source code for the Application, including any data\nand utility programs needed for reproducing the Combined Work from the\nApplication, but excluding the System Libraries of the Combined Work.\n\n  1. Exception to Section 3 of the GNU GPL.\n\n  You may convey a covered work under sections 3 and 4 of this License\nwithout being bound by section 3 of the GNU GPL.\n\n  2. 
Conveying Modified Versions.\n\n  If you modify a copy of the Library, and, in your modifications, a\nfacility refers to a function or data to be supplied by an Application\nthat uses the facility (other than as an argument passed when the\nfacility is invoked), then you may convey a copy of the modified\nversion:\n\n   a) under this License, provided that you make a good faith effort to\n   ensure that, in the event an Application does not supply the\n   function or data, the facility still operates, and performs\n   whatever part of its purpose remains meaningful, or\n\n   b) under the GNU GPL, with none of the additional permissions of\n   this License applicable to that copy.\n\n  3. Object Code Incorporating Material from Library Header Files.\n\n  The object code form of an Application may incorporate material from\na header file that is part of the Library.  You may convey such object\ncode under terms of your choice, provided that, if the incorporated\nmaterial is not limited to numerical parameters, data structure\nlayouts and accessors, or small macros, inline functions and templates\n(ten or fewer lines in length), you do both of the following:\n\n   a) Give prominent notice with each copy of the object code that the\n   Library is used in it and that the Library and its use are\n   covered by this License.\n\n   b) Accompany the object code with a copy of the GNU GPL and this license\n   document.\n\n  4. 
Combined Works.\n\n  You may convey a Combined Work under terms of your choice that,\ntaken together, effectively do not restrict modification of the\nportions of the Library contained in the Combined Work and reverse\nengineering for debugging such modifications, if you also do each of\nthe following:\n\n   a) Give prominent notice with each copy of the Combined Work that\n   the Library is used in it and that the Library and its use are\n   covered by this License.\n\n   b) Accompany the Combined Work with a copy of the GNU GPL and this license\n   document.\n\n   c) For a Combined Work that displays copyright notices during\n   execution, include the copyright notice for the Library among\n   these notices, as well as a reference directing the user to the\n   copies of the GNU GPL and this license document.\n\n   d) Do one of the following:\n\n       0) Convey the Minimal Corresponding Source under the terms of this\n       License, and the Corresponding Application Code in a form\n       suitable for, and under terms that permit, the user to\n       recombine or relink the Application with a modified version of\n       the Linked Version to produce a modified Combined Work, in the\n       manner specified by section 6 of the GNU GPL for conveying\n       Corresponding Source.\n\n       1) Use a suitable shared library mechanism for linking with the\n       Library.  
A suitable mechanism is one that (a) uses at run time\n       a copy of the Library already present on the user's computer\n       system, and (b) will operate properly with a modified version\n       of the Library that is interface-compatible with the Linked\n       Version.\n\n   e) Provide Installation Information, but only if you would otherwise\n   be required to provide such information under section 6 of the\n   GNU GPL, and only to the extent that such information is\n   necessary to install and execute a modified version of the\n   Combined Work produced by recombining or relinking the\n   Application with a modified version of the Linked Version. (If\n   you use option 4d0, the Installation Information must accompany\n   the Minimal Corresponding Source and Corresponding Application\n   Code. If you use option 4d1, you must provide the Installation\n   Information in the manner specified by section 6 of the GNU GPL\n   for conveying Corresponding Source.)\n\n  5. Combined Libraries.\n\n  You may place library facilities that are a work based on the\nLibrary side by side in a single library together with other library\nfacilities that are not Applications and are not covered by this\nLicense, and convey such a combined library under terms of your\nchoice, if you do both of the following:\n\n   a) Accompany the combined library with a copy of the same work based\n   on the Library, uncombined with any other library facilities,\n   conveyed under the terms of this License.\n\n   b) Give prominent notice with the combined library that part of it\n   is a work based on the Library, and explaining where to find the\n   accompanying uncombined form of the same work.\n\n  6. Revised Versions of the GNU Lesser General Public License.\n\n  The Free Software Foundation may publish revised and/or new versions\nof the GNU Lesser General Public License from time to time. 
Such new\nversions will be similar in spirit to the present version, but may\ndiffer in detail to address new problems or concerns.\n\n  Each version is given a distinguishing version number. If the\nLibrary as you received it specifies that a certain numbered version\nof the GNU Lesser General Public License \"or any later version\"\napplies to it, you have the option of following the terms and\nconditions either of that published version or of any later version\npublished by the Free Software Foundation. If the Library as you\nreceived it does not specify a version number of the GNU Lesser\nGeneral Public License, you may choose any version of the GNU Lesser\nGeneral Public License ever published by the Free Software Foundation.\n\n  If the Library as you received it specifies that a proxy can decide\nwhether future versions of the GNU Lesser General Public License shall\napply, that proxy's public statement of acceptance of any version is\npermanent authorization for you to choose that version for the\nLibrary.\n"
  },
  {
    "path": "LICENSE.golang",
    "content": "This licence applies to the following files:\n\n* filepath/stdlib.go\n* filepath/stdlibmatch.go\n\nCopyright (c) 2010 The Go Authors. All rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are\nmet:\n\n   * Redistributions of source code must retain the above copyright\nnotice, this list of conditions and the following disclaimer.\n   * Redistributions in binary form must reproduce the above\ncopyright notice, this list of conditions and the following disclaimer\nin the documentation and/or other materials provided with the\ndistribution.\n   * Neither the name of Google Inc. nor the names of its\ncontributors may be used to endorse or promote products derived from\nthis software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n\"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\nLIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR\nA PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\nOWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\nSPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\nLIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\nDATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\nTHEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
  },
  {
    "path": "Makefile",
    "content": "PROJECT := github.com/juju/utils/v4\n\n.PHONY: check-licence check-go check\n\ncheck: check-licence check-go\n\tgo test -v $(PROJECT)/... \n\ncheck-licence:\n\t@(grep -rFl \"Licensed under the LGPLv3\" .;\\\n\t\tgrep -rFl \"MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\" .;\\\n\t\tgrep -rFl \"license that can be found in the LICENSE.ricochet2200 file\" .; \\\n\t\tfind . -name \"*.go\") | sed -e 's,\\./,,' | sort | uniq -u | \\\n\t\txargs -I {} echo FAIL: licence missed: {}\n\ncheck-go:\n\t$(eval GOFMT := $(strip $(shell gofmt -l .| sed -e \"s/^/ /g\")))\n\t@(if [ x$(GOFMT) != x\"\" ]; then \\\n\t\techo go fmt is sad: $(GOFMT); \\\n\t\texit 1; \\\n\tfi )\n\t@(go vet -all -composites=false -copylocks=false .)\n\n# Install packages required to develop in utils and run tests.\ninstall-dependencies: install-snap-dependencies install-mongo-dependencies\n\t@echo Installing dependencies\n\t@echo Installing bzr\n\t@sudo apt install bzr --yes\n\t@echo Installing zip\n\t@sudo apt install zip --yes\n\ninstall-snap-dependencies:\n## install-snap-dependencies: Install the supported snap dependencies\n\t@echo Installing go-1.17 snap\n\t@sudo snap install go --channel=1.17/stable --classic\n\ninstall-mongo-dependencies:\n## install-mongo-dependencies: Install Mongo and its dependencies\n\t@echo Adding juju PPA for mongodb\n\t@sudo apt-add-repository --yes ppa:juju/stable\n\t@sudo apt-get update\n\t@echo Installing mongodb\n\t@sudo apt-get --yes install  \\\n\t$(strip $(DEPENDENCIES)) \\\n\t$(shell apt-cache madison mongodb-server-core juju-mongodb3.2 juju-mongodb mongodb-server | head -1 | cut -d '|' -f1)\n"
  },
  {
    "path": "README.md",
    "content": "juju/utils\n============\n\nThis package provides general utility packages and functions.\n"
  },
  {
    "path": "SECURITY.md",
    "content": "# Security policy\n\n## Reporting a vulnerability\n\nPlease provide a description of the issue, the steps you took to\ncreate the issue, affected versions, and, if known, mitigations for\nthe issue.\n\nThe preferred way to report a security issue is through\n[GitHub's security advisory for this project](https://github.com/juju/utils/security/advisories/new). See\n[Privately reporting a security\nvulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing/privately-reporting-a-security-vulnerability)\nfor instructions on reporting using GitHub's security advisory feature.\n\nThe [Ubuntu Security disclosure and embargo\npolicy](https://ubuntu.com/security/disclosure-policy) contains more\ninformation about how you can contact us, what you can expect when you contact us,\nand what we expect from you.\n"
  },
  {
    "path": "arch/arch.go",
    "content": "// Copyright 2014-2016 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage arch\n\nimport (\n\t\"regexp\"\n\t\"runtime\"\n\t\"strings\"\n)\n\n// The following constants define the machine architectures supported by Juju.\nconst (\n\tAMD64   = \"amd64\"\n\tI386    = \"i386\"\n\tARM     = \"armhf\"\n\tARM64   = \"arm64\"\n\tPPC64EL = \"ppc64el\"\n\tS390X   = \"s390x\"\n\tRISCV64 = \"riscv64\"\n\n\t// Older versions of Juju used \"ppc64\" instead of ppc64el\n\tLEGACY_PPC64 = \"ppc64\"\n)\n\n// AllSupportedArches records the machine architectures recognised by Juju.\nvar AllSupportedArches = []string{\n\tAMD64,\n\tI386,\n\tARM,\n\tARM64,\n\tPPC64EL,\n\tS390X,\n\tRISCV64,\n}\n\n// Info records the information regarding each architecture recognised by Juju.\nvar Info = map[string]ArchInfo{\n\tAMD64:   {64},\n\tI386:    {32},\n\tARM:     {32},\n\tARM64:   {64},\n\tPPC64EL: {64},\n\tS390X:   {64},\n\tRISCV64: {64},\n}\n\n// ArchInfo is a struct containing information about a supported architecture.\ntype ArchInfo struct {\n\t// WordSize is the architecture's word size, in bits.\n\tWordSize int\n}\n\n// archREs maps regular expressions for matching\n// `uname -m` to architectures recognised by Juju.\nvar archREs = []struct {\n\t*regexp.Regexp\n\tarch string\n}{\n\t{regexp.MustCompile(\"amd64|x86_64\"), AMD64},\n\t{regexp.MustCompile(\"i?[3-9]86\"), I386},\n\t{regexp.MustCompile(\"(arm$)|(armv.*)\"), ARM},\n\t{regexp.MustCompile(\"aarch64\"), ARM64},\n\t{regexp.MustCompile(\"ppc64|ppc64el|ppc64le\"), PPC64EL},\n\t{regexp.MustCompile(\"s390x\"), S390X},\n\t{regexp.MustCompile(\"riscv64|risc$|risc-[vV]64\"), RISCV64},\n}\n\n// Override for testing.\nvar HostArch = hostArch\n\n// hostArch returns the Juju architecture of the machine on which it is run.\nfunc hostArch() string {\n\treturn NormaliseArch(runtime.GOARCH)\n}\n\n// NormaliseArch returns the Juju architecture corresponding to a machine's\n// reported architecture. 
The Juju architecture is used to filter simple\n// streams lookup of tools and images.\nfunc NormaliseArch(rawArch string) string {\n\trawArch = strings.TrimSpace(rawArch)\n\tfor _, re := range archREs {\n\t\tif re.Match([]byte(rawArch)) {\n\t\t\treturn re.arch\n\t\t}\n\t}\n\treturn rawArch\n}\n\n// IsSupportedArch returns true if arch is one supported by Juju.\nfunc IsSupportedArch(arch string) bool {\n\tfor _, a := range AllSupportedArches {\n\t\tif a == arch {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "arch/arch_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage arch_test\n\nimport (\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/arch\"\n)\n\ntype archSuite struct {\n}\n\nvar _ = gc.Suite(&archSuite{})\n\nfunc (s *archSuite) TestHostArch(c *gc.C) {\n\ta := arch.HostArch()\n\tc.Assert(arch.IsSupportedArch(a), jc.IsTrue)\n}\n\nfunc (s *archSuite) TestNormaliseArch(c *gc.C) {\n\tfor _, test := range []struct {\n\t\traw  string\n\t\tarch string\n\t}{\n\t\t{\"windows\", \"windows\"},\n\t\t{\"amd64\", \"amd64\"},\n\t\t{\"x86_64\", \"amd64\"},\n\t\t{\"386\", \"i386\"},\n\t\t{\"i386\", \"i386\"},\n\t\t{\"i486\", \"i386\"},\n\t\t{\"arm\", \"armhf\"},\n\t\t{\"armv\", \"armhf\"},\n\t\t{\"armv7\", \"armhf\"},\n\t\t{\"aarch64\", \"arm64\"},\n\t\t{\"arm64\", \"arm64\"},\n\t\t{\"ppc64el\", \"ppc64el\"},\n\t\t{\"ppc64le\", \"ppc64el\"},\n\t\t{\"ppc64\", \"ppc64el\"},\n\t\t{\"s390x\", \"s390x\"},\n\t\t{\"riscv64\", \"riscv64\"},\n\t\t{\"risc\", \"riscv64\"},\n\t\t{\"risc-v64\", \"riscv64\"},\n\t\t{\"risc-V64\", \"riscv64\"},\n\t} {\n\t\tarch := arch.NormaliseArch(test.raw)\n\t\tc.Check(arch, gc.Equals, test.arch)\n\t}\n}\n\nfunc (s *archSuite) TestIsSupportedArch(c *gc.C) {\n\tfor _, a := range arch.AllSupportedArches {\n\t\tc.Assert(arch.IsSupportedArch(a), jc.IsTrue)\n\t}\n\tc.Assert(arch.IsSupportedArch(\"invalid\"), jc.IsFalse)\n}\n\nfunc (s *archSuite) TestArchInfo(c *gc.C) {\n\tfor _, a := range arch.AllSupportedArches {\n\t\t_, ok := arch.Info[a]\n\t\tc.Assert(ok, jc.IsTrue)\n\t}\n}\n"
  },
  {
    "path": "arch/package_test.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage arch_test\n\nimport (\n\t\"testing\"\n\n\tgc \"gopkg.in/check.v1\"\n)\n\nfunc Test(t *testing.T) {\n\tgc.TestingT(t)\n}\n"
  },
  {
    "path": "attempt.go",
    "content": "// Copyright 2011, 2012, 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils\n\nimport (\n\t\"time\"\n)\n\n// The Attempt and AttemptStrategy types are copied from those in launchpad.net/goamz/aws.\n\n// AttemptStrategy represents a strategy for waiting for an action\n// to complete successfully.\ntype AttemptStrategy struct {\n\tTotal time.Duration // total duration of attempt.\n\tDelay time.Duration // interval between each try in the burst.\n\tMin   int           // minimum number of retries; overrides Total\n}\n\ntype Attempt struct {\n\tstrategy AttemptStrategy\n\tlast     time.Time\n\tend      time.Time\n\tforce    bool\n\tcount    int\n}\n\n// Start begins a new sequence of attempts for the given strategy.\nfunc (s AttemptStrategy) Start() *Attempt {\n\tnow := time.Now()\n\treturn &Attempt{\n\t\tstrategy: s,\n\t\tlast:     now,\n\t\tend:      now.Add(s.Total),\n\t\tforce:    true,\n\t}\n}\n\n// Next waits until it is time to perform the next attempt or returns\n// false if it is time to stop trying.\n// It always returns true the first time it is called - we are guaranteed to\n// make at least one attempt.\nfunc (a *Attempt) Next() bool {\n\tnow := time.Now()\n\tsleep := a.nextSleep(now)\n\tif !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count {\n\t\treturn false\n\t}\n\ta.force = false\n\tif sleep > 0 && a.count > 0 {\n\t\ttime.Sleep(sleep)\n\t\tnow = time.Now()\n\t}\n\ta.count++\n\ta.last = now\n\treturn true\n}\n\nfunc (a *Attempt) nextSleep(now time.Time) time.Duration {\n\tsleep := a.strategy.Delay - now.Sub(a.last)\n\tif sleep < 0 {\n\t\treturn 0\n\t}\n\treturn sleep\n}\n\n// HasNext returns whether another attempt will be made if the current\n// one fails. 
If it returns true, the following call to Next is\n// guaranteed to return true.\nfunc (a *Attempt) HasNext() bool {\n\tif a.force || a.strategy.Min > a.count {\n\t\treturn true\n\t}\n\tnow := time.Now()\n\tif now.Add(a.nextSleep(now)).Before(a.end) {\n\t\ta.force = true\n\t\treturn true\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "attempt_test.go",
    "content": "// Copyright 2011, 2012, 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils_test\n\nimport (\n\t\"time\"\n\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4\"\n)\n\nfunc doSomething() (int, error) { return 0, nil }\n\nfunc shouldRetry(error) bool { return false }\n\nfunc doSomethingWith(int) {}\n\nfunc ExampleAttempt_HasNext() {\n\t// This example shows how Attempt.HasNext can be used to help\n\t// structure an attempt loop. If the godoc example code allowed\n\t// us to make the example return an error, we would uncomment\n\t// the commented return statements.\n\tattempts := utils.AttemptStrategy{\n\t\tTotal: 1 * time.Second,\n\t\tDelay: 250 * time.Millisecond,\n\t}\n\tfor attempt := attempts.Start(); attempt.Next(); {\n\t\tx, err := doSomething()\n\t\tif shouldRetry(err) && attempt.HasNext() {\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\t// return err\n\t\t\treturn\n\t\t}\n\t\tdoSomethingWith(x)\n\t}\n\t// return ErrTimedOut\n\treturn\n}\n\nfunc (*utilsSuite) TestAttemptTiming(c *gc.C) {\n\ttestAttempt := utils.AttemptStrategy{\n\t\tTotal: 0.25e9,\n\t\tDelay: 0.1e9,\n\t}\n\twant := []time.Duration{0, 0.1e9, 0.2e9, 0.2e9}\n\tgot := make([]time.Duration, 0, len(want)) // avoid allocation when testing timing\n\tt0 := time.Now()\n\tfor a := testAttempt.Start(); a.Next(); {\n\t\tgot = append(got, time.Now().Sub(t0))\n\t}\n\tgot = append(got, time.Now().Sub(t0))\n\tc.Assert(got, gc.HasLen, len(want))\n\tconst margin = 0.01e9\n\tfor i, got := range want {\n\t\tlo := want[i] - margin\n\t\thi := want[i] + margin\n\t\tif got < lo || got > hi {\n\t\t\tc.Errorf(\"attempt %d want %g got %g\", i, want[i].Seconds(), got.Seconds())\n\t\t}\n\t}\n}\n\nfunc (*utilsSuite) TestAttemptNextHasNext(c *gc.C) {\n\ta := utils.AttemptStrategy{}.Start()\n\tc.Assert(a.Next(), gc.Equals, true)\n\tc.Assert(a.Next(), gc.Equals, false)\n\n\ta = utils.AttemptStrategy{}.Start()\n\tc.Assert(a.Next(), gc.Equals, 
true)\n\tc.Assert(a.HasNext(), gc.Equals, false)\n\tc.Assert(a.Next(), gc.Equals, false)\n\n\ta = utils.AttemptStrategy{Total: 2e8}.Start()\n\tc.Assert(a.Next(), gc.Equals, true)\n\tc.Assert(a.HasNext(), gc.Equals, true)\n\ttime.Sleep(2e8)\n\tc.Assert(a.HasNext(), gc.Equals, true)\n\tc.Assert(a.Next(), gc.Equals, true)\n\tc.Assert(a.Next(), gc.Equals, false)\n\n\ta = utils.AttemptStrategy{Total: 1e8, Min: 2}.Start()\n\ttime.Sleep(1e8)\n\tc.Assert(a.Next(), gc.Equals, true)\n\tc.Assert(a.HasNext(), gc.Equals, true)\n\tc.Assert(a.Next(), gc.Equals, true)\n\tc.Assert(a.HasNext(), gc.Equals, false)\n\tc.Assert(a.Next(), gc.Equals, false)\n}\n"
  },
  {
    "path": "bzr/bzr.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\n// Package bzr offers an interface to manage branches of the Bazaar VCS.\npackage bzr\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path\"\n\t\"strings\"\n)\n\n// Branch represents a Bazaar branch.\ntype Branch struct {\n\tlocation string\n\tenv      []string\n}\n\n// New returns a new Branch for the Bazaar branch at location.\nfunc New(location string) *Branch {\n\tb := &Branch{location, cenv()}\n\tif _, err := os.Stat(location); err == nil {\n\t\tstdout, _, err := b.bzr(\"root\")\n\t\tif err == nil {\n\t\t\t// Need to trim \\r as well as \\n for Windows compatibility\n\t\t\tb.location = strings.TrimRight(string(stdout), \"\\r\\n\")\n\t\t}\n\t}\n\treturn b\n}\n\n// cenv returns a copy of the current process environment with LC_ALL=C.\nfunc cenv() []string {\n\tenv := os.Environ()\n\tfor i, pair := range env {\n\t\tif strings.HasPrefix(pair, \"LC_ALL=\") {\n\t\t\tenv[i] = \"LC_ALL=C\"\n\t\t\treturn env\n\t\t}\n\t}\n\treturn append(env, \"LC_ALL=C\")\n}\n\n// Location returns the location of branch b.\nfunc (b *Branch) Location() string {\n\treturn b.location\n}\n\n// Join returns b's location with parts appended as path components.\n// In other words, if b's location is \"lp:foo\", and parts is {\"bar, baz\"},\n// Join returns \"lp:foo/bar/baz\".\nfunc (b *Branch) Join(parts ...string) string {\n\treturn path.Join(append([]string{b.location}, parts...)...)\n}\n\nfunc (b *Branch) bzr(subcommand string, args ...string) (stdout, stderr []byte, err error) {\n\tcmd := exec.Command(\"bzr\", append([]string{subcommand}, args...)...)\n\tif _, err := os.Stat(b.location); err == nil {\n\t\tcmd.Dir = b.location\n\t}\n\terrbuf := &bytes.Buffer{}\n\tcmd.Stderr = errbuf\n\tcmd.Env = b.env\n\tstdout, err = cmd.Output()\n\t// Some commands fail with exit status 0 (e.g. bzr root). 
:-(\n\tif err != nil || bytes.Contains(errbuf.Bytes(), []byte(\"ERROR\")) {\n\t\tvar errmsg string\n\t\tif err != nil {\n\t\t\terrmsg = err.Error()\n\t\t}\n\t\treturn nil, nil, fmt.Errorf(`error running \"bzr %s\": %s%s%s`, subcommand, stdout, errbuf.Bytes(), errmsg)\n\t}\n\treturn stdout, errbuf.Bytes(), err\n}\n\n// Init intializes a new branch at b's location.\nfunc (b *Branch) Init() error {\n\t_, _, err := b.bzr(\"init\", b.location)\n\treturn err\n}\n\n// Add adds to b the path resultant from calling b.Join(parts...).\nfunc (b *Branch) Add(parts ...string) error {\n\t_, _, err := b.bzr(\"add\", b.Join(parts...))\n\treturn err\n}\n\n// Commit commits pending changes into b.\nfunc (b *Branch) Commit(message string) error {\n\t_, _, err := b.bzr(\"commit\", \"-q\", \"-m\", message)\n\treturn err\n}\n\n// RevisionId returns the Bazaar revision id for the tip of b.\nfunc (b *Branch) RevisionId() (string, error) {\n\tstdout, stderr, err := b.bzr(\"revision-info\", \"-d\", b.location)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tpair := bytes.Fields(stdout)\n\tif len(pair) != 2 {\n\t\treturn \"\", fmt.Errorf(`invalid output from \"bzr revision-info\": %s%s`, stdout, stderr)\n\t}\n\tid := string(pair[1])\n\tif id == \"null:\" {\n\t\treturn \"\", fmt.Errorf(\"branch has no content\")\n\t}\n\treturn id, nil\n}\n\n// PushLocation returns the default push location for b.\nfunc (b *Branch) PushLocation() (string, error) {\n\tstdout, _, err := b.bzr(\"info\", b.location)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif i := bytes.Index(stdout, []byte(\"push branch:\")); i >= 0 {\n\t\treturn string(stdout[i+13 : i+bytes.IndexAny(stdout[i:], \"\\r\\n\")]), nil\n\t}\n\treturn \"\", fmt.Errorf(\"no push branch location defined\")\n}\n\n// PushAttr holds options for the Branch.Push method.\ntype PushAttr struct {\n\tLocation string // Location to push to. 
Use the default push location if empty.\n\tRemember bool   // Whether to remember the location being pushed to as the default.\n}\n\n// Push pushes any new revisions in b to attr.Location if that's\n// provided, or to the default push location otherwise.\n// See PushAttr for other options.\nfunc (b *Branch) Push(attr *PushAttr) error {\n\tvar args []string\n\tif attr != nil {\n\t\tif attr.Remember {\n\t\t\targs = append(args, \"--remember\")\n\t\t}\n\t\tif attr.Location != \"\" {\n\t\t\targs = append(args, attr.Location)\n\t\t}\n\t}\n\t_, _, err := b.bzr(\"push\", args...)\n\treturn err\n}\n\n// CheckClean returns an error if 'bzr status' is not clean.\nfunc (b *Branch) CheckClean() error {\n\tstdout, _, err := b.bzr(\"status\", b.location)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif bytes.Count(stdout, []byte{'\\n'}) == 1 && bytes.Contains(stdout, []byte(`See \"bzr shelve --list\" for details.`)) {\n\t\treturn nil // Shelves are fine.\n\t}\n\tif len(stdout) > 0 {\n\t\treturn fmt.Errorf(\"branch is not clean (bzr status)\")\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "bzr/bzr_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Copyright 2014 Cloudbase Solutions SRL\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage bzr_test\n\nimport (\n\t\"io/ioutil\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\tstdtesting \"testing\"\n\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/bzr\"\n)\n\nfunc Test(t *stdtesting.T) {\n\tgc.TestingT(t)\n}\n\nvar _ = gc.Suite(&BzrSuite{})\n\ntype BzrSuite struct {\n\ttesting.CleanupSuite\n\tb *bzr.Branch\n}\n\nconst bzr_config = `[DEFAULT]\nemail = testing <test@example.com>\n`\n\nfunc (s *BzrSuite) SetUpTest(c *gc.C) {\n\ts.CleanupSuite.SetUpTest(c)\n\tbzrdir := c.MkDir()\n\ts.PatchEnvironment(\"BZR_HOME\", bzrdir)\n\terr := os.MkdirAll(filepath.Join(bzrdir, bzrHome), 0755)\n\tc.Assert(err, jc.ErrorIsNil)\n\terr = ioutil.WriteFile(\n\t\tfilepath.Join(bzrdir, bzrHome, \"bazaar.conf\"),\n\t\t[]byte(bzr_config), 0644)\n\tc.Assert(err, jc.ErrorIsNil)\n\ts.b = bzr.New(c.MkDir())\n\tc.Assert(s.b.Init(), gc.IsNil)\n}\n\nfunc (s *BzrSuite) TestNewFindsRoot(c *gc.C) {\n\terr := os.Mkdir(s.b.Join(\"dir\"), 0755)\n\tc.Assert(err, jc.ErrorIsNil)\n\tb := bzr.New(s.b.Join(\"dir\"))\n\t// When bzr has to search for the root, it will expand any symlinks it\n\t// found along the way.\n\tpath, err := filepath.EvalSymlinks(s.b.Location())\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(b.Location(), jc.SamePath, path)\n}\n\nfunc (s *BzrSuite) TestJoin(c *gc.C) {\n\tpath := bzr.New(\"lp:foo\").Join(\"baz\", \"bar\")\n\tc.Assert(path, gc.Equals, \"lp:foo/baz/bar\")\n}\n\nfunc (s *BzrSuite) TestErrorHandling(c *gc.C) {\n\terr := bzr.New(\"/non/existent/path\").Init()\n\tc.Assert(err, gc.ErrorMatches, `(?s)error running \"bzr init\":.*does not exist.*`)\n}\n\nfunc (s *BzrSuite) TestInit(c *gc.C) {\n\t_, err := os.Stat(s.b.Join(\".bzr\"))\n\tc.Assert(err, jc.ErrorIsNil)\n}\n\nfunc (s *BzrSuite) TestRevisionIdOnEmpty(c *gc.C) 
{\n\trevid, err := s.b.RevisionId()\n\tc.Assert(err, gc.ErrorMatches, \"branch has no content\")\n\tc.Assert(revid, gc.Equals, \"\")\n}\n\nfunc (s *BzrSuite) TestCommit(c *gc.C) {\n\tf, err := os.Create(s.b.Join(\"myfile\"))\n\tc.Assert(err, jc.ErrorIsNil)\n\tf.Close()\n\terr = s.b.Add(\"myfile\")\n\tc.Assert(err, jc.ErrorIsNil)\n\terr = s.b.Commit(\"my log message\")\n\tc.Assert(err, jc.ErrorIsNil)\n\n\trevid, err := s.b.RevisionId()\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tcmd := exec.Command(\"bzr\", \"log\", \"--long\", \"--show-ids\", \"-v\", s.b.Location())\n\toutput, err := cmd.CombinedOutput()\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(string(output), gc.Matches, \"(?s).*revision-id: \"+revid+\"\\n.*message:\\n.*my log message\\n.*added:\\n.*myfile .*\")\n}\n\nfunc (s *BzrSuite) TestPush(c *gc.C) {\n\tb1 := bzr.New(c.MkDir())\n\tb2 := bzr.New(c.MkDir())\n\tb3 := bzr.New(c.MkDir())\n\tc.Assert(b1.Init(), gc.IsNil)\n\tc.Assert(b2.Init(), gc.IsNil)\n\tc.Assert(b3.Init(), gc.IsNil)\n\n\t// Create and add b1/file to the branch.\n\tf, err := os.Create(b1.Join(\"file\"))\n\tc.Assert(err, jc.ErrorIsNil)\n\tf.Close()\n\terr = b1.Add(\"file\")\n\tc.Assert(err, jc.ErrorIsNil)\n\terr = b1.Commit(\"added file\")\n\tc.Assert(err, jc.ErrorIsNil)\n\n\t// Push file to b2.\n\terr = b1.Push(&bzr.PushAttr{Location: b2.Location()})\n\tc.Assert(err, jc.ErrorIsNil)\n\n\t// Push location should be set to b2.\n\tlocation, err := b1.PushLocation()\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(location, jc.SamePath, b2.Location())\n\n\t// Now push it to b3.\n\terr = b1.Push(&bzr.PushAttr{Location: b3.Location()})\n\tc.Assert(err, jc.ErrorIsNil)\n\n\t// Push location is still set to b2.\n\tlocation, err = b1.PushLocation()\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(location, jc.SamePath, b2.Location())\n\n\t// Push it again, this time with the remember flag set.\n\terr = b1.Push(&bzr.PushAttr{Location: b3.Location(), Remember: true})\n\tc.Assert(err, jc.ErrorIsNil)\n\n\t// Now the push 
location has shifted to b3.\n\tlocation, err = b1.PushLocation()\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(location, jc.SamePath, b3.Location())\n\n\t// Both b2 and b3 should have the file.\n\t_, err = os.Stat(b2.Join(\"file\"))\n\tc.Assert(err, jc.ErrorIsNil)\n\t_, err = os.Stat(b3.Join(\"file\"))\n\tc.Assert(err, jc.ErrorIsNil)\n}\n\nfunc (s *BzrSuite) TestCheckClean(c *gc.C) {\n\terr := s.b.CheckClean()\n\tc.Assert(err, jc.ErrorIsNil)\n\n\t// Create and add b1/file to the branch.\n\tf, err := os.Create(s.b.Join(\"file\"))\n\tc.Assert(err, jc.ErrorIsNil)\n\tf.Close()\n\n\terr = s.b.CheckClean()\n\tc.Assert(err, gc.ErrorMatches, `branch is not clean \\(bzr status\\)`)\n}\n"
  },
  {
    "path": "bzr/bzr_unix_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Copyright 2014 Cloudbase Solutions SRL\n// Licensed under the LGPLv3, see LICENCE file for details.\n\n//go:build !windows\n// +build !windows\n\npackage bzr_test\n\nconst bzrHome = \".bazaar\"\n"
  },
  {
    "path": "bzr/bzr_windows_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Copyright 2014 Cloudbase Solutions SRL\n// Licensed under the LGPLv3, see LICENCE file for details.\n\n//go:build windows\n// +build windows\n\npackage bzr_test\n\nconst bzrHome = \"Bazaar/2.0\"\n"
  },
  {
    "path": "cache/cache.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\n// Package cache provides a simple caching mechanism\n// that limits the age of cache entries and tries to avoid large\n// repopulation events by staggering refresh times.\npackage cache\n\nimport (\n\t\"math/rand\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/juju/errors\"\n)\n\n// entry holds a cache entry. The expire field\n// holds the time after which the entry will be\n// considered invalid.\ntype entry struct {\n\tvalue  any\n\texpire time.Time\n}\n\n// Key represents a cache key. It must be a comparable type.\ntype Key any\n\n// Cache holds a time-limited set of values for arbitrary keys.\ntype Cache struct {\n\tmaxAge time.Duration\n\n\t// mu guards the fields below it.\n\tmu sync.Mutex\n\n\t// expire holds when the cache is due to expire.\n\texpire time.Time\n\n\t// We hold two maps so that can avoid scanning through all the\n\t// items in the cache when the cache needs to be refreshed.\n\t// Instead, we move items from old to new when they're accessed\n\t// and throw away the old map at refresh time.\n\told, new map[Key]entry\n\n\tinFlight map[Key]*fetchCall\n}\n\n// fetch represents an in-progress fetch call. If a cache Get request\n// is made for an item that is currently being fetched, this will\n// be used to avoid an extra call to the fetch function.\ntype fetchCall struct {\n\twg  sync.WaitGroup\n\tval any\n\terr error\n}\n\n// New returns a new Cache that will cache items for\n// at most maxAge. 
If maxAge is zero, items will\n// never be cached.\nfunc New(maxAge time.Duration) *Cache {\n\t// The returned cache will have a zero-valued expire\n\t// time, so will expire immediately, causing the new\n\t// map to be created.\n\treturn &Cache{\n\t\tmaxAge:   maxAge,\n\t\tinFlight: make(map[Key]*fetchCall),\n\t}\n}\n\n// Len returns the total number of cached entries.\nfunc (c *Cache) Len() int {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\treturn len(c.old) + len(c.new)\n}\n\n// Evict removes the entry with the given key from the cache if present.\nfunc (c *Cache) Evict(key Key) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tdelete(c.new, key)\n\tdelete(c.old, key)\n}\n\n// EvictAll removes all entries from the cache.\nfunc (c *Cache) EvictAll() {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tc.new = make(map[Key]entry)\n\tc.old = nil\n}\n\n// Get returns the value for the given key, using fetch to fetch\n// the value if it is not found in the cache.\n// If fetch returns an error, the returned error from Get will have\n// the same cause.\nfunc (c *Cache) Get(key Key, fetch func() (any, error)) (any, error) {\n\treturn c.getAtTime(key, fetch, time.Now())\n}\n\n// getAtTime is the internal version of Get, useful for testing; now represents the current\n// time.\nfunc (c *Cache) getAtTime(key Key, fetch func() (any, error), now time.Time) (any, error) {\n\tif val, ok := c.cachedValue(key, now); ok {\n\t\treturn val, nil\n\t}\n\tc.mu.Lock()\n\tif f, ok := c.inFlight[key]; ok {\n\t\t// There's already an in-flight request for the key, so wait\n\t\t// for that to complete and use its results.\n\t\tc.mu.Unlock()\n\t\tf.wg.Wait()\n\t\t// The value will have been added to the cache by the first fetch,\n\t\t// so no need to add it here.\n\t\tif f.err == nil {\n\t\t\treturn f.val, nil\n\t\t}\n\t\treturn nil, errors.Trace(f.err)\n\t}\n\tvar f fetchCall\n\tf.wg.Add(1)\n\tc.inFlight[key] = &f\n\t// Mark the request as done when we return, and after\n\t// the value has been added to the 
cache.\n\tdefer f.wg.Done()\n\n\t// Fetch the data without the mutex held\n\t// so that one slow fetch doesn't hold up\n\t// all the other cache accesses.\n\tc.mu.Unlock()\n\tval, err := fetch()\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\t// Set the result in the fetchCall so that other calls can see it.\n\tf.val, f.err = val, err\n\tif err == nil && c.maxAge >= 2*time.Nanosecond {\n\t\t// If maxAge is < 2ns then the expiry code will panic because the\n\t\t// actual expiry time will be maxAge - a random value in the\n\t\t// interval [0, maxAge/2). If maxAge is < 2ns then this requires\n\t\t// a random interval in [0, 0) which causes a panic.\n\t\t//\n\t\t// This value is so small that there's no need to cache anyway,\n\t\t// which makes tests more obviously deterministic when using\n\t\t// a zero expiry time.\n\t\tc.new[key] = entry{\n\t\t\tvalue:  val,\n\t\t\texpire: now.Add(c.maxAge - time.Duration(rand.Int63n(int64(c.maxAge/2)))),\n\t\t}\n\t}\n\tdelete(c.inFlight, key)\n\tif err == nil {\n\t\treturn f.val, nil\n\t}\n\treturn nil, errors.Trace(err)\n}\n\n// cachedValue returns any cached value for the given key\n// and whether it was found.\nfunc (c *Cache) cachedValue(key Key, now time.Time) (any, bool) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tif now.After(c.expire) {\n\t\tc.old = c.new\n\t\tc.new = make(map[Key]entry)\n\t\tc.expire = now.Add(c.maxAge)\n\t}\n\tif e, ok := c.entry(c.new, key, now); ok {\n\t\treturn e.value, true\n\t}\n\tif e, ok := c.entry(c.old, key, now); ok {\n\t\t// An old entry has been accessed; move it to the new\n\t\t// map so that we only use a single map access for\n\t\t// subsequent lookups. 
Note that because we use the same\n\t\t// duration for cache refresh (c.expire) as for max\n\t\t// entry age, this is strictly speaking unnecessary\n\t\t// because any entries in old will have expired by the\n\t\t// time it is dropped.\n\t\tc.new[key] = e\n\t\tdelete(c.old, key)\n\t\treturn e.value, true\n\t}\n\treturn nil, false\n}\n\n// entry returns an entry from the map and whether it\n// was found. If the entry has expired, it is deleted from the map.\nfunc (c *Cache) entry(m map[Key]entry, key Key, now time.Time) (entry, bool) {\n\te, ok := m[key]\n\tif !ok {\n\t\treturn entry{}, false\n\t}\n\tif now.After(e.expire) {\n\t\t// Delete expired entries.\n\t\tdelete(m, key)\n\t\treturn entry{}, false\n\t}\n\treturn e, true\n}\n"
  },
  {
    "path": "cache/cache_test.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage cache_test\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/juju/errors\"\n\t\"github.com/juju/utils/v4/cache\"\n\tgc \"gopkg.in/check.v1\"\n)\n\ntype suite struct{}\n\nvar _ = gc.Suite(&suite{})\n\nfunc (*suite) TestSimpleGet(c *gc.C) {\n\tp := cache.New(time.Hour)\n\tv, err := p.Get(\"a\", fetchValue(2))\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(v, gc.Equals, 2)\n}\n\nfunc (*suite) TestEvict(c *gc.C) {\n\tp := cache.New(time.Hour)\n\tv, err := p.Get(\"a\", fetchValue(2))\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(v, gc.Equals, 2)\n\n\tv, err = p.Get(\"a\", fetchValue(4))\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(v, gc.Equals, 2)\n\n\tp.Evict(\"a\")\n\tv, err = p.Get(\"a\", fetchValue(3))\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(v, gc.Equals, 3)\n\n\tv, err = p.Get(\"a\", fetchValue(4))\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(v, gc.Equals, 3)\n}\n\nfunc (*suite) TestEvictOld(c *gc.C) {\n\t// Test that evict removes entries even when they're\n\t// in the old map.\n\n\tnow := time.Now()\n\tp := cache.New(time.Minute)\n\n\t// Populate the cache with an initial entry.\n\tv, err := cache.GetAtTime(p, \"a\", fetchValue(\"a\"), now)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(v, gc.Equals, \"a\")\n\tc.Assert(p.Len(), gc.Equals, 1)\n\n\tv, err = cache.GetAtTime(p, \"b\", fetchValue(\"b\"), now.Add(time.Minute/2))\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(v, gc.Equals, \"b\")\n\tc.Assert(p.Len(), gc.Equals, 2)\n\n\t// Fetch an item after the expiry time,\n\t// causing current entries to be moved to old.\n\tv, err = cache.GetAtTime(p, \"a\", fetchValue(\"a1\"), now.Add(time.Minute+1))\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(v, gc.Equals, \"a1\")\n\tc.Assert(p.Len(), gc.Equals, 2)\n\tc.Assert(cache.OldLen(p), gc.Equals, 1)\n\n\tp.Evict(\"b\")\n\tv, err = cache.GetAtTime(p, \"b\", fetchValue(\"b1\"), now.Add(time.Minute+2))\n\tc.Assert(err, 
gc.IsNil)\n\tc.Assert(v, gc.Equals, \"b1\")\n}\n\nfunc (*suite) TestFetchError(c *gc.C) {\n\tp := cache.New(time.Hour)\n\texpectErr := errors.New(\"hello\")\n\tv, err := p.Get(\"a\", fetchError(expectErr))\n\tc.Assert(err, gc.ErrorMatches, \"hello\")\n\tc.Assert(errors.Cause(err), gc.Equals, expectErr)\n\tc.Assert(v, gc.Equals, nil)\n}\n\nfunc (*suite) TestFetchOnlyOnce(c *gc.C) {\n\tp := cache.New(time.Hour)\n\tv, err := p.Get(\"a\", fetchValue(2))\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(v, gc.Equals, 2)\n\n\tv, err = p.Get(\"a\", fetchError(errUnexpectedFetch))\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(v, gc.Equals, 2)\n}\n\nfunc (*suite) TestEntryExpiresAfterMaxEntryAge(c *gc.C) {\n\tnow := time.Now()\n\tp := cache.New(time.Minute)\n\tv, err := cache.GetAtTime(p, \"a\", fetchValue(2), now)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(v, gc.Equals, 2)\n\n\t// Entry is definitely not expired before half the entry expiry time.\n\tv, err = cache.GetAtTime(p, \"a\", fetchError(errUnexpectedFetch), now.Add(time.Minute/2-1))\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(v, gc.Equals, 2)\n\n\t// Entry is definitely expired after the entry expiry time\n\tv, err = cache.GetAtTime(p, \"a\", fetchValue(3), now.Add(time.Minute+1))\n\tc.Assert(v, gc.Equals, 3)\n}\n\nfunc (*suite) TestEntriesRemovedWhenNotRetrieved(c *gc.C) {\n\tnow := time.Now()\n\tp := cache.New(time.Minute)\n\n\t// Populate the cache with an initial entry.\n\tv, err := cache.GetAtTime(p, \"a\", fetchValue(\"a\"), now)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(v, gc.Equals, \"a\")\n\tc.Assert(p.Len(), gc.Equals, 1)\n\n\t// Fetch another item after the expiry time,\n\t// causing current entries to be moved to old.\n\tv, err = cache.GetAtTime(p, \"b\", fetchValue(\"b\"), now.Add(time.Minute+1))\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(v, gc.Equals, \"b\")\n\tc.Assert(p.Len(), gc.Equals, 2)\n\tc.Assert(cache.OldLen(p), gc.Equals, 1)\n\n\t// Fetch the other item after another expiry time\n\t// causing the old entries to be discarded 
because\n\t// nothing has fetched them.\n\tv, err = cache.GetAtTime(p, \"b\", fetchValue(\"b\"), now.Add(time.Minute*2+2))\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(v, gc.Equals, \"b\")\n\tc.Assert(p.Len(), gc.Equals, 1)\n}\n\n// TestRefreshedEntry tests the code path where a value is moved\n// from the old map to new.\nfunc (*suite) TestRefreshedEntry(c *gc.C) {\n\tnow := time.Now()\n\tp := cache.New(time.Minute)\n\n\t// Populate the cache with an initial entry.\n\tv, err := cache.GetAtTime(p, \"a\", fetchValue(\"a\"), now)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(v, gc.Equals, \"a\")\n\tc.Assert(p.Len(), gc.Equals, 1)\n\n\t// Fetch another item very close to the expiry time.\n\tv, err = cache.GetAtTime(p, \"b\", fetchValue(\"b\"), now.Add(time.Minute-1))\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(v, gc.Equals, \"b\")\n\tc.Assert(p.Len(), gc.Equals, 2)\n\n\t// Fetch it again just after the expiry time,\n\t// which should move it into the new map.\n\tv, err = cache.GetAtTime(p, \"b\", fetchError(errUnexpectedFetch), now.Add(time.Minute+1))\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(v, gc.Equals, \"b\")\n\tc.Assert(p.Len(), gc.Equals, 2)\n\n\t// Fetch another item, causing \"a\" to be removed from the cache\n\t// and keeping \"b\" in there.\n\tv, err = cache.GetAtTime(p, \"c\", fetchValue(\"c\"), now.Add(time.Minute*2+2))\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(v, gc.Equals, \"c\")\n\tc.Assert(p.Len(), gc.Equals, 2)\n}\n\n// TestConcurrentFetch checks that the cache is safe\n// to use concurrently. 
It is designed to fail when\n// tested with the race detector enabled.\nfunc (*suite) TestConcurrentFetch(c *gc.C) {\n\tp := cache.New(time.Minute)\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tv, err := p.Get(\"a\", fetchValue(\"a\"))\n\t\tc.Check(err, gc.IsNil)\n\t\tc.Check(v, gc.Equals, \"a\")\n\t}()\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tv, err := p.Get(\"b\", fetchValue(\"b\"))\n\t\tc.Check(err, gc.IsNil)\n\t\tc.Check(v, gc.Equals, \"b\")\n\t}()\n\twg.Wait()\n}\n\nfunc (*suite) TestRefreshSpread(c *gc.C) {\n\tnow := time.Now()\n\tp := cache.New(time.Minute)\n\t// Get all values to start with.\n\tconst N = 100\n\tfor i := 0; i < N; i++ {\n\t\tv, err := cache.GetAtTime(p, fmt.Sprint(i), fetchValue(i), now)\n\t\tc.Assert(err, gc.IsNil)\n\t\tc.Assert(v, gc.Equals, i)\n\t}\n\tcounts := make([]int, time.Minute/time.Millisecond/10+1)\n\n\t// Continually get values over the course of the\n\t// expiry time; the fetches should be spread out.\n\tslot := 0\n\tfor t := now.Add(0); t.Before(now.Add(time.Minute + 1)); t = t.Add(time.Millisecond * 10) {\n\t\tfor i := 0; i < N; i++ {\n\t\t\tcache.GetAtTime(p, fmt.Sprint(i), func() (any, error) {\n\t\t\t\tcounts[slot]++\n\t\t\t\treturn i, nil\n\t\t\t}, t)\n\t\t}\n\t\tslot++\n\t}\n\n\t// There should be no fetches in the first half of the cycle.\n\tfor i := 0; i < len(counts)/2; i++ {\n\t\tc.Assert(counts[i], gc.Equals, 0, gc.Commentf(\"slot %d\", i))\n\t}\n\n\tmax := 0\n\ttotal := 0\n\tfor _, count := range counts {\n\t\tif count > max {\n\t\t\tmax = count\n\t\t}\n\t\ttotal += count\n\t}\n\tif max > 10 {\n\t\tc.Errorf(\"requests grouped too closely (max %d)\", max)\n\t}\n\tc.Assert(total, gc.Equals, N)\n}\n\nfunc (*suite) TestSingleFlight(c *gc.C) {\n\tp := cache.New(time.Minute)\n\tstart := make(chan struct{})\n\tvar wg sync.WaitGroup\n\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tx, err := p.Get(\"x\", func() (any, error) {\n\t\t\tstart <- 
struct{}{}\n\t\t\t<-start\n\t\t\treturn 99, nil\n\t\t})\n\t\tc.Check(x, gc.Equals, 99)\n\t\tc.Check(err, gc.Equals, nil)\n\n\t}()\n\t// Wait for the fetch to start.\n\t<-start\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tx, err := p.Get(\"x\", func() (any, error) {\n\t\t\tc.Errorf(\"fetch function unexpectedly called with inflight request\")\n\t\t\treturn 55, nil\n\t\t})\n\t\tc.Check(x, gc.Equals, 99)\n\t\tc.Check(err, gc.Equals, nil)\n\t}()\n\n\t// Check that we can still get other values while the\n\t// other fetches are in progress.\n\ty, err := p.Get(\"y\", func() (any, error) {\n\t\treturn 88, nil\n\t})\n\tc.Check(y, gc.Equals, 88)\n\tc.Check(err, gc.Equals, nil)\n\n\t// Let the original fetch proceed, which should let the other one\n\t// succeed too, but sleep for a little bit to let the second goroutine\n\t// actually initiate its request.\n\ttime.Sleep(time.Millisecond)\n\tstart <- struct{}{}\n\twg.Wait()\n}\n\nvar errUnexpectedFetch = errors.New(\"fetch called unexpectedly\")\n\nfunc fetchError(err error) func() (any, error) {\n\treturn func() (any, error) {\n\t\treturn nil, err\n\t}\n}\n\nfunc fetchValue(val any) func() (any, error) {\n\treturn func() (any, error) {\n\t\treturn val, nil\n\t}\n}\n"
  },
  {
    "path": "cache/export_test.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage cache\n\nvar GetAtTime = (*Cache).getAtTime\n\nfunc OldLen(c *Cache) int {\n\treturn len(c.old)\n}\n"
  },
  {
    "path": "cache/package_test.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage cache_test\n\nimport (\n\t\"testing\"\n\n\tgc \"gopkg.in/check.v1\"\n)\n\nfunc TestPackage(t *testing.T) {\n\tgc.TestingT(t)\n}\n"
  },
  {
    "path": "cert/cert.go",
    "content": "// Copyright 2012, 2013 Canonical Ltd.\n// Copyright 2016 Cloudbase solutions\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage cert\n\nimport (\n\t\"crypto\"\n\t\"crypto/tls\"\n\t\"crypto/x509\"\n\t\"crypto/x509/pkix\"\n\t\"encoding/asn1\"\n\t\"encoding/pem\"\n\t\"fmt\"\n\n\t\"github.com/juju/errors\"\n)\n\n// OtherName type for asn1 encoding\ntype OtherName struct {\n\tA string `asn1:\"utf8\"`\n}\n\n// GeneralName type for asn1 encoding\ntype GeneralName struct {\n\tOID       asn1.ObjectIdentifier\n\tOtherName `asn1:\"tag:0\"`\n}\n\n// GeneralNames type for asn1 encoding\ntype GeneralNames struct {\n\tGeneralName `asn1:\"tag:0\"`\n}\n\nvar (\n\t// https://support.microsoft.com/en-us/kb/287547\n\t//  szOID_NT_PRINCIPAL_NAME 1.3.6.1.4.1.311.20.2.3\n\tszOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 311, 20, 2, 3}\n\t// http://www.umich.edu/~x509/ssleay/asn1-oids.html\n\t// 2 5 29 17  subjectAltName\n\tsubjAltName = asn1.ObjectIdentifier{2, 5, 29, 17}\n)\n\n// getUPNExtensionValue returns marsheled asn1 encoded info\nfunc getUPNExtensionValue(subject pkix.Name) ([]byte, error) {\n\t// returns the ASN.1 encoding of val\n\t// in addition to the struct tags recognized\n\t// we used:\n\t// utf8 => causes string to be marsheled as ASN.1, UTF8 strings\n\t// tag:x => specifies the ASN.1 tag number; imples ASN.1 CONTEXT SPECIFIC\n\treturn asn1.Marshal(GeneralNames{\n\t\tGeneralName: GeneralName{\n\t\t\t// init our ASN.1 object identifier\n\t\t\tOID: szOID,\n\t\t\tOtherName: OtherName{\n\t\t\t\tA: subject.CommonName,\n\t\t\t},\n\t\t},\n\t})\n}\n\n// ParseCert parses the given PEM-formatted X509 certificate.\nfunc ParseCert(certPEM string) (*x509.Certificate, error) {\n\tcertPEMData := []byte(certPEM)\n\tfor len(certPEMData) > 0 {\n\t\tvar certBlock *pem.Block\n\t\tcertBlock, certPEMData = pem.Decode(certPEMData)\n\t\tif certBlock == nil {\n\t\t\tbreak\n\t\t}\n\t\tif certBlock.Type == \"CERTIFICATE\" {\n\t\t\tcert, err := 
x509.ParseCertificate(certBlock.Bytes)\n\t\t\treturn cert, err\n\t\t}\n\t}\n\treturn nil, errors.New(\"no certificates found\")\n}\n\n// ParseCertAndKey parses the given PEM-formatted X509 certificate\n// and RSA private key.\nfunc ParseCertAndKey(certPEM, keyPEM string) (*x509.Certificate, crypto.Signer, error) {\n\ttlsCert, err := tls.X509KeyPair([]byte(certPEM), []byte(keyPEM))\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tcert, err := x509.ParseCertificate(tlsCert.Certificate[0])\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tkey, ok := tlsCert.PrivateKey.(crypto.Signer)\n\tif !ok {\n\t\treturn nil, nil, fmt.Errorf(\"private key with unexpected type %T\", tlsCert.PrivateKey)\n\t}\n\treturn cert, key, nil\n}\n"
  },
  {
    "path": "cert/cert_test.go",
    "content": "// Copyright 2012, 2013 Canonical Ltd.\n// Copyright 2016 Cloudbase solutions\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage cert_test\n\nimport (\n\t\"testing\"\n\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/cert\"\n)\n\nfunc TestAll(t *testing.T) {\n\tgc.TestingT(t)\n}\n\ntype certSuite struct{}\n\nvar _ = gc.Suite(certSuite{})\n\nfunc (certSuite) TestParseCertificate(c *gc.C) {\n\txcert, err := cert.ParseCert(caCertPEM)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(xcert.Subject.CommonName, gc.Equals, `juju-generated CA for model \"juju testing\"`)\n\n\txcert, err = cert.ParseCert(caKeyPEM)\n\tc.Check(xcert, gc.IsNil)\n\tc.Assert(err, gc.ErrorMatches, \"no certificates found\")\n\n\txcert, err = cert.ParseCert(\"hello\")\n\tc.Check(xcert, gc.IsNil)\n\tc.Assert(err, gc.ErrorMatches, \"no certificates found\")\n}\n\nfunc (certSuite) TestParseCertAndKey(c *gc.C) {\n\txcert, key, err := cert.ParseCertAndKey(caCertPEM, caKeyPEM)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(xcert.Subject.CommonName, gc.Equals, `juju-generated CA for model \"juju testing\"`)\n\tc.Assert(key, gc.NotNil)\n\n\tc.Assert(xcert.PublicKey, gc.DeepEquals, key.Public())\n}\n\nvar (\n\tcaCertPEM = `\n-----BEGIN 
CERTIFICATE-----\nMIICHDCCAcagAwIBAgIUfzWn5ktGMxD6OiTgfiZyvKdM+ZYwDQYJKoZIhvcNAQEL\nBQAwazENMAsGA1UEChMEanVqdTEzMDEGA1UEAwwqanVqdS1nZW5lcmF0ZWQgQ0Eg\nZm9yIG1vZGVsICJqdWp1IHRlc3RpbmciMSUwIwYDVQQFExwxMjM0LUFCQ0QtSVMt\nTk9ULUEtUkVBTC1VVUlEMB4XDTE2MDkyMTEwNDgyN1oXDTI2MDkyODEwNDgyN1ow\nazENMAsGA1UEChMEanVqdTEzMDEGA1UEAwwqanVqdS1nZW5lcmF0ZWQgQ0EgZm9y\nIG1vZGVsICJqdWp1IHRlc3RpbmciMSUwIwYDVQQFExwxMjM0LUFCQ0QtSVMtTk9U\nLUEtUkVBTC1VVUlEMFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBAL+0X+1zl2vt1wI4\n1Q+RnlltJyaJmtwCbHRhREXVGU7t0kTMMNERxqLnuNUyWRz90Rg8s9XvOtCqNYW7\nmypGrFECAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgKkMA8GA1UdEwEB/wQFMAMBAf8w\nHQYDVR0OBBYEFHueMLZ1QJ/2sKiPIJ28TzjIMRENMA0GCSqGSIb3DQEBCwUAA0EA\novZN0RbUHrO8q9Eazh0qPO4mwW9jbGTDz126uNrLoz1g3TyWxIas1wRJ8IbCgxLy\nXUrBZO5UPZab66lJWXyseA==\n-----END CERTIFICATE-----\n`\n\n\tcaKeyPEM = `\n-----BEGIN RSA PRIVATE KEY-----\nMIIBOgIBAAJBAL+0X+1zl2vt1wI41Q+RnlltJyaJmtwCbHRhREXVGU7t0kTMMNER\nxqLnuNUyWRz90Rg8s9XvOtCqNYW7mypGrFECAwEAAQJAMPa+JaUHgO6foxam/LIB\n0u95N3OgFR+dWeBaEsgKDclpREdJ0rXNI+3C3kwqeEZR4omoPlBeSEewSkwHxpmI\n0QIhAOjKiHZ5v6R8haleipbDzkGUnZW07hEwL5Ld4MNx/QQ1AiEA0tEzSSNAdM0C\nM/vY0x5mekIYai8/tFSEG9PJ3ZkpEy0CIQCo9B3YxwI1Un777vbs903iQQeiWP+U\nEAHnOQvhLgDxpQIgGkpml+9igW5zoOH+h02aQBLwEoXz7tw/YW0HFrCcE70CIGkS\nve4WjiEqnQaHNAPy0hY/1DfIgBOSpOfnkFHOk9vX\n-----END RSA PRIVATE KEY-----\n`\n)\n"
  },
  {
    "path": "cert/exports_test.go",
    "content": "// Copyright 2016 Canonical ltd.\n// Copyright 2016 Cloudbase solutions\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage cert\n"
  },
  {
    "path": "command.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils\n\nimport (\n\t\"os/exec\"\n)\n\n// RunCommand executes the command and return the combined output.\nfunc RunCommand(command string, args ...string) (output string, err error) {\n\tcmd := exec.Command(command, args...)\n\tout, err := cmd.CombinedOutput()\n\toutput = string(out)\n\tif err != nil {\n\t\treturn output, err\n\t}\n\treturn output, nil\n}\n"
  },
  {
    "path": "command_test.go",
    "content": "// Copyright 2012, 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils_test\n\nimport (\n\t\"io/ioutil\"\n\t\"path/filepath\"\n\t\"runtime\"\n\n\t\"github.com/juju/testing\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4\"\n)\n\ntype EnvironmentPatcher interface {\n\tPatchEnvironment(name, value string)\n}\n\nfunc patchExecutable(patcher EnvironmentPatcher, dir, execName, script string) {\n\tpatcher.PatchEnvironment(\"PATH\", dir)\n\tfilename := filepath.Join(dir, execName)\n\tioutil.WriteFile(filename, []byte(script), 0755)\n}\n\ntype commandSuite struct {\n\ttesting.IsolationSuite\n}\n\nvar _ = gc.Suite(&commandSuite{})\n\nfunc (s *commandSuite) TestRunCommandCombinesOutput(c *gc.C) {\n\tvar content string\n\tvar cmdName string\n\tvar expect string\n\tif runtime.GOOS != \"windows\" {\n\t\tcontent = `#!/bin/bash --norc\necho stdout\necho stderr 1>&2\n`\n\t\tcmdName = \"test-output\"\n\t\texpect = \"stdout\\nstderr\\n\"\n\t} else {\n\t\tcontent = `@echo off\necho stdout\necho stderr 1>&2\n`\n\t\tcmdName = \"test-output.bat\"\n\t\texpect = \"stdout\\r\\nstderr \\r\\n\"\n\t}\n\tpatchExecutable(s, c.MkDir(), cmdName, content)\n\toutput, err := utils.RunCommand(\"test-output\")\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(output, gc.Equals, expect)\n}\n\nfunc (s *commandSuite) TestRunCommandNonZeroExit(c *gc.C) {\n\tvar content string\n\tvar cmdName string\n\tvar expect string\n\tif runtime.GOOS != \"windows\" {\n\t\tcontent = `#!/bin/bash --norc\necho stdout\nexit 42\n`\n\t\tcmdName = \"test-output\"\n\t\texpect = \"stdout\\n\"\n\t} else {\n\t\tcontent = `@echo off\necho stdout\nexit 42\n`\n\t\tcmdName = \"test-output.bat\"\n\t\texpect = \"stdout\\r\\n\"\n\t}\n\tpatchExecutable(s, c.MkDir(), cmdName, content)\n\toutput, err := utils.RunCommand(\"test-output\")\n\tc.Assert(err, gc.ErrorMatches, `exit status 42`)\n\tc.Assert(output, gc.Equals, expect)\n}\n"
  },
  {
    "path": "context.go",
    "content": "// Copyright 2018 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n\n\t\"github.com/juju/clock\"\n)\n\n// timerCtx is an implementation of context.Context that\n// is done when a given deadline has passed\n// (as measured by the Clock in the clock field)\ntype timerCtx struct {\n\tclock    clock.Clock\n\ttimer    clock.Timer\n\tdeadline time.Time\n\tparent   context.Context\n\tdone     chan struct{}\n\n\t// mu guards err.\n\tmu sync.Mutex\n\n\t// err holds context.Canceled or context.DeadlineExceeded\n\t// after the context has been canceled.\n\t// If this is non-nil, then done will have been closed.\n\terr error\n}\n\nfunc (ctx *timerCtx) Deadline() (time.Time, bool) {\n\treturn ctx.deadline, true\n}\n\nfunc (ctx *timerCtx) Err() error {\n\tctx.mu.Lock()\n\tdefer ctx.mu.Unlock()\n\treturn ctx.err\n}\n\nfunc (ctx *timerCtx) Value(key any) any {\n\treturn ctx.parent.Value(key)\n}\n\nfunc (ctx *timerCtx) Done() <-chan struct{} {\n\treturn ctx.done\n}\n\nfunc (ctx *timerCtx) cancel(err error) {\n\tctx.mu.Lock()\n\tdefer ctx.mu.Unlock()\n\tif err == nil {\n\t\tpanic(\"cancel with nil error!\")\n\t}\n\tif ctx.err != nil {\n\t\t// Already canceled - no need to do anything.\n\t\treturn\n\t}\n\tctx.err = err\n\tif ctx.timer != nil {\n\t\tctx.timer.Stop()\n\t}\n\tclose(ctx.done)\n}\n\nfunc (ctx *timerCtx) String() string {\n\treturn fmt.Sprintf(\"%v.WithDeadline(%s [%s])\", ctx.parent, ctx.deadline, ctx.deadline.Sub(ctx.clock.Now()))\n}\n\n// ContextWithTimeout is like context.WithTimeout\n// except that it works with a clock.Clock rather than\n// wall-clock time.\nfunc ContextWithTimeout(parent context.Context, clk clock.Clock, timeout time.Duration) (context.Context, context.CancelFunc) {\n\treturn ContextWithDeadline(parent, clk, clk.Now().Add(timeout))\n}\n\n// ContextWithDeadline is like context.WithDeadline\n// except that it 
works with a clock.Clock rather than\n// wall-clock time.\nfunc ContextWithDeadline(parent context.Context, clk clock.Clock, deadline time.Time) (context.Context, context.CancelFunc) {\n\td := deadline.Sub(clk.Now())\n\tctx := &timerCtx{\n\t\tclock:    clk,\n\t\tparent:   parent,\n\t\tdeadline: deadline,\n\t\tdone:     make(chan struct{}),\n\t}\n\tif d <= 0 {\n\t\t// deadline has already passed\n\t\tctx.cancel(context.DeadlineExceeded)\n\t\treturn ctx, func() {}\n\t}\n\tctx.timer = clk.NewTimer(d)\n\tgo func() {\n\t\tselect {\n\t\tcase <-ctx.timer.Chan():\n\t\t\tctx.cancel(context.DeadlineExceeded)\n\t\tcase <-parent.Done():\n\t\t\tctx.cancel(parent.Err())\n\t\tcase <-ctx.done:\n\t\t}\n\t}()\n\treturn ctx, func() {\n\t\tctx.cancel(context.Canceled)\n\t}\n}\n"
  },
  {
    "path": "context_test.go",
    "content": "// Copyright 2018 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils_test\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"golang.org/x/net/context\"\n\n\t\"github.com/juju/clock/testclock\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4\"\n)\n\ntype contextSuite struct{}\n\nvar _ = gc.Suite(&contextSuite{})\n\n// Note: the logic in these tests was copied from the tests\n// in the Go standard library.\n\nfunc (*contextSuite) TestDeadline(c *gc.C) {\n\tclk := testclock.NewClock(time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC))\n\tctx, cancel := utils.ContextWithDeadline(context.Background(), clk, clk.Now().Add(50*time.Millisecond))\n\tdefer cancel()\n\tc.Assert(fmt.Sprint(ctx), gc.Equals, `context.Background.WithDeadline(2000-01-01 00:00:00.05 +0000 UTC [50ms])`)\n\ttestContextDeadline(c, ctx, \"WithDeadline\", clk, 1, 50*time.Millisecond)\n\n\tctx, cancel = utils.ContextWithDeadline(context.Background(), clk, clk.Now().Add(50*time.Millisecond))\n\tdefer cancel()\n\to := otherContext{ctx}\n\ttestContextDeadline(c, o, \"WithDeadline+otherContext\", clk, 1, 50*time.Millisecond)\n\n\tctx, cancel = utils.ContextWithDeadline(context.Background(), clk, clk.Now().Add(50*time.Millisecond))\n\tdefer cancel()\n\to = otherContext{ctx}\n\tctx, _ = utils.ContextWithDeadline(o, clk, clk.Now().Add(4*time.Second))\n\ttestContextDeadline(c, ctx, \"WithDeadline+otherContext+WithDeadline\", clk, 2, 50*time.Millisecond)\n\n\tctx, cancel = utils.ContextWithDeadline(context.Background(), clk, clk.Now().Add(-time.Millisecond))\n\tdefer cancel()\n\ttestContextDeadline(c, ctx, \"WithDeadline+inthepast\", clk, 0, 0)\n\n\tctx, cancel = utils.ContextWithDeadline(context.Background(), clk, clk.Now())\n\ttestContextDeadline(c, ctx, \"WithDeadline+now\", clk, 0, 0)\n}\n\nfunc (*contextSuite) TestTimeout(c *gc.C) {\n\tclk := testclock.NewClock(time.Date(2000, time.January, 1, 0, 0, 0, 
0, time.UTC))\n\tctx, _ := utils.ContextWithTimeout(context.Background(), clk, 50*time.Millisecond)\n\tc.Assert(fmt.Sprint(ctx), gc.Equals, `context.Background.WithDeadline(2000-01-01 00:00:00.05 +0000 UTC [50ms])`)\n\ttestContextDeadline(c, ctx, \"WithTimeout\", clk, 1, 50*time.Millisecond)\n\n\tctx, _ = utils.ContextWithTimeout(context.Background(), clk, 50*time.Millisecond)\n\to := otherContext{ctx}\n\ttestContextDeadline(c, o, \"WithTimeout+otherContext\", clk, 1, 50*time.Millisecond)\n\n\tctx, _ = utils.ContextWithTimeout(context.Background(), clk, 50*time.Millisecond)\n\to = otherContext{ctx}\n\tctx, _ = utils.ContextWithTimeout(o, clk, 3*time.Second)\n\ttestContextDeadline(c, ctx, \"WithTimeout+otherContext+WithTimeout\", clk, 2, 50*time.Millisecond)\n}\n\nfunc (*contextSuite) TestCanceledTimeout(c *gc.C) {\n\tclk := testclock.NewClock(time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC))\n\tctx, _ := utils.ContextWithTimeout(context.Background(), clk, time.Second)\n\to := otherContext{ctx}\n\tctx, cancel := utils.ContextWithTimeout(o, clk, 2*time.Second)\n\tcancel()\n\ttime.Sleep(100 * time.Millisecond) // let cancelation propagate\n\tselect {\n\tcase <-ctx.Done():\n\tdefault:\n\t\tc.Errorf(\"<-ctx.Done() blocked, but shouldn't have\")\n\t}\n\tc.Assert(ctx.Err(), gc.Equals, context.Canceled)\n}\n\nfunc testContextDeadline(c *gc.C, ctx context.Context, name string, clk *testclock.Clock, waiters int, failAfter time.Duration) {\n\terr := clk.WaitAdvance(failAfter, 0, waiters)\n\tc.Assert(err, jc.ErrorIsNil)\n\tselect {\n\tcase <-time.After(time.Second):\n\t\tc.Fatalf(\"%s: context should have timed out\", name)\n\tcase <-ctx.Done():\n\t}\n\tc.Assert(ctx.Err(), gc.Equals, context.DeadlineExceeded)\n}\n\n// otherContext is a Context that's not one of the types defined in context.go.\n// This lets us test code paths that differ based on the underlying type of the\n// Context.\ntype otherContext struct {\n\tcontext.Context\n}\n"
  },
  {
    "path": "du/LICENSE.ricochet2200",
    "content": "This is free and unencumbered software released into the public domain.\n\nAnyone is free to copy, modify, publish, use, compile, sell, or\ndistribute this software, either in source code form or as a compiled\nbinary, for any purpose, commercial or non-commercial, and by any\nmeans.\n\nIn jurisdictions that recognize copyright laws, the author or authors\nof this software dedicate any and all copyright interest in the\nsoftware to the public domain. We make this dedication for the benefit\nof the public at large and to the detriment of our heirs and\nsuccessors. We intend this dedication to be an overt act of\nrelinquishment in perpetuity of all present and future rights to this\nsoftware under copyright law.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\nEXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\nMERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\nIN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR\nOTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,\nARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\nOTHER DEALINGS IN THE SOFTWARE.\n\nFor more information, please refer to <http://unlicense.org>\n"
  },
  {
    "path": "du/diskusage.go",
    "content": "// Copied from https://github.com/ricochet2200/go-disk-usage\n// Copyright 2011 Rick Smith.\n// Use of this source code is governed by a public domain\n// license that can be found in the LICENSE.ricochet2200 file.\n//\n//go:build !windows\n// +build !windows\n\npackage du\n\nimport \"syscall\"\n\ntype DiskUsage struct {\n\tstat *syscall.Statfs_t\n}\n\n// Returns an object holding the disk usage of volumePath\n// This function assumes volumePath is a valid path\nfunc NewDiskUsage(volumePath string) *DiskUsage {\n\n\tvar stat syscall.Statfs_t\n\tsyscall.Statfs(volumePath, &stat)\n\treturn &DiskUsage{&stat}\n}\n\n// Total free bytes on file system\nfunc (this *DiskUsage) Free() uint64 {\n\treturn this.stat.Bfree * uint64(this.stat.Bsize)\n}\n\n// Total available bytes on file system to an unprivileged user\nfunc (this *DiskUsage) Available() uint64 {\n\treturn this.stat.Bavail * uint64(this.stat.Bsize)\n}\n\n// Total size of the file system\nfunc (this *DiskUsage) Size() uint64 {\n\treturn this.stat.Blocks * uint64(this.stat.Bsize)\n}\n\n// Total bytes used in file system\nfunc (this *DiskUsage) Used() uint64 {\n\treturn this.Size() - this.Free()\n}\n\n// Percentage of use on the file system\nfunc (this *DiskUsage) Usage() float32 {\n\treturn float32(this.Used()) / float32(this.Size())\n}\n"
  },
  {
    "path": "du/diskusage_windows.go",
    "content": "// Copied from https://github.com/ricochet2200/go-disk-usage\n// Copyright 2011 Rick Smith.\n// Use of this source code is governed by a public domain\n// license that can be found in the LICENSE.ricochet2200 file.\n//\n\npackage du\n\nimport (\n\t\"syscall\"\n\t\"unsafe\"\n)\n\ntype DiskUsage struct {\n\tfreeBytes  int64\n\ttotalBytes int64\n\tavailBytes int64\n}\n\n// Returns an object holding the disk usage of volumePath\n// This function assumes volumePath is a valid path\nfunc NewDiskUsage(volumePath string) *DiskUsage {\n\n\th := syscall.MustLoadDLL(\"kernel32.dll\")\n\tc := h.MustFindProc(\"GetDiskFreeSpaceExW\")\n\n\tdu := &DiskUsage{}\n\n\tc.Call(\n\t\tuintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(volumePath))),\n\t\tuintptr(unsafe.Pointer(&du.freeBytes)),\n\t\tuintptr(unsafe.Pointer(&du.totalBytes)),\n\t\tuintptr(unsafe.Pointer(&du.availBytes)))\n\n\treturn du\n}\n\n// Total free bytes on file system\nfunc (this *DiskUsage) Free() uint64 {\n\treturn uint64(this.freeBytes)\n}\n\n// Total available bytes on file system to an unprivileged user\nfunc (this *DiskUsage) Available() uint64 {\n\treturn uint64(this.availBytes)\n}\n\n// Total size of the file system\nfunc (this *DiskUsage) Size() uint64 {\n\treturn uint64(this.totalBytes)\n}\n\n// Total bytes used in file system\nfunc (this *DiskUsage) Used() uint64 {\n\treturn this.Size() - this.Free()\n}\n\n// Percentage of use on the file system\nfunc (this *DiskUsage) Usage() float32 {\n\treturn float32(this.Used()) / float32(this.Size())\n}\n"
  },
  {
    "path": "errors.go",
    "content": "// Copyright 2024 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils\n\nimport (\n\t\"fmt\"\n)\n\n// RcPassthroughError indicates that a Juju plugin command exited with a\n// non-zero exit code. This error is used to exit with the return code.\ntype RcPassthroughError struct {\n\tCode int\n}\n\n// Error implements error.\nfunc (e *RcPassthroughError) Error() string {\n\treturn fmt.Sprintf(\"subprocess encountered error code %v\", e.Code)\n}\n\n// IsRcPassthroughError returns whether the error is an RcPassthroughError.\nfunc IsRcPassthroughError(err error) bool {\n\t_, ok := err.(*RcPassthroughError)\n\treturn ok\n}\n\n// NewRcPassthroughError creates an error that will have the code used at the\n// return code from the cmd.Main function rather than the default of 1 if\n// there is an error.\nfunc NewRcPassthroughError(code int) error {\n\treturn &RcPassthroughError{code}\n}\n"
  },
  {
    "path": "exec/exec.go",
    "content": "// Copyright 2016 Canonical Ltd.\n// Copyright 2016 Cloudbase Solutions\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage exec\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com/juju/clock\"\n\t\"github.com/juju/errors\"\n\t\"github.com/juju/loggo/v2\"\n)\n\nvar logger = loggo.GetLogger(\"juju.util.exec\")\n\n// Parameters for RunCommands.  Commands contains one or more commands to be\n// executed using bash or PowerShell.  If WorkingDir is set, this is passed\n// through.  Similarly if the Environment is specified, this is used\n// for executing the command.\n// TODO: refactor this to use a config struct and a constructor. Remove todo\n// and extra code from WaitWithCancel once this is done.\ntype RunParams struct {\n\tCommands    string\n\tWorkingDir  string\n\tEnvironment []string\n\tClock       clock.Clock\n\tKillProcess func(*os.Process) error\n\tUser        string\n\n\ttempDir string\n\tstdout  *bytes.Buffer\n\tstderr  *bytes.Buffer\n\tps      *exec.Cmd\n}\n\n// ExecResponse contains the return code and output generated by executing a\n// command.\ntype ExecResponse struct {\n\tCode   int\n\tStdout []byte\n\tStderr []byte\n}\n\n// mergeEnvironment takes in a string array representing the desired environment\n// and merges it with the current environment. 
On Windows, clearing the environment,\n// or having missing environment variables, may lead to standard go packages not working\n// (os.TempDir relies on $env:TEMP), and powershell erroring out\n// Currently this function is only used for windows\nfunc mergeEnvironment(env []string) []string {\n\tif env == nil {\n\t\treturn nil\n\t}\n\tm := make(map[string]string)\n\tvar tmpEnv []string\n\tfor _, val := range os.Environ() {\n\t\tvarSplit := strings.SplitN(val, \"=\", 2)\n\t\tm[varSplit[0]] = varSplit[1]\n\t}\n\n\tfor _, val := range env {\n\t\tvarSplit := strings.SplitN(val, \"=\", 2)\n\t\tm[varSplit[0]] = varSplit[1]\n\t}\n\n\tfor key, val := range m {\n\t\ttmpEnv = append(tmpEnv, key+\"=\"+val)\n\t}\n\n\treturn tmpEnv\n}\n\n// shellAndArgs returns the name of the shell command and arguments to run the\n// specified script. shellAndArgs may write into the provided temporary\n// directory, which will be maintained until the process exits.\nfunc shellAndArgs(tempDir, script, user string) (string, []string, error) {\n\tvar scriptFile string\n\tvar cmd string\n\tvar args []string\n\tswitch runtime.GOOS {\n\tcase \"windows\":\n\t\tscriptFile = filepath.Join(tempDir, \"script.ps1\")\n\t\tcmd = \"powershell.exe\"\n\t\targs = []string{\n\t\t\t\"-NoProfile\",\n\t\t\t\"-NonInteractive\",\n\t\t\t\"-ExecutionPolicy\", \"RemoteSigned\",\n\t\t\t\"-File\", scriptFile,\n\t\t}\n\t\t// Exceptions don't result in a non-zero exit code by default\n\t\t// when using -File. 
The exit code of an explicit \"exit\" when\n\t\t// using -Command is ignored and results in an exit code of 1.\n\t\t// We use -File and trap exceptions to cover both.\n\t\tscript = \"trap {Write-Error $_; exit 1}\\n\" + script\n\tdefault:\n\t\tscriptFile = filepath.Join(tempDir, \"script.sh\")\n\t\tif user == \"\" {\n\t\t\tcmd = \"/bin/bash\"\n\t\t\targs = []string{scriptFile}\n\t\t} else {\n\t\t\t// Need to make the tempDir readable by all so the user can see it.\n\t\t\terr := os.Chmod(tempDir, 0755)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", nil, errors.Annotatef(err, \"making tempdir readable by %q\", user)\n\t\t\t}\n\t\t\tcmd = \"/bin/su\"\n\t\t\targs = []string{user, \"--login\", \"--command\", fmt.Sprintf(\"/bin/bash %s\", scriptFile)}\n\t\t}\n\t}\n\terr := ioutil.WriteFile(scriptFile, []byte(script), 0644)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\treturn cmd, args, nil\n}\n\n// Run sets up the command environment (environment variables, working dir)\n// and starts the process. 
The commands are passed into bash on Linux machines\n// and to powershell on Windows machines.\nfunc (r *RunParams) Run() error {\n\tif runtime.GOOS == \"windows\" {\n\t\tr.Environment = mergeEnvironment(r.Environment)\n\t}\n\n\ttempDir, err := ioutil.TempDir(\"\", \"juju-exec\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tshell, args, err := shellAndArgs(tempDir, r.Commands, r.User)\n\tif err != nil {\n\t\tif err := os.RemoveAll(tempDir); err != nil {\n\t\t\tlogger.Warningf(\"failed to remove temporary directory: %v\", err)\n\t\t}\n\t\treturn err\n\t}\n\n\tr.ps = exec.Command(shell, args...)\n\tif r.Environment != nil {\n\t\tr.ps.Env = r.Environment\n\t}\n\tif r.WorkingDir != \"\" {\n\t\tr.ps.Dir = r.WorkingDir\n\t}\n\n\tr.populateSysProcAttr()\n\n\t// If there is no user provided KillProcess function we\n\t// use the default one.\n\tif r.KillProcess == nil {\n\t\tr.KillProcess = KillProcess\n\t}\n\n\tr.tempDir = tempDir\n\tr.stdout = &bytes.Buffer{}\n\tr.stderr = &bytes.Buffer{}\n\n\tr.ps.Stdout = r.stdout\n\tr.ps.Stderr = r.stderr\n\n\treturn r.ps.Start()\n}\n\n// Process returns the *os.Process instance of the current running process\n// This will allow us to kill the process if needed, or get more information\n// on the process\nfunc (r *RunParams) Process() *os.Process {\n\tif r.ps != nil && r.ps.Process != nil {\n\t\treturn r.ps.Process\n\t}\n\treturn nil\n}\n\n// Wait blocks until the process exits, and returns an ExecResponse type\n// containing stdout, stderr and the return code of the process. 
If a non-zero\n// return code is returned, this is collected as the code for the response and\n// this does not classify as an error.\nfunc (r *RunParams) Wait() (*ExecResponse, error) {\n\tvar err error\n\tif r.ps == nil {\n\t\treturn nil, errors.New(\"No process has been started yet\")\n\t}\n\terr = r.ps.Wait()\n\tif err := os.RemoveAll(r.tempDir); err != nil {\n\t\tlogger.Warningf(\"failed to remove temporary directory: %v\", err)\n\t}\n\n\tresult := &ExecResponse{\n\t\tStdout: r.stdout.Bytes(),\n\t\tStderr: r.stderr.Bytes(),\n\t}\n\n\tif ee, ok := err.(*exec.ExitError); ok && err != nil {\n\t\tstatus := ee.ProcessState.Sys().(syscall.WaitStatus)\n\t\tif status.Exited() {\n\t\t\t// A non-zero return code isn't considered an error here.\n\t\t\tresult.Code = status.ExitStatus()\n\t\t\terr = nil\n\t\t}\n\t\tlogger.Infof(\"run result: %v\", ee)\n\t}\n\treturn result, err\n}\n\n// ErrCancelled is returned by WaitWithCancel in case it successfully manages to kill\n// the running process.\nvar ErrCancelled = errors.New(\"command cancelled\")\n\n// timeWaitForKill represents the time we wait after attempting to kill a\n// process before bailing out and returning.\nconst timeWaitForKill = 30 * time.Second\n\ntype resultWithError struct {\n\texecResult *ExecResponse\n\terr        error\n}\n\n// WaitWithCancel waits until the process exits or until a signal is sent on the\n// cancel channel. In case a signal is sent it first tries to kill the process and\n// return ErrCancelled. 
If it fails at killing the process it will return anyway\n// and report the problematic PID.\nfunc (r *RunParams) WaitWithCancel(cancel <-chan struct{}) (*ExecResponse, error) {\n\t// TODO: Remove this once we make Clock a required field\n\t_clock := r.Clock\n\tif _clock == nil {\n\t\t_clock = clock.WallClock\n\t}\n\n\tdone := make(chan resultWithError, 1)\n\tgo func() {\n\t\tdefer close(done)\n\t\twaitResult, err := r.Wait()\n\t\tdone <- resultWithError{waitResult, err}\n\t}()\n\n\tselect {\n\tcase resWithError := <-done:\n\t\treturn resWithError.execResult, errors.Trace(resWithError.err)\n\tcase <-cancel:\n\t\tlogger.Debugf(\"attempting to kill process\")\n\t\terr := r.KillProcess(r.ps.Process)\n\t\tif err != nil {\n\t\t\tlogger.Debugf(\"kill returned: %s\", err)\n\t\t}\n\n\t\t// After we issue a kill we expect the wait above to return within timeWaitForKill.\n\t\t// In case it doesn't we just go on and assume the process is stuck, but we don't block\n\t\tselect {\n\t\tcase resWithError := <-done:\n\t\t\treturn resWithError.execResult, ErrCancelled\n\t\tcase <-_clock.After(timeWaitForKill):\n\t\t\treturn nil, errors.Errorf(\"tried to kill process %v, but timed out\", r.ps.Process.Pid)\n\t\t}\n\t}\n}\n\n// RunCommands executes the Commands specified in the RunParams using\n// powershell on windows, and '/bin/bash -s' on everything else,\n// passing the commands through as stdin, and collecting\n// stdout and stderr.  If a non-zero return code is returned, this is\n// collected as the code for the response and this does not classify as an\n// error.\nfunc RunCommands(run RunParams) (*ExecResponse, error) {\n\terr := run.Run()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn run.Wait()\n}\n"
  },
  {
    "path": "exec/exec_internal_test.go",
    "content": "// Copyright 2017 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage exec\n\nimport (\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n)\n\ntype execSuite struct {\n\ttesting.IsolationSuite\n}\n\nvar _ = gc.Suite(&execSuite{})\n\nfunc (*execSuite) TestShellAndArgsNoUserSpecified(c *gc.C) {\n\tif runtime.GOOS == \"windows\" {\n\t\tc.Skip(\"non-windows only test\")\n\t}\n\n\tdir := c.MkDir()\n\tstat, err := os.Stat(dir)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(stat.Mode().Perm(), gc.Equals, os.FileMode(0700))\n\n\tcmd, args, err := shellAndArgs(dir, \"env\", \"\")\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tscriptFile := filepath.Join(dir, \"script.sh\")\n\n\tc.Assert(cmd, gc.Equals, \"/bin/bash\")\n\tc.Assert(args, jc.DeepEquals, []string{scriptFile})\n}\n\nfunc (*execSuite) TestShellAndArgsAsUser(c *gc.C) {\n\tif runtime.GOOS == \"windows\" {\n\t\tc.Skip(\"non-windows only test\")\n\t}\n\n\tdir := c.MkDir()\n\tstat, err := os.Stat(dir)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(stat.Mode().Perm(), gc.Equals, os.FileMode(0700))\n\n\tcmd, args, err := shellAndArgs(dir, \"env\", \"ubuntu\")\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tscriptFile := filepath.Join(dir, \"script.sh\")\n\n\tc.Assert(cmd, gc.Equals, \"/bin/su\")\n\tcommand := \"/bin/bash \" + scriptFile\n\tc.Assert(args, jc.DeepEquals, []string{\"ubuntu\", \"--login\", \"--command\", command})\n\n\t// The directory is now readable by everyone.\n\tstat, err = os.Stat(dir)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(stat.Mode().Perm(), gc.Equals, os.FileMode(0755))\n\t// And the file is world readable\n\tstat, err = os.Stat(scriptFile)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(stat.Mode().Perm(), gc.Equals, os.FileMode(0644))\n}\n"
  },
  {
    "path": "exec/exec_linux_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage exec_test\n\nimport (\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/exec\"\n)\n\n// 0 is thrown by linux because RunParams.Wait\n// only sets the code if the process exits cleanly\nconst cancelErrCode = 0\n\nfunc (*execSuite) TestRunCommands(c *gc.C) {\n\tnewDir := c.MkDir()\n\n\tfor i, test := range []struct {\n\t\tmessage     string\n\t\tcommands    string\n\t\tworkingDir  string\n\t\tenvironment []string\n\t\tstdout      string\n\t\tstderr      string\n\t\tcode        int\n\t}{\n\t\t{\n\t\t\tmessage:  \"test stdout capture\",\n\t\t\tcommands: \"echo testing stdout\",\n\t\t\tstdout:   \"testing stdout\\n\",\n\t\t}, {\n\t\t\tmessage:  \"test stderr capture\",\n\t\t\tcommands: \"echo testing stderr >&2\",\n\t\t\tstderr:   \"testing stderr\\n\",\n\t\t}, {\n\t\t\tmessage:  \"test return code\",\n\t\t\tcommands: \"exit 42\",\n\t\t\tcode:     42,\n\t\t}, {\n\t\t\tmessage:    \"test working dir\",\n\t\t\tcommands:   \"pwd\",\n\t\t\tworkingDir: newDir,\n\t\t\tstdout:     newDir + \"\\n\",\n\t\t}, {\n\t\t\tmessage:     \"test environment\",\n\t\t\tcommands:    \"echo $OMG_IT_WORKS\",\n\t\t\tenvironment: []string{\"OMG_IT_WORKS=like magic\"},\n\t\t\tstdout:      \"like magic\\n\",\n\t\t}, {\n\t\t\tmessage:  \"multiple commands\",\n\t\t\tcommands: \"cat\\necho 123\",\n\t\t\tstdout:   \"123\\n\",\n\t\t},\n\t} {\n\t\tc.Logf(\"%v: %s\", i, test.message)\n\n\t\tparams := exec.RunParams{\n\t\t\tCommands:    test.commands,\n\t\t\tWorkingDir:  test.workingDir,\n\t\t\tEnvironment: test.environment,\n\t\t}\n\n\t\tresult, err := exec.RunCommands(params)\n\t\tc.Assert(err, gc.IsNil)\n\t\tc.Assert(string(result.Stdout), gc.Equals, test.stdout)\n\t\tc.Assert(string(result.Stderr), gc.Equals, test.stderr)\n\t\tc.Assert(result.Code, gc.Equals, test.code)\n\n\t\terr = params.Run()\n\t\tc.Assert(err, 
gc.IsNil)\n\t\tc.Assert(params.Process(), gc.Not(gc.IsNil))\n\t\tresult, err = params.Wait()\n\t\tc.Assert(err, gc.IsNil)\n\t\tc.Assert(string(result.Stdout), gc.Equals, test.stdout)\n\t\tc.Assert(string(result.Stderr), gc.Equals, test.stderr)\n\t\tc.Assert(result.Code, gc.Equals, test.code)\n\n\t\terr = params.Run()\n\t\tc.Assert(err, gc.IsNil)\n\t\tc.Assert(params.Process(), gc.Not(gc.IsNil))\n\t\tresult, err = params.WaitWithCancel(nil)\n\t\tc.Assert(err, gc.IsNil)\n\t\tc.Assert(string(result.Stdout), gc.Equals, test.stdout)\n\t\tc.Assert(string(result.Stderr), gc.Equals, test.stderr)\n\t\tc.Assert(result.Code, gc.Equals, test.code)\n\t}\n}\n\nfunc (*execSuite) TestExecUnknownCommand(c *gc.C) {\n\tresult, err := exec.RunCommands(\n\t\texec.RunParams{\n\t\t\tCommands: \"unknown-command\",\n\t\t},\n\t)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(result.Stdout, gc.HasLen, 0)\n\tc.Assert(string(result.Stderr), jc.Contains, \"unknown-command: command not found\")\n\t// 127 is a special bash return code meaning command not found.\n\tc.Assert(result.Code, gc.Equals, 127)\n}\n"
  },
  {
    "path": "exec/exec_test.go",
    "content": "// Copyright 2016 Canonical Ltd.\n// Copyright 2016 Cloudbase Solutions\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage exec_test\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/clock\"\n\t\"github.com/juju/utils/v4/exec\"\n)\n\ntype execSuite struct {\n\ttesting.IsolationSuite\n}\n\nvar _ = gc.Suite(&execSuite{})\n\nfunc (*execSuite) TestWaitWithCancel(c *gc.C) {\n\tparams := exec.RunParams{\n\t\tCommands: \"sleep 100\",\n\t\tClock:    &mockClock{C: make(chan time.Time)},\n\t}\n\n\terr := params.Run()\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(params.Process(), gc.Not(gc.IsNil))\n\n\tcancelChan := make(chan struct{}, 1)\n\tdefer close(cancelChan)\n\tcancelChan <- struct{}{}\n\tresult, err := params.WaitWithCancel(cancelChan)\n\tc.Assert(err, gc.Equals, exec.ErrCancelled)\n\tc.Assert(string(result.Stdout), gc.Equals, \"\")\n\tc.Assert(string(result.Stderr), gc.Equals, \"\")\n\tc.Assert(result.Code, gc.Equals, cancelErrCode)\n}\n\nfunc (s *execSuite) TestKillAbortedIfUnsuccessfull(c *gc.C) {\n\tkillCalled := false\n\n\tmockChan := make(chan time.Time, 1)\n\tdefer close(mockChan)\n\tparams := exec.RunParams{\n\t\tCommands:    \"sleep 100\",\n\t\tWorkingDir:  \"\",\n\t\tEnvironment: []string{},\n\t\tClock:       &mockClock{C: mockChan},\n\t\tKillProcess: func(*os.Process) error {\n\t\t\tkillCalled = true\n\t\t\treturn nil\n\t\t},\n\t}\n\n\terr := params.Run()\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(params.Process(), gc.Not(gc.IsNil))\n\n\tcancelChan := make(chan struct{}, 1)\n\tdefer close(cancelChan)\n\tcancelChan <- struct{}{}\n\tmockChan <- time.Now()\n\tres, err := params.WaitWithCancel(cancelChan)\n\tc.Assert(err, gc.ErrorMatches, fmt.Sprintf(\"tried to kill process %d, but timed out\", params.Process().Pid))\n\tc.Assert(res, gc.IsNil)\n\tc.Assert(killCalled, jc.IsTrue)\n}\n\ntype mockClock struct 
{\n\tclock.Clock\n\tC <-chan time.Time\n}\n\nfunc (m *mockClock) After(t time.Duration) <-chan time.Time {\n\treturn m.C\n}\n"
  },
  {
    "path": "exec/exec_unix.go",
    "content": "// Copyright 2016 Canonical Ltd.\n// Copyright 2016 Cloudbase Solutions SRL\n// Licensed under the LGPLv3, see LICENCE file for details.\n\n//go:build !windows\n// +build !windows\n\npackage exec\n\nimport (\n\t\"os\"\n\t\"syscall\"\n)\n\n// KillProcess tries to kill the process being ran by RunParams\n// We need this convoluted implementation because everything\n// ran under the bash script is spawned as a different process\n// and doesn't get killed by a regular process.Kill()\n// For details see https://groups.google.com/forum/#!topic/golang-nuts/XoQ3RhFBJl8\nfunc KillProcess(proc *os.Process) error {\n\tpgid, err := syscall.Getpgid(proc.Pid)\n\tif err == nil {\n\t\treturn syscall.Kill(-pgid, 15) // note the minus sign\n\t}\n\treturn nil\n}\n\n// populateSysProcAttr exists so that the method Kill on the same struct\n// can work correctly. For more information see Kill's comment.\nfunc (r *RunParams) populateSysProcAttr() {\n\tr.ps.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}\n}\n"
  },
  {
    "path": "exec/exec_windows.go",
    "content": "// Copyright 2016 Canonical Ltd.\n// Copyright 2016 Cloudbase Solutions SRL\n// Licensed under the LGPLv3, see LICENCE file for details.\n\n//go:build windows\n// +build windows\n\npackage exec\n\nimport (\n\t\"os\"\n)\n\n// KillProcess tries to kill the process passed in.\nfunc KillProcess(proc *os.Process) error {\n\treturn proc.Kill()\n}\n\n// populateSysProcAttr is a noop on windows\nfunc (r *RunParams) populateSysProcAttr() {}\n"
  },
  {
    "path": "exec/exec_windows_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage exec_test\n\nimport (\n\t\"path/filepath\"\n\t\"strings\"\n\t\"syscall\"\n\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/exec\"\n)\n\n// 1 is thrown by powershell after a command is cancelled\nconst cancelErrCode = 1\n\n// longPath is copied over from the symlink package. This should be removed\n// if we add it to gc or in some other convenience package\nfunc longPath(path string) ([]uint16, error) {\n\tpathp, err := syscall.UTF16FromString(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlongp := pathp\n\tn, err := syscall.GetLongPathName(&pathp[0], &longp[0], uint32(len(longp)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif n > uint32(len(longp)) {\n\t\tlongp = make([]uint16, n)\n\t\tn, err = syscall.GetLongPathName(&pathp[0], &longp[0], uint32(len(longp)))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tlongp = longp[:n]\n\n\treturn longp, nil\n}\n\nfunc longPathAsString(path string) (string, error) {\n\tlongp, err := longPath(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn syscall.UTF16ToString(longp), nil\n}\n\nfunc (*execSuite) TestRunCommands(c *gc.C) {\n\tnewDir, err := longPathAsString(c.MkDir())\n\tc.Assert(err, gc.IsNil)\n\tfor i, test := range []struct {\n\t\tmessage     string\n\t\tcommands    string\n\t\tworkingDir  string\n\t\tenvironment []string\n\t\tstdout      string\n\t\tstderr      string\n\t\tcode        int\n\t}{\n\t\t{\n\t\t\tmessage:  \"test stdout capture\",\n\t\t\tcommands: \"echo 'testing stdout'\",\n\t\t\tstdout:   \"testing stdout\\r\\n\",\n\t\t}, {\n\t\t\tmessage:  \"test stderr capture\",\n\t\t\tcommands: \"Write-Error 'testing stderr'\",\n\t\t\tstderr:   \"testing stderr\\r\\n\",\n\t\t}, {\n\t\t\tmessage:  \"test return code\",\n\t\t\tcommands: \"exit 42\",\n\t\t\tcode:     42,\n\t\t}, {\n\t\t\tmessage:    \"test 
working dir\",\n\t\t\tcommands:   \"(pwd).Path\",\n\t\t\tworkingDir: newDir,\n\t\t\tstdout:     filepath.FromSlash(newDir) + \"\\r\\n\",\n\t\t}, {\n\t\t\tmessage:     \"test environment\",\n\t\t\tcommands:    \"echo $env:OMG_IT_WORKS\",\n\t\t\tenvironment: []string{\"OMG_IT_WORKS=like magic\"},\n\t\t\tstdout:      \"like magic\\r\\n\",\n\t\t},\n\t} {\n\t\tc.Logf(\"%v: %s\", i, test.message)\n\n\t\tparams := exec.RunParams{\n\t\t\tCommands:    test.commands,\n\t\t\tWorkingDir:  test.workingDir,\n\t\t\tEnvironment: test.environment,\n\t\t}\n\n\t\tresult, err := exec.RunCommands(params)\n\t\tc.Assert(err, gc.IsNil)\n\t\tc.Assert(string(result.Stdout), gc.Equals, test.stdout)\n\t\tc.Assert(string(result.Stderr), jc.Contains, test.stderr)\n\t\tc.Assert(result.Code, gc.Equals, test.code)\n\n\t\terr = params.Run()\n\t\tc.Assert(err, gc.IsNil)\n\t\tc.Assert(params.Process(), gc.Not(gc.IsNil))\n\t\tresult, err = params.Wait()\n\t\tc.Assert(err, gc.IsNil)\n\t\tc.Assert(string(result.Stdout), gc.Equals, test.stdout)\n\t\tc.Assert(string(result.Stderr), jc.Contains, test.stderr)\n\t\tc.Assert(result.Code, gc.Equals, test.code)\n\n\t\terr = params.Run()\n\t\tc.Assert(err, gc.IsNil)\n\t\tc.Assert(params.Process(), gc.Not(gc.IsNil))\n\t\tresult, err = params.WaitWithCancel(nil)\n\t\tc.Assert(err, gc.IsNil)\n\t\tc.Assert(string(result.Stdout), gc.Equals, test.stdout)\n\t\tc.Assert(string(result.Stderr), jc.Contains, test.stderr)\n\t\tc.Assert(result.Code, gc.Equals, test.code)\n\t}\n}\n\nfunc (*execSuite) TestExecUnknownCommand(c *gc.C) {\n\tresult, err := exec.RunCommands(\n\t\texec.RunParams{\n\t\t\tCommands: \"unknown-command\",\n\t\t},\n\t)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(result.Stdout, gc.HasLen, 0)\n\tstderr := strings.Replace(string(result.Stderr), \"\\r\\n\", \"\", -1)\n\tc.Assert(stderr, jc.Contains, \"is not recognized as the name of a cmdlet\")\n\t// 1 is returned by RunCommands when powershell commands throw exceptions\n\tc.Assert(result.Code, gc.Equals, 1)\n}\n"
  },
  {
    "path": "exec/package_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage exec_test\n\nimport (\n\t\"testing\"\n\n\tgc \"gopkg.in/check.v1\"\n)\n\nfunc Test(t *testing.T) {\n\tgc.TestingT(t)\n}\n"
  },
  {
    "path": "export_test.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils\n\nimport (\n\t\"time\"\n)\n\nvar (\n\tGOMAXPROCS        = &gomaxprocs\n\tNumCPU            = &numCPU\n\tResolveSudoByFunc = resolveSudo\n)\n\nfunc ExposeBackoffTimerDuration(bot *BackoffTimer) time.Duration {\n\treturn bot.currentDuration\n}\n"
  },
  {
    "path": "file.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path\"\n\t\"path/filepath\"\n\t\"regexp\"\n\n\t\"github.com/juju/errors\"\n)\n\n// UserHomeDir returns the home directory for the specified user, or the\n// home directory for the current user if the specified user is empty.\nfunc UserHomeDir(userName string) (hDir string, err error) {\n\tif userName == \"\" {\n\t\t// TODO (wallyworld) - fix tests on Windows\n\t\t// Ordinarily, we'd always use user.Current() to get the current user\n\t\t// and then get the HomeDir from that. But our tests rely on poking\n\t\t// a value into $HOME in order to override the normal home dir for the\n\t\t// current user. So we're forced to use Home() to make the tests pass.\n\t\t// All of our tests currently construct paths with the default user in\n\t\t// mind eg \"~/foo\".\n\t\treturn Home(), nil\n\t}\n\thDir, err = homeDir(userName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn hDir, nil\n}\n\n// Only match paths starting with ~ (~user/test, ~/test). This will prevent\n// accidental expansion on Windows when short form paths are present (C:\\users\\ADMINI~1\\test)\nvar userHomePathRegexp = regexp.MustCompile(\"(^~(?P<user>[^/]*))(?P<path>.*)\")\n\n// NormalizePath expands a path containing ~ to its absolute form,\n// and removes any .. or . 
path elements.\nfunc NormalizePath(dir string) (string, error) {\n\tif userHomePathRegexp.MatchString(dir) {\n\t\tuser := userHomePathRegexp.ReplaceAllString(dir, \"$user\")\n\t\tuserHomeDir, err := UserHomeDir(user)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tdir = userHomePathRegexp.ReplaceAllString(dir, fmt.Sprintf(\"%s$path\", userHomeDir))\n\t}\n\treturn filepath.Clean(dir), nil\n}\n\n// ExpandPath normalises (via Normalize) a path returning an absolute path.\nfunc ExpandPath(path string) (string, error) {\n\tnormPath, err := NormalizePath(path)\n\tif err != nil {\n\t\treturn \"\", errors.Annotate(err, \"unable to normalise file path\")\n\t}\n\treturn filepath.Abs(normPath)\n}\n\n// EnsureBaseDir ensures that path is always prefixed by baseDir,\n// allowing for the fact that path might have a Window drive letter in\n// it.\nfunc EnsureBaseDir(baseDir, path string) string {\n\tif baseDir == \"\" {\n\t\treturn path\n\t}\n\tvolume := filepath.VolumeName(path)\n\treturn filepath.Join(baseDir, path[len(volume):])\n}\n\n// JoinServerPath joins any number of path elements into a single path, adding\n// a path separator (based on the current juju server OS) if necessary. The\n// result is Cleaned; in particular, all empty strings are ignored.\nfunc JoinServerPath(elem ...string) string {\n\treturn path.Join(elem...)\n}\n\n// UniqueDirectory returns \"path/name\" if that directory doesn't exist.  
If it\n// does, the method starts appending .1, .2, etc until a unique name is found.\nfunc UniqueDirectory(path, name string) (string, error) {\n\tdir := filepath.Join(path, name)\n\t_, err := os.Stat(dir)\n\tif os.IsNotExist(err) {\n\t\treturn dir, nil\n\t}\n\tfor i := 1; ; i++ {\n\t\tdir := filepath.Join(path, fmt.Sprintf(\"%s.%d\", name, i))\n\t\t_, err := os.Stat(dir)\n\t\tif os.IsNotExist(err) {\n\t\t\treturn dir, nil\n\t\t} else if err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n}\n\n// CopyFile writes the contents of the given source file to dest.\nfunc CopyFile(dest, source string) error {\n\tdf, err := os.Create(dest)\n\tif err != nil {\n\t\treturn err\n\t}\n\tf, err := os.Open(source)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\t_, err = io.Copy(df, f)\n\treturn err\n}\n\n// AtomicWriteFileAndChange atomically writes the filename with the\n// given contents and calls the given function after the contents were\n// written, but before the file is renamed.\nfunc AtomicWriteFileAndChange(filename string, contents []byte, change func(string) error) (err error) {\n\tdir, file := filepath.Split(filename)\n\tf, err := ioutil.TempFile(dir, file)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot create temp file: %v\", err)\n\t}\n\tdefer func() { _ = f.Close() }()\n\tdefer func() {\n\t\tif err != nil {\n\t\t\t// Don't leave the temp file lying around on error.\n\t\t\t// Close the file before removing. 
Trying to remove an open file on\n\t\t\t// Windows will fail.\n\t\t\t_ = f.Close()\n\t\t\t_ = os.Remove(f.Name())\n\t\t}\n\t}()\n\tif _, err := f.Write(contents); err != nil {\n\t\treturn fmt.Errorf(\"cannot write %q contents: %v\", filename, err)\n\t}\n\tif err := f.Sync(); err != nil {\n\t\treturn err\n\t}\n\tif err := f.Close(); err != nil {\n\t\treturn err\n\t}\n\tif err := change(f.Name()); err != nil {\n\t\treturn err\n\t}\n\tif err := ReplaceFile(f.Name(), filename); err != nil {\n\t\treturn fmt.Errorf(\"cannot replace %q with %q: %v\", f.Name(), filename, err)\n\t}\n\treturn nil\n}\n\n// AtomicWriteFile atomically writes the filename with the given\n// contents and permissions, replacing any existing file at the same\n// path.\nfunc AtomicWriteFile(filename string, contents []byte, perms os.FileMode) (err error) {\n\treturn AtomicWriteFileAndChange(filename, contents, func(f string) error {\n\t\t// FileMod.Chmod() is not implemented on Windows, however, os.Chmod() is\n\t\tif err := os.Chmod(f, perms); err != nil {\n\t\t\treturn fmt.Errorf(\"cannot set permissions: %v\", err)\n\t\t}\n\t\treturn nil\n\t})\n}\n"
  },
  {
    "path": "file_test.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils_test\n\nimport (\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"os/user\"\n\t\"path/filepath\"\n\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4\"\n)\n\ntype fileSuite struct {\n\ttesting.IsolationSuite\n}\n\nvar _ = gc.Suite(&fileSuite{})\n\nfunc (*fileSuite) TestNormalizePath(c *gc.C) {\n\thome := filepath.FromSlash(c.MkDir())\n\terr := utils.SetHome(home)\n\tc.Assert(err, gc.IsNil)\n\t// TODO (frankban) bug 1324841: improve the isolation of this suite.\n\tcurrentUser, err := user.Current()\n\tc.Assert(err, gc.IsNil)\n\tfor i, test := range []struct {\n\t\tpath     string\n\t\texpected string\n\t\terr      string\n\t}{{\n\t\tpath:     filepath.FromSlash(\"/var/lib/juju\"),\n\t\texpected: filepath.FromSlash(\"/var/lib/juju\"),\n\t}, {\n\t\tpath:     \"~/foo\",\n\t\texpected: filepath.Join(home, \"foo\"),\n\t}, {\n\t\tpath:     \"~/foo//../bar\",\n\t\texpected: filepath.Join(home, \"bar\"),\n\t}, {\n\t\tpath:     \"~\",\n\t\texpected: home,\n\t}, {\n\t\tpath:     \"~\" + currentUser.Username,\n\t\texpected: currentUser.HomeDir,\n\t}, {\n\t\tpath:     \"~\" + currentUser.Username + \"/foo\",\n\t\texpected: filepath.Join(currentUser.HomeDir, \"foo\"),\n\t}, {\n\t\tpath:     \"~\" + currentUser.Username + \"/foo//../bar\",\n\t\texpected: filepath.Join(currentUser.HomeDir, \"bar\"),\n\t}, {\n\t\tpath:     filepath.FromSlash(\"foo~bar/baz\"),\n\t\texpected: filepath.FromSlash(\"foo~bar/baz\"),\n\t}, {\n\t\tpath: \"~foobar/path\",\n\t\terr:  \".*\" + utils.NoSuchUserErrRegexp,\n\t}} {\n\t\tc.Logf(\"test %d: %s\", i, test.path)\n\t\tactual, err := utils.NormalizePath(test.path)\n\t\tif test.err != \"\" {\n\t\t\tc.Check(err, gc.ErrorMatches, test.err)\n\t\t} else {\n\t\t\tc.Check(err, gc.IsNil)\n\t\t\tc.Check(actual, gc.Equals, test.expected)\n\t\t}\n\t}\n}\n\nfunc 
(*fileSuite) TestExpandPath(c *gc.C) {\n\thome := filepath.FromSlash(c.MkDir())\n\terr := utils.SetHome(home)\n\tc.Assert(err, gc.IsNil)\n\tcurrentUser, err := user.Current()\n\tc.Assert(err, gc.IsNil)\n\tcwd, err := os.Getwd()\n\tc.Assert(err, gc.IsNil)\n\tfor i, test := range []struct {\n\t\tpath     string\n\t\texpected string\n\t\terr      string\n\t}{{\n\t\tpath:     filepath.FromSlash(\"/var/lib/juju\"),\n\t\texpected: filepath.FromSlash(\"/var/lib/juju\"),\n\t}, {\n\t\tpath:     \"~/foo\",\n\t\texpected: filepath.Join(home, \"foo\"),\n\t}, {\n\t\tpath:     \"~/foo//../bar\",\n\t\texpected: filepath.Join(home, \"bar\"),\n\t}, {\n\t\tpath:     \"~\",\n\t\texpected: home,\n\t}, {\n\t\tpath:     \"~\" + currentUser.Username,\n\t\texpected: currentUser.HomeDir,\n\t}, {\n\t\tpath:     \"~\" + currentUser.Username + \"/foo\",\n\t\texpected: filepath.Join(currentUser.HomeDir, \"foo\"),\n\t}, {\n\t\tpath:     \"~\" + currentUser.Username + \"/foo//../bar\",\n\t\texpected: filepath.Join(currentUser.HomeDir, \"bar\"),\n\t}, {\n\t\tpath:     filepath.FromSlash(\"foo~bar/baz\"),\n\t\texpected: filepath.Join(cwd, \"foo~bar/baz\"),\n\t}, {\n\t\tpath:     filepath.FromSlash(\"foo/bar\"),\n\t\texpected: filepath.Join(cwd, \"foo\", \"bar\"),\n\t}, {\n\t\tpath:     filepath.FromSlash(\"foo/../bar\"),\n\t\texpected: filepath.Join(cwd, \"bar\"),\n\t}, {\n\t\tpath:     filepath.FromSlash(\"foo/./bar\"),\n\t\texpected: filepath.Join(cwd, \"foo\", \"bar\"),\n\t}, {\n\t\tpath: \"~foobar/path\",\n\t\terr:  \".*\" + utils.NoSuchUserErrRegexp,\n\t}} {\n\t\tc.Logf(\"test %d: %s\", i, test.path)\n\t\tactual, err := utils.ExpandPath(test.path)\n\t\tif test.err != \"\" {\n\t\t\tc.Check(err, gc.ErrorMatches, test.err)\n\t\t} else {\n\t\t\tc.Check(err, gc.IsNil)\n\t\t\tc.Check(actual, gc.Equals, test.expected)\n\t\t\tc.Check(filepath.IsAbs(actual), jc.IsTrue)\n\t\t}\n\t}\n}\n\nfunc (*fileSuite) TestCopyFile(c *gc.C) {\n\tdir := c.MkDir()\n\tf, err := ioutil.TempFile(dir, 
\"source\")\n\tc.Assert(err, gc.IsNil)\n\tdefer f.Close()\n\t_, err = f.Write([]byte(\"hello world\"))\n\tc.Assert(err, gc.IsNil)\n\tdest := filepath.Join(dir, \"dest\")\n\n\terr = utils.CopyFile(dest, f.Name())\n\tc.Assert(err, gc.IsNil)\n\tdata, err := ioutil.ReadFile(dest)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(string(data), gc.Equals, \"hello world\")\n}\n\nvar atomicWriteFileTests = []struct {\n\tsummary   string\n\tchange    func(filename string, contents []byte) error\n\tcheck     func(c *gc.C, fileInfo os.FileInfo)\n\texpectErr string\n}{{\n\tsummary: \"atomic file write and chmod 0644\",\n\tchange: func(filename string, contents []byte) error {\n\t\treturn utils.AtomicWriteFile(filename, contents, 0765)\n\t},\n\tcheck: func(c *gc.C, fi os.FileInfo) {\n\t\tc.Assert(fi.Mode(), gc.Equals, 0765)\n\t},\n}, {\n\tsummary: \"atomic file write and change\",\n\tchange: func(filename string, contents []byte) error {\n\t\tchmodChange := func(f string) error {\n\t\t\t// FileMod.Chmod() is not implemented on Windows, however, os.Chmod() is\n\t\t\treturn os.Chmod(f, 0700)\n\t\t}\n\t\treturn utils.AtomicWriteFileAndChange(filename, contents, chmodChange)\n\t},\n\tcheck: func(c *gc.C, fi os.FileInfo) {\n\t\tc.Assert(fi.Mode(), gc.Equals, 0700)\n\t},\n}, {\n\tsummary: \"atomic file write empty contents\",\n\tchange: func(filename string, contents []byte) error {\n\t\tnopChange := func(string) error {\n\t\t\treturn nil\n\t\t}\n\t\treturn utils.AtomicWriteFileAndChange(filename, contents, nopChange)\n\t},\n}, {\n\tsummary: \"atomic file write and failing change func\",\n\tchange: func(filename string, contents []byte) error {\n\t\terrChange := func(string) error {\n\t\t\treturn fmt.Errorf(\"pow!\")\n\t\t}\n\t\treturn utils.AtomicWriteFileAndChange(filename, contents, errChange)\n\t},\n\texpectErr: \"pow!\",\n}}\n\nfunc (*fileSuite) TestAtomicWriteFile(c *gc.C) {\n\tdir := c.MkDir()\n\tname := \"test.file\"\n\tpath := filepath.Join(dir, name)\n\tassertDirContents := func(names 
...string) {\n\t\tfis, err := ioutil.ReadDir(dir)\n\t\tc.Assert(err, gc.IsNil)\n\t\tc.Assert(fis, gc.HasLen, len(names))\n\t\tfor i, name := range names {\n\t\t\tc.Assert(fis[i].Name(), gc.Equals, name)\n\t\t}\n\t}\n\tassertNotExist := func(path string) {\n\t\t_, err := os.Lstat(path)\n\t\tc.Assert(err, jc.Satisfies, os.IsNotExist)\n\t}\n\n\tfor i, test := range atomicWriteFileTests {\n\t\tc.Logf(\"test %d: %s\", i, test.summary)\n\t\t// First - test with file not already there.\n\t\tassertDirContents()\n\t\tassertNotExist(path)\n\t\tcontents := []byte(\"some\\ncontents\")\n\n\t\terr := test.change(path, contents)\n\t\tif test.expectErr == \"\" {\n\t\t\tc.Assert(err, gc.IsNil)\n\t\t\tdata, err := ioutil.ReadFile(path)\n\t\t\tc.Assert(err, gc.IsNil)\n\t\t\tc.Assert(data, jc.DeepEquals, contents)\n\t\t\tassertDirContents(name)\n\t\t} else {\n\t\t\tc.Assert(err, gc.ErrorMatches, test.expectErr)\n\t\t\tassertDirContents()\n\t\t\tcontinue\n\t\t}\n\n\t\t// Second - test with a file already there.\n\t\tcontents = []byte(\"new\\ncontents\")\n\t\terr = test.change(path, contents)\n\t\tc.Assert(err, gc.IsNil)\n\t\tdata, err := ioutil.ReadFile(path)\n\t\tc.Assert(err, gc.IsNil)\n\t\tc.Assert(data, jc.DeepEquals, contents)\n\t\tassertDirContents(name)\n\n\t\t// Remove the file to reset scenario.\n\t\tc.Assert(os.Remove(path), gc.IsNil)\n\t}\n}\n\nfunc (*fileSuite) TestMoveFile(c *gc.C) {\n\td := c.MkDir()\n\tdest := filepath.Join(d, \"foo\")\n\tf1Name := filepath.Join(d, \".foo1\")\n\tf2Name := filepath.Join(d, \".foo2\")\n\terr := ioutil.WriteFile(f1Name, []byte(\"macaroni\"), 0644)\n\tc.Assert(err, gc.IsNil)\n\terr = ioutil.WriteFile(f2Name, []byte(\"cheese\"), 0644)\n\tc.Assert(err, gc.IsNil)\n\n\tok, err := utils.MoveFile(f1Name, dest)\n\tc.Assert(ok, gc.Equals, true)\n\tc.Assert(err, gc.IsNil)\n\n\tok, err = utils.MoveFile(f2Name, dest)\n\tc.Assert(ok, gc.Equals, false)\n\tc.Assert(err, gc.NotNil)\n\n\tcontents, err := ioutil.ReadFile(dest)\n\tc.Assert(err, 
gc.IsNil)\n\tc.Assert(contents, gc.DeepEquals, []byte(\"macaroni\"))\n}\n"
  },
  {
    "path": "file_unix.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\n//go:build !windows\n// +build !windows\n\npackage utils\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os/user\"\n\t\"strconv\"\n\t\"strings\"\n\t\"syscall\"\n\n\t\"github.com/juju/errors\"\n)\n\nfunc homeDir(userName string) (string, error) {\n\tu, err := user.Lookup(userName)\n\tif err != nil {\n\t\treturn \"\", errors.NewUserNotFound(err, \"no such user\")\n\t}\n\treturn u.HomeDir, nil\n}\n\n// MoveFile atomically moves the source file to the destination, returning\n// whether the file was moved successfully. If the destination already exists,\n// it returns an error rather than overwrite it.\n//\n// On unix systems, an error may occur with a successful move, if the source\n// file location cannot be unlinked.\nfunc MoveFile(source, destination string) (bool, error) {\n\terr := os.Link(source, destination)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\terr = os.Remove(source)\n\tif err != nil {\n\t\treturn true, err\n\t}\n\treturn true, nil\n}\n\n// ReplaceFile atomically replaces the destination file or directory\n// with the source. 
The errors that are returned are identical to\n// those returned by os.Rename.\nfunc ReplaceFile(source, destination string) error {\n\treturn os.Rename(source, destination)\n}\n\n// MakeFileURL returns a file URL if a directory is passed in else it does nothing\nfunc MakeFileURL(in string) string {\n\tif strings.HasPrefix(in, \"/\") {\n\t\treturn \"file://\" + in\n\t}\n\treturn in\n}\n\n// ChownPath sets the uid and gid of path to match that of the user\n// specified.\nfunc ChownPath(path, username string) error {\n\tu, err := user.Lookup(username)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot lookup %q user id: %v\", username, err)\n\t}\n\tuid, err := strconv.Atoi(u.Uid)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"invalid user id %q: %v\", u.Uid, err)\n\t}\n\tgid, err := strconv.Atoi(u.Gid)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"invalid group id %q: %v\", u.Gid, err)\n\t}\n\treturn os.Chown(path, uid, gid)\n}\n\n// IsFileOwner checks to see if the ownership of the file corresponds to\n// the same username\nfunc IsFileOwner(path, username string) (bool, error) {\n\tu, err := user.Lookup(username)\n\tif err != nil {\n\t\treturn false, errors.Annotatef(err, \"cannot lookup %q user id\", username)\n\t}\n\tinfo, err := os.Stat(path)\n\tif err != nil {\n\t\treturn false, errors.Trace(err)\n\t}\n\tstat, ok := info.Sys().(*syscall.Stat_t)\n\tif !ok {\n\t\treturn false, fmt.Errorf(\"cannot lookup %q file\", path)\n\t}\n\treturn (strconv.Itoa(int(stat.Uid)) == u.Uid &&\n\t\tstrconv.Itoa(int(stat.Gid)) == u.Gid), nil\n}\n"
  },
  {
    "path": "file_unix_test.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\n//go:build !windows\n// +build !windows\n\npackage utils_test\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"time\"\n\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/errors\"\n\n\t\"github.com/juju/utils/v4\"\n)\n\ntype unixFileSuite struct {\n}\n\nvar _ = gc.Suite(&unixFileSuite{})\n\nfunc (s *unixFileSuite) TestEnsureBaseDir(c *gc.C) {\n\tc.Assert(utils.EnsureBaseDir(`/a`, `/b/c`), gc.Equals, `/a/b/c`)\n\tc.Assert(utils.EnsureBaseDir(`/`, `/b/c`), gc.Equals, `/b/c`)\n\tc.Assert(utils.EnsureBaseDir(``, `/b/c`), gc.Equals, `/b/c`)\n}\n\nfunc (s *unixFileSuite) TestFileOwner(c *gc.C) {\n\tusername, err := utils.LocalUsername()\n\tc.Assert(err, gc.IsNil)\n\n\tpath := filepath.Join(os.TempDir(), fmt.Sprintf(\"file-%d\", time.Now().UnixNano()))\n\t_, err = os.Create(path)\n\tc.Assert(err, gc.IsNil)\n\n\tok, err := utils.IsFileOwner(path, username)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(ok, gc.Equals, true)\n}\n\nfunc (s *unixFileSuite) TestFileOwnerUsingRoot(c *gc.C) {\n\tpath := filepath.Join(os.TempDir(), fmt.Sprintf(\"file-%d\", time.Now().UnixNano()))\n\t_, err := os.Create(path)\n\tc.Assert(err, gc.IsNil)\n\n\tok, err := utils.IsFileOwner(path, \"root\")\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(ok, gc.Equals, false)\n}\n\nfunc (s *unixFileSuite) TestFileOwnerWithInvalidPath(c *gc.C) {\n\tusername, err := utils.LocalUsername()\n\tc.Assert(err, gc.IsNil)\n\n\tpath := filepath.Join(os.TempDir(), \"file-bad\")\n\tok, err := utils.IsFileOwner(path, username)\n\tc.Assert(errors.Cause(err), gc.ErrorMatches, \"stat .*: no such file or directory\")\n\tc.Assert(ok, gc.Equals, false)\n}\n\nfunc (s *unixFileSuite) TestFileOwnerWithInvalidUsername(c *gc.C) {\n\tpath := filepath.Join(os.TempDir(), fmt.Sprintf(\"file-%d\", time.Now().UnixNano()))\n\t_, err := os.Create(path)\n\tc.Assert(err, gc.IsNil)\n\n\tok, err := utils.IsFileOwner(path, 
\"invalid\")\n\tc.Assert(errors.Cause(err), gc.ErrorMatches, \"user: unknown user invalid\")\n\tc.Assert(ok, gc.Equals, false)\n}\n"
  },
  {
    "path": "file_windows.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\n//go:build windows\n// +build windows\n\npackage utils\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"syscall\"\n\t\"unsafe\"\n\n\t\"github.com/juju/errors\"\n)\n\nconst (\n\tmovefile_replace_existing = 0x1\n\tmovefile_write_through    = 0x8\n)\n\n//sys moveFileEx(lpExistingFileName *uint16, lpNewFileName *uint16, dwFlags uint32) (err error) = MoveFileExW\n\n// MoveFile atomically moves the source file to the destination, returning\n// whether the file was moved successfully. If the destination already exists,\n// it returns an error rather than overwrite it.\nfunc MoveFile(source, destination string) (bool, error) {\n\tsrc, err := syscall.UTF16PtrFromString(source)\n\tif err != nil {\n\t\treturn false, &os.LinkError{\"move\", source, destination, err}\n\t}\n\tdest, err := syscall.UTF16PtrFromString(destination)\n\tif err != nil {\n\t\treturn false, &os.LinkError{\"move\", source, destination, err}\n\t}\n\n\t// see http://msdn.microsoft.com/en-us/library/windows/desktop/aa365240(v=vs.85).aspx\n\tif err := moveFileEx(src, dest, movefile_write_through); err != nil {\n\t\treturn false, &os.LinkError{\"move\", source, destination, err}\n\t}\n\treturn true, nil\n\n}\n\n// ReplaceFile atomically replaces the destination file or directory with the source.\n// The errors that are returned are identical to those returned by os.Rename.\nfunc ReplaceFile(source, destination string) error {\n\tsrc, err := syscall.UTF16PtrFromString(source)\n\tif err != nil {\n\t\treturn &os.LinkError{\"replace\", source, destination, err}\n\t}\n\tdest, err := syscall.UTF16PtrFromString(destination)\n\tif err != nil {\n\t\treturn &os.LinkError{\"replace\", source, destination, err}\n\t}\n\n\t// see http://msdn.microsoft.com/en-us/library/windows/desktop/aa365240(v=vs.85).aspx\n\tif err := moveFileEx(src, dest, movefile_replace_existing|movefile_write_through); err != nil 
{\n\t\treturn &os.LinkError{\"replace\", source, destination, err}\n\t}\n\treturn nil\n}\n\n// MakeFileURL returns a proper file URL for the given path/directory\nfunc MakeFileURL(in string) string {\n\tin = filepath.ToSlash(in)\n\t// for windows at least should be <letter>: to be considered valid\n\t// so we cant do anything with less than that.\n\tif len(in) < 2 {\n\t\treturn in\n\t}\n\tif string(in[1]) != \":\" {\n\t\treturn in\n\t}\n\t// since go 1.6 http client will only take this format.\n\treturn \"file://\" + in\n}\n\nfunc getUserSID(username string) (string, error) {\n\tsid, _, _, e := syscall.LookupSID(\"\", username)\n\tif e != nil {\n\t\treturn \"\", e\n\t}\n\tsidStr, err := sid.String()\n\treturn sidStr, err\n}\n\nfunc readRegString(h syscall.Handle, key string) (value string, err error) {\n\tvar typ uint32\n\tvar buf uint32\n\n\t// Get size of registry key\n\terr = syscall.RegQueryValueEx(h, syscall.StringToUTF16Ptr(key), nil, &typ, nil, &buf)\n\tif err != nil {\n\t\treturn value, err\n\t}\n\n\tn := make([]uint16, buf/2+1)\n\terr = syscall.RegQueryValueEx(h, syscall.StringToUTF16Ptr(key), nil, &typ, (*byte)(unsafe.Pointer(&n[0])), &buf)\n\tif err != nil {\n\t\treturn value, err\n\t}\n\treturn syscall.UTF16ToString(n[:]), err\n}\n\nfunc homeFromRegistry(sid string) (string, error) {\n\tvar h syscall.Handle\n\t// This key will exist on all platforms we support the agent on (windows server 2008 and above)\n\tkeyPath := fmt.Sprintf(\"Software\\\\Microsoft\\\\Windows NT\\\\CurrentVersion\\\\ProfileList\\\\%s\", sid)\n\terr := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE,\n\t\tsyscall.StringToUTF16Ptr(keyPath),\n\t\t0, syscall.KEY_READ, &h)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer syscall.RegCloseKey(h)\n\tstr, err := readRegString(h, \"ProfileImagePath\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn str, nil\n}\n\n// homeDir returns a local user home dir on Windows\n// user.Lookup() does not populate Gid and HomeDir on 
Windows,\n// so we get it from the registry\nfunc homeDir(user string) (string, error) {\n\tu, err := getUserSID(user)\n\tif err != nil {\n\t\treturn \"\", errors.NewUserNotFound(err, \"no such user\")\n\t}\n\treturn homeFromRegistry(u)\n}\n\n// ChownPath is not implemented for Windows.\nfunc ChownPath(path, username string) error {\n\t// This only exists to allow building on Windows. User lookup and\n\t// file ownership needs to be handled in a completely different\n\t// way and hasn't yet been implemented.\n\treturn nil\n}\n\n// IsFileOwner is not implemented for Windows.\nfunc IsFileOwner(path, username string) (bool, error) {\n\treturn true, nil\n}\n"
  },
  {
    "path": "file_windows_test.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Copyright 2014 Cloudbase Solutions SRL\n// Licensed under the LGPLv3, see LICENCE file for details.\n\n//go:build windows\n// +build windows\n\npackage utils_test\n\nimport (\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4\"\n)\n\ntype windowsFileSuite struct {\n}\n\nvar _ = gc.Suite(&windowsFileSuite{})\n\nfunc (s *windowsFileSuite) TestMakeFileURL(c *gc.C) {\n\tvar makeFileURLTests = []struct {\n\t\tin       string\n\t\texpected string\n\t}{{\n\t\tin:       \"file://C:\\\\foo\\\\baz\",\n\t\texpected: \"file://C:/foo/baz\",\n\t}, {\n\t\tin:       \"C:\\\\foo\\\\baz\",\n\t\texpected: \"file://C:/foo/baz\",\n\t}, {\n\t\tin:       \"http://foo/baz\",\n\t\texpected: \"http://foo/baz\",\n\t}, {\n\t\tin:       \"file://C:/foo/baz\",\n\t\texpected: \"file://C:/foo/baz\",\n\t}}\n\n\tfor i, t := range makeFileURLTests {\n\t\tc.Logf(\"Test %d\", i)\n\t\tc.Assert(utils.MakeFileURL(t.in), gc.Equals, t.expected)\n\t}\n}\n\nfunc (s *windowsFileSuite) TestEnsureBaseDir(c *gc.C) {\n\tc.Assert(utils.EnsureBaseDir(`C:\\r`, `C:\\a\\b`), gc.Equals, `C:\\r\\a\\b`)\n\tc.Assert(utils.EnsureBaseDir(`C:\\r`, `D:\\a\\b`), gc.Equals, `C:\\r\\a\\b`)\n\tc.Assert(utils.EnsureBaseDir(`C:`, `D:\\a\\b`), gc.Equals, `C:\\a\\b`)\n\tc.Assert(utils.EnsureBaseDir(`C:`, `\\a\\b`), gc.Equals, `C:\\a\\b`)\n\tc.Assert(utils.EnsureBaseDir(``, `C:\\a\\b`), gc.Equals, `C:\\a\\b`)\n}\n\nfunc (s *windowsFileSuite) TestFileOwner(c *gc.C) {\n\tc.Assert(utils.IsFileOwner(\"file://C:\\\\foo\\\\baz\", \"timmy\"), gc.Equals, true)\n}\n"
  },
  {
    "path": "filepath/common.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage filepath\n\nfunc splitSuffix(path string) (string, string) {\n\tfor i := len(path) - 1; i >= 0; i-- {\n\t\tif path[i] == '.' && i > 0 {\n\t\t\treturn path[:i], path[i:]\n\t\t}\n\t}\n\treturn path, \"\"\n}\n"
  },
  {
    "path": "filepath/common_test.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage filepath_test\n\nimport (\n\t\"github.com/juju/testing\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/filepath\"\n)\n\nvar _ = gc.Suite(&commonSuite{})\n\ntype commonSuite struct {\n\ttesting.IsolationSuite\n}\n\nfunc (s commonSuite) TestSplitSuffixHasSuffix(c *gc.C) {\n\tpath, suffix := filepath.SplitSuffix(\"spam.ext\")\n\n\tc.Check(path, gc.Equals, \"spam\")\n\tc.Check(suffix, gc.Equals, \".ext\")\n}\n\nfunc (s commonSuite) TestSplitSuffixNoSuffix(c *gc.C) {\n\tpath, suffix := filepath.SplitSuffix(\"spam\")\n\n\tc.Check(path, gc.Equals, \"spam\")\n\tc.Check(suffix, gc.Equals, \"\")\n}\n\nfunc (s commonSuite) TestSplitSuffixEmpty(c *gc.C) {\n\tpath, suffix := filepath.SplitSuffix(\"\")\n\n\tc.Check(path, gc.Equals, \"\")\n\tc.Check(suffix, gc.Equals, \"\")\n}\n\nfunc (s commonSuite) TestSplitSuffixDotFilePlain(c *gc.C) {\n\tpath, suffix := filepath.SplitSuffix(\".spam\")\n\n\tc.Check(path, gc.Equals, \".spam\")\n\tc.Check(suffix, gc.Equals, \"\")\n}\n\nfunc (s commonSuite) TestSplitSuffixDofileWithSuffix(c *gc.C) {\n\tpath, suffix := filepath.SplitSuffix(\".spam.ext\")\n\n\tc.Check(path, gc.Equals, \".spam\")\n\tc.Check(suffix, gc.Equals, \".ext\")\n}\n"
  },
  {
    "path": "filepath/export_test.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage filepath\n\nvar (\n\tSplitSuffix = splitSuffix\n)\n"
  },
  {
    "path": "filepath/filepath.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage filepath\n\nimport (\n\t\"runtime\"\n\t\"strings\"\n\n\t\"github.com/juju/errors\"\n\t\"github.com/juju/utils/v4\"\n)\n\n// Renderer provides methods for the different functions in\n// the stdlib path/filepath package that don't relate to a concrete\n// filesystem. So Abs, EvalSymlinks, Glob, Rel, and Walk are not\n// included. Also, while the functions in path/filepath relate to the\n// current host, the PathRenderer methods relate to the renderer's\n// target platform. So for example, a windows-oriented implementation\n// will give windows-specific results even when used on linux.\ntype Renderer interface {\n\t// Base mimics path/filepath.\n\tBase(path string) string\n\n\t// Clean mimics path/filepath.\n\tClean(path string) string\n\n\t// Dir mimics path/filepath.\n\tDir(path string) string\n\n\t// Ext mimics path/filepath.\n\tExt(path string) string\n\n\t// FromSlash mimics path/filepath.\n\tFromSlash(path string) string\n\n\t// IsAbs mimics path/filepath.\n\tIsAbs(path string) bool\n\n\t// Join mimics path/filepath.\n\tJoin(path ...string) string\n\n\t// Match mimics path/filepath.\n\tMatch(pattern, name string) (matched bool, err error)\n\n\t// NormCase normalizes the case of a pathname. On Unix and Mac OS X,\n\t// this returns the path unchanged; on case-insensitive filesystems,\n\t// it converts the path to lowercase.\n\tNormCase(path string) string\n\n\t// Split mimics path/filepath.\n\tSplit(path string) (dir, file string)\n\n\t// SplitList mimics path/filepath.\n\tSplitList(path string) []string\n\n\t// SplitSuffix splits the pathname into a pair (root, suffix) such\n\t// that root + suffix == path, and ext is empty or begins with a\n\t// period and contains at most one period. 
Leading periods on the\n\t// basename are ignored; SplitSuffix('.cshrc') returns ('.cshrc', '').\n\tSplitSuffix(path string) (string, string)\n\n\t// ToSlash mimics path/filepath.\n\tToSlash(path string) string\n\n\t// VolumeName mimics path/filepath.\n\tVolumeName(path string) string\n}\n\n// NewRenderer returns a Renderer for the given os.\nfunc NewRenderer(os string) (Renderer, error) {\n\tif os == \"\" {\n\t\tos = runtime.GOOS\n\t}\n\n\tos = strings.ToLower(os)\n\tswitch {\n\tcase os == utils.OSWindows:\n\t\treturn &WindowsRenderer{}, nil\n\tcase utils.OSIsUnix(os):\n\t\treturn &UnixRenderer{}, nil\n\tcase os == \"ubuntu\":\n\t\treturn &UnixRenderer{}, nil\n\tdefault:\n\t\treturn nil, errors.NotFoundf(\"renderer for %q\", os)\n\t}\n}\n"
  },
  {
    "path": "filepath/filepath_test.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage filepath_test\n\nimport (\n\t\"runtime\"\n\n\t\"github.com/juju/errors\"\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\t\"github.com/juju/utils/v4\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/filepath\"\n)\n\ntype filepathSuite struct {\n\ttesting.IsolationSuite\n\n\tunix    *filepath.UnixRenderer\n\twindows *filepath.WindowsRenderer\n}\n\nvar _ = gc.Suite(&filepathSuite{})\n\nfunc (s *filepathSuite) SetupTest(c *gc.C) {\n\ts.IsolationSuite.SetUpTest(c)\n\n\ts.unix = &filepath.UnixRenderer{}\n\ts.windows = &filepath.WindowsRenderer{}\n}\n\nfunc (s filepathSuite) checkRenderer(c *gc.C, renderer filepath.Renderer, expected string) {\n\tswitch expected {\n\tcase \"windows\":\n\t\tc.Check(renderer, gc.FitsTypeOf, s.windows)\n\tcase \"unix\":\n\t\tc.Check(renderer, gc.FitsTypeOf, s.unix)\n\tdefault:\n\t\tc.Errorf(\"unknown kind %q\", expected)\n\t}\n}\n\nfunc (s filepathSuite) TestNewRendererDefault(c *gc.C) {\n\t// All possible values of runtime.GOOS should be supported.\n\trenderer, err := filepath.NewRenderer(\"\")\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tswitch runtime.GOOS {\n\tcase \"windows\":\n\t\ts.checkRenderer(c, renderer, \"windows\")\n\tdefault:\n\t\ts.checkRenderer(c, renderer, \"unix\")\n\t}\n}\n\nfunc (s filepathSuite) TestNewRendererGOOS(c *gc.C) {\n\t// All possible values of runtime.GOOS should be supported.\n\trenderer, err := filepath.NewRenderer(runtime.GOOS)\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tswitch runtime.GOOS {\n\tcase \"windows\":\n\t\ts.checkRenderer(c, renderer, \"windows\")\n\tdefault:\n\t\ts.checkRenderer(c, renderer, \"unix\")\n\t}\n}\n\nfunc (s filepathSuite) TestNewRendererWindows(c *gc.C) {\n\trenderer, err := filepath.NewRenderer(\"windows\")\n\tc.Assert(err, jc.ErrorIsNil)\n\n\ts.checkRenderer(c, renderer, \"windows\")\n}\n\nfunc (s filepathSuite) TestNewRendererUnix(c 
*gc.C) {\n\tfor _, os := range utils.OSUnix {\n\t\tc.Logf(\"trying %q\", os)\n\t\trenderer, err := filepath.NewRenderer(os)\n\t\tc.Assert(err, jc.ErrorIsNil)\n\n\t\ts.checkRenderer(c, renderer, \"unix\")\n\t}\n}\n\nfunc (s filepathSuite) TestNewRendererDistros(c *gc.C) {\n\tdistros := []string{\"ubuntu\"}\n\tfor _, distro := range distros {\n\t\tc.Logf(\"trying %q\", distro)\n\t\trenderer, err := filepath.NewRenderer(distro)\n\t\tc.Assert(err, jc.ErrorIsNil)\n\n\t\ts.checkRenderer(c, renderer, \"unix\")\n\t}\n}\n\nfunc (s filepathSuite) TestNewRendererUnknown(c *gc.C) {\n\t_, err := filepath.NewRenderer(\"<unknown OS>\")\n\n\tc.Check(err, jc.Satisfies, errors.IsNotFound)\n}\n"
  },
  {
    "path": "filepath/interface_test.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage filepath\n\nvar _ Renderer = (*UnixRenderer)(nil)\nvar _ Renderer = (*WindowsRenderer)(nil)\n"
  },
  {
    "path": "filepath/package_test.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage filepath_test\n\nimport (\n\t\"testing\"\n\n\tgc \"gopkg.in/check.v1\"\n)\n\nfunc Test(t *testing.T) {\n\tgc.TestingT(t)\n}\n"
  },
  {
    "path": "filepath/stdlib.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n// Copyright 2009 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE.golang file.\n\npackage filepath\n\nimport (\n\t\"strings\"\n)\n\n// The following functions are adapted from the GO stdlib source.\n\n// Base mimics path/filepath for the given path separator.\nfunc Base(sep uint8, volumeName func(string) string, path string) string {\n\tif path == \"\" {\n\t\treturn \".\"\n\t}\n\t// Strip trailing slashes.\n\tfor len(path) > 0 && path[len(path)-1] == sep {\n\t\tpath = path[0 : len(path)-1]\n\t}\n\t// Throw away volume name\n\tpath = path[len(volumeName(path)):]\n\t// Find the last element\n\ti := len(path) - 1\n\tfor i >= 0 && path[i] != sep {\n\t\ti--\n\t}\n\tif i >= 0 {\n\t\tpath = path[i+1:]\n\t}\n\t// If empty now, it had only slashes.\n\tif path == \"\" {\n\t\treturn string(sep)\n\t}\n\treturn path\n}\n\n// A lazybuf is a lazily constructed path buffer.\n// It supports append, reading previously appended bytes,\n// and retrieving the final string. 
It does not allocate a buffer\n// to hold the output until that output diverges from s.\ntype lazybuf struct {\n\tpath       string\n\tbuf        []byte\n\tw          int\n\tvolAndPath string\n\tvolLen     int\n}\n\nfunc (b *lazybuf) index(i int) byte {\n\tif b.buf != nil {\n\t\treturn b.buf[i]\n\t}\n\treturn b.path[i]\n}\n\nfunc (b *lazybuf) append(c byte) {\n\tif b.buf == nil {\n\t\tif b.w < len(b.path) && b.path[b.w] == c {\n\t\t\tb.w++\n\t\t\treturn\n\t\t}\n\t\tb.buf = make([]byte, len(b.path))\n\t\tcopy(b.buf, b.path[:b.w])\n\t}\n\tb.buf[b.w] = c\n\tb.w++\n}\n\nfunc (b *lazybuf) string() string {\n\tif b.buf == nil {\n\t\treturn b.volAndPath[:b.volLen+b.w]\n\t}\n\treturn b.volAndPath[:b.volLen] + string(b.buf[:b.w])\n}\n\n// Clean mimics path/filepath for the given path separator.\nfunc Clean(sep uint8, volumeName func(string) string, path string) string {\n\toriginalPath := path\n\tvolLen := len(volumeName(path))\n\tpath = path[volLen:]\n\tif path == \"\" {\n\t\tif volLen > 1 && originalPath[1] != ':' {\n\t\t\t// should be UNC\n\t\t\treturn FromSlash(sep, originalPath)\n\t\t}\n\t\treturn originalPath + \".\"\n\t}\n\trooted := (path[0] == sep)\n\n\t// Invariants:\n\t//  reading from path; r is index of next byte to process.\n\t//  writing to buf; w is index of next byte to write.\n\t//  dotdot is index in buf where .. must stop, either because\n\t//      it is the leading slash or it is a leading ../../.. prefix.\n\tn := len(path)\n\tout := lazybuf{path: path, volAndPath: originalPath, volLen: volLen}\n\tr, dotdot := 0, 0\n\tif rooted {\n\t\tout.append(sep)\n\t\tr, dotdot = 1, 1\n\t}\n\n\tfor r < n {\n\t\tswitch {\n\t\tcase path[r] == sep:\n\t\t\t// empty path element\n\t\t\tr++\n\t\tcase path[r] == '.' && (r+1 == n || path[r+1] == sep):\n\t\t\t// . element\n\t\t\tr++\n\t\tcase path[r] == '.' && path[r+1] == '.' && (r+2 == n || path[r+2] == sep):\n\t\t\t// .. 
element: remove to last separator\n\t\t\tr += 2\n\t\t\tswitch {\n\t\t\tcase out.w > dotdot:\n\t\t\t\t// can backtrack\n\t\t\t\tout.w--\n\t\t\t\tfor out.w > dotdot && out.index(out.w) != sep {\n\t\t\t\t\tout.w--\n\t\t\t\t}\n\t\t\tcase !rooted:\n\t\t\t\t// cannot backtrack, but not rooted, so append .. element.\n\t\t\t\tif out.w > 0 {\n\t\t\t\t\tout.append(sep)\n\t\t\t\t}\n\t\t\t\tout.append('.')\n\t\t\t\tout.append('.')\n\t\t\t\tdotdot = out.w\n\t\t\t}\n\t\tdefault:\n\t\t\t// real path element.\n\t\t\t// add slash if needed\n\t\t\tif rooted && out.w != 1 || !rooted && out.w != 0 {\n\t\t\t\tout.append(sep)\n\t\t\t}\n\t\t\t// copy element\n\t\t\tfor ; r < n && path[r] != sep; r++ {\n\t\t\t\tout.append(path[r])\n\t\t\t}\n\t\t}\n\t}\n\n\t// Turn empty string into \".\"\n\tif out.w == 0 {\n\t\tout.append('.')\n\t}\n\n\treturn FromSlash(sep, out.string())\n}\n\n// Dir mimics path/filepath for the given path separator.\nfunc Dir(sep uint8, volumeName func(string) string, path string) string {\n\tvol := volumeName(path)\n\ti := len(path) - 1\n\tfor i >= len(vol) && path[i] != sep {\n\t\ti--\n\t}\n\tdir := Clean(sep, volumeName, path[len(vol):i+1])\n\treturn vol + dir\n}\n\n// Ext mimics path/filepath for the given path separator.\nfunc Ext(sep uint8, path string) string {\n\tfor i := len(path) - 1; i >= 0 && path[i] != sep; i-- {\n\t\tif path[i] == '.' 
{\n\t\t\treturn path[i:]\n\t\t}\n\t}\n\treturn \"\"\n}\n\n// FromSlash mimics path/filepath for the given path separator.\nfunc FromSlash(sep uint8, path string) string {\n\tif sep == '/' {\n\t\treturn path\n\t}\n\treturn strings.Replace(path, \"/\", string(sep), -1)\n}\n\n// Join mimics path/filepath for the given path separator.\nfunc Join(sep uint8, volumeName func(string) string, elem ...string) string {\n\tfor i, e := range elem {\n\t\tif e != \"\" {\n\t\t\treturn Clean(sep, volumeName, strings.Join(elem[i:], string(sep)))\n\t\t}\n\t}\n\treturn \"\"\n}\n\n// Split mimics path/filepath for the given path separator.\nfunc Split(sep uint8, volumeName func(string) string, path string) (dir, file string) {\n\tvol := volumeName(path)\n\ti := len(path) - 1\n\tfor i >= len(vol) && path[i] != sep {\n\t\ti--\n\t}\n\treturn path[:i+1], path[i+1:]\n}\n\n// ToSlash mimics path/filepath for the given path separator.\nfunc ToSlash(sep uint8, path string) string {\n\tif sep == '/' {\n\t\treturn path\n\t}\n\treturn strings.Replace(path, string(sep), \"/\", -1)\n}\n"
  },
  {
    "path": "filepath/stdlib_test.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage filepath_test\n\nimport (\n\tgofilepath \"path/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/filepath\"\n)\n\n// The tests here are mostly just sanity checks against the behavior\n// of the stdlib path/filepath. We are not trying for high coverage levels.\n\ntype stdlibSuite struct {\n\ttesting.IsolationSuite\n\n\tpath       string\n\tvolumeName func(string) string\n}\n\nvar _ = gc.Suite(&stdlibSuite{})\n\nfunc (s *stdlibSuite) SetUpTest(c *gc.C) {\n\ts.IsolationSuite.SetUpTest(c)\n\n\tswitch runtime.GOOS {\n\tcase \"windows\":\n\t\ts.path = `C:\\a\\b\\c.xyz`\n\t\ts.volumeName = func(path string) string {\n\t\t\treturn \"C:\"\n\t\t}\n\tdefault:\n\t\ts.path = \"/a/b/c.xyz\"\n\t\ts.volumeName = func(string) string { return \"\" }\n\t}\n}\n\nfunc (s stdlibSuite) TestBase(c *gc.C) {\n\tpath := filepath.Base(gofilepath.Separator, s.volumeName, s.path)\n\n\tgopath := gofilepath.Base(s.path)\n\tc.Check(path, gc.Equals, gopath)\n\tc.Check(path, gc.Equals, \"c.xyz\")\n}\n\nfunc (s stdlibSuite) TestClean(c *gc.C) {\n\t// TODO(ericsnow) Add more cases.\n\toriginals := map[string]string{\n\t\ts.path: s.path,\n\t}\n\tfor original, expected := range originals {\n\t\tc.Logf(\"checking %q\", original)\n\t\tpath := filepath.Clean(gofilepath.Separator, s.volumeName, original)\n\n\t\tgopath := gofilepath.Clean(original)\n\t\tc.Check(path, gc.Equals, gopath)\n\t\tc.Check(path, gc.Equals, expected)\n\t}\n}\n\nfunc (s stdlibSuite) TestDir(c *gc.C) {\n\tpath := filepath.Dir(gofilepath.Separator, s.volumeName, s.path)\n\n\tgopath := gofilepath.Dir(s.path)\n\tc.Check(path, gc.Equals, gopath)\n\tswitch runtime.GOOS {\n\tcase \"windows\":\n\t\tc.Check(path, gc.Equals, `\\a\\b`)\n\tdefault:\n\t\tc.Check(path, gc.Equals, \"/a/b\")\n\t}\n}\n\nfunc 
(s stdlibSuite) TestExt(c *gc.C) {\n\text := filepath.Ext(gofilepath.Separator, s.path)\n\n\tgoext := gofilepath.Ext(s.path)\n\tc.Check(ext, gc.Equals, goext)\n\tc.Check(ext, gc.Equals, \".xyz\")\n}\n\nfunc (s stdlibSuite) TestFromSlash(c *gc.C) {\n\toriginal := \"/a/b/c.xyz\"\n\tpath := filepath.FromSlash(gofilepath.Separator, original)\n\n\tgopath := gofilepath.FromSlash(original)\n\tc.Check(path, gc.Equals, gopath)\n\tc.Check(path, gc.Equals, s.path)\n}\n\nfunc (s stdlibSuite) TestJoin(c *gc.C) {\n\tpath := filepath.Join(gofilepath.Separator, s.volumeName, \"a\", \"b\", \"c.xyz\")\n\n\tgopath := gofilepath.Join(\"a\", \"b\", \"c.xyz\")\n\tc.Check(path, gc.Equals, gopath)\n\texpected := s.path[strings.Index(s.path, string(gofilepath.Separator))+1:]\n\tc.Check(path, gc.Equals, expected)\n}\n\nfunc (s stdlibSuite) TestSplit(c *gc.C) {\n\tdir, base := filepath.Split(gofilepath.Separator, s.volumeName, s.path)\n\n\tgodir, gobase := gofilepath.Split(s.path)\n\tc.Check(dir, gc.Equals, godir)\n\tc.Check(base, gc.Equals, gobase)\n\tswitch runtime.GOOS {\n\tcase \"windows\":\n\t\tc.Check(dir, gc.Equals, `\\a\\b\\`)\n\tdefault:\n\t\tc.Check(dir, gc.Equals, \"/a/b/\")\n\t}\n\tc.Check(base, gc.Equals, \"c.xyz\")\n}\n\nfunc (s stdlibSuite) TestToSlash(c *gc.C) {\n\tpath := filepath.ToSlash(gofilepath.Separator, s.path)\n\n\tgopath := gofilepath.ToSlash(s.path)\n\tc.Check(path, gc.Equals, gopath)\n\tc.Check(path, gc.Equals, \"/a/b/c.xyz\")\n}\n\nfunc (s stdlibSuite) TestMatchTrue(c *gc.C) {\n\ttests := map[string]string{\n\t\t\"abc\":   \"abc\",\n\t\t\"ab[c]\": \"abc\",\n\t\t\"\":      \"\",\n\t\t\"*\":     \"abc\",\n\t\t\"a*c\":   \"abc\",\n\t\t\"?\":     \"a\",\n\t\t\"a?c\":   \"abc\",\n\t}\n\tfor pattern, name := range tests {\n\t\tc.Logf(\"- checking pattern %q against %q -\", pattern, name)\n\t\tmatched, err := filepath.Match(gofilepath.Separator, pattern, name)\n\t\tc.Assert(err, jc.ErrorIsNil)\n\n\t\tgomatched, err := gofilepath.Match(pattern, name)\n\t\tc.Assert(err, 
jc.ErrorIsNil)\n\t\tc.Check(matched, gc.Equals, gomatched)\n\t\tc.Check(matched, jc.IsTrue)\n\t}\n}\n\nfunc (s stdlibSuite) TestMatchFalse(c *gc.C) {\n\ttests := map[string]string{\n\t\t\"abc\": \"xyz\",\n\t\t\"\":    \"abc\",\n\t\t\"a*c\": \"a\",\n\t\t\"?\":   \"\",\n\t\t\"a?c\": \"ac\",\n\t}\n\tfor pattern, name := range tests {\n\t\tc.Logf(\"- checking pattern %q against %q -\", pattern, name)\n\t\tmatched, err := filepath.Match(gofilepath.Separator, pattern, name)\n\t\tc.Assert(err, jc.ErrorIsNil)\n\n\t\tgomatched, err := gofilepath.Match(pattern, name)\n\t\tc.Assert(err, jc.ErrorIsNil)\n\t\tc.Check(matched, gc.Equals, gomatched)\n\t\tc.Check(matched, jc.IsFalse)\n\t}\n}\n\nfunc (s stdlibSuite) TestMatchBadPattern(c *gc.C) {\n\ttests := map[string]string{\n\t\t\"ab[\":    \"abc\",\n\t\t\"ab[-c]\": \"abc\",\n\t\t\"ab[]\":   \"abc\",\n\t}\n\tfor pattern, name := range tests {\n\t\tc.Logf(\"- checking pattern %q against %q -\", pattern, name)\n\t\t_, err := filepath.Match(gofilepath.Separator, pattern, name)\n\n\t\t_, goerr := gofilepath.Match(pattern, name)\n\t\tc.Check(err, gc.Equals, goerr)\n\t\tc.Check(err, gc.Equals, gofilepath.ErrBadPattern)\n\t}\n}\n"
  },
  {
    "path": "filepath/stdlibmatch.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n// Copyright 2009 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license that can be found in the LICENSE.golang file.\n\npackage filepath\n\nimport (\n\t\"path/filepath\"\n\t\"strings\"\n\t\"unicode/utf8\"\n)\n\n// The following functions are adapted from the GO stdlib source.\n\n// Match returns true if name matches the shell file name pattern.\n// The pattern syntax is:\n//\n//  pattern:\n//      { term }\n//  term:\n//      '*'         matches any sequence of non-Separator characters\n//      '?'         matches any single non-Separator character\n//      '[' [ '^' ] { character-range } ']'\n//                  character class (must be non-empty)\n//      c           matches character c (c != '*', '?', '\\\\', '[')\n//      '\\\\' c      matches character c\n//\n//  character-range:\n//      c           matches character c (c != '\\\\', '-', ']')\n//      '\\\\' c      matches character c\n//      lo '-' hi   matches character c for lo <= c <= hi\n//\n// Match requires pattern to match all of name, not just a substring.\n// The only possible returned error is ErrBadPattern, when pattern\n// is malformed.\n//\n// On Windows, escaping is disabled. 
Instead, '\\\\' is treated as\n// path separator.\n//\nfunc Match(sep uint8, pattern, name string) (matched bool, err error) {\nPattern:\n\tfor len(pattern) > 0 {\n\t\tvar star bool\n\t\tvar chunk string\n\t\tstar, chunk, pattern = scanChunk(sep, pattern)\n\t\tif star && chunk == \"\" {\n\t\t\t// Trailing * matches rest of string unless it has a /.\n\t\t\treturn strings.Index(name, string(sep)) < 0, nil\n\t\t}\n\t\t// Look for match at current position.\n\t\tt, ok, err := matchChunk(sep, chunk, name)\n\t\t// if we're the last chunk, make sure we've exhausted the name\n\t\t// otherwise we'll give a false result even if we could still match\n\t\t// using the star\n\t\tif ok && (len(t) == 0 || len(pattern) > 0) {\n\t\t\tname = t\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tif star {\n\t\t\t// Look for match skipping i+1 bytes.\n\t\t\t// Cannot skip /.\n\t\t\tfor i := 0; i < len(name) && name[i] != sep; i++ {\n\t\t\t\tt, ok, err := matchChunk(sep, chunk, name[i+1:])\n\t\t\t\tif ok {\n\t\t\t\t\t// if we're the last chunk, make sure we exhausted the name\n\t\t\t\t\tif len(pattern) == 0 && len(t) > 0 {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tname = t\n\t\t\t\t\tcontinue Pattern\n\t\t\t\t}\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false, nil\n\t}\n\treturn len(name) == 0, nil\n}\n\n// scanChunk gets the next segment of pattern, which is a non-star string\n// possibly preceded by a star.\nfunc scanChunk(sep uint8, pattern string) (star bool, chunk, rest string) {\n\tfor len(pattern) > 0 && pattern[0] == '*' {\n\t\tpattern = pattern[1:]\n\t\tstar = true\n\t}\n\tinrange := false\n\tvar i int\nScan:\n\tfor i = 0; i < len(pattern); i++ {\n\t\tswitch pattern[i] {\n\t\tcase '\\\\':\n\t\t\tif sep == '\\\\' {\n\t\t\t\t// error check handled in matchChunk: bad pattern.\n\t\t\t\tif i+1 < len(pattern) {\n\t\t\t\t\ti++\n\t\t\t\t}\n\t\t\t}\n\t\tcase '[':\n\t\t\tinrange = true\n\t\tcase 
']':\n\t\t\tinrange = false\n\t\tcase '*':\n\t\t\tif !inrange {\n\t\t\t\tbreak Scan\n\t\t\t}\n\t\t}\n\t}\n\treturn star, pattern[0:i], pattern[i:]\n}\n\n// matchChunk checks whether chunk matches the beginning of s.\n// If so, it returns the remainder of s (after the match).\n// Chunk is all single-character operators: literals, char classes, and ?.\nfunc matchChunk(sep uint8, chunk, s string) (rest string, ok bool, err error) {\n\tfor len(chunk) > 0 {\n\t\tif len(s) == 0 {\n\t\t\treturn\n\t\t}\n\t\tswitch chunk[0] {\n\t\tcase '[':\n\t\t\t// character class\n\t\t\tr, n := utf8.DecodeRuneInString(s)\n\t\t\ts = s[n:]\n\t\t\tchunk = chunk[1:]\n\t\t\t// We can't end right after '[', we're expecting at least\n\t\t\t// a closing bracket and possibly a caret.\n\t\t\tif len(chunk) == 0 {\n\t\t\t\terr = filepath.ErrBadPattern\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// possibly negated\n\t\t\tnegated := chunk[0] == '^'\n\t\t\tif negated {\n\t\t\t\tchunk = chunk[1:]\n\t\t\t}\n\t\t\t// parse all ranges\n\t\t\tmatch := false\n\t\t\tnrange := 0\n\t\t\tfor {\n\t\t\t\tif len(chunk) > 0 && chunk[0] == ']' && nrange > 0 {\n\t\t\t\t\tchunk = chunk[1:]\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t\tvar lo, hi rune\n\t\t\t\tif lo, chunk, err = getEsc(sep, chunk); err != nil {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\thi = lo\n\t\t\t\tif chunk[0] == '-' {\n\t\t\t\t\tif hi, chunk, err = getEsc(sep, chunk[1:]); err != nil {\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif lo <= r && r <= hi {\n\t\t\t\t\tmatch = true\n\t\t\t\t}\n\t\t\t\tnrange++\n\t\t\t}\n\t\t\tif match == negated {\n\t\t\t\treturn\n\t\t\t}\n\n\t\tcase '?':\n\t\t\tif s[0] == sep {\n\t\t\t\treturn\n\t\t\t}\n\t\t\t_, n := utf8.DecodeRuneInString(s)\n\t\t\ts = s[n:]\n\t\t\tchunk = chunk[1:]\n\n\t\tcase '\\\\':\n\t\t\tif sep != '\\\\' {\n\t\t\t\tchunk = chunk[1:]\n\t\t\t\tif len(chunk) == 0 {\n\t\t\t\t\terr = filepath.ErrBadPattern\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t\tfallthrough\n\n\t\tdefault:\n\t\t\tif chunk[0] != s[0] 
{\n\t\t\t\treturn\n\t\t\t}\n\t\t\ts = s[1:]\n\t\t\tchunk = chunk[1:]\n\t\t}\n\t}\n\treturn s, true, nil\n}\n\n// getEsc gets a possibly-escaped character from chunk, for a character class.\nfunc getEsc(sep uint8, chunk string) (r rune, nchunk string, err error) {\n\tif len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' {\n\t\terr = filepath.ErrBadPattern\n\t\treturn\n\t}\n\tif chunk[0] == '\\\\' && sep != '\\\\' {\n\t\tchunk = chunk[1:]\n\t\tif len(chunk) == 0 {\n\t\t\terr = filepath.ErrBadPattern\n\t\t\treturn\n\t\t}\n\t}\n\tr, n := utf8.DecodeRuneInString(chunk)\n\tif r == utf8.RuneError && n == 1 {\n\t\terr = filepath.ErrBadPattern\n\t}\n\tnchunk = chunk[n:]\n\tif len(nchunk) == 0 {\n\t\terr = filepath.ErrBadPattern\n\t}\n\treturn\n}\n"
  },
  {
    "path": "filepath/unix.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage filepath\n\nimport (\n\t\"strings\"\n)\n\n// A substantial portion of this code comes from the Go stdlib code.\n\nconst (\n\tUnixSeparator     = '/' // OS-specific path separator\n\tUnixListSeparator = ':' // OS-specific path list separator\n)\n\n// UnixRenderer is a Renderer implementation for most flavors of Unix.\ntype UnixRenderer struct{}\n\n// Base implements Renderer.\nfunc (ur UnixRenderer) Base(path string) string {\n\treturn Base(UnixSeparator, ur.VolumeName, path)\n}\n\n// Clean implements Renderer.\nfunc (ur UnixRenderer) Clean(path string) string {\n\treturn Clean(UnixSeparator, ur.VolumeName, path)\n}\n\n// Dir implements Renderer.\nfunc (ur UnixRenderer) Dir(path string) string {\n\treturn Dir(UnixSeparator, ur.VolumeName, path)\n}\n\n// Ext implements Renderer.\nfunc (UnixRenderer) Ext(path string) string {\n\treturn Ext(UnixSeparator, path)\n}\n\n// FromSlash implements Renderer.\nfunc (UnixRenderer) FromSlash(path string) string {\n\treturn FromSlash(UnixSeparator, path)\n}\n\n// IsAbs implements Renderer.\nfunc (UnixRenderer) IsAbs(path string) bool {\n\treturn strings.HasPrefix(path, string(UnixSeparator))\n}\n\n// Join implements Renderer.\nfunc (ur UnixRenderer) Join(path ...string) string {\n\treturn Join(UnixSeparator, ur.VolumeName, path...)\n}\n\n// Match implements Renderer.\nfunc (UnixRenderer) Match(pattern, name string) (matched bool, err error) {\n\treturn Match(UnixSeparator, pattern, name)\n}\n\n// Split implements Renderer.\nfunc (ur UnixRenderer) Split(path string) (dir, file string) {\n\treturn Split(UnixSeparator, ur.VolumeName, path)\n}\n\n// SplitList implements Renderer.\nfunc (UnixRenderer) SplitList(path string) []string {\n\tif path == \"\" {\n\t\treturn []string{}\n\t}\n\treturn strings.Split(path, string(UnixListSeparator))\n}\n\n// ToSlash implements Renderer.\nfunc (UnixRenderer) ToSlash(path string) 
string {\n\treturn ToSlash(UnixSeparator, path)\n}\n\n// VolumeName implements Renderer.\nfunc (UnixRenderer) VolumeName(path string) string {\n\treturn \"\"\n}\n\n// NormCase implements Renderer.\nfunc (UnixRenderer) NormCase(path string) string {\n\treturn path\n}\n\n// SplitSuffix implements Renderer.\nfunc (UnixRenderer) SplitSuffix(path string) (string, string) {\n\treturn splitSuffix(path)\n}\n"
  },
  {
    "path": "filepath/unix_test.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage filepath_test\n\nimport (\n\tgofilepath \"path/filepath\"\n\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/filepath\"\n)\n\nvar _ = gc.Suite(&unixSuite{})\nvar _ = gc.Suite(&unixThinWrapperSuite{})\n\ntype unixBaseSuite struct {\n\ttesting.IsolationSuite\n\n\tpath     string\n\trenderer *filepath.UnixRenderer\n}\n\nfunc (s *unixBaseSuite) SetUpTest(c *gc.C) {\n\ts.IsolationSuite.SetUpTest(c)\n\n\ts.path = \"/a/b/c.xyz\"\n\ts.renderer = &filepath.UnixRenderer{}\n}\n\nfunc (s *unixBaseSuite) matchesRuntime() bool {\n\treturn gofilepath.Separator == filepath.UnixSeparator\n}\n\ntype unixSuite struct {\n\tunixBaseSuite\n}\n\nfunc (s unixSuite) TestIsAbs(c *gc.C) {\n\tisAbs := s.renderer.IsAbs(s.path)\n\n\tc.Check(isAbs, jc.IsTrue)\n\tif s.matchesRuntime() {\n\t\tc.Check(isAbs, gc.Equals, gofilepath.IsAbs(s.path))\n\t}\n}\n\nfunc (s unixSuite) TestSplitList(c *gc.C) {\n\tlist := s.renderer.SplitList(\"/a:b:/c/d\")\n\n\tc.Check(list, jc.DeepEquals, []string{\"/a\", \"b\", \"/c/d\"})\n\tif s.matchesRuntime() {\n\t\tgolist := gofilepath.SplitList(\"/a:b:/c/d\")\n\t\tc.Check(list, jc.DeepEquals, golist)\n\t}\n}\n\nfunc (s unixSuite) TestVolumeName(c *gc.C) {\n\tvolumeName := s.renderer.VolumeName(s.path)\n\n\tc.Check(volumeName, gc.Equals, \"\")\n}\n\nfunc (s unixSuite) TestNormCaseLower(c *gc.C) {\n\tnormalized := s.renderer.NormCase(\"spam\")\n\n\tc.Check(normalized, gc.Equals, \"spam\")\n}\n\nfunc (s unixSuite) TestNormCaseUpper(c *gc.C) {\n\tnormalized := s.renderer.NormCase(\"SPAM\")\n\n\tc.Check(normalized, gc.Equals, \"SPAM\")\n}\n\nfunc (s unixSuite) TestNormCaseMixed(c *gc.C) {\n\tnormalized := s.renderer.NormCase(\"sPaM\")\n\n\tc.Check(normalized, gc.Equals, \"sPaM\")\n}\n\nfunc (s unixSuite) TestNormCaseCapitalized(c *gc.C) {\n\tnormalized := 
s.renderer.NormCase(\"Spam\")\n\n\tc.Check(normalized, gc.Equals, \"Spam\")\n}\n\nfunc (s unixSuite) TestNormCasePunctuation(c *gc.C) {\n\tnormalized := s.renderer.NormCase(\"spam-eggs.ext\")\n\n\tc.Check(normalized, gc.Equals, \"spam-eggs.ext\")\n}\n\nfunc (s unixSuite) TestSplitSuffix(c *gc.C) {\n\t// This is just a sanity check. The splitSuffix tests are more\n\t// comprehensive.\n\tpath, suffix := s.renderer.SplitSuffix(\"spam.ext\")\n\n\tc.Check(path, gc.Equals, \"spam\")\n\tc.Check(suffix, gc.Equals, \".ext\")\n}\n\n// unixThinWrapperSuite contains test methods for UnixRenderer methods\n// that are just thin wrappers around the corresponding helpers in the\n// filepath package. As such the test coverage is minimal (more of a\n// sanity check).\ntype unixThinWrapperSuite struct {\n\tunixBaseSuite\n}\n\nfunc (s unixThinWrapperSuite) TestBase(c *gc.C) {\n\tpath := s.renderer.Base(s.path)\n\n\tc.Check(path, gc.Equals, \"c.xyz\")\n\tif s.matchesRuntime() {\n\t\tgopath := gofilepath.Base(s.path)\n\t\tc.Check(path, gc.Equals, gopath)\n\t}\n}\n\nfunc (s unixThinWrapperSuite) TestClean(c *gc.C) {\n\t// TODO(ericsnow) Add more cases.\n\toriginals := map[string]string{\n\t\ts.path: s.path,\n\t}\n\tfor original, expected := range originals {\n\t\tc.Logf(\"checking %q\", original)\n\t\tpath := s.renderer.Clean(original)\n\n\t\tc.Check(path, gc.Equals, expected)\n\t\tif s.matchesRuntime() {\n\t\t\tgopath := gofilepath.Clean(original)\n\t\t\tc.Check(path, gc.Equals, gopath)\n\t\t}\n\t}\n}\n\nfunc (s unixThinWrapperSuite) TestDir(c *gc.C) {\n\tpath := s.renderer.Dir(s.path)\n\n\tc.Check(path, gc.Equals, \"/a/b\")\n\tif s.matchesRuntime() {\n\t\tgopath := gofilepath.Dir(s.path)\n\t\tc.Check(path, gc.Equals, gopath)\n\t}\n}\n\nfunc (s unixThinWrapperSuite) TestExt(c *gc.C) {\n\text := s.renderer.Ext(s.path)\n\n\tc.Check(ext, gc.Equals, \".xyz\")\n\tif s.matchesRuntime() {\n\t\tgoext := gofilepath.Ext(s.path)\n\t\tc.Check(ext, gc.Equals, goext)\n\t}\n}\n\nfunc (s 
unixThinWrapperSuite) TestFromSlash(c *gc.C) {\n\toriginal := \"/a/b/c.xyz\"\n\tpath := s.renderer.FromSlash(original)\n\n\tc.Check(path, gc.Equals, s.path)\n\tif s.matchesRuntime() {\n\t\tgopath := gofilepath.FromSlash(original)\n\t\tc.Check(path, gc.Equals, gopath)\n\t}\n}\n\nfunc (s unixThinWrapperSuite) TestJoin(c *gc.C) {\n\tpath := s.renderer.Join(\"a\", \"b\", \"c.xyz\")\n\n\tc.Check(path, gc.Equals, s.path[1:])\n\tif s.matchesRuntime() {\n\t\tgopath := gofilepath.Join(\"a\", \"b\", \"c.xyz\")\n\t\tc.Check(path, gc.Equals, gopath)\n\t}\n}\n\nfunc (s unixThinWrapperSuite) TestSplit(c *gc.C) {\n\tdir, base := s.renderer.Split(s.path)\n\n\tc.Check(dir, gc.Equals, \"/a/b/\")\n\tc.Check(base, gc.Equals, \"c.xyz\")\n\tif s.matchesRuntime() {\n\t\tgodir, gobase := gofilepath.Split(s.path)\n\t\tc.Check(dir, gc.Equals, godir)\n\t\tc.Check(base, gc.Equals, gobase)\n\t}\n}\n\nfunc (s unixThinWrapperSuite) TestToSlash(c *gc.C) {\n\tpath := s.renderer.ToSlash(s.path)\n\n\tc.Check(path, gc.Equals, \"/a/b/c.xyz\")\n\tif s.matchesRuntime() {\n\t\tgopath := gofilepath.ToSlash(s.path)\n\t\tc.Check(path, gc.Equals, gopath)\n\t}\n}\n\nfunc (s unixThinWrapperSuite) TestMatchTrue(c *gc.C) {\n\ttests := map[string]string{\n\t\t\"abc\":   \"abc\",\n\t\t\"ab[c]\": \"abc\",\n\t\t\"\":      \"\",\n\t\t\"*\":     \"abc\",\n\t\t\"a*c\":   \"abc\",\n\t\t\"?\":     \"a\",\n\t\t\"a?c\":   \"abc\",\n\t}\n\tfor pattern, name := range tests {\n\t\tc.Logf(\"- checking pattern %q against %q -\", pattern, name)\n\t\tmatched, err := s.renderer.Match(pattern, name)\n\t\tc.Assert(err, jc.ErrorIsNil)\n\n\t\tc.Check(matched, jc.IsTrue)\n\t\tif s.matchesRuntime() {\n\t\t\tgomatched, err := gofilepath.Match(pattern, name)\n\t\t\tc.Assert(err, jc.ErrorIsNil)\n\t\t\tc.Check(matched, gc.Equals, gomatched)\n\t\t}\n\t}\n}\n\nfunc (s unixThinWrapperSuite) TestMatchFalse(c *gc.C) {\n\ttests := map[string]string{\n\t\t\"abc\": \"xyz\",\n\t\t\"\":    \"abc\",\n\t\t\"a*c\": \"a\",\n\t\t\"?\":   
\"\",\n\t\t\"a?c\": \"ac\",\n\t}\n\tfor pattern, name := range tests {\n\t\tc.Logf(\"- checking pattern %q against %q -\", pattern, name)\n\t\tmatched, err := s.renderer.Match(pattern, name)\n\t\tc.Assert(err, jc.ErrorIsNil)\n\n\t\tc.Check(matched, jc.IsFalse)\n\t\tif s.matchesRuntime() {\n\t\t\tgomatched, err := gofilepath.Match(pattern, name)\n\t\t\tc.Assert(err, jc.ErrorIsNil)\n\t\t\tc.Check(matched, gc.Equals, gomatched)\n\t\t}\n\t}\n}\n\nfunc (s unixThinWrapperSuite) TestMatchBadPattern(c *gc.C) {\n\ttests := map[string]string{\n\t\t\"ab[\":    \"abc\",\n\t\t\"ab[-c]\": \"abc\",\n\t\t\"ab[]\":   \"abc\",\n\t}\n\tfor pattern, name := range tests {\n\t\tc.Logf(\"- checking pattern %q against %q -\", pattern, name)\n\t\t_, err := s.renderer.Match(pattern, name)\n\n\t\tc.Check(err, gc.Equals, gofilepath.ErrBadPattern)\n\t\tif s.matchesRuntime() {\n\t\t\t_, goerr := gofilepath.Match(pattern, name)\n\t\t\tc.Check(err, gc.Equals, goerr)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "filepath/win.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage filepath\n\nimport (\n\t\"strings\"\n)\n\n// A substantial portion of this code comes from the Go stdlib code.\n\nconst (\n\tWindowsSeparator     = '\\\\' // OS-specific path separator\n\tWindowsListSeparator = ';'  // OS-specific path list separator\n)\n\n// WindowsRenderer is a Renderer implementation for Windows.\ntype WindowsRenderer struct{}\n\n// Base implements Renderer.\nfunc (ur WindowsRenderer) Base(path string) string {\n\treturn Base(WindowsSeparator, ur.VolumeName, path)\n}\n\n// Clean implements Renderer.\nfunc (ur WindowsRenderer) Clean(path string) string {\n\treturn Clean(WindowsSeparator, ur.VolumeName, path)\n}\n\n// Dir implements Renderer.\nfunc (ur WindowsRenderer) Dir(path string) string {\n\treturn Dir(WindowsSeparator, ur.VolumeName, path)\n}\n\n// Ext implements Renderer.\nfunc (WindowsRenderer) Ext(path string) string {\n\treturn Ext(WindowsSeparator, path)\n}\n\n// FromSlash implements Renderer.\nfunc (WindowsRenderer) FromSlash(path string) string {\n\treturn FromSlash(WindowsSeparator, path)\n}\n\n// IsAbs implements Renderer.\nfunc (WindowsRenderer) IsAbs(path string) bool {\n\tl := volumeNameLen(path)\n\tif l == 0 {\n\t\treturn false\n\t}\n\tpath = path[l:]\n\tif path == \"\" {\n\t\treturn false\n\t}\n\treturn isSlash(path[0])\n}\n\n// Join implements Renderer.\nfunc (ur WindowsRenderer) Join(path ...string) string {\n\treturn Join(WindowsSeparator, ur.VolumeName, path...)\n}\n\n// Match implements Renderer.\nfunc (WindowsRenderer) Match(pattern, name string) (matched bool, err error) {\n\treturn Match(WindowsSeparator, pattern, name)\n}\n\n// Split implements Renderer.\nfunc (ur WindowsRenderer) Split(path string) (dir, file string) {\n\treturn Split(WindowsSeparator, ur.VolumeName, path)\n}\n\n// SplitList implements Renderer.\nfunc (WindowsRenderer) SplitList(path string) []string {\n\tif path == \"\" 
{\n\t\treturn []string{}\n\t}\n\n\t// Split path, respecting but preserving quotes.\n\tlist := []string{}\n\tstart := 0\n\tquo := false\n\tfor i := 0; i < len(path); i++ {\n\t\tswitch c := path[i]; {\n\t\tcase c == '\"':\n\t\t\tquo = !quo\n\t\tcase c == WindowsListSeparator && !quo:\n\t\t\tlist = append(list, path[start:i])\n\t\t\tstart = i + 1\n\t\t}\n\t}\n\tlist = append(list, path[start:])\n\n\t// Remove quotes.\n\tfor i, s := range list {\n\t\tif strings.Contains(s, `\"`) {\n\t\t\tlist[i] = strings.Replace(s, `\"`, ``, -1)\n\t\t}\n\t}\n\n\treturn list\n}\n\n// ToSlash implements Renderer.\nfunc (WindowsRenderer) ToSlash(path string) string {\n\treturn ToSlash(WindowsSeparator, path)\n}\n\n// VolumeName implements Renderer.\nfunc (WindowsRenderer) VolumeName(path string) string {\n\treturn path[:volumeNameLen(path)]\n}\n\n// NormCase implements Renderer.\nfunc (WindowsRenderer) NormCase(path string) string {\n\treturn strings.ToLower(path)\n}\n\n// SplitSuffix implements Renderer.\nfunc (WindowsRenderer) SplitSuffix(path string) (string, string) {\n\treturn splitSuffix(path)\n}\n\nfunc isSlash(c uint8) bool {\n\treturn c == WindowsSeparator || c == '/'\n}\n\n// volumeNameLen returns length of the leading volume name on Windows.\n// It returns 0 elsewhere.\nfunc volumeNameLen(path string) int {\n\tif len(path) < 2 {\n\t\treturn 0\n\t}\n\t// with drive letter\n\tc := path[0]\n\tif path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {\n\t\treturn 2\n\t}\n\t// is it UNC\n\tif l := len(path); l >= 5 && isSlash(path[0]) && isSlash(path[1]) &&\n\t\t!isSlash(path[2]) && path[2] != '.' {\n\t\t// first, leading `\\\\` and next shouldn't be `\\`. its server name.\n\t\tfor n := 3; n < l-1; n++ {\n\t\t\t// second, next '\\' shouldn't be repeated.\n\t\t\tif isSlash(path[n]) {\n\t\t\t\tn++\n\t\t\t\t// third, following something characters. its share name.\n\t\t\t\tif !isSlash(path[n]) {\n\t\t\t\t\tif path[n] == '.' 
{\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tfor ; n < l; n++ {\n\t\t\t\t\t\tif isSlash(path[n]) {\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn n\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\treturn 0\n}\n"
  },
  {
    "path": "filepath/win_test.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage filepath_test\n\nimport (\n\tgofilepath \"path/filepath\"\n\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/filepath\"\n)\n\nvar _ = gc.Suite(&windowsSuite{})\nvar _ = gc.Suite(&windowsThinWrapperSuite{})\n\ntype windowsBaseSuite struct {\n\ttesting.IsolationSuite\n\n\tpath     string\n\trenderer *filepath.WindowsRenderer\n}\n\nfunc (s *windowsBaseSuite) SetUpTest(c *gc.C) {\n\ts.IsolationSuite.SetUpTest(c)\n\n\ts.path = `c:\\a\\b\\c.xyz`\n\ts.renderer = &filepath.WindowsRenderer{}\n}\n\nfunc (s *windowsBaseSuite) matchesRuntime() bool {\n\treturn gofilepath.Separator == filepath.WindowsSeparator\n}\n\ntype windowsSuite struct {\n\twindowsBaseSuite\n}\n\nfunc (s windowsSuite) TestIsAbs(c *gc.C) {\n\tisAbs := s.renderer.IsAbs(s.path)\n\n\tc.Check(isAbs, jc.IsTrue)\n\tif s.matchesRuntime() {\n\t\tc.Check(isAbs, gc.Equals, gofilepath.IsAbs(s.path))\n\t}\n}\n\nfunc (s windowsSuite) TestSplitList(c *gc.C) {\n\tlist := s.renderer.SplitList(`\\a;b;\\c\\d`)\n\n\tc.Check(list, jc.DeepEquals, []string{`\\a`, \"b\", `\\c\\d`})\n\tif s.matchesRuntime() {\n\t\tgolist := gofilepath.SplitList(`\\a;b;\\c\\d`)\n\t\tc.Check(list, jc.DeepEquals, golist)\n\t}\n}\n\nfunc (s windowsSuite) TestVolumeName(c *gc.C) {\n\tvolumeName := s.renderer.VolumeName(s.path)\n\n\tc.Check(volumeName, gc.Equals, \"c:\")\n\tif s.matchesRuntime() {\n\t\tgoresult := gofilepath.VolumeName(s.path)\n\t\tc.Check(volumeName, gc.Equals, goresult)\n\t}\n}\n\nfunc (s windowsSuite) TestNormCaseLower(c *gc.C) {\n\tnormalized := s.renderer.NormCase(\"spam\")\n\n\tc.Check(normalized, gc.Equals, \"spam\")\n}\n\nfunc (s windowsSuite) TestNormCaseUpper(c *gc.C) {\n\tnormalized := s.renderer.NormCase(\"SPAM\")\n\n\tc.Check(normalized, gc.Equals, \"spam\")\n}\n\nfunc (s windowsSuite) TestNormCaseMixed(c *gc.C) 
{\n\tnormalized := s.renderer.NormCase(\"sPaM\")\n\n\tc.Check(normalized, gc.Equals, \"spam\")\n}\n\nfunc (s windowsSuite) TestNormCaseCapitalized(c *gc.C) {\n\tnormalized := s.renderer.NormCase(\"Spam\")\n\n\tc.Check(normalized, gc.Equals, \"spam\")\n}\n\nfunc (s windowsSuite) TestNormCasePunctuation(c *gc.C) {\n\tnormalized := s.renderer.NormCase(\"spam-eggs.ext\")\n\n\tc.Check(normalized, gc.Equals, \"spam-eggs.ext\")\n}\n\nfunc (s windowsSuite) TestSplitSuffix(c *gc.C) {\n\t// This is just a sanity check. The splitSuffix tests are more\n\t// comprehensive.\n\tpath, suffix := s.renderer.SplitSuffix(\"spam.ext\")\n\n\tc.Check(path, gc.Equals, \"spam\")\n\tc.Check(suffix, gc.Equals, \".ext\")\n}\n\n// windowsThinWrapperSuite contains test methods for WindowsRenderer methods\n// that are just thin wrappers around the corresponding helpers in the\n// filepath package. As such the test coverage is minimal (more of a\n// sanity check).\ntype windowsThinWrapperSuite struct {\n\twindowsBaseSuite\n}\n\nfunc (s windowsThinWrapperSuite) TestBase(c *gc.C) {\n\tpath := s.renderer.Base(s.path)\n\n\tc.Check(path, gc.Equals, \"c.xyz\")\n\tif s.matchesRuntime() {\n\t\tgopath := gofilepath.Base(s.path)\n\t\tc.Check(path, gc.Equals, gopath)\n\t}\n}\n\nfunc (s windowsThinWrapperSuite) TestClean(c *gc.C) {\n\t// TODO(ericsnow) Add more cases.\n\toriginals := map[string]string{\n\t\ts.path: s.path,\n\t}\n\tfor original, expected := range originals {\n\t\tc.Logf(\"checking %q\", original)\n\t\tpath := s.renderer.Clean(original)\n\n\t\tc.Check(path, gc.Equals, expected)\n\t\tif s.matchesRuntime() {\n\t\t\tgopath := gofilepath.Clean(original)\n\t\t\tc.Check(path, gc.Equals, gopath)\n\t\t}\n\t}\n}\n\nfunc (s windowsThinWrapperSuite) TestDir(c *gc.C) {\n\tpath := s.renderer.Dir(s.path)\n\n\tc.Check(path, gc.Equals, `c:\\a\\b`)\n\tif s.matchesRuntime() {\n\t\tgopath := gofilepath.Dir(s.path)\n\t\tc.Check(path, gc.Equals, gopath)\n\t}\n}\n\nfunc (s windowsThinWrapperSuite) TestExt(c *gc.C) 
{\n\text := s.renderer.Ext(s.path)\n\n\tc.Check(ext, gc.Equals, \".xyz\")\n\tif s.matchesRuntime() {\n\t\tgoext := gofilepath.Ext(s.path)\n\t\tc.Check(ext, gc.Equals, goext)\n\t}\n}\n\nfunc (s windowsThinWrapperSuite) TestFromSlash(c *gc.C) {\n\toriginal := \"/a/b/c.xyz\"\n\tpath := s.renderer.FromSlash(original)\n\n\tc.Check(path, gc.Equals, s.path[2:])\n\tif s.matchesRuntime() {\n\t\tgopath := gofilepath.FromSlash(original)\n\t\tc.Check(path, gc.Equals, gopath)\n\t}\n}\n\nfunc (s windowsThinWrapperSuite) TestJoin(c *gc.C) {\n\tpath := s.renderer.Join(\"a\", \"b\", \"c.xyz\")\n\n\tc.Check(path, gc.Equals, s.path[3:])\n\tif s.matchesRuntime() {\n\t\tgopath := gofilepath.Join(\"a\", \"b\", \"c.xyz\")\n\t\tc.Check(path, gc.Equals, gopath)\n\t}\n}\n\nfunc (s windowsThinWrapperSuite) TestSplit(c *gc.C) {\n\tdir, base := s.renderer.Split(s.path)\n\n\tc.Check(dir, gc.Equals, `c:\\a\\b\\`)\n\tc.Check(base, gc.Equals, \"c.xyz\")\n\tif s.matchesRuntime() {\n\t\tgodir, gobase := gofilepath.Split(s.path)\n\t\tc.Check(dir, gc.Equals, godir)\n\t\tc.Check(base, gc.Equals, gobase)\n\t}\n}\n\nfunc (s windowsThinWrapperSuite) TestToSlash(c *gc.C) {\n\tpath := s.renderer.ToSlash(s.path)\n\n\tc.Check(path, gc.Equals, \"c:/a/b/c.xyz\")\n\tif s.matchesRuntime() {\n\t\tgopath := gofilepath.ToSlash(s.path)\n\t\tc.Check(path, gc.Equals, gopath)\n\t}\n}\n\nfunc (s windowsThinWrapperSuite) TestMatchTrue(c *gc.C) {\n\ttests := map[string]string{\n\t\t\"abc\":   \"abc\",\n\t\t\"ab[c]\": \"abc\",\n\t\t\"\":      \"\",\n\t\t\"*\":     \"abc\",\n\t\t\"a*c\":   \"abc\",\n\t\t\"?\":     \"a\",\n\t\t\"a?c\":   \"abc\",\n\t}\n\tfor pattern, name := range tests {\n\t\tc.Logf(\"- checking pattern %q against %q -\", pattern, name)\n\t\tmatched, err := s.renderer.Match(pattern, name)\n\t\tc.Assert(err, jc.ErrorIsNil)\n\n\t\tc.Check(matched, jc.IsTrue)\n\t\tif s.matchesRuntime() {\n\t\t\tgomatched, err := gofilepath.Match(pattern, name)\n\t\t\tc.Assert(err, jc.ErrorIsNil)\n\t\t\tc.Check(matched, 
gc.Equals, gomatched)\n\t\t}\n\t}\n}\n\nfunc (s windowsThinWrapperSuite) TestMatchFalse(c *gc.C) {\n\ttests := map[string]string{\n\t\t\"abc\": \"xyz\",\n\t\t\"\":    \"abc\",\n\t\t\"a*c\": \"a\",\n\t\t\"?\":   \"\",\n\t\t\"a?c\": \"ac\",\n\t}\n\tfor pattern, name := range tests {\n\t\tc.Logf(\"- checking pattern %q against %q -\", pattern, name)\n\t\tmatched, err := s.renderer.Match(pattern, name)\n\t\tc.Assert(err, jc.ErrorIsNil)\n\n\t\tc.Check(matched, jc.IsFalse)\n\t\tif s.matchesRuntime() {\n\t\t\tgomatched, err := gofilepath.Match(pattern, name)\n\t\t\tc.Assert(err, jc.ErrorIsNil)\n\t\t\tc.Check(matched, gc.Equals, gomatched)\n\t\t}\n\t}\n}\n\nfunc (s windowsThinWrapperSuite) TestMatchBadPattern(c *gc.C) {\n\ttests := map[string]string{\n\t\t\"ab[\":    \"abc\",\n\t\t\"ab[-c]\": \"abc\",\n\t\t\"ab[]\":   \"abc\",\n\t}\n\tfor pattern, name := range tests {\n\t\tc.Logf(\"- checking pattern %q against %q -\", pattern, name)\n\t\t_, err := s.renderer.Match(pattern, name)\n\n\t\tc.Check(err, gc.Equals, gofilepath.ErrBadPattern)\n\t\tif s.matchesRuntime() {\n\t\t\t_, goerr := gofilepath.Match(pattern, name)\n\t\t\tc.Check(err, gc.Equals, goerr)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "filestorage/doc.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\n/*\nutils/filestorage provides types for abstracting and implementing a\nsystem that stores files, including their metadata.\n\nEach file in the system is identified by a unique ID, determined by the\nsystem at the time the file is stored.\n\nFile metadata includes such information as the size of the file, its\nchecksum, and when it was created.  Regardless of how it is stored in\nthe system, at the abstraction level it is represented as a document.\n\nMetadata can exist in the system without an associated file.  However,\nevery file must have a corresponding metadata doc stored in the system.\nA file can be added for a metadata doc that does not have one already.\n\nThe main type is the FileStorage interface.  It exposes the core\nfunctionality of such a system.  This includes adding/removing files,\nretrieving them or their metadata, and listing all files in the system.\n\nThe package also provides a basic implementation of FileStorage,\navailable through NewFileStorage().  This implementation simply wraps\ntwo more focused systems: doc storage and raw file storage.  The wrapper\nuses the doc storage to store the metadata and raw file storage to\nstore the files.\n\nThe two subsystems are exposed via corresponding interfaces: DocStorage\n(and its specialization MetadataStorage) and RawFileStorage.  While a\nsingle type could implement both, in practice they will be separate.\nThe doc storage is responsible for generating the unique IDs.  The raw\nfile storage defers to the doc storage for any information about the\nfile, including the ID.\n\n*/\npackage filestorage\n"
  },
  {
    "path": "filestorage/export_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage filestorage\n"
  },
  {
    "path": "filestorage/fakes_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage filestorage_test\n\nimport (\n\t\"io\"\n\n\t\"github.com/juju/errors\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/filestorage\"\n)\n\n// FakeMetadataStorage is used as a DocStorage and MetadataStorage for\n// testing purposes.\ntype FakeMetadataStorage struct {\n\tcalls []string\n\n\tid       string\n\tmeta     filestorage.Metadata\n\tmetaList []filestorage.Metadata\n\terr      error\n\n\tidArg   string\n\tmetaArg filestorage.Metadata\n}\n\n// Check verfies the state of the fake.\nfunc (s *FakeMetadataStorage) Check(c *gc.C, id string, meta filestorage.Metadata, calls ...string) {\n\tc.Check(s.calls, jc.DeepEquals, calls)\n\tc.Check(s.idArg, gc.Equals, id)\n\tc.Check(s.metaArg, gc.Equals, meta)\n}\n\nfunc (s *FakeMetadataStorage) Doc(id string) (filestorage.Document, error) {\n\ts.calls = append(s.calls, \"Doc\")\n\ts.idArg = id\n\tif s.err != nil {\n\t\treturn nil, s.err\n\t}\n\treturn s.meta, nil\n}\n\nfunc (s *FakeMetadataStorage) ListDocs() ([]filestorage.Document, error) {\n\ts.calls = append(s.calls, \"ListDoc\")\n\tif s.err != nil {\n\t\treturn nil, s.err\n\t}\n\tvar docs []filestorage.Document\n\tfor _, doc := range s.metaList {\n\t\tdocs = append(docs, doc)\n\t}\n\treturn docs, nil\n}\n\nfunc (s *FakeMetadataStorage) AddDoc(doc filestorage.Document) (string, error) {\n\ts.calls = append(s.calls, \"AddDoc\")\n\tmeta, err := filestorage.Convert(doc)\n\tif err != nil {\n\t\treturn \"\", errors.Trace(err)\n\t}\n\ts.metaArg = meta\n\treturn s.id, nil\n}\n\nfunc (s *FakeMetadataStorage) RemoveDoc(id string) error {\n\ts.calls = append(s.calls, \"RemoveDoc\")\n\ts.idArg = id\n\treturn s.err\n}\n\nfunc (s *FakeMetadataStorage) Close() error {\n\ts.calls = append(s.calls, \"Close\")\n\treturn s.err\n}\n\nfunc (s *FakeMetadataStorage) Metadata(id string) (filestorage.Metadata, error) 
{\n\ts.calls = append(s.calls, \"Metadata\")\n\ts.idArg = id\n\tif s.err != nil {\n\t\treturn nil, s.err\n\t}\n\treturn s.meta, nil\n}\n\nfunc (s *FakeMetadataStorage) ListMetadata() ([]filestorage.Metadata, error) {\n\ts.calls = append(s.calls, \"ListMetadata\")\n\tif s.err != nil {\n\t\treturn nil, s.err\n\t}\n\treturn s.metaList, nil\n}\n\nfunc (s *FakeMetadataStorage) AddMetadata(meta filestorage.Metadata) (string, error) {\n\ts.calls = append(s.calls, \"AddMetadata\")\n\ts.metaArg = meta\n\tif s.err != nil {\n\t\treturn \"\", s.err\n\t}\n\treturn s.id, nil\n}\n\nfunc (s *FakeMetadataStorage) RemoveMetadata(id string) error {\n\ts.calls = append(s.calls, \"RemoveMetadata\")\n\ts.idArg = id\n\treturn s.err\n}\n\nfunc (s *FakeMetadataStorage) SetStored(id string) error {\n\ts.calls = append(s.calls, \"SetStored\")\n\ts.idArg = id\n\treturn s.err\n}\n\n// FakeRawFileStorage is used in testing as a RawFileStorage.\ntype FakeRawFileStorage struct {\n\tcalls []string\n\n\tfile io.ReadCloser\n\terr  error\n\n\tidArg   string\n\tfileArg io.Reader\n\tsizeArg int64\n}\n\n// Check verfies the state of the fake.\nfunc (s *FakeRawFileStorage) Check(c *gc.C, id string, file io.Reader, size int64, calls ...string) {\n\tc.Check(s.calls, jc.DeepEquals, calls)\n\tc.Check(s.idArg, gc.Equals, id)\n\tc.Check(s.fileArg, gc.Equals, file)\n\tc.Check(s.sizeArg, gc.Equals, size)\n}\n\n// CheckNotUsed verifies that the fake was not used.\nfunc (s *FakeRawFileStorage) CheckNotUsed(c *gc.C) {\n\ts.Check(c, \"\", nil, 0)\n}\n\nfunc (s *FakeRawFileStorage) File(id string) (io.ReadCloser, error) {\n\ts.calls = append(s.calls, \"File\")\n\ts.idArg = id\n\tif s.err != nil {\n\t\treturn nil, s.err\n\t}\n\treturn s.file, nil\n}\n\nfunc (s *FakeRawFileStorage) AddFile(id string, file io.Reader, size int64) error {\n\ts.calls = append(s.calls, \"AddFile\")\n\ts.idArg = id\n\ts.fileArg = file\n\ts.sizeArg = size\n\treturn s.err\n}\n\nfunc (s *FakeRawFileStorage) RemoveFile(id string) error 
{\n\ts.calls = append(s.calls, \"RemoveFile\")\n\ts.idArg = id\n\treturn s.err\n}\n\nfunc (s *FakeRawFileStorage) Close() error {\n\ts.calls = append(s.calls, \"Close\")\n\treturn s.err\n}\n"
  },
  {
    "path": "filestorage/interfaces.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage filestorage\n\nimport (\n\t\"io\"\n\t\"time\"\n)\n\n// FileStorage is an abstraction that can be used for the storage of files.\ntype FileStorage interface {\n\tio.Closer\n\n\t// Metadata returns a file's metadata.\n\tMetadata(id string) (Metadata, error)\n\n\t// Get returns a file and its metadata.\n\tGet(id string) (Metadata, io.ReadCloser, error)\n\n\t// List returns the metadata for each stored file.\n\tList() ([]Metadata, error)\n\n\t// Add stores a file and its metadata.\n\tAdd(meta Metadata, archive io.Reader) (string, error)\n\n\t// SetFile stores a file for an existing metadata entry.\n\tSetFile(id string, file io.Reader) error\n\n\t// Remove removes a file from storage.\n\tRemove(id string) error\n}\n\n// Document represents a document that can be identified uniquely\n// by a string.\ntype Document interface {\n\t// ID returns the unique ID of the document.\n\tID() string\n\n\t// SetID sets the ID of the document.  If the ID is already set,\n\t// SetID() should return true (false otherwise).\n\tSetID(id string) (alreadySet bool)\n}\n\n// Metadata is the meta information for a stored file.\ntype Metadata interface {\n\tDocument\n\n\t// Size is the size of the file (in bytes).\n\tSize() int64\n\n\t// Checksum is the checksum for the file.\n\tChecksum() string\n\n\t// ChecksumFormat is the kind (and encoding) of checksum.\n\tChecksumFormat() string\n\n\t// Stored returns when the file was last stored.  If it has not been\n\t// stored yet, nil is returned.  If it has been stored but the\n\t// timestamp is not available, a zero value is returned\n\t// (see Time.IsZero).\n\tStored() *time.Time\n\n\t// SetFileInfo sets the file info on the metadata.\n\tSetFileInfo(size int64, checksum, checksumFormat string) error\n\n\t// SetStored records when the file was last stored.  
If the previous\n\t// value matters, be sure to call Stored() first.\n\tSetStored(timestamp *time.Time)\n}\n\n// DocStorage is an abstraction for a system that can store docs (structs).\n// The system is expected to generate its own unique ID for each doc.\ntype DocStorage interface {\n\tio.Closer\n\n\t// Doc returns the doc that matches the ID.  If there is no match,\n\t// an error is returned (see errors.IsNotFound).  Any other problem\n\t// also results in an error.\n\tDoc(id string) (Document, error)\n\n\t// ListDocs returns a list of all the docs in the storage.\n\tListDocs() ([]Document, error)\n\n\t// AddDoc adds the doc to the storage.  If successful, the storage-\n\t// generated ID for the doc is returned.  Otherwise an error is\n\t// returned.\n\tAddDoc(doc Document) (string, error)\n\n\t// RemoveDoc removes the matching doc from the storage.  If there\n\t// is no match an error is returned (see errors.IsNotFound).  Any\n\t// other problem also results in an error.\n\tRemoveDoc(id string) error\n}\n\n// RawFileStorage is an abstraction around a system that can store files.\n// The system is expected to rely on the user for unique IDs.\ntype RawFileStorage interface {\n\tio.Closer\n\n\t// File returns the matching file.  If there is no match an error is\n\t// returned (see errors.IsNotFound).  Any other problem also results\n\t// in an error.\n\tFile(id string) (io.ReadCloser, error)\n\n\t// AddFile adds the file to the storage.  If it fails to do so,\n\t// it returns an error.  If a file is already stored for the ID,\n\t// AddFile() fails (see errors.IsAlreadyExists).\n\tAddFile(id string, file io.Reader, size int64) error\n\n\t// RemoveFile removes the matching file from the storage.  It fails\n\t// if there is no such file (see errors.IsNotFound).  
Any other problem\n\t// also results in an error.\n\tRemoveFile(id string) error\n}\n\n// MetadataStorage is an extension of DocStorage adapted to file metadata.\ntype MetadataStorage interface {\n\tio.Closer\n\n\t// Metadata returns the matching Metadata.  It fails if there is no\n\t// match (see errors.IsNotFound).  Any other problems likewise\n\t// results in an error.\n\tMetadata(id string) (Metadata, error)\n\n\t// ListMetadata returns a list of all metadata in the storage.\n\tListMetadata() ([]Metadata, error)\n\n\t// AddMetadata adds the metadata to the storage.  If successful, the\n\t// storage-generated ID for the metadata is returned.  Otherwise an\n\t// error is returned.\n\tAddMetadata(meta Metadata) (string, error)\n\n\t// RemoveMetadata removes the matching metadata from the storage.\n\t// If there is no match an error is returned (see errors.IsNotFound).\n\t// Any other problem also results in an error.\n\tRemoveMetadata(id string) error\n\n\t// SetStored updates the stored metadata to indicate that the\n\t// associated file has been successfully stored in a RawFileStorage\n\t// system.  If it does not find a stored metadata with the matching\n\t// ID, it will return an error (see errors.IsNotFound).  It also\n\t// returns an error if it fails to update the stored metadata.\n\tSetStored(id string) error\n}\n"
  },
  {
    "path": "filestorage/metadata.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage filestorage\n\nimport (\n\t\"time\"\n\n\t\"github.com/juju/errors\"\n)\n\n// RawDoc is a basic, uniquely identifiable document.\ntype RawDoc struct {\n\t// ID is the unique identifier for the document.\n\tID string\n}\n\n// Doc wraps a document in the Document interface.\ntype Doc struct {\n\tRaw RawDoc\n}\n\n// ID returns the document's unique identifier.\nfunc (d *Doc) ID() string {\n\treturn d.Raw.ID\n}\n\n// SetID sets the document's unique identifier.  If the ID is already\n// set, SetID() returns true (false otherwise).\nfunc (d *Doc) SetID(id string) bool {\n\tif d.Raw.ID != \"\" {\n\t\treturn true\n\t}\n\td.Raw.ID = id\n\treturn false\n}\n\n// RawFileMetadata holds info specific to stored files.\ntype RawFileMetadata struct {\n\t// Size is the size (in bytes) of the stored file.\n\tSize int64\n\t// Checksum is the checksum of the stored file.\n\tChecksum string\n\t// ChecksumFormat describes the kind of the checksum.\n\tChecksumFormat string\n\t// Stored records the timestamp of when the file was last stored.\n\tStored *time.Time\n}\n\n// FileMetadata contains the metadata for a single stored file.\ntype FileMetadata struct {\n\tDoc\n\tRaw RawFileMetadata\n}\n\n// NewMetadata returns a new Metadata for a stored file.\nfunc NewMetadata() *FileMetadata {\n\tmeta := FileMetadata{}\n\treturn &meta\n}\n\nfunc (m *FileMetadata) Size() int64 {\n\treturn m.Raw.Size\n}\n\nfunc (m *FileMetadata) Checksum() string {\n\treturn m.Raw.Checksum\n}\n\nfunc (m *FileMetadata) ChecksumFormat() string {\n\treturn m.Raw.ChecksumFormat\n}\n\nfunc (m *FileMetadata) Stored() *time.Time {\n\treturn m.Raw.Stored\n}\n\nfunc (m *FileMetadata) SetFileInfo(size int64, checksum, format string) error {\n\t// Fall back to existing values.\n\tif size == 0 {\n\t\tsize = m.Raw.Size\n\t}\n\tif checksum == \"\" {\n\t\tchecksum = m.Raw.Checksum\n\t}\n\tif format == \"\" 
{\n\t\tformat = m.Raw.ChecksumFormat\n\t}\n\tif checksum != \"\" {\n\t\tif format == \"\" {\n\t\t\treturn errors.Errorf(\"missing checksum format\")\n\t\t}\n\t} else if format != \"\" {\n\t\treturn errors.Errorf(\"missing checksum\")\n\t}\n\t// Only allow setting once.\n\tif m.Raw.Size != 0 && size != m.Raw.Size {\n\t\treturn errors.Errorf(\"file information (size) already set\")\n\t}\n\tif m.Raw.Checksum != \"\" && checksum != m.Raw.Checksum {\n\t\treturn errors.Errorf(\"file information (checksum) already set\")\n\t}\n\tif m.Raw.ChecksumFormat != \"\" && format != m.Raw.ChecksumFormat {\n\t\treturn errors.Errorf(\"file information (checksum format) already set\")\n\t}\n\t// Set the values.\n\tm.Raw.Size = size\n\tm.Raw.Checksum = checksum\n\tm.Raw.ChecksumFormat = format\n\treturn nil\n}\n\nfunc (m *FileMetadata) SetStored(timestamp *time.Time) {\n\tif timestamp == nil {\n\t\tnow := time.Now().UTC()\n\t\tm.Raw.Stored = &now\n\t} else {\n\t\tm.Raw.Stored = timestamp\n\t}\n}\n"
  },
  {
    "path": "filestorage/metadata_store.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage filestorage\n\nimport (\n\t\"github.com/juju/errors\"\n)\n\n// Convert turns a Document into a Metadata if possible.\nfunc Convert(doc Document) (Metadata, error) {\n\tmeta, ok := doc.(Metadata)\n\tif !ok {\n\t\treturn nil, errors.Errorf(\"expected a Metadata doc, got %v\", doc)\n\t}\n\treturn meta, nil\n}\n\n// MetadataDocStorage provides the MetadataStorage methods than can be\n// derived from DocStorage methods.  To fully implement MetadataStorage,\n// this type must be embedded in a type that implements the remaining\n// methods.\ntype MetadataDocStorage struct {\n\tDocStorage\n}\n\n// Metadata implements MetadataStorage.Metadata.\nfunc (s *MetadataDocStorage) Metadata(id string) (Metadata, error) {\n\tdoc, err := s.Doc(id)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tmeta, err := Convert(doc)\n\treturn meta, errors.Trace(err)\n}\n\n// ListMetadata implements MetadataStorage.ListMetadata.\nfunc (s *MetadataDocStorage) ListMetadata() ([]Metadata, error) {\n\tdocs, err := s.ListDocs()\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tvar metaList []Metadata\n\tfor _, doc := range docs {\n\t\tif doc == nil {\n\t\t\tcontinue\n\t\t}\n\t\tmeta, err := Convert(doc)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\tmetaList = append(metaList, meta)\n\t}\n\treturn metaList, nil\n}\n\n// ListMetadata implements MetadataStorage.ListMetadata.\nfunc (s *MetadataDocStorage) AddMetadata(meta Metadata) (string, error) {\n\tid, err := s.AddDoc(meta)\n\treturn id, errors.Trace(err)\n}\n\n// ListMetadata implements MetadataStorage.ListMetadata.\nfunc (s *MetadataDocStorage) RemoveMetadata(id string) error {\n\treturn errors.Trace(s.RemoveDoc(id))\n}\n"
  },
  {
    "path": "filestorage/metadata_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage filestorage_test\n\nimport (\n\t\"time\"\n\n\t\"github.com/juju/testing\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/filestorage\"\n)\n\nvar (\n\t_ filestorage.Document = (*filestorage.Doc)(nil)\n\t_ filestorage.Metadata = (*filestorage.FileMetadata)(nil)\n)\n\nvar _ = gc.Suite(&MetadataSuite{})\n\ntype MetadataSuite struct {\n\ttesting.IsolationSuite\n}\n\nfunc (s *MetadataSuite) TestFileMetadataNewMetadata(c *gc.C) {\n\tmeta := filestorage.NewMetadata()\n\n\tc.Check(meta.ID(), gc.Equals, \"\")\n\tc.Check(meta.Size(), gc.Equals, int64(0))\n\tc.Check(meta.Checksum(), gc.Equals, \"\")\n\tc.Check(meta.ChecksumFormat(), gc.Equals, \"\")\n\tc.Check(meta.Stored(), gc.IsNil)\n}\n\nfunc (s *MetadataSuite) TestFileMetadataSetIDInitial(c *gc.C) {\n\tmeta := filestorage.NewMetadata()\n\tmeta.SetFileInfo(10, \"some sum\", \"SHA-1\")\n\tc.Assert(meta.ID(), gc.Equals, \"\")\n\n\tsuccess := meta.SetID(\"some id\")\n\tc.Check(success, gc.Equals, false)\n\tc.Check(meta.ID(), gc.Equals, \"some id\")\n}\n\nfunc (s *MetadataSuite) TestFileMetadataSetIDAlreadySetSame(c *gc.C) {\n\tmeta := filestorage.NewMetadata()\n\tmeta.SetFileInfo(10, \"some sum\", \"SHA-1\")\n\tsuccess := meta.SetID(\"some id\")\n\tc.Assert(success, gc.Equals, false)\n\n\tsuccess = meta.SetID(\"some id\")\n\tc.Check(success, gc.Equals, true)\n\tc.Check(meta.ID(), gc.Equals, \"some id\")\n}\n\nfunc (s *MetadataSuite) TestFileMetadataSetIDAlreadySetDifferent(c *gc.C) {\n\tmeta := filestorage.NewMetadata()\n\tmeta.SetFileInfo(10, \"some sum\", \"SHA-1\")\n\tsuccess := meta.SetID(\"some id\")\n\tc.Assert(success, gc.Equals, false)\n\n\tsuccess = meta.SetID(\"another id\")\n\tc.Check(success, gc.Equals, true)\n\tc.Check(meta.ID(), gc.Equals, \"some id\")\n}\n\nfunc (s *MetadataSuite) TestFileMetadataSetFileInfo(c *gc.C) {\n\tmeta := filestorage.NewMetadata()\n\tc.Assert(meta.Size(), 
gc.Equals, int64(0))\n\tc.Assert(meta.Checksum(), gc.Equals, \"\")\n\tc.Assert(meta.ChecksumFormat(), gc.Equals, \"\")\n\tc.Assert(meta.Stored(), gc.IsNil)\n\tmeta.SetFileInfo(10, \"some sum\", \"SHA-1\")\n\n\tc.Check(meta.Size(), gc.Equals, int64(10))\n\tc.Check(meta.Checksum(), gc.Equals, \"some sum\")\n\tc.Check(meta.ChecksumFormat(), gc.Equals, \"SHA-1\")\n\tc.Check(meta.Stored(), gc.IsNil)\n}\n\nfunc (s *MetadataSuite) TestFileMetadataSetStored(c *gc.C) {\n\tmeta := filestorage.NewMetadata()\n\ttimestamp := time.Now().UTC()\n\tmeta.SetStored(&timestamp)\n\n\tc.Check(meta.Stored(), gc.Equals, &timestamp)\n}\n\nfunc (s *MetadataSuite) TestFileMetadataSetStoredDefault(c *gc.C) {\n\tmeta := filestorage.NewMetadata()\n\tc.Assert(meta.Stored(), gc.IsNil)\n\tmeta.SetStored(nil)\n\n\tc.Check(meta.Stored(), gc.NotNil)\n}\n"
  },
  {
    "path": "filestorage/package_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage filestorage_test\n\nimport (\n\t\"testing\"\n\n\tgc \"gopkg.in/check.v1\"\n)\n\nfunc TestPackage(t *testing.T) {\n\tgc.TestingT(t)\n}\n"
  },
  {
    "path": "filestorage/wrapper.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage filestorage\n\nimport (\n\t\"io\"\n\n\t\"github.com/juju/errors\"\n)\n\n// Ensure fileStorage implements FileStorage.\nvar _ = FileStorage((*fileStorage)(nil))\n\ntype fileStorage struct {\n\tmetaStorage MetadataStorage\n\trawStorage  RawFileStorage\n}\n\n// NewFileStorage returns a new FileStorage value that wraps a\n// MetadataStorage and a RawFileStorage.  It coordinates the two even\n// though they may not be designed to be compatible (or the two may be\n// the same value).\n//\n// A stored file will always have a metadata value stored.  However, it\n// is not required to have a raw file stored.\nfunc NewFileStorage(meta MetadataStorage, files RawFileStorage) FileStorage {\n\tstor := fileStorage{\n\t\tmetaStorage: meta,\n\t\trawStorage:  files,\n\t}\n\treturn &stor\n}\n\n// Metadata returns the matching metadata.  Failure to find it (see\n// errors.IsNotFound) or any other problem results in an error.\nfunc (s *fileStorage) Metadata(id string) (Metadata, error) {\n\tmeta, err := s.metaStorage.Metadata(id)\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\treturn meta, nil\n}\n\n// Get returns the matching file and its associated metadata.  If there\n// is no match (see errors.IsNotFound) or any other problem, it returns\n// an error.  
Both the metadata and file must have been stored for the\n// file to be considered found.\nfunc (s *fileStorage) Get(id string) (Metadata, io.ReadCloser, error) {\n\tmeta, err := s.Metadata(id)\n\tif err != nil {\n\t\treturn nil, nil, errors.Trace(err)\n\t}\n\tif meta.Stored() == nil {\n\t\treturn nil, nil, errors.NotFoundf(\"no file stored for %q\", id)\n\t}\n\tfile, err := s.rawStorage.File(id)\n\tif err != nil {\n\t\treturn nil, nil, errors.Trace(err)\n\t}\n\treturn meta, file, nil\n}\n\n// List returns a list of the metadata for all files in the storage.\nfunc (s *fileStorage) List() ([]Metadata, error) {\n\treturn s.metaStorage.ListMetadata()\n}\n\nfunc (s *fileStorage) addFile(id string, size int64, file io.Reader) error {\n\terr := s.rawStorage.AddFile(id, file, size)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\terr = s.metaStorage.SetStored(id)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\treturn nil\n}\n\n// Add adds the file to the storage.  It returns the unique ID generated\n// by the storage for the file.  If no file is provided, only the\n// metadata is stored.  While the passed-in \"meta\" is not modified, the\n// new ID and \"stored\" flag will be saved in metadata storage.  Feel\n// free to explicitly call meta.SetID() and meta.SetStored() afterward.\n//\n// Any problem (including an existing file, see errors.IsAlreadyExists)\n// results in an error.  
If there is an error while storing either the\n// file or metadata, neither will be stored.\nfunc (s *fileStorage) Add(meta Metadata, file io.Reader) (string, error) {\n\tid, err := s.metaStorage.AddMetadata(meta)\n\tif err != nil {\n\t\treturn \"\", errors.Trace(err)\n\t}\n\n\tif file != nil {\n\t\terr = s.addFile(id, meta.Size(), file)\n\t\tif err != nil {\n\t\t\t// Remove the metadata we just added.\n\t\t\tcontext := err\n\t\t\terr = s.metaStorage.RemoveMetadata(id)\n\t\t\tif err != nil {\n\t\t\t\terr = errors.Annotate(err, \"while handling another error\")\n\t\t\t\treturn \"\", errors.Wrap(context, err)\n\t\t\t}\n\t\t\treturn \"\", errors.Trace(context)\n\t\t}\n\t}\n\n\treturn id, nil\n}\n\n// SetFile stores the raw file for an existing metadata.  If there is no\n// matching stored metadata an error is returned (see errors.IsNotFound).\n// If a file has already been stored an error is returned (see\n// errors.IsAlreadyExists).  Any other failure to add the file also\n// results in an error.\nfunc (s *fileStorage) SetFile(id string, file io.Reader) error {\n\tmeta, err := s.Metadata(id)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\terr = s.addFile(id, meta.Size(), file)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\treturn nil\n}\n\n// Remove removes both the metadata and raw file from the storage.  If\n// there is no match an error is returned (see errors.IsNotFound).\n//\n// The raw file is removed first.  Thus if there is any problem after\n// removing the raw file, the metadata will still be stored.  
However,\n// in that case the stored metadata is not guaranteed to accurately\n// represent that there is no corresponding raw file in storage.\nfunc (s *fileStorage) Remove(id string) error {\n\terr := s.rawStorage.RemoveFile(id)\n\tif err != nil && !errors.IsNotFound(err) {\n\t\treturn errors.Trace(err)\n\t}\n\terr = s.metaStorage.RemoveMetadata(id)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\treturn nil\n}\n\n// Close implements io.Closer.Close.\nfunc (s *fileStorage) Close() error {\n\tferr := s.rawStorage.Close()\n\tmerr := s.metaStorage.Close()\n\tif ferr == nil {\n\t\treturn errors.Trace(merr)\n\t} else if merr == nil {\n\t\treturn errors.Trace(ferr)\n\t} else {\n\t\tmsg := \"closing both failed: metadata (%v) and files (%v)\"\n\t\treturn errors.Errorf(msg, merr, ferr)\n\t}\n}\n"
  },
  {
    "path": "filestorage/wrapper_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage filestorage_test\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"io/ioutil\"\n\n\t\"github.com/juju/errors\"\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/filestorage\"\n)\n\nvar _ = gc.Suite(&WrapperSuite{})\n\ntype WrapperSuite struct {\n\ttesting.IsolationSuite\n\trawstor  *FakeRawFileStorage\n\tmetastor *FakeMetadataStorage\n\tstor     filestorage.FileStorage\n}\n\nfunc (s *WrapperSuite) SetUpTest(c *gc.C) {\n\ts.IsolationSuite.SetUpTest(c)\n\n\ts.rawstor = &FakeRawFileStorage{}\n\ts.metastor = &FakeMetadataStorage{}\n\ts.stor = filestorage.NewFileStorage(s.metastor, s.rawstor)\n}\n\nfunc (s *WrapperSuite) metadata() filestorage.Metadata {\n\tmeta := filestorage.NewMetadata()\n\tmeta.SetFileInfo(10, \"\", \"\")\n\treturn meta\n}\n\nfunc (s *WrapperSuite) setMeta() (string, filestorage.Metadata) {\n\tid := \"<id>\"\n\tmeta := s.metadata()\n\tmeta.SetID(id)\n\ts.metastor.meta = meta\n\ts.metastor.metaList = append(s.metastor.metaList, meta)\n\treturn id, meta\n}\n\nfunc (s *WrapperSuite) setFile(data string) (string, filestorage.Metadata, io.ReadCloser) {\n\tid, meta := s.setMeta()\n\tfile := ioutil.NopCloser(bytes.NewBufferString(data))\n\ts.rawstor.file = file\n\tmeta.SetStored(nil)\n\treturn id, meta, file\n}\n\nfunc (s *WrapperSuite) TestFileStorageNewFileStorage(c *gc.C) {\n\tstor := filestorage.NewFileStorage(s.metastor, s.rawstor)\n\n\tc.Check(stor, gc.NotNil)\n}\n\nfunc (s *WrapperSuite) TestFileStorageMetadata(c *gc.C) {\n\tid, original := s.setMeta()\n\tmeta, err := s.stor.Metadata(id)\n\tc.Assert(err, gc.IsNil)\n\n\tc.Check(meta, jc.DeepEquals, original)\n\ts.metastor.Check(c, id, nil, \"Metadata\")\n\ts.rawstor.CheckNotUsed(c)\n}\n\nfunc (s *WrapperSuite) TestFileStorageGet(c *gc.C) {\n\tid, origmeta, origfile := s.setFile(\"spam\")\n\tmeta, file, err := 
s.stor.Get(id)\n\tc.Assert(err, gc.IsNil)\n\n\tc.Check(meta, gc.Equals, origmeta)\n\tc.Check(file, gc.Equals, origfile)\n}\n\nfunc (s *WrapperSuite) TestFileStorageListEmpty(c *gc.C) {\n\tlist, err := s.stor.List()\n\tc.Assert(err, gc.IsNil)\n\n\tc.Check(list, gc.HasLen, 0)\n}\n\nfunc (s *WrapperSuite) TestFileStorageListOne(c *gc.C) {\n\tid, _ := s.setMeta()\n\tlist, err := s.stor.List()\n\tc.Assert(err, gc.IsNil)\n\n\tc.Check(list, gc.HasLen, 1)\n\tc.Assert(list[0], gc.NotNil)\n\tc.Check(list[0].ID(), gc.Equals, id)\n}\n\nfunc (s *WrapperSuite) TestFileStorageListTwo(c *gc.C) {\n\tid1, _ := s.setMeta()\n\tid2, _ := s.setMeta()\n\tlist, err := s.stor.List()\n\tc.Assert(err, gc.IsNil)\n\n\tc.Assert(list, gc.HasLen, 2)\n\tc.Assert(list[0], gc.NotNil)\n\tc.Assert(list[1], gc.NotNil)\n\tif list[0].ID() == id1 {\n\t\tc.Check(list[1].ID(), gc.Equals, id2)\n\t} else {\n\t\tc.Check(list[1].ID(), gc.Equals, id1)\n\t}\n}\n\nfunc (s *WrapperSuite) TestFileStorageAddMeta(c *gc.C) {\n\ts.metastor.id = \"<spam>\"\n\n\tmeta := s.metadata()\n\tc.Assert(meta.ID(), gc.Equals, \"\")\n\tid, err := s.stor.Add(meta, nil)\n\tc.Assert(err, gc.IsNil)\n\n\tc.Check(id, gc.Equals, \"<spam>\")\n\tc.Check(meta.ID(), gc.Equals, \"\")\n\ts.metastor.Check(c, \"\", meta, \"AddMetadata\")\n\ts.rawstor.CheckNotUsed(c)\n}\n\nfunc (s *WrapperSuite) TestFileStorageAddFile(c *gc.C) {\n\ts.metastor.id = \"<spam>\"\n\n\tvar file *bytes.Buffer\n\tmeta := s.metadata()\n\tid, err := s.stor.Add(meta, file)\n\tc.Assert(err, gc.IsNil)\n\n\tc.Check(meta.ID(), gc.Equals, \"\")\n\tc.Check(meta.Stored(), gc.IsNil)\n\n\tc.Check(id, gc.Equals, \"<spam>\")\n\tc.Check(meta.ID(), gc.Equals, \"\")\n\ts.metastor.Check(c, id, meta, \"AddMetadata\", \"SetStored\")\n\ts.rawstor.Check(c, id, file, 10, \"AddFile\")\n}\n\nfunc (s *WrapperSuite) TestFileStorageAddIDNotSet(c *gc.C) {\n\toriginal := s.metadata()\n\tc.Assert(original.ID(), gc.Equals, \"\")\n\t_, err := s.stor.Add(original, nil)\n\tc.Check(err, 
gc.IsNil)\n\n\tc.Check(original.ID(), gc.Equals, \"\")\n}\n\nfunc (s *WrapperSuite) TestFileStorageAddMetaOnly(c *gc.C) {\n\tid, original := s.setMeta()\n\tmeta, err := s.stor.Metadata(id)\n\tc.Assert(err, gc.IsNil)\n\n\tc.Check(meta, gc.Equals, original)\n\tc.Check(meta.Stored(), gc.IsNil)\n}\n\nfunc (s *WrapperSuite) TestFileStorageAddIDAlreadySet(c *gc.C) {\n\toriginal := s.metadata()\n\toriginal.SetID(\"eggs\")\n\t_, err := s.stor.Add(original, nil)\n\n\tc.Check(err, gc.IsNil) // This should be handled at the lower level.\n}\n\nfunc (s *WrapperSuite) TestFileStorageAddFileFailureDropsMetadata(c *gc.C) {\n\toriginal := s.metadata()\n\tfailure := errors.New(\"failed!\")\n\traw := &FakeRawFileStorage{err: failure}\n\tstor := filestorage.NewFileStorage(s.metastor, raw)\n\t_, err := stor.Add(original, &bytes.Buffer{})\n\n\tc.Assert(errors.Cause(err), gc.Equals, failure)\n\n\tmetalist, metaErr := s.metastor.ListMetadata()\n\tc.Assert(metaErr, gc.IsNil)\n\tc.Check(metalist, gc.HasLen, 0)\n\tc.Check(original.ID(), gc.Equals, \"\")\n}\n\nfunc (s *WrapperSuite) TestFileStorageSetFile(c *gc.C) {\n\tid, _ := s.setMeta()\n\t_, _, err := s.stor.Get(id)\n\tc.Assert(err, gc.NotNil)\n\n\tfile := bytes.NewBufferString(\"spam\")\n\terr = s.stor.SetFile(id, file)\n\tc.Assert(err, gc.IsNil)\n\n\ts.metastor.Check(c, id, nil, \"Metadata\", \"Metadata\", \"SetStored\")\n\ts.rawstor.Check(c, id, file, 10, \"AddFile\")\n}\n\nfunc (s *WrapperSuite) TestFileStorageRemove(c *gc.C) {\n\tid := \"<spam>\"\n\terr := s.stor.Remove(id)\n\tc.Assert(err, gc.IsNil)\n\n\ts.metastor.Check(c, id, nil, \"RemoveMetadata\")\n\ts.rawstor.Check(c, id, nil, 0, \"RemoveFile\")\n}\n\nfunc (s *WrapperSuite) TestClose(c *gc.C) {\n\tmetaStor := &FakeMetadataStorage{}\n\tfileStor := &FakeRawFileStorage{}\n\tstor := filestorage.NewFileStorage(metaStor, fileStor)\n\terr := stor.Close()\n\tc.Assert(err, gc.IsNil)\n\n\tc.Check(metaStor.calls, gc.DeepEquals, []string{\"Close\"})\n\tc.Check(fileStor.calls, 
gc.DeepEquals, []string{\"Close\"})\n}\n"
  },
  {
    "path": "fs/copy.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage fs\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path/filepath\"\n)\n\n// Copy recursively copies the file, directory or symbolic link at src\n// to dst. The destination must not exist. Symbolic links are not\n// followed.\n//\n// If the copy fails half way through, the destination might be left\n// partially written.\nfunc Copy(src, dst string) error {\n\tsrcInfo, srcErr := os.Lstat(src)\n\tif srcErr != nil {\n\t\treturn srcErr\n\t}\n\t_, dstErr := os.Lstat(dst)\n\tif dstErr == nil {\n\t\t// TODO(rog) add a flag to permit overwriting?\n\t\treturn fmt.Errorf(\"will not overwrite %q\", dst)\n\t}\n\tif !os.IsNotExist(dstErr) {\n\t\treturn dstErr\n\t}\n\tswitch mode := srcInfo.Mode(); mode & os.ModeType {\n\tcase os.ModeSymlink:\n\t\treturn copySymLink(src, dst)\n\tcase os.ModeDir:\n\t\treturn copyDir(src, dst, mode)\n\tcase 0:\n\t\treturn copyFile(src, dst, mode)\n\tdefault:\n\t\treturn fmt.Errorf(\"cannot copy file with mode %v\", mode)\n\t}\n}\n\nfunc copySymLink(src, dst string) error {\n\ttarget, err := os.Readlink(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn os.Symlink(target, dst)\n}\n\nfunc copyFile(src, dst string, mode os.FileMode) error {\n\tsrcf, err := os.Open(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer srcf.Close()\n\tdstf, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, mode.Perm())\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer dstf.Close()\n\t// Make the actual permissions match the source permissions\n\t// even in the presence of umask.\n\tif err := os.Chmod(dstf.Name(), mode.Perm()); err != nil {\n\t\treturn err\n\t}\n\tif _, err := io.Copy(dstf, srcf); err != nil {\n\t\treturn fmt.Errorf(\"cannot copy %q to %q: %v\", src, dst, err)\n\t}\n\treturn nil\n}\n\nfunc copyDir(src, dst string, mode os.FileMode) error {\n\tsrcf, err := os.Open(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer 
srcf.Close()\n\tif mode&0500 == 0 {\n\t\t// The source directory doesn't have write permission,\n\t\t// so give the new directory write permission anyway\n\t\t// so that we have permission to create its contents.\n\t\t// We'll make the permissions match at the end.\n\t\tmode |= 0500\n\t}\n\tif err := os.Mkdir(dst, mode.Perm()); err != nil {\n\t\treturn err\n\t}\n\tfor {\n\t\tnames, err := srcf.Readdirnames(100)\n\t\tfor _, name := range names {\n\t\t\tif err := Copy(filepath.Join(src, name), filepath.Join(dst, name)); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error reading directory %q: %v\", src, err)\n\t\t}\n\t}\n\tif err := os.Chmod(dst, mode.Perm()); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "fs/copy_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage fs_test\n\nimport (\n\t\"path/filepath\"\n\t\"testing\"\n\n\tft \"github.com/juju/testing/filetesting\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/fs\"\n)\n\ntype copySuite struct{}\n\nvar _ = gc.Suite(&copySuite{})\n\nfunc TestPackage(t *testing.T) {\n\tgc.TestingT(t)\n}\n\nvar copyTests = []struct {\n\tabout string\n\tsrc   ft.Entries\n\tdst   ft.Entries\n\terr   string\n}{{\n\tabout: \"one file\",\n\tsrc: []ft.Entry{\n\t\tft.File{\"file\", \"data\", 0756},\n\t},\n}, {\n\tabout: \"one directory\",\n\tsrc: []ft.Entry{\n\t\tft.Dir{\"dir\", 0777},\n\t},\n}, {\n\tabout: \"one symlink\",\n\tsrc: []ft.Entry{\n\t\tft.Symlink{\"link\", \"/foo\"},\n\t},\n}, {\n\tabout: \"several entries\",\n\tsrc: []ft.Entry{\n\t\tft.Dir{\"top\", 0755},\n\t\tft.File{\"top/foo\", \"foodata\", 0644},\n\t\tft.File{\"top/bar\", \"bardata\", 0633},\n\t\tft.Dir{\"top/next\", 0721},\n\t\tft.Symlink{\"top/next/link\", \"../foo\"},\n\t\tft.File{\"top/next/another\", \"anotherdata\", 0644},\n\t},\n}, {\n\tabout: \"destination already exists\",\n\tsrc: []ft.Entry{\n\t\tft.Dir{\"dir\", 0777},\n\t},\n\tdst: []ft.Entry{\n\t\tft.Dir{\"dir\", 0777},\n\t},\n\terr: `will not overwrite \".+dir\"`,\n}, {\n\tabout: \"source with unwritable directory\",\n\tsrc: []ft.Entry{\n\t\tft.Dir{\"dir\", 0555},\n\t},\n}}\n\nfunc (*copySuite) TestCopy(c *gc.C) {\n\tfor i, test := range copyTests {\n\t\tc.Logf(\"test %d: %v\", i, test.about)\n\t\tsrc, dst := c.MkDir(), c.MkDir()\n\t\ttest.src.Create(c, src)\n\t\ttest.dst.Create(c, dst)\n\t\tpath := test.src[0].GetPath()\n\t\terr := fs.Copy(\n\t\t\tfilepath.Join(src, path),\n\t\t\tfilepath.Join(dst, path),\n\t\t)\n\t\tif test.err != \"\" {\n\t\t\tc.Check(err, gc.ErrorMatches, test.err)\n\t\t} else {\n\t\t\tc.Assert(err, gc.IsNil)\n\t\t\ttest.src.Check(c, dst)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "go.mod",
    "content": "module github.com/juju/utils/v4\n\ngo 1.24.4\n\nrequire (\n\tgithub.com/juju/clock v1.0.3\n\tgithub.com/juju/collections v1.0.4\n\tgithub.com/juju/errors v1.0.0\n\tgithub.com/juju/loggo/v2 v2.0.0\n\tgithub.com/juju/mutex/v2 v2.0.0\n\tgithub.com/juju/testing v1.2.0\n\tgolang.org/x/crypto v0.39.0\n\tgolang.org/x/net v0.41.0\n\tgolang.org/x/text v0.26.0\n\tgopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c\n\tgopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7\n\tgopkg.in/yaml.v2 v2.4.0\n)\n\nrequire (\n\tgithub.com/juju/loggo v1.0.0 // indirect\n\tgithub.com/juju/utils/v3 v3.1.0 // indirect\n\tgithub.com/kr/pretty v0.3.1 // indirect\n\tgithub.com/kr/text v0.2.0 // indirect\n\tgithub.com/rogpeppe/go-internal v1.9.0 // indirect\n\tgolang.org/x/sys v0.33.0 // indirect\n\tgolang.org/x/term v0.32.0 // indirect\n)\n"
  },
  {
    "path": "go.sum",
    "content": "github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=\ngithub.com/golang/mock v1.5.0 h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g=\ngithub.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=\ngithub.com/juju/ansiterm v0.0.0-20180109212912-720a0952cc2a/go.mod h1:UJSiEoRfvx3hP73CvoARgeLjaIOjybY9vj8PUPPFGeU=\ngithub.com/juju/clock v1.0.3 h1:yJHIsWXeU8j3QcBdiess09SzfiXRRrsjKPn2whnMeds=\ngithub.com/juju/clock v1.0.3/go.mod h1:HIBvJ8kiV/n7UHwKuCkdYL4l/MDECztHR2sAvWDxxf0=\ngithub.com/juju/collections v1.0.4 h1:GjL+aN512m2rVDqhPII7P6qB0e+iYFubz8sqBhZaZtk=\ngithub.com/juju/collections v1.0.4/go.mod h1:hVrdB0Zwq9wIU1Fl6ItD2+UETeNeOEs+nGvJufVe+0c=\ngithub.com/juju/errors v1.0.0 h1:yiq7kjCLll1BiaRuNY53MGI0+EQ3rF6GB+wvboZDefM=\ngithub.com/juju/errors v1.0.0/go.mod h1:B5x9thDqx0wIMH3+aLIMP9HjItInYWObRovoCFM5Qe8=\ngithub.com/juju/loggo v1.0.0 h1:Y6ZMQOGR9Aj3BGkiWx7HBbIx6zNwNkxhVNOHU2i1bl0=\ngithub.com/juju/loggo v1.0.0/go.mod h1:NIXFioti1SmKAlKNuUwbMenNdef59IF52+ZzuOmHYkg=\ngithub.com/juju/loggo/v2 v2.0.0 h1:PzyVIn+NgoZ22QUtPgKF/lh+6SnaCOEXhcP+sE4FhOk=\ngithub.com/juju/loggo/v2 v2.0.0/go.mod h1:647d6WvXBLj5lvka2qBvccr7vMIvF2KFkEH+0ZuFOUM=\ngithub.com/juju/mutex/v2 v2.0.0 h1:rVmJdOaXGWF8rjcFHBNd4x57/1tks5CgXHx55O55SB0=\ngithub.com/juju/mutex/v2 v2.0.0/go.mod h1:jwCfBs/smYDaeZLqeaCi8CB8M+tOes4yf827HoOEoqk=\ngithub.com/juju/testing v1.2.0 h1:Q0wxjaxx4XPVEN+SgzxKr3d82pjmSBcuM3WndAU391c=\ngithub.com/juju/testing v1.2.0/go.mod h1:lqZVzNwBKAbylGZidK77ts6kIdoOkmD52+4m0ysetPo=\ngithub.com/juju/utils/v3 v3.1.0 h1:NrNo73oVtfr7kLP17/BDpubXwa7YEW16+Ult6z9kpHI=\ngithub.com/juju/utils/v3 v3.1.0/go.mod h1:nAj3sHtdYfAkvnkqttTy3Xzm2HzkD9Hfgnc+upOW2Z8=\ngithub.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=\ngithub.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=\ngithub.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=\ngithub.com/kr/pty v1.1.1/go.mod 
h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=\ngithub.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=\ngithub.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=\ngithub.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=\ngithub.com/lunixbochs/vtclean v0.0.0-20160125035106-4fbf7632a2c6/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=\ngithub.com/mattn/go-colorable v0.0.6/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=\ngithub.com/mattn/go-isatty v0.0.0-20160806122752-66b8e73f3f5c/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=\ngithub.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=\ngithub.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=\ngithub.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=\ngolang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=\ngolang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=\ngolang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=\ngolang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=\ngolang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=\ngolang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=\ngolang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=\ngolang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=\ngolang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=\ngolang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20160105164936-4f90aeace3a2/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c 
h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=\ngopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=\ngopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=\ngopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=\ngopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=\ngopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=\n"
  },
  {
    "path": "gomaxprocs.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils\n\nimport (\n\t\"os\"\n\t\"runtime\"\n)\n\nvar gomaxprocs = runtime.GOMAXPROCS\nvar numCPU = runtime.NumCPU\n\n// UseMultipleCPUs sets GOMAXPROCS to the number of CPU cores unless it has\n// already been overridden by the GOMAXPROCS environment variable.\nfunc UseMultipleCPUs() {\n\tif envGOMAXPROCS := os.Getenv(\"GOMAXPROCS\"); envGOMAXPROCS != \"\" {\n\t\tn := gomaxprocs(0)\n\t\tlogger.Debugf(\"GOMAXPROCS already set in environment to %q, %d internally\",\n\t\t\tenvGOMAXPROCS, n)\n\t\treturn\n\t}\n\tn := numCPU()\n\tlogger.Debugf(\"setting GOMAXPROCS to %d\", n)\n\tgomaxprocs(n)\n}\n"
  },
  {
    "path": "gomaxprocs_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils_test\n\nimport (\n\t\"os\"\n\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4\"\n)\n\ntype gomaxprocsSuite struct {\n\ttesting.IsolationSuite\n\tsetmaxprocs    chan int\n\tnumCPUResponse int\n\tsetMaxProcs    int\n}\n\nvar _ = gc.Suite(&gomaxprocsSuite{})\n\nfunc (s *gomaxprocsSuite) SetUpTest(c *gc.C) {\n\ts.IsolationSuite.SetUpTest(c)\n\t// always stub out GOMAXPROCS so we don't actually change anything\n\ts.numCPUResponse = 2\n\ts.setMaxProcs = -1\n\tmaxProcsFunc := func(n int) int {\n\t\ts.setMaxProcs = n\n\t\treturn 1\n\t}\n\tnumCPUFunc := func() int { return s.numCPUResponse }\n\ts.PatchValue(utils.GOMAXPROCS, maxProcsFunc)\n\ts.PatchValue(utils.NumCPU, numCPUFunc)\n\ts.PatchEnvironment(\"GOMAXPROCS\", \"\")\n}\n\nfunc (s *gomaxprocsSuite) TestUseMultipleCPUsDoesNothingWhenGOMAXPROCSSet(c *gc.C) {\n\terr := os.Setenv(\"GOMAXPROCS\", \"1\")\n\tc.Assert(err, jc.ErrorIsNil)\n\tutils.UseMultipleCPUs()\n\tc.Check(s.setMaxProcs, gc.Equals, 0)\n}\n\nfunc (s *gomaxprocsSuite) TestUseMultipleCPUsWhenEnabled(c *gc.C) {\n\tutils.UseMultipleCPUs()\n\tc.Check(s.setMaxProcs, gc.Equals, 2)\n\ts.numCPUResponse = 4\n\tutils.UseMultipleCPUs()\n\tc.Check(s.setMaxProcs, gc.Equals, 4)\n}\n"
  },
  {
    "path": "hash/fingerprint.go",
    "content": "// Copyright 2016 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage hash\n\nimport (\n\t\"encoding/base64\"\n\t\"encoding/hex\"\n\t\"hash\"\n\t\"io\"\n\n\t\"github.com/juju/errors\"\n)\n\n// Fingerprint represents the checksum for some data.\ntype Fingerprint struct {\n\tsum []byte\n}\n\n// NewFingerprint returns wraps the provided raw hash sum. This function\n// roundtrips with Fingerprint.Bytes().\nfunc NewFingerprint(sum []byte, validate func([]byte) error) (Fingerprint, error) {\n\tif validate == nil {\n\t\treturn Fingerprint{}, errors.New(\"missing validate func\")\n\t}\n\n\tif err := validate(sum); err != nil {\n\t\treturn Fingerprint{}, errors.Trace(err)\n\t}\n\treturn newFingerprint(sum), nil\n}\n\n// NewValidFingerprint returns a Fingerprint corresponding\n// to the current of the provided hash.\nfunc NewValidFingerprint(hash hash.Hash) Fingerprint {\n\tsum := hash.Sum(nil)\n\treturn newFingerprint(sum)\n}\n\nfunc newFingerprint(sum []byte) Fingerprint {\n\treturn Fingerprint{\n\t\tsum: append([]byte{}, sum...), // Use an isolated copy.\n\t}\n}\n\n// GenerateFingerprint returns the fingerprint for the provided data.\nfunc GenerateFingerprint(reader io.Reader, newHash func() hash.Hash) (Fingerprint, error) {\n\tvar fp Fingerprint\n\n\tif reader == nil {\n\t\treturn fp, errors.New(\"missing reader\")\n\t}\n\tif newHash == nil {\n\t\treturn fp, errors.New(\"missing new hash func\")\n\t}\n\n\thash := newHash()\n\tif _, err := io.Copy(hash, reader); err != nil {\n\t\treturn fp, errors.Trace(err)\n\t}\n\tfp.sum = hash.Sum(nil)\n\treturn fp, nil\n}\n\n// ParseHexFingerprint returns wraps the provided raw fingerprint string.\n// This function roundtrips with Fingerprint.Hex().\nfunc ParseHexFingerprint(hexSum string, validate func([]byte) error) (Fingerprint, error) {\n\tif validate == nil {\n\t\treturn Fingerprint{}, errors.New(\"missing validate func\")\n\t}\n\n\tsum, err := hex.DecodeString(hexSum)\n\tif err 
!= nil {\n\t\treturn Fingerprint{}, errors.Trace(err)\n\t}\n\tfp, err := NewFingerprint(sum, validate)\n\tif err != nil {\n\t\treturn Fingerprint{}, errors.Trace(err)\n\t}\n\treturn fp, nil\n}\n\n// ParseBase64Fingerprint wraps the provided raw fingerprint string.\n// This function roundtrips with Fingerprint.Base64().\nfunc ParseBase64Fingerprint(b64Sum string, validate func([]byte) error) (Fingerprint, error) {\n\tif validate == nil {\n\t\treturn Fingerprint{}, errors.New(\"missing validate func\")\n\t}\n\n\tsum, err := base64.StdEncoding.DecodeString(b64Sum)\n\tif err != nil {\n\t\treturn Fingerprint{}, errors.Trace(err)\n\t}\n\tfp, err := NewFingerprint(sum, validate)\n\tif err != nil {\n\t\treturn Fingerprint{}, errors.Trace(err)\n\t}\n\treturn fp, nil\n}\n\n// String implements fmt.Stringer.\nfunc (fp Fingerprint) String() string {\n\treturn fp.Hex()\n}\n\n// Hex returns the hex string representation of the fingerprint.\nfunc (fp Fingerprint) Hex() string {\n\treturn hex.EncodeToString(fp.sum)\n}\n\n// Base64 returns the base64 encoded fingerprint.\nfunc (fp Fingerprint) Base64() string {\n\treturn base64.StdEncoding.EncodeToString(fp.sum)\n}\n\n// Bytes returns the raw (sum) bytes of the fingerprint.\nfunc (fp Fingerprint) Bytes() []byte {\n\treturn append([]byte{}, fp.sum...)\n}\n\n// IsZero returns whether or not the fingerprint is the zero value.\nfunc (fp Fingerprint) IsZero() bool {\n\treturn len(fp.sum) == 0\n}\n\n// Validate returns an error if the fingerprint is invalid.\nfunc (fp Fingerprint) Validate() error {\n\tif fp.IsZero() {\n\t\treturn errors.NotValidf(\"zero-value fingerprint\")\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "hash/fingerprint_test.go",
    "content": "// Copyright 2016 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage hash_test\n\nimport (\n\t\"crypto/sha512\"\n\t\"encoding/hex\"\n\tstdhash \"hash\"\n\n\t\"github.com/juju/errors\"\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\t\"github.com/juju/testing/filetesting\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/hash\"\n)\n\nvar _ = gc.Suite(&FingerprintSuite{})\n\ntype FingerprintSuite struct {\n\tstub *testing.Stub\n\thash *filetesting.StubHash\n}\n\nfunc (s *FingerprintSuite) SetUpTest(c *gc.C) {\n\ts.stub = &testing.Stub{}\n\ts.hash = filetesting.NewStubHash(s.stub, nil)\n}\n\nfunc (s *FingerprintSuite) newHash() stdhash.Hash {\n\ts.stub.AddCall(\"newHash\")\n\ts.stub.NextErr() // Pop one off.\n\n\treturn s.hash\n}\n\nfunc (s *FingerprintSuite) validate(sum []byte) error {\n\ts.stub.AddCall(\"validate\", sum)\n\tif err := s.stub.NextErr(); err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\n\treturn nil\n}\n\nfunc (s *FingerprintSuite) TestNewFingerprintOkay(c *gc.C) {\n\texpected, _ := newFingerprint(c, \"spamspamspam\")\n\n\tfp, err := hash.NewFingerprint(expected, s.validate)\n\tc.Assert(err, jc.ErrorIsNil)\n\tsum := fp.Bytes()\n\n\ts.stub.CheckCallNames(c, \"validate\")\n\tc.Check(sum, jc.DeepEquals, expected)\n}\n\nfunc (s *FingerprintSuite) TestNewFingerprintInvalid(c *gc.C) {\n\texpected, _ := newFingerprint(c, \"spamspamspam\")\n\tfailure := errors.NewNotValid(nil, \"bogus!!!\")\n\ts.stub.SetErrors(failure)\n\n\t_, err := hash.NewFingerprint(expected, s.validate)\n\n\ts.stub.CheckCallNames(c, \"validate\")\n\tc.Check(errors.Cause(err), gc.Equals, failure)\n}\n\nfunc (s *FingerprintSuite) TestNewValidFingerprint(c *gc.C) {\n\texpected, _ := newFingerprint(c, \"spamspamspam\")\n\ts.hash.ReturnSum = expected\n\n\tfp := hash.NewValidFingerprint(s.hash)\n\tsum := fp.Bytes()\n\n\ts.stub.CheckCallNames(c, \"Sum\")\n\tc.Check(sum, jc.DeepEquals, 
expected)\n}\n\nfunc (s *FingerprintSuite) TestGenerateFingerprintOkay(c *gc.C) {\n\texpected, _ := newFingerprint(c, \"spamspamspam\")\n\ts.hash.ReturnSum = expected\n\ts.hash.Writer, _ = filetesting.NewStubWriter(s.stub)\n\treader := filetesting.NewStubReader(s.stub, \"spamspamspam\")\n\n\tfp, err := hash.GenerateFingerprint(reader, s.newHash)\n\tc.Assert(err, jc.ErrorIsNil)\n\tsum := fp.Bytes()\n\n\ts.stub.CheckCallNames(c, \"newHash\", \"Read\", \"Write\", \"Read\", \"Sum\")\n\tc.Check(sum, jc.DeepEquals, expected)\n}\n\nfunc (s *FingerprintSuite) TestGenerateFingerprintNil(c *gc.C) {\n\t_, err := hash.GenerateFingerprint(nil, s.newHash)\n\n\ts.stub.CheckNoCalls(c)\n\tc.Check(err, gc.ErrorMatches, `missing reader`)\n}\n\nfunc (s *FingerprintSuite) TestParseHexFingerprint(c *gc.C) {\n\texpected, hexSum := newFingerprint(c, \"spamspamspam\")\n\n\tfp, err := hash.ParseHexFingerprint(hexSum, s.validate)\n\tc.Assert(err, jc.ErrorIsNil)\n\tsum := fp.Bytes()\n\n\ts.stub.CheckCallNames(c, \"validate\")\n\tc.Check(sum, jc.DeepEquals, expected)\n}\n\nfunc (s *FingerprintSuite) TestString(c *gc.C) {\n\tsum, expected := newFingerprint(c, \"spamspamspam\")\n\tfp, err := hash.NewFingerprint(sum, s.validate)\n\tc.Assert(err, jc.ErrorIsNil)\n\n\thex := fp.String()\n\n\tc.Check(hex, gc.Equals, expected)\n}\n\nfunc (s *FingerprintSuite) TestHex(c *gc.C) {\n\tsum, expected := newFingerprint(c, \"spamspamspam\")\n\tfp, err := hash.NewFingerprint(sum, s.validate)\n\tc.Assert(err, jc.ErrorIsNil)\n\n\thex := fp.String()\n\n\tc.Check(hex, gc.Equals, expected)\n}\n\nfunc (s *FingerprintSuite) TestBytes(c *gc.C) {\n\texpected, _ := newFingerprint(c, \"spamspamspam\")\n\tfp, err := hash.NewFingerprint(expected, s.validate)\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tsum := fp.Bytes()\n\n\tc.Check(sum, jc.DeepEquals, expected)\n}\n\nfunc (s *FingerprintSuite) TestValidateOkay(c *gc.C) {\n\tsum, _ := newFingerprint(c, \"spamspamspam\")\n\tfp, err := hash.NewFingerprint(sum, 
s.validate)\n\tc.Assert(err, jc.ErrorIsNil)\n\n\terr = fp.Validate()\n\n\tc.Check(err, jc.ErrorIsNil)\n}\n\nfunc (s *FingerprintSuite) TestValidateZero(c *gc.C) {\n\tvar fp hash.Fingerprint\n\terr := fp.Validate()\n\n\tc.Check(err, jc.Satisfies, errors.IsNotValid)\n\tc.Check(err, gc.ErrorMatches, `zero-value fingerprint not valid`)\n}\n\nfunc newFingerprint(c *gc.C, data string) ([]byte, string) {\n\thash := sha512.New384()\n\t_, err := hash.Write([]byte(data))\n\tc.Assert(err, jc.ErrorIsNil)\n\tsum := hash.Sum(nil)\n\n\thexStr := hex.EncodeToString(sum)\n\treturn sum, hexStr\n}\n"
  },
  {
    "path": "hash/hash.go",
    "content": "// Copyright 2016 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\n// The hash package provides utilities that support use of the stdlib\n// hash.Hash. Most notably is the Fingerprint type that wraps the\n// checksum of a hash.\n//\n// Conversion between checksums and strings are facailitated through\n// Fingerprint.\n//\n// Here are some hash-related recipes that bring it all together:\n//\n//   - Extract the SHA384 hash while writing to elsewhere, then get the\n//     raw checksum:\n//\n//     newHash, _ := hash.SHA384()\n//     h := newHash()\n//     hashingWriter := io.MultiWriter(writer, h)\n//     if err := writeAll(hashingWriter); err != nil { ... }\n//     fp := hash.NewValidFingerprint(h)\n//     checksum := fp.Bytes()\n//\n//   - Extract the SHA384 hash while reading from elsewhere, then get the\n//     hex-encoded checksum to send over the wire:\n//\n//     newHash, _ := hash.SHA384()\n//     h := newHash()\n//     hashingReader := io.TeeReader(reader, h)\n//     if err := processStream(hashingReader); err != nil { ... }\n//     fp := hash.NewValidFingerprint(h)\n//     hexSum := fp.Hex()\n//     req.Header.Set(\"Content-Sha384\", hexSum)\n//\n// * Turn a checksum sent over the wire back into a fingerprint:\n//\n//\t_, validate := hash.SHA384()\n//\thexSum := req.Header.Get(\"Content-Sha384\")\n//\tvar fp hash.Fingerprint\n//\tif len(hexSum) != 0 {\n//\t    fp, err = hash.ParseHexFingerprint(hexSum, validate)\n//\t    ...\n//\t}\n//\tif fp.IsZero() {\n//\t    ...\n//\t}\npackage hash\n\nimport (\n\t\"crypto/sha512\"\n\t\"hash\"\n\n\t\"github.com/juju/errors\"\n)\n\n// SHA384 returns the newHash and validate functions for use\n// with SHA384 hashes. 
SHA384 is used in several key places in Juju.\nfunc SHA384() (newHash func() hash.Hash, validate func([]byte) error) {\n\tconst digestLenBytes = 384 / 8\n\tvalidate = newSizeChecker(digestLenBytes)\n\treturn sha512.New384, validate\n}\n\nfunc newSizeChecker(size int) func([]byte) error {\n\treturn func(sum []byte) error {\n\t\tif len(sum) < size {\n\t\t\treturn errors.NewNotValid(nil, \"invalid fingerprint (too small)\")\n\t\t}\n\t\tif len(sum) > size {\n\t\t\treturn errors.NewNotValid(nil, \"invalid fingerprint (too big)\")\n\t\t}\n\t\treturn nil\n\t}\n}\n"
  },
  {
    "path": "hash/hash_test.go",
    "content": "// Copyright 2016 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage hash_test\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"strings\"\n\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\t\"github.com/juju/testing/filetesting\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/hash\"\n)\n\nvar _ = gc.Suite(&HashSuite{})\n\ntype HashSuite struct {\n\ttesting.IsolationSuite\n}\n\nfunc (s *HashSuite) TestHashingWriter(c *gc.C) {\n\tdata := \"some data\"\n\tnewHash, _ := hash.SHA384()\n\texpected, err := hash.GenerateFingerprint(strings.NewReader(data), newHash)\n\tc.Assert(err, jc.ErrorIsNil)\n\tvar writer bytes.Buffer\n\n\th := newHash()\n\thashingWriter := io.MultiWriter(&writer, h)\n\t_, err = hashingWriter.Write([]byte(data))\n\tc.Assert(err, jc.ErrorIsNil)\n\tfp := hash.NewValidFingerprint(h)\n\n\tc.Check(fp, jc.DeepEquals, expected)\n\tc.Check(writer.String(), gc.Equals, data)\n}\n\nfunc (s *HashSuite) TestHashingReader(c *gc.C) {\n\texpected := \"some data\"\n\tstub := &testing.Stub{}\n\treader := &filetesting.StubReader{\n\t\tStub: stub,\n\t\tReturnRead: &fakeStream{\n\t\t\tdata: expected,\n\t\t},\n\t}\n\n\tnewHash, validate := hash.SHA384()\n\th := newHash()\n\thashingReader := io.TeeReader(reader, h)\n\tdata, err := ioutil.ReadAll(hashingReader)\n\tc.Assert(err, jc.ErrorIsNil)\n\tfp := hash.NewValidFingerprint(h)\n\thexSum := fp.Hex()\n\tfpAgain, err := hash.ParseHexFingerprint(hexSum, validate)\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tstub.CheckCallNames(c, \"Read\") // The EOF was mixed with the data.\n\tc.Check(string(data), gc.Equals, expected)\n\tc.Check(fpAgain, jc.DeepEquals, fp)\n}\n\ntype fakeStream struct {\n\tdata string\n\tpos  uint64\n}\n\nfunc (f *fakeStream) Read(data []byte) (int, error) {\n\tn := copy(data, f.data[f.pos:])\n\tf.pos += uint64(n)\n\tif f.pos >= uint64(len(f.data)) {\n\t\treturn n, io.EOF\n\t}\n\treturn n, nil\n}\n"
  },
  {
    "path": "hash/package_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage hash_test\n\nimport (\n\tstdtesting \"testing\"\n\n\tgc \"gopkg.in/check.v1\"\n)\n\nfunc Test(t *stdtesting.T) {\n\tgc.TestingT(t)\n}\n"
  },
  {
    "path": "hash/writer.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage hash\n\nimport (\n\t\"encoding/base64\"\n\t\"hash\"\n\t\"io\"\n)\n\n// TODO(ericsnow) Remove HashingWriter and NewHashingWriter().\n\n// HashingWriter wraps an io.Writer, providing the checksum of all data\n// written to it.  A HashingWriter may be used in place of the writer it\n// wraps.\n//\n// Note: HashingWriter is deprecated. Please do not use it. We will\n// remove it ASAP.\ntype HashingWriter struct {\n\thash    hash.Hash\n\twrapped io.Writer\n}\n\n// NewHashingWriter returns a new HashingWriter that wraps the provided\n// writer and the hasher.\n//\n// Example:\n//   hw := NewHashingWriter(w, sha1.New())\n//   io.Copy(hw, reader)\n//   hash := hw.Base64Sum()\n//\n// Note: NewHashingWriter is deprecated. Please do not use it. We will\n// remove it ASAP.\nfunc NewHashingWriter(writer io.Writer, hasher hash.Hash) *HashingWriter {\n\treturn &HashingWriter{\n\t\thash:    hasher,\n\t\twrapped: io.MultiWriter(writer, hasher),\n\t}\n}\n\n// Base64Sum returns the base64 encoded hash.\nfunc (hw HashingWriter) Base64Sum() string {\n\tsumBytes := hw.hash.Sum(nil)\n\treturn base64.StdEncoding.EncodeToString(sumBytes)\n}\n\n// Write writes to both the wrapped file and the hash.\nfunc (hw *HashingWriter) Write(data []byte) (int, error) {\n\t// No trace because some callers, like ioutil.ReadAll(), won't work.\n\treturn hw.wrapped.Write(data)\n}\n"
  },
  {
    "path": "hash/writer_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage hash_test\n\nimport (\n\t\"bytes\"\n\n\t\"github.com/juju/errors\"\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\t\"github.com/juju/testing/filetesting\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/hash\"\n)\n\nvar _ = gc.Suite(&WriterSuite{})\n\ntype WriterSuite struct {\n\ttesting.IsolationSuite\n\n\tstub    *testing.Stub\n\twBuffer *bytes.Buffer\n\twriter  *filetesting.StubWriter\n\thBuffer *bytes.Buffer\n\thash    *filetesting.StubHash\n}\n\nfunc (s *WriterSuite) SetUpTest(c *gc.C) {\n\ts.IsolationSuite.SetUpTest(c)\n\n\ts.stub = &testing.Stub{}\n\ts.wBuffer = new(bytes.Buffer)\n\ts.writer = &filetesting.StubWriter{\n\t\tStub:        s.stub,\n\t\tReturnWrite: s.wBuffer,\n\t}\n\ts.hBuffer = new(bytes.Buffer)\n\ts.hash = filetesting.NewStubHash(s.stub, s.hBuffer)\n}\n\nfunc (s *WriterSuite) TestHashingWriterWriteEmpty(c *gc.C) {\n\tw := hash.NewHashingWriter(s.writer, s.hash)\n\tn, err := w.Write(nil)\n\tc.Assert(err, jc.ErrorIsNil)\n\n\ts.stub.CheckCallNames(c, \"Write\", \"Write\")\n\tc.Check(n, gc.Equals, 0)\n\tc.Check(s.wBuffer.String(), gc.Equals, \"\")\n\tc.Check(s.hBuffer.String(), gc.Equals, \"\")\n}\n\nfunc (s *WriterSuite) TestHashingWriterWriteSmall(c *gc.C) {\n\tw := hash.NewHashingWriter(s.writer, s.hash)\n\tn, err := w.Write([]byte(\"spam\"))\n\tc.Assert(err, jc.ErrorIsNil)\n\n\ts.stub.CheckCallNames(c, \"Write\", \"Write\")\n\tc.Check(n, gc.Equals, 4)\n\tc.Check(s.wBuffer.String(), gc.Equals, \"spam\")\n\tc.Check(s.hBuffer.String(), gc.Equals, \"spam\")\n}\n\nfunc (s *WriterSuite) TestHashingWriterWriteFileError(c *gc.C) {\n\tw := hash.NewHashingWriter(s.writer, s.hash)\n\tfailure := errors.New(\"<failed>\")\n\ts.stub.SetErrors(failure)\n\n\t_, err := w.Write([]byte(\"spam\"))\n\n\ts.stub.CheckCallNames(c, \"Write\")\n\tc.Check(errors.Cause(err), gc.Equals, failure)\n}\n\nfunc (s 
*WriterSuite) TestHashingWriterBase64Sum(c *gc.C) {\n\ts.hash.ReturnSum = []byte(\"spam\")\n\tw := hash.NewHashingWriter(s.writer, s.hash)\n\tb64sum := w.Base64Sum()\n\n\ts.stub.CheckCallNames(c, \"Sum\")\n\tc.Check(b64sum, gc.Equals, \"c3BhbQ==\")\n}\n"
  },
  {
    "path": "home_unix.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n//go:build !windows\n// +build !windows\n\npackage utils\n\nimport (\n\t\"os\"\n)\n\n// Home returns the os-specific home path.\n// Always returns the \"real\" home, not the\n// confined home that is used when running\n// inside a strictly confined snap.\nfunc Home() string {\n\t// Used when running inside a confined snap.\n\trealHome, exists := os.LookupEnv(\"SNAP_REAL_HOME\")\n\tif exists {\n\t\treturn realHome\n\t}\n\treturn os.Getenv(\"HOME\")\n}\n\n// SetHome sets the os-specific home path in the environment.\nfunc SetHome(s string) error {\n\tif _, exists := os.LookupEnv(\"SNAP_REAL_HOME\"); exists {\n\t\treturn os.Setenv(\"SNAP_REAL_HOME\", s)\n\t}\n\treturn os.Setenv(\"HOME\", s)\n}\n"
  },
  {
    "path": "home_unix_test.go",
    "content": "// Copyright 2011, 2012, 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n//go:build !windows\n// +build !windows\n\npackage utils_test\n\nimport (\n\t\"github.com/juju/testing\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4\"\n)\n\ntype homeSuite struct {\n\ttesting.IsolationSuite\n}\n\nvar _ = gc.Suite(&homeSuite{})\n\nfunc (s *homeSuite) TestHomeLinux(c *gc.C) {\n\th := \"/home/foo/bar\"\n\ts.PatchEnvironment(\"HOME\", h)\n\tc.Check(utils.Home(), gc.Equals, h)\n}\n\nfunc (s *homeSuite) TestHomeConfined(c *gc.C) {\n\th := \"/home/foo/bar\"\n\ts.PatchEnvironment(\"HOME\", \"/home/user/snap/foo/1\")\n\ts.PatchEnvironment(\"SNAP_REAL_HOME\", h)\n\tc.Check(utils.Home(), gc.Equals, h)\n}\n"
  },
  {
    "path": "home_windows.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils\n\nimport (\n\t\"os\"\n\t\"path/filepath\"\n)\n\n// Home returns the os-specific home path as specified in the environment.\nfunc Home() string {\n\treturn filepath.Join(os.Getenv(\"HOMEDRIVE\"), os.Getenv(\"HOMEPATH\"))\n}\n\n// SetHome sets the os-specific home path in the environment.\nfunc SetHome(s string) error {\n\tv := filepath.VolumeName(s)\n\tif v != \"\" {\n\t\tif err := os.Setenv(\"HOMEDRIVE\", v); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn os.Setenv(\"HOMEPATH\", s[len(v):])\n}\n"
  },
  {
    "path": "home_windows_test.go",
    "content": "// Copyright 2011, 2012, 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils_test\n\nimport (\n\t\"os\"\n\n\t\"github.com/juju/testing\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4\"\n)\n\ntype homeSuite struct {\n\ttesting.IsolationSuite\n}\n\nvar _ = gc.Suite(&homeSuite{})\n\nfunc (s *homeSuite) TestHome(c *gc.C) {\n\ts.PatchEnvironment(\"HOMEPATH\", \"\")\n\ts.PatchEnvironment(\"HOMEDRIVE\", \"\")\n\n\tdrive := \"P:\"\n\tpath := `\\home\\foo\\bar`\n\th := drive + path\n\tutils.SetHome(h)\n\tc.Check(os.Getenv(\"HOMEPATH\"), gc.Equals, path)\n\tc.Check(os.Getenv(\"HOMEDRIVE\"), gc.Equals, drive)\n\tc.Check(utils.Home(), gc.Equals, h)\n\n\t// now test that if we only set the path, we don't mess with the drive\n\n\tpath2 := `\\home\\someotherfoo\\bar`\n\n\tutils.SetHome(path2)\n\n\tc.Check(os.Getenv(\"HOMEPATH\"), gc.Equals, path2)\n\tc.Check(os.Getenv(\"HOMEDRIVE\"), gc.Equals, drive)\n\tc.Check(utils.Home(), gc.Equals, drive+path2)\n}\n"
  },
  {
    "path": "isubuntu.go",
    "content": "// Copyright 2011, 2012, 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils\n\nimport (\n\t\"strings\"\n)\n\n// IsUbuntu executes lxb_release to see if the host OS is Ubuntu.\nfunc IsUbuntu() bool {\n\tout, err := RunCommand(\"lsb_release\", \"-i\", \"-s\")\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn strings.TrimSpace(out) == \"Ubuntu\"\n}\n"
  },
  {
    "path": "isubuntu_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils_test\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4\"\n)\n\ntype IsUbuntuSuite struct {\n\ttesting.IsolationSuite\n}\n\nvar _ = gc.Suite(&IsUbuntuSuite{})\n\nfunc (s *IsUbuntuSuite) patchLsbRelease(c *gc.C, name string) {\n\tvar content string\n\tvar execName string\n\tif runtime.GOOS != \"windows\" {\n\t\tcontent = fmt.Sprintf(\"#!/bin/bash --norc\\n%s\", name)\n\t\texecName = \"lsb_release\"\n\t} else {\n\t\texecName = \"lsb_release.bat\"\n\t\tcontent = fmt.Sprintf(\"@echo off\\r\\n%s\", name)\n\t}\n\tpatchExecutable(s, c.MkDir(), execName, content)\n}\n\nfunc (s *IsUbuntuSuite) TestIsUbuntu(c *gc.C) {\n\ts.patchLsbRelease(c, \"echo Ubuntu\")\n\tc.Assert(utils.IsUbuntu(), jc.IsTrue)\n}\n\nfunc (s *IsUbuntuSuite) TestIsNotUbuntu(c *gc.C) {\n\ts.patchLsbRelease(c, \"echo Windows NT\")\n\tc.Assert(utils.IsUbuntu(), jc.IsFalse)\n}\n\nfunc (s *IsUbuntuSuite) TestIsNotUbuntuLsbReleaseNotFound(c *gc.C) {\n\tif runtime.GOOS != \"windows\" {\n\t\ts.patchLsbRelease(c, \"exit 127\")\n\t}\n\tc.Assert(utils.IsUbuntu(), jc.IsFalse)\n}\n"
  },
  {
    "path": "jsonhttp/jsonhttp.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\n// Package jsonhttp provides general functions for returning\n// JSON responses to HTTP requests. It is agnostic about\n// the specific form of any returned errors.\npackage jsonhttp\n\nimport (\n\t\"encoding/json\"\n\t\"net/http\"\n\n\t\"github.com/juju/errors\"\n)\n\n// ErrorToResponse represents a function that can convert a Go error\n// into a form that can be returned as a JSON body from an HTTP request.\n// The httpStatus value reports the desired HTTP status.\ntype ErrorToResponse func(err error) (httpStatus int, errorBody any)\n\n// ErrorHandler is like http.Handler except it returns an error\n// which may be returned as the error body of the response.\n// An ErrorHandler function should not itself write to the ResponseWriter\n// if it returns an error.\ntype ErrorHandler func(http.ResponseWriter, *http.Request) error\n\n// HandleErrors returns a function that can be used to convert an ErrorHandler\n// into an http.Handler. 
The given errToResp parameter is used to convert\n// any non-nil error returned by handle to the response in the HTTP body.\nfunc HandleErrors(errToResp ErrorToResponse) func(handle ErrorHandler) http.Handler {\n\twriteError := WriteError(errToResp)\n\treturn func(handle ErrorHandler) http.Handler {\n\t\tf := func(w http.ResponseWriter, req *http.Request) {\n\t\t\tw1 := responseWriter{\n\t\t\t\tResponseWriter: w,\n\t\t\t}\n\t\t\tif err := handle(&w1, req); err != nil {\n\t\t\t\t// We write the error only if the header hasn't\n\t\t\t\t// already been written, because if it has, then\n\t\t\t\t// we will not be able to set the appropriate error\n\t\t\t\t// response code, and there's a danger that we\n\t\t\t\t// may be corrupting output by appending\n\t\t\t\t// a JSON error message to it.\n\t\t\t\tif !w1.headerWritten {\n\t\t\t\t\twriteError(w, err)\n\t\t\t\t}\n\t\t\t\t// TODO log the error?\n\t\t\t}\n\t\t}\n\t\treturn http.HandlerFunc(f)\n\t}\n}\n\n// responseWriter wraps http.ResponseWriter but allows us\n// to find out whether any body has already been written.\ntype responseWriter struct {\n\theaderWritten bool\n\thttp.ResponseWriter\n}\n\nfunc (w *responseWriter) Write(data []byte) (int, error) {\n\tw.headerWritten = true\n\treturn w.ResponseWriter.Write(data)\n}\n\nfunc (w *responseWriter) WriteHeader(code int) {\n\tw.headerWritten = true\n\tw.ResponseWriter.WriteHeader(code)\n}\n\n// Flush implements http.Flusher.Flush.\nfunc (w *responseWriter) Flush() {\n\tw.headerWritten = true\n\tif f, ok := w.ResponseWriter.(http.Flusher); ok {\n\t\tf.Flush()\n\t}\n}\n\n// Ensure statically that responseWriter does implement http.Flusher.\nvar _ http.Flusher = (*responseWriter)(nil)\n\n// WriteError returns a function that can be used to write an error to a ResponseWriter\n// and set the HTTP status code. 
The errToResp parameter is used to determine\n// the actual error value and status to write.\nfunc WriteError(errToResp ErrorToResponse) func(w http.ResponseWriter, err error) {\n\treturn func(w http.ResponseWriter, err error) {\n\t\tstatus, resp := errToResp(err)\n\t\t_ = WriteJSON(w, status, resp)\n\t}\n}\n\n// WriteJSON writes the given value to the ResponseWriter\n// and sets the HTTP status to the given code.\nfunc WriteJSON(w http.ResponseWriter, code int, val any) error {\n\t// TODO consider marshalling directly to w using json.NewEncoder.\n\t// pro: this will not require a full buffer allocation.\n\t// con: if there's an error after the first write, it will be lost.\n\tdata, err := json.Marshal(val)\n\tif err != nil {\n\t\t// TODO(rog) log an error if this fails and lose the\n\t\t// error return, because most callers will need\n\t\t// to do that anyway.\n\t\treturn errors.Mask(err)\n\t}\n\tw.Header().Set(\"content-type\", \"application/json\")\n\tw.WriteHeader(code)\n\t_, _ = w.Write(data)\n\treturn nil\n}\n\n// JSONHandler is like http.Handler except that it returns a\n// body (to be converted to JSON) and an error.\n// The Header parameter can be used to set\n// custom header on the response.\ntype JSONHandler func(http.Header, *http.Request) (any, error)\n\n// HandleJSON returns a function that can be used to convert a JSONHandler\n// into an http.Handler. 
The given errToResp parameter is used to convert\n// any non-nil error returned by handle to the response in the HTTP body.\n// If it returns a nil value, the original error is returned as a JSON string.\nfunc HandleJSON(errToResp ErrorToResponse) func(handle JSONHandler) http.Handler {\n\thandleErrors := HandleErrors(errToResp)\n\treturn func(handle JSONHandler) http.Handler {\n\t\tf := func(w http.ResponseWriter, req *http.Request) error {\n\t\t\tval, err := handle(w.Header(), req)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Trace(err)\n\t\t\t}\n\t\t\treturn WriteJSON(w, http.StatusOK, val)\n\t\t}\n\t\treturn handleErrors(f)\n\t}\n}\n"
  },
  {
    "path": "jsonhttp/jsonhttp_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage jsonhttp_test\n\nimport (\n\t\"encoding/json\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\n\t\"github.com/juju/errors\"\n\t\"github.com/juju/utils/v4/jsonhttp\"\n\tgc \"gopkg.in/check.v1\"\n)\n\ntype suite struct{}\n\nvar _ = gc.Suite(&suite{})\n\nfunc (*suite) TestWriteJSON(c *gc.C) {\n\trec := httptest.NewRecorder()\n\ttype Number struct {\n\t\tN int\n\t}\n\terr := jsonhttp.WriteJSON(rec, http.StatusTeapot, Number{1234})\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(rec.Code, gc.Equals, http.StatusTeapot)\n\tc.Assert(rec.Body.String(), gc.Equals, `{\"N\":1234}`)\n\tc.Assert(rec.Header().Get(\"content-type\"), gc.Equals, \"application/json\")\n}\n\nvar (\n\terrUnauth = errors.New(\"unauth\")\n\terrBadReq = errors.New(\"bad request\")\n\terrOther  = errors.New(\"other\")\n\terrNil    = errors.New(\"nil result\")\n)\n\ntype errorResponse struct {\n\tMessage string\n}\n\nfunc errorToResponse(err error) (int, any) {\n\tresp := &errorResponse{\n\t\tMessage: err.Error(),\n\t}\n\tstatus := http.StatusInternalServerError\n\tswitch errors.Cause(err) {\n\tcase errUnauth:\n\t\tstatus = http.StatusUnauthorized\n\tcase errBadReq:\n\t\tstatus = http.StatusBadRequest\n\tcase errNil:\n\t\treturn status, nil\n\t}\n\treturn status, &resp\n}\n\nvar writeErrorTests = []struct {\n\terr          error\n\texpectStatus int\n\texpectResp   *errorResponse\n}{{\n\terr:          errUnauth,\n\texpectStatus: http.StatusUnauthorized,\n\texpectResp: &errorResponse{\n\t\tMessage: errUnauth.Error(),\n\t},\n}, {\n\terr:          errBadReq,\n\texpectStatus: http.StatusBadRequest,\n\texpectResp: &errorResponse{\n\t\tMessage: errBadReq.Error(),\n\t},\n}, {\n\terr:          errOther,\n\texpectStatus: http.StatusInternalServerError,\n\texpectResp: &errorResponse{\n\t\tMessage: errOther.Error(),\n\t},\n}, {\n\terr:          errNil,\n\texpectStatus: http.StatusInternalServerError,\n}}\n\nfunc 
(s *suite) TestWriteError(c *gc.C) {\n\twriteError := jsonhttp.WriteError(errorToResponse)\n\tfor i, test := range writeErrorTests {\n\t\tc.Logf(\"%d: %s\", i, test.err)\n\t\trec := httptest.NewRecorder()\n\t\twriteError(rec, test.err)\n\t\tresp := parseErrorResponse(c, rec.Body.Bytes())\n\t\tc.Assert(resp, gc.DeepEquals, test.expectResp)\n\t\tc.Assert(rec.Code, gc.Equals, test.expectStatus)\n\t}\n}\n\nfunc parseErrorResponse(c *gc.C, body []byte) *errorResponse {\n\tvar errResp *errorResponse\n\terr := json.Unmarshal(body, &errResp)\n\tc.Assert(err, gc.IsNil)\n\treturn errResp\n}\n\nfunc (s *suite) TestHandleErrors(c *gc.C) {\n\thandleErrors := jsonhttp.HandleErrors(errorToResponse)\n\n\t// Test when handler returns an error.\n\thandler := handleErrors(func(http.ResponseWriter, *http.Request) error {\n\t\treturn errUnauth\n\t})\n\trec := httptest.NewRecorder()\n\thandler.ServeHTTP(rec, new(http.Request))\n\tc.Assert(rec.Code, gc.Equals, http.StatusUnauthorized)\n\tresp := parseErrorResponse(c, rec.Body.Bytes())\n\tc.Assert(resp, gc.DeepEquals, &errorResponse{\n\t\tMessage: errUnauth.Error(),\n\t})\n\n\t// Test when handler returns nil.\n\thandler = handleErrors(func(w http.ResponseWriter, _ *http.Request) error {\n\t\tw.WriteHeader(http.StatusCreated)\n\t\tw.Write([]byte(\"something\"))\n\t\treturn nil\n\t})\n\trec = httptest.NewRecorder()\n\thandler.ServeHTTP(rec, new(http.Request))\n\tc.Assert(rec.Code, gc.Equals, http.StatusCreated)\n\tc.Assert(rec.Body.String(), gc.Equals, \"something\")\n}\n\nvar handleErrorsWithErrorAfterWriteHeaderTests = []struct {\n\tabout            string\n\tcauseWriteHeader func(w http.ResponseWriter)\n}{{\n\tabout: \"write\",\n\tcauseWriteHeader: func(w http.ResponseWriter) {\n\t\tw.Write([]byte(\"\"))\n\t},\n}, {\n\tabout: \"write header\",\n\tcauseWriteHeader: func(w http.ResponseWriter) {\n\t\tw.WriteHeader(http.StatusOK)\n\t},\n}, {\n\tabout: \"flush\",\n\tcauseWriteHeader: func(w http.ResponseWriter) 
{\n\t\tw.(http.Flusher).Flush()\n\t},\n}}\n\nfunc (s *suite) TestHandleErrorsWithErrorAfterWriteHeader(c *gc.C) {\n\thandleErrors := jsonhttp.HandleErrors(errorToResponse)\n\tfor i, test := range handleErrorsWithErrorAfterWriteHeaderTests {\n\t\tc.Logf(\"test %d: %s\", i, test.about)\n\t\thandler := handleErrors(func(w http.ResponseWriter, _ *http.Request) error {\n\t\t\ttest.causeWriteHeader(w)\n\t\t\treturn errors.New(\"unexpected\")\n\t\t})\n\t\trec := httptest.NewRecorder()\n\t\thandler.ServeHTTP(rec, new(http.Request))\n\t\tc.Assert(rec.Code, gc.Equals, http.StatusOK)\n\t\tc.Assert(rec.Body.String(), gc.Equals, \"\")\n\t}\n}\n\nfunc (s *suite) TestHandleJSON(c *gc.C) {\n\thandleJSON := jsonhttp.HandleJSON(errorToResponse)\n\n\t// Test when handler returns an error.\n\thandler := handleJSON(func(http.Header, *http.Request) (any, error) {\n\t\treturn nil, errUnauth\n\t})\n\trec := httptest.NewRecorder()\n\thandler.ServeHTTP(rec, new(http.Request))\n\tresp := parseErrorResponse(c, rec.Body.Bytes())\n\tc.Assert(resp, gc.DeepEquals, &errorResponse{\n\t\tMessage: errUnauth.Error(),\n\t})\n\tc.Assert(rec.Code, gc.Equals, http.StatusUnauthorized)\n\n\t// Test when handler returns a body.\n\thandler = handleJSON(func(h http.Header, _ *http.Request) (any, error) {\n\t\th.Set(\"Some-Header\", \"value\")\n\t\treturn \"something\", nil\n\t})\n\trec = httptest.NewRecorder()\n\thandler.ServeHTTP(rec, new(http.Request))\n\tc.Assert(rec.Code, gc.Equals, http.StatusOK)\n\tc.Assert(rec.Body.String(), gc.Equals, `\"something\"`)\n\tc.Assert(rec.Header().Get(\"Some-Header\"), gc.Equals, \"value\")\n}\n"
  },
  {
    "path": "jsonhttp/package_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage jsonhttp_test\n\nimport (\n\t\"testing\"\n\n\tgc \"gopkg.in/check.v1\"\n)\n\nfunc TestPackage(t *testing.T) {\n\tgc.TestingT(t)\n}\n"
  },
  {
    "path": "keyvalues/keyvalues.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\n// The keyvalues package implements a set of functions for parsing key=value data,\n// usually passed in as command-line parameters to juju subcommands, e.g.\n// juju-set mongodb logging=true\npackage keyvalues\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\n// DuplicateError signals that a duplicate key was encountered while parsing\n// the input into a map.\ntype DuplicateError string\n\nfunc (e DuplicateError) Error() string {\n\treturn string(e)\n}\n\n// Parse parses the supplied string slice into a map mapping\n// keys to values. Duplicate keys cause an error to be returned.\nfunc Parse(src []string, allowEmptyValues bool) (map[string]string, error) {\n\tresults := map[string]string{}\n\tfor _, kv := range src {\n\t\tif kv == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tparts := strings.SplitN(kv, \"=\", 2)\n\t\tif len(parts) != 2 {\n\t\t\treturn nil, fmt.Errorf(`expected \"key=value\", got %q`, kv)\n\t\t}\n\t\tkey, value := strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1])\n\t\tif len(key) == 0 || (!allowEmptyValues && len(value) == 0) {\n\t\t\treturn nil, fmt.Errorf(`expected \"key=value\", got \"%s=%s\"`, key, value)\n\t\t}\n\t\tif _, exists := results[key]; exists {\n\t\t\treturn nil, DuplicateError(fmt.Sprintf(\"key %q specified more than once\", key))\n\t\t}\n\t\tresults[key] = value\n\t}\n\treturn results, nil\n}\n"
  },
  {
    "path": "keyvalues/keyvalues_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage keyvalues_test\n\nimport (\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/keyvalues\"\n)\n\ntype keyValuesSuite struct{}\n\nvar _ = gc.Suite(&keyValuesSuite{})\n\nvar testCases = []struct {\n\tabout         string\n\tinput         []string\n\tallowEmptyVal bool\n\toutput        map[string]string\n\terror         string\n}{{\n\tabout:         \"simple test case\",\n\tinput:         []string{\"key=value\"},\n\tallowEmptyVal: false,\n\toutput:        map[string]string{\"key\": \"value\"},\n\terror:         \"\",\n}, {\n\tabout:         \"empty list\",\n\tinput:         []string{},\n\tallowEmptyVal: false,\n\toutput:        map[string]string{},\n\terror:         \"\",\n}, {\n\tabout:         \"nil list\",\n\tinput:         nil,\n\tallowEmptyVal: false,\n\toutput:        map[string]string{},\n\terror:         \"\",\n}, {\n\tabout:         \"invalid format - missing value\",\n\tinput:         []string{\"key\"},\n\tallowEmptyVal: false,\n\toutput:        nil,\n\terror:         `expected \"key=value\", got \"key\"`,\n}, {\n\tabout:         \"invalid format - missing value\",\n\tinput:         []string{\"key=\"},\n\tallowEmptyVal: false,\n\toutput:        nil,\n\terror:         `expected \"key=value\", got \"key=\"`,\n}, {\n\tabout:         \"invalid format - missing key\",\n\tinput:         []string{\"=value\"},\n\tallowEmptyVal: false,\n\toutput:        nil,\n\terror:         `expected \"key=value\", got \"=value\"`,\n}, {\n\tabout:         \"invalid format\",\n\tinput:         []string{\"=\"},\n\tallowEmptyVal: false,\n\toutput:        nil,\n\terror:         `expected \"key=value\", got \"=\"`,\n}, {\n\tabout:         \"invalid format, allowing empty\",\n\tinput:         []string{\"=\"},\n\tallowEmptyVal: true,\n\toutput:        nil,\n\terror:         `expected \"key=value\", got \"=\"`,\n}, {\n\tabout:         \"duplicate 
keys\",\n\tinput:         []string{\"key=value\", \"key=value\"},\n\tallowEmptyVal: true,\n\toutput:        nil,\n\terror:         `key \"key\" specified more than once`,\n}, {\n\tabout:         \"multiple keys\",\n\tinput:         []string{\"key=value\", \"key2=value\", \"key3=value\"},\n\tallowEmptyVal: true,\n\toutput:        map[string]string{\"key\": \"value\", \"key2\": \"value\", \"key3\": \"value\"},\n\terror:         \"\",\n}, {\n\tabout:         \"empty value\",\n\tinput:         []string{\"key=\"},\n\tallowEmptyVal: true,\n\toutput:        map[string]string{\"key\": \"\"},\n\terror:         \"\",\n}, {\n\tabout:         \"whitespace trimmed\",\n\tinput:         []string{\"key=value\\n\", \"key2\\t=\\tvalue2\"},\n\tallowEmptyVal: true,\n\toutput:        map[string]string{\"key\": \"value\", \"key2\": \"value2\"},\n\terror:         \"\",\n}, {\n\tabout:         \"whitespace trimming and duplicate keys\",\n\tinput:         []string{\"key =value\", \"key\\t=\\tvalue2\"},\n\tallowEmptyVal: true,\n\toutput:        nil,\n\terror:         `key \"key\" specified more than once`,\n}, {\n\tabout:         \"whitespace trimming and empty value not allowed\",\n\tinput:         []string{\"key=    \"},\n\tallowEmptyVal: false,\n\toutput:        nil,\n\terror:         `expected \"key=value\", got \"key=\"`,\n}, {\n\tabout:         \"whitespace trimming and empty value\",\n\tinput:         []string{\"key=    \"},\n\tallowEmptyVal: true,\n\toutput:        map[string]string{\"key\": \"\"},\n\terror:         \"\",\n}, {\n\tabout:         \"whitespace trimming and missing key\",\n\tinput:         []string{\"   =value\"},\n\tallowEmptyVal: true,\n\toutput:        nil,\n\terror:         `expected \"key=value\", got \"=value\"`,\n}, {\n\tabout:         \"empty inputs are skipped\",\n\tinput:         []string{\"key=value\", \"\", \"foo=bar\"},\n\tallowEmptyVal: true,\n\toutput:        map[string]string{\"key\": \"value\", \"foo\": \"bar\"},\n\terror:         \"\",\n}}\n\nfunc 
(keyValuesSuite) TestMapParsing(c *gc.C) {\n\tfor i, t := range testCases {\n\t\tc.Logf(\"test %d: %s\", i, t.about)\n\t\tresult, err := keyvalues.Parse(t.input, t.allowEmptyVal)\n\t\tc.Check(result, gc.DeepEquals, t.output)\n\t\tif t.error == \"\" {\n\t\t\tc.Check(err, gc.IsNil)\n\t\t} else {\n\t\t\tc.Check(err, gc.ErrorMatches, t.error)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "keyvalues/package_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage keyvalues_test\n\nimport (\n\t\"testing\"\n\n\tgc \"gopkg.in/check.v1\"\n)\n\nfunc TestPackage(t *testing.T) {\n\tgc.TestingT(t)\n}\n"
  },
  {
    "path": "limiter.go",
    "content": "// Copyright 2011, 2012, 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils\n\nimport (\n\t\"fmt\"\n\t\"math/rand\"\n\t\"time\"\n\n\t\"github.com/juju/clock\"\n)\n\ntype empty struct{}\ntype limiter struct {\n\twait     chan empty\n\tminPause time.Duration\n\tmaxPause time.Duration\n\tclock    clock.Clock\n}\n\n// Limiter represents a limited resource (eg a semaphore).\ntype Limiter interface {\n\t// Acquire another unit of the resource.\n\t// Acquire returns false to indicate there is no more availability,\n\t// until another entity calls Release.\n\tAcquire() bool\n\t// AcquireWait requests a unit of resource, but blocks until one is\n\t// available.\n\tAcquireWait()\n\t// Release returns a unit of the resource. Calling Release when there\n\t// are no units Acquired is an error.\n\tRelease() error\n}\n\n// NewLimiter creates a limiter.\nfunc NewLimiter(maxAllowed int) Limiter {\n\treturn NewLimiterWithPause(maxAllowed, 0, 0, nil)\n}\n\n// NewLimiterWithPause creates a limiter. If minPause and maxPause are > 0,\n// there will be a random delay in that duration range before attempting an Acquire.\nfunc NewLimiterWithPause(maxAllowed int, minPause, maxPause time.Duration, clk clock.Clock) Limiter {\n\trand.Seed(time.Now().UTC().UnixNano())\n\tif clk == nil {\n\t\tclk = clock.WallClock\n\t}\n\treturn limiter{\n\t\twait:     make(chan empty, maxAllowed),\n\t\tminPause: minPause,\n\t\tmaxPause: maxPause,\n\t\tclock:    clk,\n\t}\n}\n\n// Acquire requests some resources that you can return later.\n// It returns 'true' if there are resources available, but false if they are\n// not. 
Callers are responsible for calling Release if this returns true, but\n// should not release if this returns false.\nfunc (l limiter) Acquire() bool {\n\t// Pause before attempting to grab a slot.\n\t// This is optional depending on what was used to\n\t// construct this limiter, and is used to throttle\n\t// incoming connections.\n\tl.pause()\n\te := empty{}\n\tselect {\n\tcase l.wait <- e:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\n// AcquireWait waits for the resource to become available before returning.\nfunc (l limiter) AcquireWait() {\n\te := empty{}\n\tl.wait <- e\n}\n\n// Release returns the resource to the available pool.\nfunc (l limiter) Release() error {\n\tselect {\n\tcase <-l.wait:\n\t\treturn nil\n\tdefault:\n\t\treturn fmt.Errorf(\"Release without an associated Acquire\")\n\t}\n}\n\nfunc (l limiter) pause() {\n\tif l.minPause <= 0 || l.maxPause <= 0 {\n\t\treturn\n\t}\n\tpauseRange := int((l.maxPause - l.minPause) / time.Millisecond)\n\tpauseTime := time.Duration(rand.Intn(pauseRange)) * time.Millisecond\n\tpauseTime += l.minPause\n\tselect {\n\tcase <-l.clock.After(pauseTime):\n\t}\n}\n"
  },
  {
    "path": "limiter_test.go",
    "content": "// Copyright 2011, 2012, 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils_test\n\nimport (\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/juju/clock/testclock\"\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4\"\n)\n\nconst longWait = 10 * time.Second\n\ntype limiterSuite struct {\n\ttesting.IsolationSuite\n}\n\nvar _ = gc.Suite(&limiterSuite{})\n\nfunc (*limiterSuite) TestAcquireUntilFull(c *gc.C) {\n\tl := utils.NewLimiter(2)\n\tc.Check(l.Acquire(), jc.IsTrue)\n\tc.Check(l.Acquire(), jc.IsTrue)\n\tc.Check(l.Acquire(), jc.IsFalse)\n}\n\nfunc (*limiterSuite) TestBadRelease(c *gc.C) {\n\tl := utils.NewLimiter(2)\n\tc.Check(l.Release(), gc.ErrorMatches, \"Release without an associated Acquire\")\n}\n\nfunc (*limiterSuite) TestAcquireAndRelease(c *gc.C) {\n\tl := utils.NewLimiter(2)\n\tc.Check(l.Acquire(), jc.IsTrue)\n\tc.Check(l.Acquire(), jc.IsTrue)\n\tc.Check(l.Acquire(), jc.IsFalse)\n\tc.Check(l.Release(), gc.IsNil)\n\tc.Check(l.Acquire(), jc.IsTrue)\n\tc.Check(l.Release(), gc.IsNil)\n\tc.Check(l.Release(), gc.IsNil)\n\tc.Check(l.Release(), gc.ErrorMatches, \"Release without an associated Acquire\")\n}\n\nfunc (*limiterSuite) TestAcquireWaitBlocksUntilRelease(c *gc.C) {\n\tl := utils.NewLimiter(2)\n\tcalls := make([]string, 0, 10)\n\tstart := make(chan bool, 0)\n\twaiting := make(chan bool, 0)\n\tdone := make(chan bool, 0)\n\tgo func() {\n\t\t<-start\n\t\tcalls = append(calls, fmt.Sprintf(\"%v\", l.Acquire()))\n\t\tcalls = append(calls, fmt.Sprintf(\"%v\", l.Acquire()))\n\t\tcalls = append(calls, fmt.Sprintf(\"%v\", l.Acquire()))\n\t\twaiting <- true\n\t\tl.AcquireWait()\n\t\tcalls = append(calls, \"waited\")\n\t\tcalls = append(calls, fmt.Sprintf(\"%v\", l.Acquire()))\n\t\tdone <- true\n\t}()\n\t// Start the routine, and wait for it to get to the first checkpoint\n\tstart <- true\n\tselect {\n\tcase <-waiting:\n\tcase 
<-time.After(longWait):\n\t\tc.Fatalf(\"timed out waiting for 'waiting' to trigger\")\n\t}\n\tc.Check(l.Acquire(), jc.IsFalse)\n\tl.Release()\n\tselect {\n\tcase <-done:\n\tcase <-time.After(longWait):\n\t\tc.Fatalf(\"timed out waiting for 'done' to trigger\")\n\t}\n\tc.Check(calls, gc.DeepEquals, []string{\"true\", \"true\", \"false\", \"waited\", \"false\"})\n}\n\nfunc (*limiterSuite) TestAcquirePauses(c *gc.C) {\n\tclk := testclock.NewClock(time.Now())\n\tl := utils.NewLimiterWithPause(2, 10*time.Millisecond, 20*time.Millisecond, clk)\n\tacquired := make(chan bool, 1)\n\tstart := make(chan bool, 0)\n\tgo func() {\n\t\t<-start\n\t\tdefer l.Release()\n\t\tacquired <- l.Acquire()\n\t}()\n\n\tstart <- true\n\t// Minimum pause time not exceeded, acquire should not happen.\n\tclk.Advance(9 * time.Millisecond)\n\tselect {\n\tcase <-acquired:\n\t\tc.Fail()\n\tcase <-time.After(50 * time.Millisecond):\n\t}\n\n\tclk.Advance(11 * time.Millisecond)\n\tselect {\n\tcase <-acquired:\n\tcase <-time.After(50 * time.Millisecond):\n\t\tc.Fatal(\"acquire failed\")\n\t}\n}\n"
  },
  {
    "path": "multireader.go",
    "content": "// Copyright 2016 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils\n\nimport (\n\t\"io\"\n\t\"sort\"\n\n\t\"github.com/juju/errors\"\n)\n\n// SizeReaderAt combines io.ReaderAt with a Size method.\ntype SizeReaderAt interface {\n\t// Size returns the size of the data readable\n\t// from the reader.\n\tSize() int64\n\tio.ReaderAt\n}\n\n// NewMultiReaderAt is like io.MultiReader but produces a ReaderAt\n// (and Size), instead of just a reader.\n//\n// Note: this implementation was taken from a talk given\n// by Brad Fitzpatrick at OSCON 2013.\n//\n// http://talks.golang.org/2013/oscon-dl.slide#49\n// https://github.com/golang/talks/blob/master/2013/oscon-dl/server-compose.go\nfunc NewMultiReaderAt(parts ...SizeReaderAt) SizeReaderAt {\n\tm := &multiReaderAt{\n\t\tparts: make([]offsetAndSource, 0, len(parts)),\n\t}\n\tvar off int64\n\tfor _, p := range parts {\n\t\tm.parts = append(m.parts, offsetAndSource{off, p})\n\t\toff += p.Size()\n\t}\n\tm.size = off\n\treturn m\n}\n\ntype offsetAndSource struct {\n\toff int64\n\tSizeReaderAt\n}\n\ntype multiReaderAt struct {\n\tparts []offsetAndSource\n\tsize  int64\n}\n\nfunc (m *multiReaderAt) Size() int64 {\n\treturn m.size\n}\n\nfunc (m *multiReaderAt) ReadAt(p []byte, off int64) (n int, err error) {\n\twantN := len(p)\n\n\t// Skip past the requested offset.\n\tskipParts := sort.Search(len(m.parts), func(i int) bool {\n\t\t// This function returns whether parts[i] will\n\t\t// contribute any bytes to our output.\n\t\tpart := m.parts[i]\n\t\treturn part.off+part.Size() > off\n\t})\n\tparts := m.parts[skipParts:]\n\n\t// How far to skip in the first part.\n\tneedSkip := off\n\tif len(parts) > 0 {\n\t\tneedSkip -= parts[0].off\n\t}\n\n\tfor len(parts) > 0 && len(p) > 0 {\n\t\treadP := p\n\t\tpartSize := parts[0].Size()\n\t\tif int64(len(readP)) > partSize-needSkip {\n\t\t\treadP = readP[:partSize-needSkip]\n\t\t}\n\t\tpn, err0 := parts[0].ReadAt(readP, 
needSkip)\n\t\tif err0 != nil {\n\t\t\treturn n, err0\n\t\t}\n\t\tn += pn\n\t\tp = p[pn:]\n\t\tif int64(pn)+needSkip == partSize {\n\t\t\tparts = parts[1:]\n\t\t}\n\t\tneedSkip = 0\n\t}\n\n\tif n != wantN {\n\t\terr = io.ErrUnexpectedEOF\n\t}\n\treturn\n}\n\n// NewMultiReaderSeeker returns an io.ReadSeeker that combines\n// all the given readers into a single one. It assumes that\n// all the seekers are initially positioned at the start.\nfunc NewMultiReaderSeeker(readers ...io.ReadSeeker) io.ReadSeeker {\n\tsreaders := make([]SizeReaderAt, len(readers))\n\tfor i, r := range readers {\n\t\tr1, err := newSizeReaderAt(r)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tsreaders[i] = r1\n\t}\n\treturn &readSeeker{\n\t\tr: NewMultiReaderAt(sreaders...),\n\t}\n}\n\n// newSizeReaderAt adapts an io.ReadSeeker to a SizeReaderAt.\n// Note that it doesn't strictly adhere to the ReaderAt\n// contract because it's not safe to call ReadAt concurrently.\n// This doesn't matter because io.ReadSeeker doesn't\n// need to be thread-safe and this is only used in that\n// context.\nfunc newSizeReaderAt(r io.ReadSeeker) (SizeReaderAt, error) {\n\tsize, err := r.Seek(0, 2)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &sizeReaderAt{\n\t\tr:    r,\n\t\tsize: size,\n\t\toff:  size,\n\t}, nil\n}\n\n// sizeReaderAt adapts an io.ReadSeeker to a SizeReaderAt.\ntype sizeReaderAt struct {\n\tr    io.ReadSeeker\n\tsize int64\n\toff  int64\n}\n\n// ReadAt implements SizeReaderAt.ReadAt.\nfunc (r *sizeReaderAt) ReadAt(buf []byte, off int64) (n int, err error) {\n\tif off != r.off {\n\t\t_, err = r.r.Seek(off, 0)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tr.off = off\n\t}\n\tn, err = io.ReadFull(r.r, buf)\n\tr.off += int64(n)\n\treturn n, err\n}\n\n// Size implements SizeReaderAt.Size.\nfunc (r *sizeReaderAt) Size() int64 {\n\treturn r.size\n}\n\n// readSeeker adapts a SizeReaderAt to an io.ReadSeeker.\ntype readSeeker struct {\n\tr   SizeReaderAt\n\toff int64\n}\n\n// Seek 
implements io.Seeker.Seek.\nfunc (r *readSeeker) Seek(off int64, whence int) (int64, error) {\n\tswitch whence {\n\tcase 0:\n\tcase 1:\n\t\toff += r.off\n\tcase 2:\n\t\toff = r.r.Size() + off\n\t}\n\tif off < 0 {\n\t\treturn 0, errors.New(\"negative position\")\n\t}\n\tr.off = off\n\treturn off, nil\n}\n\n// Read implements io.Reader.Read.\nfunc (r *readSeeker) Read(buf []byte) (int, error) {\n\tn, err := r.r.ReadAt(buf, r.off)\n\tr.off += int64(n)\n\tif err == io.ErrUnexpectedEOF {\n\t\terr = io.EOF\n\t}\n\treturn n, err\n}\n"
  },
  {
    "path": "multireader_test.go",
    "content": "// Copyright 2016 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils_test\n\nimport (\n\t\"io\"\n\t\"io/ioutil\"\n\t\"strings\"\n\t\"testing/iotest\"\n\n\tjc \"github.com/juju/testing/checkers\"\n\t\"github.com/juju/utils/v4\"\n\tgc \"gopkg.in/check.v1\"\n)\n\ntype multiReaderSeekerSuite struct{}\n\nvar _ = gc.Suite(&multiReaderSeekerSuite{})\n\nfunc (*multiReaderSeekerSuite) TestSequentialRead(c *gc.C) {\n\tparts := []string{\n\t\t\"one\",\n\t\t\"two\",\n\t\t\"three\",\n\t\t\"four\",\n\t}\n\tr := newMultiStringReader(parts)\n\tdata, err := ioutil.ReadAll(r)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(string(data), gc.Equals, strings.Join(parts, \"\"))\n}\n\nfunc (*multiReaderSeekerSuite) TestSeekStart(c *gc.C) {\n\tparts := []string{\n\t\t\"one\",\n\t\t\"two\",\n\t\t\"three\",\n\t\t\"four\",\n\t}\n\tall := strings.Join(parts, \"\")\n\tfor off := int64(0); off <= int64(len(all)); off++ {\n\t\tc.Logf(\"-- offset %d\", off)\n\t\tr := newMultiStringReader(parts)\n\t\tgotOff, err := r.Seek(off, 0)\n\t\tc.Assert(err, gc.IsNil)\n\t\tc.Assert(gotOff, gc.Equals, off)\n\n\t\tdata, err := ioutil.ReadAll(r)\n\t\tc.Assert(err, gc.IsNil)\n\t\tc.Assert(string(data), gc.Equals, all[off:])\n\t}\n}\n\nfunc (*multiReaderSeekerSuite) TestSeekEnd(c *gc.C) {\n\tparts := []string{\n\t\t\"one\",\n\t\t\"two\",\n\t\t\"three\",\n\t\t\"four\",\n\t}\n\tall := strings.Join(parts, \"\")\n\tfor off := int64(0); off <= int64(len(all)); off++ {\n\t\tr := newMultiStringReader(parts)\n\t\texpectOff := int64(len(all)) - off\n\t\tgotOff, err := r.Seek(-off, 2)\n\t\tc.Assert(err, gc.IsNil)\n\t\tc.Assert(gotOff, gc.Equals, expectOff)\n\n\t\tdata, err := ioutil.ReadAll(r)\n\t\tc.Assert(err, gc.IsNil)\n\t\tc.Assert(string(data), gc.Equals, all[expectOff:])\n\t}\n}\n\nfunc (*multiReaderSeekerSuite) TestSeekCur(c *gc.C) {\n\tparts := []string{\n\t\t\"one\",\n\t\t\"two\",\n\t\t\"three\",\n\t\t\"four\",\n\t}\n\tall := strings.Join(parts, \"\")\n\tfor off := 
int64(0); off <= int64(len(all)); off++ {\n\t\tfor newOff := int64(0); newOff <= int64(len(all)); newOff++ {\n\t\t\treaders := make([]io.ReadSeeker, len(parts))\n\t\t\tfor i, part := range parts {\n\t\t\t\treaders[i] = strings.NewReader(part)\n\t\t\t}\n\t\t\tr := utils.NewMultiReaderSeeker(readers...)\n\t\t\tgotOff, err := r.Seek(off, 0)\n\t\t\tc.Assert(gotOff, gc.Equals, off)\n\t\t\tc.Assert(err, jc.ErrorIsNil)\n\n\t\t\tdiff := newOff - off\n\t\t\tgotNewOff, err := r.Seek(diff, 1)\n\t\t\tc.Assert(err, gc.IsNil)\n\t\t\tc.Assert(gotNewOff, gc.Equals, newOff)\n\n\t\t\tdata, err := ioutil.ReadAll(r)\n\t\t\tc.Assert(err, gc.IsNil)\n\t\t\tc.Assert(string(data), gc.Equals, all[newOff:])\n\t\t}\n\t}\n}\n\nfunc (*multiReaderSeekerSuite) TestSeekAfterRead(c *gc.C) {\n\tparts := []string{\n\t\t\"one\",\n\t\t\"two\",\n\t\t\"three\",\n\t\t\"four\",\n\t}\n\tall := strings.Join(parts, \"\")\n\tr := newMultiStringReader(parts)\n\tdata, err := ioutil.ReadAll(iotest.OneByteReader(r))\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(string(data), gc.Equals, all)\n\n\toff, err := r.Seek(-8, 2)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(off, gc.Equals, int64(len(all)-8))\n\n\tdata, err = ioutil.ReadAll(r)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(string(data), gc.Equals, \"hreefour\")\n}\n\nfunc (*multiReaderSeekerSuite) TestSeekNegative(c *gc.C) {\n\tr := newMultiStringReader([]string{\"one\", \"two\"})\n\n\t_, err := r.Seek(-1, 0)\n\tc.Assert(err, gc.ErrorMatches, \"negative position\")\n\n\tn, err := r.Seek(0, 0)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(n, gc.Equals, int64(0))\n\n\t_, err = r.Seek(-7, 2)\n\tc.Assert(err, gc.ErrorMatches, \"negative position\")\n\n\tn, err = r.Seek(0, 0)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(n, gc.Equals, int64(0))\n\n\t_, err = r.Seek(-1, 1)\n\tc.Assert(err, gc.ErrorMatches, \"negative position\")\n\n\tn, err = r.Seek(0, 0)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(n, gc.Equals, int64(0))\n}\n\nfunc (*multiReaderSeekerSuite) TestSeekPastEnd(c *gc.C) {\n\tr := 
newMultiStringReader([]string{\"one\", \"two\"})\n\n\tn, err := r.Seek(100, 0)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(n, gc.Equals, int64(100))\n\n\tnr, err := r.Read(make([]byte, 1))\n\tc.Assert(nr, gc.Equals, 0)\n\tc.Assert(err, gc.Equals, io.EOF)\n\n\tn, err = r.Seek(-5, 1)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(n, gc.Equals, int64(95))\n\n\tnr, err = r.Read(make([]byte, 1))\n\tc.Assert(nr, gc.Equals, 0)\n\tc.Assert(err, gc.Equals, io.EOF)\n\n\tn, err = r.Seek(-94, 1)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(n, gc.Equals, int64(1))\n\n\tdata, err := ioutil.ReadAll(r)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(string(data), gc.Equals, \"netwo\")\n}\n\ntype multiReaderAtSuite struct{}\n\nvar _ = gc.Suite(&multiReaderAtSuite{})\n\nfunc (*multiReaderAtSuite) TestReadComplete(c *gc.C) {\n\tparts := []string{\n\t\t\"one\",\n\t\t\"two\",\n\t\t\"three\",\n\t\t\"four\",\n\t}\n\tall := strings.Join(parts, \"\")\n\tr := newMultistringReaderAt(parts)\n\n\tbuf := make([]byte, len(all))\n\tn, err := r.ReadAt(buf, 0)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(n, gc.Equals, len(buf))\n\tc.Assert(string(buf), gc.Equals, all)\n}\n\nfunc (*multiReaderAtSuite) TestReadPartial(c *gc.C) {\n\tparts := []string{\n\t\t\"one\",\n\t\t\"two\",\n\t\t\"three\",\n\t\t\"four\",\n\t}\n\tall := strings.Join(parts, \"\")\n\tr := newMultistringReaderAt(parts)\n\n\tbuf := make([]byte, len(all)-4)\n\tn, err := r.ReadAt(buf, 2)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(n, gc.Equals, len(buf))\n\tc.Assert(string(buf), gc.Equals, \"etwothreefo\")\n}\n\nfunc newMultiStringReader(parts []string) io.ReadSeeker {\n\treaders := make([]io.ReadSeeker, len(parts))\n\tfor i, part := range parts {\n\t\treaders[i] = strings.NewReader(part)\n\t}\n\treturn utils.NewMultiReaderSeeker(readers...)\n}\n\ntype stringReader struct {\n\t*strings.Reader\n}\n\n// This method is implemented in later versions\n// of Go's StringReader but not prior to Go 1.5.\nfunc (r stringReader) Size() int64 {\n\treturn 
int64(r.Len())\n}\n\nfunc newMultistringReaderAt(parts []string) io.ReaderAt {\n\treaders := make([]utils.SizeReaderAt, len(parts))\n\tfor i, part := range parts {\n\t\treaders[i] = stringReader{strings.NewReader(part)}\n\t}\n\treturn utils.NewMultiReaderAt(readers...)\n}\n"
  },
  {
    "path": "naturalsort.go",
    "content": "// Copyright 2016 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"strconv\"\n\t\"unicode\"\n)\n\n// SortStringsNaturally sorts strings according to their natural sort order.\nfunc SortStringsNaturally(s []string) []string {\n\tsort.Sort(naturally(s))\n\treturn s\n}\n\ntype naturally []string\n\nfunc (n naturally) Len() int {\n\treturn len(n)\n}\n\nfunc (n naturally) Swap(a, b int) {\n\tn[a], n[b] = n[b], n[a]\n}\n\n// Less sorts by non-numeric prefix and numeric suffix\n// when one exists.\nfunc (n naturally) Less(a, b int) bool {\n\taVal := n[a]\n\tbVal := n[b]\n\n\tfor {\n\t\t// If bVal is empty, then aVal can't be less than it.\n\t\tif bVal == \"\" {\n\t\t\treturn false\n\t\t}\n\t\t// If aVal is empty here, then it must be less than bVal.\n\t\tif aVal == \"\" {\n\t\t\treturn true\n\t\t}\n\n\t\taPrefix, aNumber, aRemainder := splitAtNumber(aVal)\n\t\tbPrefix, bNumber, bRemainder := splitAtNumber(bVal)\n\t\tif aPrefix != bPrefix {\n\t\t\treturn aPrefix < bPrefix\n\t\t}\n\t\tif aNumber != bNumber {\n\t\t\treturn aNumber < bNumber\n\t\t}\n\n\t\t// Everything is the same so far, try again with the remainder.\n\t\taVal = aRemainder\n\t\tbVal = bRemainder\n\t}\n}\n\n// splitAtNumber splits given string at the first digit, returning the\n// prefix before the number, the integer represented by the first\n// series of digits, and the remainder of the string after the first\n// series of digits. 
If no digits are present, the number is returned\n// as -1 and the remainder is empty.\nfunc splitAtNumber(str string) (string, int, string) {\n\ti := indexOfDigit(str)\n\tif i == -1 {\n\t\t// no numbers\n\t\treturn str, -1, \"\"\n\t}\n\tj := i + indexOfNonDigit(str[i:])\n\tn, err := strconv.Atoi(str[i:j])\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"parsing number %v: %v\", str[i:j], err)) // should never happen\n\t}\n\treturn str[:i], n, str[j:]\n}\n\nfunc indexOfDigit(str string) int {\n\tfor i, r := range str {\n\t\tif unicode.IsDigit(r) {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn -1\n}\n\nfunc indexOfNonDigit(str string) int {\n\tfor i, r := range str {\n\t\tif !unicode.IsDigit(r) {\n\t\t\treturn i\n\t\t}\n\t}\n\treturn len(str)\n}\n"
  },
  {
    "path": "naturalsort_test.go",
    "content": "// Copyright 2016 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils_test\n\nimport (\n\t\"math/rand\"\n\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/testing\"\n\t\"github.com/juju/utils/v4\"\n)\n\ntype naturalSortSuite struct {\n\ttesting.IsolationSuite\n}\n\nvar _ = gc.Suite(&naturalSortSuite{})\n\nfunc (s *naturalSortSuite) TestEmpty(c *gc.C) {\n\tcheckCorrectSort(c, []string{})\n}\n\nfunc (s *naturalSortSuite) TestAlpha(c *gc.C) {\n\tcheckCorrectSort(c, []string{\"abc\", \"bac\", \"cba\"})\n}\n\nfunc (s *naturalSortSuite) TestNumVsString(c *gc.C) {\n\tcheckCorrectSort(c, []string{\"1\", \"a\"})\n}\n\nfunc (s *naturalSortSuite) TestStringVsStringNum(c *gc.C) {\n\tcheckCorrectSort(c, []string{\"a\", \"a1\"})\n}\n\nfunc (s *naturalSortSuite) TestCommonPrefix(c *gc.C) {\n\tcheckCorrectSort(c, []string{\"a1\", \"a1a\", \"a1b\", \"a2b\", \"a2c\"})\n}\n\nfunc (s *naturalSortSuite) TestDifferentNumberLengths(c *gc.C) {\n\tcheckCorrectSort(c, []string{\"a1a\", \"a2\", \"a22a\", \"a333\", \"a333a\", \"a333b\"})\n}\n\nfunc (s *naturalSortSuite) TestZeroPadding(c *gc.C) {\n\tcheckCorrectSort(c, []string{\"a1\", \"a002\", \"a3\"})\n}\n\nfunc (s *naturalSortSuite) TestMixed(c *gc.C) {\n\tcheckCorrectSort(c, []string{\"1a\", \"a1\", \"a1/1\", \"a10\", \"a100\"})\n}\n\nfunc (s *naturalSortSuite) TestSeveralNumericParts(c *gc.C) {\n\tcheckCorrectSort(c, []string{\n\t\t\"x\",\n\t\t\"x1\",\n\t\t\"x1-g0\",\n\t\t\"x1-g1\",\n\t\t\"x1-g2\",\n\t\t\"x1-g10\",\n\t\t\"x2\",\n\t\t\"x2-g0\",\n\t\t\"x2-g2\",\n\t\t\"x11-g0\",\n\t\t\"x11-g0-0\",\n\t\t\"x11-g0-1\",\n\t\t\"x11-g0-10\",\n\t\t\"x11-g0-11\",\n\t\t\"x11-g0-20\",\n\t\t\"x11-g0-100\",\n\t\t\"x11-g10-1\",\n\t\t\"x11-g10-10\",\n\t\t\"xx1\",\n\t\t\"xx10\",\n\t})\n}\n\nfunc (s *naturalSortSuite) TestUnitNameLike(c *gc.C) {\n\tcheckCorrectSort(c, []string{\"a1/1\", \"a1/2\", \"a1/7\", \"a1/11\", \"a1/100\"})\n}\n\nfunc (s *naturalSortSuite) TestMachineIdLike(c *gc.C) 
{\n\tcheckCorrectSort(c, []string{\n\t\t\"1\",\n\t\t\"1/lxc/0\",\n\t\t\"1/lxc/1\",\n\t\t\"1/lxc/2\",\n\t\t\"1/lxc/10\",\n\t\t\"1/lxd/0\",\n\t\t\"1/lxd/1\",\n\t\t\"1/lxd/10\",\n\t\t\"2\",\n\t\t\"11\",\n\t\t\"11/lxc/6\",\n\t\t\"11/lxc/60\",\n\t\t\"20\",\n\t\t\"21\",\n\t})\n}\n\nfunc (s *naturalSortSuite) TestIPs(c *gc.C) {\n\tcheckCorrectSort(c, []string{\n\t\t\"1.1.10.122\",\n\t\t\"001.001.010.123\",\n\t\t\"001.002.010.123\",\n\t\t\"100.001.010.123\",\n\t\t\"100.1.10.124\",\n\t\t\"100.2.10.124\",\n\t})\n}\n\nfunc checkCorrectSort(c *gc.C, expected []string) {\n\tcheckSort(c, expected, reverse)\n\tfor i := 0; i < 5; i++ {\n\t\tcheckSort(c, expected, shuffle)\n\t}\n}\n\nfunc checkSort(c *gc.C, expected []string, xform func([]string)) {\n\tinput := copyStrSlice(expected)\n\txform(input)\n\torigInput := copyStrSlice(input)\n\tutils.SortStringsNaturally(input)\n\tc.Check(input, gc.DeepEquals, expected, gc.Commentf(\"input was: %#v\", origInput))\n}\n\nfunc copyStrSlice(in []string) []string {\n\tout := make([]string, len(in))\n\tcopy(out, in)\n\treturn out\n}\n\nfunc shuffle(a []string) {\n\t// See https://en.wikipedia.org/wiki/Fisher%E2%80%93Yates_shuffle#Modern_method\n\tfor i := len(a) - 1; i >= 1; i-- {\n\t\tj := rand.Intn(i + 1)\n\t\ta[i], a[j] = a[j], a[i]\n\t}\n}\n\nfunc reverse(a []string) {\n\tsize := len(a)\n\tfor i := 0; i < size/2; i++ {\n\t\tj := size - i - 1\n\t\ta[i], a[j] = a[j], a[i]\n\t}\n}\n"
  },
  {
    "path": "network.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\n\t\"github.com/juju/loggo/v2\"\n)\n\nvar logger = loggo.GetLogger(\"juju.utils\")\n\n// GetIPv4Address iterates through the addresses expecting the format from\n// func (ifi *net.Interface) Addrs() ([]net.Addr, error)\nfunc GetIPv4Address(addresses []net.Addr) (string, error) {\n\tfor _, addr := range addresses {\n\t\tip, _, err := net.ParseCIDR(addr.String())\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tipv4 := ip.To4()\n\t\tif ipv4 == nil {\n\t\t\tcontinue\n\t\t}\n\t\treturn ipv4.String(), nil\n\t}\n\treturn \"\", fmt.Errorf(\"no addresses match\")\n}\n\n// GetIPv6Address iterates through the addresses expecting the format from\n// func (ifi *net.Interface) Addrs() ([]net.Addr, error) and returns the first\n// non-link local address.\nfunc GetIPv6Address(addresses []net.Addr) (string, error) {\n\t_, llNet, _ := net.ParseCIDR(\"fe80::/10\")\n\tfor _, addr := range addresses {\n\t\tip, _, err := net.ParseCIDR(addr.String())\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tif ip.To4() == nil && !llNet.Contains(ip) {\n\t\t\treturn ip.String(), nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"no addresses match\")\n}\n\n// GetAddressForInterface looks for the network interface\n// and returns the IPv4 address from the possible addresses.\nfunc GetAddressForInterface(interfaceName string) (string, error) {\n\tiface, err := net.InterfaceByName(interfaceName)\n\tif err != nil {\n\t\tlogger.Errorf(\"cannot find network interface %q: %v\", interfaceName, err)\n\t\treturn \"\", err\n\t}\n\taddrs, err := iface.Addrs()\n\tif err != nil {\n\t\tlogger.Errorf(\"cannot get addresses for network interface %q: %v\", interfaceName, err)\n\t\treturn \"\", err\n\t}\n\treturn GetIPv4Address(addrs)\n}\n\n// GetV4OrV6AddressForInterface looks for the network interface\n// and returns preferably the IPv4 
address, and if it doesn't\n// exist, then the IPv6 address.\nfunc GetV4OrV6AddressForInterface(interfaceName string) (string, error) {\n\tiface, err := net.InterfaceByName(interfaceName)\n\tif err != nil {\n\t\tlogger.Errorf(\"cannot find network interface %q: %v\", interfaceName, err)\n\t\treturn \"\", err\n\t}\n\taddrs, err := iface.Addrs()\n\tif err != nil {\n\t\tlogger.Errorf(\"cannot get addresses for network interface %q: %v\", interfaceName, err)\n\t\treturn \"\", err\n\t}\n\tif ip, err := GetIPv4Address(addrs); err == nil {\n\t\treturn ip, nil\n\t}\n\treturn GetIPv6Address(addrs)\n}\n"
  },
  {
    "path": "network_test.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils_test\n\nimport (\n\t\"net\"\n\n\t\"github.com/juju/testing\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4\"\n)\n\ntype networkSuite struct {\n\ttesting.IsolationSuite\n}\n\nvar _ = gc.Suite(&networkSuite{})\n\ntype fakeAddress struct {\n\taddress string\n}\n\nfunc (fake fakeAddress) Network() string {\n\treturn \"ignored\"\n}\n\nfunc (fake fakeAddress) String() string {\n\treturn fake.address\n}\n\nfunc makeAddresses(values ...string) (result []net.Addr) {\n\tfor _, v := range values {\n\t\tresult = append(result, &fakeAddress{v})\n\t}\n\treturn\n}\n\nfunc (*networkSuite) TestGetIPv4Address(c *gc.C) {\n\tfor _, test := range []struct {\n\t\taddresses   []net.Addr\n\t\texpected    string\n\t\terrorString string\n\t}{{\n\t\taddresses: makeAddresses(\n\t\t\t\"complete\",\n\t\t\t\"nonsense\"),\n\t\terrorString: \"invalid CIDR address: complete\",\n\t}, {\n\t\taddresses: makeAddresses(\n\t\t\t\"fe80::90cf:9dff:fe6e:ece/64\",\n\t\t),\n\t\terrorString: \"no addresses match\",\n\t}, {\n\t\taddresses: makeAddresses(\n\t\t\t\"fe80::90cf:9dff:fe6e:ece/64\",\n\t\t\t\"10.0.3.1/24\",\n\t\t),\n\t\texpected: \"10.0.3.1\",\n\t}, {\n\t\taddresses: makeAddresses(\n\t\t\t\"10.0.3.1/24\",\n\t\t\t\"fe80::90cf:9dff:fe6e:ece/64\",\n\t\t),\n\t\texpected: \"10.0.3.1\",\n\t}} {\n\t\tip, err := utils.GetIPv4Address(test.addresses)\n\t\tif test.errorString == \"\" {\n\t\t\tc.Check(err, gc.IsNil)\n\t\t\tc.Check(ip, gc.Equals, test.expected)\n\t\t} else {\n\t\t\tc.Check(err, gc.ErrorMatches, test.errorString)\n\t\t\tc.Check(ip, gc.Equals, \"\")\n\t\t}\n\t}\n}\n\nfunc (*networkSuite) TestGetIPv6Address(c *gc.C) {\n\tfor _, test := range []struct {\n\t\taddresses   []net.Addr\n\t\texpected    string\n\t\terrorString string\n\t}{{\n\t\taddresses: makeAddresses(\n\t\t\t\"complete\",\n\t\t\t\"nonsense\"),\n\t\terrorString: \"invalid CIDR address: 
complete\",\n\t}, {\n\t\taddresses: makeAddresses(\n\t\t\t\"fe80::90cf:9dff:fe6e:ece/64\",\n\t\t),\n\t\terrorString: \"no addresses match\",\n\t}, {\n\t\taddresses: makeAddresses(\n\t\t\t\"fe80::90cf:9dff:fe6e:ece/64\",\n\t\t\t\"10.0.3.1/24\",\n\t\t),\n\t\terrorString: \"no addresses match\",\n\t}, {\n\t\taddresses: makeAddresses(\n\t\t\t\"10.0.3.1/24\",\n\t\t),\n\t\terrorString: \"no addresses match\",\n\t}, {\n\t\taddresses: makeAddresses(\n\t\t\t\"10.0.3.1/24\",\n\t\t\t\"2001:db8::90cf:9dff:fe6e:ece/64\",\n\t\t),\n\t\texpected: \"2001:db8::90cf:9dff:fe6e:ece\",\n\t}, {\n\t\taddresses: makeAddresses(\n\t\t\t\"2001:db8::90cf:9dff:fe6e:ece/64\",\n\t\t\t\"10.0.3.1/24\",\n\t\t),\n\t\texpected: \"2001:db8::90cf:9dff:fe6e:ece\",\n\t}} {\n\t\tip, err := utils.GetIPv6Address(test.addresses)\n\t\tif test.errorString == \"\" {\n\t\t\tc.Check(err, gc.IsNil)\n\t\t\tc.Check(ip, gc.Equals, test.expected)\n\t\t} else {\n\t\t\tc.Check(err, gc.ErrorMatches, test.errorString)\n\t\t\tc.Check(ip, gc.Equals, \"\")\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "os.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils\n\n// These are the names of the operating systems recognized by Go.\nconst (\n\tOSWindows   = \"windows\"\n\tOSDarwin    = \"darwin\"\n\tOSDragonfly = \"dragonfly\"\n\tOSFreebsd   = \"freebsd\"\n\tOSLinux     = \"linux\"\n\tOSNacl      = \"nacl\"\n\tOSNetbsd    = \"netbsd\"\n\tOSOpenbsd   = \"openbsd\"\n\tOSSolaris   = \"solaris\"\n)\n\n// OSUnix is the list of unix-like operating systems recognized by Go.\n// See http://golang.org/src/path/filepath/path_unix.go.\nvar OSUnix = []string{\n\tOSDarwin,\n\tOSDragonfly,\n\tOSFreebsd,\n\tOSLinux,\n\tOSNacl,\n\tOSNetbsd,\n\tOSOpenbsd,\n\tOSSolaris,\n}\n\n// OSIsUnix determines whether or not the given OS name is one of the\n// unix-like operating systems recognized by Go.\nfunc OSIsUnix(os string) bool {\n\tfor _, goos := range OSUnix {\n\t\tif os == goos {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "os_test.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils_test\n\nimport (\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4\"\n)\n\nvar _ = gc.Suite(&osSuite{})\n\ntype osSuite struct {\n\ttesting.IsolationSuite\n}\n\nfunc (osSuite) TestOSIsUnixKnown(c *gc.C) {\n\tfor _, os := range utils.OSUnix {\n\t\tc.Logf(\"checking %q\", os)\n\t\tisUnix := utils.OSIsUnix(os)\n\n\t\tc.Check(isUnix, jc.IsTrue)\n\t}\n}\n\nfunc (osSuite) TestOSIsUnixWindows(c *gc.C) {\n\tisUnix := utils.OSIsUnix(\"windows\")\n\n\tc.Check(isUnix, jc.IsFalse)\n}\n\nfunc (osSuite) TestOSIsUnixUnknown(c *gc.C) {\n\tisUnix := utils.OSIsUnix(\"<unknown OS>\")\n\n\tc.Check(isUnix, jc.IsFalse)\n}\n"
  },
  {
    "path": "package_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils_test\n\nimport (\n\t\"testing\"\n\n\tgc \"gopkg.in/check.v1\"\n)\n\nfunc TestPackage(t *testing.T) {\n\tgc.TestingT(t)\n}\n"
  },
  {
    "path": "parallel/package_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage parallel_test\n\nimport (\n\t\"testing\"\n\n\tgc \"gopkg.in/check.v1\"\n)\n\nfunc TestPackage(t *testing.T) {\n\tgc.TestingT(t)\n}\n"
  },
  {
    "path": "parallel/parallel.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\n// The parallel package provides utilities for running tasks\n// concurrently.\npackage parallel\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n)\n\n// Run represents a number of functions running concurrently.\ntype Run struct {\n\tmu      sync.Mutex\n\tresults chan Errors\n\tmax     int\n\trunning int\n\twork    chan func() error\n}\n\n// Errors holds any errors encountered during the parallel run.\ntype Errors []error\n\nfunc (errs Errors) Error() string {\n\tswitch len(errs) {\n\tcase 0:\n\t\treturn \"no error\"\n\tcase 1:\n\t\treturn errs[0].Error()\n\t}\n\treturn fmt.Sprintf(\"%s (and %d more)\", errs[0].Error(), len(errs)-1)\n}\n\n// NewRun returns a new parallel instance. It provides a way of running\n// functions concurrently while limiting the maximum number running at\n// once to max.\nfunc NewRun(max int) *Run {\n\tif max < 1 {\n\t\tpanic(\"parameter max must be >= 1\")\n\t}\n\treturn &Run{\n\t\tmax:     max,\n\t\tresults: make(chan Errors),\n\t\twork:    make(chan func() error),\n\t}\n}\n\n// Do requests that r run f concurrently.  If there are already the maximum\n// number of functions running concurrently, it will block until one of them\n// has completed. Do may itself be called concurrently, but may not be called\n// concurrently with Wait.\nfunc (r *Run) Do(f func() error) {\n\tselect {\n\tcase r.work <- f:\n\t\treturn\n\tdefault:\n\t}\n\tr.mu.Lock()\n\tif r.running < r.max {\n\t\tr.running++\n\t\tgo r.runner()\n\t}\n\tr.mu.Unlock()\n\tr.work <- f\n}\n\n// Wait marks the parallel instance as complete and waits for all the functions\n// to complete.  
If any errors were encountered, it returns an Errors value\n// describing all the errors in arbitrary order.\nfunc (r *Run) Wait() error {\n\tclose(r.work)\n\tvar errs Errors\n\tfor i := 0; i < r.running; i++ {\n\t\terrs = append(errs, <-r.results...)\n\t}\n\tif len(errs) == 0 {\n\t\treturn nil\n\t}\n\t// TODO(rog) sort errors by original order of Do request?\n\treturn errs\n}\n\nfunc (r *Run) runner() {\n\tvar errs Errors\n\tfor f := range r.work {\n\t\tif err := f(); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\tr.results <- errs\n}\n"
  },
  {
    "path": "parallel/parallel_test.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage parallel_test\n\nimport (\n\t\"sort\"\n\t\"sync\"\n\t\"sync/atomic\"\n\tstdtesting \"testing\"\n\t\"time\"\n\n\t\"github.com/juju/testing\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/parallel\"\n)\n\ntype parallelSuite struct {\n\ttesting.IsolationSuite\n}\n\nvar _ = gc.Suite(&parallelSuite{})\n\nfunc (*parallelSuite) TestParallelMaxPar(c *gc.C) {\n\tconst (\n\t\ttotalDo                 = 10\n\t\tmaxConcurrentRunnersPar = 3\n\t)\n\tvar mu sync.Mutex\n\tmaxConcurrentRunners := 0\n\tnbRunners := 0\n\tnbRuns := 0\n\tparallelRunner := parallel.NewRun(maxConcurrentRunnersPar)\n\tfor i := 0; i < totalDo; i++ {\n\t\tparallelRunner.Do(func() error {\n\t\t\tmu.Lock()\n\t\t\tnbRuns++\n\t\t\tnbRunners++\n\t\t\tif nbRunners > maxConcurrentRunners {\n\t\t\t\tmaxConcurrentRunners = nbRunners\n\t\t\t}\n\t\t\tmu.Unlock()\n\t\t\ttime.Sleep(time.Second / 10)\n\t\t\tmu.Lock()\n\t\t\tnbRunners--\n\t\t\tmu.Unlock()\n\t\t\treturn nil\n\t\t})\n\t}\n\terr := parallelRunner.Wait()\n\tif nbRunners != 0 {\n\t\tc.Errorf(\"%d functions still running\", nbRunners)\n\t}\n\tif nbRuns != totalDo {\n\t\tc.Errorf(\"all functions not executed; want %d got %d\", totalDo, nbRuns)\n\t}\n\tc.Check(err, gc.IsNil)\n\tif maxConcurrentRunners != maxConcurrentRunnersPar {\n\t\tc.Errorf(\"wrong number of do's ran at once; want %d got %d\", maxConcurrentRunnersPar, maxConcurrentRunners)\n\t}\n}\n\nfunc nothing() error {\n\treturn nil\n}\n\nfunc BenchmarkRunSingle(b *stdtesting.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tr := parallel.NewRun(1)\n\t\tr.Do(nothing)\n\t\tr.Wait()\n\t}\n}\n\nfunc BenchmarkRun1000p100(b *stdtesting.B) {\n\tfor i := 0; i < b.N; i++ {\n\t\tr := parallel.NewRun(100)\n\t\tfor j := 0; j < 1000; j++ {\n\t\t\tr.Do(nothing)\n\t\t}\n\t\tr.Wait()\n\t}\n}\n\nfunc (*parallelSuite) TestConcurrentDo(c *gc.C) {\n\tr := parallel.NewRun(3)\n\tvar count int32\n\tvar wg 
sync.WaitGroup\n\tfor i := 0; i < 100; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tr.Do(func() error {\n\t\t\t\tatomic.AddInt32(&count, 1)\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\twg.Done()\n\t\t}()\n\t}\n\twg.Wait()\n\terr := r.Wait()\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(count, gc.Equals, int32(100))\n}\n\ntype intError int\n\nfunc (intError) Error() string {\n\treturn \"error\"\n}\n\nfunc (*parallelSuite) TestParallelError(c *gc.C) {\n\tconst (\n\t\ttotalDo = 10\n\t\terrDo   = 5\n\t)\n\tparallelRun := parallel.NewRun(6)\n\tfor i := 0; i < totalDo; i++ {\n\t\ti := i\n\t\tif i >= errDo {\n\t\t\tparallelRun.Do(func() error {\n\t\t\t\treturn intError(i)\n\t\t\t})\n\t\t} else {\n\t\t\tparallelRun.Do(func() error {\n\t\t\t\treturn nil\n\t\t\t})\n\t\t}\n\t}\n\terr := parallelRun.Wait()\n\tc.Check(err, gc.NotNil)\n\terrs := err.(parallel.Errors)\n\tc.Check(len(errs), gc.Equals, totalDo-errDo)\n\tints := make([]int, len(errs))\n\tfor i, err := range errs {\n\t\tints[i] = int(err.(intError))\n\t}\n\tsort.Ints(ints)\n\tfor i, n := range ints {\n\t\tc.Check(n, gc.Equals, i+errDo)\n\t}\n}\n\nfunc (*parallelSuite) TestZeroWorkerPanics(c *gc.C) {\n\tdefer func() {\n\t\tr := recover()\n\t\tc.Check(r, gc.Matches, \"parameter max must be >= 1\")\n\t}()\n\tparallel.NewRun(0)\n}\n"
  },
  {
    "path": "parallel/try.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage parallel\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"sync\"\n\n\t\"gopkg.in/tomb.v1\"\n)\n\nvar (\n\tErrStopped = errors.New(\"try was stopped\")\n\tErrClosed  = errors.New(\"try was closed\")\n)\n\n// Try represents an attempt made concurrently\n// by a number of goroutines.\ntype Try struct {\n\ttomb          tomb.Tomb\n\tcloseMutex    sync.Mutex\n\tclose         chan struct{}\n\tlimiter       chan struct{}\n\tstart         chan func()\n\tresult        chan result\n\tcombineErrors func(err0, err1 error) error\n\tmaxParallel   int\n\tendResult     io.Closer\n}\n\n// NewTry returns an object that runs functions concurrently until one\n// succeeds. The result of the first function that returns without an\n// error is available from the Result method. If maxParallel is\n// positive, it limits the number of concurrently running functions.\n//\n// The function combineErrors(oldErr, newErr) is called to determine\n// the error return (see the Result method). The first time it is called,\n// oldErr will be nil; subsequently oldErr will be the error previously\n// returned by combineErrors. 
If combineErrors is nil, the last\n// encountered error is chosen.\nfunc NewTry(maxParallel int, combineErrors func(err0, err1 error) error) *Try {\n\tif combineErrors == nil {\n\t\tcombineErrors = chooseLastError\n\t}\n\tt := &Try{\n\t\tcombineErrors: combineErrors,\n\t\tmaxParallel:   maxParallel,\n\t\tclose:         make(chan struct{}, 1),\n\t\tresult:        make(chan result),\n\t\tstart:         make(chan func()),\n\t}\n\tif t.maxParallel > 0 {\n\t\tt.limiter = make(chan struct{}, t.maxParallel)\n\t\tfor i := 0; i < t.maxParallel; i++ {\n\t\t\tt.limiter <- struct{}{}\n\t\t}\n\t}\n\tgo func() {\n\t\tdefer t.tomb.Done()\n\t\tval, err := t.loop()\n\t\tt.endResult = val\n\t\tt.tomb.Kill(err)\n\t}()\n\treturn t\n}\n\nfunc chooseLastError(err0, err1 error) error {\n\treturn err1\n}\n\ntype result struct {\n\tval io.Closer\n\terr error\n}\n\nfunc (t *Try) loop() (io.Closer, error) {\n\tvar err error\n\tclose := t.close\n\tnrunning := 0\n\tfor {\n\t\tselect {\n\t\tcase f := <-t.start:\n\t\t\tnrunning++\n\t\t\tgo f()\n\t\tcase r := <-t.result:\n\t\t\tif r.err == nil {\n\t\t\t\treturn r.val, r.err\n\t\t\t}\n\t\t\terr = t.combineErrors(err, r.err)\n\t\t\tnrunning--\n\t\t\tif close == nil && nrunning == 0 {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\tcase <-t.tomb.Dying():\n\t\t\tif err == nil {\n\t\t\t\treturn nil, ErrStopped\n\t\t\t}\n\t\t\treturn nil, err\n\t\tcase <-close:\n\t\t\tclose = nil\n\t\t\tif nrunning == 0 {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n}\n\n// Start requests the given function to be started, waiting until there\n// are less than maxParallel functions running if necessary. 
It returns\n// an error if the function has not been started (ErrClosed if the Try\n// has been closed, and ErrStopped if the try is finishing).\n//\n// The function should listen on the stop channel and return if it\n// receives a value, though this is advisory only - the Try does not\n// wait for all started functions to return before completing.\n//\n// If the function returns a nil error but some earlier try was\n// successful (that is, the returned value is being discarded),\n// its returned value will be closed by calling its Close method.\nfunc (t *Try) Start(try func(stop <-chan struct{}) (io.Closer, error)) error {\n\tif t.limiter != nil {\n\t\t// Wait for availability slot.\n\t\tselect {\n\t\tcase <-t.limiter:\n\t\tcase <-t.tomb.Dying():\n\t\t\treturn ErrStopped\n\t\tcase <-t.close:\n\t\t\treturn ErrClosed\n\t\t}\n\t}\n\tdying := t.tomb.Dying()\n\tf := func() {\n\t\tval, err := try(dying)\n\t\tif t.limiter != nil {\n\t\t\t// Signal availability slot is now free.\n\t\t\tt.limiter <- struct{}{}\n\t\t}\n\t\t// Deliver result.\n\t\tselect {\n\t\tcase t.result <- result{val, err}:\n\t\tcase <-dying:\n\t\t\tif err == nil {\n\t\t\t\tval.Close()\n\t\t\t}\n\t\t}\n\t}\n\tselect {\n\tcase t.start <- f:\n\t\treturn nil\n\tcase <-dying:\n\t\treturn ErrStopped\n\tcase <-t.close:\n\t\treturn ErrClosed\n\t}\n}\n\n// Close closes the Try. 
No more functions will be started\n// if Start is called, and the Try will terminate when all\n// outstanding functions have completed (or earlier\n// if one succeeds)\nfunc (t *Try) Close() {\n\tt.closeMutex.Lock()\n\tdefer t.closeMutex.Unlock()\n\tselect {\n\tcase <-t.close:\n\tdefault:\n\t\tclose(t.close)\n\t}\n}\n\n// Dead returns a channel that is closed when the\n// Try completes.\nfunc (t *Try) Dead() <-chan struct{} {\n\treturn t.tomb.Dead()\n}\n\n// Wait waits for the Try to complete and returns the same\n// error returned by Result.\nfunc (t *Try) Wait() error {\n\treturn t.tomb.Wait()\n}\n\n// Result waits for the Try to complete and returns the result of the\n// first successful function started by Start.\n//\n// If no function succeeded, the last error returned by\n// combineErrors is returned. If there were no errors or\n// combineErrors returned nil, ErrStopped is returned.\nfunc (t *Try) Result() (io.Closer, error) {\n\terr := t.tomb.Wait()\n\treturn t.endResult, err\n}\n\n// Kill stops the try and all its currently executing functions.\nfunc (t *Try) Kill() {\n\tt.tomb.Kill(nil)\n}\n"
  },
  {
    "path": "parallel/try_test.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage parallel_test\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"sort\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/parallel\"\n)\n\nconst (\n\tshortWait = 50 * time.Millisecond\n\tlongWait  = 10 * time.Second\n)\n\ntype result string\n\nfunc (r result) Close() error {\n\treturn nil\n}\n\ntype trySuite struct {\n\ttesting.IsolationSuite\n}\n\nvar _ = gc.Suite(&trySuite{})\n\nfunc tryFunc(delay time.Duration, val io.Closer, err error) func(<-chan struct{}) (io.Closer, error) {\n\treturn func(<-chan struct{}) (io.Closer, error) {\n\t\ttime.Sleep(delay)\n\t\treturn val, err\n\t}\n}\n\nfunc (*trySuite) TestOneSuccess(c *gc.C) {\n\ttry := parallel.NewTry(0, nil)\n\ttry.Start(tryFunc(0, result(\"hello\"), nil))\n\tval, err := try.Result()\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(val, gc.Equals, result(\"hello\"))\n}\n\nfunc (*trySuite) TestOneFailure(c *gc.C) {\n\ttry := parallel.NewTry(0, nil)\n\texpectErr := errors.New(\"foo\")\n\terr := try.Start(tryFunc(0, nil, expectErr))\n\tc.Assert(err, gc.IsNil)\n\tselect {\n\tcase <-try.Dead():\n\t\tc.Fatalf(\"try died before it should\")\n\tcase <-time.After(shortWait):\n\t}\n\ttry.Close()\n\tselect {\n\tcase <-try.Dead():\n\tcase <-time.After(longWait):\n\t\tc.Fatalf(\"timed out waiting for Try to complete\")\n\t}\n\tval, err := try.Result()\n\tc.Assert(val, gc.IsNil)\n\tc.Assert(err, gc.Equals, expectErr)\n}\n\nfunc (*trySuite) TestStartReturnsErrorAfterClose(c *gc.C) {\n\ttry := parallel.NewTry(0, nil)\n\texpectErr := errors.New(\"foo\")\n\terr := try.Start(tryFunc(0, nil, expectErr))\n\tc.Assert(err, gc.IsNil)\n\ttry.Close()\n\terr = try.Start(tryFunc(0, result(\"goodbye\"), nil))\n\tc.Assert(err, gc.Equals, parallel.ErrClosed)\n\t// Wait for the first try to deliver its 
result\n\ttime.Sleep(shortWait)\n\ttry.Kill()\n\terr = try.Wait()\n\tc.Assert(err, gc.Equals, expectErr)\n}\n\nfunc (*trySuite) TestOutOfOrderResults(c *gc.C) {\n\ttry := parallel.NewTry(0, nil)\n\ttry.Start(tryFunc(50*time.Millisecond, result(\"first\"), nil))\n\ttry.Start(tryFunc(10*time.Millisecond, result(\"second\"), nil))\n\tr, err := try.Result()\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(r, gc.Equals, result(\"second\"))\n}\n\nfunc (*trySuite) TestMaxParallel(c *gc.C) {\n\ttry := parallel.NewTry(3, nil)\n\tvar (\n\t\tmu    sync.Mutex\n\t\tcount int\n\t\tmax   int\n\t)\n\n\tfor i := 0; i < 10; i++ {\n\t\ttry.Start(func(<-chan struct{}) (io.Closer, error) {\n\t\t\tmu.Lock()\n\t\t\tif count++; count > max {\n\t\t\t\tmax = count\n\t\t\t}\n\t\t\tc.Check(count, gc.Not(jc.GreaterThan), 3)\n\t\t\tmu.Unlock()\n\t\t\ttime.Sleep(20 * time.Millisecond)\n\t\t\tmu.Lock()\n\t\t\tcount--\n\t\t\tmu.Unlock()\n\t\t\treturn result(\"hello\"), nil\n\t\t})\n\t}\n\tr, err := try.Result()\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(r, gc.Equals, result(\"hello\"))\n\tmu.Lock()\n\tdefer mu.Unlock()\n\tc.Assert(max, gc.Equals, 3)\n}\n\nfunc (*trySuite) TestStartBlocksForMaxParallel(c *gc.C) {\n\ttry := parallel.NewTry(3, nil)\n\n\tstarted := make(chan struct{})\n\tbegin := make(chan struct{})\n\tgo func() {\n\t\tfor i := 0; i < 6; i++ {\n\t\t\terr := try.Start(func(<-chan struct{}) (io.Closer, error) {\n\t\t\t\t<-begin\n\t\t\t\treturn nil, fmt.Errorf(\"an error\")\n\t\t\t})\n\t\t\tstarted <- struct{}{}\n\t\t\tif i < 5 {\n\t\t\t\tc.Check(err, gc.IsNil)\n\t\t\t} else {\n\t\t\t\tc.Check(err, gc.Equals, parallel.ErrClosed)\n\t\t\t}\n\t\t}\n\t\tclose(started)\n\t}()\n\t// Check we can start the first three.\n\ttimeout := time.After(longWait)\n\tfor i := 0; i < 3; i++ {\n\t\tselect {\n\t\tcase <-started:\n\t\tcase <-timeout:\n\t\t\tc.Fatalf(\"timed out\")\n\t\t}\n\t}\n\t// Check we block when going above maxParallel.\n\ttimeout = time.After(shortWait)\n\tselect {\n\tcase 
<-started:\n\t\tc.Fatalf(\"Start did not block\")\n\tcase <-timeout:\n\t}\n\n\t// Unblock two attempts.\n\tbegin <- struct{}{}\n\tbegin <- struct{}{}\n\n\t// Check we can start another two.\n\ttimeout = time.After(longWait)\n\tfor i := 0; i < 2; i++ {\n\t\tselect {\n\t\tcase <-started:\n\t\tcase <-timeout:\n\t\t\tc.Fatalf(\"timed out\")\n\t\t}\n\t}\n\n\t// Check we block again when going above maxParallel.\n\ttimeout = time.After(shortWait)\n\tselect {\n\tcase <-started:\n\t\tc.Fatalf(\"Start did not block\")\n\tcase <-timeout:\n\t}\n\n\t// Close the Try - the last request should be discarded,\n\t// unblocking last remaining Start request.\n\ttry.Close()\n\n\ttimeout = time.After(longWait)\n\tselect {\n\tcase <-started:\n\tcase <-timeout:\n\t\tc.Fatalf(\"Start did not unblock after Close\")\n\t}\n\n\t// Ensure all checks are completed\n\tselect {\n\tcase _, ok := <-started:\n\t\tc.Assert(ok, gc.Equals, false)\n\tcase <-timeout:\n\t\tc.Fatalf(\"Start goroutine did not finish\")\n\t}\n}\n\nfunc (*trySuite) TestAllConcurrent(c *gc.C) {\n\ttry := parallel.NewTry(0, nil)\n\tstarted := make(chan chan struct{})\n\tfor i := 0; i < 10; i++ {\n\t\ttry.Start(func(<-chan struct{}) (io.Closer, error) {\n\t\t\treply := make(chan struct{})\n\t\t\tstarted <- reply\n\t\t\t<-reply\n\t\t\treturn result(\"hello\"), nil\n\t\t})\n\t}\n\ttimeout := time.After(longWait)\n\tfor i := 0; i < 10; i++ {\n\t\tselect {\n\t\tcase reply := <-started:\n\t\t\treply <- struct{}{}\n\t\tcase <-timeout:\n\t\t\tc.Fatalf(\"timed out\")\n\t\t}\n\t}\n}\n\ntype gradedError int\n\nfunc (e gradedError) Error() string {\n\treturn fmt.Sprintf(\"error with importance %d\", e)\n}\n\nfunc gradedErrorCombine(err0, err1 error) error {\n\tif err0 == nil || err0.(gradedError) < err1.(gradedError) {\n\t\treturn err1\n\t}\n\treturn err0\n}\n\ntype multiError struct {\n\terrs []int\n}\n\nfunc (e *multiError) Error() string {\n\treturn fmt.Sprintf(\"%v\", e.errs)\n}\n\nfunc (*trySuite) TestErrorCombine(c *gc.C) {\n\t// Use 
maxParallel=1 to guarantee that all errors are processed sequentially.\n\ttry := parallel.NewTry(1, func(err0, err1 error) error {\n\t\tif err0 == nil {\n\t\t\terr0 = &multiError{}\n\t\t}\n\t\terr0.(*multiError).errs = append(err0.(*multiError).errs, int(err1.(gradedError)))\n\t\treturn err0\n\t})\n\terrors := []gradedError{3, 2, 4, 0, 5, 5, 3}\n\tfor _, err := range errors {\n\t\terr := err\n\t\ttry.Start(func(<-chan struct{}) (io.Closer, error) {\n\t\t\treturn nil, err\n\t\t})\n\t}\n\ttry.Close()\n\tval, err := try.Result()\n\tc.Assert(val, gc.IsNil)\n\tgrades := err.(*multiError).errs\n\tsort.Ints(grades)\n\tc.Assert(grades, gc.DeepEquals, []int{0, 2, 3, 3, 4, 5, 5})\n}\n\nfunc (*trySuite) TestTriesAreStopped(c *gc.C) {\n\ttry := parallel.NewTry(0, nil)\n\tstopped := make(chan struct{})\n\ttry.Start(func(stop <-chan struct{}) (io.Closer, error) {\n\t\t<-stop\n\t\tstopped <- struct{}{}\n\t\treturn nil, parallel.ErrStopped\n\t})\n\ttry.Start(tryFunc(0, result(\"hello\"), nil))\n\tval, err := try.Result()\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(val, gc.Equals, result(\"hello\"))\n\n\tselect {\n\tcase <-stopped:\n\tcase <-time.After(longWait):\n\t\tc.Fatalf(\"timed out waiting for stop\")\n\t}\n}\n\nfunc (*trySuite) TestCloseTwice(c *gc.C) {\n\ttry := parallel.NewTry(0, nil)\n\ttry.Close()\n\ttry.Close()\n\tval, err := try.Result()\n\tc.Assert(val, gc.IsNil)\n\tc.Assert(err, gc.IsNil)\n}\n\ntype closeResult struct {\n\tclosed chan struct{}\n}\n\nfunc (r *closeResult) Close() error {\n\tclose(r.closed)\n\treturn nil\n}\n\nfunc (*trySuite) TestExtraResultsAreClosed(c *gc.C) {\n\ttry := parallel.NewTry(0, nil)\n\tbegin := make([]chan struct{}, 4)\n\tresults := make([]*closeResult, len(begin))\n\tfor i := range begin {\n\t\tbegin[i] = make(chan struct{})\n\t\tresults[i] = &closeResult{make(chan struct{})}\n\t\ti := i\n\t\ttry.Start(func(<-chan struct{}) (io.Closer, error) {\n\t\t\t<-begin[i]\n\t\t\treturn results[i], nil\n\t\t})\n\t}\n\tbegin[0] <- struct{}{}\n\tval, err 
:= try.Result()\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(val, gc.Equals, results[0])\n\n\ttimeout := time.After(shortWait)\n\tfor i, r := range results[1:] {\n\t\tbegin[i+1] <- struct{}{}\n\t\tselect {\n\t\tcase <-r.closed:\n\t\tcase <-timeout:\n\t\t\tc.Fatalf(\"timed out waiting for close\")\n\t\t}\n\t}\n\tselect {\n\tcase <-results[0].closed:\n\t\tc.Fatalf(\"result was inappropriately closed\")\n\tcase <-time.After(shortWait):\n\t}\n}\n\nfunc (*trySuite) TestEverything(c *gc.C) {\n\ttry := parallel.NewTry(5, gradedErrorCombine)\n\ttries := []struct {\n\t\tstartAt time.Duration\n\t\twait    time.Duration\n\t\tval     result\n\t\terr     error\n\t}{{\n\t\twait: 30 * time.Millisecond,\n\t\terr:  gradedError(3),\n\t}, {\n\t\tstartAt: 10 * time.Millisecond,\n\t\twait:    20 * time.Millisecond,\n\t\tval:     result(\"result 1\"),\n\t}, {\n\t\tstartAt: 20 * time.Millisecond,\n\t\twait:    10 * time.Millisecond,\n\t\tval:     result(\"result 2\"),\n\t}, {\n\t\tstartAt: 20 * time.Millisecond,\n\t\twait:    5 * time.Second,\n\t\tval:     \"delayed result\",\n\t}, {\n\t\tstartAt: 5 * time.Millisecond,\n\t\terr:     gradedError(4),\n\t}}\n\tfor _, t := range tries {\n\t\tt := t\n\t\tgo func() {\n\t\t\ttime.Sleep(t.startAt)\n\t\t\ttry.Start(tryFunc(t.wait, t.val, t.err))\n\t\t}()\n\t}\n\tval, err := try.Result()\n\tif val != result(\"result 1\") && val != result(\"result 2\") {\n\t\tc.Errorf(`expected \"result 1\" or \"result 2\" got %#v`, val)\n\t}\n\tc.Assert(err, gc.IsNil)\n}\n"
  },
  {
    "path": "password.go",
    "content": "// Copyright 2012, 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils\n\nimport (\n\t\"crypto/rand\"\n\t\"crypto/sha512\"\n\t\"encoding/base64\"\n\t\"fmt\"\n\t\"io\"\n\n\t\"golang.org/x/crypto/pbkdf2\"\n)\n\n// CompatSalt is because Juju 1.16 and older used a hard-coded salt to compute\n// the password hash for all users and agents\nvar CompatSalt = string([]byte{0x75, 0x82, 0x81, 0xca})\n\nconst randomPasswordBytes = 18\n\n// MinAgentPasswordLength describes how long agent passwords should be. We\n// require this length because we assume enough entropy in the Agent password\n// that it is safe to not do extra rounds of iterated hashing.\nvar MinAgentPasswordLength = base64.StdEncoding.EncodedLen(randomPasswordBytes)\n\n// RandomBytes returns n random bytes.\nfunc RandomBytes(n int) ([]byte, error) {\n\tbuf := make([]byte, n)\n\t_, err := io.ReadFull(rand.Reader, buf)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot read random bytes: %v\", err)\n\t}\n\treturn buf, nil\n}\n\n// RandomPassword generates a random base64-encoded password.\nfunc RandomPassword() (string, error) {\n\tb, err := RandomBytes(randomPasswordBytes)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn base64.StdEncoding.EncodeToString(b), nil\n}\n\n// RandomSalt generates random base64 data suitable for using as a password\n// salt. The pbkdf2 guideline is to use 8 bytes of salt, so we do 12 raw bytes\n// into 16 base64 bytes. (The alternative is 6 raw into 8 base64).\nfunc RandomSalt() (string, error) {\n\tb, err := RandomBytes(12)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn base64.StdEncoding.EncodeToString(b), nil\n}\n\n// FastInsecureHash specifies whether a fast, insecure version of the hash\n// algorithm will be used.  Changing this will cause PasswordHash to\n// produce incompatible passwords.  
It should only be changed for\n// testing purposes - to make tests run faster.\nvar FastInsecureHash = false\n\n// UserPasswordHash returns base64-encoded one-way hash password that is\n// computationally hard to crack by iterating through possible passwords.\nfunc UserPasswordHash(password string, salt string) string {\n\tif salt == \"\" {\n\t\tpanic(\"salt is not allowed to be empty\")\n\t}\n\titer := 8192\n\tif FastInsecureHash {\n\t\titer = 1\n\t}\n\t// Generate 18 byte passwords because we know that MongoDB\n\t// uses the MD5 sum of the password anyway, so there's\n\t// no point in using more bytes. (18 so we don't get base 64\n\t// padding characters).\n\th := pbkdf2.Key([]byte(password), []byte(salt), iter, 18, sha512.New)\n\treturn base64.StdEncoding.EncodeToString(h)\n}\n\n// AgentPasswordHash returns base64-encoded one-way hash of password. This is\n// not suitable for User passwords because those will have limited entropy (see\n// UserPasswordHash). However, since we generate long random passwords for\n// agents, we can trust that there is sufficient entropy to prevent brute force\n// search. And using a faster hash allows us to restart the state machines and\n// have 1000s of agents log in in a reasonable amount of time.\nfunc AgentPasswordHash(password string) string {\n\tsum := sha512.New()\n\tsum.Write([]byte(password))\n\th := sum.Sum(nil)\n\treturn base64.StdEncoding.EncodeToString(h[:18])\n}\n"
  },
  {
    "path": "password_test.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils_test\n\nimport (\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4\"\n)\n\ntype passwordSuite struct {\n\ttesting.IsolationSuite\n}\n\nvar _ = gc.Suite(&passwordSuite{})\n\n// Base64 *can* include a tail of '=' characters, but all the tests here\n// explicitly *don't* want those because it is wasteful.\nvar base64Chars = \"^[A-Za-z0-9+/]+$\"\n\nfunc (*passwordSuite) TestRandomBytes(c *gc.C) {\n\tb, err := utils.RandomBytes(16)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(b, gc.HasLen, 16)\n\tx0 := b[0]\n\tfor _, x := range b {\n\t\tif x != x0 {\n\t\t\treturn\n\t\t}\n\t}\n\tc.Errorf(\"all same bytes in result of RandomBytes\")\n}\n\nfunc (*passwordSuite) TestRandomPassword(c *gc.C) {\n\tp, err := utils.RandomPassword()\n\tc.Assert(err, gc.IsNil)\n\tif len(p) < 18 {\n\t\tc.Errorf(\"password too short: %q\", p)\n\t}\n\tc.Assert(p, gc.Matches, base64Chars)\n}\n\nfunc (*passwordSuite) TestRandomSalt(c *gc.C) {\n\tsalt, err := utils.RandomSalt()\n\tc.Assert(err, gc.IsNil)\n\tif len(salt) < 12 {\n\t\tc.Errorf(\"salt too short: %q\", salt)\n\t}\n\t// check we're not adding base64 padding.\n\tc.Assert(salt, gc.Matches, base64Chars)\n}\n\nvar testPasswords = []string{\"\", \"a\", \"a longer password than i would usually bother with\"}\n\nvar testSalts = []string{\"abcd\", \"abcdefgh\", \"abcdefghijklmnop\", utils.CompatSalt}\n\nfunc (*passwordSuite) TestUserPasswordHash(c *gc.C) {\n\tseenHashes := make(map[string]bool)\n\tfor i, password := range testPasswords {\n\t\tfor j, salt := range testSalts {\n\t\t\tc.Logf(\"test %d, %d %s %s\", i, j, password, salt)\n\t\t\thashed := utils.UserPasswordHash(password, salt)\n\t\t\tc.Logf(\"hash %q\", hashed)\n\t\t\tc.Assert(len(hashed), gc.Equals, 24)\n\t\t\tc.Assert(seenHashes[hashed], gc.Equals, false)\n\t\t\t// check we're not 
adding base64 padding.\n\t\t\tc.Assert(hashed, gc.Matches, base64Chars)\n\t\t\tseenHashes[hashed] = true\n\t\t\t// check it's deterministic\n\t\t\taltHashed := utils.UserPasswordHash(password, salt)\n\t\t\tc.Assert(altHashed, gc.Equals, hashed)\n\t\t}\n\t}\n}\n\nfunc (*passwordSuite) TestAgentPasswordHash(c *gc.C) {\n\tseenValues := make(map[string]bool)\n\tfor i := 0; i < 1000; i++ {\n\t\tpassword, err := utils.RandomPassword()\n\t\tc.Assert(err, gc.IsNil)\n\t\tc.Assert(seenValues[password], jc.IsFalse)\n\t\tseenValues[password] = true\n\t\thashed := utils.AgentPasswordHash(password)\n\t\tc.Assert(hashed, gc.Not(gc.Equals), password)\n\t\tc.Assert(seenValues[hashed], jc.IsFalse)\n\t\tseenValues[hashed] = true\n\t\tc.Assert(len(hashed), gc.Equals, 24)\n\t\t// check we're not adding base64 padding.\n\t\tc.Assert(hashed, gc.Matches, base64Chars)\n\t}\n}\n"
  },
  {
    "path": "proxy/package_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage proxy_test\n\nimport (\n\t\"testing\"\n\n\tgc \"gopkg.in/check.v1\"\n)\n\nfunc TestPackage(t *testing.T) {\n\tgc.TestingT(t)\n}\n"
  },
  {
    "path": "proxy/proxy.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage proxy\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com/juju/collections/set\"\n)\n\nconst (\n\t// Remove the likelihood of errors by mistyping string values.\n\thttp_proxy  = \"http_proxy\"\n\thttps_proxy = \"https_proxy\"\n\tftp_proxy   = \"ftp_proxy\"\n\tno_proxy    = \"no_proxy\"\n)\n\n// Settings holds the values for the HTTP, HTTPS and FTP proxies as well as the\n// no_proxy value found by DetectProxies.\n// AutoNoProxy is filled with addresses of controllers, we never want to proxy those\ntype Settings struct {\n\tHttp        string\n\tHttps       string\n\tFtp         string\n\tNoProxy     string\n\tAutoNoProxy string\n}\n\nfunc getSetting(key string) string {\n\tvalue := os.Getenv(key)\n\tif value == \"\" {\n\t\tvalue = os.Getenv(strings.ToUpper(key))\n\t}\n\treturn value\n}\n\n// DetectProxies returns the proxy settings found in the environment.\nfunc DetectProxies() Settings {\n\treturn Settings{\n\t\tHttp:    getSetting(http_proxy),\n\t\tHttps:   getSetting(https_proxy),\n\t\tFtp:     getSetting(ftp_proxy),\n\t\tNoProxy: getSetting(no_proxy),\n\t}\n}\n\n// AsScriptEnvironment returns a potentially multi-line string in a format\n// that specifies exported key=value lines. 
There are two lines for each non-\n// empty proxy value, one lower-case and one upper-case.\nfunc (s *Settings) AsScriptEnvironment() string {\n\tvar lines []string\n\taddLine := func(proxy, value string) {\n\t\tif value != \"\" {\n\t\t\tlines = append(\n\t\t\t\tlines,\n\t\t\t\tfmt.Sprintf(\"export %s=%s\", proxy, value),\n\t\t\t\tfmt.Sprintf(\"export %s=%s\", strings.ToUpper(proxy), value))\n\t\t}\n\t}\n\taddLine(http_proxy, s.Http)\n\taddLine(https_proxy, s.Https)\n\taddLine(ftp_proxy, s.Ftp)\n\taddLine(no_proxy, s.FullNoProxy())\n\treturn strings.Join(lines, \"\\n\")\n}\n\n// AsEnvironmentValues returns a slice of strings of the format \"key=value\"\n// suitable to be used in a command environment. There are two values for each\n// non-empty proxy value, one lower-case and one upper-case.\nfunc (s *Settings) AsEnvironmentValues() []string {\n\tlines := []string{}\n\taddLine := func(proxy, value string) {\n\t\tif value != \"\" {\n\t\t\tlines = append(\n\t\t\t\tlines,\n\t\t\t\tfmt.Sprintf(\"%s=%s\", proxy, value),\n\t\t\t\tfmt.Sprintf(\"%s=%s\", strings.ToUpper(proxy), value))\n\t\t}\n\t}\n\taddLine(http_proxy, s.Http)\n\taddLine(https_proxy, s.Https)\n\taddLine(ftp_proxy, s.Ftp)\n\taddLine(no_proxy, s.FullNoProxy())\n\treturn lines\n}\n\n// AsSystemdDefaultEnv returns a string in the format understood by systemd:\n// DefaultEnvironment=\"http_proxy=....\" \"HTTP_PROXY=...\" ...\nfunc (s *Settings) AsSystemdDefaultEnv() string {\n\tlines := s.AsEnvironmentValues()\n\trv := `# To allow juju to control the global systemd proxy settings,\n# create symbolic links to this file from within /etc/systemd/system.conf.d/\n# and /etc/systemd/users.conf.d/.\n[Manager]\nDefaultEnvironment=`\n\tfor _, line := range lines {\n\t\trv += fmt.Sprintf(`\"%s\" `, line)\n\t}\n\treturn rv + \"\\n\"\n}\n\n// SetEnvironmentValues updates the process environment with the\n// proxy values stored in the settings object.  
Both the lower-case\n// and upper-case variants are set.\n//\n// http_proxy, HTTP_PROXY\n// https_proxy, HTTPS_PROXY\n// ftp_proxy, FTP_PROXY\nfunc (s *Settings) SetEnvironmentValues() {\n\tsetenv := func(proxy, value string) {\n\t\tos.Setenv(proxy, value)\n\t\tos.Setenv(strings.ToUpper(proxy), value)\n\t}\n\tsetenv(http_proxy, s.Http)\n\tsetenv(https_proxy, s.Https)\n\tsetenv(ftp_proxy, s.Ftp)\n\tsetenv(no_proxy, s.FullNoProxy())\n}\n\n// FullNoProxy merges NoProxy and AutoNoProxy\nfunc (s *Settings) FullNoProxy() string {\n\tvar allNoProxy []string\n\tif s.NoProxy != \"\" {\n\t\tallNoProxy = strings.Split(s.NoProxy, \",\")\n\t}\n\tif s.AutoNoProxy != \"\" {\n\t\tallNoProxy = append(allNoProxy, strings.Split(s.AutoNoProxy, \",\")...)\n\t}\n\tnoProxySet := set.NewStrings(allNoProxy...)\n\treturn strings.Join(noProxySet.SortedValues(), \",\")\n}\n"
  },
  {
    "path": "proxy/proxy_test.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage proxy_test\n\nimport (\n\t\"os\"\n\n\t\"github.com/juju/testing\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/proxy\"\n)\n\ntype proxySuite struct {\n\ttesting.IsolationSuite\n}\n\nvar _ = gc.Suite(&proxySuite{})\n\nfunc (s *proxySuite) TestDetectNoSettings(c *gc.C) {\n\t// Patch all of the environment variables we check out just in case the\n\t// user has one set.\n\ts.PatchEnvironment(\"http_proxy\", \"\")\n\ts.PatchEnvironment(\"HTTP_PROXY\", \"\")\n\ts.PatchEnvironment(\"https_proxy\", \"\")\n\ts.PatchEnvironment(\"HTTPS_PROXY\", \"\")\n\ts.PatchEnvironment(\"ftp_proxy\", \"\")\n\ts.PatchEnvironment(\"FTP_PROXY\", \"\")\n\ts.PatchEnvironment(\"no_proxy\", \"\")\n\ts.PatchEnvironment(\"NO_PROXY\", \"\")\n\n\tproxies := proxy.DetectProxies()\n\n\tc.Assert(proxies, gc.DeepEquals, proxy.Settings{})\n}\n\nfunc (s *proxySuite) TestDetectPrimary(c *gc.C) {\n\t// Patch all of the environment variables we check out just in case the\n\t// user has one set.\n\ts.PatchEnvironment(\"http_proxy\", \"http://user@10.0.0.1\")\n\ts.PatchEnvironment(\"HTTP_PROXY\", \"\")\n\ts.PatchEnvironment(\"https_proxy\", \"https://user@10.0.0.1\")\n\ts.PatchEnvironment(\"HTTPS_PROXY\", \"\")\n\ts.PatchEnvironment(\"ftp_proxy\", \"ftp://user@10.0.0.1\")\n\ts.PatchEnvironment(\"FTP_PROXY\", \"\")\n\ts.PatchEnvironment(\"no_proxy\", \"10.0.3.1,localhost\")\n\ts.PatchEnvironment(\"NO_PROXY\", \"\")\n\n\tproxies := proxy.DetectProxies()\n\n\tc.Assert(proxies, gc.DeepEquals, proxy.Settings{\n\t\tHttp:    \"http://user@10.0.0.1\",\n\t\tHttps:   \"https://user@10.0.0.1\",\n\t\tFtp:     \"ftp://user@10.0.0.1\",\n\t\tNoProxy: \"10.0.3.1,localhost\",\n\t})\n}\n\nfunc (s *proxySuite) TestDetectFallback(c *gc.C) {\n\t// Patch all of the environment variables we check out just in case the\n\t// user has one set.\n\ts.PatchEnvironment(\"http_proxy\", 
\"\")\n\ts.PatchEnvironment(\"HTTP_PROXY\", \"http://user@10.0.0.2\")\n\ts.PatchEnvironment(\"https_proxy\", \"\")\n\ts.PatchEnvironment(\"HTTPS_PROXY\", \"https://user@10.0.0.2\")\n\ts.PatchEnvironment(\"ftp_proxy\", \"\")\n\ts.PatchEnvironment(\"FTP_PROXY\", \"ftp://user@10.0.0.2\")\n\ts.PatchEnvironment(\"no_proxy\", \"\")\n\ts.PatchEnvironment(\"NO_PROXY\", \"10.0.3.1,localhost\")\n\n\tproxies := proxy.DetectProxies()\n\n\tc.Assert(proxies, gc.DeepEquals, proxy.Settings{\n\t\tHttp:    \"http://user@10.0.0.2\",\n\t\tHttps:   \"https://user@10.0.0.2\",\n\t\tFtp:     \"ftp://user@10.0.0.2\",\n\t\tNoProxy: \"10.0.3.1,localhost\",\n\t})\n}\n\nfunc (s *proxySuite) TestDetectPrimaryPreference(c *gc.C) {\n\t// Patch all of the environment variables we check out just in case the\n\t// user has one set.\n\ts.PatchEnvironment(\"http_proxy\", \"http://user@10.0.0.1\")\n\ts.PatchEnvironment(\"https_proxy\", \"https://user@10.0.0.1\")\n\ts.PatchEnvironment(\"ftp_proxy\", \"ftp://user@10.0.0.1\")\n\ts.PatchEnvironment(\"no_proxy\", \"10.0.3.1,localhost\")\n\ts.PatchEnvironment(\"HTTP_PROXY\", \"http://user@10.0.0.2\")\n\ts.PatchEnvironment(\"HTTPS_PROXY\", \"https://user@10.0.0.2\")\n\ts.PatchEnvironment(\"FTP_PROXY\", \"ftp://user@10.0.0.2\")\n\ts.PatchEnvironment(\"NO_PROXY\", \"localhost\")\n\n\tproxies := proxy.DetectProxies()\n\n\tc.Assert(proxies, gc.DeepEquals, proxy.Settings{\n\t\tHttp:    \"http://user@10.0.0.1\",\n\t\tHttps:   \"https://user@10.0.0.1\",\n\t\tFtp:     \"ftp://user@10.0.0.1\",\n\t\tNoProxy: \"10.0.3.1,localhost\",\n\t})\n}\n\nfunc (s *proxySuite) TestAsScriptEnvironmentEmpty(c *gc.C) {\n\tproxies := proxy.Settings{}\n\tc.Assert(proxies.AsScriptEnvironment(), gc.Equals, \"\")\n}\n\nfunc (s *proxySuite) TestAsScriptEnvironmentOneValue(c *gc.C) {\n\tproxies := proxy.Settings{\n\t\tHttp: \"some-value\",\n\t}\n\texpected := `\nexport http_proxy=some-value\nexport HTTP_PROXY=some-value`[1:]\n\tc.Assert(proxies.AsScriptEnvironment(), gc.Equals, 
expected)\n}\n\nfunc (s *proxySuite) TestAsScriptEnvironmentAllValue(c *gc.C) {\n\tproxies := proxy.Settings{\n\t\tHttp:    \"some-value\",\n\t\tHttps:   \"special\",\n\t\tFtp:     \"who uses this?\",\n\t\tNoProxy: \"10.0.3.1,localhost\",\n\t}\n\texpected := `\nexport http_proxy=some-value\nexport HTTP_PROXY=some-value\nexport https_proxy=special\nexport HTTPS_PROXY=special\nexport ftp_proxy=who uses this?\nexport FTP_PROXY=who uses this?\nexport no_proxy=10.0.3.1,localhost\nexport NO_PROXY=10.0.3.1,localhost`[1:]\n\tc.Assert(proxies.AsScriptEnvironment(), gc.Equals, expected)\n}\n\nfunc (s *proxySuite) TestAsEnvironmentValuesEmpty(c *gc.C) {\n\tproxies := proxy.Settings{}\n\tc.Assert(proxies.AsEnvironmentValues(), gc.HasLen, 0)\n}\n\nfunc (s *proxySuite) TestAsEnvironmentValuesOneValue(c *gc.C) {\n\tproxies := proxy.Settings{\n\t\tHttp: \"some-value\",\n\t}\n\texpected := []string{\n\t\t\"http_proxy=some-value\",\n\t\t\"HTTP_PROXY=some-value\",\n\t}\n\tc.Assert(proxies.AsEnvironmentValues(), gc.DeepEquals, expected)\n}\n\nfunc (s *proxySuite) TestAsEnvironmentValuesAllValue(c *gc.C) {\n\tproxies := proxy.Settings{\n\t\tHttp:    \"some-value\",\n\t\tHttps:   \"special\",\n\t\tFtp:     \"who uses this?\",\n\t\tNoProxy: \"10.0.3.1,localhost\",\n\t}\n\texpected := []string{\n\t\t\"http_proxy=some-value\",\n\t\t\"HTTP_PROXY=some-value\",\n\t\t\"https_proxy=special\",\n\t\t\"HTTPS_PROXY=special\",\n\t\t\"ftp_proxy=who uses this?\",\n\t\t\"FTP_PROXY=who uses this?\",\n\t\t\"no_proxy=10.0.3.1,localhost\",\n\t\t\"NO_PROXY=10.0.3.1,localhost\",\n\t}\n\tc.Assert(proxies.AsEnvironmentValues(), gc.DeepEquals, expected)\n}\n\nfunc (s *proxySuite) TestAsSystemdDefaultEnv(c *gc.C) {\n\tproxies := proxy.Settings{\n\t\tHttp:    \"some-value\",\n\t\tHttps:   \"special\",\n\t\tFtp:     \"who uses this?\",\n\t\tNoProxy: \"10.0.3.1,localhost\",\n\t}\n\texpected := `\n# To allow juju to control the global systemd proxy settings,\n# create symbolic links to this file from within 
/etc/systemd/system.conf.d/\n# and /etc/systemd/users.conf.d/.\n[Manager]\nDefaultEnvironment=\"http_proxy=some-value\" \"HTTP_PROXY=some-value\" \"https_proxy=special\" \"HTTPS_PROXY=special\" \"ftp_proxy=who uses this?\" \"FTP_PROXY=who uses this?\" \"no_proxy=10.0.3.1,localhost\" \"NO_PROXY=10.0.3.1,localhost\" \n`[1:]\n\tc.Assert(proxies.AsSystemdDefaultEnv(), gc.DeepEquals, expected)\n}\n\nfunc (s *proxySuite) TestSetEnvironmentValues(c *gc.C) {\n\ts.PatchEnvironment(\"http_proxy\", \"initial\")\n\ts.PatchEnvironment(\"HTTP_PROXY\", \"initial\")\n\ts.PatchEnvironment(\"https_proxy\", \"initial\")\n\ts.PatchEnvironment(\"HTTPS_PROXY\", \"initial\")\n\ts.PatchEnvironment(\"ftp_proxy\", \"initial\")\n\ts.PatchEnvironment(\"FTP_PROXY\", \"initial\")\n\ts.PatchEnvironment(\"no_proxy\", \"initial\")\n\ts.PatchEnvironment(\"NO_PROXY\", \"initial\")\n\n\tproxySettings := proxy.Settings{\n\t\tHttp:  \"http proxy\",\n\t\tHttps: \"https proxy\",\n\t\t// Ftp left blank to show clearing env.\n\t\tNoProxy: \"10.0.3.1,localhost\",\n\t}\n\tproxySettings.SetEnvironmentValues()\n\n\tobtained := proxy.DetectProxies()\n\n\tc.Assert(obtained, gc.DeepEquals, proxySettings)\n\n\tc.Assert(os.Getenv(\"http_proxy\"), gc.Equals, \"http proxy\")\n\tc.Assert(os.Getenv(\"HTTP_PROXY\"), gc.Equals, \"http proxy\")\n\tc.Assert(os.Getenv(\"https_proxy\"), gc.Equals, \"https proxy\")\n\tc.Assert(os.Getenv(\"HTTPS_PROXY\"), gc.Equals, \"https proxy\")\n\tc.Assert(os.Getenv(\"ftp_proxy\"), gc.Equals, \"\")\n\tc.Assert(os.Getenv(\"FTP_PROXY\"), gc.Equals, \"\")\n\tc.Assert(os.Getenv(\"no_proxy\"), gc.Equals, \"10.0.3.1,localhost\")\n\tc.Assert(os.Getenv(\"NO_PROXY\"), gc.Equals, \"10.0.3.1,localhost\")\n}\n\nfunc (s *proxySuite) TestAutoNoProxy(c *gc.C) {\n\tproxies := proxy.Settings{\n\t\tNoProxy: \"10.0.3.1,localhost\",\n\t}\n\n\texpectedFirst := []string{\n\t\t\"no_proxy=10.0.3.1,localhost\",\n\t\t\"NO_PROXY=10.0.3.1,localhost\",\n\t}\n\texpectedSecond := 
[]string{\n\t\t\"no_proxy=10.0.3.1,10.0.3.2,localhost\",\n\t\t\"NO_PROXY=10.0.3.1,10.0.3.2,localhost\",\n\t}\n\n\tc.Assert(proxies.AsEnvironmentValues(), gc.DeepEquals, expectedFirst)\n\tproxies.AutoNoProxy = \"10.0.3.1,10.0.3.2\"\n\tc.Assert(proxies.AsEnvironmentValues(), gc.DeepEquals, expectedSecond)\n}\n"
  },
  {
    "path": "randomstring.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils\n\nimport (\n\t\"math/rand\"\n\t\"sync\"\n\t\"time\"\n)\n\n// Can be used as a sane default argument for RandomString\nvar (\n\tLowerAlpha = []rune(\"abcdefghijklmnopqrstuvwxyz\")\n\tUpperAlpha = []rune(\"ABCDEFGHIJKLMNOPQRSTUVWXYZ\")\n\tDigits     = []rune(\"0123456789\")\n)\n\nvar (\n\trandomStringMu   sync.Mutex\n\trandomStringRand *rand.Rand\n)\n\nfunc init() {\n\trandomStringRand = rand.New(\n\t\trand.NewSource(time.Now().UnixNano()),\n\t)\n}\n\n// RandomString will return a string of length n that will only\n// contain runes inside validRunes\nfunc RandomString(n int, validRunes []rune) string {\n\trandomStringMu.Lock()\n\tdefer randomStringMu.Unlock()\n\n\trunes := make([]rune, n)\n\tfor i := range runes {\n\t\trunes[i] = validRunes[randomStringRand.Intn(len(validRunes))]\n\t}\n\n\treturn string(runes)\n}\n"
  },
  {
    "path": "randomstring_test.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Copyright 2015 Cloudbase Solutions SRL\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils_test\n\nimport (\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\t\"github.com/juju/utils/v4\"\n\tgc \"gopkg.in/check.v1\"\n)\n\ntype randomStringSuite struct {\n\ttesting.IsolationSuite\n}\n\nvar _ = gc.Suite(&randomStringSuite{})\n\nvar (\n\tvalidChars = []rune(\"thisissorandom\")\n\tlength     = 7\n)\n\nfunc (randomStringSuite) TestLength(c *gc.C) {\n\ts := utils.RandomString(length, validChars)\n\tc.Assert(s, gc.HasLen, length)\n}\n\nfunc (randomStringSuite) TestContentInValidRunes(c *gc.C) {\n\ts := utils.RandomString(length, validChars)\n\tfor _, char := range s {\n\t\tc.Assert(string(validChars), jc.Contains, string(char))\n\t}\n}\n"
  },
  {
    "path": "registry/export_test.go",
    "content": "// Copyright 2012, 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage registry\n\nvar (\n\tDescriptionFromVersions = descriptionFromVersions\n)\n"
  },
  {
    "path": "registry/package_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage registry_test\n\nimport (\n\t\"testing\"\n\n\tgc \"gopkg.in/check.v1\"\n)\n\nfunc TestAll(t *testing.T) {\n\tgc.TestingT(t)\n}\n"
  },
  {
    "path": "registry/registry.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage registry\n\nimport (\n\t\"fmt\"\n\t\"reflect\"\n\t\"sort\"\n\n\t\"github.com/juju/errors\"\n)\n\n// TypedNameVersion is a registry that will allow you to register objects based\n// on a name and version pair. The objects must be convertible to the Type\n// defined when the registry was created. It will be cast during Register so\n// you can be sure all objects returned from Get() are safe to TypeAssert to\n// that type.\ntype TypedNameVersion struct {\n\trequiredType reflect.Type\n\tversions     map[string]Versions\n}\n\n// NewTypedNameVersion creates a place to register your objects\nfunc NewTypedNameVersion(requiredType reflect.Type) *TypedNameVersion {\n\treturn &TypedNameVersion{\n\t\trequiredType: requiredType,\n\t\tversions:     make(map[string]Versions),\n\t}\n}\n\n// Description gives the name and available versions in a registry.\ntype Description struct {\n\tName     string\n\tVersions []int\n}\n\n// Versions maps concrete versions of the objects.\ntype Versions map[int]any\n\n// Register records the factory that can be used to produce an instance of the\n// facade at the supplied version.\n// If the object being registered doesn't Implement the required Type, then an\n// error is returned.\n// An error is also returned if an object is already registered with the given\n// name and version.\nfunc (r *TypedNameVersion) Register(name string, version int, obj any) error {\n\tif !reflect.TypeOf(obj).ConvertibleTo(r.requiredType) {\n\t\treturn fmt.Errorf(\"object of type %T cannot be converted to type %s.%s\", obj, r.requiredType.PkgPath(), r.requiredType.Name())\n\t}\n\tobj = reflect.ValueOf(obj).Convert(r.requiredType).Interface()\n\tif r.versions == nil {\n\t\tr.versions = make(map[string]Versions, 1)\n\t}\n\tif versions, ok := r.versions[name]; ok {\n\t\tif _, ok := versions[version]; ok {\n\t\t\tfullname := fmt.Sprintf(\"%s(%d)\", name, 
version)\n\t\t\treturn fmt.Errorf(\"object %q already registered\", fullname)\n\t\t}\n\t\tversions[version] = obj\n\t} else {\n\t\tr.versions[name] = Versions{version: obj}\n\t}\n\treturn nil\n}\n\n// descriptionFromVersions aggregates the information in a Versions map into a\n// more friendly form for List()\nfunc descriptionFromVersions(name string, versions Versions) Description {\n\tintVersions := make([]int, 0, len(versions))\n\tfor version := range versions {\n\t\tintVersions = append(intVersions, version)\n\t}\n\tsort.Ints(intVersions)\n\treturn Description{\n\t\tName:     name,\n\t\tVersions: intVersions,\n\t}\n}\n\n// List returns a slice describing each of the registered Facades.\nfunc (r *TypedNameVersion) List() []Description {\n\tnames := make([]string, 0, len(r.versions))\n\tfor name := range r.versions {\n\t\tnames = append(names, name)\n\t}\n\tsort.Strings(names)\n\tdescriptions := make([]Description, len(r.versions))\n\tfor i, name := range names {\n\t\tversions := r.versions[name]\n\t\tdescriptions[i] = descriptionFromVersions(name, versions)\n\t}\n\treturn descriptions\n}\n\n// Get returns the object for a single name and version. If the requested\n// facade is not found, it returns error.NotFound\nfunc (r *TypedNameVersion) Get(name string, version int) (any, error) {\n\tif versions, ok := r.versions[name]; ok {\n\t\tif factory, ok := versions[version]; ok {\n\t\t\treturn factory, nil\n\t\t}\n\t}\n\treturn nil, errors.NotFoundf(\"%s(%d)\", name, version)\n}\n"
  },
  {
    "path": "registry/registry_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage registry_test\n\nimport (\n\t\"reflect\"\n\n\t\"github.com/juju/errors\"\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/registry\"\n)\n\ntype registrySuite struct {\n\ttesting.IsolationSuite\n}\n\nvar _ = gc.Suite(&registrySuite{})\n\ntype Factory func() (any, error)\n\nfunc nilFactory() (any, error) {\n\treturn nil, nil\n}\n\nvar factoryType = reflect.TypeOf((*Factory)(nil)).Elem()\n\ntype testFacade struct {\n\tversion string\n\tcalled  bool\n}\n\ntype stringVal struct {\n\tvalue string\n}\n\nfunc (t *testFacade) TestMethod() stringVal {\n\tt.called = true\n\treturn stringVal{\"called \" + t.version}\n}\n\nfunc (s *registrySuite) TestDescriptionFromVersions(c *gc.C) {\n\tversions := registry.Versions{0: nilFactory}\n\tc.Check(registry.DescriptionFromVersions(\"name\", versions),\n\t\tgc.DeepEquals,\n\t\tregistry.Description{\n\t\t\tName:     \"name\",\n\t\t\tVersions: []int{0},\n\t\t})\n\tversions[2] = nilFactory\n\tc.Check(registry.DescriptionFromVersions(\"name\", versions),\n\t\tgc.DeepEquals,\n\t\tregistry.Description{\n\t\t\tName:     \"name\",\n\t\t\tVersions: []int{0, 2},\n\t\t})\n}\n\nfunc (s *registrySuite) TestDescriptionFromVersionsAreSorted(c *gc.C) {\n\tversions := registry.Versions{\n\t\t10: nilFactory,\n\t\t5:  nilFactory,\n\t\t0:  nilFactory,\n\t\t18: nilFactory,\n\t\t6:  nilFactory,\n\t\t4:  nilFactory,\n\t}\n\tc.Check(registry.DescriptionFromVersions(\"name\", versions),\n\t\tgc.DeepEquals,\n\t\tregistry.Description{\n\t\t\tName:     \"name\",\n\t\t\tVersions: []int{0, 4, 5, 6, 10, 18},\n\t\t})\n}\n\nfunc (s *registrySuite) TestRegisterAndList(c *gc.C) {\n\tr := registry.NewTypedNameVersion(factoryType)\n\tc.Assert(r.Register(\"name\", 0, nilFactory), gc.IsNil)\n\tc.Check(r.List(), gc.DeepEquals, []registry.Description{\n\t\t{Name: \"name\", 
Versions: []int{0}},\n\t})\n}\n\nfunc (s *registrySuite) TestRegisterAndListMultiple(c *gc.C) {\n\tr := registry.NewTypedNameVersion(factoryType)\n\tc.Assert(r.Register(\"other\", 0, nilFactory), gc.IsNil)\n\tc.Assert(r.Register(\"name\", 0, nilFactory), gc.IsNil)\n\tc.Assert(r.Register(\"third\", 2, nilFactory), gc.IsNil)\n\tc.Check(r.List(), gc.DeepEquals, []registry.Description{\n\t\t{Name: \"name\", Versions: []int{0}},\n\t\t{Name: \"other\", Versions: []int{0}},\n\t\t{Name: \"third\", Versions: []int{2}},\n\t})\n}\n\nfunc (s *registrySuite) TestRegisterWrongType(c *gc.C) {\n\tr := registry.NewTypedNameVersion(factoryType)\n\terr := r.Register(\"other\", 0, \"notAFactory\")\n\tc.Check(err, gc.ErrorMatches, `object of type string cannot be converted to type .*registry_test.Factory`)\n}\n\nfunc (s *registrySuite) TestRegisterAlreadyPresent(c *gc.C) {\n\tr := registry.NewTypedNameVersion(factoryType)\n\terr := r.Register(\"name\", 0, func() (any, error) {\n\t\treturn \"orig\", nil\n\t})\n\tc.Assert(err, gc.IsNil)\n\terr = r.Register(\"name\", 0, func() (any, error) {\n\t\treturn \"broken\", nil\n\t})\n\tc.Check(err, gc.ErrorMatches, `object \"name\\(0\\)\" already registered`)\n\tf, err := r.Get(\"name\", 0)\n\tc.Assert(err, gc.IsNil)\n\tval, err := f.(Factory)()\n\tc.Assert(err, gc.IsNil)\n\tc.Check(val, gc.Equals, \"orig\")\n}\n\nfunc (s *registrySuite) TestGet(c *gc.C) {\n\tr := registry.NewTypedNameVersion(factoryType)\n\tcustomFactory := func() (any, error) {\n\t\treturn 10, nil\n\t}\n\tc.Assert(r.Register(\"name\", 0, customFactory), gc.IsNil)\n\tf, err := r.Get(\"name\", 0)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(f, gc.NotNil)\n\tres, err := f.(Factory)()\n\tc.Assert(err, gc.IsNil)\n\tc.Check(res, gc.Equals, 10)\n}\n\nfunc (s *registrySuite) TestGetUnknown(c *gc.C) {\n\tr := registry.NewTypedNameVersion(factoryType)\n\tf, err := r.Get(\"name\", 0)\n\tc.Check(err, jc.Satisfies, errors.IsNotFound)\n\tc.Check(err, gc.ErrorMatches, `name\\(0\\) not 
found`)\n\tc.Check(f, gc.IsNil)\n}\n\nfunc (s *registrySuite) TestGetUnknownVersion(c *gc.C) {\n\tr := registry.NewTypedNameVersion(factoryType)\n\tc.Assert(r.Register(\"name\", 0, nilFactory), gc.IsNil)\n\tf, err := r.Get(\"name\", 1)\n\tc.Check(err, jc.Satisfies, errors.IsNotFound)\n\tc.Check(err, gc.ErrorMatches, `name\\(1\\) not found`)\n\tc.Check(f, gc.IsNil)\n}\n"
  },
  {
    "path": "relativeurl.go",
    "content": "// Copyright 2016 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils\n\nimport (\n\t\"strings\"\n\n\t\"github.com/juju/errors\"\n)\n\n// RelativeURLPath returns a relative URL path that is lexically\n// equivalent to targpath when interpreted by url.URL.ResolveReference.\n// On success, the returned path will always be non-empty and relative\n// to basePath, even if basePath and targPath share no elements.\n//\n// It is assumed that both basePath and targPath are normalized\n// (have no . or .. elements).\n//\n// An error is returned if basePath or targPath are not absolute paths.\nfunc RelativeURLPath(basePath, targPath string) (string, error) {\n\tif !strings.HasPrefix(basePath, \"/\") {\n\t\treturn \"\", errors.New(\"non-absolute base URL\")\n\t}\n\tif !strings.HasPrefix(targPath, \"/\") {\n\t\treturn \"\", errors.New(\"non-absolute target URL\")\n\t}\n\tbaseParts := strings.Split(basePath, \"/\")\n\ttargParts := strings.Split(targPath, \"/\")\n\n\t// For the purposes of dotdot, the last element of\n\t// the paths are irrelevant. 
We save the last part\n\t// of the target path for later.\n\tlastElem := targParts[len(targParts)-1]\n\tbaseParts = baseParts[0 : len(baseParts)-1]\n\ttargParts = targParts[0 : len(targParts)-1]\n\n\t// Find the common prefix between the two paths:\n\tvar i int\n\tfor ; i < len(baseParts); i++ {\n\t\tif i >= len(targParts) || baseParts[i] != targParts[i] {\n\t\t\tbreak\n\t\t}\n\t}\n\tdotdotCount := len(baseParts) - i\n\ttargOnly := targParts[i:]\n\tresult := make([]string, 0, dotdotCount+len(targOnly)+1)\n\tfor i := 0; i < dotdotCount; i++ {\n\t\tresult = append(result, \"..\")\n\t}\n\tresult = append(result, targOnly...)\n\tresult = append(result, lastElem)\n\tfinal := strings.Join(result, \"/\")\n\tif final == \"\" {\n\t\t// If the final result is empty, the last element must\n\t\t// have been empty, so the target was slash terminated\n\t\t// and there were no previous elements, so \".\"\n\t\t// is appropriate.\n\t\tfinal = \".\"\n\t}\n\treturn final, nil\n}\n"
  },
  {
    "path": "relativeurl_test.go",
    "content": "// Copyright 2016 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils_test\n\nimport (\n\t\"net/url\"\n\n\tjujutesting \"github.com/juju/testing\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4\"\n)\n\ntype relativeURLSuite struct {\n\tjujutesting.LoggingSuite\n}\n\nvar _ = gc.Suite(&relativeURLSuite{})\n\nvar relativeURLTests = []struct {\n\tbase        string\n\ttarget      string\n\texpect      string\n\texpectError string\n}{{\n\texpectError: \"non-absolute base URL\",\n}, {\n\tbase:        \"/foo\",\n\texpectError: \"non-absolute target URL\",\n}, {\n\tbase:        \"foo\",\n\texpectError: \"non-absolute base URL\",\n}, {\n\tbase:        \"/foo\",\n\ttarget:      \"foo\",\n\texpectError: \"non-absolute target URL\",\n}, {\n\tbase:   \"/foo\",\n\ttarget: \"/bar\",\n\texpect: \"bar\",\n}, {\n\tbase:   \"/foo/\",\n\ttarget: \"/bar\",\n\texpect: \"../bar\",\n}, {\n\tbase:   \"/bar\",\n\ttarget: \"/foo/\",\n\texpect: \"foo/\",\n}, {\n\tbase:   \"/foo/\",\n\ttarget: \"/bar/\",\n\texpect: \"../bar/\",\n}, {\n\tbase:   \"/foo/bar\",\n\ttarget: \"/bar/\",\n\texpect: \"../bar/\",\n}, {\n\tbase:   \"/foo/bar/\",\n\ttarget: \"/bar/\",\n\texpect: \"../../bar/\",\n}, {\n\tbase:   \"/foo/bar/baz\",\n\ttarget: \"/foo/targ\",\n\texpect: \"../targ\",\n}, {\n\tbase:   \"/foo/bar/baz/frob\",\n\ttarget: \"/foo/bar/one/two/\",\n\texpect: \"../one/two/\",\n}, {\n\tbase:   \"/foo/bar/baz/\",\n\ttarget: \"/foo/targ\",\n\texpect: \"../../targ\",\n}, {\n\tbase:   \"/foo/bar/baz/frob/\",\n\ttarget: \"/foo/bar/one/two/\",\n\texpect: \"../../one/two/\",\n}, {\n\tbase:   \"/foo/bar\",\n\ttarget: \"/foot/bar\",\n\texpect: \"../foot/bar\",\n}, {\n\tbase:   \"/foo/bar/baz/frob\",\n\ttarget: \"/foo/bar\",\n\texpect: \"../../bar\",\n}, {\n\tbase:   \"/foo/bar/baz/frob/\",\n\ttarget: \"/foo/bar\",\n\texpect: \"../../../bar\",\n}, {\n\tbase:   \"/foo/bar/baz/frob/\",\n\ttarget: \"/foo/bar/\",\n\texpect: \"../../\",\n}, 
{\n\tbase:   \"/foo/bar/baz\",\n\ttarget: \"/foo/bar/other\",\n\texpect: \"other\",\n}, {\n\tbase:   \"/foo/bar/\",\n\ttarget: \"/foo/bar/\",\n\texpect: \".\",\n}, {\n\tbase:   \"/foo/bar\",\n\ttarget: \"/foo/bar\",\n\texpect: \"bar\",\n}, {\n\tbase:   \"/foo/bar/\",\n\ttarget: \"/foo/bar/\",\n\texpect: \".\",\n}, {\n\tbase:   \"/foo/bar\",\n\ttarget: \"/foo/\",\n\texpect: \".\",\n}, {\n\tbase:   \"/foo\",\n\ttarget: \"/\",\n\texpect: \".\",\n}, {\n\tbase:   \"/foo/\",\n\ttarget: \"/\",\n\texpect: \"../\",\n}, {\n\tbase:   \"/foo/bar\",\n\ttarget: \"/\",\n\texpect: \"../\",\n}, {\n\tbase:   \"/foo/bar/\",\n\ttarget: \"/\",\n\texpect: \"../../\",\n}}\n\nfunc (*relativeURLSuite) TestRelativeURL(c *gc.C) {\n\tfor i, test := range relativeURLTests {\n\t\tc.Logf(\"test %d: %q %q\", i, test.base, test.target)\n\t\t// Sanity check the test itself.\n\t\tif test.expectError == \"\" {\n\t\t\tbaseURL := &url.URL{Path: test.base}\n\t\t\texpectURL := &url.URL{Path: test.expect}\n\t\t\ttargetURL := baseURL.ResolveReference(expectURL)\n\t\t\tc.Check(targetURL.Path, gc.Equals, test.target, gc.Commentf(\"resolve reference failure (%q + %q != %q)\", test.base, test.expect, test.target))\n\t\t}\n\n\t\tresult, err := utils.RelativeURLPath(test.base, test.target)\n\t\tif test.expectError != \"\" {\n\t\t\tc.Assert(err, gc.ErrorMatches, test.expectError)\n\t\t\tc.Assert(result, gc.Equals, \"\")\n\t\t} else {\n\t\t\tc.Assert(err, gc.IsNil)\n\t\t\tc.Check(result, gc.Equals, test.expect)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "setenv.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils\n\nimport (\n\t\"strings\"\n)\n\n// Setenv sets an environment variable entry in the given env slice (as\n// returned by os.Environ or passed in exec.Cmd.Environ) to the given\n// value. The entry should be in the form \"x=y\" where x is the name of the\n// environment variable and y is its value; if not, env will be\n// returned unchanged.\n//\n// If a value isn't already present in the slice, the entry is appended.\n//\n// The new environ slice is returned.\nfunc Setenv(env []string, entry string) []string {\n\ti := strings.Index(entry, \"=\")\n\tif i == -1 {\n\t\treturn env\n\t}\n\tprefix := entry[0 : i+1]\n\tfor i, e := range env {\n\t\tif strings.HasPrefix(e, prefix) {\n\t\t\tenv[i] = entry\n\t\t\treturn env\n\t\t}\n\t}\n\treturn append(env, entry)\n}\n"
  },
  {
    "path": "setenv_test.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils_test\n\nimport (\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4\"\n)\n\ntype SetenvSuite struct{}\n\nvar _ = gc.Suite(&SetenvSuite{})\n\nvar setenvTests = []struct {\n\tset    string\n\texpect []string\n}{\n\t{\"foo=1\", []string{\"foo=1\", \"arble=\"}},\n\t{\"foo=\", []string{\"foo=\", \"arble=\"}},\n\t{\"arble=23\", []string{\"foo=bar\", \"arble=23\"}},\n\t{\"zaphod=42\", []string{\"foo=bar\", \"arble=\", \"zaphod=42\"}},\n\t{\"bar\", []string{\"foo=bar\", \"arble=\"}},\n}\n\nfunc (*SetenvSuite) TestSetenv(c *gc.C) {\n\tenv0 := []string{\"foo=bar\", \"arble=\"}\n\tfor i, t := range setenvTests {\n\t\tc.Logf(\"test %d\", i)\n\t\tenv := make([]string, len(env0))\n\t\tcopy(env, env0)\n\t\tenv = utils.Setenv(env, t.set)\n\t\tc.Check(env, gc.DeepEquals, t.expect)\n\t}\n}\n"
  },
  {
    "path": "shell/bash.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage shell\n\nimport (\n\t\"strings\"\n)\n\n// BashRenderer is the shell renderer for bash.\ntype BashRenderer struct {\n\tunixRenderer\n}\n\n// Render implements ScriptWriter.\nfunc (*BashRenderer) RenderScript(commands []string) []byte {\n\tcommands = append([]string{\"#!/usr/bin/env bash\", \"\"}, commands...)\n\treturn []byte(strings.Join(commands, \"\\n\"))\n}\n"
  },
  {
    "path": "shell/bash_test.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage shell_test\n\nimport (\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/shell\"\n)\n\ntype bashSuite struct {\n\ttesting.IsolationSuite\n\n\tdirname  string\n\tfilename string\n\trenderer *shell.BashRenderer\n}\n\nvar _ = gc.Suite(&bashSuite{})\n\nfunc (s *bashSuite) SetUpTest(c *gc.C) {\n\ts.IsolationSuite.SetUpTest(c)\n\n\ts.dirname = `/some/dir`\n\ts.filename = s.dirname + `/file`\n\ts.renderer = &shell.BashRenderer{}\n}\n\nfunc (s bashSuite) TestExeSuffix(c *gc.C) {\n\tsuffix := s.renderer.ExeSuffix()\n\n\tc.Check(suffix, gc.Equals, \"\")\n}\n\nfunc (s bashSuite) TestShQuote(c *gc.C) {\n\tquoted := s.renderer.Quote(\"abc\")\n\n\tc.Check(quoted, gc.Equals, `'abc'`)\n}\n\nfunc (s bashSuite) TestChmod(c *gc.C) {\n\tcommands := s.renderer.Chmod(s.filename, 0644)\n\n\tc.Check(commands, jc.DeepEquals, []string{\n\t\t\"chmod 0644 '/some/dir/file'\",\n\t})\n}\n\nfunc (s bashSuite) TestWriteFile(c *gc.C) {\n\tdata := []byte(\"something\\nhere\\n\")\n\tcommands := s.renderer.WriteFile(s.filename, data)\n\n\texpected := `cat > '/some/dir/file' << 'EOF'\nsomething\nhere\n\nEOF`\n\tc.Check(commands, jc.DeepEquals, []string{\n\t\texpected,\n\t})\n}\n\nfunc (s bashSuite) TestMkdir(c *gc.C) {\n\tcommands := s.renderer.Mkdir(s.dirname)\n\n\tc.Check(commands, jc.DeepEquals, []string{\n\t\t`mkdir '/some/dir'`,\n\t})\n}\n\nfunc (s bashSuite) TestMkdirAll(c *gc.C) {\n\tcommands := s.renderer.MkdirAll(s.dirname)\n\n\tc.Check(commands, jc.DeepEquals, []string{\n\t\t`mkdir -p '/some/dir'`,\n\t})\n}\n\nfunc (s bashSuite) TestChown(c *gc.C) {\n\tcommands := s.renderer.Chown(\"/a/b/c\", \"x\", \"y\")\n\n\tc.Check(commands, jc.DeepEquals, []string{\n\t\t\"chown x:y '/a/b/c'\",\n\t})\n}\n\nfunc (s bashSuite) TestTouchDefault(c *gc.C) {\n\tcommands := 
s.renderer.Touch(\"/a/b/c\", nil)\n\n\tc.Check(commands, jc.DeepEquals, []string{\n\t\t\"touch '/a/b/c'\",\n\t})\n}\n\nfunc (s bashSuite) TestTouchTimestamp(c *gc.C) {\n\tnow := time.Date(2015, time.Month(3), 14, 12, 26, 38, 0, time.UTC)\n\tcommands := s.renderer.Touch(\"/a/b/c\", &now)\n\n\tc.Check(commands, jc.DeepEquals, []string{\n\t\t\"touch -t 201503141226.38 '/a/b/c'\",\n\t})\n}\n\nfunc (s bashSuite) TestRedirectFD(c *gc.C) {\n\tcommands := s.renderer.RedirectFD(\"stdout\", \"stderr\")\n\n\tc.Check(commands, jc.DeepEquals, []string{\n\t\t\"exec 2>&1\",\n\t})\n}\n\nfunc (s bashSuite) TestRedirectOutput(c *gc.C) {\n\tcommands := s.renderer.RedirectOutput(\"/a/b/c\")\n\n\tc.Check(commands, jc.DeepEquals, []string{\n\t\t\"exec >> '/a/b/c'\",\n\t})\n}\n\nfunc (s bashSuite) TestRedirectOutputReset(c *gc.C) {\n\tcommands := s.renderer.RedirectOutputReset(\"/a/b/c\")\n\n\tc.Check(commands, jc.DeepEquals, []string{\n\t\t\"exec > '/a/b/c'\",\n\t})\n}\n\nfunc (s bashSuite) TestScriptFilename(c *gc.C) {\n\tfilename := s.renderer.ScriptFilename(\"spam\", \"/ham/eggs\")\n\n\tc.Check(filename, gc.Equals, \"/ham/eggs/spam.sh\")\n}\n\nfunc (s bashSuite) TestScriptPermissions(c *gc.C) {\n\tperm := s.renderer.ScriptPermissions()\n\n\tc.Check(perm, gc.Equals, os.FileMode(0755))\n}\n"
  },
  {
    "path": "shell/command.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage shell\n\nimport (\n\t\"os\"\n\t\"time\"\n)\n\n// CommandRenderer provides methods that may be used to generate shell\n// commands for a variety of shell and filesystem operations.\ntype CommandRenderer interface {\n\t// Chown returns a shell command for changing the ownership of\n\t// a file or directory. The copies the behavior of os.Chown,\n\t// though it also supports names in addition to ints.\n\tChown(name, user, group string) []string\n\n\t// Chmod returns a shell command that sets the given file's\n\t// permissions. The result is equivalent to os.Chmod.\n\tChmod(path string, perm os.FileMode) []string\n\n\t// WriteFile returns a shell command that writes the provided\n\t// content to a file. The command is functionally equivalent to\n\t// ioutil.WriteFile with permissions from the current umask.\n\tWriteFile(filename string, data []byte) []string\n\n\t// Mkdir returns a shell command for creating a directory. The\n\t// command is functionally equivalent to os.MkDir using permissions\n\t// appropriate for a directory.\n\tMkdir(dirname string) []string\n\n\t// MkdirAll returns a shell command for creating a directory and\n\t// all missing parent directories. The command is functionally\n\t// equivalent to os.MkDirAll using permissions appropriate for\n\t// a directory.\n\tMkdirAll(dirname string) []string\n\n\t// Touch returns a shell command that updates the atime and ctime\n\t// of the named file. If the provided timestamp is nil then the\n\t// current time is used. If the file does not exist then it is\n\t// created. If UTC is desired then Time.UTC() should be called\n\t// before calling Touch.\n\tTouch(filename string, timestamp *time.Time) []string\n}\n"
  },
  {
    "path": "shell/interface_test.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage shell\n\nvar _ Renderer = (*BashRenderer)(nil)\nvar _ Renderer = (*PowershellRenderer)(nil)\nvar _ Renderer = (*WinCmdRenderer)(nil)\n"
  },
  {
    "path": "shell/output.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage shell\n\nimport (\n\t\"strconv\"\n\t\"strings\"\n)\n\n// OutputRenderer exposes the Renderer methods that relate to shell output.\n//\n// The methods all accept strings to identify their file descriptor\n// arguments. While the interpretation of these values is up to the\n// renderer, it will likely conform to the result of calling ResolveFD.\n// If an FD arg is not recognized then no commands will be returned.\n// Unless otherwise specified, the default file descriptor is stdout\n// (FD 1). This applies to the empty string.\ntype OutputRenderer interface {\n\t// RedirectFD returns a shell command that redirects the src\n\t// file descriptor to the dst one.\n\tRedirectFD(dst, src string) []string\n\n\t// TODO(ericsnow) Add CopyFD and CreateFD?\n\n\t// TODO(ericsnow) Support passing the src FD as an arg?\n\n\t// RedirectOutput will cause all subsequent output from the shell\n\t// (or script) to go be appended to the given file. Only stdout is\n\t// redirected (use RedirectFD to redirect stderr or other FDs).\n\t//\n\t// The file should already exist (so a call to Touch may be\n\t// necessary before calling RedirectOutput). If the file should have\n\t// specific permissions or a specific owner then Chmod and Chown\n\t// should be called before calling RedirectOutput.\n\tRedirectOutput(filename string) []string\n\n\t// RedirectOutputReset will cause all subsequent output from the\n\t// shell (or script) to go be written to the given file. The file\n\t// will be reset (truncated to 0) before anything is written. Only\n\t// stdout is redirected (use RedirectFD to redirect stderr or\n\t// other FDs).\n\t//\n\t// The file should already exist (so a call to Touch may be\n\t// necessary before calling RedirectOutputReset). 
If the file should\n\t// have specific permissions or a specific owner then Chmod and\n\t// Chown should be called before calling RedirectOutputReset.\n\tRedirectOutputReset(filename string) []string\n}\n\n// ResolveFD converts the file descriptor name to the corresponding int.\n// \"stdout\" and \"out\" match stdout (FD 1). \"stderr\" and \"err\" match\n// stderr (FD 2), \"stdin\" and \"in\" match stdin (FD 0). All positive\n// integers match. If there should be an upper bound then the caller\n// should check it on the result. If the provided name is empty then\n// the result defaults to stdout. If the name is not recognized then\n// false is returned.\nfunc ResolveFD(name string) (int, bool) {\n\tswitch strings.ToLower(name) {\n\tcase \"stdout\", \"out\", \"\":\n\t\treturn 1, true\n\tcase \"stderr\", \"err\":\n\t\treturn 2, true\n\tcase \"stdin\", \"in\":\n\t\treturn 0, true\n\tdefault:\n\t\tfd, err := strconv.ParseUint(name, 10, 64)\n\t\tif err != nil {\n\t\t\treturn -1, false\n\t\t}\n\t\treturn int(fd), true\n\t}\n}\n"
  },
  {
    "path": "shell/package_test.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage shell_test\n\nimport (\n\t\"testing\"\n\n\tgc \"gopkg.in/check.v1\"\n)\n\nfunc Test(t *testing.T) {\n\tgc.TestingT(t)\n}\n"
  },
  {
    "path": "shell/powershell.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage shell\n\nimport (\n\t\"encoding/base64\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"golang.org/x/text/encoding/unicode\"\n\n\t\"github.com/juju/errors\"\n\n\t\"github.com/juju/utils/v4\"\n)\n\n// PowershellRenderer is a shell renderer for Windows Powershell.\ntype PowershellRenderer struct {\n\twindowsRenderer\n}\n\n// Quote implements Renderer.\nfunc (pr *PowershellRenderer) Quote(str string) string {\n\treturn utils.WinPSQuote(str)\n}\n\n// Chmod implements Renderer.\nfunc (pr *PowershellRenderer) Chmod(path string, perm os.FileMode) []string {\n\t// TODO(ericsnow) Is this necessary? Should we use Set-Acl?\n\treturn nil\n}\n\n// WriteFile implements Renderer.\nfunc (pr *PowershellRenderer) WriteFile(filename string, data []byte) []string {\n\tfilename = pr.Quote(filename)\n\treturn []string{\n\t\tfmt.Sprintf(\"Set-Content %s @\\\"\\n%s\\n\\\"@\", filename, data),\n\t}\n}\n\n// MkDir implements Renderer.\nfunc (pr *PowershellRenderer) Mkdir(dirname string) []string {\n\tdirname = pr.FromSlash(dirname)\n\treturn []string{\n\t\tfmt.Sprintf(`mkdir %s`, pr.Quote(dirname)),\n\t}\n}\n\n// MkdirAll implements Renderer.\nfunc (pr *PowershellRenderer) MkdirAll(dirname string) []string {\n\treturn pr.Mkdir(dirname)\n}\n\n// ScriptFilename implements ScriptWriter.\nfunc (pr *PowershellRenderer) ScriptFilename(name, dirname string) string {\n\treturn pr.Join(dirname, name+\".ps1\")\n}\n\n// By default, winrm executes command usind cmd. 
Prefix the command we send over WinRM with powershell.exe.\n// powershell.exe is the program that will execute the \"%s\" encoded command.\n// A breakdown of the parameters:\n//\n//\t-NonInteractive - prevent any prompts from stopping the execution of the scripts\n//\t-ExecutionPolicy - sets the execution policy for the current command, regardless of the default ExecutionPolicy on the system.\n//\t-EncodedCommand - allows us to run a base64 encoded script. This spares us from having to quote/escape shell special characters.\nconst psRemoteWrapper = \"powershell.exe -Sta -NonInteractive -ExecutionPolicy RemoteSigned -EncodedCommand %s\"\n\n// newEncodedPSScript returns a UTF16-LE, base64 encoded script.\n// The -EncodedCommand parameter expects this encoding for any base64 script we send over.\nfunc newEncodedPSScript(script string) (string, error) {\n\tuni := unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM)\n\tencoded, err := uni.NewEncoder().String(script)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn base64.StdEncoding.EncodeToString([]byte(encoded)), nil\n}\n\n// NewPSEncodedCommand converts the given string to a UTF16-LE, base64 encoded string,\n// suitable for execution using powershell.exe -EncodedCommand. This can be used on\n// local systems, as well as remote systems via WinRM.\nfunc NewPSEncodedCommand(script string) (string, error) {\n\tvar err error\n\tscript, err = newEncodedPSScript(script)\n\tif err != nil {\n\t\treturn \"\", errors.Annotatef(err, \"Cannot construct powershell command for remote execution\")\n\t}\n\n\treturn fmt.Sprintf(psRemoteWrapper, script), nil\n}\n"
  },
  {
    "path": "shell/powershell_test.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage shell_test\n\nimport (\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/shell\"\n)\n\nvar _ = gc.Suite(&powershellSuite{})\n\ntype powershellSuite struct {\n\ttesting.IsolationSuite\n\n\tdirname  string\n\tfilename string\n\trenderer *shell.PowershellRenderer\n}\n\nfunc (s *powershellSuite) SetUpTest(c *gc.C) {\n\ts.IsolationSuite.SetUpTest(c)\n\n\ts.dirname = `C:\\some\\dir`\n\ts.filename = s.dirname + `\\file`\n\ts.renderer = &shell.PowershellRenderer{}\n}\n\nfunc (s powershellSuite) TestExeSuffix(c *gc.C) {\n\tsuffix := s.renderer.ExeSuffix()\n\n\tc.Check(suffix, gc.Equals, \".exe\")\n}\n\nfunc (s powershellSuite) TestShQuote(c *gc.C) {\n\tquoted := s.renderer.Quote(\"abc\")\n\n\tc.Check(quoted, gc.Equals, `'abc'`)\n}\n\nfunc (s powershellSuite) TestChmod(c *gc.C) {\n\tcommands := s.renderer.Chmod(s.filename, 0644)\n\n\tc.Check(commands, gc.HasLen, 0)\n}\n\nfunc (s powershellSuite) TestWriteFile(c *gc.C) {\n\tdata := []byte(\"something\\nhere\\n\")\n\tcommands := s.renderer.WriteFile(s.filename, data)\n\n\texpected := `\nSet-Content 'C:\\some\\dir\\file' @\"\nsomething\nhere\n\n\"@`[1:]\n\tc.Check(commands, jc.DeepEquals, []string{\n\t\texpected,\n\t})\n}\n\nfunc (s powershellSuite) TestMkdir(c *gc.C) {\n\tcommands := s.renderer.Mkdir(s.dirname)\n\n\tc.Check(commands, jc.DeepEquals, []string{\n\t\t`mkdir 'C:\\some\\dir'`,\n\t})\n}\n\nfunc (s powershellSuite) TestMkdirAll(c *gc.C) {\n\tcommands := s.renderer.MkdirAll(s.dirname)\n\n\tc.Check(commands, jc.DeepEquals, []string{\n\t\t`mkdir 'C:\\some\\dir'`,\n\t})\n}\n\nfunc (s powershellSuite) TestNewPSEncodedCommand(c *gc.C) {\n\tscript := `\n\tGet-WmiObject win32_processor\n`\n\texpected := \"powershell.exe -Sta -NonInteractive -ExecutionPolicy RemoteSigned -EncodedCommand 
CgAJAEcAZQB0AC0AVwBtAGkATwBiAGoAZQBjAHQAIAB3AGkAbgAzADIAXwBwAHIAbwBjAGUAcwBzAG8AcgAKAA==\"\n\n\tout, err := shell.NewPSEncodedCommand(script)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert((len(out) > 0), gc.Equals, true)\n\tc.Assert(out, jc.DeepEquals, expected)\n\n}\n"
  },
  {
    "path": "shell/renderer.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage shell\n\nimport (\n\t\"runtime\"\n\t\"strings\"\n\n\t\"github.com/juju/errors\"\n\n\t\"github.com/juju/utils/v4\"\n\t\"github.com/juju/utils/v4/filepath\"\n)\n\n// A PathRenderer generates paths that are appropriate for a given\n// shell environment.\ntype PathRenderer interface {\n\tfilepath.Renderer\n\n\t// Quote generates a new string with quotation marks and relevant\n\t// escape/control characters properly escaped. The resulting string\n\t// is wrapped in quotation marks such that it will be treated as a\n\t// single string by the shell.\n\tQuote(str string) string\n\n\t// ExeSuffix returns the filename suffix for executable files.\n\tExeSuffix() string\n}\n\n// Renderer provides all the functionality needed to generate shell-\n// compatible paths and commands.\ntype Renderer interface {\n\tPathRenderer\n\tCommandRenderer\n\tOutputRenderer\n}\n\n// NewRenderer returns a Renderer for the given shell, OS, or distro name.\nfunc NewRenderer(name string) (Renderer, error) {\n\tif name == \"\" {\n\t\tname = runtime.GOOS\n\t} else {\n\t\tname = strings.ToLower(name)\n\t}\n\n\t// Try known shell names first.\n\tswitch name {\n\tcase \"bash\":\n\t\treturn &BashRenderer{}, nil\n\tcase \"ps\", \"powershell\":\n\t\treturn &PowershellRenderer{}, nil\n\tcase \"cmd\", \"batch\", \"bat\":\n\t\treturn &WinCmdRenderer{}, nil\n\t}\n\n\t// Fall back to operating systems.\n\tswitch {\n\tcase name == \"windows\":\n\t\treturn &PowershellRenderer{}, nil\n\tcase utils.OSIsUnix(name):\n\t\treturn &BashRenderer{}, nil\n\t}\n\n\t// Finally try distros.\n\tswitch name {\n\tcase \"ubuntu\":\n\t\treturn &BashRenderer{}, nil\n\t}\n\n\treturn nil, errors.NotFoundf(\"renderer for %q\", name)\n}\n"
  },
  {
    "path": "shell/renderer_test.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage shell_test\n\nimport (\n\t\"runtime\"\n\n\t\"github.com/juju/errors\"\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4\"\n\t\"github.com/juju/utils/v4/shell\"\n)\n\ntype rendererSuite struct {\n\ttesting.IsolationSuite\n\n\tunix    *shell.BashRenderer\n\twindows *shell.PowershellRenderer\n}\n\nvar _ = gc.Suite(&rendererSuite{})\n\nfunc (s *rendererSuite) SetUpTest(c *gc.C) {\n\ts.IsolationSuite.SetUpTest(c)\n\n\ts.unix = &shell.BashRenderer{}\n\ts.windows = &shell.PowershellRenderer{}\n}\n\nfunc (s rendererSuite) checkRenderer(c *gc.C, renderer shell.Renderer, expected string) {\n\tswitch expected {\n\tcase \"powershell\":\n\t\tc.Check(renderer, gc.FitsTypeOf, s.windows)\n\tcase \"bash\":\n\t\tc.Check(renderer, gc.FitsTypeOf, s.unix)\n\tdefault:\n\t\tc.Errorf(\"unknown kind %q\", expected)\n\t}\n}\n\nfunc (s rendererSuite) TestNewRendererDefault(c *gc.C) {\n\t// All possible values of runtime.GOOS should be supported.\n\trenderer, err := shell.NewRenderer(\"\")\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tswitch runtime.GOOS {\n\tcase \"windows\":\n\t\ts.checkRenderer(c, renderer, \"powershell\")\n\tdefault:\n\t\ts.checkRenderer(c, renderer, \"bash\")\n\t}\n}\n\nfunc (s rendererSuite) TestNewRendererGOOS(c *gc.C) {\n\t// All possible values of runtime.GOOS should be supported.\n\trenderer, err := shell.NewRenderer(runtime.GOOS)\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tswitch runtime.GOOS {\n\tcase \"windows\":\n\t\ts.checkRenderer(c, renderer, \"powershell\")\n\tdefault:\n\t\ts.checkRenderer(c, renderer, \"bash\")\n\t}\n}\n\nfunc (s rendererSuite) TestNewRendererWindows(c *gc.C) {\n\trenderer, err := shell.NewRenderer(\"windows\")\n\tc.Assert(err, jc.ErrorIsNil)\n\n\ts.checkRenderer(c, renderer, \"powershell\")\n}\n\nfunc (s rendererSuite) TestNewRendererUnix(c *gc.C) 
{\n\tfor _, os := range utils.OSUnix {\n\t\tc.Logf(\"trying %q\", os)\n\t\trenderer, err := shell.NewRenderer(os)\n\t\tc.Assert(err, jc.ErrorIsNil)\n\n\t\ts.checkRenderer(c, renderer, \"bash\")\n\t}\n}\n\nfunc (s rendererSuite) TestNewRendererDistros(c *gc.C) {\n\tdistros := []string{\"ubuntu\"}\n\tfor _, distro := range distros {\n\t\tc.Logf(\"trying %q\", distro)\n\t\trenderer, err := shell.NewRenderer(distro)\n\t\tc.Assert(err, jc.ErrorIsNil)\n\n\t\ts.checkRenderer(c, renderer, \"bash\")\n\t}\n}\n\nfunc (s rendererSuite) TestNewRendererUnknown(c *gc.C) {\n\t_, err := shell.NewRenderer(\"<unknown OS>\")\n\n\tc.Check(err, jc.Satisfies, errors.IsNotFound)\n}\n"
  },
  {
    "path": "shell/script.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage shell\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com/juju/utils/v4\"\n)\n\n// DumpFileOnErrorScript returns a bash script that\n// may be used to dump the contents of the specified\n// file to stderr when the shell exits with an error.\nfunc DumpFileOnErrorScript(filename string) string {\n\tscript := `\ndump_file() {\n    code=$?\n    if [ $code -ne 0 -a -e %s ]; then\n        cat %s >&2\n    fi\n    exit $code\n}\ntrap dump_file EXIT\n`[1:]\n\tfilename = utils.ShQuote(filename)\n\treturn fmt.Sprintf(script, filename, filename)\n}\n\n// A ScriptRenderer provides the functionality necessary to render a\n// sequence of shell commands into the content of a shell script.\ntype ScriptRenderer interface {\n\t// RenderScript generates the content of a shell script for the\n\t// provided shell commands.\n\tRenderScript(commands []string) []byte\n}\n\n// A ScriptWriter provides the functionality necessarily to render and\n// write a sequence of shell commands to a shell script that is ready\n// to be run.\ntype ScriptWriter interface {\n\tScriptRenderer\n\n\t// Chmod returns a shell command that sets the given file's\n\t// permissions. The result is equivalent to os.Chmod.\n\tChmod(path string, perm os.FileMode) []string\n\n\t// WriteFile returns a shell command that writes the provided\n\t// content to a file. The command is functionally equivalent to\n\t// ioutil.WriteFile with permissions from the current umask.\n\tWriteFile(filename string, data []byte) []string\n\n\t// ScriptFilename generates a filename appropriate for a script\n\t// from the provided file and directory names.\n\tScriptFilename(name, dirname string) string\n\n\t// ScriptPermissions returns the permissions appropriate for a script.\n\tScriptPermissions() os.FileMode\n}\n\n// WriteScript returns a sequence of shell commands that write the\n// provided shell commands to a file. 
The filename is composed from the\n// given directory name and name, and the appropriate suffix for a shell\n// script is applied. The script content is prefixed with any necessary\n// content related to shell scripts (e.g. a shbang line). The file's\n// permissions are set to those appropriate for a script (e.g. 0755).\nfunc WriteScript(renderer ScriptWriter, name, dirname string, script []string) []string {\n\tfilename := renderer.ScriptFilename(name, dirname)\n\tperm := renderer.ScriptPermissions()\n\n\tvar commands []string\n\n\tdata := renderer.RenderScript(script)\n\tcommands = append(commands, renderer.WriteFile(filename, data)...)\n\n\tcommands = append(commands, renderer.Chmod(filename, perm)...)\n\n\treturn commands\n}\n"
  },
  {
    "path": "shell/script_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage shell_test\n\nimport (\n\t\"bytes\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"strings\"\n\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/shell\"\n)\n\ntype scriptSuite struct {\n\ttesting.IsolationSuite\n}\n\nvar _ = gc.Suite(&scriptSuite{})\n\nfunc (*scriptSuite) TestDumpFileOnErrorScriptOutput(c *gc.C) {\n\tscript := shell.DumpFileOnErrorScript(\"a b c\")\n\tc.Assert(script, gc.Equals, `\ndump_file() {\n    code=$?\n    if [ $code -ne 0 -a -e 'a b c' ]; then\n        cat 'a b c' >&2\n    fi\n    exit $code\n}\ntrap dump_file EXIT\n`[1:])\n}\n\nfunc (*scriptSuite) TestDumpFileOnErrorScript(c *gc.C) {\n\ttempdir := c.MkDir()\n\tfilename := filepath.Join(tempdir, \"log.txt\")\n\terr := ioutil.WriteFile(filename, []byte(\"abc\"), 0644)\n\tc.Assert(err, gc.IsNil)\n\n\tdumpScript := shell.DumpFileOnErrorScript(filename)\n\tc.Logf(\"%s\", dumpScript)\n\trun := func(command string) (stdout, stderr string) {\n\t\tvar stdoutBuf, stderrBuf bytes.Buffer\n\t\tcmd := exec.Command(\"/bin/bash\", \"-s\")\n\t\tcmd.Stdin = strings.NewReader(dumpScript + command)\n\t\tcmd.Stdout = &stdoutBuf\n\t\tcmd.Stderr = &stderrBuf\n\t\tcmd.Run()\n\t\treturn stdoutBuf.String(), stderrBuf.String()\n\t}\n\n\tstdout, stderr := run(\"exit 0\")\n\tc.Assert(stdout, gc.Equals, \"\")\n\tc.Assert(stderr, gc.Equals, \"\")\n\n\tstdout, stderr = run(\"exit 1\")\n\tc.Assert(stdout, gc.Equals, \"\")\n\tc.Assert(stderr, gc.Equals, \"abc\")\n\n\terr = os.Remove(filename)\n\tc.Assert(err, gc.IsNil)\n\tstdout, stderr = run(\"exit 1\")\n\tc.Assert(stdout, gc.Equals, \"\")\n\tc.Assert(stderr, gc.Equals, \"\")\n}\n\nfunc (*scriptSuite) TestWriteScriptUnix(c *gc.C) {\n\trenderer := &shell.BashRenderer{}\n\tscript := `\nexec a-command\nexec another-command\n`\n\tcommands := 
shell.WriteScript(renderer, \"spam\", \"/ham/eggs\", strings.Split(script, \"\\n\"))\n\n\tcmd := `\ncat > '/ham/eggs/spam.sh' << 'EOF'\n#!/usr/bin/env bash\n\n\nexec a-command\nexec another-command\n\nEOF`[1:]\n\tc.Check(commands, jc.DeepEquals, []string{\n\t\tcmd,\n\t\t\"chmod 0755 '/ham/eggs/spam.sh'\",\n\t})\n}\n\nfunc (*scriptSuite) TestWriteScriptWindows(c *gc.C) {\n\trenderer := &shell.PowershellRenderer{}\n\tscript := `\nexec a-command\nexec another-command\n`\n\tcommands := shell.WriteScript(renderer, \"spam\", `C:\\ham\\eggs`, strings.Split(script, \"\\n\"))\n\n\tc.Check(commands, jc.DeepEquals, []string{\n\t\t`Set-Content 'C:\\ham\\eggs\\spam.ps1' @\"\n\nexec a-command\nexec another-command\n\n\"@`,\n\t})\n}\n"
  },
  {
    "path": "shell/unix.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage shell\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/juju/utils/v4\"\n\t\"github.com/juju/utils/v4/filepath\"\n)\n\n// unixRenderer is the base shell renderer for \"unix\" shells.\ntype unixRenderer struct {\n\tfilepath.UnixRenderer\n}\n\n// Quote implements Renderer.\nfunc (unixRenderer) Quote(str string) string {\n\t// This *may* not be correct for *all* unix shells...\n\treturn utils.ShQuote(str)\n}\n\n// ExeSuffix implements Renderer.\nfunc (unixRenderer) ExeSuffix() string {\n\treturn \"\"\n}\n\n// Mkdir implements Renderer.\nfunc (ur unixRenderer) Mkdir(dirname string) []string {\n\tdirname = ur.Quote(dirname)\n\treturn []string{\n\t\tfmt.Sprintf(\"mkdir %s\", dirname),\n\t}\n}\n\n// MkdirAll implements Renderer.\nfunc (ur unixRenderer) MkdirAll(dirname string) []string {\n\tdirname = ur.Quote(dirname)\n\treturn []string{\n\t\tfmt.Sprintf(\"mkdir -p %s\", dirname),\n\t}\n}\n\n// Chmod implements Renderer.\nfunc (ur unixRenderer) Chmod(path string, perm os.FileMode) []string {\n\tpath = ur.Quote(path)\n\treturn []string{\n\t\tfmt.Sprintf(\"chmod %04o %s\", perm, path),\n\t}\n}\n\n// Chown implements Renderer.\nfunc (ur unixRenderer) Chown(path, owner, group string) []string {\n\tpath = ur.Quote(path)\n\treturn []string{\n\t\tfmt.Sprintf(\"chown %s:%s %s\", owner, group, path),\n\t}\n}\n\n// Touch implements Renderer.\nfunc (ur unixRenderer) Touch(path string, timestamp *time.Time) []string {\n\tpath = ur.Quote(path)\n\tvar opt string\n\tif timestamp != nil {\n\t\topt = timestamp.Format(\"-t 200601021504.05 \")\n\t}\n\treturn []string{\n\t\tfmt.Sprintf(\"touch %s%s\", opt, path),\n\t}\n}\n\n// WriteFile implements Renderer.\nfunc (ur unixRenderer) WriteFile(filename string, data []byte) []string {\n\tfilename = ur.Quote(filename)\n\treturn []string{\n\t\t// An alternate approach would be to use printf.\n\t\tfmt.Sprintf(\"cat > 
%s << 'EOF'\\n%s\\nEOF\", filename, data),\n\t}\n}\n\nfunc (unixRenderer) outFD(name string) (int, bool) {\n\tfd, ok := ResolveFD(name)\n\tif !ok || fd <= 0 {\n\t\treturn -1, false\n\t}\n\treturn fd, true\n}\n\n// RedirectFD implements OutputRenderer.\nfunc (ur unixRenderer) RedirectFD(dst, src string) []string {\n\tdstFD, ok := ur.outFD(dst)\n\tif !ok {\n\t\treturn nil\n\t}\n\tsrcFD, ok := ur.outFD(src)\n\tif !ok {\n\t\treturn nil\n\t}\n\treturn []string{\n\t\tfmt.Sprintf(\"exec %d>&%d\", srcFD, dstFD),\n\t}\n}\n\n// RedirectOutput implements OutputRenderer.\nfunc (ur unixRenderer) RedirectOutput(filename string) []string {\n\tfilename = ur.Quote(filename)\n\n\treturn []string{\n\t\t\"exec >> \" + filename,\n\t}\n}\n\n// RedirectOutputReset implements OutputRenderer.\nfunc (ur unixRenderer) RedirectOutputReset(filename string) []string {\n\tfilename = ur.Quote(filename)\n\n\treturn []string{\n\t\t\"exec > \" + filename,\n\t}\n}\n\n// ScriptFilename implements ScriptWriter.\nfunc (ur *unixRenderer) ScriptFilename(name, dirname string) string {\n\treturn ur.Join(dirname, name+\".sh\")\n}\n\n// ScriptPermissions implements ScriptWriter.\nfunc (ur *unixRenderer) ScriptPermissions() os.FileMode {\n\treturn 0755\n}\n"
  },
  {
    "path": "shell/win.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage shell\n\nimport (\n\t\"os\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/juju/utils/v4/filepath\"\n)\n\n// windowsRenderer is the base implementation for Windows shells.\ntype windowsRenderer struct {\n\tfilepath.WindowsRenderer\n}\n\n// ExeSuffix implements Renderer.\nfunc (w *windowsRenderer) ExeSuffix() string {\n\treturn \".exe\"\n}\n\n// ScriptPermissions implements ScriptWriter.\nfunc (w *windowsRenderer) ScriptPermissions() os.FileMode {\n\treturn 0755\n}\n\n// Render implements ScriptWriter.\nfunc (w *windowsRenderer) RenderScript(commands []string) []byte {\n\treturn []byte(strings.Join(commands, \"\\n\"))\n}\n\n// Chown implements Renderer.\nfunc (w windowsRenderer) Chown(path, owner, group string) []string {\n\t// TODO(ericsnow) Use ???\n\tpanic(\"not supported\")\n}\n\n// Touch implements Renderer.\nfunc (w windowsRenderer) Touch(path string, timestamp *time.Time) []string {\n\t// TODO(ericsnow) Use ???\n\tpanic(\"not supported\")\n}\n\n// RedirectFD implements OutputRenderer.\nfunc (w windowsRenderer) RedirectFD(dst, src string) []string {\n\t// TODO(ericsnow) Use ???\n\tpanic(\"not supported\")\n}\n\n// RedirectOutput implements OutputRenderer.\nfunc (w windowsRenderer) RedirectOutput(filename string) []string {\n\t// TODO(ericsnow) Use ???\n\tpanic(\"not supported\")\n}\n\n// RedirectOutputReset implements OutputRenderer.\nfunc (w windowsRenderer) RedirectOutputReset(filename string) []string {\n\t// TODO(ericsnow) Use ???\n\tpanic(\"not supported\")\n}\n"
  },
  {
    "path": "shell/wincmd.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage shell\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com/juju/utils/v4\"\n)\n\n// WinCmdRenderer is a shell renderer for Windows cmd.exe.\ntype WinCmdRenderer struct {\n\twindowsRenderer\n}\n\n// Quote implements Renderer.\nfunc (wcr *WinCmdRenderer) Quote(str string) string {\n\treturn utils.WinCmdQuote(str)\n}\n\n// Chmod implements Renderer.\nfunc (wcr *WinCmdRenderer) Chmod(path string, perm os.FileMode) []string {\n\t// TODO(ericsnow) Is this necessary? Should we use icacls?\n\treturn nil\n}\n\n// WriteFile implements Renderer.\nfunc (wcr *WinCmdRenderer) WriteFile(filename string, data []byte) []string {\n\tfilename = wcr.Quote(filename)\n\tvar commands []string\n\tfor _, line := range bytes.Split(data, []byte{'\\n'}) {\n\t\tcmd := fmt.Sprintf(\">>%s @echo %s\", filename, line)\n\t\tcommands = append(commands, cmd)\n\t}\n\treturn commands\n}\n\n// MkDir implements Renderer.\nfunc (wcr *WinCmdRenderer) Mkdir(dirname string) []string {\n\tdirname = wcr.Quote(dirname)\n\treturn []string{\n\t\tfmt.Sprintf(`mkdir %s`, wcr.FromSlash(dirname)),\n\t}\n}\n\n// MkDirAll implements Renderer.\nfunc (wcr *WinCmdRenderer) MkdirAll(dirname string) []string {\n\tdirname = wcr.Quote(dirname)\n\t// TODO(ericsnow) Wrap in \"setlocal enableextensions...endlocal\"?\n\treturn []string{\n\t\tfmt.Sprintf(`mkdir %s`, wcr.FromSlash(dirname)),\n\t}\n}\n\n// ScriptFilename implements ScriptWriter.\nfunc (wcr *WinCmdRenderer) ScriptFilename(name, dirname string) string {\n\treturn wcr.Join(dirname, name+\".bat\")\n}\n"
  },
  {
    "path": "shell/wincmd_test.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage shell_test\n\nimport (\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/shell\"\n)\n\nvar _ = gc.Suite(&winCmdSuite{})\n\ntype winCmdSuite struct {\n\ttesting.IsolationSuite\n\n\tdirname  string\n\tfilename string\n\trenderer *shell.WinCmdRenderer\n}\n\nfunc (s *winCmdSuite) SetUpTest(c *gc.C) {\n\ts.IsolationSuite.SetUpTest(c)\n\n\ts.dirname = `C:\\some\\dir`\n\ts.filename = s.dirname + `\\file`\n\ts.renderer = &shell.WinCmdRenderer{}\n}\n\nfunc (s winCmdSuite) TestExeSuffix(c *gc.C) {\n\tsuffix := s.renderer.ExeSuffix()\n\n\tc.Check(suffix, gc.Equals, \".exe\")\n}\n\nfunc (s winCmdSuite) TestShQuote(c *gc.C) {\n\tquoted := s.renderer.Quote(\"abc\")\n\n\tc.Check(quoted, gc.Equals, `^\"abc^\"`)\n}\n\nfunc (s winCmdSuite) TestChmod(c *gc.C) {\n\tcommands := s.renderer.Chmod(s.filename, 0644)\n\n\tc.Check(commands, gc.HasLen, 0)\n}\n\nfunc (s winCmdSuite) TestWriteFile(c *gc.C) {\n\tdata := []byte(\"something\\nhere\\n\")\n\tcommands := s.renderer.WriteFile(s.filename, data)\n\n\tc.Check(commands, jc.DeepEquals, []string{\n\t\t`>>^\"C:\\\\some\\\\dir\\\\file^\" @echo something`,\n\t\t`>>^\"C:\\\\some\\\\dir\\\\file^\" @echo here`,\n\t\t`>>^\"C:\\\\some\\\\dir\\\\file^\" @echo `,\n\t})\n}\n\nfunc (s winCmdSuite) TestMkdir(c *gc.C) {\n\tcommands := s.renderer.Mkdir(s.dirname)\n\n\tc.Check(commands, jc.DeepEquals, []string{\n\t\t`mkdir ^\"C:\\\\some\\\\dir^\"`,\n\t})\n}\n\nfunc (s winCmdSuite) TestMkdirAll(c *gc.C) {\n\tcommands := s.renderer.MkdirAll(s.dirname)\n\n\tc.Check(commands, jc.DeepEquals, []string{\n\t\t`mkdir ^\"C:\\\\some\\\\dir^\"`,\n\t})\n}\n"
  },
  {
    "path": "size.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils\n\nimport (\n\t\"math\"\n\t\"strconv\"\n\t\"strings\"\n\t\"unicode\"\n\n\t\"github.com/juju/errors\"\n)\n\n// ParseSize parses the string as a size, in mebibytes.\n//\n// The string must be a is a non-negative number with\n// an optional multiplier suffix (M, G, T, P, E, Z, or Y).\n// If the suffix is not specified, \"M\" is implied.\nfunc ParseSize(str string) (MB uint64, err error) {\n\t// Find the first non-digit/period:\n\ti := strings.IndexFunc(str, func(r rune) bool {\n\t\treturn r != '.' && !unicode.IsDigit(r)\n\t})\n\tvar multiplier float64 = 1\n\tif i > 0 {\n\t\tsuffix := str[i:]\n\t\tmultiplier = 0\n\t\tfor j := 0; j < len(sizeSuffixes); j++ {\n\t\t\tbase := string(sizeSuffixes[j])\n\t\t\t// M, MB, or MiB are all valid.\n\t\t\tswitch suffix {\n\t\t\tcase base, base + \"B\", base + \"iB\":\n\t\t\t\tmultiplier = float64(sizeSuffixMultiplier(j))\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif multiplier == 0 {\n\t\t\treturn 0, errors.Errorf(\"invalid multiplier suffix %q, expected one of %s\", suffix, []byte(sizeSuffixes))\n\t\t}\n\t\tstr = str[:i]\n\t}\n\n\tval, err := strconv.ParseFloat(str, 64)\n\tif err != nil || val < 0 {\n\t\treturn 0, errors.Errorf(\"expected a non-negative number, got %q\", str)\n\t}\n\tval *= multiplier\n\treturn uint64(math.Ceil(val)), nil\n}\n\nvar sizeSuffixes = \"MGTPEZY\"\n\nfunc sizeSuffixMultiplier(i int) int {\n\treturn 1 << uint(i*10)\n}\n\n// SizeTracker tracks the number of bytes passing through\n// its Write method (which is otherwise a no-op).\n//\n// Use SizeTracker with io.MultiWriter() to track number of bytes\n// written. 
Use with io.TeeReader() to track number of bytes read.\ntype SizeTracker struct {\n\t// size is the number of bytes written so far.\n\tsize int64\n}\n\n// Size returns the number of bytes written so far.\nfunc (st SizeTracker) Size() int64 {\n\treturn st.size\n}\n\n// Write implements io.Writer.\nfunc (st *SizeTracker) Write(data []byte) (n int, err error) {\n\tn = len(data)\n\tst.size += int64(n)\n\treturn n, nil\n}\n"
  },
  {
    "path": "size_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils_test\n\nimport (\n\t\"io\"\n\t\"io/ioutil\"\n\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\t\"github.com/juju/testing/filetesting\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4\"\n)\n\nvar _ = gc.Suite(&sizeSuite{})\n\ntype sizeSuite struct {\n\ttesting.IsolationSuite\n}\n\nfunc (*sizeSuite) TestParseSize(c *gc.C) {\n\ttype test struct {\n\t\tin  string\n\t\tout uint64\n\t\terr string\n\t}\n\ttests := []test{{\n\t\tin:  \"\",\n\t\terr: `expected a non-negative number, got \"\"`,\n\t}, {\n\t\tin:  \"-1\",\n\t\terr: `expected a non-negative number, got \"-1\"`,\n\t}, {\n\t\tin:  \"1MZ\",\n\t\terr: `invalid multiplier suffix \"MZ\", expected one of MGTPEZY`,\n\t}, {\n\t\tin:  \"0\",\n\t\tout: 0,\n\t}, {\n\t\tin:  \"123\",\n\t\tout: 123,\n\t}, {\n\t\tin:  \"1M\",\n\t\tout: 1,\n\t}, {\n\t\tin:  \"0.5G\",\n\t\tout: 512,\n\t}, {\n\t\tin:  \"0.5GB\",\n\t\tout: 512,\n\t}, {\n\t\tin:  \"0.5GiB\",\n\t\tout: 512,\n\t}, {\n\t\tin:  \"0.5T\",\n\t\tout: 524288,\n\t}, {\n\t\tin:  \"0.5P\",\n\t\tout: 536870912,\n\t}, {\n\t\tin:  \"0.0009765625E\",\n\t\tout: 1073741824,\n\t}, {\n\t\tin:  \"1Z\",\n\t\tout: 1125899906842624,\n\t}, {\n\t\tin:  \"1Y\",\n\t\tout: 1152921504606846976,\n\t}}\n\tfor i, test := range tests {\n\t\tc.Logf(\"test %d: %+v\", i, test)\n\t\tsize, err := utils.ParseSize(test.in)\n\t\tif test.err != \"\" {\n\t\t\tc.Assert(err, gc.NotNil)\n\t\t\tc.Assert(err, gc.ErrorMatches, test.err)\n\t\t} else {\n\t\t\tc.Assert(err, gc.IsNil)\n\t\t\tc.Assert(size, gc.Equals, test.out)\n\t\t}\n\t}\n}\n\nfunc (*sizeSuite) TestSizingReaderOkay(c *gc.C) {\n\texpected := \"some data\"\n\tstub := &testing.Stub{}\n\treader := filetesting.NewStubReader(stub, expected)\n\n\tvar st utils.SizeTracker\n\tsizingReader := io.TeeReader(reader, &st)\n\tdata, err := ioutil.ReadAll(sizingReader)\n\tc.Assert(err, 
jc.ErrorIsNil)\n\n\tstub.CheckCallNames(c, \"Read\", \"Read\")\n\tc.Check(string(data), gc.Equals, expected)\n\tc.Check(st.Size(), gc.Equals, int64(len(expected)))\n}\n\nfunc (*sizeSuite) TestSizingReaderMixedEOF(c *gc.C) {\n\texpected := \"some data\"\n\tstub := &testing.Stub{}\n\treader := &filetesting.StubReader{\n\t\tStub: stub,\n\t\tReturnRead: &fakeStream{\n\t\t\tdata: expected,\n\t\t},\n\t}\n\n\tvar st utils.SizeTracker\n\tsizingReader := io.TeeReader(reader, &st)\n\tdata, err := ioutil.ReadAll(sizingReader)\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tstub.CheckCallNames(c, \"Read\") // The EOF was mixed with the data.\n\tc.Check(string(data), gc.Equals, expected)\n\tc.Check(st.Size(), gc.Equals, int64(len(expected)))\n}\n\nfunc (*sizeSuite) TestSizingWriter(c *gc.C) {\n\texpected := \"some data\"\n\tstub := &testing.Stub{}\n\twriter, buffer := filetesting.NewStubWriter(stub)\n\n\tvar st utils.SizeTracker\n\tsizingWriter := io.MultiWriter(writer, &st)\n\tn, err := sizingWriter.Write([]byte(expected))\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tstub.CheckCallNames(c, \"Write\")\n\tc.Check(n, gc.Equals, len(expected))\n\tc.Check(buffer.String(), gc.Equals, expected)\n\tc.Check(st.Size(), gc.Equals, int64(len(expected)))\n}\n\ntype fakeStream struct {\n\tdata string\n\tpos  uint64\n}\n\nfunc (f *fakeStream) Read(data []byte) (int, error) {\n\tn := copy(data, f.data[f.pos:])\n\tf.pos += uint64(n)\n\tif f.pos >= uint64(len(f.data)) {\n\t\treturn n, io.EOF\n\t}\n\treturn n, nil\n}\n"
  },
  {
    "path": "ssh/authorisedkeys.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage ssh\n\nimport (\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"os/user\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com/juju/errors\"\n\t\"github.com/juju/loggo/v2\"\n\t\"golang.org/x/crypto/ssh\"\n\n\t\"github.com/juju/utils/v4\"\n)\n\nvar logger = loggo.GetLogger(\"juju.utils.ssh\")\n\ntype ListMode bool\n\nvar (\n\tFullKeys     ListMode = true\n\tFingerprints ListMode = false\n)\n\nconst (\n\tdefaultAuthKeysFile = \"authorized_keys\"\n)\n\ntype AuthorisedKey struct {\n\tType    string\n\tKey     []byte\n\tComment string\n}\n\nfunc authKeysDir(username string) (string, error) {\n\thomeDir, err := utils.UserHomeDir(username)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\thomeDir, err = utils.NormalizePath(homeDir)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn filepath.Join(homeDir, \".ssh\"), nil\n}\n\n// ParseAuthorisedKey parses a non-comment line from an\n// authorized_keys file and returns the constituent parts.\n// Based on description in \"man sshd\".\nfunc ParseAuthorisedKey(line string) (*AuthorisedKey, error) {\n\tif strings.Contains(line, \"\\n\") {\n\t\treturn nil, errors.NotValidf(\"newline in authorized_key %q\", line)\n\t}\n\tkey, comment, _, _, err := ssh.ParseAuthorizedKey([]byte(line))\n\tif err != nil {\n\t\treturn nil, errors.Errorf(\"invalid authorized_key %q\", line)\n\t}\n\treturn &AuthorisedKey{\n\t\tType:    key.Type(),\n\t\tKey:     key.Marshal(),\n\t\tComment: comment,\n\t}, nil\n}\n\n// ConcatAuthorisedKeys will joing two or more authorised keys together to form\n// a string based list of authorised keys that can be read by ssh programs. 
Keys\n// joined with a newline as the separator.\nfunc ConcatAuthorisedKeys(a, b string) string {\n\tif a == \"\" {\n\t\treturn b\n\t}\n\tif b == \"\" {\n\t\treturn a\n\t}\n\tif a[len(a)-1] != '\\n' {\n\t\treturn a + \"\\n\" + b\n\t}\n\treturn a + b\n}\n\n// SplitAuthorisedKeys extracts a key slice from the specified key data,\n// by splitting the key data into lines and ignoring comments and blank lines.\nfunc SplitAuthorisedKeys(keyData string) []string {\n\tvar keys []string\n\tfor _, key := range strings.Split(string(keyData), \"\\n\") {\n\t\tkey = strings.Trim(key, \" \\r\")\n\t\tif len(key) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif key[0] == '#' {\n\t\t\tcontinue\n\t\t}\n\t\tkeys = append(keys, key)\n\t}\n\treturn keys\n}\n\nfunc readAuthorisedKeys(username, filename string) ([]string, error) {\n\tkeyDir, err := authKeysDir(username)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsshKeyFile := filepath.Join(keyDir, filename)\n\tlogger.Debugf(\"reading authorised keys file %s\", sshKeyFile)\n\tkeyData, err := ioutil.ReadFile(sshKeyFile)\n\tif os.IsNotExist(err) {\n\t\treturn []string{}, nil\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Annotate(err, \"reading ssh authorised keys file\")\n\t}\n\tvar keys []string\n\tfor _, key := range strings.Split(string(keyData), \"\\n\") {\n\t\tif len(strings.Trim(key, \" \\r\")) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tkeys = append(keys, key)\n\t}\n\treturn keys, nil\n}\n\nfunc writeAuthorisedKeys(username, filename string, keys []string) error {\n\tkeyDir, err := authKeysDir(username)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = os.MkdirAll(keyDir, os.FileMode(0755))\n\tif err != nil {\n\t\treturn errors.Annotate(err, \"cannot create ssh key directory\")\n\t}\n\tkeyData := strings.Join(keys, \"\\n\") + \"\\n\"\n\n\t// Get perms to use on auth keys file\n\tsshKeyFile := filepath.Join(keyDir, filename)\n\tperms := os.FileMode(0644)\n\tinfo, err := os.Stat(sshKeyFile)\n\tif err == nil {\n\t\tperms = 
info.Mode().Perm()\n\t}\n\n\tlogger.Debugf(\"writing authorised keys file %s\", sshKeyFile)\n\terr = utils.AtomicWriteFile(sshKeyFile, []byte(keyData), perms)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// TODO (wallyworld) - what to do on windows (if anything)\n\t// TODO(dimitern) - no need to use user.Current() if username\n\t// is \"\" - it will use the current user anyway.\n\tif runtime.GOOS != \"windows\" {\n\t\t// Ensure the resulting authorised keys file has its ownership\n\t\t// set to the specified username.\n\t\tvar u *user.User\n\t\tif username == \"\" {\n\t\t\tu, err = user.Current()\n\t\t} else {\n\t\t\tu, err = user.Lookup(username)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// chown requires ints but user.User has strings for windows.\n\t\tuid, err := strconv.Atoi(u.Uid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tgid, err := strconv.Atoi(u.Gid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = os.Chown(sshKeyFile, uid, gid)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// We need a mutex because updates to the authorised keys file are done by\n// reading the contents, updating, and writing back out. 
So only one caller\n// at a time can use either Add, Delete, List.\nvar keysMutex sync.Mutex\n\n// AddKeys adds the specified ssh keys to the authorized_keys file for user.\n// Returns an error if there is an issue with *any* of the supplied keys.\nfunc AddKeys(user string, newKeys ...string) error {\n\tkeysMutex.Lock()\n\tdefer keysMutex.Unlock()\n\texistingKeys, err := readAuthorisedKeys(user, defaultAuthKeysFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn addKeys(user, defaultAuthKeysFile, newKeys, existingKeys)\n}\n\n// DeleteKeys removes the specified ssh keys from the authorized ssh keys file for user.\n// keyIds may be either key comments or fingerprints.\n// Returns an error if there is an issue with *any* of the keys to delete.\nfunc DeleteKeys(user string, keyIds ...string) error {\n\tkeysMutex.Lock()\n\tdefer keysMutex.Unlock()\n\texistingKeys, err := readAuthorisedKeys(user, defaultAuthKeysFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn deleteKeys(user, defaultAuthKeysFile, existingKeys, keyIds, false)\n}\n\n// ReplaceKeys writes the specified ssh keys to the authorized_keys file for user,\n// replacing any that are already there.\n// Returns an error if there is an issue with *any* of the supplied keys.\nfunc ReplaceKeys(user string, newKeys ...string) error {\n\tkeysMutex.Lock()\n\tdefer keysMutex.Unlock()\n\n\texistingKeyData, err := readAuthorisedKeys(user, defaultAuthKeysFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tvar existingNonKeyLines []string\n\tfor _, line := range existingKeyData {\n\t\t_, _, err := KeyFingerprint(line)\n\t\tif err != nil {\n\t\t\texistingNonKeyLines = append(existingNonKeyLines, line)\n\t\t}\n\t}\n\treturn writeAuthorisedKeys(user, defaultAuthKeysFile, append(existingNonKeyLines, newKeys...))\n}\n\n// ListKeys returns either the full keys or key comments from the authorized ssh keys file for user.\nfunc ListKeys(user string, mode ListMode) ([]string, error) {\n\tkeysMutex.Lock()\n\tdefer 
keysMutex.Unlock()\n\tkeyData, err := readAuthorisedKeys(user, defaultAuthKeysFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn listKeys(keyData, mode)\n}\n\n// Any ssh key added to the authorised keys list by Juju will have this prefix.\n// This allows Juju to know which keys have been added externally and any such keys\n// will always be retained by Juju when updating the authorised keys file.\nconst JujuCommentPrefix = \"Juju:\"\n\nfunc EnsureJujuComment(key string) string {\n\tak, err := ParseAuthorisedKey(key)\n\t// Just return an invalid key as is.\n\tif err != nil {\n\t\tlogger.Warningf(\"invalid Juju ssh key %s: %v\", key, err)\n\t\treturn key\n\t}\n\tif ak.Comment == \"\" {\n\t\treturn key + \" \" + JujuCommentPrefix + \"sshkey\"\n\t} else {\n\t\t// Add the Juju prefix to the comment if necessary.\n\t\tif !strings.HasPrefix(ak.Comment, JujuCommentPrefix) {\n\t\t\tcommentIndex := strings.LastIndex(key, ak.Comment)\n\t\t\treturn key[:commentIndex] + JujuCommentPrefix + ak.Comment\n\t\t}\n\t}\n\treturn key\n}\n\n// AddKeysToFile adds the specified ssh keys to the specified file for user.\n// Returns an error if there is an issue with *any* of the supplied keys.\nfunc AddKeysToFile(user, file string, newKeys []string) error {\n\tkeysMutex.Lock()\n\tdefer keysMutex.Unlock()\n\texistingKeys, err := readAuthorisedKeys(user, file)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn addKeys(user, file, newKeys, existingKeys)\n}\n\n// DeleteKeysFromFile removes the specified ssh keys from the authorized ssh keys file for user.\n// keyIds may be either key comments or fingerprints.\n// Returns an error if there is an issue with *any* of the keys to delete.\n//\n// Unlike DeleteKeys, this version can delete ALL keys from the target file.\nfunc DeleteKeysFromFile(user, file string, keyIds []string) error {\n\tkeysMutex.Lock()\n\tdefer keysMutex.Unlock()\n\texistingKeys, err := readAuthorisedKeys(user, file)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn 
deleteKeys(user, file, existingKeys, keyIds, true)\n}\n\n// ListKeys returns either the full keys or key comments from the authorized ssh keys file for user.\nfunc ListKeysFromFile(user, file string, mode ListMode) ([]string, error) {\n\tkeysMutex.Lock()\n\tdefer keysMutex.Unlock()\n\tkeyData, err := readAuthorisedKeys(user, file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn listKeys(keyData, mode)\n}\n\nfunc addKeys(user, file string, newKeys, existingKeys []string) error {\n\tfor _, newKey := range newKeys {\n\t\tfingerprint, comment, err := KeyFingerprint(newKey)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif comment == \"\" {\n\t\t\treturn errors.Errorf(\"cannot add ssh key without comment\")\n\t\t}\n\t\tfor _, key := range existingKeys {\n\t\t\texistingFingerprint, existingComment, err := KeyFingerprint(key)\n\t\t\tif err != nil {\n\t\t\t\t// Only log a warning if the unrecognised key line is not a comment.\n\t\t\t\tif key[0] != '#' {\n\t\t\t\t\tlogger.Warningf(\"invalid existing ssh key %q: %v\", key, err)\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif existingFingerprint == fingerprint {\n\t\t\t\treturn errors.Errorf(\"cannot add duplicate ssh key: %v\", fingerprint)\n\t\t\t}\n\t\t\tif existingComment == comment {\n\t\t\t\treturn errors.Errorf(\"cannot add ssh key with duplicate comment: %v\", comment)\n\t\t\t}\n\t\t}\n\t}\n\tsshKeys := append(existingKeys, newKeys...)\n\treturn writeAuthorisedKeys(user, file, sshKeys)\n}\n\nfunc deleteKeys(user, file string, existingKeys, keyIdsToDelete []string, deleteAll bool) error {\n\t// Build up a map of keys indexed by fingerprint, and fingerprints indexed by comment\n\t// so we can easily get the key represented by each keyId, which may be either a fingerprint\n\t// or comment.\n\tvar keysToWrite []string\n\tvar sshKeys = make(map[string]string)\n\tvar keyComments = make(map[string]string)\n\tfor _, key := range existingKeys {\n\t\tfingerprint, comment, err := KeyFingerprint(key)\n\t\tif err != nil 
{\n\t\t\tlogger.Debugf(\"keeping unrecognised existing ssh key %q: %v\", key, err)\n\t\t\tkeysToWrite = append(keysToWrite, key)\n\t\t\tcontinue\n\t\t}\n\t\tsshKeys[fingerprint] = key\n\t\tif comment != \"\" {\n\t\t\tkeyComments[comment] = fingerprint\n\t\t}\n\t}\n\tfor _, keyId := range keyIdsToDelete {\n\t\t// assume keyId may be a fingerprint\n\t\tfingerprint := keyId\n\t\t_, ok := sshKeys[keyId]\n\t\tif !ok {\n\t\t\t// keyId is a comment\n\t\t\tfingerprint, ok = keyComments[keyId]\n\t\t}\n\t\tif !ok {\n\t\t\treturn errors.Errorf(\"cannot delete non existent key: %v\", keyId)\n\t\t}\n\t\tdelete(sshKeys, fingerprint)\n\t}\n\tfor _, key := range sshKeys {\n\t\tkeysToWrite = append(keysToWrite, key)\n\t}\n\tif len(keysToWrite) == 0 && !deleteAll {\n\t\treturn errors.Errorf(\"cannot delete all keys\")\n\t}\n\treturn writeAuthorisedKeys(user, file, keysToWrite)\n}\n\nfunc listKeys(existingKeys []string, mode ListMode) ([]string, error) {\n\tvar keys []string\n\tfor _, key := range existingKeys {\n\t\tfingerprint, comment, err := KeyFingerprint(key)\n\t\tif err != nil {\n\t\t\t// Only log a warning if the unrecognised key line is not a comment.\n\t\t\tif key[0] != '#' {\n\t\t\t\tlogger.Warningf(\"ignoring invalid ssh key %q: %v\", key, err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif mode == FullKeys {\n\t\t\tkeys = append(keys, key)\n\t\t} else {\n\t\t\tshortKey := fingerprint\n\t\t\tif comment != \"\" {\n\t\t\t\tshortKey += fmt.Sprintf(\" (%s)\", comment)\n\t\t\t}\n\t\t\tkeys = append(keys, shortKey)\n\t\t}\n\t}\n\treturn keys, nil\n}\n"
  },
  {
    "path": "ssh/authorisedkeys_test.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage ssh_test\n\nimport (\n\t\"encoding/base64\"\n\t\"strings\"\n\n\tgitjujutesting \"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/ssh\"\n\tsshtesting \"github.com/juju/utils/v4/ssh/testing\"\n)\n\ntype AuthorisedKeysKeysSuite struct {\n\tgitjujutesting.FakeHomeSuite\n}\n\nconst (\n\t// We'll use the current user for ssh tests.\n\ttestSSHUser          = \"\"\n\tauthKeysFile         = \"authorized_keys\"\n\talternativeKeysFile2 = \"authorized_keys2\"\n\talternativeKeysFile3 = \"authorized_keys3\"\n)\n\nvar _ = gc.Suite(&AuthorisedKeysKeysSuite{})\n\nfunc writeAuthKeysFile(c *gc.C, keys []string, file string) {\n\terr := ssh.WriteAuthorisedKeys(testSSHUser, file, keys)\n\tc.Assert(err, jc.ErrorIsNil)\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestListKeys(c *gc.C) {\n\tkeys := []string{\n\t\tsshtesting.ValidKeyOne.Key + \" user@host\",\n\t\tsshtesting.ValidKeyTwo.Key,\n\t}\n\twriteAuthKeysFile(c, keys, authKeysFile)\n\tkeys, err := ssh.ListKeys(testSSHUser, ssh.Fingerprints)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(\n\t\tkeys, gc.DeepEquals,\n\t\t[]string{sshtesting.ValidKeyOne.Fingerprint + \" (user@host)\", sshtesting.ValidKeyTwo.Fingerprint})\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestListKeysFull(c *gc.C) {\n\tkeys := []string{\n\t\tsshtesting.ValidKeyOne.Key + \" user@host\",\n\t\tsshtesting.ValidKeyTwo.Key + \" anotheruser@host\",\n\t}\n\twriteAuthKeysFile(c, keys, authKeysFile)\n\tactual, err := ssh.ListKeys(testSSHUser, ssh.FullKeys)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(actual, gc.DeepEquals, keys)\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestAddNewKey(c *gc.C) {\n\tkey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\terr := ssh.AddKeys(testSSHUser, key)\n\tc.Assert(err, jc.ErrorIsNil)\n\tactual, err := ssh.ListKeys(testSSHUser, 
ssh.FullKeys)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(actual, gc.DeepEquals, []string{key})\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestAddMoreKeys(c *gc.C) {\n\tfirstKey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\twriteAuthKeysFile(c, []string{firstKey}, authKeysFile)\n\tmoreKeys := []string{\n\t\tsshtesting.ValidKeyTwo.Key + \" anotheruser@host\",\n\t\tsshtesting.ValidKeyThree.Key + \" yetanotheruser@host\",\n\t}\n\terr := ssh.AddKeys(testSSHUser, moreKeys...)\n\tc.Assert(err, jc.ErrorIsNil)\n\tactual, err := ssh.ListKeys(testSSHUser, ssh.FullKeys)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(actual, gc.DeepEquals, append([]string{firstKey}, moreKeys...))\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestAddDuplicateKey(c *gc.C) {\n\tkey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\terr := ssh.AddKeys(testSSHUser, key)\n\tc.Assert(err, jc.ErrorIsNil)\n\tmoreKeys := []string{\n\t\tsshtesting.ValidKeyOne.Key + \" user@host\",\n\t\tsshtesting.ValidKeyTwo.Key + \" yetanotheruser@host\",\n\t}\n\terr = ssh.AddKeys(testSSHUser, moreKeys...)\n\tc.Assert(err, gc.ErrorMatches, \"cannot add duplicate ssh key: \"+sshtesting.ValidKeyOne.Fingerprint)\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestAddDuplicateComment(c *gc.C) {\n\tkey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\terr := ssh.AddKeys(testSSHUser, key)\n\tc.Assert(err, jc.ErrorIsNil)\n\tmoreKeys := []string{\n\t\tsshtesting.ValidKeyTwo.Key + \" user@host\",\n\t\tsshtesting.ValidKeyThree.Key + \" yetanotheruser@host\",\n\t}\n\terr = ssh.AddKeys(testSSHUser, moreKeys...)\n\tc.Assert(err, gc.ErrorMatches, \"cannot add ssh key with duplicate comment: user@host\")\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestAddKeyWithoutComment(c *gc.C) {\n\tkeys := []string{\n\t\tsshtesting.ValidKeyOne.Key + \" user@host\",\n\t\tsshtesting.ValidKeyTwo.Key,\n\t}\n\terr := ssh.AddKeys(testSSHUser, keys...)\n\tc.Assert(err, gc.ErrorMatches, \"cannot add ssh key without comment\")\n}\n\nfunc (s *AuthorisedKeysKeysSuite) 
TestAddKeepsUnrecognised(c *gc.C) {\n\twriteAuthKeysFile(c, []string{sshtesting.ValidKeyOne.Key, \"invalid-key\"}, authKeysFile)\n\tanotherKey := sshtesting.ValidKeyTwo.Key + \" anotheruser@host\"\n\terr := ssh.AddKeys(testSSHUser, anotherKey)\n\tc.Assert(err, jc.ErrorIsNil)\n\tactual, err := ssh.ReadAuthorisedKeys(testSSHUser, authKeysFile)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(actual, gc.DeepEquals, []string{sshtesting.ValidKeyOne.Key, \"invalid-key\", anotherKey})\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestDeleteKeys(c *gc.C) {\n\tfirstKey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\tanotherKey := sshtesting.ValidKeyTwo.Key\n\tthirdKey := sshtesting.ValidKeyThree.Key + \" anotheruser@host\"\n\twriteAuthKeysFile(c, []string{firstKey, anotherKey, thirdKey}, authKeysFile)\n\terr := ssh.DeleteKeys(testSSHUser, \"user@host\", sshtesting.ValidKeyTwo.Fingerprint)\n\tc.Assert(err, jc.ErrorIsNil)\n\tactual, err := ssh.ListKeys(testSSHUser, ssh.FullKeys)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(actual, gc.DeepEquals, []string{thirdKey})\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestDeleteKeysKeepsUnrecognised(c *gc.C) {\n\tfirstKey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\twriteAuthKeysFile(c, []string{firstKey, sshtesting.ValidKeyTwo.Key, \"invalid-key\"}, authKeysFile)\n\terr := ssh.DeleteKeys(testSSHUser, \"user@host\")\n\tc.Assert(err, jc.ErrorIsNil)\n\tactual, err := ssh.ReadAuthorisedKeys(testSSHUser, authKeysFile)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(actual, gc.DeepEquals, []string{\"invalid-key\", sshtesting.ValidKeyTwo.Key})\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestDeleteNonExistentComment(c *gc.C) {\n\tfirstKey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\twriteAuthKeysFile(c, []string{firstKey}, authKeysFile)\n\terr := ssh.DeleteKeys(testSSHUser, \"someone@host\")\n\tc.Assert(err, gc.ErrorMatches, \"cannot delete non existent key: someone@host\")\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestDeleteNonExistentFingerprint(c *gc.C) 
{\n\tfirstKey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\twriteAuthKeysFile(c, []string{firstKey}, authKeysFile)\n\terr := ssh.DeleteKeys(testSSHUser, sshtesting.ValidKeyTwo.Fingerprint)\n\tc.Assert(err, gc.ErrorMatches, \"cannot delete non existent key: \"+sshtesting.ValidKeyTwo.Fingerprint)\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestDeleteLastKeyForbidden(c *gc.C) {\n\tkeys := []string{\n\t\tsshtesting.ValidKeyOne.Key + \" user@host\",\n\t\tsshtesting.ValidKeyTwo.Key + \" yetanotheruser@host\",\n\t}\n\twriteAuthKeysFile(c, keys, authKeysFile)\n\terr := ssh.DeleteKeys(testSSHUser, \"user@host\", sshtesting.ValidKeyTwo.Fingerprint)\n\tc.Assert(err, gc.ErrorMatches, \"cannot delete all keys\")\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestReplaceKeys(c *gc.C) {\n\tfirstKey := sshtesting.ValidKeyOne.Key + \" user@host\"\n\tanotherKey := sshtesting.ValidKeyTwo.Key\n\twriteAuthKeysFile(c, []string{firstKey, anotherKey}, authKeysFile)\n\n\t// replaceKey is created without a comment so test that\n\t// ReplaceKeys handles keys without comments. 
This is\n\t// because existing keys may not have a comment and\n\t// ReplaceKeys is used to rewrite the entire authorized_keys\n\t// file when adding new keys.\n\treplaceKey := sshtesting.ValidKeyThree.Key\n\terr := ssh.ReplaceKeys(testSSHUser, replaceKey)\n\tc.Assert(err, jc.ErrorIsNil)\n\tactual, err := ssh.ListKeys(testSSHUser, ssh.FullKeys)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(actual, gc.DeepEquals, []string{replaceKey})\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestReplaceKeepsUnrecognised(c *gc.C) {\n\twriteAuthKeysFile(c, []string{sshtesting.ValidKeyOne.Key, \"invalid-key\"}, authKeysFile)\n\tanotherKey := sshtesting.ValidKeyTwo.Key + \" anotheruser@host\"\n\terr := ssh.ReplaceKeys(testSSHUser, anotherKey)\n\tc.Assert(err, jc.ErrorIsNil)\n\tactual, err := ssh.ReadAuthorisedKeys(testSSHUser, authKeysFile)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(actual, gc.DeepEquals, []string{\"invalid-key\", anotherKey})\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestEnsureJujuComment(c *gc.C) {\n\tsshKey := sshtesting.ValidKeyOne.Key\n\tfor _, test := range []struct {\n\t\tkey      string\n\t\texpected string\n\t}{\n\t\t{\"invalid-key\", \"invalid-key\"},\n\t\t{sshKey, sshKey + \" Juju:sshkey\"},\n\t\t{sshKey + \" user@host\", sshKey + \" Juju:user@host\"},\n\t\t{sshKey + \" Juju:user@host\", sshKey + \" Juju:user@host\"},\n\t\t{sshKey + \" \" + sshKey[3:5], sshKey + \" Juju:\" + sshKey[3:5]},\n\t} {\n\t\tactual := ssh.EnsureJujuComment(test.key)\n\t\tc.Assert(actual, gc.Equals, test.expected)\n\t}\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestSplitAuthorisedKeys(c *gc.C) {\n\tsshKey := sshtesting.ValidKeyOne.Key\n\tfor _, test := range []struct {\n\t\tkeyData  string\n\t\texpected []string\n\t}{\n\t\t{\"\", nil},\n\t\t{sshKey, []string{sshKey}},\n\t\t{sshKey + \"\\n\", []string{sshKey}},\n\t\t{sshKey + \"\\n\\n\", []string{sshKey}},\n\t\t{sshKey + \"\\n#comment\\n\", []string{sshKey}},\n\t\t{sshKey + \"\\n #comment\\n\", []string{sshKey}},\n\t\t{sshKey + 
\"\\ninvalid\\n\", []string{sshKey, \"invalid\"}},\n\t} {\n\t\tactual := ssh.SplitAuthorisedKeys(test.keyData)\n\t\tc.Assert(actual, gc.DeepEquals, test.expected)\n\t}\n}\n\nfunc b64decode(c *gc.C, s string) []byte {\n\tb, err := base64.StdEncoding.DecodeString(s)\n\tc.Assert(err, jc.ErrorIsNil)\n\treturn b\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestParseAuthorisedKey(c *gc.C) {\n\tfor i, test := range []struct {\n\t\tline    string\n\t\tkey     []byte\n\t\tcomment string\n\t\terr     string\n\t}{{\n\t\tline: sshtesting.ValidKeyOne.Key,\n\t\tkey:  b64decode(c, strings.Fields(sshtesting.ValidKeyOne.Key)[1]),\n\t}, {\n\t\tline:    sshtesting.ValidKeyOne.Key + \" a b c\",\n\t\tkey:     b64decode(c, strings.Fields(sshtesting.ValidKeyOne.Key)[1]),\n\t\tcomment: \"a b c\",\n\t}, {\n\t\tline: \"ssh-xsa blah\",\n\t\terr:  \"invalid authorized_key \\\"ssh-xsa blah\\\"\",\n\t}, {\n\t\t// options should be skipped\n\t\tline: `no-pty,principals=\"\\\"\",command=\"\\!\" ` + sshtesting.ValidKeyOne.Key,\n\t\tkey:  b64decode(c, strings.Fields(sshtesting.ValidKeyOne.Key)[1]),\n\t}, {\n\t\tline: \"ssh-rsa\",\n\t\terr:  \"invalid authorized_key \\\"ssh-rsa\\\"\",\n\t}, {\n\t\tline: sshtesting.ValidKeyOne.Key + \" line1\\nline2\",\n\t\terr:  \"newline in authorized_key \\\".*\",\n\t}} {\n\t\tc.Logf(\"test %d: %s\", i, test.line)\n\t\tak, err := ssh.ParseAuthorisedKey(test.line)\n\t\tif test.err != \"\" {\n\t\t\tc.Assert(err, gc.ErrorMatches, test.err)\n\t\t} else {\n\t\t\tc.Assert(err, jc.ErrorIsNil)\n\t\t\tc.Assert(ak, gc.Not(gc.IsNil))\n\t\t\tc.Assert(ak.Key, gc.DeepEquals, test.key)\n\t\t\tc.Assert(ak.Comment, gc.Equals, test.comment)\n\t\t}\n\t}\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestConcatAuthorisedKeys(c *gc.C) {\n\tfor _, test := range []struct{ a, b, result string }{\n\t\t{\"a\", \"\", \"a\"},\n\t\t{\"\", \"b\", \"b\"},\n\t\t{\"a\", \"b\", \"a\\nb\"},\n\t\t{\"a\\n\", \"b\", \"a\\nb\"},\n\t} {\n\t\tc.Check(ssh.ConcatAuthorisedKeys(test.a, test.b), gc.Equals, 
test.result)\n\t}\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestAddKeysToFileToDifferentFiles(c *gc.C) {\n\tkey1 := sshtesting.ValidKeyOne.Key + \" user@host\"\n\terr := ssh.AddKeysToFile(testSSHUser, alternativeKeysFile2, []string{key1})\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tlist1, err := ssh.ListKeysFromFile(testSSHUser, alternativeKeysFile2, ssh.FullKeys)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(list1, gc.DeepEquals, []string{key1})\n\n\tkey2 := sshtesting.ValidKeyTwo.Key + \" user@host\"\n\terr = ssh.AddKeysToFile(testSSHUser, alternativeKeysFile3, []string{key2})\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tlist2, err := ssh.ListKeysFromFile(testSSHUser, alternativeKeysFile3, ssh.FullKeys)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(list2, gc.DeepEquals, []string{key2})\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestAddKeysToFileMultipleKeys(c *gc.C) {\n\tkey1 := sshtesting.ValidKeyOne.Key + \" user@host\"\n\tkey2 := sshtesting.ValidKeyTwo.Key + \" alice@host\"\n\terr := ssh.AddKeysToFile(testSSHUser, alternativeKeysFile2, []string{key1, key2})\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tlist, err := ssh.ListKeysFromFile(testSSHUser, alternativeKeysFile2, ssh.FullKeys)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(list, jc.DeepEquals, []string{key1, key2})\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestDeleteAllKeysFromFile(c *gc.C) {\n\tkey1 := sshtesting.ValidKeyOne.Key + \" user@host\"\n\twriteAuthKeysFile(c, []string{key1}, alternativeKeysFile2)\n\n\terr := ssh.DeleteKeysFromFile(testSSHUser, alternativeKeysFile2, []string{sshtesting.ValidKeyOne.Fingerprint})\n\tc.Assert(err, jc.ErrorIsNil)\n\n\temptyList, err := ssh.ListKeysFromFile(testSSHUser, alternativeKeysFile2, ssh.FullKeys)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(emptyList, gc.HasLen, 0)\n}\n\nfunc (s *AuthorisedKeysKeysSuite) TestDeleteSomeKeysFromFile(c *gc.C) {\n\tkey1 := sshtesting.ValidKeyOne.Key + \" user@host\"\n\tkey2 := sshtesting.ValidKeyTwo.Key + \" alice@host\"\n\tkey3 := sshtesting.ValidKeyThree.Key + \" 
bob@host\"\n\twriteAuthKeysFile(c, []string{key1, key2, key3}, alternativeKeysFile2)\n\n\terr := ssh.DeleteKeysFromFile(testSSHUser, alternativeKeysFile2, []string{sshtesting.ValidKeyTwo.Fingerprint})\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tkeys, err := ssh.ListKeysFromFile(testSSHUser, alternativeKeysFile2, ssh.FullKeys)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(keys, gc.HasLen, 2)\n\tc.Assert(keys, jc.SameContents, []string{key1, key3})\n}\n"
  },
  {
    "path": "ssh/clientkeys.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage ssh\n\nimport (\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"sync\"\n\n\t\"github.com/juju/collections/set\"\n\t\"golang.org/x/crypto/ssh\"\n\n\t\"github.com/juju/utils/v4\"\n)\n\nconst clientKeyName = \"juju_id_ed25519\"\n\n// PublicKeySuffix is the file extension for public key files.\nconst PublicKeySuffix = \".pub\"\n\nvar (\n\tclientKeysMutex sync.Mutex\n\n\t// clientKeys is a cached map of private key filenames\n\t// to ssh.Signers. The private keys are those loaded\n\t// from the client key directory, passed to LoadClientKeys.\n\tclientKeys map[string]ssh.Signer\n)\n\n// LoadClientKeys loads the client SSH keys from the\n// specified directory, and caches them as a process-wide\n// global. If the directory does not exist, it is created;\n// if the directory did not exist, or contains no keys, it\n// is populated with a new key pair.\n//\n// If the directory exists, then all pairs of files where one\n// has the same name as the other + \".pub\" will be loaded as\n// private/public key pairs.\n//\n// Calls to LoadClientKeys will clear the previously loaded\n// keys, and recompute the keys.\nfunc LoadClientKeys(dir string) error {\n\tclientKeysMutex.Lock()\n\tdefer clientKeysMutex.Unlock()\n\tdir, err := utils.NormalizePath(dir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, err := os.Stat(dir); err == nil {\n\t\tkeys, err := loadClientKeys(dir)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t} else if len(keys) > 0 {\n\t\t\tclientKeys = keys\n\t\t\treturn nil\n\t\t}\n\t\t// Directory exists but contains no keys;\n\t\t// fall through and create one.\n\t}\n\tif err := os.MkdirAll(dir, 0700); err != nil {\n\t\treturn err\n\t}\n\tkeyfile, key, err := generateClientKey(dir)\n\tif err != nil {\n\t\tos.RemoveAll(dir)\n\t\treturn err\n\t}\n\tclientKeys = map[string]ssh.Signer{keyfile: key}\n\treturn nil\n}\n\n// 
ClearClientKeys clears the client keys cached in memory.\nfunc ClearClientKeys() {\n\tclientKeysMutex.Lock()\n\tdefer clientKeysMutex.Unlock()\n\tclientKeys = nil\n}\n\nfunc generateClientKey(dir string) (keyfile string, key ssh.Signer, err error) {\n\tprivate, public, err := GenerateKey(\"juju-client-key\")\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\tclientPrivateKey, err := ssh.ParsePrivateKey([]byte(private))\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\tprivkeyFilename := filepath.Join(dir, clientKeyName)\n\tif err = ioutil.WriteFile(privkeyFilename, []byte(private), 0600); err != nil {\n\t\treturn \"\", nil, err\n\t}\n\tif err := ioutil.WriteFile(privkeyFilename+PublicKeySuffix, []byte(public), 0600); err != nil {\n\t\tos.Remove(privkeyFilename)\n\t\treturn \"\", nil, err\n\t}\n\treturn privkeyFilename, clientPrivateKey, nil\n}\n\nfunc loadClientKeys(dir string) (map[string]ssh.Signer, error) {\n\tpublicKeyFiles, err := publicKeyFiles(dir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkeys := make(map[string]ssh.Signer, len(publicKeyFiles))\n\tfor _, filename := range publicKeyFiles {\n\t\tfilename = filename[:len(filename)-len(PublicKeySuffix)]\n\t\tdata, err := ioutil.ReadFile(filename)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tkeys[filename], err = ssh.ParsePrivateKey(data)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"parsing key file %q: %v\", filename, err)\n\t\t}\n\t}\n\treturn keys, nil\n}\n\n// privateKeys returns the private keys loaded by LoadClientKeys.\nfunc privateKeys() (signers []ssh.Signer) {\n\tclientKeysMutex.Lock()\n\tdefer clientKeysMutex.Unlock()\n\tfor _, key := range clientKeys {\n\t\tsigners = append(signers, key)\n\t}\n\treturn signers\n}\n\n// PrivateKeyFiles returns the filenames of private SSH keys loaded by\n// LoadClientKeys.\nfunc PrivateKeyFiles() []string {\n\tclientKeysMutex.Lock()\n\tdefer clientKeysMutex.Unlock()\n\tkeyfiles := make([]string, 0, len(clientKeys))\n\tfor f := range 
clientKeys {\n\t\tkeyfiles = append(keyfiles, f)\n\t}\n\treturn keyfiles\n}\n\n// PublicKeyFiles returns the filenames of public SSH keys loaded by\n// LoadClientKeys.\nfunc PublicKeyFiles() []string {\n\tprivkeys := PrivateKeyFiles()\n\tpubkeys := make([]string, len(privkeys))\n\tfor i, priv := range privkeys {\n\t\tpubkeys[i] = priv + PublicKeySuffix\n\t}\n\treturn pubkeys\n}\n\n// publicKeyFiles returns the filenames of public SSH keys\n// in the specified directory (all the files ending with .pub).\nfunc publicKeyFiles(clientKeysDir string) ([]string, error) {\n\tif clientKeysDir == \"\" {\n\t\treturn nil, nil\n\t}\n\tvar keys []string\n\tdir, err := os.Open(clientKeysDir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnames, err := dir.Readdirnames(-1)\n\tdir.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcandidates := set.NewStrings(names...)\n\tfor _, name := range names {\n\t\tif !strings.HasSuffix(name, PublicKeySuffix) {\n\t\t\tcontinue\n\t\t}\n\t\t// If the private key filename also exists, add the file.\n\t\tpriv := name[:len(name)-len(PublicKeySuffix)]\n\t\tif candidates.Contains(priv) {\n\t\t\tkeys = append(keys, filepath.Join(dir.Name(), name))\n\t\t}\n\t}\n\treturn keys, nil\n}\n"
  },
  {
    "path": "ssh/clientkeys_test.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage ssh_test\n\nimport (\n\t\"io/ioutil\"\n\t\"os\"\n\n\tgitjujutesting \"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4\"\n\t\"github.com/juju/utils/v4/ssh\"\n)\n\ntype ClientKeysSuite struct {\n\tgitjujutesting.FakeHomeSuite\n}\n\nvar _ = gc.Suite(&ClientKeysSuite{})\n\nfunc (s *ClientKeysSuite) SetUpTest(c *gc.C) {\n\ts.FakeHomeSuite.SetUpTest(c)\n\ts.AddCleanup(func(*gc.C) { ssh.ClearClientKeys() })\n\tgenerateKeyRestorer := overrideGenerateKey()\n\ts.AddCleanup(func(*gc.C) { generateKeyRestorer.Restore() })\n}\n\nfunc checkFiles(c *gc.C, obtained, expected []string) {\n\tvar err error\n\tfor i, e := range expected {\n\t\texpected[i], err = utils.NormalizePath(e)\n\t\tc.Assert(err, jc.ErrorIsNil)\n\t}\n\tc.Assert(obtained, jc.SameContents, expected)\n}\n\nfunc checkPublicKeyFiles(c *gc.C, expected ...string) {\n\tkeys := ssh.PublicKeyFiles()\n\tcheckFiles(c, keys, expected)\n}\n\nfunc checkPrivateKeyFiles(c *gc.C, expected ...string) {\n\tkeys := ssh.PrivateKeyFiles()\n\tcheckFiles(c, keys, expected)\n}\n\nfunc (s *ClientKeysSuite) TestPublicKeyFiles(c *gc.C) {\n\t// LoadClientKeys will create the specified directory\n\t// and populate it with a key pair.\n\terr := ssh.LoadClientKeys(\"~/.juju/ssh\")\n\tc.Assert(err, jc.ErrorIsNil)\n\tcheckPublicKeyFiles(c, \"~/.juju/ssh/juju_id_ed25519.pub\")\n\t// All files ending with .pub in the client key dir get picked up.\n\tpriv, pub, err := ssh.GenerateKey(\"whatever\")\n\tc.Assert(err, jc.ErrorIsNil)\n\terr = ioutil.WriteFile(gitjujutesting.HomePath(\".juju\", \"ssh\", \"whatever.pub\"), []byte(pub), 0600)\n\tc.Assert(err, jc.ErrorIsNil)\n\terr = ssh.LoadClientKeys(\"~/.juju/ssh\")\n\tc.Assert(err, jc.ErrorIsNil)\n\t// The new public key won't be observed until the\n\t// corresponding private key 
exists.\n\tcheckPublicKeyFiles(c, \"~/.juju/ssh/juju_id_ed25519.pub\")\n\terr = ioutil.WriteFile(gitjujutesting.HomePath(\".juju\", \"ssh\", \"whatever\"), []byte(priv), 0600)\n\tc.Assert(err, jc.ErrorIsNil)\n\terr = ssh.LoadClientKeys(\"~/.juju/ssh\")\n\tc.Assert(err, jc.ErrorIsNil)\n\tcheckPublicKeyFiles(c, \"~/.juju/ssh/juju_id_ed25519.pub\", \"~/.juju/ssh/whatever.pub\")\n}\n\nfunc (s *ClientKeysSuite) TestPrivateKeyFiles(c *gc.C) {\n\t// Create/load client keys. They will be cached in memory:\n\t// any files added to the directory will not be considered\n\t// unless LoadClientKeys is called again.\n\terr := ssh.LoadClientKeys(\"~/.juju/ssh\")\n\tc.Assert(err, jc.ErrorIsNil)\n\tcheckPrivateKeyFiles(c, \"~/.juju/ssh/juju_id_ed25519\")\n\tpriv, pub, err := ssh.GenerateKey(\"whatever\")\n\tc.Assert(err, jc.ErrorIsNil)\n\terr = ioutil.WriteFile(gitjujutesting.HomePath(\".juju\", \"ssh\", \"whatever\"), []byte(priv), 0600)\n\tc.Assert(err, jc.ErrorIsNil)\n\terr = ssh.LoadClientKeys(\"~/.juju/ssh\")\n\tc.Assert(err, jc.ErrorIsNil)\n\t// The new private key won't be observed until the\n\t// corresponding public key exists.\n\tcheckPrivateKeyFiles(c, \"~/.juju/ssh/juju_id_ed25519\")\n\terr = ioutil.WriteFile(gitjujutesting.HomePath(\".juju\", \"ssh\", \"whatever.pub\"), []byte(pub), 0600)\n\tc.Assert(err, jc.ErrorIsNil)\n\t// new keys won't be reported until we call LoadClientKeys again\n\tcheckPublicKeyFiles(c, \"~/.juju/ssh/juju_id_ed25519.pub\")\n\tcheckPrivateKeyFiles(c, \"~/.juju/ssh/juju_id_ed25519\")\n\terr = ssh.LoadClientKeys(\"~/.juju/ssh\")\n\tc.Assert(err, jc.ErrorIsNil)\n\tcheckPublicKeyFiles(c, \"~/.juju/ssh/juju_id_ed25519.pub\", \"~/.juju/ssh/whatever.pub\")\n\tcheckPrivateKeyFiles(c, \"~/.juju/ssh/juju_id_ed25519\", \"~/.juju/ssh/whatever\")\n}\n\nfunc (s *ClientKeysSuite) TestLoadClientKeysDirExists(c *gc.C) {\n\terr := os.MkdirAll(gitjujutesting.HomePath(\".juju\", \"ssh\"), 0755)\n\tc.Assert(err, jc.ErrorIsNil)\n\terr = 
ssh.LoadClientKeys(\"~/.juju/ssh\")\n\tc.Assert(err, jc.ErrorIsNil)\n\tcheckPrivateKeyFiles(c, \"~/.juju/ssh/juju_id_ed25519\")\n}\n"
  },
  {
    "path": "ssh/export_test.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage ssh\n\nimport (\n\t\"sync/atomic\"\n\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/testing\"\n)\n\nvar (\n\tReadAuthorisedKeys  = readAuthorisedKeys\n\tWriteAuthorisedKeys = writeAuthorisedKeys\n\tInitDefaultClient   = initDefaultClient\n\tDefaultIdentities   = &defaultIdentities\n\tSSHDial             = &sshDial\n\tED25519GenerateKey  = &ed25519GenerateKey\n\tTestCopyReader      = copyReader\n\tTestNewCmd          = newCmd\n)\n\ntype ReadLineWriter readLineWriter\n\nfunc PatchTerminal(s *testing.CleanupSuite, rlw ReadLineWriter) {\n\tvar balance int64\n\ts.PatchValue(&getTerminal, func() (readLineWriter, func(), error) {\n\t\tatomic.AddInt64(&balance, 1)\n\t\tcleanup := func() {\n\t\t\tatomic.AddInt64(&balance, -1)\n\t\t}\n\t\treturn rlw, cleanup, nil\n\t})\n\ts.AddCleanup(func(c *gc.C) {\n\t\tc.Assert(atomic.LoadInt64(&balance), gc.Equals, int64(0))\n\t})\n}\n\nfunc PatchNilTerminal(s *testing.CleanupSuite) {\n\ts.PatchValue(&getTerminal, func() (readLineWriter, func(), error) {\n\t\treturn nil, func() {}, nil\n\t})\n}\n"
  },
  {
    "path": "ssh/fakes_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage ssh_test\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"io/ioutil\"\n\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/ssh\"\n)\n\ntype fakeClient struct {\n\tcalls      []string\n\thostArg    string\n\tcommandArg []string\n\toptionsArg *ssh.Options\n\tcopyArgs   []string\n\n\terr  error\n\tcmd  *ssh.Cmd\n\timpl fakeCommandImpl\n}\n\nfunc (cl *fakeClient) checkCalls(c *gc.C, host string, command []string, options *ssh.Options, copyArgs []string, calls ...string) {\n\tc.Check(cl.hostArg, gc.Equals, host)\n\tc.Check(cl.commandArg, jc.DeepEquals, command)\n\tc.Check(cl.optionsArg, gc.Equals, options)\n\tc.Check(cl.copyArgs, jc.DeepEquals, copyArgs)\n\tc.Check(cl.calls, jc.DeepEquals, calls)\n}\n\nfunc (cl *fakeClient) Command(host string, command []string, options *ssh.Options) *ssh.Cmd {\n\tcl.calls = append(cl.calls, \"Command\")\n\tcl.hostArg = host\n\tcl.commandArg = command\n\tcl.optionsArg = options\n\tcmd := cl.cmd\n\tif cmd == nil {\n\t\tcmd = ssh.TestNewCmd(&cl.impl)\n\t}\n\treturn cmd\n}\n\nfunc (cl *fakeClient) Copy(args []string, options *ssh.Options) error {\n\tcl.calls = append(cl.calls, \"Copy\")\n\tcl.copyArgs = args\n\tcl.optionsArg = options\n\treturn cl.err\n}\n\ntype bufferWriter struct {\n\tbytes.Buffer\n}\n\nfunc (*bufferWriter) Close() error {\n\treturn nil\n}\n\ntype fakeCommandImpl struct {\n\tcalls     []string\n\tstdinArg  io.Reader\n\tstdoutArg io.Writer\n\tstderrArg io.Writer\n\tstdinData bufferWriter\n\n\terr        error\n\tstdinRaw   io.Reader\n\tstdoutRaw  io.Writer\n\tstderrRaw  io.Writer\n\tstdoutData bytes.Buffer\n\tstderrData bytes.Buffer\n}\n\nfunc (ci *fakeCommandImpl) checkCalls(c *gc.C, stdin io.Reader, stdout, stderr io.Writer, calls ...string) {\n\tc.Check(ci.stdinArg, gc.Equals, stdin)\n\tc.Check(ci.stdoutArg, gc.Equals, stdout)\n\tc.Check(ci.stderrArg, 
gc.Equals, stderr)\n\tc.Check(ci.calls, jc.DeepEquals, calls)\n}\n\nfunc (ci *fakeCommandImpl) checkStdin(c *gc.C, data string) {\n\tc.Check(ci.stdinData.String(), gc.Equals, data)\n}\n\nfunc (ci *fakeCommandImpl) Start() error {\n\tci.calls = append(ci.calls, \"Start\")\n\treturn ci.err\n}\n\nfunc (ci *fakeCommandImpl) Wait() error {\n\tci.calls = append(ci.calls, \"Wait\")\n\treturn ci.err\n}\n\nfunc (ci *fakeCommandImpl) Kill() error {\n\tci.calls = append(ci.calls, \"Kill\")\n\treturn ci.err\n}\n\nfunc (ci *fakeCommandImpl) SetStdio(stdin io.Reader, stdout, stderr io.Writer) {\n\tci.calls = append(ci.calls, \"SetStdio\")\n\tci.stdinArg = stdin\n\tci.stdoutArg = stdout\n\tci.stderrArg = stderr\n}\n\nfunc (ci *fakeCommandImpl) StdinPipe() (io.WriteCloser, io.Reader, error) {\n\tci.calls = append(ci.calls, \"StdinPipe\")\n\treturn &ci.stdinData, ci.stdinRaw, ci.err\n}\n\nfunc (ci *fakeCommandImpl) StdoutPipe() (io.ReadCloser, io.Writer, error) {\n\tci.calls = append(ci.calls, \"StdoutPipe\")\n\treturn ioutil.NopCloser(&ci.stdoutData), ci.stdoutRaw, ci.err\n}\n\nfunc (ci *fakeCommandImpl) StderrPipe() (io.ReadCloser, io.Writer, error) {\n\tci.calls = append(ci.calls, \"StderrPipe\")\n\treturn ioutil.NopCloser(&ci.stderrData), ci.stderrRaw, ci.err\n}\n"
  },
  {
    "path": "ssh/fingerprint.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage ssh\n\nimport (\n\t\"bytes\"\n\t\"crypto/md5\"\n\t\"fmt\"\n\n\t\"github.com/juju/errors\"\n)\n\n// KeyFingerprint returns the fingerprint and comment for the specified key\n// in authorized_key format. Fingerprints are generated according to RFC4716.\n// See http://www.ietf.org/rfc/rfc4716.txt, section 4.\nfunc KeyFingerprint(key string) (fingerprint, comment string, err error) {\n\tak, err := ParseAuthorisedKey(key)\n\tif err != nil {\n\t\treturn \"\", \"\", errors.Errorf(\"generating key fingerprint: %v\", err)\n\t}\n\thash := md5.New()\n\thash.Write(ak.Key)\n\tsum := hash.Sum(nil)\n\tvar buf bytes.Buffer\n\tfor i := 0; i < hash.Size(); i++ {\n\t\tif i > 0 {\n\t\t\tbuf.WriteByte(':')\n\t\t}\n\t\tbuf.WriteString(fmt.Sprintf(\"%02x\", sum[i]))\n\t}\n\treturn buf.String(), ak.Comment, nil\n}\n"
  },
  {
    "path": "ssh/fingerprint_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage ssh_test\n\nimport (\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/ssh\"\n\tsshtesting \"github.com/juju/utils/v4/ssh/testing\"\n)\n\ntype FingerprintSuite struct {\n\ttesting.IsolationSuite\n}\n\nvar _ = gc.Suite(&FingerprintSuite{})\n\nfunc (s *FingerprintSuite) TestKeyFingerprint(c *gc.C) {\n\tkeys := []sshtesting.SSHKey{\n\t\tsshtesting.ValidKeyOne,\n\t\tsshtesting.ValidKeyTwo,\n\t\tsshtesting.ValidKeyThree,\n\t}\n\tfor _, k := range keys {\n\t\tfingerprint, _, err := ssh.KeyFingerprint(k.Key)\n\t\tc.Assert(err, jc.ErrorIsNil)\n\t\tc.Assert(fingerprint, gc.Equals, k.Fingerprint)\n\t}\n}\n\nfunc (s *FingerprintSuite) TestKeyFingerprintError(c *gc.C) {\n\t_, _, err := ssh.KeyFingerprint(\"invalid key\")\n\tc.Assert(err, gc.ErrorMatches, `generating key fingerprint: invalid authorized_key \"invalid key\"`)\n}\n"
  },
  {
    "path": "ssh/generate.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage ssh\n\nimport (\n\t\"crypto/ed25519\"\n\t\"crypto/rand\"\n\t\"encoding/pem\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/juju/errors\"\n\t\"golang.org/x/crypto/ssh\"\n)\n\n// ed25519GenerateKey allows for tests to patch out ed25519 key generation\nvar ed25519GenerateKey = ed25519.GenerateKey\n\n// GenerateKey makes an ED25519 no-passphrase SSH capable key.\n// The private key returned is PEM-encoded in the OpenSSH private key format.\n// The public key is suitable to be added into an authorized_keys file,\n// and has the comment passed in as the comment part of the key.\nfunc GenerateKey(comment string) (private, public string, err error) {\n\t_, privateKey, err := ed25519GenerateKey(rand.Reader)\n\tif err != nil {\n\t\treturn \"\", \"\", errors.Trace(err)\n\t}\n\n\tpemBlock, err := ssh.MarshalPrivateKey(privateKey, comment)\n\tif err != nil {\n\t\treturn \"\", \"\", errors.Trace(err)\n\t}\n\tidentity := pem.EncodeToMemory(pemBlock)\n\n\tpublic, err = PublicKey(identity, comment)\n\tif err != nil {\n\t\treturn \"\", \"\", errors.Trace(err)\n\t}\n\n\treturn string(identity), public, nil\n}\n\n// PublicKey returns the public key for any private key. The public key is\n// suitable to be added into an authorized_keys file, and has the comment\n// passed in as the comment part of the key.\nfunc PublicKey(privateKey []byte, comment string) (string, error) {\n\tsigner, err := ssh.ParsePrivateKey(privateKey)\n\tif err != nil {\n\t\treturn \"\", errors.Annotate(err, \"failed to load key\")\n\t}\n\n\tauth_key := string(ssh.MarshalAuthorizedKey(signer.PublicKey()))\n\t// Strip off the trailing new line so we can add a comment.\n\tauth_key = strings.TrimSpace(auth_key)\n\tpublic := fmt.Sprintf(\"%s %s\\n\", auth_key, comment)\n\n\treturn public, nil\n}\n"
  },
  {
    "path": "ssh/generate_test.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage ssh_test\n\nimport (\n\t\"crypto/dsa\"\n\t\"crypto/ed25519\"\n\t\"io\"\n\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/ssh\"\n)\n\ntype GenerateSuite struct {\n\ttesting.IsolationSuite\n}\n\nvar _ = gc.Suite(&GenerateSuite{})\n\nvar (\n\tpregeneratedKey ed25519.PrivateKey\n)\n\n// overrideGenerateKey patches out ed25519 key generation to create a single\n// testing key which is saved and used between tests to save computation time.\nfunc overrideGenerateKey() testing.Restorer {\n\trestorer := testing.PatchValue(ssh.ED25519GenerateKey, func(random io.Reader) (ed25519.PublicKey, ed25519.PrivateKey, error) {\n\t\tif pregeneratedKey != nil {\n\t\t\treturn ed25519.PublicKey{}, pregeneratedKey, nil\n\t\t}\n\t\tpublic, private, err := generateED25519Key(random)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\t\tpregeneratedKey = private\n\t\treturn public, private, nil\n\t})\n\treturn restorer\n}\n\nfunc generateED25519Key(random io.Reader) (ed25519.PublicKey, ed25519.PrivateKey, error) {\n\t// ed25519 keys are a fixed size, so there is no bit size to tune for speed.\n\tpublic, private, err := ed25519.GenerateKey(random)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn public, private, nil\n}\n\nfunc generateDSAKey(random io.Reader) (*dsa.PrivateKey, error) {\n\tvar privKey dsa.PrivateKey\n\tif err := dsa.GenerateParameters(&privKey.Parameters, random, dsa.L1024N160); err != nil {\n\t\treturn nil, err\n\t}\n\tif err := dsa.GenerateKey(&privKey, random); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &privKey, nil\n}\n\nfunc (s *GenerateSuite) TestGenerate(c *gc.C) {\n\tdefer overrideGenerateKey().Restore()\n\tprivate, public, err := ssh.GenerateKey(\"some-comment\")\n\tc.Check(err, jc.ErrorIsNil)\n\tc.Check(private, jc.HasPrefix, \"-----BEGIN OPENSSH PRIVATE 
KEY-----\\n\")\n\tc.Check(private, jc.HasSuffix, \"-----END OPENSSH PRIVATE KEY-----\\n\")\n\tc.Check(public, jc.HasPrefix, \"ssh-ed25519 \")\n\tc.Check(public, jc.HasSuffix, \" some-comment\\n\")\n}\n"
  },
  {
    "path": "ssh/package_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage ssh_test\n\nimport (\n\t\"testing\"\n\n\tgc \"gopkg.in/check.v1\"\n)\n\nfunc TestPackage(t *testing.T) {\n\tgc.TestingT(t)\n}\n"
  },
  {
    "path": "ssh/run.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage ssh\n\nimport (\n\t\"bytes\"\n\t\"os/exec\"\n\t\"strings\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com/juju/clock\"\n\t\"github.com/juju/errors\"\n\n\tutilexec \"github.com/juju/utils/v4/exec\"\n)\n\n// ExecParams are used for the parameters for ExecuteCommandOnMachine.\ntype ExecParams struct {\n\tIdentityFile string\n\tHost         string\n\tCommand      string\n\tTimeout      time.Duration\n}\n\n// StartCommandOnMachine executes the command on the given host. The\n// command is run in a Bash shell over an SSH connection. All output\n// is captured. A RunningCmd is returned that may be used to wait\n// for the command to finish running.\nfunc StartCommandOnMachine(params ExecParams) (*RunningCmd, error) {\n\t// execute bash accepting commands on stdin\n\tif params.Host == \"\" {\n\t\treturn nil, errors.Errorf(\"missing host address\")\n\t}\n\tlogger.Debugf(\"execute on %s\", params.Host)\n\n\tvar options Options\n\tif params.IdentityFile != \"\" {\n\t\toptions.SetIdentities(params.IdentityFile)\n\t}\n\tcommand := Command(params.Host, []string{\"/bin/bash\", \"-s\"}, &options)\n\n\t// Run the command.\n\trunning := &RunningCmd{\n\t\tSSHCmd: command,\n\t}\n\tcommand.Stdout = &running.Stdout\n\tcommand.Stderr = &running.Stderr\n\tcommand.Stdin = strings.NewReader(params.Command + \"\\n\")\n\tif err := command.Start(); err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\n\treturn running, nil\n}\n\n// RunningCmd represents a command that has been started.\ntype RunningCmd struct {\n\t// SSHCmd is the command the was started.\n\tSSHCmd *Cmd\n\n\t// Stdout and Stderr are the output streams the command is using.\n\tStdout bytes.Buffer\n\tStderr bytes.Buffer\n}\n\n// Wait waits for the command to complete and returns the result.\nfunc (cmd *RunningCmd) Wait() (result utilexec.ExecResponse, _ error) {\n\tdefer func() {\n\t\t// Gather as much as we 
have from stdout and stderr.\n\t\tresult.Stdout = cmd.Stdout.Bytes()\n\t\tresult.Stderr = cmd.Stderr.Bytes()\n\t}()\n\n\terr := cmd.SSHCmd.Wait()\n\tlogger.Debugf(\"command.Wait finished (err: %v)\", err)\n\tcode, err := getExitCode(err)\n\tif err != nil {\n\t\treturn result, errors.Trace(err)\n\t}\n\n\tresult.Code = code\n\treturn result, nil\n}\n\n// TODO(ericsnow) Add RunningCmd.WaitAbortable(abortChan <-chan error) ...\n// based on WaitWithTimeout and update WaitWithTimeout to use it. We\n// could make it WaitAbortable(abortChans ...<-chan error), which would\n// require using reflect.Select(). Then that could simply replace Wait().\n// It may make more sense, however, to have a helper function:\n//   Wait(cmd T, abortChans ...<-chan error) ...\n\n// Cancelled is an error indicating that a command timed out.\nvar Cancelled = errors.New(\"command timed out\")\n\n// WaitWithCancel waits for the command to complete and returns the result. If\n// cancel is closed before the result is returned, the command is killed\n// and Cancelled is returned.\nfunc (cmd *RunningCmd) WaitWithCancel(cancel <-chan struct{}) (utilexec.ExecResponse, error) {\n\tvar result utilexec.ExecResponse\n\n\tdone := make(chan error, 1)\n\tgo func() {\n\t\tdefer close(done)\n\t\twaitResult, err := cmd.Wait()\n\t\tresult = waitResult\n\t\tdone <- err\n\t}()\n\n\tselect {\n\tcase err := <-done:\n\t\treturn result, errors.Trace(err)\n\tcase <-cancel:\n\t\tlogger.Infof(\"killing the command due to cancellation\")\n\t\tcmd.SSHCmd.Kill()\n\n\t\t<-done            // Ensure that the original cmd.Wait() call completed.\n\t\tcmd.SSHCmd.Wait() // Finalize cmd.SSHCmd, if necessary.\n\t\treturn result, Cancelled\n\t}\n}\n\nfunc getExitCode(err error) (int, error) {\n\tif err == nil {\n\t\treturn 0, nil\n\t}\n\terr = errors.Cause(err)\n\tif ee, ok := err.(*exec.ExitError); ok {\n\t\traw := ee.ProcessState.Sys()\n\t\tstatus, ok := raw.(syscall.WaitStatus)\n\t\tif !ok 
{\n\t\t\tlogger.Errorf(\"unexpected type %T from ProcessState.Sys()\", raw)\n\t\t} else if status.Exited() {\n\t\t\t// A non-zero return code isn't considered an error here.\n\t\t\treturn status.ExitStatus(), nil\n\t\t}\n\t}\n\treturn -1, err\n}\n\n// ExecuteCommandOnMachine will execute the command passed through on\n// the host specified. This is done using ssh, and passing the commands\n// through /bin/bash.  If the command is not finished within the timeout\n// specified, an error is returned.  Any output captured during that time\n// is also returned in the remote response.\nfunc ExecuteCommandOnMachine(args ExecParams) (utilexec.ExecResponse, error) {\n\tvar result utilexec.ExecResponse\n\n\tcmd, err := StartCommandOnMachine(args)\n\tif err != nil {\n\t\treturn result, errors.Trace(err)\n\t}\n\n\tcancel := make(chan struct{})\n\tgo func() {\n\t\t<-clock.WallClock.After(args.Timeout)\n\t\tclose(cancel)\n\t}()\n\tresult, err = cmd.WaitWithCancel(cancel)\n\tif err != nil {\n\t\treturn result, errors.Trace(err)\n\t}\n\n\treturn result, nil\n}\n"
  },
  {
    "path": "ssh/run_test.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage ssh_test\n\nimport (\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"time\"\n\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/ssh\"\n)\n\nconst (\n\tshortWait = 50 * time.Millisecond\n\tlongWait  = 10 * time.Second\n)\n\ntype ExecuteSSHCommandSuite struct {\n\ttesting.IsolationSuite\n\toriginalPath string\n\ttestbin      string\n\tfakessh      string\n}\n\nvar _ = gc.Suite(&ExecuteSSHCommandSuite{})\n\nfunc (s *ExecuteSSHCommandSuite) SetUpSuite(c *gc.C) {\n\ts.originalPath = os.Getenv(\"PATH\")\n\ts.IsolationSuite.SetUpSuite(c)\n}\n\nfunc (s *ExecuteSSHCommandSuite) SetUpTest(c *gc.C) {\n\tif runtime.GOOS == \"windows\" {\n\t\tc.Skip(\"issue 1403084: Tests use OpenSSH only\")\n\t}\n\ts.IsolationSuite.SetUpTest(c)\n\terr := os.Setenv(\"PATH\", s.originalPath)\n\tc.Assert(err, jc.ErrorIsNil)\n\ts.testbin = c.MkDir()\n\ts.fakessh = filepath.Join(s.testbin, \"ssh\")\n\ts.PatchEnvPathPrepend(s.testbin)\n}\n\nfunc (s *ExecuteSSHCommandSuite) fakeSSH(c *gc.C, cmd string) {\n\terr := ioutil.WriteFile(s.fakessh, []byte(cmd), 0755)\n\tc.Assert(err, jc.ErrorIsNil)\n}\n\nfunc (s *ExecuteSSHCommandSuite) TestCaptureOutput(c *gc.C) {\n\ts.fakeSSH(c, echoSSH)\n\n\tresponse, err := ssh.ExecuteCommandOnMachine(ssh.ExecParams{\n\t\tHost:    \"hostname\",\n\t\tCommand: \"sudo apt-get update\\nsudo apt-get upgrade\",\n\t\tTimeout: longWait,\n\t})\n\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(response.Code, gc.Equals, 0)\n\tc.Assert(string(response.Stdout), gc.Equals, \"sudo apt-get update\\nsudo apt-get upgrade\\n\")\n\tc.Assert(string(response.Stderr), gc.Equals,\n\t\t\"-o PasswordAuthentication no -o ServerAliveInterval 30 hostname /bin/bash -s\\n\")\n}\n\nfunc (s *ExecuteSSHCommandSuite) TestIdentityFile(c *gc.C) {\n\ts.fakeSSH(c, echoSSH)\n\n\tresponse, err 
:= ssh.ExecuteCommandOnMachine(ssh.ExecParams{\n\t\tIdentityFile: \"identity-file\",\n\t\tHost:         \"hostname\",\n\t\tTimeout:      longWait,\n\t})\n\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(string(response.Stderr), jc.Contains, \" -i identity-file \")\n}\n\nfunc (s *ExecuteSSHCommandSuite) TestTimoutCaptureOutput(c *gc.C) {\n\ts.fakeSSH(c, slowSSH)\n\n\tresponse, err := ssh.ExecuteCommandOnMachine(ssh.ExecParams{\n\t\tIdentityFile: \"identity-file\",\n\t\tHost:         \"hostname\",\n\t\tCommand:      \"ignored\",\n\t\tTimeout:      shortWait,\n\t})\n\n\tc.Check(err, gc.ErrorMatches, \"command timed out\")\n\tc.Assert(response.Code, gc.Equals, 0)\n\tc.Assert(string(response.Stdout), gc.Equals, \"stdout\\n\")\n\tc.Assert(string(response.Stderr), gc.Equals, \"stderr\\n\")\n}\n\nfunc (s *ExecuteSSHCommandSuite) TestCapturesReturnCode(c *gc.C) {\n\ts.fakeSSH(c, passthroughSSH)\n\n\tresponse, err := ssh.ExecuteCommandOnMachine(ssh.ExecParams{\n\t\tIdentityFile: \"identity-file\",\n\t\tHost:         \"hostname\",\n\t\tCommand:      \"echo stdout; exit 42\",\n\t\tTimeout:      longWait,\n\t})\n\n\tc.Check(err, jc.ErrorIsNil)\n\tc.Assert(response.Code, gc.Equals, 42)\n\tc.Assert(string(response.Stdout), gc.Equals, \"stdout\\n\")\n\tc.Assert(string(response.Stderr), gc.Equals, \"\")\n}\n\n// echoSSH outputs the command args to stderr, and copies stdin to stdout\nvar echoSSH = `#!/bin/bash\n# Write the args to stderr\necho \"$*\" >&2\ncat /dev/stdin\n`\n\n// slowSSH sleeps for a while after outputting some text to stdout and stderr\nvar slowSSH = `#!/bin/bash\necho \"stderr\" >&2\necho \"stdout\"\nsleep 5s\n`\n\n// passthroughSSH creates an ssh that executes stdin.\nvar passthroughSSH = `#!/bin/bash -s`\n"
  },
  {
    "path": "ssh/ssh.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\n// Package ssh contains utilities for dealing with SSH connections,\n// key management, and so on. All SSH-based command executions in\n// Juju should use the Command/ScpCommand functions in this package.\npackage ssh\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"os/exec\"\n\t\"syscall\"\n\n\t\"github.com/juju/errors\"\n\n\t\"github.com/juju/utils/v4\"\n)\n\n// StrictHostChecksOption defines the possible values taken by\n// Option.SetStrictHostKeyChecking().\ntype StrictHostChecksOption int\n\nconst (\n\t// StrictHostChecksDefault configures the default,\n\t// implementation-specific, behaviour.\n\t//\n\t// For the OpenSSH implementation, this elides the\n\t// StrictHostKeyChecking option, which means the\n\t// user's personal configuration will be used.\n\t//\n\t// For the go.crypto implementation, the default is\n\t// the equivalent of \"ask\".\n\tStrictHostChecksDefault StrictHostChecksOption = iota\n\n\t// StrictHostChecksNo disables strict host key checking.\n\tStrictHostChecksNo\n\n\t// StrictHostChecksYes enabled strict host key checking\n\t// enabled. Target hosts must appear in known_hosts file or\n\t// connections will fail.\n\tStrictHostChecksYes\n\n\t// StrictHostChecksAsk will cause openssh to ask the user about\n\t// hosts that don't appear in known_hosts file.\n\tStrictHostChecksAsk\n)\n\n// Options is a client-implementation independent SSH options set.\ntype Options struct {\n\t// proxyCommand specifies the command to\n\t// execute to proxy SSH traffic through.\n\tproxyCommand []string\n\n\t// ssh server port; zero means use the default (22)\n\tport int\n\n\t// no PTY forced by default\n\tallocatePTY bool\n\n\t// password authentication is disallowed by default\n\tpasswordAuthAllowed bool\n\n\t// identities is a sequence of paths to private key/identity files\n\t// to use when attempting to login. 
A client implementaton may attempt\n\t// with additional identities, but must give preference to these\n\tidentities []string\n\n\t// knownHostsFile is a path to a file in which to save the host's\n\t// fingerprint.\n\tknownHostsFile string\n\n\t// strictHostKeyChecking sets that the host being connected to must\n\t// exist in the known_hosts file, and with a matching public key.\n\tstrictHostKeyChecking StrictHostChecksOption\n\n\t// hostKeyAlgorithms sets the host key types that the client will\n\t// accept from the server, in order of preference. By default the\n\t// client implementation will specify a set of reasonable types.\n\thostKeyAlgorithms []string\n}\n\n// SetProxyCommand sets a command to execute to proxy traffic through.\nfunc (o *Options) SetProxyCommand(command ...string) {\n\to.proxyCommand = append([]string{}, command...)\n}\n\n// SetPort sets the SSH server port to connect to.\nfunc (o *Options) SetPort(port int) {\n\to.port = port\n}\n\n// EnablePTY forces the allocation of a pseudo-TTY.\n//\n// Forcing a pseudo-TTY is required, for example, for sudo\n// prompts on the target host.\nfunc (o *Options) EnablePTY() {\n\to.allocatePTY = true\n}\n\n// SetKnownHostsFile sets the host's fingerprint to be saved in the given file.\n//\n// Host fingerprints are saved in ~/.ssh/known_hosts by default.\nfunc (o *Options) SetKnownHostsFile(file string) {\n\to.knownHostsFile = file\n}\n\n// SetStrictHostKeyChecking sets the desired host key checking\n// behaviour. 
It takes one of the StrictHostChecksOption constants.\n// See also EnableStrictHostKeyChecking.\nfunc (o *Options) SetStrictHostKeyChecking(value StrictHostChecksOption) {\n\to.strictHostKeyChecking = value\n}\n\n// AllowPasswordAuthentication allows the SSH\n// client to prompt the user for a password.\n//\n// Password authentication is disallowed by default.\nfunc (o *Options) AllowPasswordAuthentication() {\n\to.passwordAuthAllowed = true\n}\n\n// SetIdentities sets a sequence of paths to private key/identity files\n// to use when attempting login. Client implementations may attempt to\n// use additional identities, but must give preference to the ones\n// specified here.\nfunc (o *Options) SetIdentities(identityFiles ...string) {\n\to.identities = append([]string{}, identityFiles...)\n}\n\n// SetHostKeyAlgorithms sets the host key types that the client will\n// accept from the server, in order of preference. If not specified,\n// the client implementation may choose its own defaults.\nfunc (o *Options) SetHostKeyAlgorithms(algos ...string) {\n\to.hostKeyAlgorithms = algos\n}\n\n// Client is an interface for SSH clients to implement\ntype Client interface {\n\t// Command returns a Command for executing a command\n\t// on the specified host. Each Command is executed\n\t// within its own SSH session.\n\t//\n\t// Host is specified in the format [user@]host.\n\tCommand(host string, command []string, options *Options) *Cmd\n\n\t// Copy copies file(s) between local and remote host(s).\n\t// Paths are specified in the scp format, [[user@]host:]path. 
If\n\t// any extra arguments are specified in extraArgs, they are passed\n\t// verbatim.\n\tCopy(args []string, options *Options) error\n}\n\n// Cmd represents a command to be (or being) executed\n// on a remote host.\ntype Cmd struct {\n\tStdin  io.Reader\n\tStdout io.Writer\n\tStderr io.Writer\n\timpl   command\n}\n\nfunc newCmd(impl command) *Cmd {\n\treturn &Cmd{impl: impl}\n}\n\n// CombinedOutput runs the command, and returns the\n// combined stdout/stderr output and result of\n// executing the command.\nfunc (c *Cmd) CombinedOutput() ([]byte, error) {\n\tif c.Stdout != nil {\n\t\treturn nil, errors.New(\"ssh: Stdout already set\")\n\t}\n\tif c.Stderr != nil {\n\t\treturn nil, errors.New(\"ssh: Stderr already set\")\n\t}\n\tvar b bytes.Buffer\n\tc.Stdout = &b\n\tc.Stderr = &b\n\terr := c.Run()\n\treturn b.Bytes(), err\n}\n\n// Output runs the command, and returns the stdout\n// output and result of executing the command.\nfunc (c *Cmd) Output() ([]byte, error) {\n\tif c.Stdout != nil {\n\t\treturn nil, errors.New(\"ssh: Stdout already set\")\n\t}\n\tvar b bytes.Buffer\n\tc.Stdout = &b\n\terr := c.Run()\n\treturn b.Bytes(), err\n}\n\n// Run runs the command, and returns the result as an error.\nfunc (c *Cmd) Run() error {\n\tif err := c.Start(); err != nil {\n\t\treturn err\n\t}\n\terr := c.Wait()\n\tif exitError, ok := err.(*exec.ExitError); ok && exitError != nil {\n\t\tstatus := exitError.ProcessState.Sys().(syscall.WaitStatus)\n\t\tif status.Exited() {\n\t\t\treturn utils.NewRcPassthroughError(status.ExitStatus())\n\t\t}\n\t}\n\treturn err\n}\n\n// Start starts the command running, but does not wait for\n// it to complete. 
If the command could not be started, an\n// error is returned.\nfunc (c *Cmd) Start() error {\n\tc.impl.SetStdio(c.Stdin, c.Stdout, c.Stderr)\n\treturn c.impl.Start()\n}\n\n// Wait waits for the started command to complete,\n// and returns the result as an error.\nfunc (c *Cmd) Wait() error {\n\treturn c.impl.Wait()\n}\n\n// Kill kills the started command.\nfunc (c *Cmd) Kill() error {\n\treturn c.impl.Kill()\n}\n\n// StdinPipe creates a pipe and connects it to\n// the command's stdin. The read end of the pipe\n// is assigned to c.Stdin.\nfunc (c *Cmd) StdinPipe() (io.WriteCloser, error) {\n\twc, r, err := c.impl.StdinPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.Stdin = r\n\treturn wc, nil\n}\n\n// StdoutPipe creates a pipe and connects it to\n// the command's stdout. The write end of the pipe\n// is assigned to c.Stdout.\nfunc (c *Cmd) StdoutPipe() (io.ReadCloser, error) {\n\trc, w, err := c.impl.StdoutPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.Stdout = w\n\treturn rc, nil\n}\n\n// StderrPipe creates a pipe and connects it to\n// the command's stderr. 
The write end of the pipe\n// is assigned to c.Stderr.\nfunc (c *Cmd) StderrPipe() (io.ReadCloser, error) {\n\trc, w, err := c.impl.StderrPipe()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.Stderr = w\n\treturn rc, nil\n}\n\n// command is an implementation-specific representation of a\n// command prepared to execute against a specific host.\ntype command interface {\n\tStart() error\n\tWait() error\n\tKill() error\n\tSetStdio(stdin io.Reader, stdout, stderr io.Writer)\n\tStdinPipe() (io.WriteCloser, io.Reader, error)\n\tStdoutPipe() (io.ReadCloser, io.Writer, error)\n\tStderrPipe() (io.ReadCloser, io.Writer, error)\n}\n\n// DefaultClient is the default SSH client for the process.\n//\n// If the OpenSSH client is found in $PATH, then it will be\n// used for DefaultClient; otherwise, DefaultClient will use\n// an embedded client based on go.crypto/ssh.\nvar DefaultClient Client\n\n// chosenClient holds the type of SSH client created for\n// DefaultClient, so that we can log it in Command or Copy.\nvar chosenClient string\n\nfunc init() {\n\tinitDefaultClient()\n}\n\nfunc initDefaultClient() {\n\tif client, err := NewOpenSSHClient(); err == nil {\n\t\tDefaultClient = client\n\t\tchosenClient = \"OpenSSH\"\n\t} else if client, err := NewGoCryptoClient(); err == nil {\n\t\tDefaultClient = client\n\t\tchosenClient = \"go.crypto (embedded)\"\n\t}\n}\n\n// Command is a short-cut for DefaultClient.Command.\nfunc Command(host string, command []string, options *Options) *Cmd {\n\tlogger.Debugf(\"using %s ssh client\", chosenClient)\n\treturn DefaultClient.Command(host, command, options)\n}\n\n// Copy is a short-cut for DefaultClient.Copy.\nfunc Copy(args []string, options *Options) error {\n\tlogger.Debugf(\"using %s ssh client\", chosenClient)\n\treturn DefaultClient.Copy(args, options)\n}\n\n// CopyReader sends the reader's data to a file on the remote host over SSH.\nfunc CopyReader(host, filename string, r io.Reader, options *Options) error {\n\tlogger.Debugf(\"using 
%s ssh client\", chosenClient)\n\treturn copyReader(DefaultClient, host, filename, r, options)\n}\n\nfunc copyReader(client Client, host, filename string, r io.Reader, options *Options) error {\n\tcmd := client.Command(host, []string{\"cat - > \" + filename}, options)\n\tcmd.Stdin = r\n\treturn errors.Trace(cmd.Run())\n}\n"
  },
  {
    "path": "ssh/ssh_gocrypto.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage ssh\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"os/exec\"\n\t\"os/user\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/juju/clock\"\n\t\"github.com/juju/errors\"\n\t\"github.com/juju/mutex/v2\"\n\t\"golang.org/x/crypto/ssh\"\n\t\"golang.org/x/crypto/ssh/knownhosts\"\n\t\"golang.org/x/crypto/ssh/terminal\"\n\n\t\"github.com/juju/utils/v4\"\n)\n\nconst sshDefaultPort = 22\n\n// GoCryptoClient is an implementation of Client that\n// uses the embedded go.crypto/ssh SSH client.\n//\n// GoCryptoClient is intentionally limited in the\n// functionality that it enables, as it is currently\n// intended to be used only for non-interactive command\n// execution.\ntype GoCryptoClient struct {\n\tsigners []ssh.Signer\n}\n\n// NewGoCryptoClient creates a new GoCryptoClient.\n//\n// If no signers are specified, NewGoCryptoClient will\n// use the private key generated by LoadClientKeys.\nfunc NewGoCryptoClient(signers ...ssh.Signer) (*GoCryptoClient, error) {\n\treturn &GoCryptoClient{signers: signers}, nil\n}\n\n// Command implements Client.Command.\nfunc (c *GoCryptoClient) Command(host string, command []string, options *Options) *Cmd {\n\tshellCommand := utils.CommandString(command...)\n\tsigners := c.signers\n\tif len(signers) == 0 {\n\t\tsigners = privateKeys()\n\t}\n\tuser, host := splitUserHost(host)\n\tport := sshDefaultPort\n\tvar proxyCommand []string\n\tvar knownHostsFile string\n\tvar strictHostKeyChecking StrictHostChecksOption\n\tvar hostKeyAlgorithms []string\n\tif options != nil {\n\t\tif options.port != 0 {\n\t\t\tport = options.port\n\t\t}\n\t\tproxyCommand = options.proxyCommand\n\t\tknownHostsFile = options.knownHostsFile\n\t\tstrictHostKeyChecking = options.strictHostKeyChecking\n\t\thostKeyAlgorithms = options.hostKeyAlgorithms\n\t}\n\tlogger.Tracef(`running (equivalent 
of): ssh \"%s@%s\" -p %d '%s'`, user, host, port, shellCommand)\n\treturn &Cmd{impl: &goCryptoCommand{\n\t\tsigners:               signers,\n\t\tuser:                  user,\n\t\taddr:                  net.JoinHostPort(host, strconv.Itoa(port)),\n\t\tcommand:               shellCommand,\n\t\tproxyCommand:          proxyCommand,\n\t\tknownHostsFile:        knownHostsFile,\n\t\tstrictHostKeyChecking: strictHostKeyChecking,\n\t\thostKeyAlgorithms:     hostKeyAlgorithms,\n\t}}\n}\n\n// Copy implements Client.Copy.\n//\n// Copy is currently unimplemented, and will always return an error.\nfunc (c *GoCryptoClient) Copy(args []string, options *Options) error {\n\treturn errors.Errorf(\"scp command is not implemented (OpenSSH scp not available in PATH)\")\n}\n\ntype goCryptoCommand struct {\n\tsigners               []ssh.Signer\n\tuser                  string\n\taddr                  string\n\tcommand               string\n\tproxyCommand          []string\n\tknownHostsFile        string\n\tstrictHostKeyChecking StrictHostChecksOption\n\thostKeyAlgorithms     []string\n\tstdin                 io.Reader\n\tstdout                io.Writer\n\tstderr                io.Writer\n\tclient                *ssh.Client\n\tsess                  *ssh.Session\n}\n\nvar sshDial = ssh.Dial\n\nvar sshDialWithProxy = func(addr string, proxyCommand []string, config *ssh.ClientConfig) (*ssh.Client, error) {\n\tif len(proxyCommand) == 0 {\n\t\treturn sshDial(\"tcp\", addr, config)\n\t}\n\t// User has specified a proxy. 
Create a pipe and\n\t// redirect the proxy command's stdin/stdout to it.\n\thost, port, err := net.SplitHostPort(addr)\n\tif err != nil {\n\t\thost = addr\n\t}\n\tfor i, arg := range proxyCommand {\n\t\targ = strings.Replace(arg, \"%h\", host, -1)\n\t\tif port != \"\" {\n\t\t\targ = strings.Replace(arg, \"%p\", port, -1)\n\t\t}\n\t\targ = strings.Replace(arg, \"%r\", config.User, -1)\n\t\tproxyCommand[i] = arg\n\t}\n\tclient, server := net.Pipe()\n\tlogger.Tracef(`executing proxy command %q`, proxyCommand)\n\tcmd := exec.Command(proxyCommand[0], proxyCommand[1:]...)\n\tcmd.Stdin = server\n\tcmd.Stdout = server\n\tcmd.Stderr = os.Stderr\n\tif err := cmd.Start(); err != nil {\n\t\treturn nil, err\n\t}\n\tconn, chans, reqs, err := ssh.NewClientConn(client, addr, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ssh.NewClient(conn, chans, reqs), nil\n}\n\nfunc (c *goCryptoCommand) ensureSession() (*ssh.Session, error) {\n\tif c.sess != nil {\n\t\treturn c.sess, nil\n\t}\n\tif len(c.signers) == 0 {\n\t\treturn nil, errors.Errorf(\"no private keys available\")\n\t}\n\tif c.user == \"\" {\n\t\tcurrentUser, err := user.Current()\n\t\tif err != nil {\n\t\t\treturn nil, errors.Errorf(\"getting current user: %v\", err)\n\t\t}\n\t\tc.user = currentUser.Username\n\t}\n\tconfig := &ssh.ClientConfig{\n\t\tUser:              c.user,\n\t\tHostKeyCallback:   c.hostKeyCallback,\n\t\tHostKeyAlgorithms: c.hostKeyAlgorithms,\n\t\tAuth: []ssh.AuthMethod{\n\t\t\tssh.PublicKeysCallback(func() ([]ssh.Signer, error) {\n\t\t\t\treturn c.signers, nil\n\t\t\t}),\n\t\t},\n\t}\n\tclient, err := sshDialWithProxy(c.addr, c.proxyCommand, config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsess, err := client.NewSession()\n\tif err != nil {\n\t\tclient.Close()\n\t\treturn nil, err\n\t}\n\tc.client = client\n\tc.sess = sess\n\tc.sess.Stdin = WrapStdin(c.stdin)\n\tc.sess.Stdout = c.stdout\n\tc.sess.Stderr = c.stderr\n\treturn sess, nil\n}\n\nfunc (c *goCryptoCommand) Start() error 
{\n\tsess, err := c.ensureSession()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif c.command == \"\" {\n\t\treturn sess.Shell()\n\t}\n\treturn sess.Start(c.command)\n}\n\nfunc (c *goCryptoCommand) Close() error {\n\tif c.sess == nil {\n\t\treturn nil\n\t}\n\terr0 := c.sess.Close()\n\terr1 := c.client.Close()\n\tif err0 == nil {\n\t\terr0 = err1\n\t}\n\tc.sess = nil\n\tc.client = nil\n\treturn err0\n}\n\nfunc (c *goCryptoCommand) Wait() error {\n\tif c.sess == nil {\n\t\treturn errors.Errorf(\"command has not been started\")\n\t}\n\terr := c.sess.Wait()\n\tc.Close()\n\treturn err\n}\n\nfunc (c *goCryptoCommand) Kill() error {\n\tif c.sess == nil {\n\t\treturn errors.Errorf(\"command has not been started\")\n\t}\n\treturn c.sess.Signal(ssh.SIGKILL)\n}\n\nfunc (c *goCryptoCommand) SetStdio(stdin io.Reader, stdout, stderr io.Writer) {\n\tc.stdin = stdin\n\tc.stdout = stdout\n\tc.stderr = stderr\n}\n\nfunc (c *goCryptoCommand) StdinPipe() (io.WriteCloser, io.Reader, error) {\n\tsess, err := c.ensureSession()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\twc, err := sess.StdinPipe()\n\treturn wc, sess.Stdin, err\n}\n\nfunc (c *goCryptoCommand) StdoutPipe() (io.ReadCloser, io.Writer, error) {\n\tsess, err := c.ensureSession()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\twc, err := sess.StdoutPipe()\n\treturn ioutil.NopCloser(wc), sess.Stdout, err\n}\n\nfunc (c *goCryptoCommand) StderrPipe() (io.ReadCloser, io.Writer, error) {\n\tsess, err := c.ensureSession()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\twc, err := sess.StderrPipe()\n\treturn ioutil.NopCloser(wc), sess.Stderr, err\n}\n\nfunc (c *goCryptoCommand) hostKeyCallback(hostname string, remote net.Addr, key ssh.PublicKey) error {\n\tknownHostsFile := c.knownHostsFile\n\tif knownHostsFile == \"\" {\n\t\tknownHostsFile = GoCryptoKnownHostsFile()\n\t\tif knownHostsFile == \"\" {\n\t\t\treturn errors.New(\"known_hosts file not configured\")\n\t\t}\n\t}\n\n\tvar printError func(string) 
error\n\tterm, cleanupTerm, err := getTerminal()\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t} else if term != nil {\n\t\tdefer cleanupTerm()\n\t\tprintError = func(message string) error {\n\t\t\t_, err := fmt.Fprintln(term, message)\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tprintError = func(message string) error {\n\t\t\tlogger.Errorf(\"%s\", message)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tmatched, err := checkHostKey(hostname, remote, key, knownHostsFile, printError)\n\tif err != nil || matched {\n\t\treturn errors.Trace(err)\n\t}\n\t// We did not find a matching key, so what we do next depends on the\n\t// strict host key checking configuration.\n\n\tvar warnAdd bool\n\tswitch c.strictHostKeyChecking {\n\tcase StrictHostChecksNo:\n\t\t// Don't ask, just add.\n\t\twarnAdd = true\n\tcase StrictHostChecksDefault, StrictHostChecksAsk:\n\t\tmessage := fmt.Sprintf(`The authenticity of host '%s (%s)' can't be established.\n%s key fingerprint is %s.\n`,\n\t\t\thostname,\n\t\t\tremote,\n\t\t\tkey.Type(),\n\t\t\tssh.FingerprintSHA256(key),\n\t\t)\n\t\tif term == nil {\n\t\t\t// If we're not running in a terminal,\n\t\t\t// we can't ask the user if they want\n\t\t\t// to accept.\n\t\t\tlogger.Errorf(\"%s\", message)\n\t\t\treturn errors.New(\"not running in a terminal, cannot prompt for verification\")\n\t\t}\n\n\t\t// Prompt user, asking if they trust the key.\n\t\tfmt.Fprint(term, message+\"Are you sure you want to continue connecting (yes/no)? 
\")\n\t\tfor {\n\t\t\tline, err := term.ReadLine()\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Trace(err)\n\t\t\t}\n\t\t\tvar yes bool\n\t\t\tswitch strings.ToLower(line) {\n\t\t\tcase \"yes\":\n\t\t\t\tyes = true\n\t\t\tcase \"no\":\n\t\t\t\treturn errors.New(\"Host key verification failed.\")\n\t\t\tdefault:\n\t\t\t\tfmt.Fprint(term, \"Please type 'yes' or 'no': \")\n\t\t\t}\n\t\t\tif yes {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\tdefault:\n\t\treturn errors.Errorf(\n\t\t\t`no %s host key is known for %s and you have requested strict checking`,\n\t\t\tkey.Type(), hostname,\n\t\t)\n\t}\n\n\tif knownHostsFile != os.DevNull {\n\t\t// Make sure no other process modifies the file.\n\t\treleaser, err := mutex.Acquire(mutex.Spec{\n\t\t\tName:  \"juju-ssh-client\",\n\t\t\tClock: clock.WallClock,\n\t\t\tDelay: time.Second,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn errors.Trace(err)\n\t\t}\n\t\tdefer releaser.Release()\n\n\t\t// Write the file atomically, so the initial ReadAll above\n\t\t// doesn't have to hold the mutex.\n\t\tknownHostsData, err := ioutil.ReadFile(knownHostsFile)\n\t\tif err != nil && !os.IsNotExist(err) {\n\t\t\treturn errors.Trace(err)\n\t\t}\n\t\tbuf := bytes.NewBuffer(knownHostsData)\n\t\tif len(knownHostsData) > 0 && !bytes.HasSuffix(knownHostsData, []byte(\"\\n\")) {\n\t\t\tbuf.WriteRune('\\n')\n\t\t}\n\t\tbuf.WriteString(knownhosts.Line([]string{hostname}, key))\n\t\tbuf.WriteRune('\\n')\n\t\tif err := utils.AtomicWriteFile(knownHostsFile, buf.Bytes(), 0600); err != nil {\n\t\t\treturn errors.Trace(err)\n\t\t}\n\t}\n\tif warnAdd {\n\t\tprintError(fmt.Sprintf(\n\t\t\t\"Warning: permanently added '%s' (%s) to the list of known hosts.\",\n\t\t\thostname, key.Type(),\n\t\t))\n\t}\n\treturn nil\n}\n\ntype readLineWriter interface {\n\tio.Writer\n\tReadLine() (string, error)\n}\n\nvar getTerminal = func() (readLineWriter, func(), error) {\n\tif fd := int(os.Stdin.Fd()); terminal.IsTerminal(fd) {\n\t\toldState, err := terminal.MakeRaw(fd)\n\t\tif err != nil 
{\n\t\t\treturn nil, nil, errors.Trace(err)\n\t\t}\n\t\tcleanup := func() { terminal.Restore(fd, oldState) }\n\t\treturn terminal.NewTerminal(os.Stdin, \"\"), cleanup, nil\n\t}\n\treturn nil, nil, nil\n}\n\n// checkHostKey checks the given (hostname, address, public key) tuple\n// against the local known-hosts database, if it exists, and returns a\n// boolean indicating whether a match was found, and any errors encountered.\nfunc checkHostKey(\n\thostname string,\n\tremote net.Addr,\n\tkey ssh.PublicKey,\n\tknownHostsFile string,\n\tprintError func(string) error,\n) (bool, error) {\n\t// NOTE(axw) the knownhosts code is incomplete, but enough for\n\t// our limited use cases. We do not support parsing a known_hosts\n\t// file managed by OpenSSH (due to hashed hosts, etc.), but that\n\t// is OK since this client exists only to support systems that\n\t// do not have access to OpenSSH.\n\tcallback, err := knownhosts.New(knownHostsFile)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\t// The known_hosts file does not exist.\n\t\t\treturn false, nil\n\t\t}\n\t\treturn false, errors.Trace(err)\n\t}\n\terr = callback(hostname, remote, key)\n\tswitch err := err.(type) {\n\tcase nil:\n\t\t// Known host with matching key.\n\t\treturn true, nil\n\tcase *knownhosts.KeyError:\n\t\tif len(err.Want) == 0 {\n\t\t\t// Unknown host.\n\t\t\treturn false, nil\n\t\t}\n\t\thead := fmt.Sprintf(`\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n@    WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED!     
@\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\nIT IS POSSIBLE THAT SOMEONE IS DOING SOMETHING NASTY!\nSomeone could be eavesdropping on you right now (man-in-the-middle attack)!\nIt is also possible that a host key has just been changed.\nThe fingerprint for the %s key sent by the remote host is\n%s.\nPlease contact your system administrator.\nAdd correct host key in %s to get rid of this message.\n`[1:], key.Type(), ssh.FingerprintSHA256(key), knownHostsFile)\n\n\t\tvar typeKey *knownhosts.KnownKey\n\t\tfor i, knownKey := range err.Want {\n\t\t\tif knownKey.Key.Type() == key.Type() {\n\t\t\t\ttypeKey = &err.Want[i]\n\t\t\t}\n\t\t}\n\n\t\tvar tail string\n\t\tif typeKey != nil {\n\t\t\ttail = fmt.Sprintf(\n\t\t\t\t\"Offending %s key in %s:%d\",\n\t\t\t\ttypeKey.Key.Type(),\n\t\t\t\ttypeKey.Filename,\n\t\t\t\ttypeKey.Line,\n\t\t\t)\n\t\t} else {\n\t\t\ttail = \"Host was previously using different host key algorithms:\"\n\t\t\tfor _, knownKey := range err.Want {\n\t\t\t\ttail += fmt.Sprintf(\n\t\t\t\t\t\"\\n - %s key in %s:%d\",\n\t\t\t\t\tknownKey.Key.Type(),\n\t\t\t\t\tknownKey.Filename,\n\t\t\t\t\tknownKey.Line,\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\t\tif err := printError(head + tail); err != nil {\n\t\t\t// Not being able to display the warning\n\t\t\t// should be considered fatal.\n\t\t\treturn false, errors.Annotate(\n\t\t\t\terr, \"failed to print host key mismatch warning\",\n\t\t\t)\n\t\t}\n\t}\n\treturn false, errors.Trace(err)\n}\n\nfunc splitUserHost(s string) (user, host string) {\n\tuserHost := strings.SplitN(s, \"@\", 2)\n\tif len(userHost) == 2 {\n\t\treturn userHost[0], userHost[1]\n\t}\n\treturn \"\", userHost[0]\n}\n\nvar (\n\tgoCryptoKnownHostsMutex sync.Mutex\n\tgoCryptoKnownHostsFile  string\n)\n\n// GoCryptoKnownHostsFile returns the known_hosts file used\n// by the golang.org/x/crypto/ssh-based client by default.\nfunc GoCryptoKnownHostsFile() string {\n\tgoCryptoKnownHostsMutex.Lock()\n\tdefer 
goCryptoKnownHostsMutex.Unlock()\n\treturn goCryptoKnownHostsFile\n}\n\n// SetGoCryptoKnownHostsFile returns the known_hosts file used\n// by the golang.org/x/crypto/ssh-based client.\nfunc SetGoCryptoKnownHostsFile(file string) {\n\tgoCryptoKnownHostsMutex.Lock()\n\tdefer goCryptoKnownHostsMutex.Unlock()\n\tgoCryptoKnownHostsFile = file\n}\n"
  },
  {
    "path": "ssh/ssh_gocrypto_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage ssh_test\n\nimport (\n\t\"bytes\"\n\t\"crypto/rand\"\n\t\"encoding/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"net\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tcryptossh \"golang.org/x/crypto/ssh\"\n\t\"golang.org/x/crypto/ssh/testdata\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/ssh\"\n)\n\nvar (\n\ttestCommand     = []string{\"echo\", \"$abc\"}\n\ttestCommandFlat = `echo \"\\$abc\"`\n)\n\ntype sshServer struct {\n\tcfg      *cryptossh.ServerConfig\n\tlistener net.Listener\n\tclient   *cryptossh.Client\n}\n\nfunc (s *sshServer) run(errorCh chan error, done chan bool) {\n\tnetconn, err := s.listener.Accept()\n\tif err != nil {\n\t\terrorCh <- fmt.Errorf(\"accepting connection: %w\", err)\n\t\treturn\n\t}\n\tdefer netconn.Close()\n\n\tconn, chans, reqs, err := cryptossh.NewServerConn(netconn, s.cfg)\n\tif err != nil {\n\t\terrorCh <- fmt.Errorf(\"getting ssh server connection: %w\", err)\n\t\treturn\n\t}\n\ts.client = cryptossh.NewClient(conn, chans, reqs)\n\n\tvar wg sync.WaitGroup\n\tdefer func() {\n\t\twg.Wait()\n\t\tclose(errorCh)\n\t}()\n\n\tsessionChannels := s.client.HandleChannelOpen(\"session\")\n\tselect {\n\tcase <-done:\n\t\treturn\n\tcase newChannel := <-sessionChannels:\n\t\tif sCh := newChannel.ChannelType(); sCh != \"session\" {\n\t\t\terrorCh <- fmt.Errorf(\"unexpected session channel %q\", sCh)\n\t\t\treturn\n\t\t}\n\n\t\tchannel, reqs, err := newChannel.Accept()\n\t\tif err != nil {\n\t\t\terrorCh <- fmt.Errorf(\"accepting session connection: %w\", err)\n\t\t\treturn\n\t\t}\n\t\twg.Add(1)\n\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tdefer channel.Close()\n\n\t\t\tfor req := range reqs {\n\t\t\t\tswitch req.Type {\n\t\t\t\tcase \"exec\":\n\t\t\t\t\tif !req.WantReply 
{\n\t\t\t\t\t\terrorCh <- fmt.Errorf(\"no reply wanted for request %+v\", req)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tn := binary.BigEndian.Uint32(req.Payload[:4])\n\t\t\t\t\tcommand := string(req.Payload[4 : n+4])\n\t\t\t\t\tif command != testCommandFlat {\n\t\t\t\t\t\terrorCh <- fmt.Errorf(\"unexpected request command: %q\", command)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\terr = req.Reply(true, nil)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\terrorCh <- fmt.Errorf(\"error sending reply: %w\", err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tchannel.Write([]byte(\"abc value\\n\"))\n\t\t\t\t\t_, err := channel.SendRequest(\"exit-status\", false, cryptossh.Marshal(&struct{ n uint32 }{0}))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\terrorCh <- fmt.Errorf(\"error sending request: %w\", err)\n\t\t\t\t\t}\n\t\t\t\t\treturn\n\n\t\t\t\tdefault:\n\t\t\t\t\terrorCh <- fmt.Errorf(\"unexpected request type: %q\", req.Type)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc newClient(c *gc.C) (*ssh.GoCryptoClient, cryptossh.PublicKey) {\n\tprivate, _, err := ssh.GenerateKey(\"test-client\")\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tkey, err := cryptossh.ParsePrivateKey([]byte(private))\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tclient, err := ssh.NewGoCryptoClient(key)\n\tc.Assert(err, jc.ErrorIsNil)\n\n\treturn client, key.PublicKey()\n}\n\ntype SSHGoCryptoCommandSuite struct {\n\ttesting.IsolationSuite\n\tclient         ssh.Client\n\tknownHostsFile string\n\n\ttestPrivateKeys map[string]any\n\ttestSigners     map[string]cryptossh.Signer\n\ttestPublicKeys  map[string]cryptossh.PublicKey\n}\n\nvar _ = gc.Suite(&SSHGoCryptoCommandSuite{})\n\nfunc (s *SSHGoCryptoCommandSuite) SetUpSuite(c *gc.C) {\n\ts.IsolationSuite.SetUpSuite(c)\n\tvar err error\n\n\tn := len(testdata.PEMBytes)\n\ts.testPrivateKeys = make(map[string]any, n)\n\ts.testSigners = make(map[string]cryptossh.Signer, n)\n\ts.testPublicKeys = make(map[string]cryptossh.PublicKey, n)\n\tfor t, k := range 
testdata.PEMBytes {\n\t\ts.testPrivateKeys[t], err = cryptossh.ParseRawPrivateKey(k)\n\t\tc.Assert(err, jc.ErrorIsNil)\n\t\ts.testSigners[t], err = cryptossh.NewSignerFromKey(s.testPrivateKeys[t])\n\t\tc.Assert(err, jc.ErrorIsNil)\n\t\ts.testPublicKeys[t] = s.testSigners[t].PublicKey()\n\t}\n\n\t// Create a cert and sign it for use in tests.\n\ttestCert := &cryptossh.Certificate{\n\t\tNonce:           []byte{},                       // To pass reflect.DeepEqual after marshal & parse, this must be non-nil\n\t\tValidPrincipals: []string{\"gopher1\", \"gopher2\"}, // increases test coverage\n\t\tValidAfter:      0,                              // unix epoch\n\t\tValidBefore:     cryptossh.CertTimeInfinity,     // The end of currently representable time.\n\t\tReserved:        []byte{},                       // To pass reflect.DeepEqual after marshal & parse, this must be non-nil\n\t\tKey:             s.testPublicKeys[\"ecdsa\"],\n\t\tSignatureKey:    s.testPublicKeys[\"ed25519\"],\n\t\tPermissions: cryptossh.Permissions{\n\t\t\tCriticalOptions: map[string]string{},\n\t\t\tExtensions:      map[string]string{},\n\t\t},\n\t}\n\terr = testCert.SignCert(rand.Reader, s.testSigners[\"ed25519\"])\n\tc.Assert(err, jc.ErrorIsNil)\n\ts.testPrivateKeys[\"cert\"] = s.testPrivateKeys[\"ecdsa\"]\n\ts.testSigners[\"cert\"], err = cryptossh.NewCertSigner(testCert, s.testSigners[\"ecdsa\"])\n\tc.Assert(err, jc.ErrorIsNil)\n}\n\nfunc (s *SSHGoCryptoCommandSuite) SetUpTest(c *gc.C) {\n\ts.IsolationSuite.SetUpTest(c)\n\n\tgenerateKeyRestorer := overrideGenerateKey()\n\ts.AddCleanup(func(*gc.C) { generateKeyRestorer.Restore() })\n\n\ts.knownHostsFile = filepath.Join(c.MkDir(), \"known_hosts\")\n\tssh.SetGoCryptoKnownHostsFile(s.knownHostsFile)\n\tssh.PatchNilTerminal(&s.CleanupSuite)\n}\n\nfunc (s *SSHGoCryptoCommandSuite) newServer(c *gc.C, serverConfig cryptossh.ServerConfig) (*sshServer, cryptossh.PublicKey) {\n\tserver := &sshServer{cfg: 
&serverConfig}\n\tserver.cfg.AddHostKey(s.testSigners[\"ed25519\"])\n\tvar err error\n\tserver.listener, err = net.Listen(\"tcp\", \"127.0.0.1:0\")\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Logf(\"Server listening on %s\", server.listener.Addr().String())\n\n\treturn server, s.testPublicKeys[\"ed25519\"]\n}\n\nfunc (s *SSHGoCryptoCommandSuite) TestNewGoCryptoClient(c *gc.C) {\n\t_, err := ssh.NewGoCryptoClient()\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tprivate, _, err := ssh.GenerateKey(\"test-client\")\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tkey, err := cryptossh.ParsePrivateKey([]byte(private))\n\tc.Assert(err, jc.ErrorIsNil)\n\n\t_, err = ssh.NewGoCryptoClient(key)\n\tc.Assert(err, jc.ErrorIsNil)\n}\n\nfunc (s *SSHGoCryptoCommandSuite) TestClientNoKeys(c *gc.C) {\n\tclient, err := ssh.NewGoCryptoClient()\n\tc.Assert(err, jc.ErrorIsNil)\n\tcmd := client.Command(\"0.1.2.3\", []string{\"echo\", \"123\"}, nil)\n\t_, err = cmd.Output()\n\tc.Assert(err, gc.ErrorMatches, \"no private keys available\")\n\tdefer ssh.ClearClientKeys()\n\terr = ssh.LoadClientKeys(c.MkDir())\n\tc.Assert(err, jc.ErrorIsNil)\n\n\ts.PatchValue(ssh.SSHDial, func(network, address string, cfg *cryptossh.ClientConfig) (*cryptossh.Client, error) {\n\t\treturn nil, errors.New(\"ssh.Dial failed\")\n\t})\n\tcmd = client.Command(\"0.1.2.3\", []string{\"echo\", \"123\"}, nil)\n\t_, err = cmd.Output()\n\t// error message differs based on whether using cgo or not\n\tc.Assert(err, gc.ErrorMatches, \"ssh.Dial failed\")\n}\n\nfunc waitForServer(c *gc.C, errorCh chan error) error {\n\tselect {\n\tcase err, _ := <-errorCh:\n\t\treturn err\n\tcase <-time.After(testing.LongWait):\n\t\tc.Fatal(\"timed out waiting for ssh server\")\n\t\treturn nil\n\t}\n}\n\nfunc (s *SSHGoCryptoCommandSuite) TestCommand(c *gc.C) {\n\tclient, clientKey := newClient(c)\n\tserver, serverKey := s.newServer(c, cryptossh.ServerConfig{})\n\tserverPort := server.listener.Addr().(*net.TCPAddr).Port\n\tvar opts 
ssh.Options\n\topts.SetPort(serverPort)\n\topts.SetStrictHostKeyChecking(ssh.StrictHostChecksNo)\n\tcmd := client.Command(\"127.0.0.1\", testCommand, &opts)\n\tcheckedKey := false\n\tserver.cfg.PublicKeyCallback = func(conn cryptossh.ConnMetadata, pubkey cryptossh.PublicKey) (*cryptossh.Permissions, error) {\n\t\tc.Check(pubkey, gc.DeepEquals, clientKey)\n\t\tcheckedKey = true\n\t\treturn nil, nil\n\t}\n\terrorCh := make(chan error, 1)\n\tdone := make(chan bool)\n\tdefer close(done)\n\tgo server.run(errorCh, done)\n\n\tout, err := cmd.Output()\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(string(out), gc.Equals, \"abc value\\n\")\n\tc.Assert(checkedKey, jc.IsTrue)\n\n\tknownHosts, err := ioutil.ReadFile(s.knownHostsFile)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(string(knownHosts), gc.Equals, fmt.Sprintf(\n\t\t\"[127.0.0.1]:%d %s\",\n\t\tserverPort,\n\t\tcryptossh.MarshalAuthorizedKey(serverKey)),\n\t)\n\tc.Assert(waitForServer(c, errorCh), jc.ErrorIsNil)\n}\n\nfunc (s *SSHGoCryptoCommandSuite) TestCopy(c *gc.C) {\n\tclient, err := ssh.NewGoCryptoClient()\n\tc.Assert(err, jc.ErrorIsNil)\n\terr = client.Copy([]string{\"0.1.2.3:b\", c.MkDir()}, nil)\n\tc.Assert(err, gc.ErrorMatches, `scp command is not implemented \\(OpenSSH scp not available in PATH\\)`)\n}\n\nfunc (s *SSHGoCryptoCommandSuite) TestProxyCommand(c *gc.C) {\n\trealNetcat, err := exec.LookPath(\"nc\")\n\tif err != nil {\n\t\tc.Skip(\"skipping test, couldn't find netcat: %v\")\n\t\treturn\n\t}\n\tnetcat := filepath.Join(c.MkDir(), \"nc\")\n\terr = ioutil.WriteFile(netcat, []byte(\"#!/bin/sh\\necho $0 \\\"$@\\\" > $0.args && exec \"+realNetcat+\" \\\"$@\\\"\"), 0755)\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tclient, _ := newClient(c)\n\tserver, _ := s.newServer(c, cryptossh.ServerConfig{})\n\tvar opts ssh.Options\n\tport := server.listener.Addr().(*net.TCPAddr).Port\n\topts.SetProxyCommand(netcat, \"-q0\", \"%h\", \"%p\")\n\topts.SetPort(port)\n\tcmd := client.Command(\"127.0.0.1\", testCommand, 
&opts)\n\tserver.cfg.PublicKeyCallback = func(_ cryptossh.ConnMetadata, pubkey cryptossh.PublicKey) (*cryptossh.Permissions, error) {\n\t\treturn nil, nil\n\t}\n\terrorCh := make(chan error, 1)\n\tdone := make(chan bool)\n\tdefer close(done)\n\tgo server.run(errorCh, done)\n\n\tout, err := cmd.Output()\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(string(out), gc.Equals, \"abc value\\n\")\n\t// Ensure the proxy command was executed with the appropriate arguments.\n\tdata, err := ioutil.ReadFile(netcat + \".args\")\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(string(data), gc.Equals, fmt.Sprintf(\"%s -q0 127.0.0.1 %v\\n\", netcat, port))\n\tc.Assert(waitForServer(c, errorCh), jc.ErrorIsNil)\n}\n\nfunc (s *SSHGoCryptoCommandSuite) TestStrictHostChecksYes(c *gc.C) {\n\tserver, _ := s.newServer(c, cryptossh.ServerConfig{NoClientAuth: true})\n\tserverPort := server.listener.Addr().(*net.TCPAddr).Port\n\terrorCh := make(chan error, 1)\n\tdone := make(chan bool)\n\tdefer close(done)\n\tgo server.run(errorCh, done)\n\n\tvar opts ssh.Options\n\topts.SetPort(serverPort)\n\topts.SetStrictHostKeyChecking(ssh.StrictHostChecksYes)\n\tclient, _ := newClient(c)\n\tcmd := client.Command(\"127.0.0.1\", testCommand, &opts)\n\t_, err := cmd.Output()\n\tc.Assert(err, gc.ErrorMatches, fmt.Sprintf(\n\t\t\"ssh: handshake failed: no ssh-ed25519 host key is known for 127.0.0.1:%d and you have requested strict checking\",\n\t\tserverPort,\n\t))\n\t_, err = os.Stat(s.knownHostsFile)\n\tc.Assert(err, jc.Satisfies, os.IsNotExist)\n\t_ = waitForServer(c, errorCh)\n}\n\nfunc (s *SSHGoCryptoCommandSuite) TestStrictHostChecksAskNonTerminal(c *gc.C) {\n\tserver, _ := s.newServer(c, cryptossh.ServerConfig{NoClientAuth: true})\n\tserverPort := server.listener.Addr().(*net.TCPAddr).Port\n\terrorCh := make(chan error, 1)\n\tdone := make(chan bool)\n\tdefer close(done)\n\tgo server.run(errorCh, done)\n\n\tvar opts 
ssh.Options\n\topts.SetPort(serverPort)\n\topts.SetStrictHostKeyChecking(ssh.StrictHostChecksAsk)\n\tclient, _ := newClient(c)\n\tcmd := client.Command(\"127.0.0.1\", testCommand, &opts)\n\t_, err := cmd.Output()\n\tc.Assert(err, gc.ErrorMatches, \"ssh: handshake failed: not running in a terminal, cannot prompt for verification\")\n\t_, err = os.Stat(s.knownHostsFile)\n\tc.Assert(err, jc.Satisfies, os.IsNotExist)\n\t_ = waitForServer(c, errorCh)\n}\n\nfunc (s *SSHGoCryptoCommandSuite) TestStrictHostChecksAskTerminalYes(c *gc.C) {\n\tvar readLineWriter mockReadLineWriter\n\tssh.PatchTerminal(&s.CleanupSuite, &readLineWriter)\n\treadLineWriter.addLine(\"\")\n\treadLineWriter.addLine(\"yes\")\n\n\tserver, serverKey := s.newServer(c, cryptossh.ServerConfig{NoClientAuth: true})\n\tserverPort := server.listener.Addr().(*net.TCPAddr).Port\n\terrorCh := make(chan error, 1)\n\tdone := make(chan bool)\n\tdefer close(done)\n\tgo server.run(errorCh, done)\n\n\tvar opts ssh.Options\n\topts.SetPort(serverPort)\n\topts.SetStrictHostKeyChecking(ssh.StrictHostChecksAsk)\n\tclient, _ := newClient(c)\n\tcmd := client.Command(\"127.0.0.1\", testCommand, &opts)\n\t_, err := cmd.Output()\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tknownHosts, err := ioutil.ReadFile(s.knownHostsFile)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(string(knownHosts), gc.Equals, fmt.Sprintf(\n\t\t\"[127.0.0.1]:%d %s\",\n\t\tserverPort,\n\t\tcryptossh.MarshalAuthorizedKey(serverKey),\n\t))\n\n\tc.Assert(readLineWriter.written.String(), gc.Equals, fmt.Sprintf(`\nThe authenticity of host '127.0.0.1:%[1]d (127.0.0.1:%[1]d)' can't be established.\nssh-ed25519 key fingerprint is %[2]s.\nAre you sure you want to continue connecting (yes/no)? 
Please type 'yes' or 'no': `[1:],\n\t\tserverPort, cryptossh.FingerprintSHA256(serverKey)))\n\tc.Assert(waitForServer(c, errorCh), jc.ErrorIsNil)\n}\n\nfunc (s *SSHGoCryptoCommandSuite) TestStrictHostChecksAskTerminalNo(c *gc.C) {\n\tvar readLineWriter mockReadLineWriter\n\tssh.PatchTerminal(&s.CleanupSuite, &readLineWriter)\n\treadLineWriter.addLine(\"no\")\n\n\tserver, serverKey := s.newServer(c, cryptossh.ServerConfig{NoClientAuth: true})\n\tserverPort := server.listener.Addr().(*net.TCPAddr).Port\n\terrorCh := make(chan error, 1)\n\tdone := make(chan bool)\n\tdefer close(done)\n\tgo server.run(errorCh, done)\n\n\tvar opts ssh.Options\n\topts.SetPort(serverPort)\n\topts.SetStrictHostKeyChecking(ssh.StrictHostChecksAsk)\n\tclient, _ := newClient(c)\n\tcmd := client.Command(\"127.0.0.1\", testCommand, &opts)\n\t_, err := cmd.Output()\n\tc.Assert(err, gc.ErrorMatches, \"ssh: handshake failed: Host key verification failed.\")\n\n\t_, err = os.Stat(s.knownHostsFile)\n\tc.Assert(err, jc.Satisfies, os.IsNotExist)\n\n\tc.Assert(readLineWriter.written.String(), gc.Equals, fmt.Sprintf(`\nThe authenticity of host '127.0.0.1:%[1]d (127.0.0.1:%[1]d)' can't be established.\nssh-ed25519 key fingerprint is %[2]s.\nAre you sure you want to continue connecting (yes/no)? `[1:],\n\t\tserverPort, cryptossh.FingerprintSHA256(serverKey)))\n\t_ = waitForServer(c, errorCh)\n}\n\nfunc (s *SSHGoCryptoCommandSuite) TestStrictHostChecksNoMismatch(c *gc.C) {\n\tvar readLineWriter mockReadLineWriter\n\tssh.PatchTerminal(&s.CleanupSuite, &readLineWriter)\n\n\tserver, serverKey := s.newServer(c, cryptossh.ServerConfig{NoClientAuth: true})\n\tserverPort := server.listener.Addr().(*net.TCPAddr).Port\n\terrorCh := make(chan error, 1)\n\tdone := make(chan bool)\n\tdefer close(done)\n\tgo server.run(errorCh, done)\n\n\t// Write a mismatching key to the known_hosts file. 
Even with\n\t// StrictHostChecksNo, we should be verifying against an existing\n\t// host key.\n\t_, alternativeKey, err := generateED25519Key(rand.Reader)\n\tc.Assert(err, jc.ErrorIsNil)\n\talternativePublicKey, err := cryptossh.NewPublicKey(alternativeKey.Public())\n\tc.Assert(err, jc.ErrorIsNil)\n\terr = ioutil.WriteFile(s.knownHostsFile, []byte(fmt.Sprintf(\n\t\t\"[127.0.0.1]:%d %s\",\n\t\tserverPort,\n\t\tcryptossh.MarshalAuthorizedKey(alternativePublicKey),\n\t)), 0600)\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tvar opts ssh.Options\n\topts.SetPort(serverPort)\n\topts.SetStrictHostKeyChecking(ssh.StrictHostChecksNo)\n\tclient, _ := newClient(c)\n\tcmd := client.Command(\"127.0.0.1\", testCommand, &opts)\n\t_, err = cmd.Output()\n\tc.Assert(err, gc.ErrorMatches, \"ssh: handshake failed: knownhosts: key mismatch\")\n\n\tc.Assert(readLineWriter.written.String(), gc.Matches, fmt.Sprintf(`\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n@    WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED!     
@\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\nIT IS POSSIBLE THAT SOMEONE IS DOING SOMETHING NASTY!\nSomeone could be eavesdropping on you right now \\(man-in-the-middle attack\\)!\nIt is also possible that a host key has just been changed.\nThe fingerprint for the ssh-ed25519 key sent by the remote host is\n%s.\nPlease contact your system administrator.\nAdd correct host key in .*/known_hosts to get rid of this message.\nOffending ssh-ed25519 key in .*/known_hosts:1\n`[1:], regexp.QuoteMeta(cryptossh.FingerprintSHA256(serverKey))))\n\t_ = waitForServer(c, errorCh)\n}\n\nfunc (s *SSHGoCryptoCommandSuite) TestStrictHostChecksDifferentKeyTypes(c *gc.C) {\n\tvar readLineWriter mockReadLineWriter\n\tssh.PatchTerminal(&s.CleanupSuite, &readLineWriter)\n\n\tserver, serverKey := s.newServer(c, cryptossh.ServerConfig{NoClientAuth: true})\n\tserverPort := server.listener.Addr().(*net.TCPAddr).Port\n\terrorCh := make(chan error, 1)\n\tdone := make(chan bool)\n\tdefer close(done)\n\tgo server.run(errorCh, done)\n\n\t// Write a mismatching key to the known_hosts file with a different\n\t// key type. 
Even with StrictHostChecksNo, we should be verifying\n\t// against an existing host key.\n\tdsaKey, err := generateDSAKey(rand.Reader)\n\tc.Assert(err, jc.ErrorIsNil)\n\talternativePublicKey, err := cryptossh.NewPublicKey(&dsaKey.PublicKey)\n\tc.Assert(err, jc.ErrorIsNil)\n\terr = ioutil.WriteFile(s.knownHostsFile, []byte(fmt.Sprintf(\n\t\t\"[127.0.0.1]:%d %s\",\n\t\tserverPort,\n\t\tcryptossh.MarshalAuthorizedKey(alternativePublicKey),\n\t)), 0600)\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tvar opts ssh.Options\n\topts.SetPort(serverPort)\n\topts.SetStrictHostKeyChecking(ssh.StrictHostChecksNo)\n\n\tclient, _ := newClient(c)\n\tcmd := client.Command(\"127.0.0.1\", testCommand, &opts)\n\t_, err = cmd.Output()\n\tc.Assert(err, gc.ErrorMatches, \"ssh: handshake failed: knownhosts: key mismatch\")\n\n\tc.Assert(readLineWriter.written.String(), gc.Matches, fmt.Sprintf(`\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n@    WARNING: REMOTE HOST IDENTIFICATION HAS CHANGED!     @\n@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\nIT IS POSSIBLE THAT SOMEONE IS DOING SOMETHING NASTY!\nSomeone could be eavesdropping on you right now \\(man-in-the-middle attack\\)!\nIt is also possible that a host key has just been changed.\nThe fingerprint for the ssh-ed25519 key sent by the remote host is\n%s.\nPlease contact your system administrator.\nAdd correct host key in .*/known_hosts to get rid of this message.\nHost was previously using different host key algorithms:\n - ssh-dss key in .*/known_hosts:1\n`[1:], regexp.QuoteMeta(cryptossh.FingerprintSHA256(serverKey))))\n\t_ = waitForServer(c, errorCh)\n}\n\ntype mockReadLineWriter struct {\n\ttesting.Stub\n\tlines   []string\n\twritten bytes.Buffer\n}\n\nfunc (m *mockReadLineWriter) addLine(line string) {\n\tm.lines = append(m.lines, line)\n}\n\nfunc (m *mockReadLineWriter) ReadLine() (string, error) {\n\tm.MethodCall(m, \"ReadLine\")\n\tif len(m.lines) == 0 {\n\t\treturn \"\", io.EOF\n\t}\n\tline := 
m.lines[0]\n\tm.lines = m.lines[1:]\n\treturn line, nil\n}\n\nfunc (m *mockReadLineWriter) Write(data []byte) (int, error) {\n\tm.MethodCall(m, \"Write\", data)\n\treturn m.written.Write(data)\n}\n"
  },
  {
    "path": "ssh/ssh_openssh.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage ssh\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os/exec\"\n\t\"strings\"\n\n\t\"github.com/juju/errors\"\n\t\"github.com/juju/utils/v4\"\n)\n\n// default identities will not be attempted if\n// -i is specified and they are not explcitly\n// included.\nvar defaultIdentities = []string{\n\t\"~/.ssh/identity\",\n\t\"~/.ssh/id_rsa\",\n\t\"~/.ssh/id_dsa\",\n\t\"~/.ssh/id_ecdsa\",\n\t\"~/.ssh/id_ed25519\",\n}\n\ntype opensshCommandKind int\n\nconst (\n\tsshKind opensshCommandKind = iota\n\tscpKind\n)\n\n// sshpassWrap wraps the command/args with sshpass if it is found in $PATH\n// and the SSHPASS environment variable is set. Otherwise, the original\n// command/args are returned.\nfunc sshpassWrap(cmd string, args []string) (string, []string) {\n\tif os.Getenv(\"SSHPASS\") != \"\" {\n\t\tif path, err := exec.LookPath(\"sshpass\"); err == nil {\n\t\t\treturn path, append([]string{\"-e\", cmd}, args...)\n\t\t}\n\t}\n\treturn cmd, args\n}\n\n// OpenSSHClient is an implementation of Client that\n// uses the ssh and scp executables found in $PATH.\ntype OpenSSHClient struct{}\n\n// NewOpenSSHClient creates a new OpenSSHClient.\n// If the ssh and scp programs cannot be found\n// in $PATH, then an error is returned.\nfunc NewOpenSSHClient() (*OpenSSHClient, error) {\n\tvar c OpenSSHClient\n\tif _, err := exec.LookPath(\"ssh\"); err != nil {\n\t\treturn nil, err\n\t}\n\tif _, err := exec.LookPath(\"scp\"); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &c, nil\n}\n\nfunc opensshOptions(options *Options, commandKind opensshCommandKind) []string {\n\tif options == nil {\n\t\toptions = &Options{}\n\t}\n\tvar args []string\n\n\tvar hostChecks string\n\tswitch options.strictHostKeyChecking {\n\tcase StrictHostChecksYes:\n\t\thostChecks = \"yes\"\n\tcase StrictHostChecksNo:\n\t\thostChecks = \"no\"\n\tcase StrictHostChecksAsk:\n\t\thostChecks = 
\"ask\"\n\tdefault:\n\t\t// StrictHostChecksUnset and invalid values are handled the\n\t\t// same way (the option doesn't get included).\n\t}\n\tif hostChecks != \"\" {\n\t\targs = append(args, \"-o\", \"StrictHostKeyChecking \"+hostChecks)\n\t}\n\n\tif len(options.proxyCommand) > 0 {\n\t\targs = append(args, \"-o\", \"ProxyCommand \"+utils.CommandString(options.proxyCommand...))\n\t}\n\n\tif !options.passwordAuthAllowed {\n\t\targs = append(args, \"-o\", \"PasswordAuthentication no\")\n\t}\n\n\t// We must set ServerAliveInterval or the server may\n\t// think we've become unresponsive on long running\n\t// command executions such as \"apt-get upgrade\".\n\targs = append(args, \"-o\", \"ServerAliveInterval 30\")\n\n\tif options.allocatePTY {\n\t\targs = append(args, \"-t\", \"-t\") // twice to force\n\t}\n\tif options.knownHostsFile != \"\" {\n\t\targs = append(args, \"-o\", \"UserKnownHostsFile \"+utils.CommandString(options.knownHostsFile))\n\t}\n\tif len(options.hostKeyAlgorithms) > 0 {\n\t\targs = append(args, \"-o\", \"HostKeyAlgorithms \"+utils.CommandString(strings.Join(options.hostKeyAlgorithms, \",\")))\n\t}\n\tidentities := append([]string{}, options.identities...)\n\tif pk := PrivateKeyFiles(); len(pk) > 0 {\n\t\t// Add client keys as implicit identities\n\t\tidentities = append(identities, pk...)\n\t}\n\t// If any identities are specified, the\n\t// default ones must be explicitly specified.\n\tif len(identities) > 0 {\n\t\tfor _, identity := range defaultIdentities {\n\t\t\tpath, err := utils.NormalizePath(identity)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Warningf(\"failed to normalize path %q: %v\", identity, err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif _, err := os.Stat(path); err == nil {\n\t\t\t\tidentities = append(identities, path)\n\t\t\t}\n\t\t}\n\t}\n\tfor _, identity := range identities {\n\t\targs = append(args, \"-i\", identity)\n\t}\n\tif options.port != 0 {\n\t\tport := fmt.Sprint(options.port)\n\t\tif commandKind == scpKind {\n\t\t\t// scp 
uses -P instead of -p (-p means preserve).\n\t\t\targs = append(args, \"-P\", port)\n\t\t} else {\n\t\t\targs = append(args, \"-p\", port)\n\t\t}\n\t}\n\treturn args\n}\n\n// Command implements Client.Command.\nfunc (c *OpenSSHClient) Command(host string, command []string, options *Options) *Cmd {\n\targs := opensshOptions(options, sshKind)\n\targs = append(args, host)\n\tif len(command) > 0 {\n\t\targs = append(args, command...)\n\t}\n\tbin, args := sshpassWrap(\"ssh\", args)\n\tlogger.Tracef(\"running: %s %s\", bin, utils.CommandString(args...))\n\treturn &Cmd{impl: &opensshCmd{exec.Command(bin, args...)}}\n}\n\n// Copy implements Client.Copy.\nfunc (c *OpenSSHClient) Copy(args []string, userOptions *Options) error {\n\tvar options Options\n\tif userOptions != nil {\n\t\toptions = *userOptions\n\t\toptions.allocatePTY = false // doesn't make sense for scp\n\t}\n\tallArgs := opensshOptions(&options, scpKind)\n\tallArgs = append(allArgs, args...)\n\tbin, allArgs := sshpassWrap(\"scp\", allArgs)\n\tcmd := exec.Command(bin, allArgs...)\n\tvar stderr bytes.Buffer\n\tcmd.Stderr = &stderr\n\tlogger.Tracef(\"running: %s %s\", bin, utils.CommandString(args...))\n\tif err := cmd.Run(); err != nil {\n\t\tstderr := strings.TrimSpace(stderr.String())\n\t\tif len(stderr) > 0 {\n\t\t\terr = errors.Errorf(\"%v (%v)\", err, stderr)\n\t\t}\n\t\treturn err\n\t}\n\treturn nil\n}\n\ntype opensshCmd struct {\n\t*exec.Cmd\n}\n\nfunc (c *opensshCmd) SetStdio(stdin io.Reader, stdout, stderr io.Writer) {\n\tc.Stdin, c.Stdout, c.Stderr = stdin, stdout, stderr\n}\n\nfunc (c *opensshCmd) StdinPipe() (io.WriteCloser, io.Reader, error) {\n\twc, err := c.Cmd.StdinPipe()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn wc, c.Stdin, nil\n}\n\nfunc (c *opensshCmd) StdoutPipe() (io.ReadCloser, io.Writer, error) {\n\trc, err := c.Cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn rc, c.Stdout, nil\n}\n\nfunc (c *opensshCmd) StderrPipe() (io.ReadCloser, io.Writer, 
error) {\n\trc, err := c.Cmd.StderrPipe()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn rc, c.Stderr, nil\n}\n\nfunc (c *opensshCmd) Kill() error {\n\tif c.Process == nil {\n\t\treturn errors.Errorf(\"process has not been started\")\n\t}\n\treturn c.Process.Kill()\n}\n"
  },
  {
    "path": "ssh/ssh_test.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\n//go:build !windows\n// +build !windows\n\npackage ssh_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4\"\n\t\"github.com/juju/utils/v4/ssh\"\n)\n\nconst (\n\techoCommand = \"/bin/echo\"\n\techoScript  = \"#!/bin/sh\\n\" + echoCommand + \" $0 \\\"$@\\\" | /usr/bin/tee $0.args\"\n)\n\ntype SSHCommandSuite struct {\n\ttesting.IsolationSuite\n\toriginalPath string\n\ttestbin      string\n\tfakessh      string\n\tfakescp      string\n\tclient       ssh.Client\n}\n\nvar _ = gc.Suite(&SSHCommandSuite{})\n\nfunc (s *SSHCommandSuite) SetUpTest(c *gc.C) {\n\ts.IsolationSuite.SetUpTest(c)\n\ts.testbin = c.MkDir()\n\ts.fakessh = filepath.Join(s.testbin, \"ssh\")\n\ts.fakescp = filepath.Join(s.testbin, \"scp\")\n\terr := ioutil.WriteFile(s.fakessh, []byte(echoScript), 0755)\n\tc.Assert(err, jc.ErrorIsNil)\n\terr = ioutil.WriteFile(s.fakescp, []byte(echoScript), 0755)\n\tc.Assert(err, jc.ErrorIsNil)\n\ts.PatchEnvPathPrepend(s.testbin)\n\ts.client, err = ssh.NewOpenSSHClient()\n\tc.Assert(err, jc.ErrorIsNil)\n\ts.PatchValue(ssh.DefaultIdentities, nil)\n}\n\nfunc (s *SSHCommandSuite) command(args ...string) *ssh.Cmd {\n\treturn s.commandOptions(args, nil)\n}\n\nfunc (s *SSHCommandSuite) commandOptions(args []string, opts *ssh.Options) *ssh.Cmd {\n\treturn s.client.Command(\"localhost\", args, opts)\n}\n\nfunc (s *SSHCommandSuite) assertCommandArgs(c *gc.C, cmd *ssh.Cmd, expected string) {\n\tout, err := cmd.Output()\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(strings.TrimSpace(string(out)), gc.Equals, expected)\n}\n\nfunc (s *SSHCommandSuite) TestDefaultClient(c *gc.C) {\n\tssh.InitDefaultClient()\n\tc.Assert(ssh.DefaultClient, gc.FitsTypeOf, 
&ssh.OpenSSHClient{})\n\ts.PatchEnvironment(\"PATH\", \"\")\n\tssh.InitDefaultClient()\n\tc.Assert(ssh.DefaultClient, gc.FitsTypeOf, &ssh.GoCryptoClient{})\n}\n\nfunc (s *SSHCommandSuite) TestCommandSSHPass(c *gc.C) {\n\t// First create a fake sshpass, but don't set $SSHPASS\n\tfakesshpass := filepath.Join(s.testbin, \"sshpass\")\n\terr := ioutil.WriteFile(fakesshpass, []byte(echoScript), 0755)\n\ts.assertCommandArgs(c, s.command(echoCommand, \"123\"),\n\t\tfmt.Sprintf(\"%s -o PasswordAuthentication no -o ServerAliveInterval 30 localhost %s 123\",\n\t\t\ts.fakessh, echoCommand),\n\t)\n\t// Now set $SSHPASS.\n\ts.PatchEnvironment(\"SSHPASS\", \"anyoldthing\")\n\ts.assertCommandArgs(c, s.command(echoCommand, \"123\"),\n\t\tfmt.Sprintf(\"%s -e ssh -o PasswordAuthentication no -o ServerAliveInterval 30 localhost %s 123\",\n\t\t\tfakesshpass, echoCommand),\n\t)\n\t// Finally, remove sshpass from $PATH.\n\terr = os.Remove(fakesshpass)\n\tc.Assert(err, jc.ErrorIsNil)\n\ts.assertCommandArgs(c, s.command(echoCommand, \"123\"),\n\t\tfmt.Sprintf(\"%s -o PasswordAuthentication no -o ServerAliveInterval 30 localhost %s 123\",\n\t\t\ts.fakessh, echoCommand),\n\t)\n}\n\nfunc (s *SSHCommandSuite) TestCommand(c *gc.C) {\n\ts.assertCommandArgs(c, s.command(echoCommand, \"123\"),\n\t\tfmt.Sprintf(\"%s -o PasswordAuthentication no -o ServerAliveInterval 30 localhost %s 123\",\n\t\t\ts.fakessh, echoCommand),\n\t)\n}\n\nfunc (s *SSHCommandSuite) TestCommandEnablePTY(c *gc.C) {\n\tvar opts ssh.Options\n\topts.EnablePTY()\n\ts.assertCommandArgs(c, s.commandOptions([]string{echoCommand, \"123\"}, &opts),\n\t\tfmt.Sprintf(\"%s -o PasswordAuthentication no -o ServerAliveInterval 30 -t -t localhost %s 123\",\n\t\t\ts.fakessh, echoCommand),\n\t)\n}\n\nfunc (s *SSHCommandSuite) TestCommandSetKnownHostsFile(c *gc.C) {\n\tvar opts ssh.Options\n\topts.SetKnownHostsFile(\"/tmp/known hosts\")\n\ts.assertCommandArgs(c, s.commandOptions([]string{echoCommand, \"123\"}, &opts),\n\t\tfmt.Sprintf(\"%s -o 
PasswordAuthentication no -o ServerAliveInterval 30 -o UserKnownHostsFile \\\"/tmp/known hosts\\\" localhost %s 123\",\n\t\t\ts.fakessh, echoCommand),\n\t)\n}\n\nfunc (s *SSHCommandSuite) TestSetStrictHostKeyChecking(c *gc.C) {\n\tcommandPattern := fmt.Sprintf(\"%s%%s -o PasswordAuthentication no -o ServerAliveInterval 30 localhost %s 123\",\n\t\ts.fakessh, echoCommand)\n\n\ttests := []struct {\n\t\tinput    ssh.StrictHostChecksOption\n\t\texpected string\n\t}{\n\t\t{ssh.StrictHostChecksNo, \"no\"},\n\t\t{ssh.StrictHostChecksYes, \"yes\"},\n\t\t{ssh.StrictHostChecksAsk, \"ask\"},\n\t\t{ssh.StrictHostChecksDefault, \"\"},\n\t\t{ssh.StrictHostChecksOption(999), \"\"},\n\t}\n\tfor _, t := range tests {\n\t\tvar opts ssh.Options\n\t\topts.SetStrictHostKeyChecking(t.input)\n\t\texpectedOpt := \"\"\n\t\tif t.expected != \"\" {\n\t\t\texpectedOpt = \" -o StrictHostKeyChecking \" + t.expected\n\t\t}\n\t\ts.assertCommandArgs(c, s.commandOptions([]string{echoCommand, \"123\"}, &opts),\n\t\t\tfmt.Sprintf(commandPattern, expectedOpt))\n\t}\n}\n\nfunc (s *SSHCommandSuite) TestCommandAllowPasswordAuthentication(c *gc.C) {\n\tvar opts ssh.Options\n\topts.AllowPasswordAuthentication()\n\ts.assertCommandArgs(c, s.commandOptions([]string{echoCommand, \"123\"}, &opts),\n\t\tfmt.Sprintf(\"%s -o ServerAliveInterval 30 localhost %s 123\",\n\t\t\ts.fakessh, echoCommand),\n\t)\n}\n\nfunc (s *SSHCommandSuite) TestCommandIdentities(c *gc.C) {\n\tvar opts ssh.Options\n\topts.SetIdentities(\"x\", \"y\")\n\ts.assertCommandArgs(c, s.commandOptions([]string{echoCommand, \"123\"}, &opts),\n\t\tfmt.Sprintf(\"%s -o PasswordAuthentication no -o ServerAliveInterval 30 -i x -i y localhost %s 123\",\n\t\t\ts.fakessh, echoCommand),\n\t)\n}\n\nfunc (s *SSHCommandSuite) TestCommandPort(c *gc.C) {\n\tvar opts ssh.Options\n\topts.SetPort(2022)\n\ts.assertCommandArgs(c, s.commandOptions([]string{echoCommand, \"123\"}, &opts),\n\t\tfmt.Sprintf(\"%s -o PasswordAuthentication no -o ServerAliveInterval 30 -p 
2022 localhost %s 123\",\n\t\t\ts.fakessh, echoCommand),\n\t)\n}\n\nfunc (s *SSHCommandSuite) TestCopy(c *gc.C) {\n\tvar opts ssh.Options\n\topts.EnablePTY()\n\topts.AllowPasswordAuthentication()\n\topts.SetIdentities(\"x\", \"y\")\n\topts.SetPort(2022)\n\terr := s.client.Copy([]string{\"/tmp/blah\", \"foo@bar.com:baz\"}, &opts)\n\tc.Assert(err, jc.ErrorIsNil)\n\tout, err := ioutil.ReadFile(s.fakescp + \".args\")\n\tc.Assert(err, jc.ErrorIsNil)\n\t// EnablePTY has no effect for Copy\n\tc.Assert(string(out), gc.Equals, s.fakescp+\" -o ServerAliveInterval 30 -i x -i y -P 2022 /tmp/blah foo@bar.com:baz\\n\")\n\n\t// Try passing extra args\n\terr = s.client.Copy([]string{\"/tmp/blah\", \"foo@bar.com:baz\", \"-r\", \"-v\"}, &opts)\n\tc.Assert(err, jc.ErrorIsNil)\n\tout, err = ioutil.ReadFile(s.fakescp + \".args\")\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(string(out), gc.Equals, s.fakescp+\" -o ServerAliveInterval 30 -i x -i y -P 2022 /tmp/blah foo@bar.com:baz -r -v\\n\")\n\n\t// Try interspersing extra args\n\terr = s.client.Copy([]string{\"-r\", \"/tmp/blah\", \"-v\", \"foo@bar.com:baz\"}, &opts)\n\tc.Assert(err, jc.ErrorIsNil)\n\tout, err = ioutil.ReadFile(s.fakescp + \".args\")\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Assert(string(out), gc.Equals, s.fakescp+\" -o ServerAliveInterval 30 -i x -i y -P 2022 -r /tmp/blah -v foo@bar.com:baz\\n\")\n}\n\nfunc (s *SSHCommandSuite) TestCommandClientKeys(c *gc.C) {\n\tdefer overrideGenerateKey().Restore()\n\tclientKeysDir := c.MkDir()\n\tdefer ssh.ClearClientKeys()\n\terr := ssh.LoadClientKeys(clientKeysDir)\n\tc.Assert(err, jc.ErrorIsNil)\n\tck := filepath.Join(clientKeysDir, \"juju_id_ed25519\")\n\tvar opts ssh.Options\n\topts.SetIdentities(\"x\", \"y\")\n\ts.assertCommandArgs(c, s.commandOptions([]string{echoCommand, \"123\"}, &opts),\n\t\tfmt.Sprintf(\"%s -o PasswordAuthentication no -o ServerAliveInterval 30 -i x -i y -i %s localhost %s 123\",\n\t\t\ts.fakessh, ck, echoCommand),\n\t)\n}\n\nfunc (s *SSHCommandSuite) 
TestCommandError(c *gc.C) {\n\tvar opts ssh.Options\n\terr := ioutil.WriteFile(s.fakessh, []byte(\"#!/bin/sh\\nexit 42\"), 0755)\n\tc.Assert(err, jc.ErrorIsNil)\n\tcommand := s.client.Command(\"ignored\", []string{echoCommand, \"foo\"}, &opts)\n\terr = command.Run()\n\tc.Assert(utils.IsRcPassthroughError(err), jc.IsTrue)\n}\n\nfunc (s *SSHCommandSuite) TestCommandDefaultIdentities(c *gc.C) {\n\tvar opts ssh.Options\n\ttempdir := c.MkDir()\n\tdef1 := filepath.Join(tempdir, \"def1\")\n\tdef2 := filepath.Join(tempdir, \"def2\")\n\ts.PatchValue(ssh.DefaultIdentities, []string{def1, def2})\n\t// If no identities are specified, then the defaults aren't added.\n\ts.assertCommandArgs(c, s.commandOptions([]string{echoCommand, \"123\"}, &opts),\n\t\tfmt.Sprintf(\"%s -o PasswordAuthentication no -o ServerAliveInterval 30 localhost %s 123\",\n\t\t\ts.fakessh, echoCommand),\n\t)\n\t// If identities are specified, then the defaults are must added.\n\t// Only the defaults that exist on disk will be added.\n\terr := ioutil.WriteFile(def2, nil, 0644)\n\tc.Assert(err, jc.ErrorIsNil)\n\topts.SetIdentities(\"x\", \"y\")\n\ts.assertCommandArgs(c, s.commandOptions([]string{echoCommand, \"123\"}, &opts),\n\t\tfmt.Sprintf(\"%s -o PasswordAuthentication no -o ServerAliveInterval 30 -i x -i y -i %s localhost %s 123\",\n\t\t\ts.fakessh, def2, echoCommand),\n\t)\n}\n\nfunc (s *SSHCommandSuite) TestCopyReader(c *gc.C) {\n\tclient := &fakeClient{}\n\tr := bytes.NewBufferString(\"<data>\")\n\n\terr := ssh.TestCopyReader(client, \"foo@bar.com:baz\", \"/tmp/blah\", r, nil)\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tclient.checkCalls(c, \"foo@bar.com:baz\", []string{\"cat - > /tmp/blah\"}, nil, nil, \"Command\")\n\tclient.impl.checkCalls(c, r, nil, nil, \"SetStdio\", \"Start\", \"Wait\")\n}\n"
  },
  {
    "path": "ssh/stream.go",
    "content": "// Copyright 2016 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage ssh\n\nimport (\n\t\"bytes\"\n\t\"io\"\n)\n\n// stripCR implements an io.Reader wrapper that removes carriage return bytes.\ntype stripCR struct {\n\treader io.Reader\n}\n\n// StripCRReader returns a new io.Reader wrapper that strips carriage returns.\nfunc StripCRReader(reader io.Reader) io.Reader {\n\tif reader == nil {\n\t\treturn nil\n\t}\n\treturn &stripCR{reader: reader}\n}\n\nvar byteEmpty = []byte{}\nvar byteCR = []byte{'\\r'}\n\n// Read implements io.Reader interface.\n// This copies data around much more than needed so should be optimized if\n// used on a performance critical path.\nfunc (s *stripCR) Read(bufOut []byte) (int, error) {\n\tbufTemp := make([]byte, len(bufOut))\n\tn, err := s.reader.Read(bufTemp)\n\tbufReplaced := bytes.Replace(bufTemp[:n], byteCR, byteEmpty, -1)\n\tcopy(bufOut, bufReplaced)\n\treturn len(bufReplaced), err\n}\n"
  },
  {
    "path": "ssh/stream_test.go",
    "content": "// Copyright 2016 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage ssh_test\n\nimport (\n\t\"io/ioutil\"\n\t\"strings\"\n\t\"testing/iotest\"\n\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/ssh\"\n)\n\ntype SSHStreamSuite struct {\n\ttesting.IsolationSuite\n}\n\nvar _ = gc.Suite(&SSHStreamSuite{})\n\nfunc (s *SSHStreamSuite) TestNewStripCRNil(c *gc.C) {\n\treader := ssh.StripCRReader(nil)\n\tc.Assert(reader, gc.IsNil)\n}\n\nfunc (s *SSHStreamSuite) TestStripCR(c *gc.C) {\n\treader := ssh.StripCRReader(strings.NewReader(\"One\\r\\nTwo\"))\n\toutput, err := ioutil.ReadAll(reader)\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Check(string(output), gc.Equals, \"One\\nTwo\")\n}\n\nfunc (s *SSHStreamSuite) TestStripCROneByte(c *gc.C) {\n\treader := ssh.StripCRReader(strings.NewReader(\"One\\r\\r\\rTwo\"))\n\toutput, err := ioutil.ReadAll(iotest.OneByteReader(reader))\n\tc.Assert(err, jc.ErrorIsNil)\n\tc.Check(string(output), gc.Equals, \"OneTwo\")\n}\n\nfunc (s *SSHStreamSuite) TestStripCRError(c *gc.C) {\n\treader := ssh.StripCRReader(strings.NewReader(\"One\\r\\r\\rTwo\"))\n\t_, err := ioutil.ReadAll(iotest.TimeoutReader(reader))\n\tc.Assert(err.Error(), gc.Equals, \"timeout\")\n}\n"
  },
  {
    "path": "ssh/stream_wrapper_unix.go",
    "content": "// Copyright 2016 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\n//go:build !windows\n// +build !windows\n\npackage ssh\n\nimport (\n\t\"io\"\n)\n\n// WrapStdin returns the original stdin stream on nix platforms.\nfunc WrapStdin(reader io.Reader) io.Reader {\n\treturn reader\n}\n"
  },
  {
    "path": "ssh/stream_wrapper_windows.go",
    "content": "// Copyright 2016 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage ssh\n\nimport (\n\t\"io\"\n)\n\n// WrapStdin returns stdin with carriage returns stripped on windows.\nfunc WrapStdin(reader io.Reader) io.Reader {\n\treturn StripCRReader(reader)\n}\n"
  },
  {
    "path": "ssh/testing/keys.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage testing\n\ntype SSHKey struct {\n\tKey         string\n\tFingerprint string\n}\n\nvar (\n\tValidKeyOne = SSHKey{\n\t\t`ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDEX/dPu4PmtvgK3La9zioCEDrJ` +\n\t\t\t`yUr6xEIK7Pr+rLgydcqWTU/kt7w7gKjOw4vvzgHfjKl09CWyvgb+y5dCiTk` +\n\t\t\t`9MxI+erGNhs3pwaoS+EavAbawB7iEqYyTep3YaJK+4RJ4OX7ZlXMAIMrTL+` +\n\t\t\t`UVrK89t56hCkFYaAgo3VY+z6rb/b3bDBYtE1Y2tS7C3au73aDgeb9psIrSV` +\n\t\t\t`86ucKBTl5X62FnYiyGd++xCnLB6uLximM5OKXfLzJQNS/QyZyk12g3D8y69` +\n\t\t\t`Xw1GzCSKX1u1+MQboyf0HJcG2ryUCLHdcDVppApyHx2OLq53hlkQ/yxdflD` +\n\t\t\t`qCqAE4j+doagSsIfC1T2T`,\n\t\t\"86:ed:1b:cd:26:a0:a3:4c:27:35:49:60:95:b7:0f:68\",\n\t}\n\n\tValidKeyTwo = SSHKey{\n\t\t`ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDNC6zK8UMazlVgp8en8N7m7H/Y6` +\n\t\t\t`DoMWbmPFjXYRXu6iQJJ18hCtsfMe63E5/PBaOjDT8am0Sx3Eqn4ZzpWMj+z` +\n\t\t\t`knTcSd8xnMHYYxH2HStRWC1akTe4tTno2u2mqzjKd8f62URPtIocYCNRBls` +\n\t\t\t`9yjnq9SogI5EXgcx6taQcrIFcIK0SlthxxcMVSlLpnbReujW65JHtiMqoYA` +\n\t\t\t`OIALyO+Rkmtvb/ObmViDnwCKCN1up/xWt6J10MrAUtpI5b4prqG7FOqVMM/` +\n\t\t\t`zdgrVg6rUghnzdYeQ8QMyEv4mVSLzX0XIPcxorkl9q06s5mZmAzysEbKZCO` +\n\t\t\t`aXcLeNlXx/nkmuWslYCJ`,\n\t\t\"2f:fb:b0:65:68:c8:4e:a6:1b:a6:4b:8d:14:0b:40:79\",\n\t}\n\n\tValidKeyThree = SSHKey{\n\t\t`ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCpGj1JMjGjAFt5wjARbIORyjQ/c` +\n\t\t\t`ZAiDyDHe/w8qmLKUG2KTs6586QqqM6DKPZiYesrzXqvZsWYV4B6OjLM1sxq` +\n\t\t\t`WjeDIl56PSnJ0+KP8pUV9KTkkKtRXxAoNg/II4l69e05qGffj9AcQ/7JPxx` +\n\t\t\t`eL14Ulvh/a69r3uVkw1UGVk9Bwm4eCOSCqKalYLA1k5da6crEAXn9hiXLGs` +\n\t\t\t`S9dOn3Lsqj5tK31aaUncue+a3iKb7R5LRFflDizzNS+h8tPuANQflOjOhR0` +\n\t\t\t`Vas0BsurgISseZZ0NIMISyWhZpr0eOBWA/YruN9r++kYPOnDy0eMaOVGLO7` +\n\t\t\t`SQwJ/6QHvf73yksJTncz`,\n\t\t\"1d:cf:ab:66:8a:f6:77:fb:4c:b2:59:6f:12:cf:cb:2f\",\n\t}\n\n\tValidKeyFour = SSHKey{\n\t\t`ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCSEDMH5RyjGtEMIqM2RiPYYQgUK` 
+\n\t\t\t`9wdHCo1/AXkuQ7m1iVjHhACp8Oawf2Grn7hO4e0JUn5FaEZOnDj/9HB2VPw` +\n\t\t\t`EDGBwSN1caVC3yrTVkqQcsxBY9nTV+spQQMsePOdUZALcoEilvAcLRETbyn` +\n\t\t\t`rybaS2bfzpqbA9MEEaKQKLKGdgqiMdNXAj5I/ik/BPp0ziOMlMl1A1zilnS` +\n\t\t\t`UXubs1U49WWV0A70vAASvZVTXr3zrPAmstH+9Ik6FdpeE99um08FXxKYWqZ` +\n\t\t\t`6rZF1M6L1/SqC7ediYdVgRCoti85kKhi7fZBzwrGcCnxer+D0GFz++KDSNS` +\n\t\t\t`iAnVZxyXhmBrwnR6Q/v7`,\n\t\t\"37:99:ab:96:c4:e8:f8:0b:0d:04:3e:1e:ee:66:e8:9e\",\n\t}\n\n\tValidKeyMulti = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDW+8zWO6qqXrHlcMK7obliuYp7D` +\n\t\t`vZBsK6rHlnbeV5Hh38Qn0GUX4Ahm6XeQ/NSx53wqkBQDGOJFY3s4w1a/hbd` +\n\t\t`PyLM2/yFXCYsj5FRf01JmUjAzWhuJMH9ViqzD//l4v8cR/pHC2B8PD6abKd` +\n\t\t`mIH+yLI9Cl3C4ICMKteG54egsUyboBOVKCDIKmWRLAak6sE5DPpqKF53NvD` +\n\t\t`cuDufWtaCfVAOrq6NW8wSQ7PAvfDh8gsG5uvZjY3gcWl9yI3EJVGFHcdxcv` +\n\t\t`4LtQI8mKdeg3JoufnEmeBJTZMoo83Gru5Z7tjv8J4JTUeQpd9uCCED1JAMe` +\n\t\t`cJSKgQ2gZMTbTshobpHr` + \"\\n\" +\n\t\t`ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSgfrzyGpE5eLiXusvLcxEmoE6e` +\n\t\t`SMUDvTW1dd2BZgfvUVwq+toQdZ6C0C1JmbC3X563n8fmKVUAQGo5JavzABG` +\n\t\t`Kpy90L3cwoGCFtb+A28YsT+bfuP+LdnCbFXm9c3DPJQx6Dch8prnDtzRjRV` +\n\t\t`CorbPvm35NY73liUXVF6g58Owlx5rWtb8OnoTh5KQps9JTSfyNckdV9bFxP` +\n\t\t`7bZvMyRYW5X33KaA+CQGpTNAKDHruSuKdAdaS6rBIZRvzzzSCF28BWwFL7Z` +\n\t\t`ghQo0ADlUMnqIeQ58nwRImZHpmvadsZi47aMKFeykk4JQUQlwjbM0xGi0uj` +\n\t\t`+hlaqGYbNo0Evcjn23cj`\n\n\tPartValidKeyMulti = `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDZRvG2miYVkbWOr2I+9xHWXqALb` +\n\t\t`eBcyxAlYtbjxBRwrq8oFOw9vtIIZSO0r1FM6+JHzKhLSiPCMR/PK78ZqPgZ` +\n\t\t`fia8Y7cEZKaUWLtZUAl0RF9w8EtsA/2gpuLZErjcoIx6fzfEYFCJcLgcQSc` +\n\t\t`RlKG8VZT6tWIjvoLj9ki6unkG5YGmapkT60afhf3/vd7pCJO/uyszkQ9qU8` +\n\t\t`odUDTTlwftpJtUb8xGmzpEZJTgk1lbZKlZm5pVXwjNEodH7Je88RBzR7PBB` +\n\t\t`Jct+vf8wVJ/UEFXCnamvHLanJTcJIi/I5qRlKns65Bwb8M0HszPYmvTfFRD` +\n\t\t`ZLi3sPUmw6PJCJ0SgATd` + \"\\n\" +\n\t\t`ssh-rsa bad key`\n\n\tMultiInvalid = `ssh-rsa bad key` + \"\\n\" +\n\t\t`ssh-rsa also bad`\n\n\tEmptyKeyMulti = \"\"\n)\n"
  },
  {
    "path": "symlink/export_test.go",
    "content": "// Copyright 2014 Cloudbase Solutions SRL\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage symlink\n\nvar (\n\tGetLongPathAsString = getLongPathAsString\n)\n"
  },
  {
    "path": "symlink/symlink.go",
    "content": "// Copyright 2014 Cloudbase Solutions SRL\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage symlink\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\n\t\"github.com/juju/utils/v4\"\n)\n\n// Replace will do an atomic replacement of a symlink to a new path\nfunc Replace(link, newpath string) error {\n\tdstDir := filepath.Dir(link)\n\tuuid, err := utils.NewUUID()\n\tif err != nil {\n\t\treturn err\n\t}\n\trandStr := uuid.String()\n\ttmpFile := filepath.Join(dstDir, \"tmpfile\"+randStr)\n\t// Create the new symlink before removing the old one. This way, if New()\n\t// fails, we still have a link to the old tools.\n\terr = New(newpath, tmpFile)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot create symlink: %s\", err)\n\t}\n\t// On Windows, symlinks may not be overwritten. We remove it first,\n\t// and then rename tmpFile\n\tif _, err := os.Stat(link); err == nil {\n\t\terr = os.RemoveAll(link)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\terr = os.Rename(tmpFile, link)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot update tools symlink: %v\", err)\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "symlink/symlink_posix.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\n//go:build linux || darwin\n// +build linux darwin\n\npackage symlink\n\nimport (\n\t\"os\"\n\n\t\"github.com/juju/errors\"\n)\n\n// New is a wrapper function for os.Symlink() on Linux\nfunc New(oldname, newname string) error {\n\treturn os.Symlink(oldname, newname)\n}\n\n// Read is a wrapper for os.Readlink() on Linux\nfunc Read(link string) (string, error) {\n\treturn os.Readlink(link)\n}\n\nfunc IsSymlink(path string) (bool, error) {\n\tst, err := os.Lstat(path)\n\tif err != nil {\n\t\treturn false, errors.Trace(err)\n\t}\n\treturn st.Mode()&os.ModeSymlink != 0, nil\n}\n\n// getLongPathAsString does nothing on linux. Its here for compatibillity\n// with the windows implementation\nfunc getLongPathAsString(path string) (string, error) {\n\treturn path, nil\n}\n"
  },
  {
    "path": "symlink/symlink_test.go",
    "content": "// Copyright 2014 Cloudbase Solutions SRL\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage symlink_test\n\nimport (\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4\"\n\t\"github.com/juju/utils/v4/symlink\"\n)\n\ntype SymlinkSuite struct{}\n\nvar _ = gc.Suite(&SymlinkSuite{})\n\nfunc Test(t *testing.T) {\n\tgc.TestingT(t)\n}\n\nfunc (*SymlinkSuite) TestReplace(c *gc.C) {\n\ttarget, err := symlink.GetLongPathAsString(c.MkDir())\n\tc.Assert(err, gc.IsNil)\n\ttarget_second, err := symlink.GetLongPathAsString(c.MkDir())\n\tc.Assert(err, gc.IsNil)\n\tlink := filepath.Join(target, \"link\")\n\n\t_, err = os.Stat(target)\n\tc.Assert(err, gc.IsNil)\n\t_, err = os.Stat(target_second)\n\tc.Assert(err, gc.IsNil)\n\n\terr = symlink.New(target, link)\n\tc.Assert(err, gc.IsNil)\n\n\tlink_target, err := symlink.Read(link)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(link_target, gc.Equals, filepath.FromSlash(target))\n\n\terr = symlink.Replace(link, target_second)\n\tc.Assert(err, gc.IsNil)\n\n\tlink_target, err = symlink.Read(link)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(link_target, gc.Equals, filepath.FromSlash(target_second))\n}\n\nfunc (*SymlinkSuite) TestIsSymlinkFile(c *gc.C) {\n\tdir, err := symlink.GetLongPathAsString(c.MkDir())\n\tc.Assert(err, gc.IsNil)\n\n\ttarget := filepath.Join(dir, \"file\")\n\terr = ioutil.WriteFile(target, []byte(\"TOP SECRET\"), 0644)\n\tc.Assert(err, gc.IsNil)\n\n\tlink := filepath.Join(dir, \"link\")\n\n\t_, err = os.Stat(target)\n\tc.Assert(err, gc.IsNil)\n\n\terr = symlink.New(target, link)\n\tc.Assert(err, gc.IsNil)\n\n\tisSymlink, err := symlink.IsSymlink(link)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(isSymlink, jc.IsTrue)\n}\n\nfunc (*SymlinkSuite) TestIsSymlinkFolder(c *gc.C) {\n\ttarget, err := symlink.GetLongPathAsString(c.MkDir())\n\tc.Assert(err, gc.IsNil)\n\n\tlink := 
filepath.Join(target, \"link\")\n\n\t_, err = os.Stat(target)\n\tc.Assert(err, gc.IsNil)\n\n\terr = symlink.New(target, link)\n\tc.Assert(err, gc.IsNil)\n\n\tisSymlink, err := symlink.IsSymlink(link)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(isSymlink, jc.IsTrue)\n}\n\nfunc (*SymlinkSuite) TestIsSymlinkFalseFile(c *gc.C) {\n\tdir := c.MkDir()\n\n\ttarget := filepath.Join(dir, \"file\")\n\terr := ioutil.WriteFile(target, []byte(\"TOP SECRET\"), 0644)\n\tc.Assert(err, gc.IsNil)\n\n\t_, err = os.Stat(target)\n\tc.Assert(err, gc.IsNil)\n\n\tisSymlink, err := symlink.IsSymlink(target)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(isSymlink, jc.IsFalse)\n}\n\nfunc (*SymlinkSuite) TestIsSymlinkFalseFolder(c *gc.C) {\n\ttarget, err := symlink.GetLongPathAsString(c.MkDir())\n\tc.Assert(err, gc.IsNil)\n\n\t_, err = os.Stat(target)\n\tc.Assert(err, gc.IsNil)\n\n\tisSymlink, err := symlink.IsSymlink(target)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(isSymlink, jc.IsFalse)\n}\n\nfunc (*SymlinkSuite) TestIsSymlinkFileDoesNotExist(c *gc.C) {\n\tdir := c.MkDir()\n\n\ttarget := filepath.Join(dir, \"file\")\n\n\tisSymlink, err := symlink.IsSymlink(target)\n\tc.Assert(err, gc.ErrorMatches, \".*\"+utils.NoSuchFileErrRegexp)\n\tc.Assert(isSymlink, jc.IsFalse)\n}\n"
  },
  {
    "path": "symlink/symlink_windows.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Copyright 2014 Cloudbase Solutions SRL\n// Licensed under the LGPLv3, see LICENCE file for details.\n// Author: Robert Tingirica\n\npackage symlink\n\nimport (\n\t\"os\"\n\t\"strings\"\n\t\"syscall\"\n\t\"unicode/utf16\"\n\t\"unsafe\"\n\n\t\"github.com/juju/errors\"\n)\n\nconst (\n\tSYMBOLIC_LINK_FLAG_DIRECTORY = 1\n\t// This is the equivalent of syscall.GENERIC_EXECUTION\n\t// Using syscall.GENERIC_EXECUTION results in an \"Access denied\" error\n\tGENERIC_EXECUTION = 33554432\n\t// (TODO): bogdanteleaga or anybody else:\n\t// Remove this once we upgrade to a go version that has it in the syscall\n\t// package\n\tFILE_ATTRIBUTE_REPARSE_POINT = 0x00000400\n)\n\n//sys createSymbolicLink(symlinkname *uint16, targetname *uint16, flags uint32) (err error) = CreateSymbolicLinkW\n//sys getFinalPathNameByHandle(handle Handle, buf *uint16, buflen uint32, flags uint32) (n uint32, err error) = GetFinalPathNameByHandleW\n\n// New creates newname as a symbolic link to oldname.\n// If there is an error, it will be of type *LinkError.\nfunc New(oldname, newname string) error {\n\tfi, err := os.Stat(oldname)\n\tif err != nil {\n\t\treturn &os.LinkError{\"symlink\", oldname, newname, err}\n\t}\n\tvar flag uint32\n\tif fi.IsDir() {\n\t\tflag = SYMBOLIC_LINK_FLAG_DIRECTORY\n\t}\n\n\ttargetp, err := getLongPath(oldname)\n\tif err != nil {\n\t\treturn &os.LinkError{\"symlink\", oldname, newname, err}\n\t}\n\n\tlinkp, err := syscall.UTF16PtrFromString(newname)\n\tif err != nil {\n\t\treturn &os.LinkError{\"symlink\", oldname, newname, err}\n\t}\n\n\terr = createSymbolicLink(linkp, &targetp[0], flag)\n\tif err != nil {\n\t\treturn &os.LinkError{\"symlink\", oldname, newname, err}\n\t}\n\treturn nil\n}\n\n// Read returns the destination of the named symbolic link.\n// If there is an error, it will be of type *PathError.\nfunc Read(link string) (string, error) {\n\tlinkp, err := getLongPath(link)\n\tif err != nil {\n\t\treturn \"\", 
err\n\t}\n\th, err := syscall.CreateFile(\n\t\t&linkp[0],\n\t\tsyscall.GENERIC_READ,\n\t\tsyscall.FILE_SHARE_READ,\n\t\tnil,\n\t\tsyscall.OPEN_EXISTING,\n\t\tGENERIC_EXECUTION,\n\t\t0)\n\tif err != nil {\n\t\treturn \"\", &os.PathError{\"readlink\", link, err}\n\t}\n\tdefer syscall.CloseHandle(h)\n\n\tpathw := make([]uint16, syscall.MAX_PATH)\n\tn, err := getFinalPathNameByHandle(h, &pathw[0], uint32(len(pathw)), 0)\n\tif err != nil {\n\t\treturn \"\", &os.PathError{\"readlink\", link, err}\n\t}\n\tif n > uint32(len(pathw)) {\n\t\tpathw = make([]uint16, n)\n\t\tn, err = getFinalPathNameByHandle(h, &pathw[0], uint32(len(pathw)), 0)\n\t\tif err != nil {\n\t\t\treturn \"\", &os.PathError{\"readlink\", link, err}\n\t\t}\n\t\tif n > uint32(len(pathw)) {\n\t\t\treturn \"\", &os.PathError{\"readlink\", link, errors.New(\"link length too long\")}\n\t\t}\n\t}\n\tret := string(utf16.Decode(pathw[0:n]))\n\n\tif strings.HasPrefix(ret, `\\\\?\\`) {\n\t\treturn ret[4:], nil\n\t}\n\n\tretp, err := getLongPath(ret)\n\tif err != nil {\n\t\treturn \"\", &os.PathError{\"readlink\", link, err}\n\t}\n\treturn syscall.UTF16ToString(retp), nil\n}\n\nfunc IsSymlink(path string) (bool, error) {\n\tvar fa syscall.Win32FileAttributeData\n\tnamep, err := syscall.UTF16PtrFromString(path)\n\tif err != nil {\n\t\treturn false, errors.Trace(err)\n\t}\n\terr = syscall.GetFileAttributesEx(namep, syscall.GetFileExInfoStandard, (*byte)(unsafe.Pointer(&fa)))\n\tif err != nil {\n\t\treturn false, errors.Trace(err)\n\t}\n\treturn fa.FileAttributes&FILE_ATTRIBUTE_REPARSE_POINT != 0, nil\n}\n\n// getLongPath converts windows 8.1 short style paths (c:\\Progra~1\\foo) to full\n// long paths.\nfunc getLongPath(path string) ([]uint16, error) {\n\tpathp, err := syscall.UTF16FromString(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlongp := pathp\n\tn, err := syscall.GetLongPathName(&pathp[0], &longp[0], uint32(len(longp)))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif n > uint32(len(longp)) 
{\n\t\tlongp = make([]uint16, n)\n\t\tn, err = syscall.GetLongPathName(&pathp[0], &longp[0], uint32(len(longp)))\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tlongp = longp[:n]\n\n\treturn longp, nil\n}\n\nfunc getLongPathAsString(path string) (string, error) {\n\tlongp, err := getLongPath(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn syscall.UTF16ToString(longp), nil\n}\n"
  },
  {
    "path": "symlink/symlink_windows_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Copyright 2014 Cloudbase Solutions SRL\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage symlink_test\n\nimport (\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path/filepath\"\n\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/symlink\"\n)\n\nfunc (*SymlinkSuite) TestLongPath(c *gc.C) {\n\tprogramFiles := `C:\\PROGRA~1`\n\tlongProg := `C:\\Program Files`\n\ttarget, err := symlink.GetLongPathAsString(programFiles)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(target, gc.Equals, longProg)\n}\n\nfunc (*SymlinkSuite) TestCreateSymLink(c *gc.C) {\n\ttarget, err := symlink.GetLongPathAsString(c.MkDir())\n\tc.Assert(err, gc.IsNil)\n\n\tlink := filepath.Join(target, \"link\")\n\n\t_, err = os.Stat(target)\n\tc.Assert(err, gc.IsNil)\n\n\terr = symlink.New(target, link)\n\tc.Assert(err, gc.IsNil)\n\n\tlink, err = symlink.Read(link)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(link, gc.Equals, filepath.FromSlash(target))\n}\n\nfunc (*SymlinkSuite) TestReadData(c *gc.C) {\n\tdir := c.MkDir()\n\tsub := filepath.Join(dir, \"sub\")\n\n\terr := os.Mkdir(sub, 0700)\n\tc.Assert(err, gc.IsNil)\n\n\toldname := filepath.Join(sub, \"foo\")\n\tdata := []byte(\"data\")\n\n\terr = ioutil.WriteFile(oldname, data, 0644)\n\tc.Assert(err, gc.IsNil)\n\n\tnewname := filepath.Join(dir, \"bar\")\n\terr = symlink.New(oldname, newname)\n\tc.Assert(err, gc.IsNil)\n\n\tb, err := ioutil.ReadFile(newname)\n\tc.Assert(err, gc.IsNil)\n\n\tc.Assert(string(b), gc.Equals, string(data))\n}\n"
  },
  {
    "path": "symlink/zsymlink_windows_386.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Copyright 2014 Cloudbase Solutions SRL\n// Licensed under the LGPLv3, see LICENCE file for details.\n\n// mksyscall_windows.pl -l32 symlink/symlink_windows.go\n// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\npackage symlink\n\nimport \"unsafe\"\nimport \"syscall\"\n\nvar (\n\tmodkernel32 = syscall.NewLazyDLL(\"kernel32.dll\")\n\n\tprocCreateSymbolicLinkW       = modkernel32.NewProc(\"CreateSymbolicLinkW\")\n\tprocGetFinalPathNameByHandleW = modkernel32.NewProc(\"GetFinalPathNameByHandleW\")\n)\n\nfunc createSymbolicLink(symlinkname *uint16, targetname *uint16, flags uint32) (err error) {\n\tr1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkname)), uintptr(unsafe.Pointer(targetname)), uintptr(flags))\n\tif r1 == 0 {\n\t\tif e1 != 0 {\n\t\t\terr = error(e1)\n\t\t} else {\n\t\t\terr = syscall.EINVAL\n\t\t}\n\t}\n\treturn\n}\n\nfunc getFinalPathNameByHandle(handle syscall.Handle, buf *uint16, buflen uint32, flags uint32) (n uint32, err error) {\n\tr0, _, e1 := syscall.Syscall6(procGetFinalPathNameByHandleW.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(flags), 0, 0)\n\tn = uint32(r0)\n\tif n == 0 {\n\t\tif e1 != 0 {\n\t\t\terr = error(e1)\n\t\t} else {\n\t\t\terr = syscall.EINVAL\n\t\t}\n\t}\n\treturn\n}\n"
  },
  {
    "path": "symlink/zsymlink_windows_amd64.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Copyright 2014 Cloudbase Solutions SRL\n// Licensed under the LGPLv3, see LICENCE file for details.\n\n// mksyscall_windows.pl symlink/symlink_windows.go\n// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\npackage symlink\n\nimport \"unsafe\"\nimport \"syscall\"\n\nvar (\n\tmodkernel32 = syscall.NewLazyDLL(\"kernel32.dll\")\n\n\tprocCreateSymbolicLinkW       = modkernel32.NewProc(\"CreateSymbolicLinkW\")\n\tprocGetFinalPathNameByHandleW = modkernel32.NewProc(\"GetFinalPathNameByHandleW\")\n)\n\nfunc createSymbolicLink(symlinkname *uint16, targetname *uint16, flags uint32) (err error) {\n\tr1, _, e1 := syscall.Syscall(procCreateSymbolicLinkW.Addr(), 3, uintptr(unsafe.Pointer(symlinkname)), uintptr(unsafe.Pointer(targetname)), uintptr(flags))\n\tif r1 == 0 {\n\t\tif e1 != 0 {\n\t\t\terr = error(e1)\n\t\t} else {\n\t\t\terr = syscall.EINVAL\n\t\t}\n\t}\n\treturn\n}\n\nfunc getFinalPathNameByHandle(handle syscall.Handle, buf *uint16, buflen uint32, flags uint32) (n uint32, err error) {\n\tr0, _, e1 := syscall.Syscall6(procGetFinalPathNameByHandleW.Addr(), 4, uintptr(handle), uintptr(unsafe.Pointer(buf)), uintptr(buflen), uintptr(flags), 0, 0)\n\tn = uint32(r0)\n\tif n == 0 {\n\t\tif e1 != 0 {\n\t\t\terr = error(e1)\n\t\t} else {\n\t\t\terr = syscall.EINVAL\n\t\t}\n\t}\n\treturn\n}\n"
  },
  {
    "path": "systemerrmessages_unix.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Copyright 2014 Cloudbase Solutions SRL\n// Licensed under the LGPLv3, see LICENCE file for details.\n\n//go:build !windows\n// +build !windows\n\npackage utils\n\n// The following are strings/regex-es which match common Unix error messages\n// that may be returned in case of failed calls to the system.\n// Any extra leading/trailing regex-es are left to be added by the developer.\nconst (\n\tNoSuchUserErrRegexp = `user: unknown user [a-z0-9_-]*`\n\tNoSuchFileErrRegexp = `no such file or directory`\n\tMkdirFailErrRegexp  = `.* not a directory`\n)\n"
  },
  {
    "path": "systemerrmessages_windows.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Copyright 2014 Cloudbase Solutions SRL\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils\n\n// The following are strings/regex-es which match common Windows error messages\n// that may be returned in case of failed calls to the system.\n// Any extra leading/trailing regex-es are left to be added by the developer.\nconst (\n\tNoSuchUserErrRegexp = `No mapping between account names and security IDs was done\\.`\n\tNoSuchFileErrRegexp = `The system cannot find the (file|path) specified\\.`\n\tMkdirFailErrRegexp  = `mkdir .*` + NoSuchFileErrRegexp\n)\n"
  },
  {
    "path": "tailer/export_test.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage tailer\n\nvar (\n\tBufferSize    = &bufferSize\n\tNewTestTailer = newTailer\n)\n"
  },
  {
    "path": "tailer/package_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage tailer_test\n\nimport (\n\t\"testing\"\n\n\tgc \"gopkg.in/check.v1\"\n)\n\nfunc TestPackage(t *testing.T) {\n\tgc.TestingT(t)\n}\n"
  },
  {
    "path": "tailer/tailer.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage tailer\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"io\"\n\t\"os\"\n\t\"time\"\n\n\t\"gopkg.in/tomb.v1\"\n)\n\nconst (\n\tdefaultBufferSize = 4096\n\tpolltime          = time.Second\n\tdelimiter         = '\\n'\n)\n\nvar (\n\tbufferSize = defaultBufferSize\n\tdelimiters = []byte{delimiter}\n)\n\n// TailerFilterFunc decides if a line shall be tailed (func is nil or\n// returns true) of shall be omitted (func returns false).\ntype TailerFilterFunc func(line []byte) bool\n\n// Tailer reads an input line by line an tails them into the passed Writer.\n// The lines have to be terminated with a newline.\ntype Tailer struct {\n\ttomb        tomb.Tomb\n\treadSeeker  io.ReadSeeker\n\treader      *bufio.Reader\n\twriteCloser io.WriteCloser\n\twriter      *bufio.Writer\n\tfilter      TailerFilterFunc\n\tpolltime    time.Duration\n}\n\n// NewTailer starts a Tailer which reads strings from the passed\n// ReadSeeker line by line. If a filter function is specified the read\n// lines are filtered. 
The matching lines are written to the passed\n// Writer.\nfunc NewTailer(readSeeker io.ReadSeeker, writer io.Writer, filter TailerFilterFunc) *Tailer {\n\treturn newTailer(readSeeker, writer, filter, polltime)\n}\n\n// newTailer starts a Tailer like NewTailer but allows the setting of\n// the read buffer size and the time between pollings for testing.\nfunc newTailer(readSeeker io.ReadSeeker, writer io.Writer,\n\tfilter TailerFilterFunc, polltime time.Duration) *Tailer {\n\tt := &Tailer{\n\t\treadSeeker: readSeeker,\n\t\treader:     bufio.NewReaderSize(readSeeker, bufferSize),\n\t\twriter:     bufio.NewWriter(writer),\n\t\tfilter:     filter,\n\t\tpolltime:   polltime,\n\t}\n\tgo func() {\n\t\tdefer t.tomb.Done()\n\t\tt.tomb.Kill(t.loop())\n\t}()\n\treturn t\n}\n\n// Stop tells the tailer to stop working.\nfunc (t *Tailer) Stop() error {\n\tt.tomb.Kill(nil)\n\treturn t.tomb.Wait()\n}\n\n// Wait waits until the tailer is stopped due to command\n// or an error. In case of an error it returns the reason.\nfunc (t *Tailer) Wait() error {\n\treturn t.tomb.Wait()\n}\n\n// Dead returns the channel that can be used to wait until\n// the tailer is stopped.\nfunc (t *Tailer) Dead() <-chan struct{} {\n\treturn t.tomb.Dead()\n}\n\n// Err returns a possible error.\nfunc (t *Tailer) Err() error {\n\treturn t.tomb.Err()\n}\n\n// loop writes the last lines based on the buffer size to the\n// writer and then polls for more data to write it to the\n// writer too.\nfunc (t *Tailer) loop() error {\n\t// Start polling.\n\t// TODO(mue) 2013-12-06\n\t// Handling of read-seeker/files being truncated during\n\t// tailing is currently missing!\n\ttimer := time.NewTimer(0)\n\tfor {\n\t\tselect {\n\t\tcase <-t.tomb.Dying():\n\t\t\treturn nil\n\t\tcase <-timer.C:\n\t\t\tfor {\n\t\t\t\tline, readErr := t.readLine()\n\t\t\t\t_, writeErr := t.writer.Write(line)\n\t\t\t\tif writeErr != nil {\n\t\t\t\t\treturn writeErr\n\t\t\t\t}\n\t\t\t\tif readErr != nil {\n\t\t\t\t\tif readErr != io.EOF 
{\n\t\t\t\t\t\treturn readErr\n\t\t\t\t\t}\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif writeErr := t.writer.Flush(); writeErr != nil {\n\t\t\t\treturn writeErr\n\t\t\t}\n\t\t\ttimer.Reset(t.polltime)\n\t\t}\n\t}\n}\n\n// SeekLastLines sets the read position of the ReadSeeker to the\n// wanted number of filtered lines before the end.\nfunc SeekLastLines(readSeeker io.ReadSeeker, lines uint, filter TailerFilterFunc) error {\n\toffset, err := readSeeker.Seek(0, os.SEEK_END)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif lines == 0 {\n\t\t// We are done, just seeking to the end is sufficient.\n\t\treturn nil\n\t}\n\tseekPos := int64(0)\n\tfound := uint(0)\n\tbuffer := make([]byte, bufferSize)\nSeekLoop:\n\tfor offset > 0 {\n\t\t// buffer contains the data left over from the\n\t\t// previous iteration.\n\t\tspace := cap(buffer) - len(buffer)\n\t\tif space < bufferSize {\n\t\t\t// Grow buffer.\n\t\t\tnewBuffer := make([]byte, len(buffer), cap(buffer)*2)\n\t\t\tcopy(newBuffer, buffer)\n\t\t\tbuffer = newBuffer\n\t\t\tspace = cap(buffer) - len(buffer)\n\t\t}\n\t\tif int64(space) > offset {\n\t\t\t// Use exactly the right amount of space if there's\n\t\t\t// only a small amount remaining.\n\t\t\tspace = int(offset)\n\t\t}\n\t\t// Copy data remaining from last time to the end of the buffer,\n\t\t// so we can read into the right place.\n\t\tcopy(buffer[space:cap(buffer)], buffer)\n\t\tbuffer = buffer[0 : len(buffer)+space]\n\t\toffset -= int64(space)\n\t\t_, err := readSeeker.Seek(offset, os.SEEK_SET)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t_, err = io.ReadFull(readSeeker, buffer[0:space])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Find the end of the last line in the buffer.\n\t\t// This will discard any unterminated line at the end\n\t\t// of the file.\n\t\tend := bytes.LastIndex(buffer, delimiters)\n\t\tif end == -1 {\n\t\t\t// No end of line found - discard incomplete\n\t\t\t// line and continue looking. 
If this happens\n\t\t\t// at the beginning of the file, we don't care\n\t\t\t// because we're going to stop anyway.\n\t\t\tbuffer = buffer[:0]\n\t\t\tcontinue\n\t\t}\n\t\tend++\n\t\tfor {\n\t\t\tstart := bytes.LastIndex(buffer[0:end-1], delimiters)\n\t\t\tif start == -1 && offset >= 0 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tstart++\n\t\t\tif filter == nil || filter(buffer[start:end]) {\n\t\t\t\tfound++\n\t\t\t\tif found >= lines {\n\t\t\t\t\tseekPos = offset + int64(start)\n\t\t\t\t\tbreak SeekLoop\n\t\t\t\t}\n\t\t\t}\n\t\t\tend = start\n\t\t}\n\t\t// Leave the last line in buffer, as we don't know whether\n\t\t// it's complete or not.\n\t\tbuffer = buffer[0:end]\n\t}\n\t// Final positioning.\n\treadSeeker.Seek(seekPos, os.SEEK_SET)\n\treturn nil\n}\n\n// readLine reads the next valid line from the reader, even if it is\n// larger than the reader buffer.\nfunc (t *Tailer) readLine() ([]byte, error) {\n\tfor {\n\t\tslice, err := t.reader.ReadSlice(delimiter)\n\t\tif err == nil {\n\t\t\tif t.isValid(slice) {\n\t\t\t\treturn slice, nil\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tline := append([]byte(nil), slice...)\n\t\tfor err == bufio.ErrBufferFull {\n\t\t\tslice, err = t.reader.ReadSlice(delimiter)\n\t\t\tline = append(line, slice...)\n\t\t}\n\t\tswitch err {\n\t\tcase nil:\n\t\t\tif t.isValid(line) {\n\t\t\t\treturn line, nil\n\t\t\t}\n\t\tcase io.EOF:\n\t\t\t// EOF without delimiter, step back.\n\t\t\tt.readSeeker.Seek(-int64(len(line)), os.SEEK_CUR)\n\t\t\treturn nil, err\n\t\tdefault:\n\t\t\treturn nil, err\n\t\t}\n\t}\n}\n\n// isValid checks if the passed line is valid by checking if the\n// line has content, the filter function is nil or it returns true.\nfunc (t *Tailer) isValid(line []byte) bool {\n\tif t.filter == nil {\n\t\treturn true\n\t}\n\treturn t.filter(line)\n}\n"
  },
  {
    "path": "tailer/tailer_test.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage tailer_test\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/juju/testing\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4/tailer\"\n)\n\ntype tailerSuite struct {\n\ttesting.IsolationSuite\n}\n\nvar _ = gc.Suite(&tailerSuite{})\n\nvar alphabetData = []string{\n\t\"alpha alpha\\n\",\n\t\"bravo bravo\\n\",\n\t\"charlie charlie\\n\",\n\t\"delta delta\\n\",\n\t\"echo echo\\n\",\n\t\"foxtrott foxtrott\\n\",\n\t\"golf golf\\n\",\n\t\"hotel hotel\\n\",\n\t\"india india\\n\",\n\t\"juliet juliet\\n\",\n\t\"kilo kilo\\n\",\n\t\"lima lima\\n\",\n\t\"mike mike\\n\",\n\t\"november november\\n\",\n\t\"oscar oscar\\n\",\n\t\"papa papa\\n\",\n\t\"quebec quebec\\n\",\n\t\"romeo romeo\\n\",\n\t\"sierra sierra\\n\",\n\t\"tango tango\\n\",\n\t\"uniform uniform\\n\",\n\t\"victor victor\\n\",\n\t\"whiskey whiskey\\n\",\n\t\"x-ray x-ray\\n\",\n\t\"yankee yankee\\n\",\n\t\"zulu zulu\\n\",\n}\n\nvar tests = []struct {\n\tdescription           string\n\tdata                  []string\n\tinitialLinesWritten   int\n\tinitialLinesRequested uint\n\tbufferSize            int\n\tfilter                tailer.TailerFilterFunc\n\tinjector              func(*tailer.Tailer, *readSeeker) func([]string)\n\tinitialCollectedData  []string\n\tappendedCollectedData []string\n\tfromStart             bool\n\terr                   string\n}{{\n\tdescription: \"lines are longer than buffer size\",\n\tdata: []string{\n\t\t\"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz\\n\",\n\t\t\"0123456789012345678901234567890123456789012345678901\\n\",\n\t},\n\tinitialLinesWritten:   1,\n\tinitialLinesRequested: 1,\n\tbufferSize:            5,\n\tinitialCollectedData: []string{\n\t\t\"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz\\n\",\n\t},\n\tappendedCollectedData: 
[]string{\n\t\t\"0123456789012345678901234567890123456789012345678901\\n\",\n\t},\n}, {\n\tdescription: \"lines are longer than buffer size, missing termination of last line\",\n\tdata: []string{\n\t\t\"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz\\n\",\n\t\t\"0123456789012345678901234567890123456789012345678901\\n\",\n\t\t\"the quick brown fox \",\n\t},\n\tinitialLinesWritten:   1,\n\tinitialLinesRequested: 1,\n\tbufferSize:            5,\n\tinitialCollectedData: []string{\n\t\t\"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz\\n\",\n\t},\n\tappendedCollectedData: []string{\n\t\t\"0123456789012345678901234567890123456789012345678901\\n\",\n\t},\n}, {\n\tdescription: \"lines are longer than buffer size, last line is terminated later\",\n\tdata: []string{\n\t\t\"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz\\n\",\n\t\t\"0123456789012345678901234567890123456789012345678901\\n\",\n\t\t\"the quick brown fox \",\n\t\t\"jumps over the lazy dog\\n\",\n\t},\n\tinitialLinesWritten:   1,\n\tinitialLinesRequested: 1,\n\tbufferSize:            5,\n\tinitialCollectedData: []string{\n\t\t\"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz\\n\",\n\t},\n\tappendedCollectedData: []string{\n\t\t\"0123456789012345678901234567890123456789012345678901\\n\",\n\t\t\"the quick brown fox jumps over the lazy dog\\n\",\n\t},\n}, {\n\tdescription: \"missing termination of last line\",\n\tdata: []string{\n\t\t\"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz\\n\",\n\t\t\"0123456789012345678901234567890123456789012345678901\\n\",\n\t\t\"the quick brown fox \",\n\t},\n\tinitialLinesWritten:   1,\n\tinitialLinesRequested: 1,\n\tinitialCollectedData: []string{\n\t\t\"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz\\n\",\n\t},\n\tappendedCollectedData: []string{\n\t\t\"0123456789012345678901234567890123456789012345678901\\n\",\n\t},\n}, {\n\tdescription: \"last line is terminated later\",\n\tdata: 
[]string{\n\t\t\"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz\\n\",\n\t\t\"0123456789012345678901234567890123456789012345678901\\n\",\n\t\t\"the quick brown fox \",\n\t\t\"jumps over the lazy dog\\n\",\n\t},\n\tinitialLinesWritten:   1,\n\tinitialLinesRequested: 1,\n\tinitialCollectedData: []string{\n\t\t\"abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz\\n\",\n\t},\n\tappendedCollectedData: []string{\n\t\t\"0123456789012345678901234567890123456789012345678901\\n\",\n\t\t\"the quick brown fox jumps over the lazy dog\\n\",\n\t},\n}, {\n\tdescription:           \"more lines already written than initially requested\",\n\tdata:                  alphabetData,\n\tinitialLinesWritten:   5,\n\tinitialLinesRequested: 3,\n\tinitialCollectedData: []string{\n\t\t\"charlie charlie\\n\",\n\t\t\"delta delta\\n\",\n\t\t\"echo echo\\n\",\n\t},\n\tappendedCollectedData: alphabetData[5:],\n}, {\n\tdescription:           \"less lines already written than initially requested\",\n\tdata:                  alphabetData,\n\tinitialLinesWritten:   3,\n\tinitialLinesRequested: 5,\n\tinitialCollectedData: []string{\n\t\t\"alpha alpha\\n\",\n\t\t\"bravo bravo\\n\",\n\t\t\"charlie charlie\\n\",\n\t},\n\tappendedCollectedData: alphabetData[3:],\n}, {\n\tdescription:           \"lines are longer than buffer size, more lines already written than initially requested\",\n\tdata:                  alphabetData,\n\tinitialLinesWritten:   5,\n\tinitialLinesRequested: 3,\n\tbufferSize:            5,\n\tinitialCollectedData: []string{\n\t\t\"charlie charlie\\n\",\n\t\t\"delta delta\\n\",\n\t\t\"echo echo\\n\",\n\t},\n\tappendedCollectedData: alphabetData[5:],\n}, {\n\tdescription:           \"ignore current lines\",\n\tdata:                  alphabetData,\n\tinitialLinesWritten:   5,\n\tbufferSize:            5,\n\tappendedCollectedData: alphabetData[5:],\n}, {\n\tdescription:           \"start from the start\",\n\tdata:                  alphabetData,\n\tinitialLinesWritten:   
5,\n\tbufferSize:            5,\n\tappendedCollectedData: alphabetData,\n\tfromStart:             true,\n}, {\n\tdescription:           \"lines are longer than buffer size, less lines already written than initially requested\",\n\tdata:                  alphabetData,\n\tinitialLinesWritten:   3,\n\tinitialLinesRequested: 5,\n\tbufferSize:            5,\n\tinitialCollectedData: []string{\n\t\t\"alpha alpha\\n\",\n\t\t\"bravo bravo\\n\",\n\t\t\"charlie charlie\\n\",\n\t},\n\tappendedCollectedData: alphabetData[3:],\n}, {\n\tdescription:           \"filter lines which contain the char 'e'\",\n\tdata:                  alphabetData,\n\tinitialLinesWritten:   10,\n\tinitialLinesRequested: 3,\n\tfilter: func(line []byte) bool {\n\t\treturn bytes.Contains(line, []byte{'e'})\n\t},\n\tinitialCollectedData: []string{\n\t\t\"echo echo\\n\",\n\t\t\"hotel hotel\\n\",\n\t\t\"juliet juliet\\n\",\n\t},\n\tappendedCollectedData: []string{\n\t\t\"mike mike\\n\",\n\t\t\"november november\\n\",\n\t\t\"quebec quebec\\n\",\n\t\t\"romeo romeo\\n\",\n\t\t\"sierra sierra\\n\",\n\t\t\"whiskey whiskey\\n\",\n\t\t\"yankee yankee\\n\",\n\t},\n}, {\n\tdescription:           \"stop tailing after 10 collected lines\",\n\tdata:                  alphabetData,\n\tinitialLinesWritten:   5,\n\tinitialLinesRequested: 3,\n\tinjector: func(t *tailer.Tailer, rs *readSeeker) func([]string) {\n\t\treturn func(lines []string) {\n\t\t\tif len(lines) == 10 {\n\t\t\t\tt.Stop()\n\t\t\t}\n\t\t}\n\t},\n\tinitialCollectedData: []string{\n\t\t\"charlie charlie\\n\",\n\t\t\"delta delta\\n\",\n\t\t\"echo echo\\n\",\n\t},\n\tappendedCollectedData: alphabetData[5:],\n}, {\n\tdescription:           \"generate an error after 10 collected lines\",\n\tdata:                  alphabetData,\n\tinitialLinesWritten:   5,\n\tinitialLinesRequested: 3,\n\tinjector: func(t *tailer.Tailer, rs *readSeeker) func([]string) {\n\t\treturn func(lines []string) {\n\t\t\tif len(lines) == 10 {\n\t\t\t\trs.setError(fmt.Errorf(\"ouch after 10 
lines\"))\n\t\t\t}\n\t\t}\n\t},\n\tinitialCollectedData: []string{\n\t\t\"charlie charlie\\n\",\n\t\t\"delta delta\\n\",\n\t\t\"echo echo\\n\",\n\t},\n\tappendedCollectedData: alphabetData[5:],\n\terr:                   \"ouch after 10 lines\",\n}, {\n\tdescription: \"more lines already written than initially requested, some empty, unfiltered\",\n\tdata: []string{\n\t\t\"one one\\n\",\n\t\t\"two two\\n\",\n\t\t\"\\n\",\n\t\t\"\\n\",\n\t\t\"three three\\n\",\n\t\t\"four four\\n\",\n\t\t\"\\n\",\n\t\t\"\\n\",\n\t\t\"five five\\n\",\n\t\t\"six six\\n\",\n\t},\n\tinitialLinesWritten:   3,\n\tinitialLinesRequested: 2,\n\tinitialCollectedData: []string{\n\t\t\"two two\\n\",\n\t\t\"\\n\",\n\t},\n\tappendedCollectedData: []string{\n\t\t\"\\n\",\n\t\t\"three three\\n\",\n\t\t\"four four\\n\",\n\t\t\"\\n\",\n\t\t\"\\n\",\n\t\t\"five five\\n\",\n\t\t\"six six\\n\",\n\t},\n}, {\n\tdescription: \"more lines already written than initially requested, some empty, those filtered\",\n\tdata: []string{\n\t\t\"one one\\n\",\n\t\t\"two two\\n\",\n\t\t\"\\n\",\n\t\t\"\\n\",\n\t\t\"three three\\n\",\n\t\t\"four four\\n\",\n\t\t\"\\n\",\n\t\t\"\\n\",\n\t\t\"five five\\n\",\n\t\t\"six six\\n\",\n\t},\n\tinitialLinesWritten:   3,\n\tinitialLinesRequested: 2,\n\tfilter: func(line []byte) bool {\n\t\treturn len(bytes.TrimSpace(line)) > 0\n\t},\n\tinitialCollectedData: []string{\n\t\t\"one one\\n\",\n\t\t\"two two\\n\",\n\t},\n\tappendedCollectedData: []string{\n\t\t\"three three\\n\",\n\t\t\"four four\\n\",\n\t\t\"five five\\n\",\n\t\t\"six six\\n\",\n\t},\n}}\n\nfunc (s *tailerSuite) TestTailer(c *gc.C) {\n\tfor i, test := range tests {\n\t\tc.Logf(\"Test #%d) %s\", i, test.description)\n\t\tbufferSize := test.bufferSize\n\t\tif bufferSize == 0 {\n\t\t\t// Default value.\n\t\t\tbufferSize = 4096\n\t\t}\n\t\ts.PatchValue(tailer.BufferSize, bufferSize)\n\t\treader, writer := io.Pipe()\n\t\tsigc := make(chan struct{}, 1)\n\t\trs := startReadSeeker(c, test.data, test.initialLinesWritten, 
sigc)\n\t\tif !test.fromStart {\n\t\t\terr := tailer.SeekLastLines(rs, test.initialLinesRequested, test.filter)\n\t\t\tc.Assert(err, gc.IsNil)\n\t\t}\n\t\ttailer := tailer.NewTestTailer(rs, writer, test.filter, 2*time.Millisecond)\n\t\tlinec := startReading(c, tailer, reader, writer)\n\n\t\t// Collect initial data.\n\t\tassertCollected(c, linec, test.initialCollectedData, nil)\n\n\t\tsigc <- struct{}{}\n\n\t\t// Collect remaining data, possibly with injection to stop\n\t\t// earlier or generate an error.\n\t\tvar injection func([]string)\n\t\tif test.injector != nil {\n\t\t\tinjection = test.injector(tailer, rs)\n\t\t}\n\n\t\tassertCollected(c, linec, test.appendedCollectedData, injection)\n\n\t\tif test.err == \"\" {\n\t\t\tc.Assert(tailer.Stop(), gc.IsNil)\n\t\t} else {\n\t\t\tc.Assert(tailer.Err(), gc.ErrorMatches, test.err)\n\t\t}\n\t}\n}\n\n// startReading starts a goroutine receiving the lines out of the reader\n// in the background and passing them to a created string channel. This\n// will used in the assertions.\nfunc startReading(c *gc.C, tailer *tailer.Tailer, reader *io.PipeReader, writer *io.PipeWriter) chan string {\n\tlinec := make(chan string)\n\t// Start goroutine for reading.\n\tgo func() {\n\t\tdefer close(linec)\n\t\treader := bufio.NewReader(reader)\n\t\tfor {\n\t\t\tline, err := reader.ReadString('\\n')\n\t\t\tswitch err {\n\t\t\tcase nil:\n\t\t\t\tlinec <- line\n\t\t\tcase io.EOF:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tc.Fail()\n\t\t\t}\n\t\t}\n\t}()\n\t// Close writer when tailer is stopped or has an error. Tailer using\n\t// components can do it the same way.\n\tgo func() {\n\t\ttailer.Wait()\n\t\twriter.Close()\n\t}()\n\treturn linec\n}\n\n// assertCollected reads lines from the string channel linec. It compares if\n// those are the one passed with compare until a timeout. If the timeout is\n// reached earlier than all lines are collected the assertion fails. 
The\n// injection function allows to interrupt the processing with a function\n// generating an error or a regular stopping during the tailing. In case the\n// linec is closed due to stopping or an error only the values so far care\n// compared. Checking the reason for termination is done in the test.\nfunc assertCollected(c *gc.C, linec chan string, compare []string, injection func([]string)) {\n\tif len(compare) == 0 {\n\t\treturn\n\t}\n\ttimeout := time.After(10 * time.Second)\n\tlines := []string{}\n\tfor {\n\t\tselect {\n\t\tcase line, ok := <-linec:\n\t\t\tif ok {\n\t\t\t\tlines = append(lines, line)\n\t\t\t\tif injection != nil {\n\t\t\t\t\tinjection(lines)\n\t\t\t\t}\n\t\t\t\tif len(lines) == len(compare) {\n\t\t\t\t\t// All data received.\n\t\t\t\t\tc.Assert(lines, gc.DeepEquals, compare)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// linec closed after stopping or error.\n\t\t\t\tc.Assert(lines, gc.DeepEquals, compare[:len(lines)])\n\t\t\t\treturn\n\t\t\t}\n\t\tcase <-timeout:\n\t\t\tif injection == nil {\n\t\t\t\tc.Fatalf(\"timeout during tailer collection\")\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n}\n\n// startReadSeeker returns a ReadSeeker for the Tailer. 
It simulates\n// reading and seeking inside a file and also simulating an error.\n// The goroutine waits for a signal that it can start writing the\n// appended lines.\nfunc startReadSeeker(c *gc.C, data []string, initialLeg int, sigc chan struct{}) *readSeeker {\n\t// Write initial lines into the buffer.\n\tvar rs readSeeker\n\tvar i int\n\tfor i = 0; i < initialLeg; i++ {\n\t\trs.write(data[i])\n\t}\n\n\tgo func() {\n\t\t<-sigc\n\n\t\tfor ; i < len(data); i++ {\n\t\t\ttime.Sleep(5 * time.Millisecond)\n\t\t\trs.write(data[i])\n\t\t}\n\t}()\n\treturn &rs\n}\n\ntype readSeeker struct {\n\tmux    sync.Mutex\n\tbuffer []byte\n\tpos    int\n\terr    error\n}\n\nfunc (r *readSeeker) write(s string) {\n\tr.mux.Lock()\n\tdefer r.mux.Unlock()\n\tr.buffer = append(r.buffer, []byte(s)...)\n}\n\nfunc (r *readSeeker) setError(err error) {\n\tr.mux.Lock()\n\tdefer r.mux.Unlock()\n\tr.err = err\n}\n\nfunc (r *readSeeker) Read(p []byte) (n int, err error) {\n\tr.mux.Lock()\n\tdefer r.mux.Unlock()\n\tif r.err != nil {\n\t\treturn 0, r.err\n\t}\n\tif r.pos >= len(r.buffer) {\n\t\treturn 0, io.EOF\n\t}\n\tn = copy(p, r.buffer[r.pos:])\n\tr.pos += n\n\treturn n, nil\n}\n\nfunc (r *readSeeker) Seek(offset int64, whence int) (ret int64, err error) {\n\tr.mux.Lock()\n\tdefer r.mux.Unlock()\n\tvar newPos int64\n\tswitch whence {\n\tcase 0:\n\t\tnewPos = offset\n\tcase 1:\n\t\tnewPos = int64(r.pos) + offset\n\tcase 2:\n\t\tnewPos = int64(len(r.buffer)) + offset\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"invalid whence: %d\", whence)\n\t}\n\tif newPos < 0 {\n\t\treturn 0, fmt.Errorf(\"negative position: %d\", newPos)\n\t}\n\tif newPos >= 1<<31 {\n\t\treturn 0, fmt.Errorf(\"position out of range: %d\", newPos)\n\t}\n\tr.pos = int(newPos)\n\treturn newPos, nil\n}\n"
  },
  {
    "path": "tar/tar.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\n// This package provides convenience helpers on top of archive/tar\n// to be able to tar/untar files with a functionality closer\n// to gnu tar command.\npackage tar\n\nimport (\n\t\"archive/tar\"\n\t\"crypto/sha1\"\n\t\"encoding/base64\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\n\t\"github.com/juju/errors\"\n\n\t\"github.com/juju/collections/set\"\n\t\"github.com/juju/utils/v4/symlink\"\n)\n\n// FindFile returns the header and ReadCloser for the entry in the\n// tarfile that matches the filename.  If nothing matches, an\n// errors.NotFound error is returned.\nfunc FindFile(tarFile io.Reader, filename string) (*tar.Header, io.Reader, error) {\n\treader := tar.NewReader(tarFile)\n\tfor {\n\t\theader, err := reader.Next()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, nil, errors.Trace(err)\n\t\t}\n\n\t\tif header.Name == filename {\n\t\t\treturn header, reader, nil\n\t\t}\n\t}\n\n\treturn nil, nil, errors.NotFoundf(filename)\n}\n\n// TarFiles writes a tar stream into target holding the files listed\n// in fileList. 
strip will be removed from the beginning of all the paths\n// when stored (much like gnu tar -C option)\n// Returns a Sha sum of the tar and nil if everything went well\n// or empty sting and error in case of error.\n// We use a base64 encoded sha1 hash, because this is the hash\n// used by RFC 3230 Digest headers in http responses\n// It is not safe to mutate files passed during this function,\n// however at least the bytes up to the inital size are written\n// successfully if no error is returned.\nfunc TarFiles(fileList []string, target io.Writer, strip string) (shaSum string, err error) {\n\tshahash := sha1.New()\n\tif err := tarAndHashFiles(fileList, target, strip, shahash); err != nil {\n\t\treturn \"\", err\n\t}\n\tencodedHash := base64.StdEncoding.EncodeToString(shahash.Sum(nil))\n\treturn encodedHash, nil\n}\n\nfunc tarAndHashFiles(fileList []string, target io.Writer, strip string, hashw io.Writer) (err error) {\n\tcheckClose := func(w io.Closer) {\n\t\tif closeErr := w.Close(); closeErr != nil && err == nil {\n\t\t\terr = fmt.Errorf(\"error closing tar writer: %v\", closeErr)\n\t\t}\n\t}\n\n\tw := io.MultiWriter(target, hashw)\n\ttarw := tar.NewWriter(w)\n\tdefer checkClose(tarw)\n\tfor _, ent := range fileList {\n\t\tif err := writeContents(ent, strip, tarw); err != nil {\n\t\t\treturn fmt.Errorf(\"write to tar file failed: %v\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\n// writeContents creates an entry for the given file\n// or directory in the given tar archive.\nfunc writeContents(fileName, strip string, tarw *tar.Writer) error {\n\tf, err := os.Open(fileName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\tfInfo, err := os.Lstat(fileName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlink := \"\"\n\n\tif fInfo.Mode()&os.ModeSymlink == os.ModeSymlink {\n\t\tlink, err = filepath.EvalSymlinks(fileName)\n\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"cannnot dereference symlink: %v\", err)\n\t\t}\n\n\t}\n\th, err := tar.FileInfoHeader(fInfo, 
link)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot create tar header for %q: %v\", fileName, err)\n\t}\n\th.Name = filepath.ToSlash(strings.TrimPrefix(fileName, strip))\n\tif err := tarw.WriteHeader(h); err != nil {\n\t\treturn fmt.Errorf(\"cannot write header for %q: %v\", fileName, err)\n\t}\n\tif fInfo.Mode()&os.ModeSymlink == os.ModeSymlink {\n\t\treturn nil\n\t}\n\tif !fInfo.IsDir() {\n\t\t// Limit data copied to inital stat size included in tar header\n\t\t// or ErrWriteTooLong is raised by archive/tar Writer.\n\t\tif _, err := io.CopyN(tarw, f, fInfo.Size()); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to write %q: %v\", fileName, err)\n\t\t}\n\t\treturn nil\n\t}\n\n\tfor {\n\t\tnames, err := f.Readdirnames(100)\n\t\t// will return at most 100 names and if less than 100 remaining\n\t\t// next call will return io.EOF and no names\n\t\tif err == io.EOF {\n\t\t\treturn nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error reading directory %q: %v\", fileName, err)\n\t\t}\n\t\tfor _, name := range names {\n\t\t\tif err := writeContents(filepath.Join(fileName, name), strip, tarw); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n}\n\nfunc createAndFill(filePath string, mode int64, content io.Reader) error {\n\tfh, err := os.Create(filePath)\n\tdefer fh.Close()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"some of the tar contents cannot be written to disk: %v\", err)\n\t}\n\t_, err = io.Copy(fh, content)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed while reading tar contents: %v\", err)\n\t}\n\terr = os.Chmod(fh.Name(), os.FileMode(mode))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cannot set proper mode on file %q: %v\", filePath, err)\n\t}\n\tif err := fh.Sync(); err != nil {\n\t\treturn fmt.Errorf(\"failed to sync contents of file %v: %v\", filePath, err)\n\t}\n\tif err := fh.Close(); err != nil {\n\t\treturn fmt.Errorf(\"failed to close file %v: %v\", filePath, err)\n\t}\n\treturn nil\n}\n\n// UntarFiles will extract the 
contents of tarFile using\n// outputFolder as root\nfunc UntarFiles(tarFile io.Reader, outputFolder string) error {\n\ttr := tar.NewReader(tarFile)\n\t// Ensure we still make directories for any files where we haven't\n\t// already seen the directory (for example, juju backup generates\n\t// files like this).\n\tseenDirs := set.NewStrings()\n\n\tmaybeMkParentDir := func(path string) error {\n\t\tdirName := filepath.Dir(path)\n\t\tif seenDirs.Contains(dirName) {\n\t\t\treturn nil\n\t\t}\n\t\terr := os.MkdirAll(dirName, os.FileMode(0755))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"cannot create parent directory for %q: %v\", path, err)\n\t\t}\n\t\tseenDirs.Add(dirName)\n\t\treturn nil\n\t}\n\n\tfor {\n\t\thdr, err := tr.Next()\n\t\tif err == io.EOF {\n\t\t\t// end of tar archive\n\t\t\treturn nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed while reading tar header: %v\", err)\n\t\t}\n\t\tfullPath := filepath.Join(outputFolder, hdr.Name)\n\t\tswitch hdr.Typeflag {\n\t\tcase tar.TypeDir:\n\t\t\tif err = os.MkdirAll(fullPath, os.FileMode(hdr.Mode)); err != nil {\n\t\t\t\treturn fmt.Errorf(\"cannot extract directory %q: %v\", fullPath, err)\n\t\t\t}\n\t\t\tseenDirs.Add(fullPath)\n\n\t\tcase tar.TypeSymlink:\n\t\t\tif err = maybeMkParentDir(fullPath); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err = symlink.New(hdr.Linkname, fullPath); err != nil {\n\t\t\t\treturn fmt.Errorf(\"cannot extract symlink %q to %q: %v\", hdr.Linkname, fullPath, err)\n\t\t\t}\n\t\t\tcontinue\n\n\t\tcase tar.TypeReg, tar.TypeRegA:\n\t\t\tif err = maybeMkParentDir(fullPath); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err = createAndFill(fullPath, hdr.Mode, tr); err != nil {\n\t\t\t\treturn fmt.Errorf(\"cannot extract file %q: %v\", fullPath, err)\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "tar/tar_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage tar\n\nimport (\n\t\"archive/tar\"\n\t\"bytes\"\n\t\"crypto/sha1\"\n\t\"encoding/base64\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\tstdtesting \"testing\"\n\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n)\n\nfunc TestPackage(t *stdtesting.T) {\n\tgc.TestingT(t)\n}\n\nvar _ = gc.Suite(&TarSuite{})\n\ntype TarSuite struct {\n\ttesting.IsolationSuite\n\tcwd       string\n\ttestFiles []string\n}\n\nfunc (t *TarSuite) SetUpTest(c *gc.C) {\n\tt.cwd = c.MkDir()\n\tt.IsolationSuite.SetUpTest(c)\n}\n\nfunc (t *TarSuite) createTestFiles(c *gc.C) {\n\ttarDirE := filepath.Join(t.cwd, \"TarDirectoryEmpty\")\n\terr := os.Mkdir(tarDirE, os.FileMode(0755))\n\tc.Check(err, gc.IsNil)\n\n\ttarDirP := filepath.Join(t.cwd, \"TarDirectoryPopulated\")\n\terr = os.Mkdir(tarDirP, os.FileMode(0755))\n\tc.Check(err, gc.IsNil)\n\n\ttarlink1 := filepath.Join(t.cwd, \"TarLink\")\n\terr = os.Symlink(tarDirP, tarlink1)\n\tc.Check(err, gc.IsNil)\n\n\ttarSubFile1 := filepath.Join(tarDirP, \"TarSubFile1\")\n\ttarSubFile1Handle, err := os.Create(tarSubFile1)\n\tc.Check(err, gc.IsNil)\n\ttarSubFile1Handle.WriteString(\"TarSubFile1\")\n\ttarSubFile1Handle.Close()\n\n\ttarSublink1 := filepath.Join(tarDirP, \"TarSubLink\")\n\terr = os.Symlink(tarSubFile1, tarSublink1)\n\tc.Check(err, gc.IsNil)\n\n\ttarSubDir := filepath.Join(tarDirP, \"TarDirectoryPopulatedSubDirectory\")\n\terr = os.Mkdir(tarSubDir, os.FileMode(0755))\n\tc.Check(err, gc.IsNil)\n\n\ttarFile1 := filepath.Join(t.cwd, \"TarFile1\")\n\ttarFile1Handle, err := os.Create(tarFile1)\n\tc.Check(err, gc.IsNil)\n\ttarFile1Handle.WriteString(\"TarFile1\")\n\ttarFile1Handle.Close()\n\n\ttarFile2 := filepath.Join(t.cwd, \"TarFile2\")\n\ttarFile2Handle, err := os.Create(tarFile2)\n\tc.Check(err, 
gc.IsNil)\n\ttarFile2Handle.WriteString(\"TarFile2\")\n\ttarFile2Handle.Close()\n\tt.testFiles = []string{tarDirE, tarDirP, tarlink1, tarFile1, tarFile2}\n\n}\n\nfunc (t *TarSuite) removeTestFiles(c *gc.C) {\n\tfor _, removable := range t.testFiles {\n\t\terr := os.RemoveAll(removable)\n\t\tc.Assert(err, gc.IsNil)\n\t}\n}\n\ntype expectedTarContents struct {\n\tName string\n\tBody string\n}\n\nvar testExpectedTarContents = []expectedTarContents{\n\t{\"TarDirectoryEmpty\", \"\"},\n\t{\"TarDirectoryPopulated\", \"\"},\n\t{\"TarLink\", \"\"},\n\t{\"TarDirectoryPopulated/TarSubFile1\", \"TarSubFile1\"},\n\t{\"TarDirectoryPopulated/TarSubLink\", \"\"},\n\t{\"TarDirectoryPopulated/TarDirectoryPopulatedSubDirectory\", \"\"},\n\t{\"TarFile1\", \"TarFile1\"},\n\t{\"TarFile2\", \"TarFile2\"},\n}\n\n// Assert thar contents checks that the tar reader provided contains the\n// Expected files\n// expectedContents: is a slice of the filenames with relative paths that are\n// expected to be on the tar file\n// tarFile: is the path of the file to be checked\nfunc (t *TarSuite) assertTarContents(c *gc.C, expectedContents []expectedTarContents,\n\ttarFile io.Reader) {\n\ttr := tar.NewReader(tarFile)\n\ttarContents := make(map[string]string)\n\t// Iterate through the files in the archive.\n\tfor {\n\t\thdr, err := tr.Next()\n\t\tif err == io.EOF {\n\t\t\t// end of tar archive\n\t\t\tbreak\n\t\t}\n\t\tc.Assert(err, gc.IsNil)\n\t\tbuf, err := ioutil.ReadAll(tr)\n\t\tc.Assert(err, gc.IsNil)\n\t\ttarContents[hdr.Name] = string(buf)\n\t}\n\tfor _, expectedContent := range expectedContents {\n\t\tfullExpectedContent := strings.TrimPrefix(expectedContent.Name, string(os.PathSeparator))\n\t\tbody, ok := tarContents[fullExpectedContent]\n\t\tc.Log(tarContents)\n\t\tc.Log(expectedContents)\n\t\tc.Log(fmt.Sprintf(\"checking for presence of %q on tar file\", fullExpectedContent))\n\t\tc.Assert(ok, gc.Equals, true)\n\t\tif expectedContent.Body != \"\" {\n\t\t\tc.Log(\"Also checking the file 
contents\")\n\t\t\tc.Assert(body, gc.Equals, expectedContent.Body)\n\t\t}\n\t}\n\n}\n\nfunc (t *TarSuite) assertFilesWhereUntared(c *gc.C,\n\texpectedContents []expectedTarContents,\n\ttarOutputFolder string) {\n\ttarContents := make(map[string]string)\n\tvar walkFn filepath.WalkFunc\n\twalkFn = func(path string, finfo os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfileName := strings.TrimPrefix(path, tarOutputFolder)\n\t\tfileName = strings.TrimPrefix(fileName, string(os.PathSeparator))\n\t\tc.Log(fileName)\n\t\tif fileName == \"\" {\n\t\t\treturn nil\n\t\t}\n\t\tif finfo.IsDir() || finfo.Mode()&os.ModeSymlink == os.ModeSymlink {\n\t\t\ttarContents[fileName] = \"\"\n\t\t} else {\n\t\t\treadable, err := os.Open(path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer readable.Close()\n\t\t\tbuf, err := ioutil.ReadAll(readable)\n\t\t\tc.Assert(err, gc.IsNil)\n\t\t\ttarContents[fileName] = string(buf)\n\t\t}\n\t\treturn nil\n\t}\n\tfilepath.Walk(tarOutputFolder, walkFn)\n\tfor _, expectedContent := range expectedContents {\n\t\tfullExpectedContent := strings.TrimPrefix(expectedContent.Name, string(os.PathSeparator))\n\t\texpectedPath := filepath.Join(tarOutputFolder, fullExpectedContent)\n\t\t_, err := os.Lstat(expectedPath)\n\t\tc.Assert(err, gc.Equals, nil)\n\t\tbody, ok := tarContents[fullExpectedContent]\n\t\tc.Log(fmt.Sprintf(\"checking for presence of %q on untar files\", fullExpectedContent))\n\t\tc.Assert(ok, gc.Equals, true)\n\t\tif expectedContent.Body != \"\" {\n\t\t\tc.Log(\"Also checking the file contents\")\n\t\t\tc.Assert(body, gc.Equals, expectedContent.Body)\n\t\t}\n\t}\n\n}\n\nfunc shaSumFile(c *gc.C, fileToSum io.Reader) string {\n\tshahash := sha1.New()\n\t_, err := io.Copy(shahash, fileToSum)\n\tc.Assert(err, gc.IsNil)\n\treturn base64.StdEncoding.EncodeToString(shahash.Sum(nil))\n}\n\n// Tar\nfunc (t *TarSuite) TestTarFiles(c *gc.C) {\n\tt.createTestFiles(c)\n\tvar outputTar 
bytes.Buffer\n\ttrimPath := fmt.Sprintf(\"%s/\", t.cwd)\n\tshaSum, err := TarFiles(t.testFiles, &outputTar, trimPath)\n\tc.Check(err, gc.IsNil)\n\toutputBytes := outputTar.Bytes()\n\tfileShaSum := shaSumFile(c, bytes.NewBuffer(outputBytes))\n\tc.Assert(shaSum, gc.Equals, fileShaSum)\n\tt.removeTestFiles(c)\n\tt.assertTarContents(c, testExpectedTarContents, bytes.NewBuffer(outputBytes))\n}\n\nfunc (t *TarSuite) TestSymlinksTar(c *gc.C) {\n\ttarDirP := filepath.Join(t.cwd, \"TarDirectory\")\n\terr := os.Mkdir(tarDirP, os.FileMode(0755))\n\tc.Check(err, gc.IsNil)\n\n\ttarlink1 := filepath.Join(t.cwd, \"TarLink\")\n\terr = os.Symlink(tarDirP, tarlink1)\n\tc.Check(err, gc.IsNil)\n\ttestFiles := []string{tarDirP, tarlink1}\n\n\tvar outputTar bytes.Buffer\n\ttrimPath := fmt.Sprintf(\"%s/\", t.cwd)\n\t_, err = TarFiles(testFiles, &outputTar, trimPath)\n\tc.Check(err, gc.IsNil)\n\n\toutputBytes := outputTar.Bytes()\n\ttr := tar.NewReader(bytes.NewBuffer(outputBytes))\n\tsymlinks := 0\n\tfor {\n\t\thdr, err := tr.Next()\n\t\tif err == io.EOF {\n\t\t\t// end of tar archive\n\t\t\tbreak\n\t\t}\n\t\tc.Assert(err, gc.IsNil)\n\t\tif hdr.Typeflag == tar.TypeSymlink {\n\t\t\tsymlinks += 1\n\t\t\tc.Assert(hdr.Linkname, gc.Equals, tarDirP)\n\t\t}\n\t}\n\tc.Assert(symlinks, gc.Equals, 1)\n\n}\n\n// UnTar\nfunc (t *TarSuite) TestUnTarFilesUncompressed(c *gc.C) {\n\tt.createTestFiles(c)\n\tvar outputTar bytes.Buffer\n\ttrimPath := fmt.Sprintf(\"%s/\", t.cwd)\n\t_, err := TarFiles(t.testFiles, &outputTar, trimPath)\n\tc.Check(err, gc.IsNil)\n\tt.removeTestFiles(c)\n\n\toutputDir := filepath.Join(t.cwd, \"TarOuputFolder\")\n\terr = os.Mkdir(outputDir, os.FileMode(0755))\n\tc.Check(err, gc.IsNil)\n\n\tUntarFiles(&outputTar, outputDir)\n\tt.assertFilesWhereUntared(c, testExpectedTarContents, outputDir)\n}\n\nfunc (t *TarSuite) TestFindFileFound(c *gc.C) {\n\tt.createTestFiles(c)\n\tvar outputTar bytes.Buffer\n\ttrimPath := fmt.Sprintf(\"%s/\", t.cwd)\n\t_, err := TarFiles(t.testFiles, 
&outputTar, trimPath)\n\tc.Assert(err, gc.IsNil)\n\tt.removeTestFiles(c)\n\n\t_, file, err := FindFile(&outputTar, \"TarDirectoryPopulated/TarSubFile1\")\n\tc.Assert(err, gc.IsNil)\n\n\tdata, err := ioutil.ReadAll(file)\n\tc.Assert(err, gc.IsNil)\n\n\tc.Check(string(data), gc.Equals, \"TarSubFile1\")\n}\n\nfunc (t *TarSuite) TestFindFileNotFound(c *gc.C) {\n\tt.createTestFiles(c)\n\tvar outputTar bytes.Buffer\n\ttrimPath := fmt.Sprintf(\"%s/\", t.cwd)\n\t_, err := TarFiles(t.testFiles, &outputTar, trimPath)\n\tc.Assert(err, gc.IsNil)\n\tt.removeTestFiles(c)\n\n\t_, _, err = FindFile(&outputTar, \"does_not_exist\")\n\n\tc.Check(err, gc.ErrorMatches, \"does_not_exist not found\")\n}\n\nfunc (t *TarSuite) TestUntarFilesHeadersIgnored(c *gc.C) {\n\tvar buf bytes.Buffer\n\tw := tar.NewWriter(&buf)\n\terr := w.WriteHeader(&tar.Header{\n\t\tName:     \"pax_global_header\",\n\t\tTypeflag: tar.TypeXGlobalHeader,\n\t})\n\tc.Assert(err, gc.IsNil)\n\terr = w.Flush()\n\tc.Assert(err, gc.IsNil)\n\n\terr = UntarFiles(&buf, t.cwd)\n\tc.Assert(err, jc.ErrorIsNil)\n\terr = filepath.Walk(t.cwd, func(path string, finfo os.FileInfo, err error) error {\n\t\tif path != t.cwd {\n\t\t\treturn fmt.Errorf(\"unexpected file: %v\", path)\n\t\t}\n\t\treturn err\n\t})\n\tc.Assert(err, gc.IsNil)\n}\n\nfunc (t *TarSuite) TestUntarFilesWithMissingDirectories(c *gc.C) {\n\tvar buf bytes.Buffer\n\tw := tar.NewWriter(&buf)\n\tcontents := []byte(\"file contents\")\n\terr := w.WriteHeader(&tar.Header{\n\t\tName:     \"missingdir/otherdir/file\",\n\t\tTypeflag: tar.TypeReg,\n\t\tMode:     0700,\n\t\tSize:     int64(len(contents)),\n\t})\n\tc.Assert(err, jc.ErrorIsNil)\n\t_, err = w.Write(contents)\n\tc.Assert(err, jc.ErrorIsNil)\n\n\terr = w.WriteHeader(&tar.Header{\n\t\tName:     \"missingdir/otherdir/link\",\n\t\tTypeflag: tar.TypeSymlink,\n\t\tLinkname: \"viginti\",\n\t})\n\tc.Assert(err, jc.ErrorIsNil)\n\terr = w.Flush()\n\tc.Assert(err, jc.ErrorIsNil)\n\n\terr = UntarFiles(&buf, 
t.cwd)\n\tc.Assert(err, jc.ErrorIsNil)\n\n\tvar names []string\n\terr = filepath.Walk(t.cwd, func(path string, finfo os.FileInfo, err error) error {\n\t\tnames = append(names, path[len(t.cwd):])\n\t\treturn nil\n\t})\n\tc.Assert(err, jc.ErrorIsNil)\n\n\texpected := []string{\n\t\t\"\",\n\t\t\"/missingdir\",\n\t\t\"/missingdir/otherdir\",\n\t\t\"/missingdir/otherdir/file\",\n\t\t\"/missingdir/otherdir/link\",\n\t}\n\tc.Assert(names, gc.DeepEquals, expected)\n}\n"
  },
  {
    "path": "timer.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Copyright 2015 Cloudbase Solutions SRL\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils\n\nimport (\n\t\"math/rand\"\n\t\"time\"\n\n\t\"github.com/juju/clock\"\n)\n\n// Countdown implements a timer that will call a provided function.\n// after a internally stored duration. The steps as well as min and max\n// durations are declared upon initialization and depend on\n// the particular implementation.\n//\n// TODO(katco): 2016-08-09: This type is deprecated: lp:1611427\ntype Countdown interface {\n\t// Reset stops the timer and resets its duration to the minimum one.\n\t// Start must be called to start the timer again.\n\tReset()\n\n\t// Start starts the internal timer.\n\t// At the end of the timer, if Reset hasn't been called in the mean time\n\t// Func will be called and the duration is increased for the next call.\n\tStart()\n}\n\n// NewBackoffTimer creates and initializes a new BackoffTimer\n// A backoff timer starts at min and gets multiplied by factor\n// until it reaches max. Jitter determines whether a small\n// randomization is added to the duration.\n//\n// TODO(katco): 2016-08-09: This type is deprecated: lp:1611427\nfunc NewBackoffTimer(config BackoffTimerConfig) *BackoffTimer {\n\treturn &BackoffTimer{\n\t\tconfig:          config,\n\t\tcurrentDuration: config.Min,\n\t}\n}\n\n// BackoffTimer implements Countdown.\n// A backoff timer starts at min and gets multiplied by factor\n// until it reaches max. 
Jitter determines whether a small\n// randomization is added to the duration.\n//\n// TODO(katco): 2016-08-09: This type is deprecated: lp:1611427\ntype BackoffTimer struct {\n\tconfig BackoffTimerConfig\n\n\ttimer           clock.Timer\n\tcurrentDuration time.Duration\n}\n\n// BackoffTimerConfig is a helper struct for backoff timer\n// that encapsulates config information.\n//\n// TODO(katco): 2016-08-09: This type is deprecated: lp:1611427\ntype BackoffTimerConfig struct {\n\t// The minimum duration after which Func is called.\n\tMin time.Duration\n\n\t// The maximum duration after which Func is called.\n\tMax time.Duration\n\n\t// Determines whether a small randomization is applied to\n\t// the duration.\n\tJitter bool\n\n\t// The factor by which you want the duration to increase\n\t// every time.\n\tFactor int64\n\n\t// Func is the function that will be called when the countdown reaches 0.\n\tFunc func()\n\n\t// Clock provides the AfterFunc function used to call func.\n\t// It is exposed here so it's easier to mock it in tests.\n\tClock clock.Clock\n}\n\n// Start implements the Timer interface.\n// Any existing timer execution is stopped before\n// a new one is created.\nfunc (t *BackoffTimer) Start() {\n\tif t.timer != nil {\n\t\tt.timer.Stop()\n\t}\n\tt.timer = t.config.Clock.AfterFunc(t.currentDuration, t.config.Func)\n\n\t// Since it's a backoff timer we will increase\n\t// the duration after each signal.\n\tt.increaseDuration()\n}\n\n// Reset implements the Timer interface.\nfunc (t *BackoffTimer) Reset() {\n\tif t.timer != nil {\n\t\tt.timer.Stop()\n\t}\n\tif t.currentDuration > t.config.Min {\n\t\tt.currentDuration = t.config.Min\n\t}\n}\n\n// increaseDuration will increase the duration based on\n// the current value and the factor. 
If jitter is true\n// it will add a 0.3% jitter to the final value.\nfunc (t *BackoffTimer) increaseDuration() {\n\tcurrent := int64(t.currentDuration)\n\tnextDuration := time.Duration(current * t.config.Factor)\n\tif t.config.Jitter {\n\t\t// Get a factor in [-1; 1].\n\t\trandFactor := (rand.Float64() * 2) - 1\n\t\tjitter := float64(nextDuration) * randFactor * 0.03\n\t\tnextDuration = nextDuration + time.Duration(jitter)\n\t}\n\tif nextDuration > t.config.Max {\n\t\tnextDuration = t.config.Max\n\t}\n\tt.currentDuration = nextDuration\n}\n"
  },
  {
    "path": "timer_test.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Copyright 2015 Cloudbase Solutions SRL\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils_test\n\nimport (\n\t\"math\"\n\t\"time\"\n\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/clock\"\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\t\"github.com/juju/utils/v4\"\n)\n\ntype TestStdTimer struct {\n\tstdStub *testing.Stub\n}\n\nfunc (t *TestStdTimer) Stop() bool {\n\tt.stdStub.AddCall(\"Stop\")\n\treturn true\n}\n\nfunc (t *TestStdTimer) Reset(d time.Duration) bool {\n\tt.stdStub.AddCall(\"Reset\", d)\n\treturn true\n}\n\nfunc (t *TestStdTimer) Chan() <-chan time.Time {\n\tpanic(\"should not be called\")\n}\n\ntype timerSuite struct {\n\tbaseSuite        testing.CleanupSuite\n\ttimer            *utils.BackoffTimer\n\tafterFuncCalls   int64\n\tproperFuncCalled bool\n\tstub             *testing.Stub\n\n\tmin    time.Duration\n\tmax    time.Duration\n\tfactor int64\n}\n\nvar _ = gc.Suite(&timerSuite{})\n\ntype mockClock struct {\n\tstub             *testing.Stub\n\tc                *gc.C\n\tafterFuncCalls   *int64\n\tproperFuncCalled *bool\n}\n\n// These 2 methods are not used here but are needed to satisfy the intergface\nfunc (c *mockClock) Now() time.Time                         { return time.Now() }\nfunc (c *mockClock) After(d time.Duration) <-chan time.Time { return time.After(d) }\nfunc (c *mockClock) NewTimer(d time.Duration) clock.Timer {\n\tpanic(\"should not be called\")\n}\n\nfunc (c *mockClock) AfterFunc(d time.Duration, f func()) clock.Timer {\n\t*c.afterFuncCalls++\n\tf()\n\tc.c.Assert(*c.properFuncCalled, jc.IsTrue)\n\t*c.properFuncCalled = false\n\treturn &TestStdTimer{c.stub}\n}\n\nfunc (s *timerSuite) SetUpTest(c *gc.C) {\n\ts.baseSuite.SetUpTest(c)\n\ts.stub = nil\n\ts.timer = nil\n}\n\nfunc (s *timerSuite) setup(c *gc.C) {\n\ts.afterFuncCalls = 0\n\ts.stub = &testing.Stub{}\n\n\t// This along with the checks in afterFuncMock below 
assert\n\t// that mockFunc is indeed passed as the argument to afterFuncMock\n\t// to be executed.\n\tmockFunc := func() { s.properFuncCalled = true }\n\tmockClock := &mockClock{\n\t\tstub:             s.stub,\n\t\tc:                c,\n\t\tafterFuncCalls:   &s.afterFuncCalls,\n\t\tproperFuncCalled: &s.properFuncCalled,\n\t}\n\n\ts.min = 2 * time.Second\n\ts.max = 16 * time.Second\n\ts.factor = 2\n\ts.timer = utils.NewBackoffTimer(\n\t\tutils.BackoffTimerConfig{\n\t\t\tMin:    s.min,\n\t\t\tMax:    s.max,\n\t\t\tJitter: false,\n\t\t\tFactor: s.factor,\n\t\t\tFunc:   mockFunc,\n\t\t\tClock:  mockClock,\n\t\t},\n\t)\n}\n\nfunc (s *timerSuite) TestStart(c *gc.C) {\n\ts.setup(c)\n\ts.timer.Start()\n\ts.testStart(c, 1, 1)\n}\n\nfunc (s *timerSuite) TestMultipleStarts(c *gc.C) {\n\ts.setup(c)\n\ts.timer.Start()\n\ts.testStart(c, 1, 1)\n\n\ts.timer.Start()\n\ts.checkStopCalls(c, 1)\n\ts.testStart(c, 2, 2)\n\n\ts.timer.Start()\n\ts.checkStopCalls(c, 2)\n\ts.testStart(c, 3, 3)\n}\n\nfunc (s *timerSuite) TestResetNoStart(c *gc.C) {\n\ts.setup(c)\n\ts.timer.Reset()\n\tcurrentDuration := utils.ExposeBackoffTimerDuration(s.timer)\n\tc.Assert(currentDuration, gc.Equals, s.min)\n}\n\nfunc (s *timerSuite) TestResetAndStart(c *gc.C) {\n\ts.setup(c)\n\ts.timer.Reset()\n\tcurrentDuration := utils.ExposeBackoffTimerDuration(s.timer)\n\tc.Assert(currentDuration, gc.Equals, s.min)\n\n\t// These variables are used to track the number\n\t// of afterFuncCalls(signalCallsNo) and the number\n\t// of Stop calls(resetStopCallsNo + signalCallsNo)\n\tresetStopCallsNo := 0\n\tsignalCallsNo := 0\n\n\tsignalCallsNo++\n\ts.timer.Start()\n\ts.testStart(c, 1, 1)\n\n\tresetStopCallsNo++\n\ts.timer.Reset()\n\ts.checkStopCalls(c, resetStopCallsNo+signalCallsNo-1)\n\tcurrentDuration = utils.ExposeBackoffTimerDuration(s.timer)\n\tc.Assert(currentDuration, gc.Equals, s.min)\n\n\tfor i := 1; i < 200; i++ {\n\t\tsignalCallsNo++\n\t\ts.timer.Start()\n\t\ts.testStart(c, int64(signalCallsNo), 
int64(i))\n\t\ts.checkStopCalls(c, resetStopCallsNo+signalCallsNo-1)\n\t}\n\n\tresetStopCallsNo++\n\ts.timer.Reset()\n\ts.checkStopCalls(c, signalCallsNo+resetStopCallsNo-1)\n\n\tfor i := 1; i < 100; i++ {\n\t\tsignalCallsNo++\n\t\ts.timer.Start()\n\t\ts.testStart(c, int64(signalCallsNo), int64(i))\n\t\ts.checkStopCalls(c, resetStopCallsNo+signalCallsNo-1)\n\t}\n\n\tresetStopCallsNo++\n\ts.timer.Reset()\n\ts.checkStopCalls(c, signalCallsNo+resetStopCallsNo-1)\n}\n\nfunc (s *timerSuite) testStart(c *gc.C, afterFuncCalls int64, durationFactor int64) {\n\tc.Assert(s.afterFuncCalls, gc.Equals, afterFuncCalls)\n\tc.Logf(\"iteration %d\", afterFuncCalls)\n\texpectedDuration := time.Duration(math.Pow(float64(s.factor), float64(durationFactor))) * s.min\n\tif expectedDuration > s.max || expectedDuration <= 0 {\n\t\texpectedDuration = s.max\n\t}\n\tcurrentDuration := utils.ExposeBackoffTimerDuration(s.timer)\n\tc.Assert(currentDuration, gc.Equals, expectedDuration)\n}\n\nfunc (s *timerSuite) checkStopCalls(c *gc.C, number int) {\n\tcalls := make([]testing.StubCall, number)\n\tfor i := 0; i < number; i++ {\n\t\tcalls[i] = testing.StubCall{FuncName: \"Stop\"}\n\t}\n\ts.stub.CheckCalls(c, calls)\n}\n"
  },
  {
    "path": "trivial.go",
    "content": "// Copyright 2012, 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils\n\nimport (\n\t\"bytes\"\n\t\"compress/gzip\"\n\t\"crypto/sha256\"\n\t\"encoding/hex\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n\t\"unicode\"\n)\n\n// TODO(ericsnow) Move the quoting helpers into the shell package?\n\n// ShQuote quotes s so that when read by bash, no metacharacters\n// within s will be interpreted as such.\nfunc ShQuote(s string) string {\n\t// single-quote becomes single-quote, double-quote, single-quote, double-quote, single-quote\n\treturn `'` + strings.Replace(s, `'`, `'\"'\"'`, -1) + `'`\n}\n\n// WinPSQuote quotes s so that when read by powershell, no metacharacters\n// within s will be interpreted as such.\nfunc WinPSQuote(s string) string {\n\t// See http://ss64.com/ps/syntax-esc.html#quotes.\n\t// Double quotes inside single quotes are fine, double single quotes inside\n\t// single quotes, not so much so. Having double quoted strings inside single\n\t// quoted strings, ensure no expansion happens.\n\treturn `'` + strings.Replace(s, `'`, `\"`, -1) + `'`\n}\n\n// WinCmdQuote quotes s so that when read by cmd.exe, no metacharacters\n// within s will be interpreted as such.\nfunc WinCmdQuote(s string) string {\n\t// See http://blogs.msdn.com/b/twistylittlepassagesallalike/archive/2011/04/23/everyone-quotes-arguments-the-wrong-way.aspx.\n\tquoted := winCmdQuote(s)\n\treturn winCmdEscapeMeta(quoted)\n}\n\nfunc winCmdQuote(s string) string {\n\tvar escaped string\n\tfor _, c := range s {\n\t\tswitch c {\n\t\tcase '\\\\', '\"':\n\t\t\tescaped += `\\`\n\t\t}\n\t\tescaped += string(c)\n\t}\n\treturn `\"` + escaped + `\"`\n}\n\nfunc winCmdEscapeMeta(str string) string {\n\tconst meta = `()%!^\"<>&|`\n\tvar newStr string\n\tfor _, c := range str {\n\t\tif strings.Contains(meta, string(c)) {\n\t\t\tnewStr += \"^\"\n\t\t}\n\t\tnewStr += string(c)\n\t}\n\treturn newStr\n}\n\n// CommandString flattens a sequence of command 
arguments into a\n// string suitable for executing in a shell, escaping slashes,\n// variables and quotes as necessary; each argument is double-quoted\n// if and only if necessary.\nfunc CommandString(args ...string) string {\n\tvar buf bytes.Buffer\n\tfor i, arg := range args {\n\t\tneedsQuotes := false\n\t\tvar argBuf bytes.Buffer\n\t\tfor _, r := range arg {\n\t\t\tif unicode.IsSpace(r) {\n\t\t\t\tneedsQuotes = true\n\t\t\t} else if r == '\"' || r == '$' || r == '\\\\' {\n\t\t\t\tneedsQuotes = true\n\t\t\t\targBuf.WriteByte('\\\\')\n\t\t\t}\n\t\t\targBuf.WriteRune(r)\n\t\t}\n\t\tif i > 0 {\n\t\t\tbuf.WriteByte(' ')\n\t\t}\n\t\tif needsQuotes {\n\t\t\tbuf.WriteByte('\"')\n\t\t\t_, _ = argBuf.WriteTo(&buf)\n\t\t\tbuf.WriteByte('\"')\n\t\t} else {\n\t\t\t_, _ = argBuf.WriteTo(&buf)\n\t\t}\n\t}\n\treturn buf.String()\n}\n\n// Gzip compresses the given data.\nfunc Gzip(data []byte) []byte {\n\tvar buf bytes.Buffer\n\tw := gzip.NewWriter(&buf)\n\tif _, err := w.Write(data); err != nil {\n\t\t// Compression should never fail unless it fails\n\t\t// to write to the underlying writer, which is a bytes.Buffer\n\t\t// that never fails.\n\t\tpanic(err)\n\t}\n\tif err := w.Close(); err != nil {\n\t\tpanic(err)\n\t}\n\treturn buf.Bytes()\n}\n\n// Gunzip uncompresses the given data.\nfunc Gunzip(data []byte) ([]byte, error) {\n\tr, err := gzip.NewReader(bytes.NewReader(data))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn io.ReadAll(r)\n}\n\n// ReadSHA256 returns the SHA256 hash of the contents read from source\n// (hex encoded) and the size of the source in bytes.\nfunc ReadSHA256(source io.Reader) (string, int64, error) {\n\thash := sha256.New()\n\tsize, err := io.Copy(hash, source)\n\tif err != nil {\n\t\treturn \"\", 0, err\n\t}\n\tdigest := hex.EncodeToString(hash.Sum(nil))\n\treturn digest, size, nil\n}\n\n// ReadFileSHA256 is like ReadSHA256 but reads the contents of the\n// given file.\nfunc ReadFileSHA256(filename string) (string, int64, error) {\n\tf, err := 
os.Open(filename)\n\tif err != nil {\n\t\treturn \"\", 0, err\n\t}\n\tdefer func() { _ = f.Close() }()\n\treturn ReadSHA256(f)\n}\n"
  },
  {
    "path": "trivial_test.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io/ioutil\"\n\t\"path/filepath\"\n\t\"strings\"\n\n\t\"github.com/juju/testing\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4\"\n)\n\ntype utilsSuite struct {\n\ttesting.IsolationSuite\n}\n\nvar _ = gc.Suite(&utilsSuite{})\n\nfunc (*utilsSuite) TestCompression(c *gc.C) {\n\tdata := []byte(strings.Repeat(\"some data to be compressed\\n\", 100))\n\tcompressedData := []byte{\n\t\t0x1f, 0x8b, 0x08, 0x00, 0x33, 0xb5, 0xf6, 0x50,\n\t\t0x00, 0x03, 0xed, 0xc9, 0xb1, 0x0d, 0x00, 0x20,\n\t\t0x08, 0x45, 0xc1, 0xde, 0x29, 0x58, 0x0d, 0xe5,\n\t\t0x97, 0x04, 0x23, 0xee, 0x1f, 0xa7, 0xb0, 0x7b,\n\t\t0xd7, 0x5e, 0x57, 0xca, 0xc2, 0xaf, 0xdb, 0x2d,\n\t\t0x9b, 0xb2, 0x55, 0xb9, 0x8f, 0xba, 0x15, 0xa3,\n\t\t0x29, 0x8a, 0xa2, 0x28, 0x8a, 0xa2, 0x28, 0xea,\n\t\t0x67, 0x3d, 0x71, 0x71, 0x6e, 0xbf, 0x8c, 0x0a,\n\t\t0x00, 0x00,\n\t}\n\tcdata := utils.Gzip(data)\n\tc.Assert(len(cdata) < len(data), gc.Equals, true)\n\tdata1, err := utils.Gunzip(cdata)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(data1, gc.DeepEquals, data)\n\n\tdata1, err = utils.Gunzip(compressedData)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(data1, gc.DeepEquals, data)\n}\n\nfunc checkQuoting(c *gc.C, shQuote func(string) string, tests map[string]string) {\n\tfor str, expected := range tests {\n\t\tc.Logf(\"- checking %q -\", str)\n\t\tquoted := shQuote(str)\n\n\t\tc.Check(quoted, gc.Equals, expected)\n\t}\n}\n\nfunc (*utilsSuite) TestWinCmdQuote(c *gc.C) {\n\targs := map[string]string{\n\t\t\"\":                 `^\"^\"`,\n\t\t\"a\":                `^\"a^\"`,\n\t\t\"'a'\":              `^\"'a'^\"`,\n\t\t`\"a`:               `^\"\\^\"a^\"`,\n\t\t`a\"`:               `^\"a\\^\"^\"`,\n\t\t`\"a\"`:              `^\"\\^\"a\\^\"^\"`,\n\t\t\"abc > xyz 2>&1 &\": `^\"abc ^> xyz 2^>^&1 ^&^\"`,\n\t}\n\tcheckQuoting(c, utils.WinCmdQuote, args)\n}\n\nfunc 
(*utilsSuite) TestWinPSQuote(c *gc.C) {\n\targs := map[string]string{\n\t\t\"\":                 \"''\",\n\t\t\"a\":                `'a'`,\n\t\t`\"a\"`:              `'\"a\"'`,\n\t\t\"'a\":               `'\"a'`,\n\t\t\"a'\":               `'a\"'`,\n\t\t\"'a'\":              `'\"a\"'`,\n\t\t\"abc > xyz 2>&1 &\": \"'abc > xyz 2>&1 &'\",\n\t}\n\tcheckQuoting(c, utils.WinPSQuote, args)\n}\n\nfunc (*utilsSuite) TestCommandString(c *gc.C) {\n\ttype test struct {\n\t\targs     []string\n\t\texpected string\n\t}\n\ttests := []test{\n\t\t{nil, \"\"},\n\t\t{[]string{\"a\"}, \"a\"},\n\t\t{[]string{\"a$\"}, `\"a\\$\"`},\n\t\t{[]string{\"\"}, \"\"},\n\t\t{[]string{\"\\\\\"}, `\"\\\\\"`},\n\t\t{[]string{\"a\", \"'b'\"}, \"a 'b'\"},\n\t\t{[]string{\"a b\"}, `\"a b\"`},\n\t\t{[]string{\"a\", `\"b\"`}, `a \"\\\"b\\\"\"`},\n\t\t{[]string{\"a\", `\"b\\\"`}, `a \"\\\"b\\\\\\\"\"`},\n\t\t{[]string{\"a\\n\"}, \"\\\"a\\n\\\"\"},\n\t}\n\tfor i, test := range tests {\n\t\tc.Logf(\"test %d: %q\", i, test.args)\n\t\tresult := utils.CommandString(test.args...)\n\t\tc.Assert(result, gc.Equals, test.expected)\n\t}\n}\n\nfunc (*utilsSuite) TestReadSHA256AndReadFileSHA256(c *gc.C) {\n\tsha256Tests := []struct {\n\t\tcontent string\n\t\tsha256  string\n\t}{{\n\t\tcontent: \"\",\n\t\tsha256:  \"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\",\n\t}, {\n\t\tcontent: \"some content\",\n\t\tsha256:  \"290f493c44f5d63d06b374d0a5abd292fae38b92cab2fae5efefe1b0e9347f56\",\n\t}, {\n\t\tcontent: \"foo\",\n\t\tsha256:  \"2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae\",\n\t}, {\n\t\tcontent: \"Foo\",\n\t\tsha256:  \"1cbec737f863e4922cee63cc2ebbfaafcd1cff8b790d8cfd2e6a5d550b648afa\",\n\t}, {\n\t\tcontent: \"multi\\nline\\ntext\\nhere\",\n\t\tsha256:  \"c384f11c0294280792a44d9d6abb81f9fd991904cb7eb851a88311b04114231e\",\n\t}}\n\n\ttempDir := c.MkDir()\n\tfor i, test := range sha256Tests {\n\t\tc.Logf(\"test %d: %q -> %q\", i, test.content, test.sha256)\n\t\tbuf := 
bytes.NewBufferString(test.content)\n\t\thash, size, err := utils.ReadSHA256(buf)\n\t\tc.Check(err, gc.IsNil)\n\t\tc.Check(hash, gc.Equals, test.sha256)\n\t\tc.Check(int(size), gc.Equals, len(test.content))\n\n\t\ttempFileName := filepath.Join(tempDir, fmt.Sprintf(\"sha256-%d\", i))\n\t\terr = ioutil.WriteFile(tempFileName, []byte(test.content), 0644)\n\t\tc.Check(err, gc.IsNil)\n\t\tfileHash, fileSize, err := utils.ReadFileSHA256(tempFileName)\n\t\tc.Check(err, gc.IsNil)\n\t\tc.Check(fileHash, gc.Equals, hash)\n\t\tc.Check(fileSize, gc.Equals, size)\n\t}\n}\n"
  },
  {
    "path": "uptime/uptime_nix.go",
    "content": "// Copyright 2014 Cloudbase Solutions SRL\n// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n//\n//go:build !windows\n// +build !windows\n\npackage uptime\n\nimport (\n\t\"syscall\"\n)\n\n// Uptime returns the number of seconds since the system has booted\nfunc Uptime() (int64, error) {\n\tinfo := &syscall.Sysinfo_t{}\n\terr := syscall.Sysinfo(info)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn int64(info.Uptime), nil\n}\n"
  },
  {
    "path": "uptime/uptime_windows.go",
    "content": "// Copyright 2014 Cloudbase Solutions SRL\n// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage uptime\n\nimport (\n\t\"fmt\"\n)\n\n//sys getTickCount64() (uptime uint64, err error) =  GetTickCount64\n\n// Uptime returns the number of seconds since the system has booted\nfunc Uptime() (int64, error) {\n\tuptime, err := getTickCount64()\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"Failed to get uptime. Error number: %v\", err)\n\t}\n\treturn int64(uptime) / 1000, nil\n}\n"
  },
  {
    "path": "uptime/zuptime_windows_386.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Copyright 2014 Cloudbase Solutions SRL\n// Licensed under the LGPLv3, see LICENCE file for details.\n\n// mksyscall_windows.pl -l32 uptime_windows.go\n// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\npackage uptime\n\nimport \"syscall\"\n\nvar (\n\tmodkernel32 = syscall.NewLazyDLL(\"kernel32.dll\")\n\n\tprocGetTickCount64 = modkernel32.NewProc(\"GetTickCount64\")\n)\n\nfunc getTickCount64() (uptime uint64, err error) {\n\tr0, _, e1 := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0)\n\tuptime = uint64(r0)\n\tif uptime == 0 {\n\t\tif e1 != 0 {\n\t\t\terr = error(e1)\n\t\t} else {\n\t\t\terr = syscall.EINVAL\n\t\t}\n\t}\n\treturn\n}\n"
  },
  {
    "path": "uptime/zuptime_windows_amd64.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Copyright 2014 Cloudbase Solutions SRL\n// Licensed under the LGPLv3, see LICENCE file for details.\n\n// mksyscall_windows.pl uptime_windows.go\n// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\npackage uptime\n\nimport \"syscall\"\n\nvar (\n\tmodkernel32 = syscall.NewLazyDLL(\"kernel32.dll\")\n\n\tprocGetTickCount64 = modkernel32.NewProc(\"GetTickCount64\")\n)\n\nfunc getTickCount64() (uptime uint64, err error) {\n\tr0, _, e1 := syscall.Syscall(procGetTickCount64.Addr(), 0, 0, 0, 0)\n\tuptime = uint64(r0)\n\tif uptime == 0 {\n\t\tif e1 != 0 {\n\t\t\terr = error(e1)\n\t\t} else {\n\t\t\terr = syscall.EINVAL\n\t\t}\n\t}\n\treturn\n}\n"
  },
  {
    "path": "username.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils\n\nimport (\n\t\"os\"\n\t\"os/user\"\n\n\t\"github.com/juju/errors\"\n)\n\n// ResolveSudo returns the original username if sudo was used. The\n// original username is extracted from the OS environment.\nfunc ResolveSudo(username string) string {\n\treturn resolveSudo(username, os.Getenv)\n}\n\nfunc resolveSudo(username string, getenvFunc func(string) string) string {\n\tif username != \"root\" {\n\t\treturn username\n\t}\n\t// sudo was probably called, get the original user.\n\tif username := getenvFunc(\"SUDO_USER\"); username != \"\" {\n\t\treturn username\n\t}\n\treturn username\n}\n\n// EnvUsername returns the username from the OS environment.\nfunc EnvUsername() (string, error) {\n\treturn os.Getenv(\"USER\"), nil\n}\n\n// OSUsername returns the username of the current OS user (based on UID).\nfunc OSUsername() (string, error) {\n\tu, err := user.Current()\n\tif err != nil {\n\t\treturn \"\", errors.Trace(err)\n\t}\n\treturn u.Username, nil\n}\n\n// ResolveUsername returns the username determined by the provided\n// functions. The functions are tried in the same order in which they\n// were passed in. An error returned from any of them is immediately\n// returned. If an empty string is returned then that signals that the\n// function did not find the username and the next function is tried.\n// Once a username is found, the provided resolveSudo func (if any) is\n// called with that username and the result is returned. 
If no username\n// is found then errors.NotFound is returned.\nfunc ResolveUsername(resolveSudo func(string) string, usernameFuncs ...func() (string, error)) (string, error) {\n\tfor _, usernameFunc := range usernameFuncs {\n\t\tusername, err := usernameFunc()\n\t\tif err != nil {\n\t\t\treturn \"\", errors.Trace(err)\n\t\t}\n\t\tif username != \"\" {\n\t\t\tif resolveSudo != nil {\n\t\t\t\tif original := resolveSudo(username); original != \"\" {\n\t\t\t\t\tusername = original\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn username, nil\n\t\t}\n\t}\n\treturn \"\", errors.NotFoundf(\"username\")\n}\n\n// LocalUsername determines the current username on the local host.\nfunc LocalUsername() (string, error) {\n\tusername, err := ResolveUsername(ResolveSudo, EnvUsername, OSUsername)\n\tif err != nil {\n\t\treturn \"\", errors.Annotatef(err, \"cannot get current user from the environment: %v\", os.Environ())\n\t}\n\treturn username, nil\n}\n"
  },
  {
    "path": "username_test.go",
    "content": "// Copyright 2015 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils_test\n\nimport (\n\t\"github.com/juju/errors\"\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4\"\n)\n\nvar _ = gc.Suite(&usernameSuite{})\n\ntype usernameSuite struct {\n\ttesting.IsolationSuite\n}\n\nfunc (s *usernameSuite) TestResolveUsername(c *gc.C) {\n\ttype test struct {\n\t\tuserEnv  string\n\t\tsudoEnv  string\n\t\tuserOS   string\n\t\texpected string\n\t\terr      string\n\t}\n\ttests := []test{{\n\t\tuserEnv:  \"someone\",\n\t\tsudoEnv:  \"notroot\",\n\t\tuserOS:   \"other\",\n\t\texpected: \"someone\",\n\t}, {\n\t\tuserOS:   \"other\",\n\t\texpected: \"other\",\n\t}, {\n\t\tuserEnv:  \"root\",\n\t\texpected: \"root\",\n\t}, {\n\t\tuserEnv:  \"root\",\n\t\tsudoEnv:  \"other\",\n\t\texpected: \"other\",\n\t}, {\n\t\terr: \"failed to determine username for namespace: oh noes\",\n\t}}\n\n\tresolveUsername := func(t test) (string, error) {\n\t\tif t.err != \"\" {\n\t\t\treturn \"\", errors.New(t.err)\n\t\t}\n\n\t\tvar funcs []func() (string, error)\n\t\tif t.userEnv != \"\" {\n\t\t\tfuncs = append(funcs, func() (string, error) {\n\t\t\t\treturn t.userEnv, nil\n\t\t\t})\n\t\t}\n\t\tif t.userOS != \"\" {\n\t\t\tfuncs = append(funcs, func() (string, error) {\n\t\t\t\treturn t.userOS, nil\n\t\t\t})\n\t\t}\n\n\t\tresolveSudo := func(username string) string {\n\t\t\treturn utils.ResolveSudoByFunc(username, func(string) string {\n\t\t\t\treturn t.sudoEnv\n\t\t\t})\n\t\t}\n\n\t\treturn utils.ResolveUsername(resolveSudo, funcs...)\n\t}\n\n\tfor i, test := range tests {\n\t\tc.Logf(\"test %d: %v\", i, test)\n\n\t\tusername, err := resolveUsername(test)\n\n\t\tif test.err == \"\" {\n\t\t\tif c.Check(err, jc.ErrorIsNil) {\n\t\t\t\tc.Check(username, gc.Equals, test.expected)\n\t\t\t}\n\t\t} else {\n\t\t\tc.Check(err, gc.ErrorMatches, test.err)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "uuid.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils\n\nimport (\n\t\"crypto/rand\"\n\t\"encoding/hex\"\n\t\"fmt\"\n\t\"io\"\n\t\"regexp\"\n\t\"strings\"\n)\n\n// UUID represent a universal identifier with 16 octets.\ntype UUID [16]byte\n\n// regex for validating that the UUID matches RFC 4122.\n// This package generates version 4 UUIDs but\n// accepts any UUID version.\n// http://www.ietf.org/rfc/rfc4122.txt\nvar (\n\tblock1 = \"[0-9a-f]{8}\"\n\tblock2 = \"[0-9a-f]{4}\"\n\tblock3 = \"[0-9a-f]{4}\"\n\tblock4 = \"[0-9a-f]{4}\"\n\tblock5 = \"[0-9a-f]{12}\"\n\n\tUUIDSnippet = block1 + \"-\" + block2 + \"-\" + block3 + \"-\" + block4 + \"-\" + block5\n\tvalidUUID   = regexp.MustCompile(\"^\" + UUIDSnippet + \"$\")\n)\n\nfunc UUIDFromString(s string) (UUID, error) {\n\tif !IsValidUUIDString(s) {\n\t\treturn UUID{}, fmt.Errorf(\"invalid UUID: %q\", s)\n\t}\n\ts = strings.Replace(s, \"-\", \"\", 4)\n\traw, err := hex.DecodeString(s)\n\tif err != nil {\n\t\treturn UUID{}, err\n\t}\n\tvar uuid UUID\n\tcopy(uuid[:], raw)\n\treturn uuid, nil\n}\n\n// IsValidUUIDString returns true, if the given string matches a valid UUID (version 4, variant 2).\nfunc IsValidUUIDString(s string) bool {\n\treturn validUUID.MatchString(s)\n}\n\n// MustNewUUID returns a new uuid, if an error occurs it panics.\nfunc MustNewUUID() UUID {\n\tuuid, err := NewUUID()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn uuid\n}\n\n// NewUUID generates a new version 4 UUID relying only on random numbers.\nfunc NewUUID() (UUID, error) {\n\tuuid := UUID{}\n\tif _, err := io.ReadFull(rand.Reader, []byte(uuid[0:16])); err != nil {\n\t\treturn UUID{}, err\n\t}\n\t// Set version (4) and variant (2) according to RfC 4122.\n\tvar version byte = 4 << 4\n\tvar variant byte = 8 << 4\n\tuuid[6] = version | (uuid[6] & 15)\n\tuuid[8] = variant | (uuid[8] & 15)\n\treturn uuid, nil\n}\n\n// Copy returns a copy of the UUID.\nfunc (uuid UUID) Copy() 
UUID {\n\tuuidCopy := uuid\n\treturn uuidCopy\n}\n\n// Raw returns a copy of the UUID bytes.\nfunc (uuid UUID) Raw() [16]byte {\n\treturn [16]byte(uuid)\n}\n\n// String returns a hexadecimal string representation with\n// standardized separators.\nfunc (uuid UUID) String() string {\n\treturn fmt.Sprintf(\"%x-%x-%x-%x-%x\", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:16])\n}\n"
  },
  {
    "path": "uuid_test.go",
    "content": "// Copyright 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils_test\n\nimport (\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n\n\t\"github.com/juju/utils/v4\"\n)\n\ntype uuidSuite struct {\n\ttesting.IsolationSuite\n}\n\nvar _ = gc.Suite(&uuidSuite{})\n\nfunc (*uuidSuite) TestUUID(c *gc.C) {\n\tuuid, err := utils.NewUUID()\n\tc.Assert(err, gc.IsNil)\n\tuuidCopy := uuid.Copy()\n\tuuidRaw := uuid.Raw()\n\tuuidStr := uuid.String()\n\tc.Assert(uuidRaw, gc.HasLen, 16)\n\tc.Assert(uuidStr, jc.Satisfies, utils.IsValidUUIDString)\n\tuuid[0] = 0x00\n\tuuidCopy[0] = 0xFF\n\tc.Assert(uuid, gc.Not(gc.DeepEquals), uuidCopy)\n\tuuidRaw[0] = 0xFF\n\tc.Assert(uuid, gc.Not(gc.DeepEquals), uuidRaw)\n\tnextUUID, err := utils.NewUUID()\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(uuid, gc.Not(gc.DeepEquals), nextUUID)\n}\n\nfunc (*uuidSuite) TestIsValidUUIDFailsWhenNotValid(c *gc.C) {\n\ttests := []struct {\n\t\tinput    string\n\t\texpected bool\n\t}{\n\t\t{\n\t\t\tutils.UUID{}.String(),\n\t\t\ttrue,\n\t\t},\n\t\t{\n\t\t\t\"\",\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\t\"blah\",\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\t\"blah-9f484882-2f18-4fd2-967d-db9663db7bea\",\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\t\"9f484882-2f18-4fd2-967d-db9663db7bea-blah\",\n\t\t\tfalse,\n\t\t},\n\t\t{\n\t\t\t\"9f484882-2f18-4fd2-967d-db9663db7bea\",\n\t\t\ttrue,\n\t\t},\n\t}\n\tfor i, t := range tests {\n\t\tc.Logf(\"Running test %d\", i)\n\t\tc.Check(utils.IsValidUUIDString(t.input), gc.Equals, t.expected)\n\t}\n}\n\nfunc (*uuidSuite) TestUUIDFromString(c *gc.C) {\n\t_, err := utils.UUIDFromString(\"blah\")\n\tc.Assert(err, gc.ErrorMatches, `invalid UUID: \"blah\"`)\n\tvalidUUID := \"9f484882-2f18-4fd2-967d-db9663db7bea\"\n\tuuid, err := utils.UUIDFromString(validUUID)\n\tc.Assert(err, gc.IsNil)\n\tc.Assert(uuid.String(), gc.Equals, validUUID)\n}\n"
  },
  {
    "path": "voyeur/package_test.go",
    "content": "// Copyright 2014 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage voyeur\n\nimport (\n\t\"testing\"\n\n\tgc \"gopkg.in/check.v1\"\n)\n\nfunc TestPackage(t *testing.T) {\n\tgc.TestingT(t)\n}\n"
  },
  {
    "path": "voyeur/value.go",
    "content": "// Copyright 2012, 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\n// Package voyeur implements a concurrency-safe value that can be watched for\n// changes.\npackage voyeur\n\nimport (\n\t\"sync\"\n)\n\n// Value represents a shared value that can be watched for changes. Methods on\n// a Value may be called concurrently. The zero Value is\n// ok to use, and is equivalent to a NewValue result\n// with a nil initial value.\ntype Value struct {\n\tval     any\n\tversion int\n\tmu      sync.RWMutex\n\twait    sync.Cond\n\tclosed  bool\n}\n\n// NewValue creates a new Value holding the given initial value. If initial is\n// nil, any watchers will wait until a value is set.\nfunc NewValue(initial any) *Value {\n\tv := new(Value)\n\tv.init()\n\tif initial != nil {\n\t\tv.val = initial\n\t\tv.version++\n\t}\n\treturn v\n}\n\nfunc (v *Value) needsInit() bool {\n\treturn v.wait.L == nil\n}\n\nfunc (v *Value) init() {\n\tif v.needsInit() {\n\t\tv.wait.L = v.mu.RLocker()\n\t}\n}\n\n// Set sets the shared value to val.\nfunc (v *Value) Set(val any) {\n\tv.mu.Lock()\n\tv.init()\n\tv.val = val\n\tv.version++\n\tv.mu.Unlock()\n\tv.wait.Broadcast()\n}\n\n// Close closes the Value, unblocking any outstanding watchers.  
Close always\n// returns nil.\nfunc (v *Value) Close() error {\n\tv.mu.Lock()\n\tv.init()\n\tv.closed = true\n\tv.mu.Unlock()\n\tv.wait.Broadcast()\n\treturn nil\n}\n\n// Closed reports whether the value has been closed.\nfunc (v *Value) Closed() bool {\n\tv.mu.RLock()\n\tdefer v.mu.RUnlock()\n\treturn v.closed\n}\n\n// Get returns the current value.\nfunc (v *Value) Get() any {\n\tv.mu.RLock()\n\tdefer v.mu.RUnlock()\n\treturn v.val\n}\n\n// Watch returns a Watcher that can be used to watch for changes to the value.\nfunc (v *Value) Watch() *Watcher {\n\treturn &Watcher{value: v}\n}\n\n// Watcher represents a single watcher of a shared value.\ntype Watcher struct {\n\tvalue   *Value\n\tversion int\n\tcurrent any\n\tclosed  bool\n}\n\n// Next blocks until there is a new value to be retrieved from the value that is\n// being watched. It also unblocks when the value or the Watcher itself is\n// closed. Next returns false if the value or the Watcher itself have been\n// closed.\nfunc (w *Watcher) Next() bool {\n\tval := w.value\n\tval.mu.RLock()\n\tdefer val.mu.RUnlock()\n\tif val.needsInit() {\n\t\tval.mu.RUnlock()\n\t\tval.mu.Lock()\n\t\tval.init()\n\t\tval.mu.Unlock()\n\t\tval.mu.RLock()\n\t}\n\n\t// We can go around this loop a maximum of two times,\n\t// because the only thing that can cause a Wait to\n\t// return is for the condition to be triggered,\n\t// which can only happen if the value is set (causing\n\t// the version to increment) or it is closed\n\t// causing the closed flag to be set.\n\t// Both these cases will cause Next to return.\n\tfor {\n\t\tif w.version != val.version {\n\t\t\tw.version = val.version\n\t\t\tw.current = val.val\n\t\t\treturn true\n\t\t}\n\t\tif val.closed || w.closed {\n\t\t\treturn false\n\t\t}\n\n\t\t// Wait releases the lock until triggered and then reacquires the lock,\n\t\t// thus avoiding a deadlock.\n\t\tval.wait.Wait()\n\t}\n}\n\n// Close closes the Watcher without closing the underlying\n// value. 
It may be called concurrently with Next.\nfunc (w *Watcher) Close() {\n\tw.value.mu.Lock()\n\tw.value.init()\n\tw.closed = true\n\tw.value.mu.Unlock()\n\tw.value.wait.Broadcast()\n}\n\n// Value returns the last value that was retrieved from the watched Value by\n// Next.\nfunc (w *Watcher) Value() any {\n\treturn w.current\n}\n"
  },
  {
    "path": "voyeur/value_test.go",
    "content": "// Copyright 2012, 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage voyeur\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/juju/testing\"\n\tjc \"github.com/juju/testing/checkers\"\n\tgc \"gopkg.in/check.v1\"\n)\n\ntype suite struct {\n\ttesting.IsolationSuite\n}\n\nvar _ = gc.Suite(&suite{})\n\nfunc ExampleWatcher_Next() {\n\tv := NewValue(nil)\n\n\t// The channel is not necessary for normal use of the watcher.\n\t// It just makes the test output predictable.\n\tch := make(chan bool)\n\n\tgo func() {\n\t\tfor x := 0; x < 3; x++ {\n\t\t\tv.Set(fmt.Sprintf(\"value%d\", x))\n\t\t\tch <- true\n\t\t}\n\t\tv.Close()\n\t}()\n\tw := v.Watch()\n\tfor w.Next() {\n\t\tfmt.Println(w.Value())\n\t\t<-ch\n\t}\n\n\t// output:\n\t// value0\n\t// value1\n\t// value2\n}\n\nfunc (s *suite) TestValueGetSet(c *gc.C) {\n\tv := NewValue(nil)\n\texpected := \"12345\"\n\tv.Set(expected)\n\tgot := v.Get()\n\tc.Assert(got, gc.Equals, expected)\n\tc.Assert(v.Closed(), jc.IsFalse)\n}\n\nfunc (s *suite) TestValueInitial(c *gc.C) {\n\texpected := \"12345\"\n\tv := NewValue(expected)\n\tgot := v.Get()\n\tc.Assert(got, gc.Equals, expected)\n\tc.Assert(v.Closed(), jc.IsFalse)\n}\n\nfunc (s *suite) TestValueClose(c *gc.C) {\n\texpected := \"12345\"\n\tv := NewValue(expected)\n\tc.Assert(v.Close(), gc.IsNil)\n\n\tisClosed := v.Closed()\n\tc.Assert(isClosed, jc.IsTrue)\n\tgot := v.Get()\n\tc.Assert(got, gc.Equals, expected)\n\n\t// test that we can close multiple times without a problem\n\tc.Assert(v.Close(), gc.IsNil)\n}\n\nfunc (s *suite) TestWatcher(c *gc.C) {\n\tvals := []string{\"one\", \"two\", \"three\"}\n\n\t// blocking on the channel forces the scheduler to let the other goroutine\n\t// run for a bit, so we get predictable results.  
This is not necessary for\n\t// normal use of the watcher.\n\tch := make(chan bool)\n\n\tv := NewValue(nil)\n\n\tgo func() {\n\t\tfor _, s := range vals {\n\t\t\tv.Set(s)\n\t\t\tch <- true\n\t\t}\n\t\tv.Close()\n\t}()\n\n\tw := v.Watch()\n\tc.Assert(w.Next(), jc.IsTrue)\n\tc.Assert(w.Value(), gc.Equals, vals[0])\n\n\t// test that we can get the same value multiple times\n\tc.Assert(w.Value(), gc.Equals, vals[0])\n\t<-ch\n\n\t// now try skipping a value by calling next without getting the value\n\tc.Assert(w.Next(), jc.IsTrue)\n\t<-ch\n\n\tc.Assert(w.Next(), jc.IsTrue)\n\tc.Assert(w.Value(), gc.Equals, vals[2])\n\t<-ch\n\n\tc.Assert(w.Next(), jc.IsFalse)\n}\n\nfunc (s *suite) TestDoubleSet(c *gc.C) {\n\tvals := []string{\"one\", \"two\", \"three\"}\n\n\t// blocking on the channel forces the scheduler to let the other goroutine\n\t// run for a bit, so we get predictable results.  This is not necessary for\n\t// normal use of the watcher.\n\tch := make(chan bool)\n\n\tv := NewValue(nil)\n\n\tgo func() {\n\t\tv.Set(vals[0])\n\t\tch <- true\n\t\tv.Set(vals[1])\n\t\tv.Set(vals[2])\n\t\tch <- true\n\t\tv.Close()\n\t\tch <- true\n\t}()\n\n\tw := v.Watch()\n\tc.Assert(w.Next(), jc.IsTrue)\n\tc.Assert(w.Value(), gc.Equals, vals[0])\n\t<-ch\n\n\t// since we did two sets before sending on the channel,\n\t// we should just get vals[2] here and not get vals[1]\n\tc.Assert(w.Next(), jc.IsTrue)\n\tc.Assert(w.Value(), gc.Equals, vals[2])\n}\n\nfunc (s *suite) TestTwoReceivers(c *gc.C) {\n\tvals := []string{\"one\", \"two\", \"three\"}\n\n\t// blocking on the channel forces the scheduler to let the other goroutine\n\t// run for a bit, so we get predictable results.  
This is not necessary for\n\t// normal use of the watcher.\n\tch := make(chan bool)\n\n\tv := NewValue(nil)\n\n\twatcher := func() {\n\t\tw := v.Watch()\n\t\tx := 0\n\t\tfor w.Next() {\n\t\t\tc.Assert(w.Value(), gc.Equals, vals[x])\n\t\t\tx++\n\t\t\t<-ch\n\t\t}\n\t\tc.Assert(x, gc.Equals, len(vals))\n\t\t<-ch\n\t}\n\n\tgo watcher()\n\tgo watcher()\n\n\tfor _, val := range vals {\n\t\tv.Set(val)\n\t\tch <- true\n\t\tch <- true\n\t}\n\n\tv.Close()\n\tch <- true\n\tch <- true\n}\n\nfunc (s *suite) TestCloseWatcher(c *gc.C) {\n\tvals := []string{\"one\", \"two\", \"three\"}\n\n\t// blocking on the channel forces the scheduler to let the other goroutine\n\t// run for a bit, so we get predictable results.  This is not necessary for\n\t// normal use of the watcher.\n\tch := make(chan bool)\n\n\tv := NewValue(nil)\n\n\tw := v.Watch()\n\tgo func() {\n\t\tx := 0\n\t\tfor w.Next() {\n\t\t\tc.Assert(w.Value(), gc.Equals, vals[x])\n\t\t\tx++\n\t\t\t<-ch\n\t\t}\n\t\t// the value will only get set once before the watcher is closed\n\t\tc.Assert(x, gc.Equals, 1)\n\t\t<-ch\n\t}()\n\n\tv.Set(vals[0])\n\tch <- true\n\tw.Close()\n\tch <- true\n\n\t// prove the value is not closed, even though the watcher is\n\tc.Assert(v.Closed(), jc.IsFalse)\n}\n\nfunc (s *suite) TestWatchZeroValue(c *gc.C) {\n\tvar v Value\n\tch := make(chan bool)\n\tgo func() {\n\t\tw := v.Watch()\n\t\tch <- true\n\t\tch <- w.Next()\n\t}()\n\t<-ch\n\tv.Set(struct{}{})\n\tc.Assert(<-ch, jc.IsTrue)\n}\n"
  },
  {
    "path": "yaml.go",
    "content": "// Copyright 2012, 2013 Canonical Ltd.\n// Licensed under the LGPLv3, see LICENCE file for details.\n\npackage utils\n\nimport (\n\t\"io/ioutil\"\n\t\"os\"\n\t\"path/filepath\"\n\n\t\"github.com/juju/errors\"\n\n\t\"gopkg.in/yaml.v2\"\n)\n\n// WriteYaml marshals obj as yaml to a temporary file in the same directory\n// as path, than atomically replaces path with the temporary file.\nfunc WriteYaml(path string, obj any) error {\n\tdata, err := yaml.Marshal(obj)\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\tdir := filepath.Dir(path)\n\tf, err := ioutil.TempFile(dir, \"juju\")\n\tif err != nil {\n\t\treturn errors.Trace(err)\n\t}\n\ttmp := f.Name()\n\tif _, err := f.Write(data); err != nil {\n\t\t_ = f.Close()      // don't leak file handle\n\t\t_ = os.Remove(tmp) // don't leak half written files on disk\n\t\treturn errors.Trace(err)\n\t}\n\n\tif err := f.Sync(); err != nil {\n\t\t_ = f.Close()      // don't leak file handle\n\t\t_ = os.Remove(tmp) // don't leak half written files on disk\n\t\treturn errors.Trace(err)\n\t}\n\t// Explicitly close the file before moving it. This is needed on Windows\n\t// where the OS will not allow us to move a file that still has an open\n\t// file handle. Must check the error on close because filesystems can delay\n\t// reporting errors until the file is closed.\n\tif err := f.Close(); err != nil {\n\t\t_ = os.Remove(tmp) // don't leak half written files on disk\n\t\treturn errors.Trace(err)\n\t}\n\n\t// ioutils.TempFile creates files 0600, but this function has a contract\n\t// that files will be world readable, 0644 after replacement.\n\tif err := os.Chmod(tmp, 0644); err != nil {\n\t\t_ = os.Remove(tmp) // remove file with incorrect permissions.\n\t\treturn errors.Trace(err)\n\t}\n\n\treturn ReplaceFile(tmp, path)\n}\n\n// ReadYaml unmarshals the yaml contained in the file at path into obj. See\n// goyaml.Unmarshal. 
If path is not found, the error returned will be compatible\n// with os.IsNotExist.\nfunc ReadYaml(path string, obj any) error {\n\tdata, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn err // cannot wrap here because callers check for NotFound.\n\t}\n\treturn yaml.Unmarshal(data, obj)\n}\n\n// ConformYAML ensures all keys of any nested maps are strings.  This is\n// necessary because YAML unmarshals map[any]any in nested\n// maps, which cannot be serialized by json or bson. Also, handle\n// []any. cf. gopkg.in/juju/charm.v4/actions.go cleanse\nfunc ConformYAML(input any) (any, error) {\n\tswitch typedInput := input.(type) {\n\n\tcase map[string]any:\n\t\tnewMap := make(map[string]any)\n\t\tfor key, value := range typedInput {\n\t\t\tnewValue, err := ConformYAML(value)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tnewMap[key] = newValue\n\t\t}\n\t\treturn newMap, nil\n\n\tcase map[any]any:\n\t\tnewMap := make(map[string]any)\n\t\tfor key, value := range typedInput {\n\t\t\ttypedKey, ok := key.(string)\n\t\t\tif !ok {\n\t\t\t\treturn nil, errors.New(\"map keyed with non-string value\")\n\t\t\t}\n\t\t\tnewMap[typedKey] = value\n\t\t}\n\t\treturn ConformYAML(newMap)\n\n\tcase []any:\n\t\tnewSlice := make([]any, len(typedInput))\n\t\tfor i, sliceValue := range typedInput {\n\t\t\tnewSliceValue, err := ConformYAML(sliceValue)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.New(\"map keyed with non-string value\")\n\t\t\t}\n\t\t\tnewSlice[i] = newSliceValue\n\t\t}\n\t\treturn newSlice, nil\n\n\tdefault:\n\t\treturn input, nil\n\t}\n}\n"
  },
  {
    "path": "yaml_test.go",
// Copyright 2015 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.

package utils

import (
	"io/ioutil"
	"os"
	"path/filepath"

	jc "github.com/juju/testing/checkers"
	gc "gopkg.in/check.v1"
)

// yamlSuite exercises WriteYaml/ReadYaml.
type yamlSuite struct {
}

var _ = gc.Suite(&yamlSuite{})

func (*yamlSuite) TestYamlRoundTrip(c *gc.C) {
	// test happy path of round tripping an object via yaml

	// Note: the struct tags are deliberately odd ("deleted",
	// "omitempty" used as names) — the round trip must still be
	// lossless for zero values.
	type T struct {
		A int    `yaml:"a"`
		B bool   `yaml:"deleted"`
		C string `yaml:"omitempty"`
		D string
	}

	v := T{A: 1, B: true, C: "", D: ""}

	f, err := ioutil.TempFile(c.MkDir(), "yaml")
	c.Assert(err, gc.IsNil)
	path := f.Name()
	f.Close()

	err = WriteYaml(path, v)
	c.Assert(err, gc.IsNil)

	var v2 T
	err = ReadYaml(path, &v2)
	c.Assert(err, gc.IsNil)

	c.Assert(v, gc.Equals, v2)
}

func (*yamlSuite) TestReadYamlReturnsNotFound(c *gc.C) {
	// The contract for ReadYaml requires it returns an error
	// that can be inspected by os.IsNotExist. Notably, we cannot
	// use juju/errors gift wrapping.
	f, err := ioutil.TempFile(c.MkDir(), "yaml")
	c.Assert(err, gc.IsNil)
	path := f.Name()
	// Remove the file so the subsequent read fails with ENOENT.
	err = os.Remove(path)
	c.Assert(err, gc.IsNil)
	err = ReadYaml(path, nil)

	// assert that the error is reported as NotExist
	c.Assert(os.IsNotExist(err), gc.Equals, true)
}

func (*yamlSuite) TestWriteYamlMissingDirectory(c *gc.C) {
	// WriteYaml tries to create a temporary file in the same
	// directory as the target. Test what happens if the path's
	// directory is missing
	root := c.MkDir()
	missing := filepath.Join(root, "missing", "filename")

	v := struct{ A, B int }{1, 2}
	err := WriteYaml(missing, v)
	c.Assert(err, gc.NotNil)
}

func (*yamlSuite) TestWriteYamlWriteGarbage(c *gc.C) {
	// Skipped: upstream yaml panics instead of returning an error
	// for unmarshalable values; see the linked issue.
	c.Skip("https://github.com/go-yaml/yaml/issues/144")
	// some things cannot be marshalled into yaml, check that
	// WriteYaml detects this.

	root := c.MkDir()
	path := filepath.Join(root, "f")

	v := struct{ A, B [10]bool }{}
	err := WriteYaml(path, v)
	c.Assert(err, gc.NotNil)
}

// ConformSuite exercises ConformYAML's map/slice normalization.
type ConformSuite struct{}

var _ = gc.Suite(&ConformSuite{})

func (s *ConformSuite) TestConformYAML(c *gc.C) {
	// Table-driven: each case either expects a fully string-keyed
	// result or a "map keyed with non-string value" error.
	var goodInterfaceTests = []struct {
		description       string
		inputInterface    any
		expectedInterface map[string]any
		expectedError     string
	}{{
		description: "An interface requiring no changes.",
		inputInterface: map[string]any{
			"key1": "value1",
			"key2": "value2",
			"key3": map[string]any{
				"foo1": "val1",
				"foo2": "val2"}},
		expectedInterface: map[string]any{
			"key1": "value1",
			"key2": "value2",
			"key3": map[string]any{
				"foo1": "val1",
				"foo2": "val2"}},
	}, {
		description: "Substitute a single inner map[i]i.",
		inputInterface: map[string]any{
			"key1": "value1",
			"key2": "value2",
			"key3": map[any]any{
				"foo1": "val1",
				"foo2": "val2"}},
		expectedInterface: map[string]any{
			"key1": "value1",
			"key2": "value2",
			"key3": map[string]any{
				"foo1": "val1",
				"foo2": "val2"}},
	}, {
		description: "Substitute nested inner map[i]i.",
		inputInterface: map[string]any{
			"key1a": "val1a",
			"key2a": "val2a",
			"key3a": map[any]any{
				"key1b": "val1b",
				"key2b": map[any]any{
					"key1c": "val1c"}}},
		expectedInterface: map[string]any{
			"key1a": "val1a",
			"key2a": "val2a",
			"key3a": map[string]any{
				"key1b": "val1b",
				"key2b": map[string]any{
					"key1c": "val1c"}}},
	}, {
		description: "Substitute nested map[i]i within []i.",
		inputInterface: map[string]any{
			"key1a": "val1a",
			"key2a": []any{5, "foo", map[string]any{
				"key1b": "val1b",
				"key2b": map[any]any{
					"key1c": "val1c"}}}},
		expectedInterface: map[string]any{
			"key1a": "val1a",
			"key2a": []any{5, "foo", map[string]any{
				"key1b": "val1b",
				"key2b": map[string]any{
					"key1c": "val1c"}}}},
	}, {
		description: "An inner map[any]any with an int key.",
		inputInterface: map[string]any{
			"key1": "value1",
			"key2": "value2",
			"key3": map[any]any{
				"foo1": "val1",
				5:      "val2"}},
		expectedError: "map keyed with non-string value",
	}, {
		description: "An inner []any containing a map[i]i with an int key.",
		inputInterface: map[string]any{
			"key1a": "val1b",
			"key2a": "val2b",
			"key3a": []any{"foo1", 5, map[any]any{
				"key1b": "val1b",
				"key2b": map[any]any{
					"key1c": "val1c",
					5:       "val2c"}}}},
		expectedError: "map keyed with non-string value",
	}}

	for i, test := range goodInterfaceTests {
		c.Logf("test %d: %s", i, test.description)
		input := test.inputInterface
		cleansedInterfaceMap, err := ConformYAML(input)
		if test.expectedError == "" {
			if !c.Check(err, jc.ErrorIsNil) {
				continue
			}
			c.Check(cleansedInterfaceMap, jc.DeepEquals, test.expectedInterface)
		} else {
			c.Check(err, gc.ErrorMatches, test.expectedError)
		}
	}
}
  },
  {
    "path": "zfile_windows.go",
// Copyright 2013 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.

// mksyscall_windows.pl -l32 file_windows.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT

package utils

import "unsafe"
import "syscall"

var (
	modkernel32 = syscall.NewLazyDLL("kernel32.dll")

	procMoveFileExW = modkernel32.NewProc("MoveFileExW")
)

// moveFileEx is a thin wrapper over the Win32 MoveFileExW syscall.
// The file names are NUL-terminated UTF-16 pointers; dwFlags is
// passed through to the API unchanged. A zero return from the
// syscall indicates failure, in which case the Win32 error (or
// EINVAL if none was set) is returned.
func moveFileEx(lpExistingFileName *uint16, lpNewFileName *uint16, dwFlags uint32) (err error) {
	r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(lpExistingFileName)), uintptr(unsafe.Pointer(lpNewFileName)), uintptr(dwFlags))
	if r1 == 0 {
		if e1 != 0 {
			err = error(e1)
		} else {
			err = syscall.EINVAL
		}
	}
	return
}
  },
  {
    "path": "zip/package_test.go",
// Copyright 2011-2014 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.

package zip_test

import (
	"testing"

	gc "gopkg.in/check.v1"
)

// TestPackage hooks the gocheck suites registered in this package
// into the standard "go test" runner.
func TestPackage(t *testing.T) {
	gc.TestingT(t)
}
  },
  {
    "path": "zip/zip.go",
// Copyright 2011-2014 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.

package zip

import (
	"archive/zip"
	"bytes"
	"fmt"
	"io"
	"os"
	"path"
	"path/filepath"
	"strings"
)

// FindAll returns the cleaned path of every file in the supplied zip reader.
func FindAll(reader *zip.Reader) ([]string, error) {
	return Find(reader, "*")
}

// Find returns the cleaned path of every file in the supplied zip reader whose
// base name matches the supplied pattern, which is interpreted as in path.Match.
func Find(reader *zip.Reader, pattern string) ([]string, error) {
	// path.Match will only return an error if the pattern is not
	// valid (*and* the supplied name is not empty, hence "check").
	if _, err := path.Match(pattern, "check"); err != nil {
		return nil, err
	}
	var matches []string
	for _, zipFile := range reader.File {
		cleanPath := path.Clean(zipFile.Name)
		baseName := path.Base(cleanPath)
		// The match error can be ignored: the pattern was validated above.
		if match, _ := path.Match(pattern, baseName); match {
			matches = append(matches, cleanPath)
		}
	}
	return matches, nil
}

// ExtractAll extracts the supplied zip reader to the target path, overwriting
// existing files and directories only where necessary.
func ExtractAll(reader *zip.Reader, targetRoot string) error {
	return Extract(reader, targetRoot, "")
}

// Extract extracts files from the supplied zip reader, from the (internal, slash-
// separated) source path into the (external, OS-specific) target path. If the
// source path does not reference a directory, the referenced file will be written
// directly to the target path.
func Extract(reader *zip.Reader, targetRoot, sourceRoot string) error {
	sourceRoot = path.Clean(sourceRoot)
	if sourceRoot == "." {
		// A cleaned empty/"." root means "extract everything".
		sourceRoot = ""
	}
	// Refuse roots like ".." that would escape the archive namespace.
	if !isSanePath(sourceRoot) {
		return fmt.Errorf("cannot extract files rooted at %q", sourceRoot)
	}
	extractor := extractor{targetRoot, sourceRoot}
	for _, zipFile := range reader.File {
		if err := extractor.extract(zipFile); err != nil {
			cleanName := path.Clean(zipFile.Name)
			return fmt.Errorf("cannot extract %q: %v", cleanName, err)
		}
	}
	return nil
}

// extractor carries the target/source roots through the per-entry
// extraction methods.
type extractor struct {
	targetRoot string
	sourceRoot string
}

// targetPath returns the target path for a given zip file and whether
// it should be extracted.
func (x extractor) targetPath(zipFile *zip.File) (string, bool) {
	cleanPath := path.Clean(zipFile.Name)
	if cleanPath == x.sourceRoot {
		// The entry *is* the requested source: write it to targetRoot itself.
		return x.targetRoot, true
	}
	// Strip any leading "/" and "../" runs so a hostile entry name
	// cannot resolve outside targetRoot (zip-slip defense).
	cleanPath = strings.TrimPrefix(cleanPath, "/")
	for strings.HasPrefix(cleanPath, "../") {
		cleanPath = cleanPath[len("../"):]
	}

	if x.sourceRoot != "" {
		// Only entries strictly under sourceRoot/ are extracted.
		mustPrefix := x.sourceRoot + "/"
		if !strings.HasPrefix(cleanPath, mustPrefix) {
			return "", false
		}
		cleanPath = cleanPath[len(mustPrefix):]
	}
	return filepath.Join(x.targetRoot, filepath.FromSlash(cleanPath)), true
}

// extract writes a single zip entry (dir, symlink, or regular file)
// to its computed target path, creating parent directories as needed.
func (x extractor) extract(zipFile *zip.File) error {
	targetPath, ok := x.targetPath(zipFile)
	if !ok {
		// Entry is outside sourceRoot; silently skipped.
		return nil
	}
	parentPath := filepath.Dir(targetPath)
	// 0777 is subject to the process umask.
	if err := os.MkdirAll(parentPath, 0777); err != nil {
		return err
	}
	mode := zipFile.Mode()
	modePerm := mode & os.ModePerm
	modeType := mode & os.ModeType
	switch modeType {
	case os.ModeDir:
		return x.writeDir(targetPath, modePerm)
	case os.ModeSymlink:
		return x.writeSymlink(targetPath, zipFile)
	case 0:
		// No type bits set: a regular file.
		return x.writeFile(targetPath, zipFile, modePerm)
	}
	return fmt.Errorf("unknown file type %d", modeType)
}

// writeDir ensures a directory exists at targetPath with modePerm,
// reusing (and re-chmodding) an existing directory, and replacing any
// existing non-directory entry.
func (x extractor) writeDir(targetPath string, modePerm os.FileMode) error {
	fileInfo, err := os.Lstat(targetPath)
	switch {
	case err == nil:
		mode := fileInfo.Mode()
		if mode.IsDir() {
			// Already a directory: just fix permissions if they differ.
			if mode&os.ModePerm != modePerm {
				return os.Chmod(targetPath, modePerm)
			}
			return nil
		}
		// Exists but is not a directory: fall through and remove it.
		fallthrough
	case !os.IsNotExist(err):
		if err := os.RemoveAll(targetPath); err != nil {
			return err
		}
	}
	return os.MkdirAll(targetPath, modePerm)
}

// writeFile extracts a regular file, removing any pre-existing entry
// at targetPath first.
func (x extractor) writeFile(targetPath string, zipFile *zip.File, modePerm os.FileMode) error {
	if _, err := os.Lstat(targetPath); !os.IsNotExist(err) {
		if err := os.RemoveAll(targetPath); err != nil {
			return err
		}
	}
	// O_EXCL is safe here because the path was just removed above.
	writer, err := os.OpenFile(targetPath, os.O_CREATE|os.O_EXCL|os.O_WRONLY, modePerm)
	if err != nil {
		return err
	}
	// The explicit Close below checks the error; this deferred Close
	// is only a backstop on the error paths.
	defer writer.Close()

	if err := copyTo(writer, zipFile); err != nil {
		return err
	}

	if err := writer.Sync(); err != nil {
		return err
	}

	if err := writer.Close(); err != nil {
		return err
	}
	return nil
}

// writeSymlink extracts a symlink entry after validating that its
// target stays inside the extraction root.
func (x extractor) writeSymlink(targetPath string, zipFile *zip.File) error {
	symlinkTarget, err := x.checkSymlink(targetPath, zipFile)
	if err != nil {
		return err
	}
	if _, err := os.Lstat(targetPath); !os.IsNotExist(err) {
		if err := os.RemoveAll(targetPath); err != nil {
			return err
		}
	}
	return os.Symlink(symlinkTarget, targetPath)
}

// checkSymlink reads the symlink target stored as the entry's content
// and rejects absolute targets or targets that resolve outside
// x.targetRoot.
func (x extractor) checkSymlink(targetPath string, zipFile *zip.File) (string, error) {
	var buffer bytes.Buffer
	if err := copyTo(&buffer, zipFile); err != nil {
		return "", err
	}
	symlinkTarget := buffer.String()
	if filepath.IsAbs(symlinkTarget) {
		return "", fmt.Errorf("symlink %q is absolute", symlinkTarget)
	}
	finalPath := filepath.Join(filepath.Dir(targetPath), symlinkTarget)
	relativePath, err := filepath.Rel(x.targetRoot, finalPath)
	if err != nil {
		// Not tested, because I don't know how to trigger this condition.
		return "", fmt.Errorf("symlink %q not comprehensible", symlinkTarget)
	}
	if !isSanePath(relativePath) {
		return "", fmt.Errorf("symlink %q leads out of scope", symlinkTarget)
	}
	return symlinkTarget, nil
}

// copyTo streams a zip entry's contents into writer. Note the Close
// error of the entry reader is deliberately dropped; the copy error
// (if any) takes precedence.
func copyTo(writer io.Writer, zipFile *zip.File) error {
	reader, err := zipFile.Open()
	if err != nil {
		return err
	}
	_, err = io.Copy(writer, reader)
	reader.Close()
	return err
}

// isSanePath reports whether a slash-separated relative path stays
// within its root (i.e. does not begin by climbing out via "..").
func isSanePath(path string) bool {
	if path == ".." || strings.HasPrefix(path, "../") {
		return false
	}
	return true
}
  },
  {
    "path": "zip/zip_test.go",
// Copyright 2011-2014 Canonical Ltd.
// Licensed under the LGPLv3, see LICENCE file for details.

package zip_test

import (
	stdzip "archive/zip"
	"bytes"
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
	"sort"

	"github.com/juju/testing"
	jc "github.com/juju/testing/checkers"
	ft "github.com/juju/testing/filetesting"
	gc "gopkg.in/check.v1"

	"github.com/juju/utils/v4/zip"
)

type ZipSuite struct {
	testing.IsolationSuite
}

var _ = gc.Suite(&ZipSuite{})

// makeZip creates the supplied entries in a temp dir and zips them by
// shelling out to the system "zip" binary via /bin/sh — so these tests
// require a Unix-like environment with zip on PATH. The returned
// reader's underlying file is closed via suite cleanup.
func (s *ZipSuite) makeZip(c *gc.C, entries ...ft.Entry) *stdzip.Reader {
	basePath := c.MkDir()
	for _, entry := range entries {
		entry.Create(c, basePath)
	}
	defer os.RemoveAll(basePath)

	outPath := filepath.Join(c.MkDir(), "test.zip")
	cmd := exec.Command("/bin/sh", "-c", fmt.Sprintf("cd %q; zip --fifo --symlinks -r %q .", basePath, outPath))
	output, err := cmd.CombinedOutput()
	c.Assert(err, gc.IsNil, gc.Commentf("Command output: %s", output))

	file, err := os.Open(outPath)
	c.Assert(err, gc.IsNil)
	s.AddCleanup(func(c *gc.C) {
		err := file.Close()
		c.Assert(err, gc.IsNil)
	})
	fileInfo, err := file.Stat()
	c.Assert(err, gc.IsNil)
	reader, err := stdzip.NewReader(file, fileInfo.Size())
	c.Assert(err, gc.IsNil)
	return reader
}

// TestFind checks base-name pattern matching (order-insensitively) and
// that FindAll is equivalent to Find with "*".
func (s *ZipSuite) TestFind(c *gc.C) {
	reader := s.makeZip(c,
		ft.File{"some-file", "", 0644},
		ft.File{"another-file", "", 0644},
		ft.Symlink{"some-symlink", "some-file"},
		ft.Dir{"some-dir", 0755},
		ft.Dir{"some-dir/another-dir", 0755},
		ft.File{"some-dir/another-file", "", 0644},
	)

	for i, test := range []struct {
		pattern string
		expect  []string
	}{{
		"", nil,
	}, {
		"no-matches", nil,
	}, {
		"some-file", []string{
			"some-file"},
	}, {
		"another-file", []string{
			"another-file",
			"some-dir/another-file"},
	}, {
		"some-*", []string{
			"some-file",
			"some-symlink",
			"some-dir"},
	}, {
		"another-*", []string{
			"another-file",
			"some-dir/another-dir",
			"some-dir/another-file"},
	}, {
		"*", []string{
			"some-file",
			"another-file",
			"some-symlink",
			"some-dir",
			"some-dir/another-dir",
			"some-dir/another-file"},
	}} {
		c.Logf("test %d: %q", i, test.pattern)
		actual, err := zip.Find(reader, test.pattern)
		c.Assert(err, gc.IsNil)
		// Archive entry order is not significant; compare sorted.
		sort.Strings(test.expect)
		sort.Strings(actual)
		c.Check(actual, jc.DeepEquals, test.expect)
	}

	c.Logf("test $spanish-inquisition: FindAll")
	expect, err := zip.Find(reader, "*")
	c.Assert(err, gc.IsNil)
	actual, err := zip.FindAll(reader)
	c.Assert(err, gc.IsNil)
	sort.Strings(expect)
	sort.Strings(actual)
	c.Check(actual, jc.DeepEquals, expect)
}

// TestFindError checks that an invalid pattern surfaces path.Match's error.
func (s *ZipSuite) TestFindError(c *gc.C) {
	reader := s.makeZip(c, ft.File{"some-file", "", 0644})
	_, err := zip.Find(reader, "[]")
	c.Assert(err, gc.ErrorMatches, "syntax error in pattern")
}

// TestExtractAll round-trips a mixed tree (files, dirs, symlinks)
// through a zip and verifies each entry on disk.
func (s *ZipSuite) TestExtractAll(c *gc.C) {
	entries := []ft.Entry{
		ft.File{"some-file", "content 1", 0644},
		ft.File{"another-file", "content 2", 0640},
		ft.Symlink{"some-symlink", "some-file"},
		ft.Dir{"some-dir", 0750},
		ft.File{"some-dir/another-file", "content 3", 0644},
		ft.Dir{"some-dir/another-dir", 0755},
		ft.Symlink{"some-dir/another-dir/another-symlink", "../../another-file"},
	}
	reader := s.makeZip(c, entries...)
	targetPath := c.MkDir()
	err := zip.ExtractAll(reader, targetPath)
	c.Assert(err, gc.IsNil)
	for i, entry := range entries {
		c.Logf("test %d: %#v", i, entry)
		entry.Check(c, targetPath)
	}
}

// TestExtractAllOverwriteFiles: a pre-existing file is replaced by
// whatever kind of entry (file/dir/symlink) the archive holds.
func (s *ZipSuite) TestExtractAllOverwriteFiles(c *gc.C) {
	name := "some-file"
	for i, test := range []ft.Entry{
		ft.File{name, "content", 0644},
		ft.Dir{name, 0751},
		ft.Symlink{name, "wherever"},
	} {
		c.Logf("test %d: %#v", i, test)
		targetPath := c.MkDir()
		ft.File{name, "original", 0}.Create(c, targetPath)
		reader := s.makeZip(c, test)
		err := zip.ExtractAll(reader, targetPath)
		c.Check(err, gc.IsNil)
		test.Check(c, targetPath)
	}
}

// TestExtractAllOverwriteSymlinks: a pre-existing symlink is replaced,
// and the file it pointed to is left untouched.
func (s *ZipSuite) TestExtractAllOverwriteSymlinks(c *gc.C) {
	name := "some-symlink"
	for i, test := range []ft.Entry{
		ft.File{name, "content", 0644},
		ft.Dir{name, 0751},
		ft.Symlink{name, "wherever"},
	} {
		c.Logf("test %d: %#v", i, test)
		targetPath := c.MkDir()
		original := ft.File{"original", "content", 0644}
		original.Create(c, targetPath)
		ft.Symlink{name, "original"}.Create(c, targetPath)
		reader := s.makeZip(c, test)
		err := zip.ExtractAll(reader, targetPath)
		c.Check(err, gc.IsNil)
		test.Check(c, targetPath)
		original.Check(c, targetPath)
	}
}

// TestExtractAllOverwriteDirs: a pre-existing directory is replaced by
// whatever kind of entry the archive holds.
func (s *ZipSuite) TestExtractAllOverwriteDirs(c *gc.C) {
	name := "some-dir"
	for i, test := range []ft.Entry{
		ft.File{name, "content", 0644},
		ft.Dir{name, 0751},
		ft.Symlink{name, "wherever"},
	} {
		c.Logf("test %d: %#v", i, test)
		targetPath := c.MkDir()
		ft.Dir{name, 0}.Create(c, targetPath)
		reader := s.makeZip(c, test)
		err := zip.ExtractAll(reader, targetPath)
		c.Check(err, gc.IsNil)
		test.Check(c, targetPath)
	}
}

// TestExtractAllMergeDirs: extracting into an existing directory merges
// new content with what was already there.
func (s *ZipSuite) TestExtractAllMergeDirs(c *gc.C) {
	targetPath := c.MkDir()
	ft.Dir{"dir", 0755}.Create(c, targetPath)
	originals := []ft.Entry{
		ft.Dir{"dir/original-dir", 0751},
		ft.File{"dir/original-file", "content 1", 0600},
		ft.Symlink{"dir/original-symlink", "original-file"},
	}
	for _, entry := range originals {
		entry.Create(c, targetPath)
	}
	merges := []ft.Entry{
		ft.Dir{"dir", 0751},
		ft.Dir{"dir/merge-dir", 0750},
		ft.File{"dir/merge-file", "content 2", 0640},
		ft.Symlink{"dir/merge-symlink", "merge-file"},
	}
	reader := s.makeZip(c, merges...)
	err := zip.ExtractAll(reader, targetPath)
	c.Assert(err, gc.IsNil)

	for i, test := range append(originals, merges...) {
		c.Logf("test %d: %#v", i, test)
		test.Check(c, targetPath)
	}
}

// TestExtractAllSymlinkErrors: symlinks that are absolute or escape the
// extraction root must be rejected with a descriptive error.
func (s *ZipSuite) TestExtractAllSymlinkErrors(c *gc.C) {
	for i, test := range []struct {
		content []ft.Entry
		error   string
	}{{
		content: []ft.Entry{
			ft.Symlink{"symlink", "/blah"},
		},
		error: `cannot extract "symlink": symlink "/blah" is absolute`,
	}, {
		content: []ft.Entry{
			ft.Symlink{"symlink", "../blah"},
		},
		error: `cannot extract "symlink": symlink "../blah" leads out of scope`,
	}, {
		content: []ft.Entry{
			ft.Dir{"dir", 0755},
			ft.Symlink{"dir/symlink", "../../blah"},
		},
		error: `cannot extract "dir/symlink": symlink "../../blah" leads out of scope`,
	}} {
		c.Logf("test %d: %s", i, test.error)
		targetPath := c.MkDir()
		reader := s.makeZip(c, test.content...)
		err := zip.ExtractAll(reader, targetPath)
		c.Check(err, gc.ErrorMatches, test.error)
	}
}

// TestExtractDir: extracting a sub-directory source writes only that
// subtree (renamed to the target path) and nothing else.
func (s *ZipSuite) TestExtractDir(c *gc.C) {
	reader := s.makeZip(c,
		ft.File{"bad-file", "xxx", 0644},
		ft.Dir{"bad-dir", 0755},
		ft.Symlink{"bad-symlink", "bad-file"},
		ft.Dir{"some-dir", 0751},
		ft.File{"some-dir-bad-lol", "xxx", 0644},
		ft.File{"some-dir/some-file", "content 1", 0644},
		ft.File{"some-dir/another-file", "content 2", 0600},
		ft.Dir{"some-dir/another-dir", 0750},
		ft.Symlink{"some-dir/another-dir/some-symlink", "../some-file"},
	)
	targetParent := c.MkDir()
	targetPath := filepath.Join(targetParent, "random-dir")
	err := zip.Extract(reader, targetPath, "some-dir")
	c.Assert(err, gc.IsNil)

	for i, test := range []ft.Entry{
		ft.Dir{"random-dir", 0751},
		ft.File{"random-dir/some-file", "content 1", 0644},
		ft.File{"random-dir/another-file", "content 2", 0600},
		ft.Dir{"random-dir/another-dir", 0750},
		ft.Symlink{"random-dir/another-dir/some-symlink", "../some-file"},
	} {
		c.Logf("test %d: %#v", i, test)
		test.Check(c, targetParent)
	}

	// Nothing outside the requested subtree may have been written.
	fileInfos, err := ioutil.ReadDir(targetParent)
	c.Check(err, gc.IsNil)
	c.Check(fileInfos, gc.HasLen, 1)

	fileInfos, err = ioutil.ReadDir(targetPath)
	c.Check(err, gc.IsNil)
	c.Check(fileInfos, gc.HasLen, 3)
}

// TestExtractSingleFile: a non-directory source is written directly to
// the target path.
func (s *ZipSuite) TestExtractSingleFile(c *gc.C) {
	reader := s.makeZip(c,
		ft.Dir{"dir", 0755},
		ft.Dir{"dir/dir", 0755},
		ft.File{"dir/dir/some-file", "content 1", 0644},
		ft.File{"dir/dir/some-file-wtf", "content 2", 0644},
	)
	targetParent := c.MkDir()
	targetPath := filepath.Join(targetParent, "just-the-one-file")
	err := zip.Extract(reader, targetPath, "dir/dir/some-file")
	c.Assert(err, gc.IsNil)
	fileInfos, err := ioutil.ReadDir(targetParent)
	c.Check(err, gc.IsNil)
	c.Check(fileInfos, gc.HasLen, 1)
	ft.File{"just-the-one-file", "content 1", 0644}.Check(c, targetParent)
}

// TestClosesFile: an extracted executable can be run immediately,
// proving the file handle was flushed and closed.
func (s *ZipSuite) TestClosesFile(c *gc.C) {
	reader := s.makeZip(c, ft.File{"f", "echo hullo!", 0755})
	targetPath := c.MkDir()
	err := zip.ExtractAll(reader, targetPath)
	c.Assert(err, gc.IsNil)
	cmd := exec.Command("/bin/sh", "-c", filepath.Join(targetPath, "f"))
	var buffer bytes.Buffer
	cmd.Stdout = &buffer
	err = cmd.Run()
	c.Assert(err, gc.IsNil)
	c.Assert(buffer.String(), gc.Equals, "hullo!\n")
}

// TestExtractSymlinkErrors: scope checks also apply when extracting a
// sub-path; even "blah" escapes when the source is the symlink itself.
func (s *ZipSuite) TestExtractSymlinkErrors(c *gc.C) {
	for i, test := range []struct {
		content []ft.Entry
		source  string
		error   string
	}{{
		content: []ft.Entry{
			ft.Dir{"dir", 0755},
			ft.Symlink{"dir/symlink", "/blah"},
		},
		source: "dir",
		error:  `cannot extract "dir/symlink": symlink "/blah" is absolute`,
	}, {
		content: []ft.Entry{
			ft.Dir{"dir", 0755},
			ft.Symlink{"dir/symlink", "../blah"},
		},
		source: "dir",
		error:  `cannot extract "dir/symlink": symlink "../blah" leads out of scope`,
	}, {
		content: []ft.Entry{
			ft.Symlink{"symlink", "blah"},
		},
		source: "symlink",
		error:  `cannot extract "symlink": symlink "blah" leads out of scope`,
	}} {
		c.Logf("test %d: %s", i, test.error)
		targetPath := c.MkDir()
		reader := s.makeZip(c, test.content...)
		err := zip.Extract(reader, targetPath, test.source)
		c.Check(err, gc.ErrorMatches, test.error)
	}
}

// TestExtractSourceError: a source root that climbs out of the archive
// namespace is rejected up front.
func (s *ZipSuite) TestExtractSourceError(c *gc.C) {
	reader := s.makeZip(c, ft.Dir{"dir", 0755})
	err := zip.Extract(reader, c.MkDir(), "../lol")
	c.Assert(err, gc.ErrorMatches, `cannot extract files rooted at "../lol"`)
}
  }
]