Repository: appc/docker2aci Branch: master Commit: 248258bd708a Files: 181 Total size: 863.6 KB Directory structure: gitextract_sbk4zkuw/ ├── .gitignore ├── CHANGELOG.md ├── Documentation/ │ └── devel/ │ └── release.md ├── LICENSE ├── MAINTAINERS ├── README.md ├── build.sh ├── glide.yaml ├── lib/ │ ├── common/ │ │ ├── common.go │ │ └── common_test.go │ ├── conversion_store.go │ ├── docker2aci.go │ ├── internal/ │ │ ├── backend/ │ │ │ ├── file/ │ │ │ │ └── file.go │ │ │ └── repository/ │ │ │ ├── repository.go │ │ │ ├── repository1.go │ │ │ └── repository2.go │ │ ├── docker/ │ │ │ └── docker.go │ │ ├── internal.go │ │ ├── internal_test.go │ │ ├── tarball/ │ │ │ ├── tarfile.go │ │ │ └── walk.go │ │ ├── types/ │ │ │ └── docker_types.go │ │ ├── typesV2/ │ │ │ └── docker_types.go │ │ └── util/ │ │ └── util.go │ ├── tests/ │ │ ├── common.go │ │ ├── server.go │ │ └── v22_test.go │ └── version.go ├── main.go ├── pkg/ │ └── log/ │ └── log.go ├── scripts/ │ ├── bump-release │ └── glide-update ├── tests/ │ ├── README.md │ ├── fixture-test-depsloop/ │ │ ├── check.sh │ │ └── fixture-test-depsloop.docker │ ├── fixture-test-invalidlayerid/ │ │ ├── check.sh │ │ └── fixture-test-invalidlayerid.docker │ ├── rkt-v1.1.0.md5sum │ ├── test-basic/ │ │ ├── Dockerfile │ │ └── check.sh │ ├── test-pwl/ │ │ ├── Dockerfile │ │ └── check.sh │ ├── test-whiteouts/ │ │ ├── Dockerfile │ │ └── check.sh │ └── test.sh └── vendor/ ├── github.com/ │ ├── appc/ │ │ └── spec/ │ │ ├── LICENSE │ │ ├── aci/ │ │ │ ├── build.go │ │ │ ├── doc.go │ │ │ ├── file.go │ │ │ ├── layout.go │ │ │ └── writer.go │ │ ├── pkg/ │ │ │ ├── acirenderer/ │ │ │ │ ├── acirenderer.go │ │ │ │ └── resolve.go │ │ │ ├── device/ │ │ │ │ ├── device_linux.go │ │ │ │ └── device_posix.go │ │ │ └── tarheader/ │ │ │ ├── doc.go │ │ │ ├── pop_darwin.go │ │ │ ├── pop_linux.go │ │ │ ├── pop_posix.go │ │ │ └── tarheader.go │ │ └── schema/ │ │ ├── common/ │ │ │ └── common.go │ │ ├── doc.go │ │ ├── image.go │ │ ├── kind.go │ │ ├── pod.go │ │ ├── 
types/ │ │ │ ├── acidentifier.go │ │ │ ├── ackind.go │ │ │ ├── acname.go │ │ │ ├── annotations.go │ │ │ ├── app.go │ │ │ ├── date.go │ │ │ ├── dependencies.go │ │ │ ├── doc.go │ │ │ ├── environment.go │ │ │ ├── errors.go │ │ │ ├── event_handler.go │ │ │ ├── exec.go │ │ │ ├── hash.go │ │ │ ├── isolator.go │ │ │ ├── isolator_linux_specific.go │ │ │ ├── isolator_resources.go │ │ │ ├── isolator_unix.go │ │ │ ├── labels.go │ │ │ ├── mountpoint.go │ │ │ ├── port.go │ │ │ ├── resource/ │ │ │ │ ├── amount.go │ │ │ │ ├── math.go │ │ │ │ ├── quantity.go │ │ │ │ ├── scale_int.go │ │ │ │ └── suffix.go │ │ │ ├── semver.go │ │ │ ├── url.go │ │ │ ├── user_annotations.go │ │ │ ├── user_labels.go │ │ │ ├── uuid.go │ │ │ └── volume.go │ │ └── version.go │ ├── coreos/ │ │ ├── go-semver/ │ │ │ ├── LICENSE │ │ │ ├── example.go │ │ │ └── semver/ │ │ │ ├── semver.go │ │ │ └── sort.go │ │ ├── ioprogress/ │ │ │ ├── LICENSE │ │ │ ├── draw.go │ │ │ └── reader.go │ │ └── pkg/ │ │ ├── LICENSE │ │ ├── NOTICE │ │ └── progressutil/ │ │ ├── iocopy.go │ │ └── progressbar.go │ ├── docker/ │ │ └── distribution/ │ │ ├── LICENSE │ │ ├── blobs.go │ │ ├── digestset/ │ │ │ └── set.go │ │ ├── doc.go │ │ ├── errors.go │ │ ├── manifests.go │ │ ├── reference/ │ │ │ ├── helpers.go │ │ │ ├── normalize.go │ │ │ ├── reference.go │ │ │ └── regexp.go │ │ ├── registry.go │ │ └── tags.go │ ├── klauspost/ │ │ ├── compress/ │ │ │ ├── LICENSE │ │ │ └── flate/ │ │ │ ├── copy.go │ │ │ ├── crc32_amd64.go │ │ │ ├── crc32_amd64.s │ │ │ ├── crc32_noasm.go │ │ │ ├── deflate.go │ │ │ ├── dict_decoder.go │ │ │ ├── gen.go │ │ │ ├── huffman_bit_writer.go │ │ │ ├── huffman_code.go │ │ │ ├── inflate.go │ │ │ ├── reverse_bits.go │ │ │ ├── snappy.go │ │ │ └── token.go │ │ ├── cpuid/ │ │ │ ├── LICENSE │ │ │ ├── cpuid.go │ │ │ ├── cpuid_386.s │ │ │ ├── cpuid_amd64.s │ │ │ ├── detect_intel.go │ │ │ ├── detect_ref.go │ │ │ ├── generate.go │ │ │ └── private-gen.go │ │ ├── crc32/ │ │ │ ├── LICENSE │ │ │ ├── crc32.go │ │ │ ├── crc32_amd64.go 
│ │ │ ├── crc32_amd64.s │ │ │ ├── crc32_amd64p32.go │ │ │ ├── crc32_amd64p32.s │ │ │ └── crc32_generic.go │ │ └── pgzip/ │ │ ├── LICENSE │ │ ├── gunzip.go │ │ └── gzip.go │ ├── opencontainers/ │ │ ├── go-digest/ │ │ │ ├── LICENSE.code │ │ │ ├── LICENSE.docs │ │ │ ├── algorithm.go │ │ │ ├── digest.go │ │ │ ├── digester.go │ │ │ ├── doc.go │ │ │ └── verifiers.go │ │ └── image-spec/ │ │ ├── LICENSE │ │ └── specs-go/ │ │ ├── v1/ │ │ │ ├── config.go │ │ │ ├── descriptor.go │ │ │ ├── manifest.go │ │ │ ├── manifest_list.go │ │ │ └── mediatype.go │ │ ├── version.go │ │ └── versioned.go │ └── spf13/ │ └── pflag/ │ ├── LICENSE │ └── flag.go ├── go4.org/ │ ├── LICENSE │ └── errorutil/ │ └── highlight.go ├── golang.org/ │ └── x/ │ └── crypto/ │ ├── LICENSE │ ├── PATENTS │ └── ssh/ │ └── terminal/ │ ├── terminal.go │ ├── util.go │ ├── util_bsd.go │ ├── util_linux.go │ └── util_windows.go └── gopkg.in/ └── inf.v0/ ├── LICENSE ├── dec.go └── rounder.go ================================================ FILE CONTENTS ================================================ ================================================ FILE: .gitignore ================================================ # Compiled Object files, Static and Dynamic libs (Shared Objects) *.o *.a *.so # Folders _obj _test # Architecture specific extensions/prefixes *.[568vq] [568vq].out *.cgo1.go *.cgo2.c _cgo_defun.c _cgo_gotypes.go _cgo_export.* _testmain.go *.exe *.test *.prof bin/ gopath/ tests/distribution tests/rendered-test-* tests/rkt-uuid-test-* tests/*.docker *.swp *.aci ================================================ FILE: CHANGELOG.md ================================================ ## v0.17.2 This is a bugfix release to ensure compatibility with newer go-1.10 toolchain. - lib/internal: fix tar header format ([#260](https://github.com/appc/docker2aci/pull/260)). - tests: update script to run within gopath ([#261](https://github.com/appc/docker2aci/pull/261)). 
## v0.17.1 This is a bugfix release that fixes pulling certain images from the Google Container Registry. - lib: add signed manifest media type ([#255](https://github.com/appc/docker2aci/pull/255)). ## v0.17.0 This is mostly a bugfix release that fixes a couple of panics and supports additional docker image syntax. - Avoid panicking on scratch images ([#248](https://github.com/appc/docker2aci/pull/248)). - Bugfix/panic on invalid env entry ([#249](https://github.com/appc/docker2aci/pull/249)). - lib/common: update `ParseDockerURL` ([#250](https://github.com/appc/docker2aci/pull/250)). ## v0.16.0 This release adds a manifest hash annotation on converted images and introduces some API changes to allow for more granular control on registries and media types. - Annotate manifest hash ([#237](https://github.com/appc/docker2aci/pull/237)). - Allow selective disabling of registries and media types ([#239](https://github.com/appc/docker2aci/pull/239)). - Update appc/spec to 0.8.10 ([#242](https://github.com/appc/docker2aci/pull/242)). ## v0.15.0 This release improves translation of arch labels and image name annotations. It also changes the default output image filename. - Translate "os" and "arch" labels of image manifest ([#234](https://github.com/appc/docker2aci/pull/234)). - Minor style changes ([#230](https://github.com/appc/docker2aci/pull/230)). - Bump appc/spec library version to 0.8.9 ([#233](https://github.com/appc/docker2aci/pull/233)). - Image from file improvements; guesses at "originalname" and fixes for "--image" ([#229](https://github.com/appc/docker2aci/pull/229)). ## v0.14.0 This release adds compatibility for OCI v1.0.0-rc2 types, introduces support for converting image labels, and fixes some issues related to automatic fallback to registry API v1. - log: introduce Logger interface ([#218](https://github.com/appc/docker2aci/pull/218)). - lib/internal: set UserLabels to be Docker image labels ([#223](https://github.com/appc/docker2aci/pull/223)). 
- fetch: annotate originally requested name ([#224](https://github.com/appc/docker2aci/pull/224)). - types: update OCI image-spec to rc2 ([#226](https://github.com/appc/docker2aci/pull/226)). - lib/internal: fix v2 registry check URL ([#220](https://github.com/appc/docker2aci/pull/220)) - lib/internal: allow auto fallback from v2 API to v1 ([#222](https://github.com/appc/docker2aci/pull/222)). ## v0.13.0 This release adds support for converting local OCI bundles and fixes two security issues (CVE-2016-7569 and CVE-2016-8579). It also includes fixes for several image fetching and conversion bugs. - docker2aci: add support for converting OCI tarfiles ([#200](https://github.com/appc/docker2aci/pull/200)). - docker2aci: additional validation on malformed images ([#204](https://github.com/appc/docker2aci/pull/204)). Fixes (CVE-2016-7569 and CVE-2016-8579). - lib: Use the new media types for oci ([#213](https://github.com/appc/docker2aci/pull/213)). - backend/repository: assume no v2 on unexpected status ([#214](https://github.com/appc/docker2aci/pull/214)). - lib/internal: do not compare tag when pulling by digest ([#207](https://github.com/appc/docker2aci/pull/207)). - lib/internal: re-use uid value when gid is missing ([#206](https://github.com/appc/docker2aci/pull/206)). - lib/internal: add entrypoint/cmd annotations to v21 images ([#199](https://github.com/appc/docker2aci/pull/199)). ## v0.12.3 This is another bugfix release. - lib/repository2: get the correct layer index ([#188](https://github.com/appc/docker2aci/pull/188)). This fixes layer ordering for the Docker API v2.1. - lib/repository2: fix manifest v2.2 layer ordering ([#190](https://github.com/appc/docker2aci/pull/190)). This fixes layer ordering for the Docker API v2.2. ## v0.12.2 This is a bugfix release. - lib/repository2: populate reverseLayers correctly ([#185](https://github.com/appc/docker2aci/pull/185)). It caused converted Image Manifests to have the wrong fields. 
Add a test to make sure this won't go unnoticed again. - tests: remove redundant code and simplify ([#186](https://github.com/appc/docker2aci/pull/186)). ## v0.12.1 This release fixes a couple of bugs, adds image fetching tests, and replaces godep with glide for vendoring. - Replace Godeps with glide ([#174](https://github.com/appc/docker2aci/pull/174)). - Avoid O(N) and fix defer reader close ([#180](https://github.com/appc/docker2aci/pull/180)). - Add golang tests to lib/test to test image fetching ([#181](https://github.com/appc/docker2aci/pull/181)). ## v0.12.0 v0.12.0 introduces support for the Docker v2.2 image format and OCI image format. It also fixes a bug that prevented pulling by digest to work. - backend/repository2: don't ignore when there's an image digest ([#171](https://github.com/appc/docker2aci/pull/171)). - lib/repository2: add support for docker v2.2 and OCI ([#176](https://github.com/appc/docker2aci/pull/176)). ## v0.11.1 v0.11.1 is a bugfix release. - Fix parallel pull synchronisation ([#167](https://github.com/appc/docker2aci/pull/167), [#168](https://github.com/appc/docker2aci/pull/168)). ## v0.11.0 This release splits the `--insecure` flag in two, `--insecure-skip-verify` to skip TLS verification, and `--insecure-allow-http` to allow unencrypted connections when fetching images. It also includes a couple of bugfixes. - Add missing message to channel on successful layer download ([#161](https://github.com/appc/docker2aci/pull/161)). - Fix a panic when a layer being fetched encounters an error ([#162](https://github.com/appc/docker2aci/pull/162)). - Split `--insecure` flag in two ([#163](https://github.com/appc/docker2aci/pull/163)). ## v0.10.0 This release includes two major performance optimizations: parallel layer pull and parallel ACI compression. - Pull layers in parallel ([#158](https://github.com/appc/docker2aci/pull/158)). - Use a parallel compression library ([#157](https://github.com/appc/docker2aci/pull/157)). 
- Fix auth token parsing to handle services with spaces in their names ([#150](https://github.com/appc/docker2aci/pull/150)). ## v0.9.3 v0.9.3 is a minor bug fix release. - Use the default transport when doing HTTP requests ([#147](https://github.com/appc/docker2aci/pull/147)). We were using an empty transport which didn't pass on the proxy configuration. ## v0.9.2 v0.9.2 is a minor release with a bug fix and a cleanup over the previous one. - Use upstream docker functions to parse docker URLs and parse digest ([#140](https://github.com/appc/docker2aci/pull/140)). - Change docker entrypoint/cmd annotations to json ([#142](https://github.com/appc/docker2aci/pull/142)). ## v0.9.1 v0.9.1 is mainly a bugfix and cleanup release. - Remove redundant dependency fetching; we're vendoring dependencies now ([#134](https://github.com/appc/docker2aci/pull/134)). - Export ParseDockerURL which is used by rkt ([#135](https://github.com/appc/docker2aci/pull/135)). - Export annotations so people can use them outside docker2aci ([#135](https://github.com/appc/docker2aci/pull/135)). - Refactor the library so internal functions are in the "internal" package ([#135](https://github.com/appc/docker2aci/pull/135)). - Document release process and add a bump-version script ([#137](https://github.com/appc/docker2aci/pull/137)). ## v0.9.0 v0.9.0 is the initial release of docker2aci. docker2aci converts Docker images to ACI, from a remote repository or from a local file generated with "docker save". It supports v1 and v2 Docker registries, compression, and layer squashing. ================================================ FILE: Documentation/devel/release.md ================================================ # docker2aci release guide How to perform a release of docker2aci. This guide is probably unnecessarily verbose, so improvements welcomed. Only parts of the procedure are automated; this is somewhat intentional (manual steps for sanity checking) but it can probably be further scripted, please help. 
The following example assumes we're going from version 0.9.0 (`v0.9.0`) to 0.9.1 (`v0.9.1`). Let's get started: - Start at the relevant milestone on GitHub (e.g. https://github.com/appc/docker2aci/milestones/v0.9.1): ensure all referenced issues are closed (or moved elsewhere, if they're not done). Close the milestone. - Branch from the latest master, make sure your git status is clean - Ensure the build is clean! - `git clean -ffdx && ./build.sh && ./tests/test.sh` should work - Integration tests on CI should be green - Update the [release notes](https://github.com/appc/docker2aci/blob/master/CHANGELOG.md). Try to capture most of the salient changes since the last release, but don't go into unnecessary detail (better to link/reference the documentation wherever possible). The docker2aci version is [hardcoded in the repository](https://github.com/appc/docker2aci/blob/master/lib/version.go#L19), so the first thing to do is bump it: - Run `scripts/bump-release v0.9.1`. This should generate two commits: a bump to the actual release (e.g. v0.9.1), and then a bump to the release+git (e.g. v0.9.1+git). The actual release version should only exist in a single commit! - Sanity check what the script did with `git diff HEAD^^` or similar. - If the script didn't work, yell at the author and/or fix it. It can almost certainly be improved. - File a PR and get a review from another [MAINTAINER](https://github.com/appc/docker2aci/blob/master/MAINTAINERS). This is useful to a) sanity check the diff, and b) be very explicit/public that a release is happening - Ensure the CI on the release PR is green! After merging and going back to master branch, we check out the release version and tag it: - `git checkout HEAD^` should work; sanity check lib/version.go (the `Version` variable) after doing this - Add a signed tag: `git tag -s v0.9.1`. 
- Build docker2aci - `sudo git clean -ffdx && ./build.sh` - Sanity check `bin/docker2aci -version` - Push the tag to GitHub: `git push --tags` Now we switch to the GitHub web UI to conduct the release: - https://github.com/appc/docker2aci/releases/new - For now, check "This is a pre-release" - Tag "v0.9.1", release title "v0.9.1" - Copy-paste the release notes you added earlier in [CHANGELOG.md](https://github.com/appc/docker2aci/blob/master/CHANGELOG.md) - You can also add a little more detail and polish to the release notes here if you wish, as it is more targeted towards users (vs the changelog being more for developers); use your best judgement and see previous releases on GH for examples. - Attach the release. This is a simple tarball: ``` export NAME="docker2aci-v0.9.1" mkdir $NAME cp bin/docker2aci $NAME/ sudo chown -R root:root $NAME/ tar czvf $NAME.tar.gz --numeric-owner $NAME/ ``` - Attach the release signature; your personal GPG is okay for now: ``` gpg --detach-sign $NAME.tar.gz ``` - Publish the release! - Clean your git tree: `sudo git clean -ffdx`. ================================================ FILE: LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: MAINTAINERS ================================================ Alban Crequy (@alban) Iago López Galeiras (@iaguis) Krzesimir Nowak (@krnowak) ================================================ FILE: README.md ================================================ # docker2aci - Convert docker images to ACI [![Build Status](https://semaphoreci.com/api/v1/projects/4472761c-2b88-41f2-b2de-bf0447a8a290/610597/badge.svg)](https://semaphoreci.com/appc/docker2aci) docker2aci is a small library and CLI binary that converts Docker images to [ACI][aci]. It takes as input either a file generated by "docker save" or a Docker registry URL. It gets all the layers of a Docker image and squashes them into an ACI image. Optionally, it can generate one ACI for each layer, setting the correct dependencies. All ACIs generated are compressed with gzip by default. Compression can be disabled by specifying `--compression=none`. ## Build Requirements: golang 1.6+ git clone git://github.com/appc/docker2aci cd docker2aci ./build.sh ## Volumes Docker Volumes get converted to mountPoints in the [Image Manifest Schema][imageschema]. Since mountPoints need a name and Docker Volumes don't, docker2aci generates a name by appending the path to `volume-` replacing non-alphanumeric characters with dashes. That is, if a Volume has `/var/tmp` as path, the resulting mountPoint name will be `volume-var-tmp`. 
When the docker2aci CLI binary converts a Docker Volume to a mountPoint it will print its name, path and whether it is read-only or not. ## Ports Docker Ports get converted to ports in the [Image Manifest Schema][imageschema]. The resulting port name will be the port number and the protocol separated by a dash. For example: `6379-tcp`. ## CLI examples ``` $ docker2aci docker://busybox Downloading sha256:55dc925c23d: [==============================] 674 KB/674 KB Downloading sha256:a3ed95caeb0: [==============================] 32 B/32 B Generated ACI(s): library-busybox-latest.aci $ actool --debug validate library-busybox-latest.aci library-busybox-latest.aci: valid app container image ``` ``` $ /docker2aci --nosquash docker://quay.io/coreos/etcd:latest Downloading sha256:f05e5379dcb: [==============================] 3.98 MB/3.98 MB Downloading sha256:af1897d2d32: [==============================] 3.5 MB/3.5 MB Downloading sha256:a3ed95caeb0: [==============================] 32 B/32 B Downloading sha256:a3ed95caeb0: [==============================] 32 B/32 B Converted ports: name: "2379-tcp", protocol: "tcp", port: 2379, count: 1, socketActivated: false name: "2380-tcp", protocol: "tcp", port: 2380, count: 1, socketActivated: false name: "4001-tcp", protocol: "tcp", port: 4001, count: 1, socketActivated: false name: "7001-tcp", protocol: "tcp", port: 7001, count: 1, socketActivated: false Generated ACI(s): coreos-etcd-d21dd9a5886270b7c2c379c02fc548e0696b139c43bb12fdb2d9b63409717485-latest-linux-amd64-3.aci coreos-etcd-620329641f386e62c7b0e0fa60a9acef100e71058124ddc7f1969557c72b2458-latest-linux-amd64-2.aci coreos-etcd-9cd3f08f7ccfaad24c73757a5b4f79601f2790726d6ccdd556a82e5c9c5ddbfa-latest-linux-amd64-1.aci coreos-etcd-9cd3f08f7ccfaad24c73757a5b4f79601f2790726d6ccdd556a82e5c9c5ddbfa-latest-linux-amd64-0.aci ``` ``` $ docker save -o ubuntu.docker ubuntu $ docker2aci ubuntu.docker Extracting 706766fe1019 Extracting a62a42e77c9c Extracting 2c014f14d3d9 Extracting 
b7cf8f0d9e82 Generated ACI(s): ubuntu-latest.aci $ actool --debug validate ubuntu-latest.aci ubuntu-latest.aci: valid app container image ``` ``` $ docker2aci docker://redis Downloading sha256:c666c10c893: [==============================] 37.2 MB/37.2 MB Downloading sha256:a3ed95caeb0: [==============================] 32 B/32 B Downloading sha256:d6f52360d0a: [==============================] 1.69 KB/1.69 KB Downloading sha256:8c3a687fd4c: [==============================] 5.93 MB/5.93 MB Downloading sha256:15554e0e598: [==============================] 109 KB/109 KB Downloading sha256:3286d490a29: [==============================] 611 KB/611 KB Downloading sha256:a3ed95caeb0: [==============================] 32 B/32 B Downloading sha256:a3ed95caeb0: [==============================] 32 B/32 B Downloading sha256:a3ed95caeb0: [==============================] 32 B/32 B Downloading sha256:a3d89b95a63: [==============================] 3.04 MB/3.04 MB Downloading sha256:1c4db557158: [==============================] 98 B/98 B Downloading sha256:a3ed95caeb0: [==============================] 32 B/32 B Downloading sha256:a3ed95caeb0: [==============================] 32 B/32 B Downloading sha256:a1a961e320b: [==============================] 196 B/196 B Downloading sha256:a3ed95caeb0: [==============================] 32 B/32 B Downloading sha256:a3ed95caeb0: [==============================] 32 B/32 B Downloading sha256:a3ed95caeb0: [==============================] 32 B/32 B Converted volumes: name: "volume-data", path: "/data", readOnly: false Converted ports: name: "6379-tcp", protocol: "tcp", port: 6379, count: 1, socketActivated: false Generated ACI(s): library-redis-latest.aci $ actool --debug validate library-redis-latest.aci library-redis-latest.aci: valid app container image ``` [aci]: https://github.com/appc/spec/blob/master/SPEC.md#app-container-image [imageschema]: https://github.com/appc/spec/blob/master/spec/aci.md#image-manifest-schema 
================================================ FILE: build.sh ================================================ #!/usr/bin/env bash set -e # Gets the directory that this script is stored in. # https://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" ORG_PATH="github.com/appc" REPO_PATH="${ORG_PATH}/docker2aci" VERSION=$(git describe --dirty --always) GLDFLAGS="-X ${REPO_PATH}/lib.Version=${VERSION}" if [ ! -h ${DIR}/gopath/src/${REPO_PATH} ]; then mkdir -p ${DIR}/gopath/src/${ORG_PATH} cd ${DIR} && ln -s ../../../.. gopath/src/${REPO_PATH} || exit 255 fi export GO15VENDOREXPERIMENT=1 export GOBIN=${DIR}/bin export GOPATH=${DIR}/gopath export GOOS GOARCH eval $(go env) if [ "${GOOS}" = "freebsd" ]; then # /usr/bin/cc is clang on freebsd, but we need to tell it to go to # make it generate proper flavour of code that doesn't emit # warnings. export CC=clang fi echo "Building docker2aci..." go build -o ${GOBIN}/docker2aci -ldflags "${GLDFLAGS}" ${REPO_PATH} ================================================ FILE: glide.yaml ================================================ package: github.com/appc/docker2aci import: - package: github.com/appc/spec version: 0.8.10 subpackages: - aci - pkg/acirenderer - schema - schema/types - package: github.com/coreos/ioprogress version: 4637e494fd9b23c5565ee193e89f91fdc1639bc0 - package: github.com/coreos/pkg version: 2.0.0 subpackages: - progressutil - package: github.com/docker/distribution version: 5db89f0ca68677abc5eefce8f2a0a772c98ba52d subpackages: - reference - package: github.com/klauspost/pgzip version: 1.0.0 - package: github.com/opencontainers/image-spec version: v1.0.0-rc2 subpackages: - specs-go - package: github.com/opencontainers/go-digest version: 21dfd564fd89c944783d00d069f33e3e7123c448 ================================================ FILE: lib/common/common.go ================================================ // Copyright 2016 
The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package common provides misc types and variables. package common import ( "fmt" "regexp" "github.com/appc/docker2aci/lib/internal/docker" "github.com/docker/distribution/reference" spec "github.com/opencontainers/image-spec/specs-go/v1" ) type Compression int const ( NoCompression = iota GzipCompression ) var ( validId = regexp.MustCompile(`^(\w+:)?([A-Fa-f0-9]+)$`) ) const ( // AppcDockerOriginalName is the unmodified name this image was originally // referenced by for fetching, e.g. something like "nginx:tag" or // "quay.io/user/image:latest" This is identical in most cases to // 'registryurl/repository:tag' but may differ for the default Dockerhub // registry or if the tag was inferred as latest. AppcDockerOriginalName = "appc.io/docker/originalname" AppcDockerRegistryURL = "appc.io/docker/registryurl" AppcDockerRepository = "appc.io/docker/repository" AppcDockerTag = "appc.io/docker/tag" AppcDockerImageID = "appc.io/docker/imageid" AppcDockerParentImageID = "appc.io/docker/parentimageid" AppcDockerEntrypoint = "appc.io/docker/entrypoint" AppcDockerCmd = "appc.io/docker/cmd" AppcDockerManifestHash = "appc.io/docker/manifesthash" ) const defaultTag = "latest" // ParsedDockerURL represents a parsed Docker URL. 
type ParsedDockerURL struct { OriginalName string IndexURL string ImageName string Tag string Digest string } type ErrSeveralImages struct { Msg string Images []string } // InsecureConfig represents the different insecure options available type InsecureConfig struct { SkipVerify bool AllowHTTP bool } func (e *ErrSeveralImages) Error() string { return e.Msg } // ParseDockerURL takes a Docker URL and returns a ParsedDockerURL with its // index URL, image name, and tag. func ParseDockerURL(arg string) (*ParsedDockerURL, error) { r, err := reference.ParseNormalizedNamed(arg) if err != nil { return nil, err } var tag, digest string switch x := r.(type) { case reference.Canonical: digest = x.Digest().String() case reference.NamedTagged: tag = x.Tag() default: tag = defaultTag } indexURL, remoteName := docker.SplitReposName(reference.FamiliarName(r)) return &ParsedDockerURL{ OriginalName: arg, IndexURL: indexURL, ImageName: remoteName, Tag: tag, Digest: digest, }, nil } // ValidateLayerId validates a layer ID func ValidateLayerId(id string) error { if ok := validId.MatchString(id); !ok { return fmt.Errorf("invalid layer ID %q", id) } return nil } /* * Media Type Selectors Section */ const ( MediaTypeDockerV21Manifest = "application/vnd.docker.distribution.manifest.v1+json" MediaTypeDockerV21SignedManifest = "application/vnd.docker.distribution.manifest.v1+prettyjws" MediaTypeDockerV21ManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar" MediaTypeDockerV22Manifest = "application/vnd.docker.distribution.manifest.v2+json" MediaTypeDockerV22ManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" MediaTypeDockerV22Config = "application/vnd.docker.container.image.v1+json" MediaTypeDockerV22RootFS = "application/vnd.docker.image.rootfs.diff.tar.gzip" MediaTypeOCIV1Manifest = spec.MediaTypeImageManifest MediaTypeOCIV1ManifestList = spec.MediaTypeImageManifestList MediaTypeOCIV1Config = spec.MediaTypeImageConfig MediaTypeOCIV1Layer = 
spec.MediaTypeImageLayer
)

// MediaTypeOption represents the media types for a given docker image (or oci)
// spec.
type MediaTypeOption int

const (
	MediaTypeOptionDockerV21 = iota
	MediaTypeOptionDockerV22
	MediaTypeOptionOCIV1Pre
)

// MediaTypeSet represents a set of media types which docker2aci is to use when
// fetching images. As an example if a MediaTypeSet is equal to
// {MediaTypeOptionDockerV22, MediaTypeOptionOCIV1Pre}, then when an image pull
// is made V2.1 images will not be fetched. This doesn't apply to V1 pulls. As
// an edge case if a MediaTypeSet is nil or empty, that means that _every_ type
// of media type is enabled. This type is intended to be a set, and putting
// duplicates in this set is generally unadvised.
type MediaTypeSet []MediaTypeOption

// ManifestMediaTypes returns the manifest media types selected by the set.
// An empty (or nil) set selects every known manifest media type.
func (m MediaTypeSet) ManifestMediaTypes() []string {
	if len(m) == 0 {
		return []string{
			MediaTypeDockerV21Manifest,
			MediaTypeDockerV21SignedManifest,
			MediaTypeDockerV22Manifest,
			MediaTypeOCIV1Manifest,
		}
	}
	ret := []string{}
	for _, option := range m {
		switch option {
		case MediaTypeOptionDockerV21:
			ret = append(ret, MediaTypeDockerV21Manifest)
			ret = append(ret, MediaTypeDockerV21SignedManifest)
		case MediaTypeOptionDockerV22:
			ret = append(ret, MediaTypeDockerV22Manifest)
		case MediaTypeOptionOCIV1Pre:
			ret = append(ret, MediaTypeOCIV1Manifest)
		}
	}
	return ret
}

// ConfigMediaTypes returns the image config media types selected by the set.
// An empty (or nil) set selects every known config media type. Note the
// DockerV21 option intentionally contributes nothing here (empty case).
func (m MediaTypeSet) ConfigMediaTypes() []string {
	if len(m) == 0 {
		return []string{
			MediaTypeDockerV22Config,
			MediaTypeOCIV1Config,
		}
	}
	ret := []string{}
	for _, option := range m {
		switch option {
		case MediaTypeOptionDockerV21:
			// no config media type for V2.1 in this set
		case MediaTypeOptionDockerV22:
			ret = append(ret, MediaTypeDockerV22Config)
		case MediaTypeOptionOCIV1Pre:
			ret = append(ret, MediaTypeOCIV1Config)
		}
	}
	return ret
}

// LayerMediaTypes returns the layer media types selected by the set.
// An empty (or nil) set selects every known layer media type. Note the
// DockerV21 option intentionally contributes nothing here (empty case).
func (m MediaTypeSet) LayerMediaTypes() []string {
	if len(m) == 0 {
		return []string{
			MediaTypeDockerV22RootFS,
			MediaTypeOCIV1Layer,
		}
	}
	ret := []string{}
	for _, option := range m {
		switch option {
		case MediaTypeOptionDockerV21:
			// no layer media type for V2.1 in this set
		case MediaTypeOptionDockerV22:
			ret =
append(ret, MediaTypeDockerV22RootFS) case MediaTypeOptionOCIV1Pre: ret = append(ret, MediaTypeOCIV1Layer) } } return ret } // RegistryOption represents a type of a registry, based on the version of the // docker http API. type RegistryOption int const ( RegistryOptionV1 = iota RegistryOptionV2 ) // RegistryOptionSet represents a set of registry types which docker2aci is to // use when fetching images. As an example if a RegistryOptionSet is equal to // {RegistryOptionV2}, then v1 pulls are disabled. As an edge case if a // RegistryOptionSet is nil or empty, that means that _every_ type of registry // is enabled. This type is intended to be a set, and putting duplicates in this // set is generally unadvised. type RegistryOptionSet []RegistryOption func (r RegistryOptionSet) AllowsV1() bool { if len(r) == 0 { return true } for _, o := range r { if o == RegistryOptionV1 { return true } } return false } func (r RegistryOptionSet) AllowsV2() bool { if len(r) == 0 { return true } for _, o := range r { if o == RegistryOptionV2 { return true } } return false } ================================================ FILE: lib/common/common_test.go ================================================ // Copyright 2017 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package common import ( _ "crypto/sha256" "reflect" "testing" ) func TestMediaTypeSet(t *testing.T) { tests := []struct { ms MediaTypeSet expectedManifestTypes []string expectedConfigTypes []string expectedLayerTypes []string }{ { MediaTypeSet{MediaTypeOptionDockerV21}, []string{MediaTypeDockerV21Manifest, MediaTypeDockerV21SignedManifest}, []string{}, []string{}, }, { MediaTypeSet{MediaTypeOptionDockerV22}, []string{MediaTypeDockerV22Manifest}, []string{MediaTypeDockerV22Config}, []string{MediaTypeDockerV22RootFS}, }, { MediaTypeSet{MediaTypeOptionOCIV1Pre}, []string{MediaTypeOCIV1Manifest}, []string{MediaTypeOCIV1Config}, []string{MediaTypeOCIV1Layer}, }, { MediaTypeSet{}, []string{MediaTypeDockerV21Manifest, MediaTypeDockerV21SignedManifest, MediaTypeDockerV22Manifest, MediaTypeOCIV1Manifest}, []string{MediaTypeDockerV22Config, MediaTypeOCIV1Config}, []string{MediaTypeDockerV22RootFS, MediaTypeOCIV1Layer}, }, { MediaTypeSet{MediaTypeOptionDockerV21, MediaTypeOptionDockerV22, MediaTypeOptionOCIV1Pre}, []string{MediaTypeDockerV21Manifest, MediaTypeDockerV21SignedManifest, MediaTypeDockerV22Manifest, MediaTypeOCIV1Manifest}, []string{MediaTypeDockerV22Config, MediaTypeOCIV1Config}, []string{MediaTypeDockerV22RootFS, MediaTypeOCIV1Layer}, }, { MediaTypeSet{MediaTypeOptionDockerV21, MediaTypeOptionOCIV1Pre}, []string{MediaTypeDockerV21Manifest, MediaTypeDockerV21SignedManifest, MediaTypeOCIV1Manifest}, []string{MediaTypeOCIV1Config}, []string{MediaTypeOCIV1Layer}, }, } for _, test := range tests { if !isEqual(test.expectedManifestTypes, test.ms.ManifestMediaTypes()) { t.Errorf("expected manifest media types didn't match what was returned:\n%v\n%v", test.expectedManifestTypes, test.ms.ManifestMediaTypes()) } if !isEqual(test.expectedConfigTypes, test.ms.ConfigMediaTypes()) { t.Errorf("expected config media types didn't match what was returned:\n%v\n%v", test.expectedConfigTypes, test.ms.ConfigMediaTypes()) } if !isEqual(test.expectedLayerTypes, 
test.ms.LayerMediaTypes()) {
			t.Errorf("expected layer media types didn't match what was returned:\n%v\n%v", test.expectedLayerTypes, test.ms.LayerMediaTypes())
		}
	}
}

// TestRegistryOptionSet checks that AllowsV1/AllowsV2 honor the option set and
// that an empty set permits both registry API versions.
func TestRegistryOptionSet(t *testing.T) {
	tests := []struct {
		rs       RegistryOptionSet
		allowsV1 bool
		allowsV2 bool
	}{
		{
			RegistryOptionSet{RegistryOptionV1},
			true,
			false,
		},
		{
			RegistryOptionSet{RegistryOptionV2},
			false,
			true,
		},
		{
			RegistryOptionSet{RegistryOptionV1, RegistryOptionV2},
			true,
			true,
		},
		{
			RegistryOptionSet{},
			true,
			true,
		},
	}
	for _, test := range tests {
		// Fixed: report got/want instead of a fixed (and previously
		// copy-pasted) message; the V2 check used to claim "V1".
		if test.allowsV1 != test.rs.AllowsV1() {
			t.Errorf("AllowsV1() = %v, want %v", test.rs.AllowsV1(), test.allowsV1)
		}
		if test.allowsV2 != test.rs.AllowsV2() {
			t.Errorf("AllowsV2() = %v, want %v", test.rs.AllowsV2(), test.allowsV2)
		}
	}
}

// isEqual reports whether val1 and val2 contain the same strings, ignoring
// order (a set-style comparison; duplicates are not counted distinctly).
func isEqual(val1, val2 []string) bool {
	if len(val1) != len(val2) {
		return false
	}
loop1:
	for _, thing1 := range val1 {
		for _, thing2 := range val2 {
			if thing1 == thing2 {
				continue loop1
			}
		}
		return false
	}
	return true
}

// TestParseDockerURL checks that docker references are normalized to the
// expected index URL, image name, tag, and digest.
func TestParseDockerURL(t *testing.T) {
	tests := []struct {
		input    string
		expected *ParsedDockerURL
	}{
		{
			"busybox",
			&ParsedDockerURL{
				OriginalName: "busybox",
				IndexURL:     "registry-1.docker.io",
				ImageName:    "library/busybox",
				Tag:          "latest",
				Digest:       "",
			},
		},
		{
			"library/busybox",
			&ParsedDockerURL{
				OriginalName: "library/busybox",
				IndexURL:     "registry-1.docker.io",
				ImageName:    "library/busybox",
				Tag:          "latest",
				Digest:       "",
			},
		},
		{
			"docker.io/library/busybox:1",
			&ParsedDockerURL{
				OriginalName: "docker.io/library/busybox:1",
				IndexURL:     "registry-1.docker.io",
				ImageName:    "library/busybox",
				Tag:          "1",
				Digest:       "",
			},
		},
		{
			"docker.io/library/busybox",
			&ParsedDockerURL{
				OriginalName: "docker.io/library/busybox",
				IndexURL:     "registry-1.docker.io",
				ImageName:    "library/busybox",
				Tag:          "latest",
				Digest:       "",
			},
		},
		{
			"gcr.io/google-samples/node-hello:1.0",
			&ParsedDockerURL{
				OriginalName: "gcr.io/google-samples/node-hello:1.0",
				IndexURL:     "gcr.io",
				ImageName:    "google-samples/node-hello",
				Tag:          "1.0",
				Digest:       "",
			},
		},
		{
"alpine@sha256:ea0d1389812f43e474c50155ec4914e1b48792d420820c15cab28c0794034950", &ParsedDockerURL{ OriginalName: "alpine@sha256:ea0d1389812f43e474c50155ec4914e1b48792d420820c15cab28c0794034950", IndexURL: "registry-1.docker.io", ImageName: "library/alpine", Tag: "", Digest: "sha256:ea0d1389812f43e474c50155ec4914e1b48792d420820c15cab28c0794034950", }, }, } for _, test := range tests { parsed, err := ParseDockerURL(test.input) if err != nil && test.expected != nil { t.Errorf("error when parsing %q: %v\nexpected: %+v", test.input, err, test.expected) } else if err == nil && test.expected == nil { t.Errorf("expected %q to result in error\n", test.input) } else if !reflect.DeepEqual(test.expected, parsed) { t.Errorf("expected and parsed `&ParsedDockerURL{}` differ:\nexpected: %+v\nparsed: %+v\n", test.expected, parsed) } } } ================================================ FILE: lib/conversion_store.go ================================================ // Copyright 2015 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package docker2aci import ( "crypto/sha512" "fmt" "hash" "io" "io/ioutil" "os" "github.com/appc/spec/aci" "github.com/appc/spec/schema" "github.com/appc/spec/schema/types" ) const ( hashPrefix = "sha512-" ) type aciInfo struct { path string key string ImageManifest *schema.ImageManifest } // conversionStore is an simple implementation of the acirenderer.ACIRegistry // interface. 
It stores the Docker layers converted to ACI so we can take // advantage of acirenderer to generate a squashed ACI Image. type conversionStore struct { acis map[string]*aciInfo } func newConversionStore() *conversionStore { return &conversionStore{acis: make(map[string]*aciInfo)} } func (ms *conversionStore) WriteACI(path string) (string, error) { f, err := os.Open(path) if err != nil { return "", err } defer f.Close() cr, err := aci.NewCompressedReader(f) if err != nil { return "", err } defer cr.Close() h := sha512.New() r := io.TeeReader(cr, h) // read the file so we can get the hash if _, err := io.Copy(ioutil.Discard, r); err != nil { return "", fmt.Errorf("error reading ACI: %v", err) } im, err := aci.ManifestFromImage(f) if err != nil { return "", err } key := ms.HashToKey(h) ms.acis[key] = &aciInfo{path: path, key: key, ImageManifest: im} return key, nil } func (ms *conversionStore) GetImageManifest(key string) (*schema.ImageManifest, error) { aci, ok := ms.acis[key] if !ok { return nil, fmt.Errorf("aci with key: %s not found", key) } return aci.ImageManifest, nil } func (ms *conversionStore) GetACI(name types.ACIdentifier, labels types.Labels) (string, error) { for _, aci := range ms.acis { // we implement this function to comply with the interface so don't // bother implementing a proper label check if aci.ImageManifest.Name.String() == name.String() { return aci.key, nil } } return "", fmt.Errorf("aci not found") } func (ms *conversionStore) ReadStream(key string) (io.ReadCloser, error) { img, ok := ms.acis[key] if !ok { return nil, fmt.Errorf("stream for key: %s not found", key) } f, err := os.Open(img.path) if err != nil { return nil, fmt.Errorf("error opening aci: %s", img.path) } tr, err := aci.NewCompressedReader(f) if err != nil { return nil, err } return tr, nil } func (ms *conversionStore) ResolveKey(key string) (string, error) { return key, nil } func (ms *conversionStore) HashToKey(h hash.Hash) string { s := h.Sum(nil) return 
fmt.Sprintf("%s%x", hashPrefix, s) } ================================================ FILE: lib/docker2aci.go ================================================ // Copyright 2015 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package docker2aci implements a simple library for converting docker images to // App Container Images (ACIs). package docker2aci import ( "archive/tar" "fmt" "io" "io/ioutil" "os" "path/filepath" "strings" "github.com/appc/docker2aci/lib/common" "github.com/appc/docker2aci/lib/internal" "github.com/appc/docker2aci/lib/internal/backend/file" "github.com/appc/docker2aci/lib/internal/backend/repository" "github.com/appc/docker2aci/lib/internal/docker" "github.com/appc/docker2aci/lib/internal/tarball" "github.com/appc/docker2aci/lib/internal/util" "github.com/appc/docker2aci/pkg/log" "github.com/appc/spec/pkg/acirenderer" "github.com/appc/spec/schema" appctypes "github.com/appc/spec/schema/types" gzip "github.com/klauspost/pgzip" ) // CommonConfig represents the shared configuration options for converting // Docker images. 
type CommonConfig struct { Squash bool // squash the layers in one file OutputDir string // where to put the resulting ACI TmpDir string // directory to use for temporary files Compression common.Compression // which compression to use for the resulting file(s) CurrentManifestHashes []string // any manifest hashes the caller already has Info log.Logger Debug log.Logger } func (c *CommonConfig) initLogger() { if c.Info == nil { c.Info = log.NewStdLogger(os.Stderr) } if c.Debug == nil { c.Debug = log.NewNopLogger() } } // RemoteConfig represents the remote repository specific configuration for // converting Docker images. type RemoteConfig struct { CommonConfig Username string // username to use if the image to convert needs authentication Password string // password to use if the image to convert needs authentication Insecure common.InsecureConfig // Insecure options MediaTypes common.MediaTypeSet RegistryOptions common.RegistryOptionSet } // FileConfig represents the saved file specific configuration for converting // Docker images. type FileConfig struct { CommonConfig DockerURL string // select an image if there are several images/tags in the file, Syntax: "{docker registry URL}/{image name}:{tag}" } // ConvertRemoteRepo generates ACI images from docker registry URLs. It takes // as input a dockerURL of the form: // // {registry URL}/{repository}:{reference[tag|digest]} // // It then gets all the layers of the requested image and converts each of // them to ACI. // It returns the list of generated ACI paths. func ConvertRemoteRepo(dockerURL string, config RemoteConfig) ([]string, error) { config.initLogger() return (&converter{ backend: repository.NewRepositoryBackend( config.Username, config.Password, config.Insecure, config.Debug, config.MediaTypes, config.RegistryOptions, ), dockerURL: dockerURL, config: config.CommonConfig, }).convert() } // ConvertSavedFile generates ACI images from a file generated with "docker // save". 
If there are several images/tags in the file, a particular image can // be chosen via FileConfig.DockerURL. // // It returns the list of generated ACI paths. func ConvertSavedFile(dockerSavedFile string, config FileConfig) ([]string, error) { config.initLogger() f, err := os.Open(dockerSavedFile) if err != nil { return nil, fmt.Errorf("error opening file: %v", err) } defer f.Close() return (&converter{ backend: file.NewFileBackend(f, config.Debug, config.Info), dockerURL: config.DockerURL, config: config.CommonConfig, }).convert() } // GetIndexName returns the docker index server from a docker URL. func GetIndexName(dockerURL string) string { index, _ := docker.SplitReposName(dockerURL) return index } // GetDockercfgAuth reads a ~/.dockercfg file and returns the username and password // of the given docker index server. func GetDockercfgAuth(indexServer string) (string, string, error) { return docker.GetAuthInfo(indexServer) } type converter struct { backend internal.Docker2ACIBackend dockerURL string config CommonConfig } func (c *converter) convert() ([]string, error) { c.config.Debug.Println("Getting image info...") ancestry, manhash, parsedDockerURL, err := c.backend.GetImageInfo(c.dockerURL) if err != nil { return nil, err } if len(ancestry) == 0 { return nil, fmt.Errorf("backend image had no useful layers: not creating ACI") } for _, h := range c.config.CurrentManifestHashes { if manhash == h { return nil, nil } } layersOutputDir := c.config.OutputDir if c.config.Squash { layersOutputDir, err = ioutil.TempDir(c.config.TmpDir, "docker2aci-") if err != nil { return nil, fmt.Errorf("error creating dir: %v", err) } defer os.RemoveAll(layersOutputDir) } conversionStore := newConversionStore() // only compress individual layers if we're not squashing layerCompression := c.config.Compression if c.config.Squash { layerCompression = common.NoCompression } aciLayerPaths, aciManifests, err := c.backend.BuildACI(ancestry, manhash, parsedDockerURL, layersOutputDir, 
c.config.TmpDir, layerCompression) if err != nil { return nil, err } var images acirenderer.Images for i, aciLayerPath := range aciLayerPaths { key, err := conversionStore.WriteACI(aciLayerPath) if err != nil { return nil, fmt.Errorf("error inserting in the conversion store: %v", err) } images = append(images, acirenderer.Image{Im: aciManifests[i], Key: key, Level: uint16(len(aciLayerPaths) - 1 - i)}) } // acirenderer expects images in order from upper to base layer images = util.ReverseImages(images) if c.config.Squash { squashedImagePath, err := squashLayers(images, conversionStore, *parsedDockerURL, c.config.OutputDir, c.config.Compression, c.config.Debug) if err != nil { return nil, fmt.Errorf("error squashing image: %v", err) } aciLayerPaths = []string{squashedImagePath} } return aciLayerPaths, nil } // squashLayers receives a list of ACI layer file names ordered from base image // to application image and squashes them into one ACI func squashLayers(images []acirenderer.Image, aciRegistry acirenderer.ACIRegistry, parsedDockerURL common.ParsedDockerURL, outputDir string, compression common.Compression, debug log.Logger) (path string, err error) { debug.Println("Squashing layers...") debug.Println("Rendering ACI...") renderedACI, err := acirenderer.GetRenderedACIFromList(images, aciRegistry) if err != nil { return "", fmt.Errorf("error rendering squashed image: %v", err) } manifests, err := getManifests(renderedACI, aciRegistry) if err != nil { return "", fmt.Errorf("error getting manifests: %v", err) } squashedFilename := getSquashedFilename(parsedDockerURL) squashedImagePath := filepath.Join(outputDir, squashedFilename) squashedTempFile, err := ioutil.TempFile(outputDir, "docker2aci-squashedFile-") if err != nil { return "", err } defer func() { if err == nil { err = squashedTempFile.Close() } else { // remove temp file on error // we ignore its error to not mask the real error os.Remove(squashedTempFile.Name()) } }() debug.Println("Writing squashed ACI...") 
if err := writeSquashedImage(squashedTempFile, renderedACI, aciRegistry, manifests, compression); err != nil { return "", fmt.Errorf("error writing squashed image: %v", err) } debug.Println("Validating squashed ACI...") if err := internal.ValidateACI(squashedTempFile.Name()); err != nil { return "", fmt.Errorf("error validating image: %v", err) } if err := os.Rename(squashedTempFile.Name(), squashedImagePath); err != nil { return "", err } debug.Println("ACI squashed!") return squashedImagePath, nil } func getSquashedFilename(parsedDockerURL common.ParsedDockerURL) string { squashedFilename := strings.Replace(parsedDockerURL.ImageName, "/", "-", -1) if parsedDockerURL.Tag != "" { squashedFilename += "-" + parsedDockerURL.Tag } squashedFilename += ".aci" return squashedFilename } func getManifests(renderedACI acirenderer.RenderedACI, aciRegistry acirenderer.ACIRegistry) ([]schema.ImageManifest, error) { var manifests []schema.ImageManifest for _, aci := range renderedACI { im, err := aciRegistry.GetImageManifest(aci.Key) if err != nil { return nil, err } manifests = append(manifests, *im) } return manifests, nil } func writeSquashedImage(outputFile *os.File, renderedACI acirenderer.RenderedACI, aciProvider acirenderer.ACIProvider, manifests []schema.ImageManifest, compression common.Compression) error { var tarWriterTarget io.WriteCloser = outputFile switch compression { case common.NoCompression: case common.GzipCompression: tarWriterTarget = gzip.NewWriter(outputFile) defer tarWriterTarget.Close() default: return fmt.Errorf("unexpected compression enum value: %d", compression) } outputWriter := tar.NewWriter(tarWriterTarget) defer outputWriter.Close() finalManifest := mergeManifests(manifests) if err := internal.WriteManifest(outputWriter, finalManifest); err != nil { return err } if err := internal.WriteRootfsDir(outputWriter); err != nil { return err } type hardLinkEntry struct { firstLinkCleanName string firstLinkHeader tar.Header keepOriginal bool walked bool 
} // map aciFileKey -> cleanTarget -> hardLinkEntry hardLinks := make(map[string]map[string]hardLinkEntry) // first pass: read all the entries and build the hardLinks map in memory // but don't write on disk for _, aciFile := range renderedACI { rs, err := aciProvider.ReadStream(aciFile.Key) if err != nil { return err } defer rs.Close() hardLinks[aciFile.Key] = map[string]hardLinkEntry{} squashWalker := func(t *tarball.TarFile) error { cleanName := filepath.Clean(t.Name()) // the rootfs and the squashed manifest are added separately if cleanName == "manifest" || cleanName == "rootfs" { return nil } _, keep := aciFile.FileMap[cleanName] if keep && t.Header.Typeflag == tar.TypeLink { cleanTarget := filepath.Clean(t.Linkname()) if _, ok := hardLinks[aciFile.Key][cleanTarget]; !ok { _, keepOriginal := aciFile.FileMap[cleanTarget] hardLinks[aciFile.Key][cleanTarget] = hardLinkEntry{cleanName, *t.Header, keepOriginal, false} } } return nil } tr := tar.NewReader(rs) if err := tarball.Walk(*tr, squashWalker); err != nil { return err } } // second pass: write on disk for _, aciFile := range renderedACI { rs, err := aciProvider.ReadStream(aciFile.Key) if err != nil { return err } defer rs.Close() squashWalker := func(t *tarball.TarFile) error { cleanName := filepath.Clean(t.Name()) // the rootfs and the squashed manifest are added separately if cleanName == "manifest" || cleanName == "rootfs" { return nil } _, keep := aciFile.FileMap[cleanName] if link, ok := hardLinks[aciFile.Key][cleanName]; ok { if keep != link.keepOriginal { return fmt.Errorf("logic error: should we keep file %q?", cleanName) } if keep { if err := outputWriter.WriteHeader(t.Header); err != nil { return fmt.Errorf("error writing header: %v", err) } if _, err := io.Copy(outputWriter, t.TarStream); err != nil { return fmt.Errorf("error copying file into the tar out: %v", err) } } else { // The current file does not remain but there is a hard link pointing to // it. 
Write the current file but with the filename of the first hard link // pointing to it. That first hard link will not be written later, see // variable "alreadyWritten". link.firstLinkHeader.Size = t.Header.Size link.firstLinkHeader.Typeflag = t.Header.Typeflag link.firstLinkHeader.Linkname = "" if err := outputWriter.WriteHeader(&link.firstLinkHeader); err != nil { return fmt.Errorf("error writing header: %v", err) } if _, err := io.Copy(outputWriter, t.TarStream); err != nil { return fmt.Errorf("error copying file into the tar out: %v", err) } } } else if keep { alreadyWritten := false if t.Header.Typeflag == tar.TypeLink { cleanTarget := filepath.Clean(t.Linkname()) if link, ok := hardLinks[aciFile.Key][cleanTarget]; ok { if !link.keepOriginal { if link.walked { t.Header.Linkname = link.firstLinkCleanName } else { alreadyWritten = true } } link.walked = true hardLinks[aciFile.Key][cleanTarget] = link } } if !alreadyWritten { if err := outputWriter.WriteHeader(t.Header); err != nil { return fmt.Errorf("error writing header: %v", err) } if _, err := io.Copy(outputWriter, t.TarStream); err != nil { return fmt.Errorf("error copying file into the tar out: %v", err) } } } return nil } tr := tar.NewReader(rs) if err := tarball.Walk(*tr, squashWalker); err != nil { return err } } return nil } func mergeManifests(manifests []schema.ImageManifest) schema.ImageManifest { // FIXME(iaguis) we take app layer's manifest as the final manifest for now manifest := manifests[0] manifest.Dependencies = nil layerIndex := -1 for i, l := range manifest.Labels { if l.Name.String() == "layer" { layerIndex = i } } if layerIndex != -1 { manifest.Labels = append(manifest.Labels[:layerIndex], manifest.Labels[layerIndex+1:]...) 
} nameWithoutLayerID := appctypes.MustACIdentifier(stripLayerID(manifest.Name.String())) manifest.Name = *nameWithoutLayerID // once the image is squashed, we don't need a pathWhitelist manifest.PathWhitelist = nil return manifest } // striplayerID strips the layer ID from an app name: // // myregistry.com/organization/app-name-85738f8f9a7f1b04b5329c590ebcb9e425925c6d0984089c43a022de4f19c281 // myregistry.com/organization/app-name func stripLayerID(layerName string) string { n := strings.LastIndex(layerName, "-") return layerName[:n] } ================================================ FILE: lib/internal/backend/file/file.go ================================================ // Copyright 2015 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package file is an implementation of Docker2ACIBackend for files saved via // "docker save". // // Note: this package is an implementation detail and shouldn't be used outside // of docker2aci. 
package file

import (
	"archive/tar"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path"
	"path/filepath"
	"strings"

	"github.com/appc/docker2aci/lib/common"
	"github.com/appc/docker2aci/lib/internal"
	"github.com/appc/docker2aci/lib/internal/tarball"
	"github.com/appc/docker2aci/lib/internal/types"
	"github.com/appc/docker2aci/lib/internal/typesV2"
	"github.com/appc/docker2aci/pkg/log"
	"github.com/appc/spec/schema"
	spec "github.com/opencontainers/image-spec/specs-go/v1"
)

// FileBackend converts Docker images stored in a local file (the output of
// "docker save") into ACIs.
type FileBackend struct {
	file        *os.File
	debug, info log.Logger
}

// NewFileBackend creates a FileBackend backed by the given open file.
func NewFileBackend(file *os.File, debug, info log.Logger) *FileBackend {
	return &FileBackend{
		file:  file,
		debug: debug,
		info:  info,
	}
}

// GetImageInfo, given the url for a docker image, will return the
// following:
// - []string: an ordered list of all layer hashes
// - string: a unique identifier for this image, like a hash of the manifest
// - *common.ParsedDockerURL: a parsed docker URL
// - error: an error if one occurred
func (lb *FileBackend) GetImageInfo(dockerURL string) ([]string, string, *common.ParsedDockerURL, error) {
	// a missing Docker URL could mean that the file only contains one
	// image so it's okay for dockerURL to be blank
	var parsedDockerURL *common.ParsedDockerURL
	if dockerURL != "" {
		var err error
		parsedDockerURL, err = common.ParseDockerURL(dockerURL)
		if err != nil {
			// fixed typo in error message ("couldnot" -> "could not")
			return nil, "", nil, fmt.Errorf("image provided could not be parsed: %v", err)
		}
	}
	var ancestry []string
	// default file name is the tar name stripped
	name := strings.Split(filepath.Base(lb.file.Name()), ".")[0]
	appImageID, ancestry, parsedDockerURL, err := getImageID(lb.file, parsedDockerURL, name, lb.debug)
	if err != nil {
		return nil, "", nil, err
	}
	if len(ancestry) == 0 {
		ancestry, err = getAncestry(lb.file, appImageID, lb.debug)
		if err != nil {
			return nil, "", nil, fmt.Errorf("error getting ancestry: %v", err)
		}
	} else {
		// for oci the first image is the config
		ancestry = append([]string{appImageID}, ancestry...)
	}
	return ancestry, appImageID, parsedDockerURL, nil
}

// BuildACI builds ACIs for the given layer IDs, dispatching to the OCI/v2.2
// path when the layer IDs carry a digest algorithm prefix ("algo:hash").
func (lb *FileBackend) BuildACI(layerIDs []string, manhash string, dockerURL *common.ParsedDockerURL, outputDir string, tmpBaseDir string, compression common.Compression) ([]string, []*schema.ImageManifest, error) {
	if strings.Contains(layerIDs[0], ":") {
		return lb.BuildACIV22(layerIDs, manhash, dockerURL, outputDir, tmpBaseDir, compression)
	}
	var aciLayerPaths []string
	var aciManifests []*schema.ImageManifest
	var curPwl []string

	tmpDir, err := ioutil.TempDir(tmpBaseDir, "docker2aci-")
	if err != nil {
		return nil, nil, fmt.Errorf("error creating dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	// layers are stored base-first in the ancestry, so walk it backwards
	for i := len(layerIDs) - 1; i >= 0; i-- {
		if err := common.ValidateLayerId(layerIDs[i]); err != nil {
			return nil, nil, err
		}
		j, err := getJson(lb.file, layerIDs[i])
		if err != nil {
			return nil, nil, fmt.Errorf("error getting layer json: %v", err)
		}
		layerData := types.DockerImageData{}
		if err := json.Unmarshal(j, &layerData); err != nil {
			return nil, nil, fmt.Errorf("error unmarshaling layer data: %v", err)
		}
		tmpLayerPath := path.Join(tmpDir, layerIDs[i])
		tmpLayerPath += ".tar"
		layerTarPath := path.Join(layerIDs[i], "layer.tar")
		layerFile, err := extractEmbeddedLayer(lb.file, layerTarPath, tmpLayerPath, lb.info)
		if err != nil {
			return nil, nil, fmt.Errorf("error getting layer from file: %v", err)
		}
		defer layerFile.Close()
		lb.debug.Println("Generating layer ACI...")
		aciPath, manifest, err := internal.GenerateACI(i, manhash, layerData, dockerURL, outputDir, layerFile, curPwl, compression, lb.debug)
		if err != nil {
			return nil, nil, fmt.Errorf("error generating ACI: %v", err)
		}
		aciLayerPaths = append(aciLayerPaths, aciPath)
		aciManifests = append(aciManifests, manifest)
		curPwl = manifest.PathWhitelist
	}
	return aciLayerPaths, aciManifests, nil
}

// BuildACIV22 builds ACIs for an OCI-layout image. layerIDs[0] is the image
// config digest; the remaining entries are layer digests ("algo:hash").
func (lb *FileBackend) BuildACIV22(layerIDs []string, manhash string, dockerURL *common.ParsedDockerURL, outputDir string, tmpBaseDir string, compression common.Compression) ([]string, []*schema.ImageManifest, error) {
	if len(layerIDs) < 2 {
		return nil, nil, fmt.Errorf("insufficient layers for oci image")
	}
	var aciLayerPaths []string
	var aciManifests []*schema.ImageManifest
	var curPwl []string
	imageID := layerIDs[0]
	layerIDs = layerIDs[1:]
	j, err := getJsonV22(lb.file, imageID)
	if err != nil {
		return nil, nil, fmt.Errorf("error getting layer from file: %v", err)
	}
	imageConfig := typesV2.ImageConfig{}
	if err := json.Unmarshal(j, &imageConfig); err != nil {
		return nil, nil, fmt.Errorf("error unmarshaling image data: %v", err)
	}
	tmpDir, err := ioutil.TempDir(tmpBaseDir, "docker2aci-")
	if err != nil {
		return nil, nil, fmt.Errorf("error creating dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)
	for i := len(layerIDs) - 1; i >= 0; i-- {
		parts := strings.Split(layerIDs[i], ":")
		tmpLayerPath := path.Join(tmpDir, parts[1])
		tmpLayerPath += ".tar"
		// OCI layout stores blobs under blobs/<algo>/<hash>
		layerTarPath := path.Join(append([]string{"blobs"}, parts...)...)
		layerFile, err := extractEmbeddedLayer(lb.file, layerTarPath, tmpLayerPath, lb.info)
		if err != nil {
			return nil, nil, fmt.Errorf("error getting layer from file: %v", err)
		}
		defer layerFile.Close()
		lb.debug.Println("Generating layer ACI...")
		var aciPath string
		var manifest *schema.ImageManifest
		if i != 0 {
			aciPath, manifest, err = internal.GenerateACI22LowerLayer(dockerURL, parts[1], outputDir, layerFile, curPwl, compression)
		} else {
			aciPath, manifest, err = internal.GenerateACI22TopLayer(dockerURL, manhash, &imageConfig, parts[1], outputDir, layerFile, curPwl, compression, aciManifests, lb.debug)
		}
		if err != nil {
			return nil, nil, fmt.Errorf("error generating ACI: %v", err)
		}
		aciLayerPaths = append(aciLayerPaths, aciPath)
		aciManifests = append(aciManifests, manifest)
		curPwl = manifest.PathWhitelist
	}
	return aciLayerPaths, aciManifests, nil
}

// getImageID scans the tar for either a "repositories" file (docker save
// format) or a "refs/<tag>" descriptor (OCI layout) and resolves the image ID
// (and, for OCI, the ancestry) for the requested tag.
func getImageID(file *os.File, dockerURL *common.ParsedDockerURL, name string, debug log.Logger) (string, []string, *common.ParsedDockerURL, error) {
	debug.Println("getting image id...")
	type tags map[string]string
	type apps map[string]tags
	_, err := file.Seek(0, 0)
	if err != nil {
		return "", nil, nil, fmt.Errorf("error seeking file: %v", err)
	}
	tag := "latest"
	if dockerURL != nil {
		tag = dockerURL.Tag
	}
	var imageID string
	var ancestry []string
	var appName string
	reposWalker := func(t *tarball.TarFile) error {
		clean := filepath.Clean(t.Name())
		if clean == "repositories" {
			repob, err := ioutil.ReadAll(t.TarStream)
			if err != nil {
				return fmt.Errorf("error reading repositories file: %v", err)
			}
			var unparsedRepositories apps
			if err := json.Unmarshal(repob, &unparsedRepositories); err != nil {
				return fmt.Errorf("error unmarshaling repositories file")
			}
			repositories := make(apps)
			// Normalize repository keys since the image potentially passed in is
			// normalized
			for key, val := range unparsedRepositories {
				parsed, err := common.ParseDockerURL(key)
				if err != nil {
					return fmt.Errorf("error parsing key %q in repositories: %v", key, err)
				}
				repositories[parsed.ImageName] = val
			}
			if dockerURL == nil {
				n := len(repositories)
				switch {
				case n == 1:
					// only one image in the file: take it
					for key := range repositories {
						appName = key
					}
				case n > 1:
					var appNames []string
					for key := range repositories {
						appNames = append(appNames, key)
					}
					return &common.ErrSeveralImages{
						Msg:    "several images found",
						Images: appNames,
					}
				default:
					return fmt.Errorf("no images found")
				}
			} else {
				appName = dockerURL.ImageName
			}
			app, ok := repositories[appName]
			if !ok {
				return fmt.Errorf("app %q not found", appName)
			}
			_, ok = app[tag]
			if !ok {
				if len(app) == 1 {
					// single-tag image: accept its tag in place of the default
					for key := range app {
						tag = key
					}
				} else {
					return fmt.Errorf("tag %q not found", tag)
				}
			}
			if dockerURL == nil {
				dockerURL = &common.ParsedDockerURL{
					OriginalName: "",
					IndexURL:     "",
					Tag:          tag,
					ImageName:    appName,
				}
			}
			imageID = app[tag]
		}
		if clean == "refs/"+tag {
			refb, err := ioutil.ReadAll(t.TarStream)
			if err != nil {
				return fmt.Errorf("error reading ref descriptor for tag %s: %v", tag, err)
			}
			if dockerURL == nil {
				dockerURL = &common.ParsedDockerURL{
					IndexURL:  "",
					Tag:       tag,
					ImageName: name,
				}
			}
			var ref spec.Descriptor
			if err := json.Unmarshal(refb, &ref); err != nil {
				return fmt.Errorf("error unmarshaling ref descriptor for tag %s", tag)
			}
			imageID, ancestry, err = getDataFromManifest(file, ref.Digest)
			if err != nil {
				return err
			}
			// stop walking: io.EOF is treated as success by the caller
			return io.EOF
		}
		return nil
	}
	tr := tar.NewReader(file)
	if err := tarball.Walk(*tr, reposWalker); err != nil && err != io.EOF {
		return "", nil, nil, err
	}
	if imageID == "" {
		return "", nil, nil, fmt.Errorf("Could not find image")
	}
	return imageID, ancestry, dockerURL, nil
}

// getDataFromManifest reads the OCI image manifest blob identified by
// manifestID and returns the config digest and the layer digests in reverse
// (top-first) order.
func getDataFromManifest(file *os.File, manifestID string) (string, []string, error) {
	_, err := file.Seek(0, 0)
	if err != nil {
		return "", nil, fmt.Errorf("error seeking file: %v", err)
	}
	parts := append([]string{"blobs"}, strings.Split(manifestID, ":")...)
	jsonPath := path.Join(parts...)
	var imageID string
	var ancestry []string
	reposWalker := func(t *tarball.TarFile) error {
		clean := filepath.Clean(t.Name())
		if clean == jsonPath {
			manb, err := ioutil.ReadAll(t.TarStream)
			if err != nil {
				return fmt.Errorf("error reading image manifest: %v", err)
			}
			var manifest typesV2.ImageManifest
			if err := json.Unmarshal(manb, &manifest); err != nil {
				return fmt.Errorf("error unmarshaling image manifest")
			}
			if manifest.Config == nil {
				return fmt.Errorf("manifest does not contain a config")
			}
			imageID = manifest.Config.Digest
			// put them in reverse order
			for i := len(manifest.Layers) - 1; i >= 0; i-- {
				ancestry = append(ancestry, manifest.Layers[i].Digest)
			}
		}
		return nil
	}
	tr := tar.NewReader(file)
	if err := tarball.Walk(*tr, reposWalker); err != nil {
		return "", nil, err
	}
	return imageID, ancestry, nil
}

// getJson returns the bytes of <layerID>/json from a docker-save tar.
func getJson(file *os.File, layerID string) ([]byte, error) {
	jsonPath := path.Join(layerID, "json")
	return getTarFileBytes(file, jsonPath)
}

// getJsonV22 returns the bytes of the OCI blob for layerID ("algo:hash").
func getJsonV22(file *os.File, layerID string) ([]byte, error) {
	parts := append([]string{"blobs"}, strings.Split(layerID, ":")...)
	jsonPath := path.Join(parts...)
	return getTarFileBytes(file, jsonPath)
}

// getTarFileBytes walks the tar and returns the contents of the entry whose
// cleaned name equals path, or an error if no such entry exists.
func getTarFileBytes(file *os.File, path string) ([]byte, error) {
	_, err := file.Seek(0, 0)
	if err != nil {
		return nil, fmt.Errorf("error seeking file: %v", err)
	}
	var fileBytes []byte
	fileWalker := func(t *tarball.TarFile) error {
		if filepath.Clean(t.Name()) == path {
			fileBytes, err = ioutil.ReadAll(t.TarStream)
			if err != nil {
				return err
			}
		}
		return nil
	}
	tr := tar.NewReader(file)
	if err := tarball.Walk(*tr, fileWalker); err != nil {
		return nil, err
	}
	if fileBytes == nil {
		return nil, fmt.Errorf("file %q not found", path)
	}
	return fileBytes, nil
}

// extractEmbeddedLayer copies the tar entry layerTarPath out to outputPath
// and returns the created file (still open), or an error if not found.
func extractEmbeddedLayer(file *os.File, layerTarPath string, outputPath string, info log.Logger) (*os.File, error) {
	info.Println("Extracting ", layerTarPath)
	_, err := file.Seek(0, 0)
	if err != nil {
		return nil, fmt.Errorf("error seeking file: %v", err)
	}
	var layerFile *os.File
	fileWalker := func(t *tarball.TarFile) error {
		if filepath.Clean(t.Name()) == layerTarPath {
			layerFile, err = os.Create(outputPath)
			if err != nil {
				return fmt.Errorf("error creating layer: %v", err)
			}
			_, err = io.Copy(layerFile, t.TarStream)
			if err != nil {
				return fmt.Errorf("error getting layer: %v", err)
			}
		}
		return nil
	}
	tr := tar.NewReader(file)
	if err := tarball.Walk(*tr, fileWalker); err != nil {
		return nil, err
	}
	if layerFile == nil {
		return nil, fmt.Errorf("file %q not found", layerTarPath)
	}
	return layerFile, nil
}

// getAncestry computes an image ancestry, returning an ordered list
// of dependencies starting from the topmost image to the base.
// It checks for dependency loops via duplicate detection in the image
// chain and errors out in such cases.
func getAncestry(file *os.File, imgID string, debug log.Logger) ([]string, error) {
	var ancestry []string
	// deps records every image ID already visited, for loop detection
	deps := make(map[string]bool)
	curImgID := imgID
	var err error
	for curImgID != "" {
		if deps[curImgID] {
			return nil, fmt.Errorf("dependency loop detected at image %q", curImgID)
		}
		deps[curImgID] = true
		ancestry = append(ancestry, curImgID)
		debug.Printf("Getting ancestry for layer %q", curImgID)
		// getParent returns "" when the layer has no parent, ending the walk
		curImgID, err = getParent(file, curImgID, debug)
		if err != nil {
			return nil, err
		}
	}
	return ancestry, nil
}

// getParent returns the parent image ID of imgID as recorded in the layer's
// embedded "<imgID>/json" file, or "" when the layer has no parent field.
func getParent(file *os.File, imgID string, debug log.Logger) (string, error) {
	var parent string
	// the tar is scanned from the start on every call
	_, err := file.Seek(0, 0)
	if err != nil {
		return "", fmt.Errorf("error seeking file: %v", err)
	}
	jsonPath := filepath.Join(imgID, "json")
	parentWalker := func(t *tarball.TarFile) error {
		if filepath.Clean(t.Name()) == jsonPath {
			jsonb, err := ioutil.ReadAll(t.TarStream)
			if err != nil {
				return fmt.Errorf("error reading layer json: %v", err)
			}
			var dockerData types.DockerImageData
			if err := json.Unmarshal(jsonb, &dockerData); err != nil {
				return fmt.Errorf("error unmarshaling layer data: %v", err)
			}
			parent = dockerData.Parent
		}
		return nil
	}
	tr := tar.NewReader(file)
	if err := tarball.Walk(*tr, parentWalker); err != nil {
		return "", err
	}
	debug.Printf("Layer %q depends on layer %q", imgID, parent)
	return parent, nil
}

================================================
FILE: lib/internal/backend/repository/repository.go
================================================
// Copyright 2015 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package repository is an implementation of Docker2ACIBackend for Docker
// remote registries.
//
// Note: this package is an implementation detail and shouldn't be used outside
// of docker2aci.
package repository

import (
	"fmt"
	"net/http"
	"net/url"

	"github.com/appc/docker2aci/lib/common"
	"github.com/appc/docker2aci/lib/internal/typesV2"
	"github.com/appc/docker2aci/lib/internal/util"
	"github.com/appc/docker2aci/pkg/log"
	"github.com/appc/spec/schema"
)

// registryVersion distinguishes the Docker registry API generations this
// backend can talk to.
type registryVersion int

const (
	registryV1 registryVersion = iota
	registryV2
)

// httpStatusErr reports an unexpected HTTP status for a given URL.
type httpStatusErr struct {
	StatusCode int
	URL        *url.URL
}

func (e httpStatusErr) Error() string {
	return fmt.Sprintf("Unexpected HTTP code: %d, URL: %s", e.StatusCode, e.URL.String())
}

// isErrHTTP404 reports whether err is an httpStatusErr carrying 404.
// Returns false for nil and for any other error type.
func isErrHTTP404(err error) bool {
	if httperr, ok := err.(*httpStatusErr); ok && httperr.StatusCode == http.StatusNotFound {
		return true
	}
	return false
}

type RepositoryBackend struct {
	repoData          *RepoData
	username          string
	password          string
	insecure          common.InsecureConfig
	hostsV1fallback   bool
	hostsV2Support    map[string]bool
	hostsV2AuthTokens map[string]map[string]string
	schema            string
	imageManifests    map[common.ParsedDockerURL]v2Manifest
	imageV2Manifests  map[common.ParsedDockerURL]*typesV2.ImageManifest
	imageConfigs      map[common.ParsedDockerURL]*typesV2.ImageConfig
	layersIndex       map[string]int
	mediaTypes        common.MediaTypeSet
	registryOptions   common.RegistryOptionSet
	debug             log.Logger
}

// NewRepositoryBackend creates a RepositoryBackend with the given
// credentials, TLS/HTTP policy and registry/media-type options.
func NewRepositoryBackend(username, password string, insecure common.InsecureConfig, debug log.Logger, mediaTypes common.MediaTypeSet, registryOptions common.RegistryOptionSet) *RepositoryBackend {
	return &RepositoryBackend{
		username:          username,
		password:          password,
		insecure:          insecure,
		hostsV1fallback:   false,
		hostsV2Support:    make(map[string]bool),
		hostsV2AuthTokens: make(map[string]map[string]string),
		imageManifests:    make(map[common.ParsedDockerURL]v2Manifest),
		imageV2Manifests:  make(map[common.ParsedDockerURL]*typesV2.ImageManifest),
		imageConfigs:      make(map[common.ParsedDockerURL]*typesV2.ImageConfig),
		layersIndex:       make(map[string]int),
		mediaTypes:        mediaTypes,
		registryOptions:   registryOptions,
		debug:             debug,
	}
}

// GetImageInfo, given the url for a docker image, will return the
// following:
// - []string: an ordered list of all layer hashes
// - string: a unique identifier for this image, like a hash of the manifest
// - *common.ParsedDockerURL: a parsed docker URL
// - error: an error if one occurred
func (rb *RepositoryBackend) GetImageInfo(url string) ([]string, string, *common.ParsedDockerURL, error) {
	dockerURL, err := common.ParseDockerURL(url)
	if err != nil {
		return nil, "", nil, err
	}
	var supportsV2, supportsV1, ok bool
	var URLSchema string
	// v2 support per host is probed once and cached
	if supportsV2, ok = rb.hostsV2Support[dockerURL.IndexURL]; !ok {
		var err error
		URLSchema, supportsV2, err = rb.supportsRegistry(dockerURL.IndexURL, registryV2)
		if err != nil {
			return nil, "", nil, err
		}
		rb.schema = URLSchema + "://"
		rb.hostsV2Support[dockerURL.IndexURL] = supportsV2
	}
	// try v2
	if supportsV2 && rb.registryOptions.AllowsV2() {
		layers, manhash, dockerURL, err := rb.getImageInfoV2(dockerURL)
		// anything but a 404 (including success, err == nil) is final here
		if !isErrHTTP404(err) {
			return layers, manhash, dockerURL, err
		}
		// fallback on 404 failure
		rb.hostsV1fallback = true
		// unless we can't fallback
		if !rb.registryOptions.AllowsV1() {
			return nil, "", nil, err
		}
	}
	if !rb.registryOptions.AllowsV1() {
		return nil, "", nil, fmt.Errorf("no remaining enabled registry options")
	}
	URLSchema, supportsV1, err = rb.supportsRegistry(dockerURL.IndexURL, registryV1)
	if err != nil {
		return nil, "", nil, err
	}
	if !supportsV1 && rb.hostsV1fallback {
		return nil, "", nil, fmt.Errorf("attempted fallback to API v1 but not supported")
	}
	if !supportsV1 && !supportsV2 {
		return nil, "", nil, fmt.Errorf("registry doesn't support API v2 nor v1")
	}
	rb.schema = URLSchema + "://"
	// try v1, hard fail on failure
	return rb.getImageInfoV1(dockerURL)
}

// BuildACI dispatches to the v1 or v2 conversion path depending on which API
// the host supports and whether a v1 fallback was triggered earlier.
func (rb *RepositoryBackend) BuildACI(layerIDs []string, manhash string, dockerURL *common.ParsedDockerURL, outputDir string, tmpBaseDir string, compression common.Compression) ([]string, []*schema.ImageManifest, error) {
	if rb.hostsV1fallback || !rb.hostsV2Support[dockerURL.IndexURL] {
		return rb.buildACIV1(layerIDs, manhash, dockerURL, outputDir, tmpBaseDir, compression)
	} else {
		return rb.buildACIV2(layerIDs, manhash, dockerURL, outputDir, tmpBaseDir, compression)
	}
}

// checkRegistryStatus determines registry API version compatibility according to spec:
// https://docs.docker.com/registry/spec/api/#/api-version-check
func checkRegistryStatus(statusCode int, hdr http.Header, version registryVersion) (bool, error) {
	switch statusCode {
	case http.StatusOK, http.StatusUnauthorized:
		ok := true
		if version == registryV2 {
			// According to v2 spec, registries SHOULD set this header value
			// and clients MAY fallback to v1 if missing, as done here.
			ok = hdr.Get("Docker-Distribution-API-Version") == "registry/2.0"
		}
		return ok, nil
	}
	return false, nil
}

// supportsRegistry pings indexURL's version-check endpoint over https and,
// if that fails and plain HTTP is allowed, over http. It returns the schema
// that worked and whether the requested API version is supported.
func (rb *RepositoryBackend) supportsRegistry(indexURL string, version registryVersion) (schema string, ok bool, err error) {
	var URLPath string
	switch version {
	case registryV1:
		URLPath = "v1/_ping"
	case registryV2:
		URLPath = "v2/"
	}
	fetch := func(schema string) (res *http.Response, err error) {
		u := url.URL{Scheme: schema, Host: indexURL, Path: URLPath}
		req, err := http.NewRequest("GET", u.String(), nil)
		if err != nil {
			return nil, err
		}
		rb.setBasicAuth(req)
		client := util.GetTLSClient(rb.insecure.SkipVerify)
		res, err = client.Do(req)
		return
	}
	schema = "https"
	res, err := fetch(schema)
	if err == nil {
		ok, err = checkRegistryStatus(res.StatusCode, res.Header, version)
		defer res.Body.Close()
	}
	if err != nil || !ok {
		if rb.insecure.AllowHTTP {
			// https failed or was inconclusive: retry over plain http
			schema = "http"
			res, err = fetch(schema)
			if err == nil {
				ok, err = checkRegistryStatus(res.StatusCode, res.Header, version)
				defer res.Body.Close()
			}
		}
		return schema, ok, err
	}
	return schema, ok, err
}
================================================ FILE: lib/internal/backend/repository/repository1.go ================================================ // Copyright 2015 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package repository import ( "encoding/json" "fmt" "io" "io/ioutil" "net/http" "os" "path" "strconv" "strings" "time" "github.com/appc/docker2aci/lib/common" "github.com/appc/docker2aci/lib/internal" "github.com/appc/docker2aci/lib/internal/types" "github.com/appc/docker2aci/lib/internal/util" "github.com/appc/spec/schema" "github.com/coreos/ioprogress" ) type RepoData struct { Tokens []string Endpoints []string Cookie []string } func (rb *RepositoryBackend) getImageInfoV1(dockerURL *common.ParsedDockerURL) ([]string, string, *common.ParsedDockerURL, error) { repoData, err := rb.getRepoDataV1(dockerURL.IndexURL, dockerURL.ImageName) if err != nil { return nil, "", nil, fmt.Errorf("error getting repository data: %v", err) } // TODO(iaguis) check more endpoints appImageID, err := rb.getImageIDFromTagV1(repoData.Endpoints[0], dockerURL.ImageName, dockerURL.Tag, repoData) if err != nil { return nil, "", nil, fmt.Errorf("error getting ImageID from tag %s: %v", dockerURL.Tag, err) } ancestry, err := rb.getAncestryV1(appImageID, repoData.Endpoints[0], repoData) if err != nil { return nil, "", nil, err } rb.repoData = repoData return ancestry, appImageID, dockerURL, nil } func (rb *RepositoryBackend) buildACIV1(layerIDs []string, manhash string, 
dockerURL *common.ParsedDockerURL, outputDir string, tmpBaseDir string, compression common.Compression) ([]string, []*schema.ImageManifest, error) { layerFiles := make([]*os.File, len(layerIDs)) layerDatas := make([]types.DockerImageData, len(layerIDs)) tmpParentDir, err := ioutil.TempDir(tmpBaseDir, "docker2aci-") if err != nil { return nil, nil, err } defer os.RemoveAll(tmpParentDir) var doneChannels []chan error for i, layerID := range layerIDs { if err := common.ValidateLayerId(layerID); err != nil { return nil, nil, err } doneChan := make(chan error) doneChannels = append(doneChannels, doneChan) // https://github.com/golang/go/wiki/CommonMistakes i := i // golang-- layerID := layerID go func() { tmpDir, err := ioutil.TempDir(tmpParentDir, "") if err != nil { doneChan <- fmt.Errorf("error creating dir: %v", err) return } j, size, err := rb.getJsonV1(layerID, rb.repoData.Endpoints[0], rb.repoData) if err != nil { doneChan <- fmt.Errorf("error getting image json: %v", err) return } layerDatas[i] = types.DockerImageData{} if err := json.Unmarshal(j, &layerDatas[i]); err != nil { doneChan <- fmt.Errorf("error unmarshaling layer data: %v", err) return } layerFiles[i], err = rb.getLayerV1(layerID, rb.repoData.Endpoints[0], rb.repoData, size, tmpDir) if err != nil { doneChan <- fmt.Errorf("error getting the remote layer: %v", err) return } doneChan <- nil }() } for _, doneChan := range doneChannels { err := <-doneChan if err != nil { return nil, nil, err } } var aciLayerPaths []string var aciManifests []*schema.ImageManifest var curPwl []string for i := len(layerIDs) - 1; i >= 0; i-- { rb.debug.Println("Generating layer ACI...") aciPath, manifest, err := internal.GenerateACI(i, manhash, layerDatas[i], dockerURL, outputDir, layerFiles[i], curPwl, compression, rb.debug) if err != nil { return nil, nil, fmt.Errorf("error generating ACI: %v", err) } aciLayerPaths = append(aciLayerPaths, aciPath) aciManifests = append(aciManifests, manifest) curPwl = manifest.PathWhitelist 
layerFiles[i].Close() } return aciLayerPaths, aciManifests, nil } func (rb *RepositoryBackend) getRepoDataV1(indexURL string, remote string) (*RepoData, error) { client := util.GetTLSClient(rb.insecure.SkipVerify) repositoryURL := rb.schema + path.Join(indexURL, "v1", "repositories", remote, "images") req, err := http.NewRequest("GET", repositoryURL, nil) if err != nil { return nil, err } if rb.username != "" && rb.password != "" { req.SetBasicAuth(rb.username, rb.password) } req.Header.Set("X-Docker-Token", "true") res, err := client.Do(req) if err != nil { return nil, err } defer res.Body.Close() if res.StatusCode != 200 { return nil, &httpStatusErr{res.StatusCode, req.URL} } var tokens []string if res.Header.Get("X-Docker-Token") != "" { tokens = res.Header["X-Docker-Token"] } var cookies []string if res.Header.Get("Set-Cookie") != "" { cookies = res.Header["Set-Cookie"] } var endpoints []string if res.Header.Get("X-Docker-Endpoints") != "" { endpoints = makeEndpointsListV1(res.Header["X-Docker-Endpoints"]) } else { // Assume same endpoint endpoints = append(endpoints, indexURL) } return &RepoData{ Endpoints: endpoints, Tokens: tokens, Cookie: cookies, }, nil } func (rb *RepositoryBackend) getImageIDFromTagV1(registry string, appName string, tag string, repoData *RepoData) (string, error) { client := util.GetTLSClient(rb.insecure.SkipVerify) // we get all the tags instead of directly getting the imageID of the // requested one (.../tags/TAG) because even though it's specified in the // Docker API, some registries (e.g. Google Container Registry) don't // implement it. 
req, err := http.NewRequest("GET", rb.schema+path.Join(registry, "repositories", appName, "tags"), nil) if err != nil { return "", fmt.Errorf("failed to get Image ID: %s, URL: %s", err, req.URL) } setAuthTokenV1(req, repoData.Tokens) setCookieV1(req, repoData.Cookie) res, err := client.Do(req) if err != nil { return "", fmt.Errorf("failed to get Image ID: %s, URL: %s", err, req.URL) } defer res.Body.Close() if res.StatusCode != 200 { return "", &httpStatusErr{res.StatusCode, req.URL} } j, err := ioutil.ReadAll(res.Body) if err != nil { return "", err } var tags map[string]string if err := json.Unmarshal(j, &tags); err != nil { return "", fmt.Errorf("error unmarshaling: %v", err) } imageID, ok := tags[tag] if !ok { return "", fmt.Errorf("tag %s not found", tag) } return imageID, nil } func (rb *RepositoryBackend) getAncestryV1(imgID, registry string, repoData *RepoData) ([]string, error) { client := util.GetTLSClient(rb.insecure.SkipVerify) req, err := http.NewRequest("GET", rb.schema+path.Join(registry, "images", imgID, "ancestry"), nil) if err != nil { return nil, err } setAuthTokenV1(req, repoData.Tokens) setCookieV1(req, repoData.Cookie) res, err := client.Do(req) if err != nil { return nil, err } defer res.Body.Close() if res.StatusCode != 200 { return nil, &httpStatusErr{res.StatusCode, req.URL} } var ancestry []string j, err := ioutil.ReadAll(res.Body) if err != nil { return nil, fmt.Errorf("Failed to read downloaded json: %s (%s)", err, j) } if err := json.Unmarshal(j, &ancestry); err != nil { return nil, fmt.Errorf("error unmarshaling: %v", err) } return ancestry, nil } func (rb *RepositoryBackend) getJsonV1(imgID, registry string, repoData *RepoData) ([]byte, int64, error) { client := util.GetTLSClient(rb.insecure.SkipVerify) req, err := http.NewRequest("GET", rb.schema+path.Join(registry, "images", imgID, "json"), nil) if err != nil { return nil, -1, err } setAuthTokenV1(req, repoData.Tokens) setCookieV1(req, repoData.Cookie) res, err := client.Do(req) if 
err != nil { return nil, -1, err } defer res.Body.Close() if res.StatusCode != 200 { return nil, -1, &httpStatusErr{res.StatusCode, req.URL} } imageSize := int64(-1) if hdr := res.Header.Get("X-Docker-Size"); hdr != "" { imageSize, err = strconv.ParseInt(hdr, 10, 64) if err != nil { return nil, -1, err } } b, err := ioutil.ReadAll(res.Body) if err != nil { return nil, -1, fmt.Errorf("failed to read downloaded json: %v (%s)", err, b) } return b, imageSize, nil } func (rb *RepositoryBackend) getLayerV1(imgID, registry string, repoData *RepoData, imgSize int64, tmpDir string) (*os.File, error) { client := util.GetTLSClient(rb.insecure.SkipVerify) req, err := http.NewRequest("GET", rb.schema+path.Join(registry, "images", imgID, "layer"), nil) if err != nil { return nil, err } setAuthTokenV1(req, repoData.Tokens) setCookieV1(req, repoData.Cookie) res, err := client.Do(req) if err != nil { return nil, err } defer res.Body.Close() if res.StatusCode != 200 { res.Body.Close() return nil, &httpStatusErr{res.StatusCode, req.URL} } // if we didn't receive the size via X-Docker-Size when we retrieved the // layer's json, try Content-Length if imgSize == -1 { if hdr := res.Header.Get("Content-Length"); hdr != "" { imgSize, err = strconv.ParseInt(hdr, 10, 64) if err != nil { return nil, err } } } prefix := "Downloading " + imgID[:12] fmtBytesSize := 18 barSize := int64(80 - len(prefix) - fmtBytesSize) bar := ioprogress.DrawTextFormatBarForW(barSize, os.Stderr) fmtfunc := func(progress, total int64) string { return fmt.Sprintf( "%s: %s %s", prefix, bar(progress, total), ioprogress.DrawTextFormatBytes(progress, total), ) } progressReader := &ioprogress.Reader{ Reader: res.Body, Size: imgSize, DrawFunc: ioprogress.DrawTerminalf(os.Stderr, fmtfunc), DrawInterval: 500 * time.Millisecond, } layerFile, err := ioutil.TempFile(tmpDir, "dockerlayer-") if err != nil { return nil, err } _, err = io.Copy(layerFile, progressReader) if err != nil { return nil, err } if err := layerFile.Sync(); 
err != nil { return nil, err } return layerFile, nil } func setAuthTokenV1(req *http.Request, token []string) { if req.Header.Get("Authorization") == "" { req.Header.Set("Authorization", "Token "+strings.Join(token, ",")) } } func setCookieV1(req *http.Request, cookie []string) { if req.Header.Get("Cookie") == "" { req.Header.Set("Cookie", strings.Join(cookie, "")) } } func makeEndpointsListV1(headers []string) []string { var endpoints []string for _, ep := range headers { endpointsList := strings.Split(ep, ",") for _, endpointEl := range endpointsList { endpoints = append( endpoints, path.Join(strings.TrimSpace(endpointEl), "v1")) } } return endpoints } ================================================ FILE: lib/internal/backend/repository/repository2.go ================================================ // Copyright 2016 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package repository

import (
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"path"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/appc/docker2aci/lib/common"
	"github.com/appc/docker2aci/lib/internal"
	"github.com/appc/docker2aci/lib/internal/types"
	"github.com/appc/docker2aci/lib/internal/typesV2"
	"github.com/appc/docker2aci/lib/internal/util"
	"github.com/appc/spec/schema"
	"github.com/coreos/pkg/progressutil"

	godigest "github.com/opencontainers/go-digest"
)

const (
	defaultIndexURL = "registry-1.docker.io"
)

// A manifest conforming to the docker v2.1 spec
type v2Manifest struct {
	Name     string `json:"name"`
	Tag      string `json:"tag"`
	FSLayers []struct {
		BlobSum string `json:"blobSum"`
	} `json:"fsLayers"`
	History []struct {
		V1Compatibility string `json:"v1Compatibility"`
	} `json:"history"`
	Signature []byte `json:"signature"`
}

// getImageInfoV2 fetches the v2 manifest for dockerURL and returns the
// ordered layer digests, the manifest hash, and the (unchanged) parsed URL.
func (rb *RepositoryBackend) getImageInfoV2(dockerURL *common.ParsedDockerURL) ([]string, string, *common.ParsedDockerURL, error) {
	layers, manhash, err := rb.getManifestV2(dockerURL)
	if err != nil {
		return nil, "", nil, err
	}
	return layers, manhash, dockerURL, nil
}

// buildACIV2 dispatches ACI generation to the v2.2 or v2.1 path depending on
// which manifest type getManifestV2 recorded for this URL.
func (rb *RepositoryBackend) buildACIV2(layerIDs []string, manhash string, dockerURL *common.ParsedDockerURL, outputDir string, tmpBaseDir string, compression common.Compression) ([]string, []*schema.ImageManifest, error) {
	_, isVersion22 := rb.imageV2Manifests[*dockerURL]
	if isVersion22 {
		return rb.buildACIV22(layerIDs, manhash, dockerURL, outputDir, tmpBaseDir, compression)
	}
	return rb.buildACIV21(layerIDs, manhash, dockerURL, outputDir, tmpBaseDir, compression)
}

// buildACIV21 downloads all layers concurrently (one goroutine per layer,
// progress multiplexed through a shared CopyProgressPrinter) and then
// generates one ACI per layer, walking the layer list from base to top so
// each ACI's path whitelist can feed the next.
func (rb *RepositoryBackend) buildACIV21(layerIDs []string, manhash string, dockerURL *common.ParsedDockerURL, outputDir string, tmpBaseDir string, compression common.Compression) ([]string, []*schema.ImageManifest, error) {
	layerFiles := make([]*os.File, len(layerIDs))
	layerDatas := make([]types.DockerImageData, len(layerIDs))
	tmpParentDir, err := ioutil.TempDir(tmpBaseDir, "docker2aci-")
	if err != nil {
		return nil, nil, err
	}
	defer os.RemoveAll(tmpParentDir)
	copier := progressutil.NewCopyProgressPrinter()
	var errChannels []chan error
	closers := make([]io.ReadCloser, len(layerIDs))
	var wg sync.WaitGroup
	for i, layerID := range layerIDs {
		if err := common.ValidateLayerId(layerID); err != nil {
			return nil, nil, err
		}
		wg.Add(1)
		// Buffered so the goroutine can exit without a reader.
		errChan := make(chan error, 1)
		errChannels = append(errChannels, errChan)
		// https://github.com/golang/go/wiki/CommonMistakes
		i := i // golang--
		layerID := layerID
		go func() {
			defer wg.Done()
			manifest := rb.imageManifests[*dockerURL]
			layerIndex, ok := rb.layersIndex[layerID]
			if !ok {
				errChan <- fmt.Errorf("layer not found in manifest: %s", layerID)
				return
			}
			if len(manifest.History) <= layerIndex {
				errChan <- fmt.Errorf("history not found for layer %s", layerID)
				return
			}
			// The v2.1 manifest embeds each layer's v1 image metadata as a
			// JSON string in its History entry.
			layerDatas[i] = types.DockerImageData{}
			if err := json.Unmarshal([]byte(manifest.History[layerIndex].V1Compatibility), &layerDatas[i]); err != nil {
				errChan <- fmt.Errorf("error unmarshaling layer data: %v", err)
				return
			}
			tmpDir, err := ioutil.TempDir(tmpParentDir, "")
			if err != nil {
				errChan <- fmt.Errorf("error creating dir: %v", err)
				return
			}
			layerFiles[i], closers[i], err = rb.getLayerV2(layerID, dockerURL, tmpDir, copier)
			if err != nil {
				errChan <- fmt.Errorf("error getting the remote layer: %v", err)
				return
			}
			errChan <- nil
		}()
	}
	// Need to wait for all of the readers to be added to the copier (which happens during rb.getLayerV2)
	wg.Wait()
	err = copier.PrintAndWait(os.Stderr, 500*time.Millisecond, nil)
	if err != nil {
		return nil, nil, err
	}
	for _, closer := range closers {
		if closer != nil {
			closer.Close()
		}
	}
	for _, errChan := range errChannels {
		err := <-errChan
		if err != nil {
			return nil, nil, err
		}
	}
	for _, layerFile := range layerFiles {
		err := layerFile.Sync()
		if err != nil {
			return nil, nil, err
		}
	}
	var aciLayerPaths []string
	var aciManifests []*schema.ImageManifest
	var curPwl []string
	// Iterate from the base layer (last in the list) to the top layer.
	for i := len(layerIDs) - 1; i >= 0; i-- {
		rb.debug.Println("Generating layer ACI...")
		aciPath, aciManifest, err := internal.GenerateACI(i, manhash, layerDatas[i], dockerURL, outputDir, layerFiles[i], curPwl, compression, rb.debug)
		if err != nil {
			return nil, nil, fmt.Errorf("error generating ACI: %v", err)
		}
		aciLayerPaths = append(aciLayerPaths, aciPath)
		aciManifests = append(aciManifests, aciManifest)
		curPwl = aciManifest.PathWhitelist
		layerFiles[i].Close()
	}
	return aciLayerPaths, aciManifests, nil
}

// layer is the per-layer result passed from download goroutines back to
// buildACIV22 over a channel.
type layer struct {
	index  int
	file   *os.File
	closer io.Closer
	err    error
}

// buildACIV22 downloads all layers concurrently and generates the ACIs for a
// v2.2 (or OCI) image: lower layers get empty manifests, and the top layer's
// manifest is built from the image config.
func (rb *RepositoryBackend) buildACIV22(layerIDs []string, manhash string, dockerURL *common.ParsedDockerURL, outputDir string, tmpBaseDir string, compression common.Compression) ([]string, []*schema.ImageManifest, error) {
	layerFiles := make([]*os.File, len(layerIDs))
	tmpParentDir, err := ioutil.TempDir(tmpBaseDir, "docker2aci-")
	if err != nil {
		return nil, nil, err
	}
	defer os.RemoveAll(tmpParentDir)
	copier := progressutil.NewCopyProgressPrinter()
	resultChan := make(chan layer, len(layerIDs))
	for i, layerID := range layerIDs {
		if err := common.ValidateLayerId(layerID); err != nil {
			return nil, nil, err
		}
		// https://github.com/golang/go/wiki/CommonMistakes
		i := i // golang--
		layerID := layerID
		go func() {
			tmpDir, err := ioutil.TempDir(tmpParentDir, "")
			if err != nil {
				resultChan <- layer{
					index: i,
					err:   fmt.Errorf("error creating dir: %v", err),
				}
				return
			}
			layerFile, closer, err := rb.getLayerV2(layerID, dockerURL, tmpDir, copier)
			if err != nil {
				resultChan <- layer{
					index: i,
					err:   fmt.Errorf("error getting the remote layer: %v", err),
				}
				return
			}
			resultChan <- layer{
				index:  i,
				file:   layerFile,
				closer: closer,
				err:    nil,
			}
		}()
	}
	var errs []error
	// Collect all results before failing so every opened file/body gets a
	// deferred Close even when some downloads errored.
	for i := 0; i < len(layerIDs); i++ {
		res := <-resultChan
		if res.closer != nil {
			defer res.closer.Close()
		}
		if res.file != nil {
			defer res.file.Close()
		}
		if res.err != nil {
			errs = append(errs, res.err)
		}
		layerFiles[res.index] = res.file
	}
	if len(errs) > 0 {
		return nil, nil, errs[0]
	}
	err = copier.PrintAndWait(os.Stderr, 500*time.Millisecond, nil)
	if err != nil {
		return nil, nil, err
	}
	for _, layerFile := range layerFiles {
		err := layerFile.Sync()
		if err != nil {
			return nil, nil, err
		}
	}
	var aciLayerPaths []string
	var aciManifests []*schema.ImageManifest
	var curPwl []string
	var i int
	for i = 0; i < len(layerIDs)-1; i++ {
		rb.debug.Println("Generating layer ACI...")
		aciPath, aciManifest, err := internal.GenerateACI22LowerLayer(dockerURL, layerIDs[i], outputDir, layerFiles[i], curPwl, compression)
		if err != nil {
			return nil, nil, fmt.Errorf("error generating ACI: %v", err)
		}
		aciLayerPaths = append(aciLayerPaths, aciPath)
		aciManifests = append(aciManifests, aciManifest)
		curPwl = aciManifest.PathWhitelist
	}
	// The last layer carries the image config and the full app manifest.
	rb.debug.Println("Generating layer ACI...")
	aciPath, aciManifest, err := internal.GenerateACI22TopLayer(dockerURL, manhash, rb.imageConfigs[*dockerURL], layerIDs[i], outputDir, layerFiles[i], curPwl, compression, aciManifests, rb.debug)
	if err != nil {
		return nil, nil, fmt.Errorf("error generating ACI: %v", err)
	}
	aciLayerPaths = append(aciLayerPaths, aciPath)
	aciManifests = append(aciManifests, aciManifest)
	return aciLayerPaths, aciManifests, nil
}

// getManifestV2 fetches the manifest for dockerURL (by digest when present,
// otherwise by tag) and dispatches parsing based on the response
// content-type; unknown types fall back to the v2.1 parser.
func (rb *RepositoryBackend) getManifestV2(dockerURL *common.ParsedDockerURL) ([]string, string, error) {
	var reference string
	if dockerURL.Digest != "" {
		reference = dockerURL.Digest
	} else {
		reference = dockerURL.Tag
	}
	url := rb.schema + path.Join(dockerURL.IndexURL, "v2", dockerURL.ImageName, "manifests", reference)
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, "", err
	}
	rb.setBasicAuth(req)
	res, err := rb.makeRequest(req, dockerURL.ImageName, rb.mediaTypes.ManifestMediaTypes())
	if err != nil {
		return nil, "", err
	}
	defer res.Body.Close()
	if res.StatusCode != http.StatusOK {
		return nil, "", &httpStatusErr{res.StatusCode, req.URL}
	}
	switch res.Header.Get("content-type") {
	case common.MediaTypeDockerV22Manifest, common.MediaTypeOCIV1Manifest:
		return rb.getManifestV22(dockerURL, res)
	case common.MediaTypeDockerV21Manifest:
		return rb.getManifestV21(dockerURL, res)
	}
	return rb.getManifestV21(dockerURL, res)
}

// getManifestV21 parses a docker v2.1 manifest from res, validates name/tag
// against the request, records it in rb.imageManifests, and returns the
// layer blob sums plus the manifest digest.
func (rb *RepositoryBackend) getManifestV21(dockerURL *common.ParsedDockerURL, res *http.Response) ([]string, string, error) {
	manblob, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, "", err
	}
	manifest := &v2Manifest{}
	err = json.Unmarshal(manblob, manifest)
	if err != nil {
		return nil, "", err
	}
	manhash := godigest.FromBytes(manblob)
	if manifest.Name != dockerURL.ImageName {
		return nil, "", fmt.Errorf("name doesn't match what was requested, expected: %s, downloaded: %s", dockerURL.ImageName, manifest.Name)
	}
	if dockerURL.Tag != "" && manifest.Tag != dockerURL.Tag {
		return nil, "", fmt.Errorf("tag doesn't match what was requested, expected: %s, downloaded: %s", dockerURL.Tag, manifest.Tag)
	}
	if err := fixManifestLayers(manifest); err != nil {
		return nil, "", err
	}
	//TODO: verify signature here
	layers := make([]string, len(manifest.FSLayers))
	for i, layer := range manifest.FSLayers {
		// Remember the first manifest index of each blob sum; duplicates
		// keep the earlier index.
		if _, ok := rb.layersIndex[layer.BlobSum]; !ok {
			rb.layersIndex[layer.BlobSum] = i
		}
		layers[i] = layer.BlobSum
	}
	rb.imageManifests[*dockerURL] = *manifest
	return layers, string(manhash), nil
}

// getManifestV22 parses a docker v2.2/OCI manifest from res, fetches the
// referenced image config, records the manifest in rb.imageV2Manifests, and
// returns the layer digests plus the manifest digest.
func (rb *RepositoryBackend) getManifestV22(dockerURL *common.ParsedDockerURL, res *http.Response) ([]string, string, error) {
	manblob, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, "", err
	}
	manifest := &typesV2.ImageManifest{}
	err = json.Unmarshal(manblob, manifest)
	if err != nil {
		return nil, "", err
	}
	manhash := godigest.FromBytes(manblob)
	//TODO: verify signature here
	layers := make([]string, len(manifest.Layers))
	for i, layer := range manifest.Layers {
		layers[i] = layer.Digest
	}
	err = rb.getConfigV22(dockerURL, manifest.Config.Digest)
	if err != nil {
		return nil, "", err
	}
	rb.imageV2Manifests[*dockerURL] = manifest
	return layers, string(manhash), nil
}

// getConfigV22 downloads the image config blob identified by configDigest
// and caches the parsed result in rb.imageConfigs.
func (rb *RepositoryBackend) getConfigV22(dockerURL *common.ParsedDockerURL, configDigest string) error {
	url := rb.schema + path.Join(dockerURL.IndexURL, "v2", dockerURL.ImageName, "blobs", configDigest)
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return err
	}
	rb.setBasicAuth(req)
	res, err := rb.makeRequest(req, dockerURL.ImageName, rb.mediaTypes.ConfigMediaTypes())
	if err != nil {
		return err
	}
	defer res.Body.Close()
	confblob, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return err
	}
	config := &typesV2.ImageConfig{}
	err = json.Unmarshal(confblob, config)
	if err != nil {
		return err
	}
	rb.imageConfigs[*dockerURL] = config
	return nil
}

// fixManifestLayers validates the layer/parent chain of a v2.1 manifest and
// removes entries whose ID repeats consecutively, mutating manifest in
// place. It errors on duplicate IDs elsewhere or a broken parent chain.
func fixManifestLayers(manifest *v2Manifest) error {
	type imageV1 struct {
		ID     string
		Parent string
	}
	imgs := make([]*imageV1, len(manifest.FSLayers))
	for i := range manifest.FSLayers {
		img := &imageV1{}
		if err := json.Unmarshal([]byte(manifest.History[i].V1Compatibility), img); err != nil {
			return err
		}
		imgs[i] = img
		if err := common.ValidateLayerId(img.ID); err != nil {
			return err
		}
	}
	// The last entry is the base layer; it must have no parent.
	if imgs[len(imgs)-1].Parent != "" {
		return errors.New("Invalid parent ID in the base layer of the image.")
	}
	// check general duplicates to error instead of a deadlock
	idmap := make(map[string]struct{})
	var lastID string
	for _, img := range imgs {
		// skip IDs that appear after each other, we handle those later
		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
			return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)
		}
		lastID = img.ID
		idmap[lastID] = struct{}{}
	}
	// backwards loop so that we keep the remaining indexes after removing items
	for i := len(imgs) - 2; i >= 0; i-- {
		if imgs[i].ID == imgs[i+1].ID {
			// repeated ID. remove and continue
			manifest.FSLayers = append(manifest.FSLayers[:i], manifest.FSLayers[i+1:]...)
			manifest.History = append(manifest.History[:i], manifest.History[i+1:]...)
		} else if imgs[i].Parent != imgs[i+1].ID {
			return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent)
		}
	}
	return nil
}

// getLayerV2 starts a download of the layer blob into a temp file under
// tmpDir, registering the copy with copier (the caller drives the actual
// transfer via copier.PrintAndWait). It follows one explicit redirect and
// returns the temp file and the response body the caller must close.
func (rb *RepositoryBackend) getLayerV2(layerID string, dockerURL *common.ParsedDockerURL, tmpDir string, copier *progressutil.CopyProgressPrinter) (*os.File, io.ReadCloser, error) {
	var (
		err error
		res *http.Response
		url = rb.schema + path.Join(dockerURL.IndexURL, "v2", dockerURL.ImageName, "blobs", layerID)
	)
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, nil, err
	}
	rb.setBasicAuth(req)
	res, err = rb.makeRequest(req, dockerURL.ImageName, rb.mediaTypes.LayerMediaTypes())
	if err != nil {
		return nil, nil, err
	}
	// Close the body on any error path below; on success the caller owns it.
	defer func() {
		if err != nil && res != nil {
			res.Body.Close()
		}
	}()
	if res.StatusCode == http.StatusTemporaryRedirect || res.StatusCode == http.StatusFound {
		location := res.Header.Get("Location")
		if location != "" {
			req, err = http.NewRequest("GET", location, nil)
			if err != nil {
				return nil, nil, err
			}
			res.Body.Close()
			res = nil
			res, err = rb.makeRequest(req, dockerURL.ImageName, rb.mediaTypes.LayerMediaTypes())
			if err != nil {
				return nil, nil, err
			}
		}
	}
	if res.StatusCode != http.StatusOK {
		return nil, nil, &httpStatusErr{res.StatusCode, req.URL}
	}
	var in io.Reader
	in = res.Body
	var size int64
	if hdr := res.Header.Get("Content-Length"); hdr != "" {
		size, err = strconv.ParseInt(hdr, 10, 64)
		if err != nil {
			return nil, nil, err
		}
	}
	name := "Downloading " + layerID[:18]
	layerFile, err := ioutil.TempFile(tmpDir, "dockerlayer-")
	if err != nil {
		return nil, nil, err
	}
	err = copier.AddCopy(in, name, size, layerFile)
	if err != nil {
		return nil, nil, err
	}
	return layerFile, res.Body, nil
}

// makeRequest performs req with any cached bearer token and the given Accept
// headers. On a 401 with a bearer challenge it fetches a token from the
// advertised realm (inferring a pull scope from repo when absent), caches it
// per host/repo, and retries the original request once via recursion.
func (rb *RepositoryBackend) makeRequest(req *http.Request, repo string, acceptHeaders []string) (*http.Response, error) {
	setBearerHeader := false
	hostAuthTokens, ok := rb.hostsV2AuthTokens[req.URL.Host]
	if ok {
		authToken, ok := hostAuthTokens[repo]
		if ok {
			req.Header.Set("Authorization", "Bearer "+authToken)
			setBearerHeader = true
		}
	}
	for _, acceptHeader := range acceptHeaders {
		req.Header.Add("Accept", acceptHeader)
	}
	client := util.GetTLSClient(rb.insecure.SkipVerify)
	res, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	// A 401 despite a cached token means the token is bad: surface it.
	if res.StatusCode == http.StatusUnauthorized && setBearerHeader {
		return res, err
	}
	hdr := res.Header.Get("www-authenticate")
	if res.StatusCode != http.StatusUnauthorized || hdr == "" {
		return res, err
	}
	// Only the exact three-part `Bearer realm=...,service=...,scope=...`
	// challenge shape is handled here.
	tokens := strings.Split(hdr, ",")
	if len(tokens) != 3 || !strings.HasPrefix(strings.ToLower(tokens[0]), "bearer realm") {
		return res, err
	}
	res.Body.Close()
	var realm, service, scope string
	for _, token := range tokens {
		if strings.HasPrefix(strings.ToLower(token), "bearer realm") {
			realm = strings.Trim(token[len("bearer realm="):], "\"")
		}
		if strings.HasPrefix(token, "service") {
			service = strings.Trim(token[len("service="):], "\"")
		}
		if strings.HasPrefix(token, "scope") {
			scope = strings.Trim(token[len("scope="):], "\"")
		}
	}
	if realm == "" {
		return nil, fmt.Errorf("missing realm in bearer auth challenge")
	}
	if service == "" {
		return nil, fmt.Errorf("missing service in bearer auth challenge")
	}
	// The scope can be empty if we're not getting a token for a specific repo
	if scope == "" && repo != "" {
		// If the scope is empty and it shouldn't be, we can infer it based on the repo
		scope = fmt.Sprintf("repository:%s:pull", repo)
	}
	authReq, err := http.NewRequest("GET", realm, nil)
	if err != nil {
		return nil, err
	}
	getParams := authReq.URL.Query()
	getParams.Add("service", service)
	if scope != "" {
		getParams.Add("scope", scope)
	}
	authReq.URL.RawQuery = getParams.Encode()
	rb.setBasicAuth(authReq)
	res, err = client.Do(authReq)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close()
	switch res.StatusCode {
	case http.StatusUnauthorized:
		return nil, fmt.Errorf("unable to retrieve auth token: 401 unauthorized")
	case http.StatusOK:
		break
	default:
		return nil, &httpStatusErr{res.StatusCode, authReq.URL}
	}
	tokenBlob, err := ioutil.ReadAll(res.Body)
	if err != nil {
		return nil, err
	}
	tokenStruct := struct {
		Token string `json:"token"`
	}{}
	err = json.Unmarshal(tokenBlob, &tokenStruct)
	if err != nil {
		return nil, err
	}
	hostAuthTokens, ok = rb.hostsV2AuthTokens[req.URL.Host]
	if !ok {
		hostAuthTokens = make(map[string]string)
		rb.hostsV2AuthTokens[req.URL.Host] = hostAuthTokens
	}
	hostAuthTokens[repo] = tokenStruct.Token
	// Retry the original request, now carrying the freshly cached token.
	return rb.makeRequest(req, repo, acceptHeaders)
}

// setBasicAuth adds HTTP basic auth to req when both credentials are set.
func (rb *RepositoryBackend) setBasicAuth(req *http.Request) {
	if rb.username != "" && rb.password != "" {
		req.SetBasicAuth(rb.username, rb.password)
	}
}



================================================
FILE: lib/internal/docker/docker.go
================================================
// Copyright 2015 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package docker

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"runtime"
	"strings"

	"github.com/appc/docker2aci/lib/internal/types"
)

const (
	dockercfgFileName    = "config.json"
	dockercfgFileNameOld = ".dockercfg"
	defaultIndexURL      = "registry-1.docker.io"
	defaultIndexURLAuth  = "https://index.docker.io/v1/"
	defaultRepoPrefix    = "library/"
)

// SplitReposName breaks a repo name into an index name and remote name.
func SplitReposName(name string) (indexName, remoteName string) { i := strings.IndexRune(name, '/') if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { indexName, remoteName = defaultIndexURL, name } else { indexName, remoteName = name[:i], name[i+1:] } if indexName == defaultIndexURL && !strings.ContainsRune(remoteName, '/') { remoteName = defaultRepoPrefix + remoteName } return } // Get a repos name and returns the right reposName + tag // The tag can be confusing because of a port in a repository name. // Ex: localhost.localdomain:5000/samalba/hipache:latest func parseRepositoryTag(repos string) (string, string) { n := strings.LastIndex(repos, ":") if n < 0 { return repos, "" } if tag := repos[n+1:]; !strings.Contains(tag, "/") { return repos[:n], tag } return repos, "" } func decodeDockerAuth(s string) (string, string, error) { decoded, err := base64.StdEncoding.DecodeString(s) if err != nil { return "", "", err } parts := strings.SplitN(string(decoded), ":", 2) if len(parts) != 2 { return "", "", fmt.Errorf("invalid auth configuration file") } user := parts[0] password := strings.Trim(parts[1], "\x00") return user, password, nil } func getHomeDir() string { if runtime.GOOS == "windows" { return os.Getenv("USERPROFILE") } return os.Getenv("HOME") } // GetDockercfgAuth reads a ~/.dockercfg file and returns the username and password // of the given docker index server. 
// GetAuthInfo loads credentials for indexServer, preferring the modern
// ~/.docker/config.json and falling back to the legacy ~/.dockercfg.
// A missing config file yields empty credentials with no error.
func GetAuthInfo(indexServer string) (string, string, error) {
	// official docker registry
	if indexServer == defaultIndexURL {
		indexServer = defaultIndexURLAuth
	}
	dockerCfgPath := path.Join(getHomeDir(), ".docker", dockercfgFileName)
	if _, err := os.Stat(dockerCfgPath); err == nil {
		j, err := ioutil.ReadFile(dockerCfgPath)
		if err != nil {
			return "", "", err
		}
		var dockerAuth types.DockerConfigFile
		if err := json.Unmarshal(j, &dockerAuth); err != nil {
			return "", "", err
		}
		// try the normal case
		if c, ok := dockerAuth.AuthConfigs[indexServer]; ok {
			return decodeDockerAuth(c.Auth)
		}
	} else if os.IsNotExist(err) {
		// Legacy path: a flat map of index URL -> auth entry.
		oldDockerCfgPath := path.Join(getHomeDir(), dockercfgFileNameOld)
		if _, err := os.Stat(oldDockerCfgPath); err != nil {
			return "", "", nil //missing file is not an error
		}
		j, err := ioutil.ReadFile(oldDockerCfgPath)
		if err != nil {
			return "", "", err
		}
		var dockerAuthOld map[string]types.DockerAuthConfigOld
		if err := json.Unmarshal(j, &dockerAuthOld); err != nil {
			return "", "", err
		}
		if c, ok := dockerAuthOld[indexServer]; ok {
			return decodeDockerAuth(c.Auth)
		}
	} else {
		// if file is there but we can't stat it for any reason other
		// than it doesn't exist then stop
		return "", "", fmt.Errorf("%s - %v", dockerCfgPath, err)
	}
	return "", "", nil
}



================================================
FILE: lib/internal/internal.go
================================================
// Copyright 2015 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package internal provides functions shared by different parts of docker2aci.
//
// Note: this package is an implementation detail and shouldn't be used outside
// of docker2aci.
package internal

import (
	"archive/tar"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path"
	"path/filepath"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"time"

	"github.com/appc/docker2aci/lib/common"
	"github.com/appc/docker2aci/lib/internal/tarball"
	"github.com/appc/docker2aci/lib/internal/types"
	"github.com/appc/docker2aci/lib/internal/typesV2"
	"github.com/appc/docker2aci/lib/internal/util"
	"github.com/appc/docker2aci/pkg/log"
	"github.com/appc/spec/aci"
	"github.com/appc/spec/schema"
	appctypes "github.com/appc/spec/schema/types"
	gzip "github.com/klauspost/pgzip"
)

// Docker2ACIBackend is the interface that abstracts converting Docker layers
// to ACI from where they're stored (remote or file).
//
// GetImageInfo takes a Docker URL and returns a list of layers and the parsed
// Docker URL.
//
// BuildACI takes a Docker layer, converts it to ACI and returns its output
// path and its converted ImageManifest.
type Docker2ACIBackend interface {
	// GetImageInfo, given the url for a docker image, will return the
	// following:
	// - []string: an ordered list of all layer hashes
	// - string: a unique identifier for this image, like a hash of the manifest
	// - *common.ParsedDockerURL: a parsed docker URL
	// - error: an error if one occurred
	GetImageInfo(dockerUrl string) ([]string, string, *common.ParsedDockerURL, error)
	BuildACI(layerIDs []string, manhash string, dockerURL *common.ParsedDockerURL, outputDir string, tmpBaseDir string, compression common.Compression) ([]string, []*schema.ImageManifest, error)
}

// GenerateACI takes a Docker layer and generates an ACI from it.
func GenerateACI(layerNumber int, manhash string, layerData types.DockerImageData, dockerURL *common.ParsedDockerURL, outputDir string, layerFile *os.File, curPwl []string, compression common.Compression, debug log.Logger) (string, *schema.ImageManifest, error) {
	// Build the appc manifest from the layer's v1 metadata, write the ACI,
	// then validate the result before returning its path.
	manifest, err := GenerateManifest(layerData, manhash, dockerURL, debug)
	if err != nil {
		return "", nil, fmt.Errorf("error generating the manifest: %v", err)
	}
	imageName := strings.Replace(dockerURL.ImageName, "/", "-", -1)
	aciPath := generateACIPath(outputDir, imageName, layerData.ID, dockerURL.Tag, layerData.OS, layerData.Architecture, layerNumber)
	manifest, err = writeACI(layerFile, *manifest, curPwl, aciPath, compression)
	if err != nil {
		return "", nil, fmt.Errorf("error writing ACI: %v", err)
	}
	if err := ValidateACI(aciPath); err != nil {
		return "", nil, fmt.Errorf("invalid ACI generated: %v", err)
	}
	return aciPath, manifest, nil
}

// GenerateACI22LowerLayer generates an ACI with an empty (no-app) manifest
// for a non-top layer of a docker v2.2/OCI image, named after the layer
// digest.
func GenerateACI22LowerLayer(dockerURL *common.ParsedDockerURL, layerDigest string, outputDir string, layerFile *os.File, curPwl []string, compression common.Compression) (string, *schema.ImageManifest, error) {
	// ACIdentifiers cannot contain ":", so flatten the digest first.
	formattedDigest := strings.Replace(layerDigest, ":", "-", -1)
	aciName := fmt.Sprintf("%s/%s-%s", dockerURL.IndexURL, dockerURL.ImageName, formattedDigest)
	sanitizedAciName, err := appctypes.SanitizeACIdentifier(aciName)
	if err != nil {
		return "", nil, err
	}
	manifest, err := GenerateEmptyManifest(sanitizedAciName)
	if err != nil {
		return "", nil, err
	}
	aciPath := generateACIPath(outputDir, aciName, layerDigest, dockerURL.Tag, runtime.GOOS, runtime.GOARCH, -1)
	manifest, err = writeACI(layerFile, *manifest, curPwl, aciPath, compression)
	if err != nil {
		return "", nil, err
	}
	err = ValidateACI(aciPath)
	if err != nil {
		return "", nil, fmt.Errorf("invalid ACI generated: %v", err)
	}
	return aciPath, manifest, nil
}

// GenerateACI22TopLayer generates the ACI for the top layer of a docker
// v2.2/OCI image; its manifest is derived from the image config and depends
// on the previously generated lower layers.
func GenerateACI22TopLayer(dockerURL *common.ParsedDockerURL, manhash string, imageConfig *typesV2.ImageConfig, layerDigest string, outputDir string, layerFile *os.File, curPwl []string, compression common.Compression, lowerLayers []*schema.ImageManifest, debug log.Logger) (string, *schema.ImageManifest, error) {
	aciName := fmt.Sprintf("%s/%s-%s", dockerURL.IndexURL, dockerURL.ImageName, layerDigest)
	sanitizedAciName, err := appctypes.SanitizeACIdentifier(aciName)
	if err != nil {
		return "", nil, err
	}
	manifest, err := GenerateManifestV22(sanitizedAciName, manhash, layerDigest, dockerURL, imageConfig, lowerLayers, debug)
	if err != nil {
		return "", nil, err
	}
	aciPath := generateACIPath(outputDir, aciName, layerDigest, dockerURL.Tag, runtime.GOOS, runtime.GOARCH, -1)
	manifest, err = writeACI(layerFile, *manifest, curPwl, aciPath, compression)
	if err != nil {
		return "", nil, err
	}
	err = ValidateACI(aciPath)
	if err != nil {
		return "", nil, fmt.Errorf("invalid ACI generated: %v", err)
	}
	return aciPath, manifest, nil
}

// generateACIPath builds the output file path for an ACI by joining the
// non-empty parts (name, tag, os, arch, layer number) with dashes; a
// layerNum of -1 omits the numeric suffix.
func generateACIPath(outputDir, imageName, digest, tag, osString, arch string, layerNum int) string {
	aciPath := imageName
	if tag != "" {
		aciPath += "-" + tag
	}
	if osString != "" {
		aciPath += "-" + osString
		if arch != "" {
			aciPath += "-" + arch
		}
	}
	if layerNum != -1 {
		aciPath += "-" + strconv.Itoa(layerNum)
	}
	aciPath += ".aci"
	return path.Join(outputDir, aciPath)
}

// generateEPCmdAnnotation JSON-encodes the docker entrypoint and cmd arrays
// for storage as annotations; empty inputs produce empty strings.
func generateEPCmdAnnotation(dockerEP, dockerCmd []string) (string, string, error) {
	var entrypointAnnotation, cmdAnnotation string
	if len(dockerEP) > 0 {
		entry, err := json.Marshal(dockerEP)
		if err != nil {
			return "", "", err
		}
		entrypointAnnotation = string(entry)
	}
	if len(dockerCmd) > 0 {
		cmd, err := json.Marshal(dockerCmd)
		if err != nil {
			return "", "", err
		}
		cmdAnnotation = string(cmd)
	}
	return entrypointAnnotation, cmdAnnotation, nil
}

// setLabel sets the label entries associated with non-empty key
// to the single non-empty value. It replaces any existing values
// associated with key.
func setLabel(labels map[appctypes.ACIdentifier]string, key, val string) {
	// Silently skip empty keys/values; otherwise overwrite any prior entry.
	if key == "" || val == "" {
		return
	}
	labels[*appctypes.MustACIdentifier(key)] = val
}

// setOSArch translates a Docker OS/architecture pair into the appc
// equivalents and records them under the "os" and "arch" labels.
//
// Returns an error if the translation fails.
func setOSArch(labels map[appctypes.ACIdentifier]string, os, arch string) error {
	appcOS, appcArch, err := appctypes.ToAppcOSArch(os, arch, "")
	if err != nil {
		return err
	}
	setLabel(labels, "os", appcOS)
	setLabel(labels, "arch", appcArch)
	return nil
}

// setAnnotation stores val under key in annotations, skipping empty keys or
// values. An existing entry for key is overwritten.
func setAnnotation(annotations *appctypes.Annotations, key, val string) {
	if key == "" || val == "" {
		return
	}
	annotations.Set(*appctypes.MustACIdentifier(key), val)
}

// GenerateManifest converts the docker manifest format to an appc
// ImageManifest.
func GenerateManifest(layerData types.DockerImageData, manhash string, dockerURL *common.ParsedDockerURL, debug log.Logger) (*schema.ImageManifest, error) { dockerConfig := layerData.Config genManifest := &schema.ImageManifest{} appURL := "" appURL = dockerURL.IndexURL + "/" appURL += dockerURL.ImageName + "-" + layerData.ID appURL, err := appctypes.SanitizeACIdentifier(appURL) if err != nil { return nil, err } name := appctypes.MustACIdentifier(appURL) genManifest.Name = *name acVersion, err := appctypes.NewSemVer(schema.AppContainerVersion.String()) if err != nil { panic("invalid appc spec version") } genManifest.ACVersion = *acVersion genManifest.ACKind = appctypes.ACKind(schema.ImageManifestKind) var annotations appctypes.Annotations labels := make(map[appctypes.ACIdentifier]string) parentLabels := make(map[appctypes.ACIdentifier]string) setLabel(labels, "layer", layerData.ID) setLabel(labels, "version", dockerURL.Tag) setOSArch(labels, layerData.OS, layerData.Architecture) setOSArch(parentLabels, layerData.OS, layerData.Architecture) setAnnotation(&annotations, "authors", layerData.Author) epoch := time.Unix(0, 0) if !layerData.Created.Equal(epoch) { setAnnotation(&annotations, "created", layerData.Created.Format(time.RFC3339)) } setAnnotation(&annotations, "docker-comment", layerData.Comment) setAnnotation(&annotations, common.AppcDockerOriginalName, dockerURL.OriginalName) setAnnotation(&annotations, common.AppcDockerRegistryURL, dockerURL.IndexURL) setAnnotation(&annotations, common.AppcDockerRepository, dockerURL.ImageName) setAnnotation(&annotations, common.AppcDockerImageID, layerData.ID) setAnnotation(&annotations, common.AppcDockerParentImageID, layerData.Parent) setAnnotation(&annotations, common.AppcDockerManifestHash, manhash) if dockerConfig != nil { exec := getExecCommand(dockerConfig.Entrypoint, dockerConfig.Cmd) user, group := parseDockerUser(dockerConfig.User) var env appctypes.Environment for _, v := range dockerConfig.Env { parts := 
strings.SplitN(v, "=", 2) if len(parts) == 2 { env.Set(parts[0], parts[1]) } } app := &appctypes.App{ Exec: exec, User: user, Group: group, Environment: env, WorkingDirectory: dockerConfig.WorkingDir, } app.UserLabels = dockerConfig.Labels app.MountPoints, err = convertVolumesToMPs(dockerConfig.Volumes) if err != nil { return nil, err } app.Ports, err = convertPorts(dockerConfig.ExposedPorts, dockerConfig.PortSpecs, debug) if err != nil { return nil, err } ep, cmd, err := generateEPCmdAnnotation(dockerConfig.Entrypoint, dockerConfig.Cmd) if err != nil { return nil, err } if len(ep) > 0 { setAnnotation(&annotations, common.AppcDockerEntrypoint, ep) } if len(cmd) > 0 { setAnnotation(&annotations, common.AppcDockerCmd, cmd) } genManifest.App = app } if layerData.Parent != "" { indexPrefix := "" // omit docker hub index URL in app name indexPrefix = dockerURL.IndexURL + "/" parentImageNameString := indexPrefix + dockerURL.ImageName + "-" + layerData.Parent parentImageNameString, err := appctypes.SanitizeACIdentifier(parentImageNameString) if err != nil { return nil, err } parentImageName := appctypes.MustACIdentifier(parentImageNameString) plbl, err := appctypes.LabelsFromMap(labels) if err != nil { return nil, err } genManifest.Dependencies = append(genManifest.Dependencies, appctypes.Dependency{ImageName: *parentImageName, Labels: plbl}) setAnnotation(&annotations, common.AppcDockerTag, dockerURL.Tag) } genManifest.Labels, err = appctypes.LabelsFromMap(labels) if err != nil { return nil, err } genManifest.Annotations = annotations return genManifest, nil } func GenerateEmptyManifest(name string) (*schema.ImageManifest, error) { acid, err := appctypes.NewACIdentifier(name) if err != nil { return nil, err } labelsMap := make(map[appctypes.ACIdentifier]string) err = setOSArch(labelsMap, runtime.GOOS, runtime.GOARCH) if err != nil { return nil, err } labels, err := appctypes.LabelsFromMap(labelsMap) if err != nil { return nil, err } return &schema.ImageManifest{ ACKind: 
schema.ImageManifestKind, ACVersion: schema.AppContainerVersion, Name: *acid, Labels: labels, }, nil } // GenerateManifestV22, given a large set of information (documented a couple // lines down), will produce an image manifest compliant with the Dockver V2.2 // image spec func GenerateManifestV22( name string, // The name of this image manhash string, // The hash of this image's manifest imageDigest string, // The digest of the image dockerURL *common.ParsedDockerURL, // The parsed docker URL config *typesV2.ImageConfig, // The image config lowerLayers []*schema.ImageManifest, // A list of manifests for the lower layers debug log.Logger, // The debug logger, for logging debug information ) (*schema.ImageManifest, error) { manifest, err := GenerateEmptyManifest(name) if err != nil { return nil, err } labels := manifest.Labels.ToMap() annotations := manifest.Annotations setLabel(labels, "version", dockerURL.Tag) setOSArch(labels, config.OS, config.Architecture) setAnnotation(&annotations, "author", config.Author) setAnnotation(&annotations, "created", config.Created) setAnnotation(&annotations, common.AppcDockerOriginalName, dockerURL.OriginalName) setAnnotation(&annotations, common.AppcDockerRegistryURL, dockerURL.IndexURL) setAnnotation(&annotations, common.AppcDockerRepository, dockerURL.ImageName) setAnnotation(&annotations, common.AppcDockerImageID, imageDigest) setAnnotation(&annotations, "created", config.Created) setAnnotation(&annotations, common.AppcDockerManifestHash, manhash) if config.Config != nil { innerCfg := config.Config exec := getExecCommand(innerCfg.Entrypoint, innerCfg.Cmd) user, group := parseDockerUser(innerCfg.User) var env appctypes.Environment for _, v := range innerCfg.Env { parts := strings.SplitN(v, "=", 2) if len(parts) == 2 { env.Set(parts[0], parts[1]) } } manifest.App = &appctypes.App{ Exec: exec, User: user, Group: group, Environment: env, WorkingDirectory: innerCfg.WorkingDir, } manifest.App.MountPoints, err = 
convertVolumesToMPs(innerCfg.Volumes) if err != nil { return nil, err } manifest.App.Ports, err = convertPorts(innerCfg.ExposedPorts, nil, debug) if err != nil { return nil, err } ep, cmd, err := generateEPCmdAnnotation(innerCfg.Entrypoint, innerCfg.Cmd) if err != nil { return nil, err } if len(ep) > 0 { setAnnotation(&annotations, common.AppcDockerEntrypoint, ep) } if len(cmd) > 0 { setAnnotation(&annotations, common.AppcDockerCmd, cmd) } } for _, lowerLayer := range lowerLayers { manifest.Dependencies = append(manifest.Dependencies, appctypes.Dependency{ ImageName: lowerLayer.Name, Labels: lowerLayer.Labels, }) } manifest.Labels, err = appctypes.LabelsFromMap(labels) if err != nil { return nil, err } manifest.Annotations = annotations return manifest, nil } // ValidateACI checks whether the ACI in aciPath is valid. func ValidateACI(aciPath string) error { aciFile, err := os.Open(aciPath) if err != nil { return err } defer aciFile.Close() tr, err := aci.NewCompressedTarReader(aciFile) if err != nil { return err } defer tr.Close() if err := aci.ValidateArchive(tr.Reader); err != nil { return err } return nil } type appcPortSorter []appctypes.Port func (s appcPortSorter) Len() int { return len(s) } func (s appcPortSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s appcPortSorter) Less(i, j int) bool { return s[i].Name.String() < s[j].Name.String() } func convertPorts(dockerExposedPorts map[string]struct{}, dockerPortSpecs []string, debug log.Logger) ([]appctypes.Port, error) { ports := []appctypes.Port{} for ep := range dockerExposedPorts { appcPort, err := parseDockerPort(ep) if err != nil { return nil, err } ports = append(ports, *appcPort) } if dockerExposedPorts == nil && dockerPortSpecs != nil { debug.Println("warning: docker image uses deprecated PortSpecs field") for _, ep := range dockerPortSpecs { appcPort, err := parseDockerPort(ep) if err != nil { return nil, err } ports = append(ports, *appcPort) } } sort.Sort(appcPortSorter(ports)) return ports, 
nil } func parseDockerPort(dockerPort string) (*appctypes.Port, error) { var portString string proto := "tcp" sp := strings.Split(dockerPort, "/") if len(sp) < 2 { portString = dockerPort } else { proto = sp[1] portString = sp[0] } port, err := strconv.ParseUint(portString, 10, 0) if err != nil { return nil, fmt.Errorf("error parsing port %q: %v", portString, err) } sn, err := appctypes.SanitizeACName(dockerPort) if err != nil { return nil, err } appcPort := &appctypes.Port{ Name: *appctypes.MustACName(sn), Protocol: proto, Port: uint(port), } return appcPort, nil } type appcVolSorter []appctypes.MountPoint func (s appcVolSorter) Len() int { return len(s) } func (s appcVolSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s appcVolSorter) Less(i, j int) bool { return s[i].Name.String() < s[j].Name.String() } func convertVolumesToMPs(dockerVolumes map[string]struct{}) ([]appctypes.MountPoint, error) { mps := []appctypes.MountPoint{} dup := make(map[string]int) for p := range dockerVolumes { n := filepath.Join("volume", p) sn, err := appctypes.SanitizeACName(n) if err != nil { return nil, err } // check for duplicate names if i, ok := dup[sn]; ok { dup[sn] = i + 1 sn = fmt.Sprintf("%s-%d", sn, i) } else { dup[sn] = 1 } mp := appctypes.MountPoint{ Name: *appctypes.MustACName(sn), Path: p, } mps = append(mps, mp) } sort.Sort(appcVolSorter(mps)) return mps, nil } func writeACI(layer io.ReadSeeker, manifest schema.ImageManifest, curPwl []string, output string, compression common.Compression) (*schema.ImageManifest, error) { dir, _ := path.Split(output) if dir != "" { err := os.MkdirAll(dir, 0755) if err != nil { return nil, fmt.Errorf("error creating ACI parent dir: %v", err) } } aciFile, err := os.Create(output) if err != nil { return nil, fmt.Errorf("error creating ACI file: %v", err) } defer aciFile.Close() var w io.WriteCloser = aciFile if compression == common.GzipCompression { w = gzip.NewWriter(aciFile) defer w.Close() } trw := tar.NewWriter(w) defer 
trw.Close() if err := WriteRootfsDir(trw); err != nil { return nil, fmt.Errorf("error writing rootfs entry: %v", err) } fileMap := make(map[string]struct{}) var whiteouts []string convWalker := func(t *tarball.TarFile) error { name := t.Name() if name == "./" { return nil } newName := path.Join("rootfs", name) absolutePath := strings.TrimPrefix(newName, "rootfs") if filepath.Clean(absolutePath) == "/dev" && t.Header.Typeflag != tar.TypeDir { return fmt.Errorf(`invalid layer: "/dev" is not a directory`) } fileMap[absolutePath] = struct{}{} if strings.Contains(newName, "/.wh.") { whiteouts = append(whiteouts, strings.Replace(absolutePath, ".wh.", "", 1)) return nil } newHeader := &tar.Header{ Typeflag: t.Header.Typeflag, Name: newName, Linkname: t.Header.Linkname, Size: t.Header.Size, Mode: t.Header.Mode, Uid: t.Header.Uid, Gid: t.Header.Gid, Uname: t.Header.Uname, Gname: t.Header.Gname, ModTime: t.Header.ModTime, AccessTime: t.Header.AccessTime, ChangeTime: t.Header.ChangeTime, Devmajor: t.Header.Devmajor, Devminor: t.Header.Devminor, Xattrs: t.Header.Xattrs, } if t.Header.Typeflag == tar.TypeLink { newHeader.Linkname = path.Join("rootfs", t.Linkname()) } if err := trw.WriteHeader(newHeader); err != nil { return err } if _, err := io.Copy(trw, t.TarStream); err != nil { return err } if !util.In(curPwl, absolutePath) { curPwl = append(curPwl, absolutePath) } return nil } tr, err := aci.NewCompressedTarReader(layer) if err == nil { defer tr.Close() // write files in rootfs/ if err := tarball.Walk(*tr.Reader, convWalker); err != nil { return nil, err } } else { // ignore errors: empty layers in tars generated by docker save are not // valid tar files so we ignore errors trying to open them. Converted // ACIs will have the manifest and an empty rootfs directory in any // case. 
} newPwl := subtractWhiteouts(curPwl, whiteouts) newPwl, err = writeStdioSymlinks(trw, fileMap, newPwl) if err != nil { return nil, err } // Let's copy the newly generated PathWhitelist to avoid unintended // side-effects manifest.PathWhitelist = make([]string, len(newPwl)) copy(manifest.PathWhitelist, newPwl) if err := WriteManifest(trw, manifest); err != nil { return nil, fmt.Errorf("error writing manifest: %v", err) } return &manifest, nil } func getExecCommand(entrypoint []string, cmd []string) appctypes.Exec { return append(entrypoint, cmd...) } func parseDockerUser(dockerUser string) (string, string) { // if the docker user is empty assume root user and group if dockerUser == "" { return "0", "0" } dockerUserParts := strings.Split(dockerUser, ":") // when only the user is given, the docker spec says that the default and // supplementary groups of the user in /etc/passwd should be applied. // To avoid inspecting image content, we set gid to the same value as uid. if len(dockerUserParts) < 2 { return dockerUserParts[0], dockerUserParts[0] } return dockerUserParts[0], dockerUserParts[1] } func subtractWhiteouts(pathWhitelist []string, whiteouts []string) []string { matchPaths := []string{} for _, path := range pathWhitelist { // If one of the parent dirs of the current path matches the // whiteout then also this path should be removed curPath := path for curPath != "/" { for _, whiteout := range whiteouts { if curPath == whiteout { matchPaths = append(matchPaths, path) } } curPath = filepath.Dir(curPath) } } for _, matchPath := range matchPaths { idx := util.IndexOf(pathWhitelist, matchPath) if idx != -1 { pathWhitelist = append(pathWhitelist[:idx], pathWhitelist[idx+1:]...) } } sort.Sort(sort.StringSlice(pathWhitelist)) return pathWhitelist } // WriteManifest writes a schema.ImageManifest entry on a tar.Writer. 
func WriteManifest(outputWriter *tar.Writer, manifest schema.ImageManifest) error {
	b, err := json.Marshal(manifest)
	if err != nil {
		return err
	}

	hdr := getGenericTarHeader()
	hdr.Name = "manifest"
	hdr.Mode = 0644
	hdr.Size = int64(len(b))
	hdr.Typeflag = tar.TypeReg

	if err := outputWriter.WriteHeader(hdr); err != nil {
		return err
	}
	if _, err := outputWriter.Write(b); err != nil {
		return err
	}

	return nil
}

// WriteRootfsDir writes a "rootfs" dir entry on a tar.Writer.
func WriteRootfsDir(tarWriter *tar.Writer) error {
	hdr := getGenericTarHeader()
	hdr.Name = "rootfs"
	hdr.Mode = 0755
	hdr.Size = int64(0)
	hdr.Typeflag = tar.TypeDir

	return tarWriter.WriteHeader(hdr)
}

// symlink pairs a symlink path with the target it should point at.
type symlink struct {
	linkname string
	target   string
}

// writeStdioSymlinks adds the /dev/stdin, /dev/stdout, /dev/stderr, and
// /dev/fd symlinks expected by Docker to the converted ACIs so apps can find
// them as expected
func writeStdioSymlinks(tarWriter *tar.Writer, fileMap map[string]struct{}, pwl []string) ([]string, error) {
	stdioSymlinks := []symlink{
		{"/dev/stdin", "/proc/self/fd/0"},
		// Docker makes /dev/{stdout,stderr} point to /proc/self/fd/{1,2} but
		// we point to /dev/console instead in order to support the case when
		// stdout/stderr is a Unix socket (e.g. for the journal).
		{"/dev/stdout", "/dev/console"},
		{"/dev/stderr", "/dev/console"},
		{"/dev/fd", "/proc/self/fd"},
	}

	for _, s := range stdioSymlinks {
		name := s.linkname
		target := s.target
		// Skip links whose path already exists in the layer (fileMap).
		if _, exists := fileMap[name]; exists {
			continue
		}
		hdr := &tar.Header{
			Name:     filepath.Join("rootfs", name),
			Mode:     0777,
			Typeflag: tar.TypeSymlink,
			Linkname: target,
		}
		if err := tarWriter.WriteHeader(hdr); err != nil {
			return nil, err
		}
		// Track the new link in the path whitelist if not already present.
		if !util.In(pwl, name) {
			pwl = append(pwl, name)
		}
	}
	return pwl, nil
}

// getGenericTarHeader returns a tar header with root ownership and
// Unix-epoch timestamps, used as the base for manifest/rootfs entries.
func getGenericTarHeader() *tar.Header {
	// FIXME(iaguis) Use docker image time instead of the Unix Epoch?
	hdr := &tar.Header{
		Uid:        0,
		Gid:        0,
		ModTime:    time.Unix(0, 0),
		Uname:      "0",
		Gname:      "0",
		ChangeTime: time.Unix(0, 0),
	}

	return hdr
}

================================================
FILE: lib/internal/internal_test.go
================================================

// Copyright 2017 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package internal

import (
	"testing"

	"github.com/appc/spec/schema/types"
)

// TestSetLabel verifies that setLabel stores only entries whose key and
// value are both non-empty.
func TestSetLabel(t *testing.T) {
	labels := make(map[types.ACIdentifier]string)
	tests := []struct {
		key, value string
		ok         bool
	}{
		{"", "amd64", false},
		{"freebsd", "", false},
		{"", "", false},
		{"version", "1.2.3", true},
		{"os", "linux", true},
		{"arch", "aarch64", true},
		{"arch", "amd64", true},
	}
	for i, tt := range tests {
		setLabel(labels, tt.key, tt.value)
		value, ok := labels[types.ACIdentifier(tt.key)]
		if ok != tt.ok {
			const text = "#%d failed on label existence validation: %v != %v"
			t.Errorf(text, i, ok, tt.ok)
		}
		if tt.ok && value != tt.value {
			const text = "#%d wrong label for %s key: %v != %v"
			t.Errorf(text, i, tt.key, value, tt.value)
		}
	}
}

// TestSetAnnotation verifies that setAnnotation stores only entries whose
// key and value are both non-empty, replacing existing values.
func TestSetAnnotation(t *testing.T) {
	var annotations types.Annotations
	tests := []struct {
		key, value string
		ok         bool
	}{
		{"", "", false},
		{"", "name", false},
		{"gentoo", "", false},
		{"entrypoint", "/bin/bash", true},
		{"entrypoint", "/bin/sh", true},
		{"cmd", "-c", true},
	}
	for i, tt := range tests {
		setAnnotation(&annotations, tt.key, tt.value)
		value, ok := annotations.Get(tt.key)
		if ok != tt.ok {
			const text = "#%d failed on annotation existence validation: %v != %v"
			t.Errorf(text, i, ok, tt.ok)
		}
		if tt.ok && value != tt.value {
			const text = "#%d wrong annotation for %s key: %v != %v"
			t.Errorf(text, i, tt.key, value, tt.value)
		}
	}
}

// TestOSArch verifies setOSArch's os/arch translation, including failure on
// unknown tuples.
func TestOSArch(t *testing.T) {
	tests := []struct {
		srcOS, srcArch string
		dstOS, dstArch string
		err            bool
	}{
		{"", "", "", "", false},
		{"TempleOS", "ia64", "", "", false},
		{"linux", "amd64", "linux", "amd64", true},
		{"linux", "arm64", "linux", "aarch64", true},
		{"freebsd", "386", "freebsd", "i386", true},
	}
	for i, tt := range tests {
		labels := make(map[types.ACIdentifier]string)
		err := setOSArch(labels, tt.srcOS, tt.srcArch)
		if tt.err != (err == nil) {
			const text = "#%d unexpected result of os/arch conversion: %v"
			t.Errorf(text, i, err)
		}
		if labels["os"] != tt.dstOS {
			const text = "#%d expected %v os, got %v instead"
			t.Errorf(text, i, tt.dstOS, labels["os"])
		}
		if labels["arch"] != tt.dstArch {
			const text = "#%d expected %v arch, got %v instead"
			t.Errorf(text, i, tt.dstArch, labels["arch"])
		}
	}
}

================================================
FILE: lib/internal/tarball/tarfile.go
================================================

// Copyright 2015 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package tarball provides functions to manipulate tar files.
//
// Note: this package is an implementation detail and shouldn't be used outside
// of docker2aci.
package tarball

import (
	"archive/tar"
	"io"
)

// TarFile is a representation of a file in a tarball. It consists of two parts,
// the Header and the Stream. The Header is a regular tar header, the Stream
// is a byte stream that can be used to read the file's contents.
type TarFile struct {
	Header    *tar.Header
	TarStream io.Reader
}

// Name returns the name of the file as reported by the header.
func (t *TarFile) Name() string {
	return t.Header.Name
}

// Linkname returns the Linkname of the file as reported by the header.
func (t *TarFile) Linkname() string {
	return t.Header.Linkname
}

================================================
FILE: lib/internal/tarball/walk.go
================================================

// Copyright 2015 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tarball import ( "archive/tar" "fmt" "io" ) // WalkFunc is a func for handling each file (header and byte stream) in a tarball type WalkFunc func(t *TarFile) error // Walk walks through the files in the tarball represented by tarstream and // passes each of them to the WalkFunc provided as an argument func Walk(tarReader tar.Reader, walkFunc func(t *TarFile) error) error { for { hdr, err := tarReader.Next() if err == io.EOF { // end of tar archive break } if err != nil { return fmt.Errorf("Error reading tar entry: %v", err) } if err := walkFunc(&TarFile{Header: hdr, TarStream: &tarReader}); err != nil { return err } } return nil } ================================================ FILE: lib/internal/types/docker_types.go ================================================ // Copyright 2015 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package types import "time" // DockerImageData stores the JSON structure of a Docker image. // Taken and adapted from upstream Docker. 
type DockerImageData struct {
	ID              string            `json:"id"`
	Parent          string            `json:"parent,omitempty"`
	Comment         string            `json:"comment,omitempty"`
	Created         time.Time         `json:"created"`
	Container       string            `json:"container,omitempty"`
	ContainerConfig DockerImageConfig `json:"container_config,omitempty"`
	DockerVersion   string            `json:"docker_version,omitempty"`
	Author          string            `json:"author,omitempty"`
	Config          *DockerImageConfig `json:"config,omitempty"`
	Architecture    string            `json:"architecture,omitempty"`
	OS              string            `json:"os,omitempty"`
	Checksum        string            `json:"checksum"`
}

// DockerImageConfig stores the configuration of a Docker image.
//
// Note: the Config structure should hold only portable information about the container.
// Here, "portable" means "independent from the host we are running on".
// Non-portable information *should* appear in HostConfig.
// Taken and adapted from upstream Docker.
type DockerImageConfig struct {
	Hostname        string
	Domainname      string
	User            string
	Memory          int64  // Memory limit (in bytes)
	MemorySwap      int64  // Total memory usage (memory + swap); set `-1' to disable swap
	CpuShares       int64  // CPU shares (relative weight vs. other containers)
	Cpuset          string // Cpuset 0-2, 0,1
	AttachStdin     bool
	AttachStdout    bool
	AttachStderr    bool
	PortSpecs       []string // Deprecated - Can be in the format of 8080/tcp
	ExposedPorts    map[string]struct{}
	Tty             bool // Attach standard streams to a tty, including stdin if it is not closed.
	OpenStdin       bool // Open stdin
	StdinOnce       bool // If true, close stdin after the 1 attached client disconnects.
	Env             []string
	Cmd             []string
	Image           string // Name of the image as it was passed by the operator (eg. could be symbolic)
	Volumes         map[string]struct{}
	WorkingDir      string
	Entrypoint      []string
	NetworkDisabled bool
	MacAddress      string
	OnBuild         []string
	Labels          map[string]string
}

// DockerAuthConfigOld represents the deprecated ~/.dockercfg auth
// configuration.
// Taken from upstream Docker.
type DockerAuthConfigOld struct {
	Username      string `json:"username,omitempty"`
	Password      string `json:"password,omitempty"`
	Auth          string `json:"auth"`
	Email         string `json:"email"`
	ServerAddress string `json:"serveraddress,omitempty"`
}

// DockerAuthConfig represents a config.json auth entry.
// Taken from upstream Docker.
type DockerAuthConfig struct {
	Username      string `json:"username,omitempty"`
	Password      string `json:"password,omitempty"`
	Auth          string `json:"auth,omitempty"`
	ServerAddress string `json:"serveraddress,omitempty"`
	RegistryToken string `json:"registrytoken,omitempty"`
}

// DockerConfigFile represents a config.json auth file.
// Taken from upstream docker.
type DockerConfigFile struct {
	// AuthConfigs maps registry server addresses to their auth entries.
	AuthConfigs map[string]DockerAuthConfig `json:"auths"`
}

================================================
FILE: lib/internal/typesV2/docker_types.go
================================================

// Copyright 2016 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package typesV2

import (
	"encoding/json"
	"errors"

	"github.com/appc/docker2aci/lib/common"
)

// Validation errors returned by ImageManifest.Validate.
var (
	ErrIncorrectMediaType = errors.New("incorrect mediaType")
	ErrMissingConfig      = errors.New("the config field is empty")
	ErrMissingLayers      = errors.New("the layers field is empty")
)

// ImageManifest is the JSON structure of a Docker V2.2 / OCI image manifest.
type ImageManifest struct {
	SchemaVersion int                    `json:"schemaVersion"`
	MediaType     string                 `json:"mediaType"`
	Config        *ImageManifestDigest   `json:"config"`
	Layers        []*ImageManifestDigest `json:"layers"`
	Annotations   map[string]string      `json:"annotations"`
}

// ImageManifestDigest is a content descriptor: media type, size and digest
// of a referenced blob.
type ImageManifestDigest struct {
	MediaType string `json:"mediaType"`
	Size      int    `json:"size"`
	Digest    string `json:"digest"`
}

// String returns the compact JSON encoding of the manifest, or the error
// text if marshalling fails.
func (im *ImageManifest) String() string {
	manblob, err := json.Marshal(im)
	if err != nil {
		return err.Error()
	}
	return string(manblob)
}

// PrettyString returns the indented JSON encoding of the manifest, or the
// error text if marshalling fails.
func (im *ImageManifest) PrettyString() string {
	manblob, err := json.MarshalIndent(im, "", "    ")
	if err != nil {
		return err.Error()
	}
	return string(manblob)
}

// Validate checks that the manifest has a supported media type, a config
// descriptor, and at least one layer.
func (im *ImageManifest) Validate() error {
	if im.MediaType != common.MediaTypeDockerV22Manifest &&
		im.MediaType != common.MediaTypeOCIV1Manifest {
		return ErrIncorrectMediaType
	}
	if im.Config == nil {
		return ErrMissingConfig
	}
	if len(im.Layers) == 0 {
		return ErrMissingLayers
	}
	return nil
}

// ImageConfig is the JSON structure of a Docker V2.2 / OCI image config blob.
type ImageConfig struct {
	Created      string                `json:"created"`
	Author       string                `json:"author"`
	Architecture string                `json:"architecture"`
	OS           string                `json:"os"`
	Config       *ImageConfigConfig    `json:"config"`
	RootFS       *ImageConfigRootFS    `json:"rootfs"`
	History      []*ImageConfigHistory `json:"history"`
}

// ImageConfigConfig holds the runtime configuration section of an image
// config blob.
type ImageConfigConfig struct {
	User         string              `json:"User"`
	Memory       int                 `json:"Memory"`
	MemorySwap   int                 `json:"MemorySwap"`
	CpuShares    int                 `json:"CpuShares"`
	ExposedPorts map[string]struct{} `json:"ExposedPorts"`
	Env          []string            `json:"Env"`
	Entrypoint   []string            `json:"Entrypoint"`
	Cmd          []string            `json:"Cmd"`
	Volumes      map[string]struct{} `json:"Volumes"`
	WorkingDir   string              `json:"WorkingDir"`
}

// ImageConfigRootFS describes the layer diff IDs making up the root
// filesystem.
type ImageConfigRootFS struct {
	DiffIDs []string `json:"diff_ids"`
	Type    string   `json:"type"`
}

// ImageConfigHistory describes one history entry of the image.
type ImageConfigHistory struct {
	Created    string `json:"created,omitempty"`
	Author     string `json:"author,omitempty"`
	CreatedBy  string `json:"created_by,omitempty"`
	Comment    string `json:"comment,omitempty"`
	EmptyLayer bool   `json:"empty_layer,omitempty"`
}

// String returns the compact JSON encoding of the config, or the error text
// if marshalling fails.
func (ic *ImageConfig) String() string {
	manblob, err := json.Marshal(ic)
	if err != nil {
		return err.Error()
	}
	return string(manblob)
}

// PrettyString returns the indented JSON encoding of the config, or the
// error text if marshalling fails.
func (ic *ImageConfig) PrettyString() string {
	manblob, err := json.MarshalIndent(ic, "", "    ")
	if err != nil {
		return err.Error()
	}
	return string(manblob)
}

================================================
FILE: lib/internal/util/util.go
================================================

// Copyright 2015 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package util defines convenience functions for handling slices and debugging.
//
// Note: this package is an implementation detail and shouldn't be used outside
// of docker2aci.
package util

import (
	"crypto/tls"
	"fmt"
	"net"
	"net/http"
	"time"

	"github.com/appc/spec/pkg/acirenderer"
)

// Shared HTTP clients; built once so connections can be reused.
var (
	secureClient   = newClient(false)
	insecureClient = newClient(true)
)

// Quote takes a slice of strings and returns another slice with them quoted.
func Quote(l []string) []string {
	var quoted []string

	for _, s := range l {
		quoted = append(quoted, fmt.Sprintf("%q", s))
	}

	return quoted
}

// ReverseImages takes an acirenderer.Images and reverses it.
func ReverseImages(s acirenderer.Images) acirenderer.Images { var o acirenderer.Images for i := len(s) - 1; i >= 0; i-- { o = append(o, s[i]) } return o } // In checks whether el is in list. func In(list []string, el string) bool { return IndexOf(list, el) != -1 } // IndexOf returns the index of el in list, or -1 if it's not found. func IndexOf(list []string, el string) int { for i, x := range list { if el == x { return i } } return -1 } // GetTLSClient gets an HTTP client that behaves like the default HTTP // client, but optionally skips the TLS certificate verification. func GetTLSClient(skipTLSCheck bool) *http.Client { if skipTLSCheck { return insecureClient } return secureClient } func newClient(skipTLSCheck bool) *http.Client { dialer := &net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, } // values taken from stdlib v1.5.3 tr := &http.Transport{ Proxy: http.ProxyFromEnvironment, Dial: dialer.Dial, TLSHandshakeTimeout: 10 * time.Second, } // values taken from stdlib v1.5.3 if skipTLSCheck { tr.TLSClientConfig = &tls.Config{ InsecureSkipVerify: true, } } return &http.Client{ Transport: tr, } } ================================================ FILE: lib/tests/common.go ================================================ package test import ( "archive/tar" "bytes" "crypto/sha256" "encoding/hex" "encoding/json" "io/ioutil" "os" "path" "github.com/appc/docker2aci/lib/common" "github.com/appc/docker2aci/lib/internal/typesV2" ) type Layer map[*tar.Header][]byte type Docker22Image struct { RepoTags []string Layers []Layer Config typesV2.ImageConfig } func GenerateDocker22(destPath string, img Docker22Image) error { layerHashes, err := GenLayers(destPath, img.Layers) if err != nil { return err } configHash, err := GenDocker22Config(destPath, img.Config, layerHashes) if err != nil { return err } err = GenDocker22Manifest(destPath, configHash, layerHashes) if err != nil { return err } return nil } func GenLayers(destPath string, layers []Layer) ([]string, 
error) { var layerHashes []string for _, l := range layers { layerBuffer := &bytes.Buffer{} tw := tar.NewWriter(layerBuffer) for hdr, contents := range l { hdr.Size = int64(len(contents)) err := tw.WriteHeader(hdr) if err != nil { tw.Close() return nil, err } _, err = tw.Write(contents) if err != nil { tw.Close() return nil, err } } tw.Close() layerTarBlob := layerBuffer.Bytes() h := sha256.New() h.Write(layerTarBlob) hashStr := hex.EncodeToString(h.Sum(nil)) layerHashes = append(layerHashes, hashStr) err := ioutil.WriteFile(path.Join(destPath, hashStr), layerTarBlob, 0644) if err != nil { return nil, err } } return layerHashes, nil } func GenDocker22Config(destPath string, conf typesV2.ImageConfig, layerHashes []string) (string, error) { conf.RootFS = &typesV2.ImageConfigRootFS{} conf.RootFS.Type = "layers" for _, h := range layerHashes { conf.RootFS.DiffIDs = append(conf.RootFS.DiffIDs, "sha256:"+h) } confblob, err := json.Marshal(conf) if err != nil { return "", err } h := sha256.New() h.Write(confblob) hashStr := hex.EncodeToString(h.Sum(nil)) err = ioutil.WriteFile(path.Join(destPath, hashStr), confblob, 0644) if err != nil { return "", err } return hashStr, nil } func GenDocker22Manifest(destPath, configHash string, layerHashes []string) error { getDigestSize := func(digest string) (int64, error) { fi, err := os.Stat(path.Join(destPath, digest)) if err != nil { return 0, err } return fi.Size(), nil } configSize, err := getDigestSize(configHash) if err != nil { return err } manifest := &typesV2.ImageManifest{ SchemaVersion: 2, MediaType: common.MediaTypeDockerV22Manifest, Config: &typesV2.ImageManifestDigest{ MediaType: common.MediaTypeDockerV22Config, Size: int(configSize), Digest: "sha256:" + configHash, }, } for _, h := range layerHashes { layerSize, err := getDigestSize(h) if err != nil { return err } manifest.Layers = append(manifest.Layers, &typesV2.ImageManifestDigest{ MediaType: common.MediaTypeDockerV22RootFS, Size: int(layerSize), Digest: "sha256:" + 
h, }) } manblob, err := json.Marshal(manifest) if err != nil { return err } err = ioutil.WriteFile(path.Join(destPath, "manifest.json"), manblob, 0644) if err != nil { return err } return nil } ================================================ FILE: lib/tests/server.go ================================================ package test import ( "fmt" "io" "net/http" "net/http/httptest" "os" "path" "strings" "testing" ) func RunDockerRegistry(t *testing.T, imgPath, imgName, imgRef, manifestMediaType string) *httptest.Server { handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { t.Logf("path requested: %s", r.URL.Path) if r.URL.Path == "/v2/" { w.Header().Add("Docker-Distribution-API-Version", "registry/2.0") w.WriteHeader(http.StatusOK) return } if strings.Contains(r.URL.Path, "manifests") { GetManifest(t, w, r, imgPath, imgName, imgRef, manifestMediaType) return } if strings.Contains(r.URL.Path, "blobs") { GetBlob(t, w, r, imgPath, imgName, imgRef) return } t.Errorf("invalid path: %s", r.URL.Path) }) server := httptest.NewServer(handler) return server } func GetManifest(t *testing.T, w http.ResponseWriter, r *http.Request, imgPath, imgName, imgRef, manifestMediaType string) { parsedImgName, parsedRef, err := parseURL("manifests", r.URL.Path) if err != nil { w.WriteHeader(http.StatusNotFound) t.Errorf("get manifest: error parsing path: %v", err) return } if parsedImgName != imgName { w.WriteHeader(http.StatusNotFound) t.Errorf("get manifest: invalid image name requested: %q", parsedImgName) return } if parsedRef != imgRef { w.WriteHeader(http.StatusNotFound) t.Errorf("get manifest: invalid image ref requested: %q", parsedImgName) return } manFile, err := os.Open(path.Join(imgPath, "manifest.json")) if err != nil { w.WriteHeader(http.StatusInternalServerError) t.Errorf("get manifest: couldn't open manifest: %v", err) return } defer manFile.Close() w.Header().Add("content-type", manifestMediaType) _, err = io.Copy(w, manFile) if err != nil { 
w.WriteHeader(http.StatusInternalServerError) t.Errorf("get manifest: couldn't copy manifest: %v", err) return } } func GetBlob(t *testing.T, w http.ResponseWriter, r *http.Request, imgPath, imgName, imgRef string) { parsedImgName, digest, err := parseURL("blobs", r.URL.Path) if err != nil { w.WriteHeader(http.StatusNotFound) t.Errorf("get blob: %v", err) return } digest = strings.TrimPrefix(digest, "sha256:") if parsedImgName != imgName { w.WriteHeader(http.StatusNotFound) t.Errorf("get blob: invalid image name requested: %s", parsedImgName) return } blobFile, err := os.Open(path.Join(imgPath, digest)) if err != nil { w.WriteHeader(http.StatusInternalServerError) t.Errorf("get blob: couldn't open manifest: %v", err) return } defer blobFile.Close() _, err = io.Copy(w, blobFile) if err != nil { w.WriteHeader(http.StatusInternalServerError) t.Errorf("get blob: couldn't copy manifest: %v", err) return } } func parseURL(resource, input string) (string, string, error) { tokens := strings.Split(input, "/") tokLen := len(tokens) if tokLen < 5 { return "", "", fmt.Errorf("invalid number of tokens in path: %d", len(tokens)) } if tokens[0] != "" { return "", "", fmt.Errorf("path parse error: tok0 = %s", tokens[0]) } if tokens[1] != "v2" { return "", "", fmt.Errorf("path parse error: tok1 = %s", tokens[1]) } if tokens[tokLen-2] != resource { return "", "", fmt.Errorf("path parse error: tok-2 = %s", tokens[tokLen-2]) } return path.Join(tokens[2 : tokLen-2]...), tokens[tokLen-1], nil } ================================================ FILE: lib/tests/v22_test.go ================================================ package test import ( "testing" "archive/tar" "fmt" "io/ioutil" "os" "path" "reflect" "strings" "time" docker2aci "github.com/appc/docker2aci/lib" d2acommon "github.com/appc/docker2aci/lib/common" "github.com/appc/docker2aci/lib/internal/typesV2" "github.com/appc/spec/aci" "github.com/appc/spec/schema" "github.com/appc/spec/schema/types" ) const variableTestValue = 
"variant" // osArchTuple is a placeholder for operating system name and respective // supported architecture. type osArchTuple struct { Os string Arch string } // osArchTuples defines the list of Go os/arch pairs used to test the // conversion of Docker images to ACIs. var osArchTuples = []osArchTuple{ {"linux", "amd64"}, {"linux", "386"}, {"linux", "arm64"}, {"linux", "arm"}, {"linux", "ppc64"}, {"linux", "ppc64le"}, {"linux", "s390x"}, {"freebsd", "amd64"}, {"freebsd", "386"}, {"freebsd", "arm"}, {"darwin", "amd64"}, {"darwin", "386"}, } // dockerImageConfig defines the common image configuration. var dockerImageConfig = typesV2.ImageConfigConfig{ User: "", Memory: 12345, MemorySwap: 0, CpuShares: 9001, ExposedPorts: map[string]struct{}{ "80": struct{}{}, }, Env: []string{ "FOO=1", }, Entrypoint: []string{ "/bin/sh", "-c", "echo", }, Cmd: []string{ "foo", }, Volumes: nil, WorkingDir: "/", } // testDocker22Images generates the Docker images v22 for all supported // os/arch pairs and calls the passed testing function. func testDocker22Images(layers []Layer, fn func(Docker22Image)) { for _, tuple := range osArchTuples { config := typesV2.ImageConfig{ Created: "2016-06-02T21:43:31.291506236Z", Author: "rkt developer ", Architecture: tuple.Arch, OS: tuple.Os, Config: &dockerImageConfig, } // Create a new Docker image configuration and pass it to // the testing function. 
fn(Docker22Image{ RepoTags: []string{"testimage:latest"}, Layers: layers, Config: config, }) } } func expectedManifest(registryUrl, imageName, imageOs, imageArch string) schema.ImageManifest { return schema.ImageManifest{ ACKind: types.ACKind("ImageManifest"), ACVersion: schema.AppContainerVersion, Name: *types.MustACIdentifier("variant"), Labels: []types.Label{ types.Label{ Name: *types.MustACIdentifier("arch"), Value: imageArch, }, types.Label{ Name: *types.MustACIdentifier("os"), Value: imageOs, }, types.Label{ Name: *types.MustACIdentifier("version"), Value: "v0.1.0", }, }, App: &types.App{ Exec: []string{ "/bin/sh", "-c", "echo", "foo", }, User: "0", Group: "0", Environment: []types.EnvironmentVariable{ { Name: "FOO", Value: "1", }, }, WorkingDirectory: "/", Ports: []types.Port{ { Name: "80", Protocol: "tcp", Port: 80, Count: 1, SocketActivated: false, }, }, }, Annotations: []types.Annotation{ { Name: *types.MustACIdentifier("author"), Value: "rkt developer ", }, { Name: *types.MustACIdentifier("created"), Value: "2016-06-02T21:43:31.291506236Z", }, { Name: *types.MustACIdentifier("appc.io/docker/registryurl"), Value: registryUrl, }, { Name: *types.MustACIdentifier("appc.io/docker/repository"), Value: "docker2aci/dockerv22test", }, { Name: *types.MustACIdentifier("appc.io/docker/imageid"), Value: variableTestValue, // Different each testrun for unknown reasons }, { Name: *types.MustACIdentifier("appc.io/docker/manifesthash"), Value: variableTestValue, }, { Name: *types.MustACIdentifier("appc.io/docker/originalname"), Value: imageName, }, { Name: *types.MustACIdentifier("appc.io/docker/entrypoint"), Value: "[\"/bin/sh\",\"-c\",\"echo\"]", }, { Name: *types.MustACIdentifier("appc.io/docker/cmd"), Value: "[\"foo\"]", }, }, } } func fetchImage(imgName, outputDir string, squash bool) ([]string, error) { conversionTmpDir, err := ioutil.TempDir("", "docker2aci-test-") if err != nil { return nil, err } defer os.RemoveAll(conversionTmpDir) conf := 
docker2aci.RemoteConfig{ CommonConfig: docker2aci.CommonConfig{ Squash: squash, OutputDir: outputDir, TmpDir: conversionTmpDir, Compression: d2acommon.GzipCompression, }, Username: "", Password: "", Insecure: d2acommon.InsecureConfig{ SkipVerify: true, AllowHTTP: true, }, } return docker2aci.ConvertRemoteRepo(imgName, conf) } func TestFetchingByTagV22(t *testing.T) { layers := []Layer{ Layer{ &tar.Header{ Name: "thisisafile", Mode: 0644, ModTime: time.Now(), }: []byte("these are its contents"), }, } testDocker22Images(layers, func(img Docker22Image) { tmpDir, err := ioutil.TempDir("", "docker2aci-test-") if err != nil { t.Fatalf("%v", err) } defer os.RemoveAll(tmpDir) err = GenerateDocker22(tmpDir, img) if err != nil { t.Fatalf("%v", err) } imgName := "docker2aci/dockerv22test" imgRef := "v0.1.0" server := RunDockerRegistry(t, tmpDir, imgName, imgRef, d2acommon.MediaTypeDockerV22Manifest) defer server.Close() bareServerURL := strings.TrimPrefix(server.URL, "http://") localUrl := path.Join(bareServerURL, imgName) + ":" + imgRef // Convert the Docker image os/arch pair into values compatible // with application container image specification. 
imgOs, imgArch := img.Config.OS, img.Config.Architecture imgOs, imgArch, err = types.ToAppcOSArch(imgOs, imgArch, "") if err != nil { t.Errorf("unexpected error: %v", err) } expectedImageManifest := expectedManifest(bareServerURL, localUrl, imgOs, imgArch) outputDir, err := ioutil.TempDir("", "docker2aci-test-") if err != nil { t.Fatalf("%v", err) } defer os.RemoveAll(outputDir) acis, err := fetchImage(localUrl, outputDir, true) if err != nil { t.Fatalf("%v", err) } converted := acis[0] f, err := os.Open(converted) if err != nil { t.Fatalf("%v", err) } defer f.Close() manifest, err := aci.ManifestFromImage(f) if err != nil { t.Fatalf("%v", err) } if err := manifestEqual(manifest, &expectedImageManifest); err != nil { t.Errorf("manifest doesn't match expected manifest: %v", err) } }) } func manifestEqual(manifest, expected *schema.ImageManifest) error { if manifest.ACKind != expected.ACKind { return fmt.Errorf("expected ACKind %q, got %q", expected.ACKind, manifest.ACKind) } if manifest.ACVersion != expected.ACVersion { return fmt.Errorf("expected ACVersion %q, got %q", expected.ACVersion, manifest.ACVersion) } if !reflect.DeepEqual(*manifest.App, *expected.App) { return fmt.Errorf("expected App %v, got %v", *expected.App, *manifest.App) } if len(manifest.Labels) != len(expected.Labels) { return fmt.Errorf("Labels not equal: %v != %v", manifest.Labels, expected.Labels) } for _, label := range manifest.Labels { el, ok := expected.Labels.Get(label.Name.String()) if !ok { return fmt.Errorf("expected label %v to exist, did not", label.Name) } if label.Value != el { return fmt.Errorf("expected label %v values to match, but %v != %v", label.Name, el, label.Value) } } if len(manifest.Annotations) != len(expected.Annotations) { return fmt.Errorf("annotations not equal: %v != %v", manifest.Annotations, expected.Annotations) } for _, ann := range manifest.Annotations { ea, ok := expected.Annotations.Get(ann.Name.String()) if ea == variableTestValue { // marker to let us know 
we don't have to assert on this value; skip it continue } if !ok { return fmt.Errorf("expected annotation %v to exist, did not", ann.Name) } if ea != ann.Value { return fmt.Errorf("expected annotation %v values to match, but %v != %v", ann.Name, ea, ann.Value) } } return nil } func TestFetchingByDigestV22(t *testing.T) { layers := []Layer{ Layer{ &tar.Header{ Name: "thisisafile", Mode: 0644, ModTime: time.Now(), }: []byte("these are its contents"), }, } testDocker22Images(layers, func(img Docker22Image) { tmpDir, err := ioutil.TempDir("", "docker2aci-test-") if err != nil { t.Fatalf("%v", err) } defer os.RemoveAll(tmpDir) err = GenerateDocker22(tmpDir, img) if err != nil { t.Fatalf("%v", err) } imgName := "docker2aci/dockerv22test" imgRef := "sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2" server := RunDockerRegistry(t, tmpDir, imgName, imgRef, d2acommon.MediaTypeDockerV22Manifest) defer server.Close() localUrl := path.Join(strings.TrimPrefix(server.URL, "http://"), imgName) + "@" + imgRef outputDir, err := ioutil.TempDir("", "docker2aci-test-") if err != nil { t.Fatalf("%v", err) } defer os.RemoveAll(outputDir) _, err = fetchImage(localUrl, outputDir, true) if err != nil { t.Fatalf("%v", err) } }) } func TestFetchingMultipleLayersV22(t *testing.T) { layers := []Layer{ Layer{ &tar.Header{ Name: "thisisafile", Mode: 0644, ModTime: time.Now(), }: []byte("these are its contents"), }, Layer{ &tar.Header{ Name: "thisisadifferentfile", Mode: 0644, ModTime: time.Now(), }: []byte("the contents of this file are different from the last!"), }, } testDocker22Images(layers, func(img Docker22Image) { tmpDir, err := ioutil.TempDir("", "docker2aci-test-") if err != nil { t.Fatalf("%v", err) } defer os.RemoveAll(tmpDir) err = GenerateDocker22(tmpDir, img) if err != nil { t.Fatalf("%v", err) } imgName := "docker2aci/dockerv22test" imgRef := "v0.1.0" server := RunDockerRegistry(t, tmpDir, imgName, imgRef, d2acommon.MediaTypeDockerV22Manifest) defer 
server.Close() localUrl := path.Join(strings.TrimPrefix(server.URL, "http://"), imgName) + ":" + imgRef outputDir, err := ioutil.TempDir("", "docker2aci-test-") if err != nil { t.Fatalf("%v", err) } defer os.RemoveAll(outputDir) _, err = fetchImage(localUrl, outputDir, true) if err != nil { t.Fatalf("%v", err) } }) } ================================================ FILE: lib/version.go ================================================ // Copyright 2016 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package docker2aci import "github.com/appc/spec/schema" var Version = "0.17.2+git" var AppcVersion = schema.AppContainerVersion ================================================ FILE: main.go ================================================ // Copyright 2015 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package main

import (
	"flag"
	"fmt"
	"net/url"
	"os"
	"strings"

	"github.com/appc/docker2aci/lib"
	"github.com/appc/docker2aci/lib/common"
	"github.com/appc/docker2aci/pkg/log"
	"github.com/appc/spec/aci"
	"github.com/appc/spec/schema"
)

var (
	flagNoSquash           bool
	flagImage              string
	flagDebug              bool
	flagInsecureSkipVerify bool
	flagInsecureAllowHTTP  bool
	flagCompression        string
	flagVersion            bool
)

func init() {
	flag.BoolVar(&flagNoSquash, "nosquash", false, "Don't squash layers and output every layer as ACI")
	flag.StringVar(&flagImage, "image", "", "When converting a local file, it selects a particular image to convert. Format: IMAGE_NAME[:TAG]")
	flag.BoolVar(&flagDebug, "debug", false, "Enables debug messages")
	flag.BoolVar(&flagInsecureSkipVerify, "insecure-skip-verify", false, "Don't verify certificates when fetching images")
	flag.BoolVar(&flagInsecureAllowHTTP, "insecure-allow-http", false, "Uses unencrypted connections when fetching images")
	flag.StringVar(&flagCompression, "compression", "gzip", "Type of compression to use; allowed values: gzip, none")
	flag.BoolVar(&flagVersion, "version", false, "Print version")
}

// printVersion prints the docker2aci and appc spec versions to stdout.
func printVersion() {
	fmt.Println("docker2aci version", docker2aci.Version)
	fmt.Println("appc version", docker2aci.AppcVersion)
}

// runDocker2ACI converts the Docker image referenced by arg — either a
// docker:// URL for a remote repository or a path to a file produced by
// "docker save" — into one or more ACIs in the current directory. On
// success it prints the converted volumes and ports taken from the last
// generated manifest, followed by the list of generated ACI files.
func runDocker2ACI(arg string) error {
	debug := log.NewNopLogger()
	info := log.NewStdLogger(os.Stderr)
	if flagDebug {
		debug = log.NewStdLogger(os.Stderr)
	}

	squash := !flagNoSquash

	var aciLayerPaths []string
	// try to convert a local file
	u, err := url.Parse(arg)
	if err != nil {
		return fmt.Errorf("error parsing argument: %v", err)
	}

	var compression common.Compression
	switch flagCompression {
	case "none":
		compression = common.NoCompression
	case "gzip":
		compression = common.GzipCompression
	default:
		return fmt.Errorf("unknown compression method: %s", flagCompression)
	}

	cfg := docker2aci.CommonConfig{
		Squash:      squash,
		OutputDir:   ".",
		TmpDir:      os.TempDir(),
		Compression: compression,
		Debug:       debug,
		Info:        info,
	}

	if u.Scheme == "docker" {
		// --image only disambiguates multi-image saved files; it is
		// meaningless for a remote repository reference.
		if flagImage != "" {
			return fmt.Errorf("flag --image works only with files")
		}
		dockerURL := strings.TrimPrefix(arg, "docker://")

		indexServer := docker2aci.GetIndexName(dockerURL)

		var username, password string
		username, password, err = docker2aci.GetDockercfgAuth(indexServer)
		if err != nil {
			return fmt.Errorf("error reading .dockercfg file: %v", err)
		}

		remoteConfig := docker2aci.RemoteConfig{
			CommonConfig: cfg,
			Username:     username,
			Password:     password,
			Insecure: common.InsecureConfig{
				SkipVerify: flagInsecureSkipVerify,
				AllowHTTP:  flagInsecureAllowHTTP,
			},
		}

		aciLayerPaths, err = docker2aci.ConvertRemoteRepo(dockerURL, remoteConfig)
	} else {
		fileConfig := docker2aci.FileConfig{
			CommonConfig: cfg,
			DockerURL:    flagImage,
		}
		aciLayerPaths, err = docker2aci.ConvertSavedFile(arg, fileConfig)
		if serr, ok := err.(*common.ErrSeveralImages); ok {
			err = fmt.Errorf("%s, use option --image with one of:\n\n%s", serr, strings.Join(serr.Images, "\n"))
		}
	}
	if err != nil {
		return fmt.Errorf("conversion error: %v", err)
	}
	// Guard against a backend that reports success but produces no ACIs,
	// so the manifest lookup below cannot panic on an empty slice.
	if len(aciLayerPaths) == 0 {
		return fmt.Errorf("conversion error: no ACIs generated")
	}

	// we get last layer's manifest, this will include all the elements in the
	// previous layers. If we're squashing, the last element of aciLayerPaths
	// will be the squashed image.
	manifest, err := getManifest(aciLayerPaths[len(aciLayerPaths)-1])
	if err != nil {
		return err
	}

	printConvertedVolumes(*manifest)
	printConvertedPorts(*manifest)

	fmt.Printf("\nGenerated ACI(s):\n")
	for _, aciFile := range aciLayerPaths {
		fmt.Println(aciFile)
	}

	return nil
}

// printConvertedVolumes prints the mount points of the manifest's app,
// if any, one per line.
func printConvertedVolumes(manifest schema.ImageManifest) {
	if manifest.App == nil {
		return
	}
	if mps := manifest.App.MountPoints; len(mps) > 0 {
		fmt.Printf("\nConverted volumes:\n")
		for _, mp := range mps {
			fmt.Printf("\tname: %q, path: %q, readOnly: %v\n", mp.Name, mp.Path, mp.ReadOnly)
		}
	}
}

// printConvertedPorts prints the ports of the manifest's app, if any,
// one per line.
func printConvertedPorts(manifest schema.ImageManifest) {
	if manifest.App == nil {
		return
	}
	if ports := manifest.App.Ports; len(ports) > 0 {
		fmt.Printf("\nConverted ports:\n")
		for _, port := range ports {
			fmt.Printf("\tname: %q, protocol: %q, port: %v, count: %v, socketActivated: %v\n",
				port.Name, port.Protocol, port.Port, port.Count, port.SocketActivated)
		}
	}
}

// getManifest reads the image manifest embedded in the ACI at aciPath.
func getManifest(aciPath string) (*schema.ImageManifest, error) {
	f, err := os.Open(aciPath)
	if err != nil {
		return nil, fmt.Errorf("error opening converted image: %v", err)
	}
	defer f.Close()

	manifest, err := aci.ManifestFromImage(f)
	if err != nil {
		return nil, fmt.Errorf("error reading manifest from converted image: %v", err)
	}

	return manifest, nil
}

// usage prints command-line usage to stderr, including flag defaults.
func usage() {
	fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
	fmt.Fprintf(os.Stderr, "docker2aci [-debug] [-nosquash] [-compression=(gzip|none)] IMAGE\n")
	fmt.Fprintf(os.Stderr, "  Where IMAGE is\n")
	fmt.Fprintf(os.Stderr, "    [-image=IMAGE_NAME[:TAG]] FILEPATH\n")
	fmt.Fprintf(os.Stderr, "    or\n")
	fmt.Fprintf(os.Stderr, "    docker://[REGISTRYURL/]IMAGE_NAME[:TAG]\n")
	fmt.Fprintf(os.Stderr, "Flags:\n")
	flag.PrintDefaults()
}

func main() {
	flag.Usage = usage
	flag.Parse()
	args := flag.Args()

	// -version works without a positional argument, so check it first.
	if flagVersion {
		printVersion()
		return
	}

	if len(args) != 1 {
		usage()
		os.Exit(2)
	}

	if err := runDocker2ACI(args[0]); err != nil {
		fmt.Fprintf(os.Stderr, "Error: %v\n", err)
		os.Exit(1)
	}
}
================================================ FILE: pkg/log/log.go ================================================ // Copyright 2016 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package log import ( "io" stdlog "log" ) // Logger is the interface that enables logging. // It is compatible with the stdlib "log" methods. // It is also compatible with https://godoc.org/github.com/Sirupsen/logrus#StdLogger. type Logger interface { Print(...interface{}) Printf(string, ...interface{}) Println(...interface{}) } func NewStdLogger(out io.Writer) Logger { return stdlog.New(out, "", 0) } type nopLogger struct{} func NewNopLogger() Logger { return &nopLogger{} } func (l *nopLogger) Print(...interface{}) { // nop } func (l *nopLogger) Printf(string, ...interface{}) { // nop } func (l *nopLogger) Println(...interface{}) { // nop } ================================================ FILE: scripts/bump-release ================================================ #!/bin/bash -e # # Attempt to bump the docker2aci release to the specified version by replacing # all occurrences of the current/previous version. # # Generates two commits: the release itself and the bump to the next +git # version # # YMMV, no disclaimer or warranty, etc. # make sure we are running in a toplevel directory if ! [[ "$0" =~ "scripts/bump-release" ]]; then echo "This script must be run in a toplevel docker2aci directory" exit 255 fi if ! 
[[ "$1" =~ ^v[[:digit:]]+\.[[:digit:]]+\.[[:digit:]]$ ]]; then echo "Usage: scripts/bump-release " echo " where VERSION must be vX.Y.Z" exit 255 fi function replace_stuff() { local FROM local TO local REPLACE FROM=$1 TO=$2 # escape special characters REPLACE=$(sed -e 's/[]\/$*.^|[]/\\&/g'<<< $FROM) shift 2 echo $* | xargs sed -i --follow-symlinks -e "s/$REPLACE/$TO/g" } function replace_version() { replace_stuff $1 $2 lib/version.go } NEXT=${1:1} # 0.2.3 NEXTGIT="${NEXT}+git" # 0.2.3+git PREVGIT=$(grep -Po 'var Version = "\K[^"]*(?=")' lib/version.go) # 0.1.2+git PREV=${PREVGIT::-4} # 0.1.2 replace_version $PREVGIT $NEXT git commit -am "version: bump to v${NEXT}" replace_version $NEXT $NEXTGIT git commit -am "version: bump to v${NEXTGIT}" ================================================ FILE: scripts/glide-update ================================================ #!/usr/bin/env bash set -e if ! [[ "$0" =~ "scripts/glide-update" ]]; then echo "must be run from repository root" exit 255 fi if [ ! $(command -v glide) ]; then echo "glide: command not found" exit 255 fi if [ ! $(command -v glide-vc) ]; then echo "glide-vc: command not found" exit 255 fi glide update --strip-vendor glide-vc --only-code --no-tests --no-test-imports --use-lock-file ================================================ FILE: tests/README.md ================================================ # docker2aci tests ## Semaphore The tests run on the [Semaphore](https://semaphoreci.com/) CI system. The tests are executed on Semaphore at each Pull Request (PR). Each GitHub PR page should have a link to the [test results on Semaphore](https://semaphoreci.com/appc/docker2aci). ### Build settings The tests will run on two VMs. The "Setup" and "Post thread" sections will be executed on both VMs. The "Thread 1" and "Thread 2" will be executed in parallel in separate VMs. #### Setup ``` ./build.sh ``` #### Thread 1 ``` ./tests/test.sh ``` ### Platform Select `Ubuntu 14.04 LTS v1503 (beta with Docker support)`. 
The platform with *Docker support* means the tests will run in a VM. ================================================ FILE: tests/fixture-test-depsloop/check.sh ================================================ #!/bin/sh DOCKER2ACI=../bin/docker2aci TESTDIR=$1 TESTNAME=$2 timeout 10s ${DOCKER2ACI} "${TESTDIR}/${TESTNAME}/${TESTNAME}.docker" if [ $? -eq 1 ]; then echo "### Test case ${TESTNAME}: SUCCESS" exit 0 else echo "### Test case ${TESTNAME}: FAIL" exit 1 fi ================================================ FILE: tests/fixture-test-invalidlayerid/check.sh ================================================ #!/bin/sh DOCKER2ACI=../bin/docker2aci TESTDIR=$1 TESTNAME=$2 sudo ${DOCKER2ACI} "${TESTDIR}/${TESTNAME}/${TESTNAME}.docker" if [ $? -eq 1 ]; then echo "### Test case ${TESTNAME}: SUCCESS" exit 0 else echo "### Test case ${TESTNAME}: FAIL" exit 1 fi ================================================ FILE: tests/rkt-v1.1.0.md5sum ================================================ MD5 (rkt-v1.1.0.tar.gz) = d3d9d62429e53d8f631dbec93e4e719f ================================================ FILE: tests/test-basic/Dockerfile ================================================ FROM busybox COPY check.sh / RUN echo file1 > file1 ; ln file1 file2 RUN echo file3 > file3 RUN echo file4 > file4 CMD /check.sh 2>&1 ================================================ FILE: tests/test-basic/check.sh ================================================ #!/bin/sh set -e set -x grep -q file1 file1 grep -q file1 file2 grep -q file3 file3 grep -q file4 file4 if [ "$CHECK" != "rkt-rendered" ] ; then # Skip this test because of: # https://github.com/coreos/rkt/issues/1774 test $(ls -i file1 |awk '{print $1}') -eq $(ls -i file2 |awk '{print $1}') test $(ls -i file3 |awk '{print $1}') -ne $(ls -i file4 |awk '{print $1}') fi echo "SUCCESS" ================================================ FILE: tests/test-pwl/Dockerfile ================================================ FROM 
gcr.io/google_containers/nginx:1.7.9 COPY check.sh / ENTRYPOINT /check.sh 2>&1 ================================================ FILE: tests/test-pwl/check.sh ================================================ #!/bin/sh set -e set -x ls -l /var/run echo "SUCCESS" ================================================ FILE: tests/test-whiteouts/Dockerfile ================================================ FROM busybox COPY check.sh / RUN echo yes > layer0-file1 ; ln layer0-file1 layer0-file2 ; ln layer0-file1 layer0-file3 RUN echo yes > layer1-file1 ; ln layer1-file1 layer1-file2 ; ln layer1-file1 layer1-file3 RUN echo yes > layer2-file1 ; ln layer2-file1 layer2-file2 ; ln layer2-file1 layer2-file3 RUN echo yes > layer3-file1 ; ln layer3-file1 layer3-file2 ; ln layer3-file1 layer3-file3 RUN rm -f layer1-file1 layer2-file2 layer3-file3 RUN echo yes > layer4-file1 ; ln layer4-file1 layer4-file2 ; ln layer4-file1 layer4-file3 RUN echo yes > layer5-file1 ; ln layer5-file1 layer5-file2 ; ln layer5-file1 layer5-file3 RUN echo yes > layer6-file1 ; ln layer6-file1 layer6-file2 ; ln layer6-file1 layer6-file3 RUN rm -f layer4-file2 layer5-file1 layer6-file1 layer4-file3 layer5-file3 layer6-file2 RUN echo OLD > layer10-file1 ; ln layer10-file1 layer10-file2 ; ln layer10-file1 layer10-file3 RUN echo NEW > layer10-file1 ; ln -f layer10-file1 layer10-file2 ; ln -f layer10-file1 layer10-file3 ; echo foo > foo RUN echo line1 > layer11-file1 ; ln layer11-file1 layer11-file2 ; ln layer11-file1 layer11-file3 RUN echo line2 >> layer11-file1 CMD /check.sh 2>&1 ================================================ FILE: tests/test-whiteouts/check.sh ================================================ #!/bin/sh set -e set -x grep -q yes layer0-file1 grep -q yes layer0-file2 grep -q yes layer0-file3 test ! -e layer1-file1 test -e layer1-file2 test -e layer1-file3 test -e layer2-file1 test ! -e layer2-file2 test -e layer2-file3 test -e layer3-file1 test -e layer3-file2 test ! 
-e layer3-file3 grep -q yes layer1-file2 grep -q yes layer1-file3 grep -q yes layer2-file1 grep -q yes layer2-file3 grep -q yes layer3-file1 grep -q yes layer3-file2 test -e layer4-file1 test ! -e layer4-file2 test ! -e layer4-file3 test ! -e layer5-file1 test -e layer5-file2 test ! -e layer5-file3 test ! -e layer6-file1 test ! -e layer6-file2 test -e layer6-file3 grep -q yes layer4-file1 grep -q yes layer5-file2 grep -q yes layer6-file3 grep -q NEW layer10-file1 grep -q NEW layer10-file2 grep -q NEW layer10-file3 grep -q line1 layer11-file1 grep -q line1 layer11-file2 grep -q line1 layer11-file3 # # Docker with AUFS or overlay storage backend does not handle this test # # correctly and Semaphore uses AUFS if [ "$DOCKER_STORAGE_BACKEND" == devicemapper ] ; then grep -q line2 layer11-file1 grep -q line2 layer11-file2 grep -q line2 layer11-file3 cmp layer11-file1 layer11-file2 cmp layer11-file1 layer11-file3 fi echo "SUCCESS" ================================================ FILE: tests/test.sh ================================================ #!/bin/bash set -e # Gets the parent of the directory that this script is stored in. # https://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in DIR="$( cd "$( dirname $( dirname "${BASH_SOURCE[0]}" ) )" && pwd )" ORG_PATH="github.com/appc" REPO_PATH="${ORG_PATH}/docker2aci" if [ ! -h ${DIR}/gopath/src/${REPO_PATH} ]; then mkdir -p ${DIR}/gopath/src/${ORG_PATH} cd ${DIR} && ln -s ../../../.. gopath/src/${REPO_PATH} || exit 255 fi export GO15VENDOREXPERIMENT=1 export GOPATH=${DIR}/gopath REPO_GOPATH="${GOPATH}/src/${REPO_PATH}" cd "${REPO_GOPATH}" go vet ./pkg/... go vet ./lib/... go test -v ${REPO_PATH}/lib/tests go test -v ${REPO_PATH}/lib/internal go test -v ${REPO_PATH}/lib/common DOCKER2ACI=../bin/docker2aci PREFIX=docker2aci-tests TESTDIR="${REPO_GOPATH}/tests/" RKTVERSION=v1.1.0 cd $TESTDIR # install rkt in Semaphore if ! 
which rkt > /dev/null ; then if [ "$SEMAPHORE" != "true" ] ; then echo "Please install rkt" exit 1 fi pushd $SEMAPHORE_CACHE_DIR if ! md5sum -c $TESTDIR/rkt-$RKTVERSION.md5sum; then wget https://github.com/coreos/rkt/releases/download/$RKTVERSION/rkt-$RKTVERSION.tar.gz fi md5sum -c $TESTDIR/rkt-$RKTVERSION.md5sum tar xf rkt-$RKTVERSION.tar.gz export PATH=$PATH:$PWD/rkt-$RKTVERSION/ popd fi RKT=$(which rkt) DOCKER_STORAGE_BACKEND=$(sudo docker info|grep '^Storage Driver:'|sed 's/Storage Driver: //') for i in $(find . -maxdepth 1 -type d -name 'fixture-test*') ; do TESTNAME=$(basename $i) echo "### Test case ${TESTNAME}..." $TESTDIR/${TESTNAME}/check.sh "${TESTDIR}" "${TESTNAME}" done for i in $(find . -maxdepth 1 -type d -name 'test-*') ; do TESTNAME=$(basename $i) echo "### Test case ${TESTNAME}: build..." sudo docker build --tag=$PREFIX/${TESTNAME} --no-cache=true ${TESTNAME} echo "### Test case ${TESTNAME}: test in Docker..." sudo docker run --rm \ --env=CHECK=docker-run \ --env=DOCKER_STORAGE_BACKEND=$DOCKER_STORAGE_BACKEND \ $PREFIX/${TESTNAME} echo "### Test case ${TESTNAME}: converting to ACI..." sudo docker save -o ${TESTNAME}.docker $PREFIX/${TESTNAME} # Docker now writes files as root, so make them readable sudo chmod o+rx ${TESTNAME}.docker $DOCKER2ACI ${TESTNAME}.docker echo "### Test case ${TESTNAME}: test in rkt..." sudo $RKT prepare --insecure-options=image \ --set-env=CHECK=rkt-run \ --set-env=DOCKER_STORAGE_BACKEND=$DOCKER_STORAGE_BACKEND \ ./${PREFIX}-${TESTNAME}-latest.aci \ > rkt-uuid-${TESTNAME} sudo $RKT run-prepared $(cat rkt-uuid-${TESTNAME}) sudo $RKT status $(cat rkt-uuid-${TESTNAME}) | grep app-${TESTNAME}=0 sudo $RKT rm $(cat rkt-uuid-${TESTNAME}) echo "### Test case ${TESTNAME}: test with 'rkt image render'..." 
sudo $RKT image render --overwrite ${PREFIX}/${TESTNAME} ./rendered-${TESTNAME} pushd rendered-${TESTNAME}/rootfs CHECK=rkt-rendered DOCKER_STORAGE_BACKEND=$DOCKER_STORAGE_BACKEND $TESTDIR/${TESTNAME}/check.sh popd echo "### Test case ${TESTNAME}: SUCCESS" sudo docker rmi $PREFIX/${TESTNAME} done ================================================ FILE: vendor/github.com/appc/spec/LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: vendor/github.com/appc/spec/aci/build.go ================================================ // Copyright 2015 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package aci import ( "archive/tar" "io" "os" "path/filepath" "github.com/appc/spec/pkg/tarheader" ) // TarHeaderWalkFunc is the type of the function which allows setting tar // headers or filtering out tar entries when building an ACI. It will be // applied to every entry in the tar file. // // If true is returned, the entry will be included in the final ACI; if false, // the entry will not be included. 
type TarHeaderWalkFunc func(hdr *tar.Header) bool // BuildWalker creates a filepath.WalkFunc that walks over the given root // (which should represent an ACI layout on disk) and adds the files in the // rootfs/ subdirectory to the given ArchiveWriter func BuildWalker(root string, aw ArchiveWriter, cb TarHeaderWalkFunc) filepath.WalkFunc { // cache of inode -> filepath, used to leverage hard links in the archive inos := map[uint64]string{} return func(path string, info os.FileInfo, err error) error { if err != nil { return err } relpath, err := filepath.Rel(root, path) if err != nil { return err } if relpath == "." { return nil } if relpath == ManifestFile { // ignore; this will be written by the archive writer // TODO(jonboulle): does this make sense? maybe just remove from archivewriter? return nil } link := "" var r io.Reader switch info.Mode() & os.ModeType { case os.ModeSocket: return nil case os.ModeNamedPipe: case os.ModeCharDevice: case os.ModeDevice: case os.ModeDir: case os.ModeSymlink: target, err := os.Readlink(path) if err != nil { return err } link = target default: file, err := os.Open(path) if err != nil { return err } defer file.Close() r = file } hdr, err := tar.FileInfoHeader(info, link) if err != nil { panic(err) } // Because os.FileInfo's Name method returns only the base // name of the file it describes, it may be necessary to // modify the Name field of the returned header to provide the // full path name of the file. 
hdr.Name = relpath tarheader.Populate(hdr, info, inos) // If the file is a hard link to a file we've already seen, we // don't need the contents if hdr.Typeflag == tar.TypeLink { hdr.Size = 0 r = nil } if cb != nil { if !cb(hdr) { return nil } } if err := aw.AddFile(hdr, r); err != nil { return err } return nil } } ================================================ FILE: vendor/github.com/appc/spec/aci/doc.go ================================================ // Copyright 2015 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package aci contains various functions for working with App Container Images. package aci ================================================ FILE: vendor/github.com/appc/spec/aci/file.go ================================================ // Copyright 2015 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package aci import ( "archive/tar" "bytes" "compress/bzip2" "compress/gzip" "encoding/hex" "errors" "fmt" "io" "io/ioutil" "log" "net/http" "os/exec" "path/filepath" "github.com/appc/spec/schema" ) type FileType string const ( TypeGzip = FileType("gz") TypeBzip2 = FileType("bz2") TypeXz = FileType("xz") TypeTar = FileType("tar") TypeText = FileType("text") TypeUnknown = FileType("unknown") readLen = 512 // max bytes to sniff hexHdrGzip = "1f8b" hexHdrBzip2 = "425a68" hexHdrXz = "fd377a585a00" hexSigTar = "7573746172" tarOffset = 257 textMime = "text/plain; charset=utf-8" ) var ( hdrGzip []byte hdrBzip2 []byte hdrXz []byte sigTar []byte tarEnd int ) func mustDecodeHex(s string) []byte { b, err := hex.DecodeString(s) if err != nil { panic(err) } return b } func init() { hdrGzip = mustDecodeHex(hexHdrGzip) hdrBzip2 = mustDecodeHex(hexHdrBzip2) hdrXz = mustDecodeHex(hexHdrXz) sigTar = mustDecodeHex(hexSigTar) tarEnd = tarOffset + len(sigTar) } // DetectFileType attempts to detect the type of file that the given reader // represents by comparing it against known file signatures (magic numbers) func DetectFileType(r io.Reader) (FileType, error) { var b bytes.Buffer n, err := io.CopyN(&b, r, readLen) if err != nil && err != io.EOF { return TypeUnknown, err } bs := b.Bytes() switch { case bytes.HasPrefix(bs, hdrGzip): return TypeGzip, nil case bytes.HasPrefix(bs, hdrBzip2): return TypeBzip2, nil case bytes.HasPrefix(bs, hdrXz): return TypeXz, nil case n > int64(tarEnd) && bytes.Equal(bs[tarOffset:tarEnd], sigTar): return TypeTar, nil case http.DetectContentType(bs) == textMime: return TypeText, nil default: return TypeUnknown, nil } } // XzReader is an io.ReadCloser which decompresses xz compressed data. type XzReader struct { io.ReadCloser cmd *exec.Cmd closech chan error } // NewXzReader shells out to a command line xz executable (if // available) to decompress the given io.Reader using the xz // compression format and returns an *XzReader. 
// It is the caller's responsibility to call Close on the XzReader when done. func NewXzReader(r io.Reader) (*XzReader, error) { rpipe, wpipe := io.Pipe() ex, err := exec.LookPath("xz") if err != nil { log.Fatalf("couldn't find xz executable: %v", err) } cmd := exec.Command(ex, "--decompress", "--stdout") closech := make(chan error) cmd.Stdin = r cmd.Stdout = wpipe go func() { err := cmd.Run() wpipe.CloseWithError(err) closech <- err }() return &XzReader{rpipe, cmd, closech}, nil } func (r *XzReader) Close() error { r.ReadCloser.Close() r.cmd.Process.Kill() return <-r.closech } // ManifestFromImage extracts a new schema.ImageManifest from the given ACI image. func ManifestFromImage(rs io.ReadSeeker) (*schema.ImageManifest, error) { var im schema.ImageManifest tr, err := NewCompressedTarReader(rs) if err != nil { return nil, err } defer tr.Close() for { hdr, err := tr.Next() switch err { case io.EOF: return nil, errors.New("missing manifest") case nil: if filepath.Clean(hdr.Name) == ManifestFile { data, err := ioutil.ReadAll(tr) if err != nil { return nil, err } if err := im.UnmarshalJSON(data); err != nil { return nil, err } return &im, nil } default: return nil, fmt.Errorf("error extracting tarball: %v", err) } } } // TarReadCloser embeds a *tar.Reader and the related io.Closer // It is the caller's responsibility to call Close on TarReadCloser when // done. type TarReadCloser struct { *tar.Reader io.Closer } func (r *TarReadCloser) Close() error { return r.Closer.Close() } // NewCompressedTarReader creates a new TarReadCloser reading from the // given ACI image. // It is the caller's responsibility to call Close on the TarReadCloser // when done. func NewCompressedTarReader(rs io.ReadSeeker) (*TarReadCloser, error) { cr, err := NewCompressedReader(rs) if err != nil { return nil, err } return &TarReadCloser{tar.NewReader(cr), cr}, nil } // NewCompressedReader creates a new io.ReaderCloser from the given ACI image. 
// It is the caller's responsibility to call Close on the Reader when done.
func NewCompressedReader(rs io.ReadSeeker) (io.ReadCloser, error) {
	var (
		dr  io.ReadCloser
		err error
	)

	// Rewind, sniff the magic number, then rewind again so the chosen
	// decompressor sees the stream from the start.
	_, err = rs.Seek(0, 0)
	if err != nil {
		return nil, err
	}

	ftype, err := DetectFileType(rs)
	if err != nil {
		return nil, err
	}

	_, err = rs.Seek(0, 0)
	if err != nil {
		return nil, err
	}

	switch ftype {
	case TypeGzip:
		dr, err = gzip.NewReader(rs)
		if err != nil {
			return nil, err
		}
	case TypeBzip2:
		dr = ioutil.NopCloser(bzip2.NewReader(rs))
	case TypeXz:
		dr, err = NewXzReader(rs)
		if err != nil {
			return nil, err
		}
	case TypeTar:
		// already uncompressed; pass through
		dr = ioutil.NopCloser(rs)
	case TypeUnknown:
		return nil, errors.New("error: unknown image filetype")
	default:
		return nil, errors.New("no type returned from DetectFileType?")
	}
	return dr, nil
}
================================================ FILE: vendor/github.com/appc/spec/aci/layout.go ================================================
// Copyright 2015 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package aci

/*
Image Layout

The on-disk layout of an app container is straightforward.
It includes a rootfs with all of the files that will exist in the root of the app
and a manifest describing the image.
The layout MUST contain an image manifest.

/manifest
/rootfs/
/rootfs/usr/bin/mysql
*/

import (
	"archive/tar"
	"bytes"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"

	"github.com/appc/spec/schema"
	"github.com/appc/spec/schema/types"
)

const (
	// Path to manifest file inside the layout
	ManifestFile = "manifest"
	// Path to rootfs directory inside the layout
	RootfsDir = "rootfs"
)

// ErrOldVersion is returned when a manifest's ACVersion has a major version
// older than the one this library implements.
type ErrOldVersion struct {
	version types.SemVer
}

func (e ErrOldVersion) Error() string {
	return fmt.Sprintf("ACVersion too old. Found major version %v, expected %v", e.version.Major, schema.AppContainerVersion.Major)
}

var (
	ErrNoRootFS   = errors.New("no rootfs found in layout")
	ErrNoManifest = errors.New("no image manifest found in layout")
)

// ValidateLayout takes a directory and validates that the layout of the directory
// matches that expected by the Application Container Image format.
// If any errors are encountered during the validation, it will abort and
// return the first one.
func ValidateLayout(dir string) error {
	fi, err := os.Stat(dir)
	if err != nil {
		return fmt.Errorf("error accessing layout: %v", err)
	}
	if !fi.IsDir() {
		return fmt.Errorf("given path %q is not a directory", dir)
	}
	var flist []string
	var imOK, rfsOK bool
	var im io.Reader
	// Walk the tree, recording the manifest reader, the rootfs dir, and
	// every other path (validated against the rootfs prefix later).
	walkLayout := func(fpath string, fi os.FileInfo, err error) error {
		rpath, err := filepath.Rel(dir, fpath)
		if err != nil {
			return err
		}
		switch rpath {
		case ".":
		case ManifestFile:
			im, err = os.Open(fpath)
			if err != nil {
				return err
			}
			imOK = true
		case RootfsDir:
			if !fi.IsDir() {
				return errors.New("rootfs is not a directory")
			}
			rfsOK = true
		default:
			flist = append(flist, rpath)
		}
		return nil
	}
	if err := filepath.Walk(dir, walkLayout); err != nil {
		return err
	}
	return validate(imOK, im, rfsOK, flist)
}

// ValidateArchive takes a *tar.Reader and validates that the layout of the
// filesystem the reader encapsulates matches that expected by the
// Application Container Image format. If any errors are encountered during
// the validation, it will abort and return the first one.
func ValidateArchive(tr *tar.Reader) error {
	var fseen map[string]bool = make(map[string]bool)
	var imOK, rfsOK bool
	var im bytes.Buffer
Tar:
	for {
		hdr, err := tr.Next()
		switch {
		case err == nil:
		case err == io.EOF:
			break Tar
		default:
			return err
		}
		name := filepath.Clean(hdr.Name)
		switch name {
		case ".":
		case ManifestFile:
			// buffer the manifest bytes for validation after the walk
			_, err := io.Copy(&im, tr)
			if err != nil {
				return err
			}
			imOK = true
		case RootfsDir:
			if !hdr.FileInfo().IsDir() {
				return fmt.Errorf("rootfs is not a directory")
			}
			rfsOK = true
		default:
			if _, seen := fseen[name]; seen {
				return fmt.Errorf("duplicate file entry in archive: %s", name)
			}
			fseen[name] = true
		}
	}
	var flist []string
	for key := range fseen {
		flist = append(flist, key)
	}
	return validate(imOK, &im, rfsOK, flist)
}

// validate is the shared back end of ValidateLayout and ValidateArchive:
// it checks that a manifest and rootfs were found, that the manifest parses
// and is of a supported version, and that every other path lives under rootfs.
func validate(imOK bool, im io.Reader, rfsOK bool, files []string) error {
	defer func() {
		// im may be an *os.File (ValidateLayout); close it if so
		if rc, ok := im.(io.Closer); ok {
			rc.Close()
		}
	}()
	if !imOK {
		return ErrNoManifest
	}
	if !rfsOK {
		return ErrNoRootFS
	}
	b, err := ioutil.ReadAll(im)
	if err != nil {
		return fmt.Errorf("error reading image manifest: %v", err)
	}
	var a schema.ImageManifest
	if err := a.UnmarshalJSON(b); err != nil {
		return fmt.Errorf("image manifest validation failed: %v", err)
	}
	if a.ACVersion.LessThanMajor(schema.AppContainerVersion) {
		return ErrOldVersion{
			version: a.ACVersion,
		}
	}
	for _, f := range files {
		if !strings.HasPrefix(f, "rootfs") {
			return fmt.Errorf("unrecognized file path in layout: %q", f)
		}
	}
	return nil
}
================================================ FILE: vendor/github.com/appc/spec/aci/writer.go ================================================
// Copyright 2015 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package aci

import (
	"archive/tar"
	"bytes"
	"encoding/json"
	"io"
	"time"

	"github.com/appc/spec/schema"
)

// ArchiveWriter writes App Container Images. Users wanting to create an ACI
// should create an ArchiveWriter and add files to it; the ACI will be written
// to the underlying tar.Writer
type ArchiveWriter interface {
	AddFile(hdr *tar.Header, r io.Reader) error
	Close() error
}

// imageArchiveWriter implements ArchiveWriter on top of a tar.Writer,
// holding the manifest to emit on Close.
type imageArchiveWriter struct {
	*tar.Writer
	am *schema.ImageManifest
}

// NewImageWriter creates a new ArchiveWriter which will generate an App
// Container Image based on the given manifest and write it to the given
// tar.Writer
func NewImageWriter(am schema.ImageManifest, w *tar.Writer) ArchiveWriter {
	aw := &imageArchiveWriter{
		w,
		&am,
	}
	return aw
}

// AddFile writes the given tar header and, if r is non-nil, streams the
// entry's contents into the archive.
func (aw *imageArchiveWriter) AddFile(hdr *tar.Header, r io.Reader) error {
	err := aw.Writer.WriteHeader(hdr)
	if err != nil {
		return err
	}

	if r != nil {
		_, err := io.Copy(aw.Writer, r)
		if err != nil {
			return err
		}
	}

	return nil
}

// addFileNow adds a regular file at path with the given in-memory contents,
// owned by root with mode 0644 and the current timestamp.
func (aw *imageArchiveWriter) addFileNow(path string, contents []byte) error {
	buf := bytes.NewBuffer(contents)
	now := time.Now()
	hdr := tar.Header{
		Name:       path,
		Mode:       0644,
		Uid:        0,
		Gid:        0,
		Size:       int64(buf.Len()),
		ModTime:    now,
		Typeflag:   tar.TypeReg,
		Uname:      "root",
		Gname:      "root",
		ChangeTime: now,
	}
	return aw.AddFile(&hdr, buf)
}

// addManifest serializes m and writes it into the archive under name.
func (aw *imageArchiveWriter) addManifest(name string, m json.Marshaler) error {
	out, err := m.MarshalJSON()
	if err != nil {
		return err
	}
	return aw.addFileNow(name, out)
}

// Close writes the image manifest as the final entry and closes the
// underlying tar.Writer.
func (aw *imageArchiveWriter) Close() error {
	if err := aw.addManifest(ManifestFile, aw.am); err != nil {
		return err
	}
	return aw.Writer.Close()
}
================================================ FILE: vendor/github.com/appc/spec/pkg/acirenderer/acirenderer.go ================================================
// Copyright 2015 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package acirenderer

import (
	"archive/tar"
	"crypto/sha512"
	"fmt"
	"hash"
	"io"
	"io/ioutil"
	"path/filepath"
	"strings"

	"github.com/appc/spec/schema"
	"github.com/appc/spec/schema/types"
)

// An ACIRegistry provides all functions of an ACIProvider plus functions to
// search for an aci and get its contents
type ACIRegistry interface {
	ACIProvider
	GetImageManifest(key string) (*schema.ImageManifest, error)
	GetACI(name types.ACIdentifier, labels types.Labels) (string, error)
}

// An ACIProvider provides functions to get an ACI contents, to convert an
// ACI hash to the key under which the ACI is known to the provider and to resolve an
// image ID to the key under which it's known to the provider.
type ACIProvider interface {
	// Read the ACI contents stream given the key. Use ResolveKey to
	// convert an image ID to the relative provider's key.
	ReadStream(key string) (io.ReadCloser, error)
	// Converts an image ID to the, if existent, key under which the
	// ACI is known to the provider
	ResolveKey(key string) (string, error)
	// Converts a Hash to the provider's key
	HashToKey(h hash.Hash) string
}

// An Image contains the ImageManifest, the ACIProvider's key and its Level in
// the dependency tree.
type Image struct {
	Im    *schema.ImageManifest // parsed manifest of this image
	Key   string                // the ACIProvider's key for this image
	Level uint16                // depth in the flattened dependency tree (0 = upper image)
}

// Images encapsulates an ordered slice of Image structs. It represents a flat
// dependency tree.
// The upper Image should be the first in the slice with a level of 0.
// For example if A is the upper image and has two deps (in order B and C). And C has one dep (D),
// the slice (reporting the app name and excluding im and Hash) should be:
// [{A, Level: 0}, {C, Level:1}, {D, Level: 2}, {B, Level: 1}]
type Images []Image

// ACIFiles represents which files to extract for every ACI
type ACIFiles struct {
	Key     string              // the provider key of the ACI these files come from
	FileMap map[string]struct{} // set of entry names to extract from that ACI
}

// RenderedACI is an (ordered) slice of ACIFiles
type RenderedACI []*ACIFiles

// GetRenderedACIWithImageID, given an imageID, starts with the matching image
// available in the store, creates the dependencies list and returns the
// RenderedACI list.
func GetRenderedACIWithImageID(imageID types.Hash, ap ACIRegistry) (RenderedACI, error) {
	imgs, err := CreateDepListFromImageID(imageID, ap)
	if err != nil {
		return nil, err
	}
	return GetRenderedACIFromList(imgs, ap)
}

// GetRenderedACI, given an image app name and optional labels, starts with the
// best matching image available in the store, creates the dependencies list
// and returns the RenderedACI list.
func GetRenderedACI(name types.ACIdentifier, labels types.Labels, ap ACIRegistry) (RenderedACI, error) {
	imgs, err := CreateDepListFromNameLabels(name, labels, ap)
	if err != nil {
		return nil, err
	}
	return GetRenderedACIFromList(imgs, ap)
}

// GetRenderedACIFromList returns the RenderedACI list. All files outside rootfs
// are excluded (at the moment only "manifest").
func GetRenderedACIFromList(imgs Images, ap ACIProvider) (RenderedACI, error) { if len(imgs) == 0 { return nil, fmt.Errorf("image list empty") } allFiles := make(map[string]byte) renderedACI := RenderedACI{} first := true for i, img := range imgs { pwlm := getUpperPWLM(imgs, i) ra, err := getACIFiles(img, ap, allFiles, pwlm) if err != nil { return nil, err } // Use the manifest from the upper ACI if first { ra.FileMap["manifest"] = struct{}{} first = false } renderedACI = append(renderedACI, ra) } return renderedACI, nil } // getUpperPWLM returns the pwl at the lower level for the branch where // img[pos] lives. func getUpperPWLM(imgs Images, pos int) map[string]struct{} { var pwlm map[string]struct{} curlevel := imgs[pos].Level // Start from our position and go back ignoring the other leafs. for i := pos; i >= 0; i-- { img := imgs[i] if img.Level < curlevel && len(img.Im.PathWhitelist) > 0 { pwlm = pwlToMap(img.Im.PathWhitelist) } curlevel = img.Level } return pwlm } // getACIFiles returns the ACIFiles struct for the given image. All files // outside rootfs are excluded (at the moment only "manifest"). func getACIFiles(img Image, ap ACIProvider, allFiles map[string]byte, pwlm map[string]struct{}) (*ACIFiles, error) { rs, err := ap.ReadStream(img.Key) if err != nil { return nil, err } defer rs.Close() hash := sha512.New() r := io.TeeReader(rs, hash) thispwlm := pwlToMap(img.Im.PathWhitelist) ra := &ACIFiles{FileMap: make(map[string]struct{})} if err = Walk(tar.NewReader(r), func(hdr *tar.Header) error { name := hdr.Name cleanName := filepath.Clean(name) // Add the rootfs directory. if cleanName == "rootfs" && hdr.Typeflag == tar.TypeDir { ra.FileMap[cleanName] = struct{}{} allFiles[cleanName] = hdr.Typeflag return nil } // Ignore files outside /rootfs/ (at the moment only "manifest"). if !strings.HasPrefix(cleanName, "rootfs/") { return nil } // Is the file in our PathWhiteList? 
// If the file is a directory continue also if not in PathWhiteList if hdr.Typeflag != tar.TypeDir { if len(img.Im.PathWhitelist) > 0 { if _, ok := thispwlm[cleanName]; !ok { return nil } } } // Is the file in the lower level PathWhiteList of this img branch? if pwlm != nil { if _, ok := pwlm[cleanName]; !ok { return nil } } // Is the file already provided by a previous image? if _, ok := allFiles[cleanName]; ok { return nil } // Check that the parent dirs are also of type dir in the upper // images parentDir := filepath.Dir(cleanName) for parentDir != "." && parentDir != "/" { if ft, ok := allFiles[parentDir]; ok && ft != tar.TypeDir { return nil } parentDir = filepath.Dir(parentDir) } ra.FileMap[cleanName] = struct{}{} allFiles[cleanName] = hdr.Typeflag return nil }); err != nil { return nil, err } // Tar does not necessarily read the complete file, so ensure we read the entirety into the hash if _, err := io.Copy(ioutil.Discard, r); err != nil { return nil, fmt.Errorf("error reading ACI: %v", err) } if g := ap.HashToKey(hash); g != img.Key { return nil, fmt.Errorf("image hash does not match expected (%s != %s)", g, img.Key) } ra.Key = img.Key return ra, nil } // pwlToMap converts a pathWhiteList slice to a map for faster search // It will also prepend "rootfs/" to the provided paths and they will be // relative to "/" so they can be easily compared with the tar.Header.Name // If pwl length is 0, a nil map is returned func pwlToMap(pwl []string) map[string]struct{} { if len(pwl) == 0 { return nil } m := make(map[string]struct{}, len(pwl)) for _, name := range pwl { relpath := filepath.Join("rootfs", name) m[relpath] = struct{}{} } return m } func Walk(tarReader *tar.Reader, walkFunc func(hdr *tar.Header) error) error { for { hdr, err := tarReader.Next() if err == io.EOF { // end of tar archive break } if err != nil { return fmt.Errorf("Error reading tar entry: %v", err) } if err := walkFunc(hdr); err != nil { return err } } return nil } 
================================================
FILE: vendor/github.com/appc/spec/pkg/acirenderer/resolve.go
================================================
// Copyright 2015 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package acirenderer

import (
	"container/list"

	"github.com/appc/spec/schema/types"
)

// CreateDepListFromImageID returns the flat dependency tree of the image with
// the provided imageID
func CreateDepListFromImageID(imageID types.Hash, ap ACIRegistry) (Images, error) {
	key, err := ap.ResolveKey(imageID.String())
	if err != nil {
		return nil, err
	}
	return createDepList(key, ap)
}

// CreateDepListFromNameLabels returns the flat dependency tree of the image
// with the provided app name and optional labels.
func CreateDepListFromNameLabels(name types.ACIdentifier, labels types.Labels, ap ACIRegistry) (Images, error) {
	key, err := ap.GetACI(name, labels)
	if err != nil {
		return nil, err
	}
	return createDepList(key, ap)
}

// createDepList returns the flat dependency tree as a list of Image type
func createDepList(key string, ap ACIRegistry) (Images, error) {
	imgsl := list.New()
	im, err := ap.GetImageManifest(key)
	if err != nil {
		return nil, err
	}
	img := Image{Im: im, Key: key, Level: 0}
	imgsl.PushFront(img)

	// Create a flat dependency tree. Use a LinkedList to be able to
	// insert elements in the list while working on it.
	for el := imgsl.Front(); el != nil; el = el.Next() {
		img := el.Value.(Image)
		dependencies := img.Im.Dependencies
		for _, d := range dependencies {
			var depimg Image
			var depKey string
			// A dependency may be pinned by image ID; otherwise resolve it
			// by name/labels through the registry.
			if d.ImageID != nil && !d.ImageID.Empty() {
				depKey, err = ap.ResolveKey(d.ImageID.String())
				if err != nil {
					return nil, err
				}
			} else {
				var err error
				depKey, err = ap.GetACI(d.ImageName, d.Labels)
				if err != nil {
					return nil, err
				}
			}
			im, err := ap.GetImageManifest(depKey)
			if err != nil {
				return nil, err
			}
			depimg = Image{Im: im, Key: depKey, Level: img.Level + 1}
			// InsertAfter(el) yields the traversal order documented on the
			// Images type (depth-first, later siblings pushed back).
			imgsl.InsertAfter(depimg, el)
		}
	}

	imgs := Images{}
	for el := imgsl.Front(); el != nil; el = el.Next() {
		imgs = append(imgs, el.Value.(Image))
	}
	return imgs, nil
}

================================================
FILE: vendor/github.com/appc/spec/pkg/device/device_linux.go
================================================
// Copyright 2016 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux

package device

// with glibc/sysdeps/unix/sysv/linux/sys/sysmacros.h as reference

// Major extracts the major device number from a Linux dev_t value
// (bits 8-19 plus the high bits above 32).
func Major(rdev uint64) uint {
	return uint((rdev>>8)&0xfff) | (uint(rdev>>32) & ^uint(0xfff))
}

// Minor extracts the minor device number from a Linux dev_t value
// (bits 0-7 plus bits 20 and above, shifted down by 12).
func Minor(rdev uint64) uint {
	return uint(rdev&0xff) | uint(uint32(rdev>>12) & ^uint32(0xff))
}

// Makedev combines a major and minor number into a Linux dev_t value,
// the inverse of Major/Minor above.
func Makedev(maj uint, min uint) uint64 {
	return uint64(min&0xff) | (uint64(maj&0xfff) << 8) |
		((uint64(min) & ^uint64(0xff)) << 12) |
		((uint64(maj) & ^uint64(0xfff)) << 32)
}

================================================
FILE: vendor/github.com/appc/spec/pkg/device/device_posix.go
================================================
// Copyright 2015 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build freebsd netbsd openbsd darwin package device /* #define _BSD_SOURCE #define _DEFAULT_SOURCE #include unsigned int my_major(dev_t dev) { return major(dev); } unsigned int my_minor(dev_t dev) { return minor(dev); } dev_t my_makedev(unsigned int maj, unsigned int min) { return makedev(maj, min); } */ import "C" func Major(rdev uint64) uint { major := C.my_major(C.dev_t(rdev)) return uint(major) } func Minor(rdev uint64) uint { minor := C.my_minor(C.dev_t(rdev)) return uint(minor) } func Makedev(maj uint, min uint) uint64 { dev := C.my_makedev(C.uint(maj), C.uint(min)) return uint64(dev) } ================================================ FILE: vendor/github.com/appc/spec/pkg/tarheader/doc.go ================================================ // Copyright 2015 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package tarheader contains a simple abstraction to accurately create // tar.Headers on different operating systems. package tarheader ================================================ FILE: vendor/github.com/appc/spec/pkg/tarheader/pop_darwin.go ================================================ // Copyright 2015 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//+build darwin

package tarheader

import (
	"archive/tar"
	"os"
	"syscall"
	"time"
)

func init() {
	// Register the darwin-specific ctime populator with the generic
	// Populate pipeline (see tarheader.go).
	populateHeaderStat = append(populateHeaderStat, populateHeaderCtime)
}

// populateHeaderCtime copies the file's change time out of the darwin
// syscall.Stat_t (field Ctimespec) into the tar header.
func populateHeaderCtime(h *tar.Header, fi os.FileInfo, _ map[uint64]string) {
	st, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return
	}

	sec, nsec := st.Ctimespec.Unix()
	ctime := time.Unix(sec, nsec)
	h.ChangeTime = ctime
}

================================================
FILE: vendor/github.com/appc/spec/pkg/tarheader/pop_linux.go
================================================
// Copyright 2015 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux

package tarheader

import (
	"archive/tar"
	"os"
	"syscall"
	"time"
)

func init() {
	// Register the linux-specific ctime populator with the generic
	// Populate pipeline (see tarheader.go).
	populateHeaderStat = append(populateHeaderStat, populateHeaderCtime)
}

// populateHeaderCtime copies the file's change time out of the linux
// syscall.Stat_t (field Ctim) into the tar header.
func populateHeaderCtime(h *tar.Header, fi os.FileInfo, _ map[uint64]string) {
	st, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return
	}
	sec, nsec := st.Ctim.Unix()
	ctime := time.Unix(sec, nsec)
	h.ChangeTime = ctime
}

================================================
FILE: vendor/github.com/appc/spec/pkg/tarheader/pop_posix.go
================================================
// Copyright 2015 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux freebsd netbsd openbsd

package tarheader

import (
	"archive/tar"
	"os"
	"syscall"

	"github.com/appc/spec/pkg/device"
)

func init() {
	// Register the unix populator (uid/gid, device numbers, hardlinks)
	// with the generic Populate pipeline (see tarheader.go).
	populateHeaderStat = append(populateHeaderStat, populateHeaderUnix)
}

// populateHeaderUnix fills uid/gid and, for block/char devices, the
// major/minor numbers from the underlying syscall.Stat_t. It also turns
// a repeated inode (tracked in seen, keyed by inode number) into a
// hardlink entry pointing at the first path seen for that inode.
func populateHeaderUnix(h *tar.Header, fi os.FileInfo, seen map[uint64]string) {
	st, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return
	}
	h.Uid = int(st.Uid)
	h.Gid = int(st.Gid)
	if st.Mode&syscall.S_IFMT == syscall.S_IFBLK || st.Mode&syscall.S_IFMT == syscall.S_IFCHR {
		h.Devminor = int64(device.Minor(uint64(st.Rdev)))
		h.Devmajor = int64(device.Major(uint64(st.Rdev)))
	}
	// If we have already seen this inode, generate a hardlink
	p, ok := seen[uint64(st.Ino)]
	if ok {
		h.Linkname = p
		h.Typeflag = tar.TypeLink
	} else {
		seen[uint64(st.Ino)] = h.Name
	}
}

================================================
FILE: vendor/github.com/appc/spec/pkg/tarheader/tarheader.go
================================================
// Copyright 2015 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tarheader

import (
	"archive/tar"
	"os"
)

// populateHeaderStat holds the per-OS populator functions registered by the
// build-tagged pop_*.go files via init().
var populateHeaderStat []func(h *tar.Header, fi os.FileInfo, seen map[uint64]string)

// Populate runs every registered populator over the header, filling in the
// OS-specific fields (ownership, ctime, device numbers, hardlinks).
func Populate(h *tar.Header, fi os.FileInfo, seen map[uint64]string) {
	for _, pop := range populateHeaderStat {
		pop(h, fi, seen)
	}
}

================================================
FILE: vendor/github.com/appc/spec/schema/common/common.go
================================================
// Copyright 2015 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package common

import (
	"fmt"
	"net/url"
	"strings"
)

// MakeQueryString takes a comma-separated LABEL=VALUE string and returns an
// "&"-separated string with URL escaped values.
//
// Examples:
// 	version=1.0.0,label=v1+v2 -> version=1.0.0&label=v1%2Bv2
// 	name=db,source=/tmp$1 -> name=db&source=%2Ftmp%241
func MakeQueryString(app string) (string, error) {
	parts := strings.Split(app, ",")
	escapedParts := make([]string, len(parts))
	for i, s := range parts {
		// Only the value is escaped; the label is passed through unchanged.
		p := strings.SplitN(s, "=", 2)
		if len(p) != 2 {
			return "", fmt.Errorf("malformed string %q - has a label without a value: %s", app, p[0])
		}
		escapedParts[i] = fmt.Sprintf("%s=%s", p[0], url.QueryEscape(p[1]))
	}
	return strings.Join(escapedParts, "&"), nil
}

================================================
FILE: vendor/github.com/appc/spec/schema/doc.go
================================================
// Copyright 2015 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package schema provides definitions for the JSON schema of the different
// manifests in the App Container Specification. The manifests are canonically
// represented in their respective structs:
//   - `ImageManifest`
//   - `PodManifest`
//
// Validation is performed through serialization: if a blob of JSON data will
// unmarshal to one of the *Manifests, it is considered a valid implementation
// of the standard. Similarly, if a constructed *Manifest struct marshals
// successfully to JSON, it must be valid.
package schema

================================================
FILE: vendor/github.com/appc/spec/schema/image.go
================================================
// Copyright 2015 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package schema

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"

	"github.com/appc/spec/schema/types"

	"go4.org/errorutil"
)

const (
	ACIExtension      = ".aci"
	ImageManifestKind = types.ACKind("ImageManifest")
)

// ImageManifest is the canonical representation of an App Container image
// manifest, as defined by the appc spec.
type ImageManifest struct {
	ACKind        types.ACKind       `json:"acKind"`
	ACVersion     types.SemVer       `json:"acVersion"`
	Name          types.ACIdentifier `json:"name"`
	Labels        types.Labels       `json:"labels,omitempty"`
	App           *types.App         `json:"app,omitempty"`
	Annotations   types.Annotations  `json:"annotations,omitempty"`
	Dependencies  types.Dependencies `json:"dependencies,omitempty"`
	PathWhitelist []string           `json:"pathWhitelist,omitempty"`
}

// imageManifest is a model to facilitate extra validation during the
// unmarshalling of the ImageManifest
type imageManifest ImageManifest

// BlankImageManifest returns an ImageManifest with only the kind and
// version fields pre-populated.
func BlankImageManifest() *ImageManifest {
	return &ImageManifest{ACKind: ImageManifestKind, ACVersion: AppContainerVersion}
}

// UnmarshalJSON decodes and validates an ImageManifest, reporting the
// line/column of any JSON syntax error.
func (im *ImageManifest) UnmarshalJSON(data []byte) error {
	a := imageManifest(*im)
	err := json.Unmarshal(data, &a)
	if err != nil {
		if serr, ok := err.(*json.SyntaxError); ok {
			line, col, highlight := errorutil.HighlightBytePosition(bytes.NewReader(data), serr.Offset)
			return fmt.Errorf("\nError at line %d, column %d\n%s%v", line, col, highlight, err)
		}
		return err
	}
	nim := ImageManifest(a)
	if err := nim.assertValid(); err != nil {
		return err
	}
	*im = nim
	return nil
}

// MarshalJSON validates then encodes an ImageManifest.
func (im ImageManifest) MarshalJSON() ([]byte, error) {
	if err := im.assertValid(); err != nil {
		return nil, err
	}
	return json.Marshal(imageManifest(im))
}

var imKindError = types.InvalidACKindError(ImageManifestKind)

// assertValid performs extra assertions on an ImageManifest to ensure that
// fields are set appropriately, etc. It is used exclusively when marshalling
// and unmarshalling an ImageManifest. Most field-specific validation is
// performed through the individual types being marshalled; assertValid()
// should only deal with higher-level validation.
func (im *ImageManifest) assertValid() error {
	if im.ACKind != ImageManifestKind {
		return imKindError
	}
	if im.ACVersion.Empty() {
		return errors.New(`acVersion must be set`)
	}
	if im.Name.Empty() {
		return errors.New(`name must be set`)
	}
	return nil
}

// GetLabel looks up a label by name; ok reports whether it was present.
func (im *ImageManifest) GetLabel(name string) (val string, ok bool) {
	return im.Labels.Get(name)
}

// GetAnnotation looks up an annotation by name; ok reports whether it was present.
func (im *ImageManifest) GetAnnotation(name string) (val string, ok bool) {
	return im.Annotations.Get(name)
}

================================================
FILE: vendor/github.com/appc/spec/schema/kind.go
================================================
// Copyright 2015 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schema

import (
	"encoding/json"

	"github.com/appc/spec/schema/types"
)

// Kind is the minimal envelope shared by all manifests: just the version
// and kind fields, used to sniff a document's type before full decoding.
type Kind struct {
	ACVersion types.SemVer `json:"acVersion"`
	ACKind    types.ACKind `json:"acKind"`
}

// kind is an alias that drops the custom (Un)MarshalJSON methods to avoid
// infinite recursion when encoding/decoding Kind.
type kind Kind

func (k *Kind) UnmarshalJSON(data []byte) error {
	nk := kind{}
	err := json.Unmarshal(data, &nk)
	if err != nil {
		return err
	}
	*k = Kind(nk)
	return nil
}

func (k Kind) MarshalJSON() ([]byte, error) {
	return json.Marshal(kind(k))
}

================================================
FILE: vendor/github.com/appc/spec/schema/pod.go
================================================
// Copyright 2015 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schema

import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"

	"github.com/appc/spec/schema/types"

	"go4.org/errorutil"
)

const PodManifestKind = types.ACKind("PodManifest")

// PodManifest is the canonical representation of an App Container pod
// manifest, as defined by the appc spec.
type PodManifest struct {
	ACVersion       types.SemVer          `json:"acVersion"`
	ACKind          types.ACKind          `json:"acKind"`
	Apps            AppList               `json:"apps"`
	Volumes         []types.Volume        `json:"volumes"`
	Isolators       []types.Isolator      `json:"isolators"`
	Annotations     types.Annotations     `json:"annotations"`
	Ports           []types.ExposedPort   `json:"ports"`
	UserAnnotations types.UserAnnotations `json:"userAnnotations,omitempty"`
	UserLabels      types.UserLabels      `json:"userLabels,omitempty"`
}

// podManifest is a model to facilitate extra validation during the
// unmarshalling of the PodManifest
type podManifest PodManifest

// BlankPodManifest returns a PodManifest with only the kind and version
// fields pre-populated.
func BlankPodManifest() *PodManifest {
	return &PodManifest{ACKind: PodManifestKind, ACVersion: AppContainerVersion}
}

// UnmarshalJSON decodes and validates a PodManifest, reporting the
// line/column of any JSON syntax error.
func (pm *PodManifest) UnmarshalJSON(data []byte) error {
	p := podManifest(*pm)
	err := json.Unmarshal(data, &p)
	if err != nil {
		if serr, ok := err.(*json.SyntaxError); ok {
			line, col, highlight := errorutil.HighlightBytePosition(bytes.NewReader(data), serr.Offset)
			return fmt.Errorf("\nError at line %d, column %d\n%s%v", line, col, highlight, err)
		}
		return err
	}
	npm := PodManifest(p)
	if err := npm.assertValid(); err != nil {
		return err
	}
	*pm = npm
	return nil
}

// MarshalJSON validates then encodes a PodManifest.
func (pm PodManifest) MarshalJSON() ([]byte, error) {
	if err := pm.assertValid(); err != nil {
		return nil, err
	}
	return json.Marshal(podManifest(pm))
}

var pmKindError = types.InvalidACKindError(PodManifestKind)

// assertValid performs extra assertions on an PodManifest to
// ensure that fields are set appropriately, etc. It is used exclusively when
// marshalling and unmarshalling an PodManifest. Most
// field-specific validation is performed through the individual types being
// marshalled; assertValid() should only deal with higher-level validation.
func (pm *PodManifest) assertValid() error {
	if pm.ACKind != PodManifestKind {
		return pmKindError
	}
	return nil
}

// AppList is the list of apps in a pod; names must be unique.
type AppList []RuntimeApp

// appList is an alias that drops the custom (Un)MarshalJSON methods to avoid
// infinite recursion when encoding/decoding AppList.
type appList AppList

func (al *AppList) UnmarshalJSON(data []byte) error {
	a := appList{}
	err := json.Unmarshal(data, &a)
	if err != nil {
		return err
	}
	nal := AppList(a)
	if err := nal.assertValid(); err != nil {
		return err
	}
	*al = nal
	return nil
}

func (al AppList) MarshalJSON() ([]byte, error) {
	if err := al.assertValid(); err != nil {
		return nil, err
	}
	return json.Marshal(appList(al))
}

// assertValid rejects duplicate app names within the list.
func (al AppList) assertValid() error {
	seen := map[types.ACName]bool{}
	for _, a := range al {
		if _, ok := seen[a.Name]; ok {
			return fmt.Errorf(`duplicate apps of name %q`, a.Name)
		}
		seen[a.Name] = true
	}
	return nil
}

// Get retrieves an app by the specified name from the AppList; if there is
// no such app, nil is returned. The returned *RuntimeApp MUST be considered
// read-only.
func (al AppList) Get(name types.ACName) *RuntimeApp {
	for _, a := range al {
		if name.Equals(a.Name) {
			aa := a
			return &aa
		}
	}
	return nil
}

// Mount describes the mapping between a volume and the path it is mounted
// inside of an app's filesystem.
// The AppVolume is optional. If missing, the pod-level Volume of the
// same name shall be used.
type Mount struct {
	Volume    types.ACName  `json:"volume"`
	Path      string        `json:"path"`
	AppVolume *types.Volume `json:"appVolume,omitempty"`
}

// assertValid requires both the volume name and the mount path to be set.
func (r Mount) assertValid() error {
	if r.Volume.Empty() {
		return errors.New("volume must be set")
	}
	if r.Path == "" {
		return errors.New("path must be set")
	}
	return nil
}

// RuntimeApp describes an application referenced in a PodManifest
type RuntimeApp struct {
	Name           types.ACName      `json:"name"`
	Image          RuntimeImage      `json:"image"`
	App            *types.App        `json:"app,omitempty"`
	ReadOnlyRootFS bool              `json:"readOnlyRootFS,omitempty"`
	Mounts         []Mount           `json:"mounts,omitempty"`
	Annotations    types.Annotations `json:"annotations,omitempty"`
}

// RuntimeImage describes an image referenced in a RuntimeApp
type RuntimeImage struct {
	Name   *types.ACIdentifier `json:"name,omitempty"`
	ID     types.Hash          `json:"id"`
	Labels types.Labels        `json:"labels,omitempty"`
}

================================================
FILE: vendor/github.com/appc/spec/schema/types/acidentifier.go
================================================
// Copyright 2015 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types

import (
	"encoding/json"
	"errors"
	"regexp"
	"strings"
)

var (
	// ValidACIdentifier is a regular expression that defines a valid ACIdentifier
	ValidACIdentifier = regexp.MustCompile("^[a-z0-9]+([-._~/][a-z0-9]+)*$")

	invalidACIdentifierChars = regexp.MustCompile("[^a-z0-9-._~/]")
	invalidACIdentifierEdges = regexp.MustCompile("(^[-._~/]+)|([-._~/]+$)")

	ErrEmptyACIdentifier         = ACIdentifierError("ACIdentifier cannot be empty")
	ErrInvalidEdgeInACIdentifier = ACIdentifierError("ACIdentifier must start and end with only lower case " +
		"alphanumeric characters")
	ErrInvalidCharInACIdentifier = ACIdentifierError("ACIdentifier must contain only lower case " +
		`alphanumeric characters plus "-._~/"`)
)

// ACIdentifier (an App-Container Identifier) is a format used by keys in image names
// and image labels of the App Container Standard. An ACIdentifier is restricted to numeric
// and lowercase URI unreserved characters defined in URI RFC[1]; all alphabetical characters
// must be lowercase only. Furthermore, the first and last character ("edges") must be
// alphanumeric, and an ACIdentifier cannot be empty. Programmatically, an ACIdentifier must
// conform to the regular expression ValidACIdentifier.
//
// [1] http://tools.ietf.org/html/rfc3986#section-2.3
type ACIdentifier string

func (n ACIdentifier) String() string {
	return string(n)
}

// Set sets the ACIdentifier to the given value, if it is valid; if not,
// an error is returned.
func (n *ACIdentifier) Set(s string) error {
	nn, err := NewACIdentifier(s)
	if err == nil {
		*n = *nn
	}
	return err
}

// Equals checks whether a given ACIdentifier is equal to this one.
// NOTE(review): comparison is via strings.ToLower on both sides; valid
// ACIdentifiers are already lowercase, so this only matters for unvalidated
// values.
func (n ACIdentifier) Equals(o ACIdentifier) bool {
	return strings.ToLower(string(n)) == strings.ToLower(string(o))
}

// Empty returns a boolean indicating whether this ACIdentifier is empty.
func (n ACIdentifier) Empty() bool {
	return n.String() == ""
}

// NewACIdentifier generates a new ACIdentifier from a string. If the given string is
// not a valid ACIdentifier, nil and an error are returned.
func NewACIdentifier(s string) (*ACIdentifier, error) {
	n := ACIdentifier(s)
	if err := n.assertValid(); err != nil {
		return nil, err
	}
	return &n, nil
}

// MustACIdentifier generates a new ACIdentifier from a string, If the given string is
// not a valid ACIdentifier, it panics.
func MustACIdentifier(s string) *ACIdentifier {
	n, err := NewACIdentifier(s)
	if err != nil {
		panic(err)
	}
	return n
}

// assertValid checks emptiness, then invalid characters, then invalid edges,
// returning the first matching error.
func (n ACIdentifier) assertValid() error {
	s := string(n)
	if len(s) == 0 {
		return ErrEmptyACIdentifier
	}
	if invalidACIdentifierChars.MatchString(s) {
		return ErrInvalidCharInACIdentifier
	}
	if invalidACIdentifierEdges.MatchString(s) {
		return ErrInvalidEdgeInACIdentifier
	}
	return nil
}

// UnmarshalJSON implements the json.Unmarshaler interface
func (n *ACIdentifier) UnmarshalJSON(data []byte) error {
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	nn, err := NewACIdentifier(s)
	if err != nil {
		return err
	}
	*n = *nn
	return nil
}

// MarshalJSON implements the json.Marshaler interface
func (n ACIdentifier) MarshalJSON() ([]byte, error) {
	if err := n.assertValid(); err != nil {
		return nil, err
	}
	return json.Marshal(n.String())
}

// SanitizeACIdentifier replaces every invalid ACIdentifier character in s with an underscore
// making it a legal ACIdentifier string. If the character is an upper case letter it
// replaces it with its lower case. It also removes illegal edge characters
// (hyphens, period, underscore, tilde and slash).
//
// This is a helper function and its algorithm is not part of the spec. It
// should not be called without the user explicitly asking for a suggestion.
func SanitizeACIdentifier(s string) (string, error) {
	s = strings.ToLower(s)
	s = invalidACIdentifierChars.ReplaceAllString(s, "_")
	s = invalidACIdentifierEdges.ReplaceAllString(s, "")

	if s == "" {
		return "", errors.New("must contain at least one valid character")
	}

	return s, nil
}

================================================
FILE: vendor/github.com/appc/spec/schema/types/ackind.go
================================================
// Copyright 2015 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package types

import (
	"encoding/json"
	"fmt"
)

var (
	ErrNoACKind = ACKindError("ACKind must be set")
)

// ACKind wraps a string to define a field which must be set with one of
// several ACKind values. If it is unset, or has an invalid value, the field
// will refuse to marshal/unmarshal.
type ACKind string func (a ACKind) String() string { return string(a) } func (a ACKind) assertValid() error { s := a.String() switch s { case "ImageManifest", "PodManifest": return nil case "": return ErrNoACKind default: msg := fmt.Sprintf("bad ACKind: %s", s) return ACKindError(msg) } } func (a ACKind) MarshalJSON() ([]byte, error) { if err := a.assertValid(); err != nil { return nil, err } return json.Marshal(a.String()) } func (a *ACKind) UnmarshalJSON(data []byte) error { var s string err := json.Unmarshal(data, &s) if err != nil { return err } na := ACKind(s) if err := na.assertValid(); err != nil { return err } *a = na return nil } ================================================ FILE: vendor/github.com/appc/spec/schema/types/acname.go ================================================ // Copyright 2015 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package types import ( "encoding/json" "errors" "regexp" "strings" ) var ( // ValidACName is a regular expression that defines a valid ACName ValidACName = regexp.MustCompile("^[a-z0-9]+([-][a-z0-9]+)*$") invalidACNameChars = regexp.MustCompile("[^a-z0-9-]") invalidACNameEdges = regexp.MustCompile("(^[-]+)|([-]+$)") ErrEmptyACName = ACNameError("ACName cannot be empty") ErrInvalidEdgeInACName = ACNameError("ACName must start and end with only lower case " + "alphanumeric characters") ErrInvalidCharInACName = ACNameError("ACName must contain only lower case " + `alphanumeric characters plus "-"`) ) // ACName (an App-Container Name) is a format used by keys in different formats // of the App Container Standard. An ACName is restricted to numeric and lowercase // characters accepted by the DNS RFC[1] plus "-"; all alphabetical characters must // be lowercase only. Furthermore, the first and last character ("edges") must be // alphanumeric, and an ACName cannot be empty. Programmatically, an ACName must // conform to the regular expression ValidACName. // // [1] http://tools.ietf.org/html/rfc1123#page-13 type ACName string func (n ACName) String() string { return string(n) } // Set sets the ACName to the given value, if it is valid; if not, // an error is returned. func (n *ACName) Set(s string) error { nn, err := NewACName(s) if err == nil { *n = *nn } return err } // Equals checks whether a given ACName is equal to this one. func (n ACName) Equals(o ACName) bool { return strings.ToLower(string(n)) == strings.ToLower(string(o)) } // Empty returns a boolean indicating whether this ACName is empty. func (n ACName) Empty() bool { return n.String() == "" } // NewACName generates a new ACName from a string. If the given string is // not a valid ACName, nil and an error are returned. 
func NewACName(s string) (*ACName, error) { n := ACName(s) if err := n.assertValid(); err != nil { return nil, err } return &n, nil } // MustACName generates a new ACName from a string, If the given string is // not a valid ACName, it panics. func MustACName(s string) *ACName { n, err := NewACName(s) if err != nil { panic(err) } return n } func (n ACName) assertValid() error { s := string(n) if len(s) == 0 { return ErrEmptyACName } if invalidACNameChars.MatchString(s) { return ErrInvalidCharInACName } if invalidACNameEdges.MatchString(s) { return ErrInvalidEdgeInACName } return nil } // UnmarshalJSON implements the json.Unmarshaler interface func (n *ACName) UnmarshalJSON(data []byte) error { var s string if err := json.Unmarshal(data, &s); err != nil { return err } nn, err := NewACName(s) if err != nil { return err } *n = *nn return nil } // MarshalJSON implements the json.Marshaler interface func (n ACName) MarshalJSON() ([]byte, error) { if err := n.assertValid(); err != nil { return nil, err } return json.Marshal(n.String()) } // SanitizeACName replaces every invalid ACName character in s with a dash // making it a legal ACName string. If the character is an upper case letter it // replaces it with its lower case. It also removes illegal edge characters // (hyphens). // // This is a helper function and its algorithm is not part of the spec. It // should not be called without the user explicitly asking for a suggestion. 
func SanitizeACName(s string) (string, error) { s = strings.ToLower(s) s = invalidACNameChars.ReplaceAllString(s, "-") s = invalidACNameEdges.ReplaceAllString(s, "") if s == "" { return "", errors.New("must contain at least one valid character") } return s, nil } ================================================ FILE: vendor/github.com/appc/spec/schema/types/annotations.go ================================================ // Copyright 2015 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package types import ( "encoding/json" "fmt" ) type Annotations []Annotation type annotations Annotations type Annotation struct { Name ACIdentifier `json:"name"` Value string `json:"value"` } func (a Annotations) assertValid() error { seen := map[ACIdentifier]string{} for _, anno := range a { _, ok := seen[anno.Name] if ok { return fmt.Errorf(`duplicate annotations of name %q`, anno.Name) } seen[anno.Name] = anno.Value } if c, ok := seen["created"]; ok { if _, err := NewDate(c); err != nil { return err } } if h, ok := seen["homepage"]; ok { if _, err := NewURL(h); err != nil { return err } } if d, ok := seen["documentation"]; ok { if _, err := NewURL(d); err != nil { return err } } return nil } func (a Annotations) MarshalJSON() ([]byte, error) { if err := a.assertValid(); err != nil { return nil, err } return json.Marshal(annotations(a)) } func (a *Annotations) UnmarshalJSON(data []byte) error { var ja annotations if err := json.Unmarshal(data, &ja); err != nil { return err } na := Annotations(ja) if err := na.assertValid(); err != nil { return err } *a = na return nil } // Retrieve the value of an annotation by the given name from Annotations, if // it exists. func (a Annotations) Get(name string) (val string, ok bool) { for _, anno := range a { if anno.Name.String() == name { return anno.Value, true } } return "", false } // Set sets the value of an annotation by the given name, overwriting if one already exists. 
func (a *Annotations) Set(name ACIdentifier, value string) { for i, anno := range *a { if anno.Name.Equals(name) { (*a)[i] = Annotation{ Name: name, Value: value, } return } } anno := Annotation{ Name: name, Value: value, } *a = append(*a, anno) } ================================================ FILE: vendor/github.com/appc/spec/schema/types/app.go ================================================ // Copyright 2015 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package types import ( "encoding/json" "errors" "fmt" "path" ) type App struct { Exec Exec `json:"exec"` EventHandlers []EventHandler `json:"eventHandlers,omitempty"` User string `json:"user"` Group string `json:"group"` SupplementaryGIDs []int `json:"supplementaryGIDs,omitempty"` WorkingDirectory string `json:"workingDirectory,omitempty"` Environment Environment `json:"environment,omitempty"` MountPoints []MountPoint `json:"mountPoints,omitempty"` Ports []Port `json:"ports,omitempty"` Isolators Isolators `json:"isolators,omitempty"` UserAnnotations UserAnnotations `json:"userAnnotations,omitempty"` UserLabels UserLabels `json:"userLabels,omitempty"` } // app is a model to facilitate extra validation during the // unmarshalling of the App type app App func (a *App) UnmarshalJSON(data []byte) error { ja := app(*a) err := json.Unmarshal(data, &ja) if err != nil { return err } na := App(ja) if err := na.assertValid(); err != nil { return err } if na.Environment == nil { na.Environment = 
make(Environment, 0) } *a = na return nil } func (a App) MarshalJSON() ([]byte, error) { if err := a.assertValid(); err != nil { return nil, err } return json.Marshal(app(a)) } func (a *App) assertValid() error { if err := a.Exec.assertValid(); err != nil { return err } if a.User == "" { return errors.New(`user is required`) } if a.Group == "" { return errors.New(`group is required`) } if !path.IsAbs(a.WorkingDirectory) && a.WorkingDirectory != "" { return errors.New("workingDirectory must be an absolute path") } eh := make(map[string]bool) for _, e := range a.EventHandlers { name := e.Name if eh[name] { return fmt.Errorf("Only one eventHandler of name %q allowed", name) } eh[name] = true } if err := a.Environment.assertValid(); err != nil { return err } if err := a.Isolators.assertValid(); err != nil { return err } return nil } ================================================ FILE: vendor/github.com/appc/spec/schema/types/date.go ================================================ // Copyright 2015 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package types import ( "encoding/json" "fmt" "time" ) // Date wraps time.Time to marshal/unmarshal to/from JSON strings in strict // accordance with RFC3339 // TODO(jonboulle): golang's implementation seems slightly buggy here; // according to http://tools.ietf.org/html/rfc3339#section-5.6 , applications // may choose to separate the date and time with a space instead of a T // character (for example, `date --rfc-3339` on GNU coreutils) - but this is // considered an error by go's parser. File a bug? type Date time.Time func NewDate(s string) (*Date, error) { t, err := time.Parse(time.RFC3339, s) if err != nil { return nil, fmt.Errorf("bad Date: %v", err) } d := Date(t) return &d, nil } func (d Date) String() string { return time.Time(d).Format(time.RFC3339) } func (d *Date) UnmarshalJSON(data []byte) error { var s string if err := json.Unmarshal(data, &s); err != nil { return err } nd, err := NewDate(s) if err != nil { return err } *d = *nd return nil } func (d Date) MarshalJSON() ([]byte, error) { return json.Marshal(d.String()) } ================================================ FILE: vendor/github.com/appc/spec/schema/types/dependencies.go ================================================ // Copyright 2015 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package types import ( "encoding/json" "errors" ) type Dependencies []Dependency type Dependency struct { ImageName ACIdentifier `json:"imageName"` ImageID *Hash `json:"imageID,omitempty"` Labels Labels `json:"labels,omitempty"` Size uint `json:"size,omitempty"` } type dependency Dependency func (d Dependency) assertValid() error { if len(d.ImageName) < 1 { return errors.New(`imageName cannot be empty`) } return nil } func (d Dependency) MarshalJSON() ([]byte, error) { if err := d.assertValid(); err != nil { return nil, err } return json.Marshal(dependency(d)) } func (d *Dependency) UnmarshalJSON(data []byte) error { var jd dependency if err := json.Unmarshal(data, &jd); err != nil { return err } nd := Dependency(jd) if err := nd.assertValid(); err != nil { return err } *d = nd return nil } ================================================ FILE: vendor/github.com/appc/spec/schema/types/doc.go ================================================ // Copyright 2015 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // Package types contains structs representing the various types in the app // container specification. It is used by the [schema manifest types](../) // to enforce validation. 
package types ================================================ FILE: vendor/github.com/appc/spec/schema/types/environment.go ================================================ // Copyright 2015 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package types import ( "encoding/json" "fmt" "regexp" ) var ( envPattern = regexp.MustCompile("^[A-Za-z_][A-Za-z_0-9]*$") ) type Environment []EnvironmentVariable type environment Environment type EnvironmentVariable struct { Name string `json:"name"` Value string `json:"value"` } func (ev EnvironmentVariable) assertValid() error { if len(ev.Name) == 0 { return fmt.Errorf(`environment variable name must not be empty`) } if !envPattern.MatchString(ev.Name) { return fmt.Errorf(`environment variable does not have valid identifier %q`, ev.Name) } return nil } func (e Environment) assertValid() error { seen := map[string]bool{} for _, env := range e { if err := env.assertValid(); err != nil { return err } _, ok := seen[env.Name] if ok { return fmt.Errorf(`duplicate environment variable of name %q`, env.Name) } seen[env.Name] = true } return nil } func (e Environment) MarshalJSON() ([]byte, error) { if err := e.assertValid(); err != nil { return nil, err } return json.Marshal(environment(e)) } func (e *Environment) UnmarshalJSON(data []byte) error { var je environment if err := json.Unmarshal(data, &je); err != nil { return err } ne := Environment(je) if err := ne.assertValid(); err != nil { return err } *e = ne return nil 
} // Retrieve the value of an environment variable by the given name from // Environment, if it exists. func (e Environment) Get(name string) (value string, ok bool) { for _, env := range e { if env.Name == name { return env.Value, true } } return "", false } // Set sets the value of an environment variable by the given name, // overwriting if one already exists. func (e *Environment) Set(name string, value string) { for i, env := range *e { if env.Name == name { (*e)[i] = EnvironmentVariable{ Name: name, Value: value, } return } } env := EnvironmentVariable{ Name: name, Value: value, } *e = append(*e, env) } ================================================ FILE: vendor/github.com/appc/spec/schema/types/errors.go ================================================ // Copyright 2015 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package types

import "fmt"

// An ACKindError is returned when the wrong ACKind is set in a manifest
type ACKindError string

func (e ACKindError) Error() string {
	return string(e)
}

// InvalidACKindError builds an ACKindError naming the single kind that
// would have been valid for the manifest being processed.
func InvalidACKindError(kind ACKind) ACKindError {
	return ACKindError(fmt.Sprintf("missing or bad ACKind (must be %#v)", kind))
}

// An ACVersionError is returned when a bad ACVersion is set in a manifest
type ACVersionError string

func (e ACVersionError) Error() string {
	return string(e)
}

// An ACIdentifierError is returned when a bad value is used for an ACIdentifier
type ACIdentifierError string

func (e ACIdentifierError) Error() string {
	return string(e)
}

// An ACNameError is returned when a bad value is used for an ACName
type ACNameError string

func (e ACNameError) Error() string {
	return string(e)
}

================================================ FILE: vendor/github.com/appc/spec/schema/types/event_handler.go ================================================
// Copyright 2015 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types import ( "encoding/json" "errors" "fmt" ) type EventHandler struct { Name string `json:"name"` Exec Exec `json:"exec"` } type eventHandler EventHandler func (e EventHandler) assertValid() error { s := e.Name switch s { case "pre-start", "post-stop": return nil case "": return errors.New(`eventHandler "name" cannot be empty`) default: return fmt.Errorf(`bad eventHandler "name": %q`, s) } } func (e EventHandler) MarshalJSON() ([]byte, error) { if err := e.assertValid(); err != nil { return nil, err } return json.Marshal(eventHandler(e)) } func (e *EventHandler) UnmarshalJSON(data []byte) error { var je eventHandler err := json.Unmarshal(data, &je) if err != nil { return err } ne := EventHandler(je) if err := ne.assertValid(); err != nil { return err } *e = ne return nil } ================================================ FILE: vendor/github.com/appc/spec/schema/types/exec.go ================================================ // Copyright 2015 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package types import "encoding/json" type Exec []string type exec Exec func (e Exec) assertValid() error { return nil } func (e Exec) MarshalJSON() ([]byte, error) { if err := e.assertValid(); err != nil { return nil, err } return json.Marshal(exec(e)) } func (e *Exec) UnmarshalJSON(data []byte) error { var je exec err := json.Unmarshal(data, &je) if err != nil { return err } ne := Exec(je) if err := ne.assertValid(); err != nil { return err } *e = ne return nil } ================================================ FILE: vendor/github.com/appc/spec/schema/types/hash.go ================================================ // Copyright 2015 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package types import ( "crypto/sha512" "encoding/json" "errors" "fmt" "reflect" "strings" ) const ( maxHashSize = (sha512.Size / 2) + len("sha512-") ) // Hash encodes a hash specified in a string of the form: // "-" // for example // "sha512-06c733b1838136838e6d2d3e8fa5aea4c7905e92[...]" // Valid types are currently: // * sha512 type Hash struct { typ string Val string } func NewHash(s string) (*Hash, error) { elems := strings.Split(s, "-") if len(elems) != 2 { return nil, errors.New("badly formatted hash string") } nh := Hash{ typ: elems[0], Val: elems[1], } if err := nh.assertValid(); err != nil { return nil, err } return &nh, nil } func (h Hash) String() string { return fmt.Sprintf("%s-%s", h.typ, h.Val) } func (h *Hash) Set(s string) error { nh, err := NewHash(s) if err == nil { *h = *nh } return err } func (h Hash) Empty() bool { return reflect.DeepEqual(h, Hash{}) } func (h Hash) assertValid() error { switch h.typ { case "sha512": case "": return fmt.Errorf("unexpected empty hash type") default: return fmt.Errorf("unrecognized hash type: %v", h.typ) } if h.Val == "" { return fmt.Errorf("unexpected empty hash value") } return nil } func (h *Hash) UnmarshalJSON(data []byte) error { var s string if err := json.Unmarshal(data, &s); err != nil { return err } nh, err := NewHash(s) if err != nil { return err } *h = *nh return nil } func (h Hash) MarshalJSON() ([]byte, error) { if err := h.assertValid(); err != nil { return nil, err } return json.Marshal(h.String()) } func NewHashSHA512(b []byte) *Hash { h := sha512.New() h.Write(b) nh, _ := NewHash(fmt.Sprintf("sha512-%x", h.Sum(nil))) return nh } func ShortHash(hash string) string { if len(hash) > maxHashSize { return hash[:maxHashSize] } return hash } ================================================ FILE: vendor/github.com/appc/spec/schema/types/isolator.go ================================================ // Copyright 2015 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // 
you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package types import ( "encoding/json" "errors" "fmt" ) var ( isolatorMap map[ACIdentifier]IsolatorValueConstructor // ErrIncompatibleIsolator is returned whenever an Isolators set contains // conflicting IsolatorValue instances ErrIncompatibleIsolator = errors.New("isolators set contains incompatible types") // ErrInvalidIsolator is returned upon validation failures due to improper // or partially constructed Isolator instances (eg. from incomplete direct construction) ErrInvalidIsolator = errors.New("invalid isolator") ) func init() { isolatorMap = make(map[ACIdentifier]IsolatorValueConstructor) } type IsolatorValueConstructor func() IsolatorValue func AddIsolatorValueConstructor(n ACIdentifier, i IsolatorValueConstructor) { isolatorMap[n] = i } func AddIsolatorName(n ACIdentifier, ns map[ACIdentifier]struct{}) { ns[n] = struct{}{} } // Isolators encapsulates a list of individual Isolators for the ImageManifest // and PodManifest schemas. 
type Isolators []Isolator

// assertValid checks that every single isolator is valid and that
// the whole set is well built: no unexpected repetitions and no
// conflicting isolator types.
func (isolators Isolators) assertValid() error {
	typesMap := make(map[ACIdentifier]bool)
	for _, i := range isolators {
		v := i.Value()
		if v == nil {
			return ErrInvalidIsolator
		}
		if err := v.AssertValid(); err != nil {
			return err
		}
		if _, ok := typesMap[i.Name]; ok {
			if !v.multipleAllowed() {
				// NOTE: the original message carried a stray trailing quote
				// after %s; fixed here.
				return fmt.Errorf(`isolators set contains too many instances of type %s`, i.Name)
			}
		}
		for _, c := range v.Conflicts() {
			if _, found := typesMap[c]; found {
				return ErrIncompatibleIsolator
			}
		}
		typesMap[i.Name] = true
	}
	return nil
}

// GetByName returns the last isolator in the list by the given name.
func (is *Isolators) GetByName(name ACIdentifier) *Isolator {
	var i Isolator
	for j := len(*is) - 1; j >= 0; j-- {
		i = []Isolator(*is)[j]
		if i.Name == name {
			return &i
		}
	}
	return nil
}

// ReplaceIsolatorsByName overrides matching isolator types with a new
// isolator, deleting them all and appending the new one instead
func (is *Isolators) ReplaceIsolatorsByName(newIs Isolator, oldNames []ACIdentifier) {
	var i Isolator
	// Iterate backwards so deletions do not disturb the indices still to visit.
	for j := len(*is) - 1; j >= 0; j-- {
		i = []Isolator(*is)[j]
		for _, name := range oldNames {
			if i.Name == name {
				*is = append((*is)[:j], (*is)[j+1:]...)
			}
		}
	}
	*is = append(*is, newIs)
}

// Unrecognized returns a set of isolators that are not recognized.
// An isolator is not recognized if it has not had an associated
// constructor registered with AddIsolatorValueConstructor.
func (is *Isolators) Unrecognized() Isolators {
	u := Isolators{}
	for _, i := range *is {
		if i.value == nil {
			u = append(u, i)
		}
	}
	return u
}

// IsolatorValue encapsulates the actual value of an Isolator which may be
// serialized as any arbitrary JSON blob. Specific Isolator types should
// implement this interface to facilitate unmarshalling and validation.
type IsolatorValue interface {
	// UnmarshalJSON unserialize a JSON-encoded isolator
	UnmarshalJSON(b []byte) error
	// AssertValid returns a non-nil error value if an IsolatorValue is not valid
	// according to appc spec
	AssertValid() error
	// Conflicts returns a list of conflicting isolators types, which cannot co-exist
	// together with this IsolatorValue
	Conflicts() []ACIdentifier
	// multipleAllowed specifies whether multiple isolator instances are allowed
	// for this isolator type
	multipleAllowed() bool
}

// Isolator is a model for unmarshalling isolator types from their JSON-encoded
// representation.
type Isolator struct {
	// Name is the name of the Isolator type as defined in the specification.
	Name ACIdentifier `json:"name"`
	// ValueRaw captures the raw JSON value of an Isolator that was
	// unmarshalled. This field is used for unmarshalling only. It MUST NOT
	// be referenced by external users of the Isolator struct. It is
	// exported only to satisfy Go's unfortunate requirement that fields
	// must be capitalized to be unmarshalled successfully.
	ValueRaw *json.RawMessage `json:"value"`

	// value captures the "true" value of the isolator; it stays nil when no
	// constructor was registered for Name (see Unrecognized).
	value IsolatorValue
}

// isolator is a shadow type used for unmarshalling.
type isolator Isolator

// Value returns the raw Value of this Isolator. Users should perform a type
// switch/assertion on this value to extract the underlying isolator type.
func (i *Isolator) Value() IsolatorValue {
	return i.value
}

// UnmarshalJSON populates this Isolator from a JSON-encoded representation. To
// unmarshal the Value of the Isolator, it will use the appropriate constructor
// as registered by AddIsolatorValueConstructor.
func (i *Isolator) UnmarshalJSON(b []byte) error { var ii isolator err := json.Unmarshal(b, &ii) if err != nil { return err } var dst IsolatorValue con, ok := isolatorMap[ii.Name] if ok { dst = con() err = dst.UnmarshalJSON(*ii.ValueRaw) if err != nil { return err } err = dst.AssertValid() if err != nil { return err } } i.value = dst i.ValueRaw = ii.ValueRaw i.Name = ii.Name return nil } ================================================ FILE: vendor/github.com/appc/spec/schema/types/isolator_linux_specific.go ================================================ // Copyright 2015 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package types

import (
	"encoding/json"
	"errors"
	"fmt"
	"strings"
	"unicode"
)

// Canonical names of the Linux-specific isolators defined by the appc spec.
const (
	LinuxCapabilitiesRetainSetName = "os/linux/capabilities-retain-set"
	LinuxCapabilitiesRevokeSetName = "os/linux/capabilities-remove-set"
	LinuxNoNewPrivilegesName       = "os/linux/no-new-privileges"
	LinuxSeccompRemoveSetName      = "os/linux/seccomp-remove-set"
	LinuxSeccompRetainSetName      = "os/linux/seccomp-retain-set"
	LinuxOOMScoreAdjName           = "os/linux/oom-score-adj"
	LinuxCPUSharesName             = "os/linux/cpu-shares"
	LinuxCPUSharesName             = "os/linux/cpu-shares"
)
func (l Labels) ToMap() map[ACIdentifier]string { labelsMap := make(map[ACIdentifier]string) for _, lbl := range l { labelsMap[lbl.Name] = lbl.Value } return labelsMap } // LabelsFromMap creates Labels from a map[ACIdentifier]string func LabelsFromMap(labelsMap map[ACIdentifier]string) (Labels, error) { labels := Labels{} for n, v := range labelsMap { labels = append(labels, Label{Name: n, Value: v}) } if err := labels.assertValid(); err != nil { return nil, err } return labels, nil } // ToAppcOSArch translates a Golang arch tuple (OS, architecture, flavor) into // an appc arch tuple (OS, architecture) func ToAppcOSArch(goOs string, goArch string, goArchFlavor string) (appcOs string, appcArch string, e error) { tabularAppcToGo := map[goArchTuple]appcArchTuple{ {"linux", "amd64", ""}: {"linux", "amd64"}, {"linux", "386", ""}: {"linux", "i386"}, {"linux", "arm64", ""}: {"linux", "aarch64"}, {"linux", "arm", ""}: {"linux", "armv6l"}, {"linux", "arm", "6"}: {"linux", "armv6l"}, {"linux", "arm", "7"}: {"linux", "armv7l"}, {"linux", "ppc64", ""}: {"linux", "ppc64"}, {"linux", "ppc64le", ""}: {"linux", "ppc64le"}, {"linux", "s390x", ""}: {"linux", "s390x"}, {"freebsd", "amd64", ""}: {"freebsd", "amd64"}, {"freebsd", "386", ""}: {"freebsd", "i386"}, {"freebsd", "arm", ""}: {"freebsd", "arm"}, {"freebsd", "arm", "5"}: {"freebsd", "arm"}, {"freebsd", "arm", "6"}: {"freebsd", "arm"}, {"freebsd", "arm", "7"}: {"freebsd", "arm"}, {"darwin", "amd64", ""}: {"darwin", "x86_64"}, {"darwin", "386", ""}: {"darwin", "i386"}, } archTuple, ok := tabularAppcToGo[goArchTuple{goOs, goArch, goArchFlavor}] if !ok { return "", "", fmt.Errorf("unknown arch tuple: %q - %q - %q", goOs, goArch, goArchFlavor) } return archTuple.appcOs, archTuple.appcArch, nil } // ToGoOSArch translates an appc arch tuple (OS, architecture) into // a Golang arch tuple (OS, architecture, flavor) func ToGoOSArch(appcOs string, appcArch string) (goOs string, goArch string, goArchFlavor string, e error) { 
tabularGoToAppc := map[appcArchTuple]goArchTuple{ // {"linux", "aarch64_be"}: nil, // {"linux", "armv7b"}: nil, {"linux", "aarch64"}: {"linux", "arm64", ""}, {"linux", "amd64"}: {"linux", "amd64", ""}, {"linux", "armv6l"}: {"linux", "arm", "6"}, {"linux", "armv7l"}: {"linux", "arm", "7"}, {"linux", "i386"}: {"linux", "386", ""}, {"linux", "ppc64"}: {"linux", "ppc64", ""}, {"linux", "ppc64le"}: {"linux", "ppc64le", ""}, {"linux", "s390x"}: {"linux", "s390x", ""}, {"freebsd", "amd64"}: {"freebsd", "amd64", ""}, {"freebsd", "arm"}: {"freebsd", "arm", "6"}, {"freebsd", "386"}: {"freebsd", "i386", ""}, {"darwin", "amd64"}: {"darwin", "x86_64", ""}, {"darwin", "386"}: {"darwin", "i386", ""}, } archTuple, ok := tabularGoToAppc[appcArchTuple{appcOs, appcArch}] if !ok { return "", "", "", fmt.Errorf("unknown arch tuple: %q - %q", appcOs, appcArch) } return archTuple.goOs, archTuple.goArch, archTuple.goArchFlavor, nil } ================================================ FILE: vendor/github.com/appc/spec/schema/types/mountpoint.go ================================================ // Copyright 2015 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package types import ( "errors" "fmt" "net/url" "strconv" "github.com/appc/spec/schema/common" ) // MountPoint is the application-side manifestation of a Volume. 
// MountPoint names a path inside the app filesystem where a volume is mounted.
type MountPoint struct {
	Name     ACName `json:"name"`
	Path     string `json:"path"`
	ReadOnly bool   `json:"readOnly,omitempty"`
}

// assertValid checks that both the mount name and path are non-empty.
func (mount MountPoint) assertValid() error {
	if mount.Name.Empty() {
		return errors.New("name must be set")
	}
	if len(mount.Path) == 0 {
		return errors.New("path must be set")
	}
	return nil
}

// MountPointFromString takes a command line mountpoint parameter and returns a mountpoint
//
// It is useful for actool patch-manifest --mounts
//
// Example mountpoint parameters:
// 	database,path=/tmp,readOnly=true
func MountPointFromString(mp string) (*MountPoint, error) {
	var mount MountPoint
	// The first comma-separated token is the name; prefix it with "name="
	// so the whole string parses as a uniform key=value query.
	mp = "name=" + mp
	mpQuery, err := common.MakeQueryString(mp)
	if err != nil {
		return nil, err
	}
	v, err := url.ParseQuery(mpQuery)
	if err != nil {
		return nil, err
	}
	for key, val := range v {
		// Each parameter may appear at most once.
		if len(val) > 1 {
			return nil, fmt.Errorf("label %s with multiple values %q", key, val)
		}
		switch key {
		case "name":
			acn, err := NewACName(val[0])
			if err != nil {
				return nil, err
			}
			mount.Name = *acn
		case "path":
			mount.Path = val[0]
		case "readOnly":
			ro, err := strconv.ParseBool(val[0])
			if err != nil {
				return nil, err
			}
			mount.ReadOnly = ro
		default:
			return nil, fmt.Errorf("unknown mountpoint parameter %q", key)
		}
	}
	err = mount.assertValid()
	if err != nil {
		return nil, err
	}
	return &mount, nil
}

================================================
FILE: vendor/github.com/appc/spec/schema/types/port.go
================================================

// Copyright 2015 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package types

import (
	"encoding/json"
	"errors"
	"fmt"
	"net"
	"net/url"
	"strconv"

	"github.com/appc/spec/schema/common"
)

// Port represents a port as offered by an application *inside*
// the pod.
type Port struct {
	Name            ACName `json:"name"`
	Protocol        string `json:"protocol"`
	Port            uint   `json:"port"`
	Count           uint   `json:"count"`
	SocketActivated bool   `json:"socketActivated"`
}

// ExposedPort represents a port listening on the host side.
// The PodPort is optional -- if missing, then try and find the pod-side
// information by matching names
type ExposedPort struct {
	Name     ACName `json:"name"`
	HostPort uint   `json:"hostPort"`
	HostIP   net.IP `json:"hostIP,omitempty"`  // optional
	PodPort  *Port  `json:"podPort,omitempty"` // optional. If missing, try and find a corresponding App's port
}

// port is a marshalling shim: it shares Port's fields but none of its
// methods, avoiding infinite recursion in UnmarshalJSON/MarshalJSON.
type port Port

// UnmarshalJSON decodes and validates a Port, defaulting Count to 1 when
// it is absent or zero.
func (p *Port) UnmarshalJSON(data []byte) error {
	var pp port
	if err := json.Unmarshal(data, &pp); err != nil {
		return err
	}
	np := Port(pp)
	if err := np.assertValid(); err != nil {
		return err
	}
	// A missing count means a single port.
	if np.Count == 0 {
		np.Count = 1
	}
	*p = np
	return nil
}

// MarshalJSON validates the Port before encoding it.
func (p Port) MarshalJSON() ([]byte, error) {
	if err := p.assertValid(); err != nil {
		return nil, err
	}
	return json.Marshal(port(p))
}

// assertValid checks that the port (and the range implied by Count) fits
// in the 16-bit port space.
func (p Port) assertValid() error {
	// Although there are no guarantees, most (if not all)
	// transport protocols use 16 bit ports
	if p.Port > 65535 || p.Port < 1 {
		return errors.New("port must be in 1-65535 range")
	}
	if p.Port+p.Count > 65536 {
		return errors.New("end of port range must be in 1-65535 range")
	}
	return nil
}

// PortFromString takes a command line port parameter and returns a port
//
// It is useful for actool patch-manifest --ports
//
// Example port parameters:
// 	health-check,protocol=udp,port=8000
// 	query,protocol=tcp,port=8080,count=1,socketActivated=true
func PortFromString(pt string) (*Port, error) {
	var port Port
	// The first comma-separated token is the name; prefix it with "name="
	// so the whole string parses as a uniform key=value query.
	pt = "name=" + pt
	ptQuery, err := common.MakeQueryString(pt)
	if err != nil {
		return nil, err
	}
	v, err := url.ParseQuery(ptQuery)
	if err != nil {
		return nil, err
	}
	for key, val := range v {
		// Each parameter may appear at most once.
		if len(val) > 1 {
			return nil, fmt.Errorf("label %s with multiple values %q", key, val)
		}
		switch key {
		case "name":
			acn, err := NewACName(val[0])
			if err != nil {
				return nil, err
			}
			port.Name = *acn
		case "protocol":
			port.Protocol = val[0]
		case "port":
			p, err := strconv.ParseUint(val[0], 10, 16)
			if err != nil {
				return nil, err
			}
			port.Port = uint(p)
		case "count":
			cnt, err := strconv.ParseUint(val[0], 10, 16)
			if err != nil {
				return nil, err
			}
			port.Count = uint(cnt)
		case "socketActivated":
			sa, err := strconv.ParseBool(val[0])
			if err != nil {
				return nil, err
			}
			port.SocketActivated = sa
		default:
			return nil, fmt.Errorf("unknown port parameter %q", key)
		}
	}
	err = port.assertValid()
	if err != nil {
		return nil, err
	}
	return &port, nil
}

================================================
FILE: vendor/github.com/appc/spec/schema/types/resource/amount.go
================================================

/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resource

import (
	"math/big"
	"strconv"

	inf "gopkg.in/inf.v0"
)

// Scale is used for getting and setting the base-10 scaled value.
// Base-2 scales are omitted for mathematical simplicity.
// See Quantity.ScaledValue for more details.
type Scale int32

// infScale adapts a Scale value to an inf.Scale value.
func (s Scale) infScale() inf.Scale { return inf.Scale(-s) // inf.Scale is upside-down } const ( Nano Scale = -9 Micro Scale = -6 Milli Scale = -3 Kilo Scale = 3 Mega Scale = 6 Giga Scale = 9 Tera Scale = 12 Peta Scale = 15 Exa Scale = 18 ) var ( Zero = int64Amount{} // Used by quantity strings - treat as read only zeroBytes = []byte("0") ) // int64Amount represents a fixed precision numerator and arbitrary scale exponent. It is faster // than operations on inf.Dec for values that can be represented as int64. type int64Amount struct { value int64 scale Scale } // Sign returns 0 if the value is zero, -1 if it is less than 0, or 1 if it is greater than 0. func (a int64Amount) Sign() int { switch { case a.value == 0: return 0 case a.value > 0: return 1 default: return -1 } } // AsInt64 returns the current amount as an int64 at scale 0, or false if the value cannot be // represented in an int64 OR would result in a loss of precision. This method is intended as // an optimization to avoid calling AsDec. func (a int64Amount) AsInt64() (int64, bool) { if a.scale == 0 { return a.value, true } if a.scale < 0 { // TODO: attempt to reduce factors, although it is assumed that factors are reduced prior // to the int64Amount being created. return 0, false } return positiveScaleInt64(a.value, a.scale) } // AsScaledInt64 returns an int64 representing the value of this amount at the specified scale, // rounding up, or false if that would result in overflow. (1e20).AsScaledInt64(1) would result // in overflow because 1e19 is not representable as an int64. Note that setting a scale larger // than the current value may result in loss of precision - i.e. (1e-6).AsScaledInt64(0) would // return 1, because 0.000001 is rounded up to 1. 
func (a int64Amount) AsScaledInt64(scale Scale) (result int64, ok bool) {
	if a.scale < scale {
		// Moving to a coarser scale rounds up; precision loss is allowed
		// here, so the exactness flag from negativeScaleInt64 is dropped.
		result, _ = negativeScaleInt64(a.value, scale-a.scale)
		return result, true
	}
	return positiveScaleInt64(a.value, a.scale-scale)
}

// AsDec returns an inf.Dec representation of this value.
func (a int64Amount) AsDec() *inf.Dec {
	var base inf.Dec
	base.SetUnscaled(a.value)
	base.SetScale(inf.Scale(-a.scale))
	return &base
}

// Cmp returns 0 if a and b are equal, 1 if a is greater than b, or -1 if a is less than b.
func (a int64Amount) Cmp(b int64Amount) int {
	// First bring both operands to a common scale (mutating only the local
	// copies), falling back to arbitrary-precision inf.Dec comparison when
	// rescaling cannot be done exactly in int64.
	switch {
	case a.scale == b.scale:
		// compare only the unscaled portion
	case a.scale > b.scale:
		result, remainder, exact := divideByScaleInt64(b.value, a.scale-b.scale)
		if !exact {
			return a.AsDec().Cmp(b.AsDec())
		}
		if result == a.value {
			// Equal quotients: the remainder decides the ordering.
			switch {
			case remainder == 0:
				return 0
			case remainder > 0:
				return -1
			default:
				return 1
			}
		}
		b.value = result
	default:
		result, remainder, exact := divideByScaleInt64(a.value, b.scale-a.scale)
		if !exact {
			return a.AsDec().Cmp(b.AsDec())
		}
		if result == b.value {
			// Equal quotients: the remainder decides the ordering.
			switch {
			case remainder == 0:
				return 0
			case remainder > 0:
				return 1
			default:
				return -1
			}
		}
		a.value = result
	}

	// Scales now match; compare the unscaled values directly.
	switch {
	case a.value == b.value:
		return 0
	case a.value < b.value:
		return -1
	default:
		return 1
	}
}

// Add adds two int64Amounts together, matching scales. It will return false and not mutate
// a if overflow or underflow would result.
func (a *int64Amount) Add(b int64Amount) bool {
	switch {
	case b.value == 0:
		return true
	case a.value == 0:
		a.value = b.value
		a.scale = b.scale
		return true
	case a.scale == b.scale:
		c, ok := int64Add(a.value, b.value)
		if !ok {
			return false
		}
		a.value = c
	case a.scale > b.scale:
		// rescale a down to b's (smaller) scale before adding
		c, ok := positiveScaleInt64(a.value, a.scale-b.scale)
		if !ok {
			return false
		}
		c, ok = int64Add(c, b.value)
		if !ok {
			return false
		}
		a.scale = b.scale
		a.value = c
	default:
		// rescale b down to a's (smaller) scale before adding
		c, ok := positiveScaleInt64(b.value, b.scale-a.scale)
		if !ok {
			return false
		}
		c, ok = int64Add(a.value, c)
		if !ok {
			return false
		}
		a.value = c
	}
	return true
}

// Sub removes the value of b from the current amount, or returns false if underflow would result.
func (a *int64Amount) Sub(b int64Amount) bool {
	return a.Add(int64Amount{value: -b.value, scale: b.scale})
}

// AsScale adjusts this amount to set a minimum scale, rounding up, and returns true iff no precision
// was lost. (1.1e5).AsScale(5) would return 1.1e5, but (1.1e5).AsScale(6) would return 1e6.
func (a int64Amount) AsScale(scale Scale) (int64Amount, bool) {
	if a.scale >= scale {
		return a, true
	}
	result, exact := negativeScaleInt64(a.value, scale-a.scale)
	return int64Amount{value: result, scale: scale}, exact
}

// AsCanonicalBytes accepts a buffer to write the base-10 string value of this field to, and returns
// either that buffer or a larger buffer and the current exponent of the value. The value is adjusted
// until the exponent is a multiple of 3 - i.e. 1.1e5 would return "110", 3.
func (a int64Amount) AsCanonicalBytes(out []byte) (result []byte, exponent int32) {
	mantissa := a.value
	exponent = int32(a.scale)

	// fold trailing decimal zeros of the mantissa into the exponent
	amount, times := removeInt64Factors(mantissa, 10)
	exponent += int32(times)

	// make sure exponent is a multiple of 3
	var ok bool
	switch exponent % 3 {
	case 1, -2:
		amount, ok = int64MultiplyScale10(amount)
		if !ok {
			// fall back to the arbitrary-precision path on overflow
			return infDecAmount{a.AsDec()}.AsCanonicalBytes(out)
		}
		exponent = exponent - 1
	case 2, -1:
		amount, ok = int64MultiplyScale100(amount)
		if !ok {
			return infDecAmount{a.AsDec()}.AsCanonicalBytes(out)
		}
		exponent = exponent - 2
	}
	return strconv.AppendInt(out, amount, 10), exponent
}

// AsCanonicalBase1024Bytes accepts a buffer to write the base-1024 string value of this field to, and returns
// either that buffer or a larger buffer and the current exponent of the value. 2048 is 2 * 1024 ^ 1 and would
// return []byte("2048"), 1.
func (a int64Amount) AsCanonicalBase1024Bytes(out []byte) (result []byte, exponent int32) {
	value, ok := a.AsScaledInt64(0)
	if !ok {
		return infDecAmount{a.AsDec()}.AsCanonicalBase1024Bytes(out)
	}
	amount, exponent := removeInt64Factors(value, 1024)
	return strconv.AppendInt(out, amount, 10), exponent
}

// infDecAmount implements common operations over an inf.Dec that are specific to the quantity
// representation.
type infDecAmount struct {
	*inf.Dec
}

// AsScale adjusts this amount to set a minimum scale, rounding up, and returns true iff no precision
// was lost. (1.1e5).AsScale(5) would return 1.1e5, but (1.1e5).AsScale(6) would return 1e6.
func (a infDecAmount) AsScale(scale Scale) (infDecAmount, bool) {
	tmp := &inf.Dec{}
	tmp.Round(a.Dec, scale.infScale(), inf.RoundUp)
	// exact iff rounding did not change the value
	return infDecAmount{tmp}, tmp.Cmp(a.Dec) == 0
}

// AsCanonicalBytes accepts a buffer to write the base-10 string value of this field to, and returns
// either that buffer or a larger buffer and the current exponent of the value. The value is adjusted
// until the exponent is a multiple of 3 - i.e. 1.1e5 would return "110", 3.
func (a infDecAmount) AsCanonicalBytes(out []byte) (result []byte, exponent int32) {
	mantissa := a.Dec.UnscaledBig()
	exponent = int32(-a.Dec.Scale())
	amount := big.NewInt(0).Set(mantissa)
	// move all factors of 10 into the exponent for easy reasoning
	amount, times := removeBigIntFactors(amount, bigTen)
	exponent += times

	// make sure exponent is a multiple of 3
	for exponent%3 != 0 {
		amount.Mul(amount, bigTen)
		exponent--
	}

	return append(out, amount.String()...), exponent
}

// AsCanonicalBase1024Bytes accepts a buffer to write the base-1024 string value of this field to, and returns
// either that buffer or a larger buffer and the current exponent of the value. 2048 is 2 * 1024 ^ 1 and would
// return []byte("2048"), 1.
func (a infDecAmount) AsCanonicalBase1024Bytes(out []byte) (result []byte, exponent int32) {
	tmp := &inf.Dec{}
	// round away any fractional part before factoring out powers of 1024
	tmp.Round(a.Dec, 0, inf.RoundUp)
	amount, exponent := removeBigIntFactors(tmp.UnscaledBig(), big1024)
	return append(out, amount.String()...), exponent
}

================================================ FILE: vendor/github.com/appc/spec/schema/types/resource/math.go ================================================
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package resource

import (
	"math/big"

	inf "gopkg.in/inf.v0"
)

const (
	// maxInt64Factors is the highest value that will be checked when removing factors of 10 from an int64.
	// It is also the maximum decimal digits that can be represented with an int64.
	maxInt64Factors = 18
)

var (
	// Commonly needed big.Int values-- treat as read only!
	bigTen      = big.NewInt(10)
	bigZero     = big.NewInt(0)
	bigOne      = big.NewInt(1)
	bigThousand = big.NewInt(1000)
	big1024     = big.NewInt(1024)

	// Commonly needed inf.Dec values-- treat as read only!
	decZero      = inf.NewDec(0, 0)
	decOne       = inf.NewDec(1, 0)
	decMinusOne  = inf.NewDec(-1, 0)
	decThousand  = inf.NewDec(1000, 0)
	dec1024      = inf.NewDec(1024, 0)
	decMinus1024 = inf.NewDec(-1024, 0)

	// Largest (in magnitude) number allowed.
	maxAllowed = infDecAmount{inf.NewDec((1<<63)-1, 0)} // == max int64

	// The maximum value we can represent milli-units for.
	// Compare with the return value of Quantity.Value() to
	// see if it's safe to use Quantity.MilliValue().
	MaxMilliValue = int64(((1 << 63) - 1) / 1000)
)

// int64 bounds, used by the overflow checks below.
const mostNegative = -(mostPositive + 1)
const mostPositive = 1<<63 - 1

// int64Add returns a+b, or false if that would overflow int64.
func int64Add(a, b int64) (int64, bool) {
	c := a + b
	switch {
	case a > 0 && b > 0:
		// positive overflow wraps to a negative sum
		if c < 0 {
			return 0, false
		}
	case a < 0 && b < 0:
		// negative overflow wraps to a positive sum...
		if c > 0 {
			return 0, false
		}
		// ...except mostNegative+mostNegative, which wraps to exactly 0
		if a == mostNegative && b == mostNegative {
			return 0, false
		}
	}
	return c, true
}

// int64Multiply returns a*b, or false if that would overflow or underflow int64.
func int64Multiply(a, b int64) (int64, bool) {
	if a == 0 || b == 0 || a == 1 || b == 1 {
		return a * b, true
	}
	// mostNegative cannot be negated, which would break the c/b == a check
	if a == mostNegative || b == mostNegative {
		return 0, false
	}
	c := a * b
	// division round-trips exactly iff the multiplication did not overflow
	return c, c/b == a
}

// int64MultiplyScale returns a*b, assuming b is greater than one, or false if that would overflow or underflow int64.
// Use when b is known to be greater than one.
func int64MultiplyScale(a int64, b int64) (int64, bool) {
	if a == 0 || a == 1 {
		return a * b, true
	}
	if a == mostNegative && b != 1 {
		return 0, false
	}
	c := a * b
	return c, c/b == a
}

// int64MultiplyScale10 multiplies a by 10, or returns false if that would overflow. This method is faster than
// int64Multiply(a, 10) because the compiler can optimize constant factor multiplication.
func int64MultiplyScale10(a int64) (int64, bool) { if a == 0 || a == 1 { return a * 10, true } if a == mostNegative { return 0, false } c := a * 10 return c, c/10 == a } // int64MultiplyScale100 multiplies a by 100, or returns false if that would overflow. This method is faster than // int64Multiply(a, 100) because the compiler can optimize constant factor multiplication. func int64MultiplyScale100(a int64) (int64, bool) { if a == 0 || a == 1 { return a * 100, true } if a == mostNegative { return 0, false } c := a * 100 return c, c/100 == a } // int64MultiplyScale1000 multiplies a by 1000, or returns false if that would overflow. This method is faster than // int64Multiply(a, 1000) because the compiler can optimize constant factor multiplication. func int64MultiplyScale1000(a int64) (int64, bool) { if a == 0 || a == 1 { return a * 1000, true } if a == mostNegative { return 0, false } c := a * 1000 return c, c/1000 == a } // positiveScaleInt64 multiplies base by 10^scale, returning false if the // value overflows. Passing a negative scale is undefined. func positiveScaleInt64(base int64, scale Scale) (int64, bool) { switch scale { case 0: return base, true case 1: return int64MultiplyScale10(base) case 2: return int64MultiplyScale100(base) case 3: return int64MultiplyScale1000(base) case 6: return int64MultiplyScale(base, 1000000) case 9: return int64MultiplyScale(base, 1000000000) default: value := base var ok bool for i := Scale(0); i < scale; i++ { if value, ok = int64MultiplyScale(value, 10); !ok { return 0, false } } return value, true } } // negativeScaleInt64 reduces base by the provided scale, rounding up, until the // value is zero or the scale is reached. Passing a negative scale is undefined. // The value returned, if not exact, is rounded away from zero. 
func negativeScaleInt64(base int64, scale Scale) (result int64, exact bool) {
	if scale == 0 {
		return base, true
	}

	value := base
	var fraction bool
	for i := Scale(0); i < scale; i++ {
		// remember if any non-zero digit was discarded along the way
		if !fraction && value%10 != 0 {
			fraction = true
		}
		value = value / 10
		if value == 0 {
			if fraction {
				// a discarded fraction rounds away from zero to +/-1
				if base > 0 {
					return 1, false
				}
				return -1, false
			}
			return 0, true
		}
	}
	if fraction {
		// round away from zero
		if base > 0 {
			value += 1
		} else {
			value += -1
		}
	}
	return value, !fraction
}

// pow10Int64 returns 10^b for 0 <= b <= 18 via table lookup, or 0 for any
// other input (10^19 is not representable as an int64).
func pow10Int64(b int64) int64 {
	switch b {
	case 0:
		return 1
	case 1:
		return 10
	case 2:
		return 100
	case 3:
		return 1000
	case 4:
		return 10000
	case 5:
		return 100000
	case 6:
		return 1000000
	case 7:
		return 10000000
	case 8:
		return 100000000
	case 9:
		return 1000000000
	case 10:
		return 10000000000
	case 11:
		return 100000000000
	case 12:
		return 1000000000000
	case 13:
		return 10000000000000
	case 14:
		return 100000000000000
	case 15:
		return 1000000000000000
	case 16:
		return 10000000000000000
	case 17:
		return 100000000000000000
	case 18:
		return 1000000000000000000
	default:
		return 0
	}
}

// powInt64 raises a to the bth power. Is not overflow aware.
func powInt64(a, b int64) int64 {
	// exponentiation by squaring
	p := int64(1)
	for b > 0 {
		if b&1 != 0 {
			p *= a
		}
		b >>= 1
		a *= a
	}
	return p
}

// divideByScaleInt64 returns the result of dividing base by 10^scale and the remainder,
// or exact=false if the divisor 10^scale is not representable as an int64.
// Dividing by negative scales is undefined.
func divideByScaleInt64(base int64, scale Scale) (result, remainder int64, exact bool) {
	if scale == 0 {
		return base, 0, true
	}
	// the max scale representable in base 10 in an int64 is 18 decimal places
	if scale >= 18 {
		return 0, base, false
	}
	divisor := pow10Int64(int64(scale))
	return base / divisor, base % divisor, true
}

// removeInt64Factors divides in a loop; the return values have the property that
// value == result * base ^ scale
func removeInt64Factors(value int64, base int64) (result int64, times int32) {
	times = 0
	result = value
	// work on the absolute value and restore the sign at the end
	negative := result < 0
	if negative {
		result = -result
	}
	switch base {
	// allow the compiler to optimize the common cases
	case 10:
		for result >= 10 && result%10 == 0 {
			times++
			result = result / 10
		}
	// allow the compiler to optimize the common cases
	case 1024:
		for result >= 1024 && result%1024 == 0 {
			times++
			result = result / 1024
		}
	default:
		for result >= base && result%base == 0 {
			times++
			result = result / base
		}
	}
	if negative {
		result = -result
	}
	return result, times
}

// removeBigIntFactors divides in a loop; the return values have the property that
// d == result * factor ^ times
// d may be modified in place.
// If d == 0, then the return values will be (0, 0)
func removeBigIntFactors(d, factor *big.Int) (result *big.Int, times int32) {
	q := big.NewInt(0)
	m := big.NewInt(0)
	for d.Cmp(bigZero) != 0 {
		q.DivMod(d, factor, m)
		if m.Cmp(bigZero) != 0 {
			break
		}
		times++
		// swap quotient and dividend so q is reused as scratch next round
		d, q = q, d
	}
	return d, times
}

================================================ FILE: vendor/github.com/appc/spec/schema/types/resource/quantity.go ================================================
/*
Copyright 2014 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package resource import ( "bytes" "errors" "fmt" "math/big" "regexp" "strconv" "strings" flag "github.com/spf13/pflag" inf "gopkg.in/inf.v0" ) // Quantity is a fixed-point representation of a number. // It provides convenient marshaling/unmarshaling in JSON and YAML, // in addition to String() and Int64() accessors. // // The serialization format is: // // ::= // (Note that may be empty, from the "" case in .) // ::= 0 | 1 | ... | 9 // ::= | // ::= | . | . | . // ::= "+" | "-" // ::= | // ::= | | // ::= Ki | Mi | Gi | Ti | Pi | Ei // (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html) // ::= m | "" | k | M | G | T | P | E // (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) // ::= "e" | "E" // // No matter which of the three exponent forms is used, no quantity may represent // a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal // places. Numbers larger or more precise will be capped or rounded up. // (E.g.: 0.1m will rounded up to 1m.) // This may be extended in the future if we require larger or smaller quantities. // // When a Quantity is parsed from a string, it will remember the type of suffix // it had, and will use the same type again when it is serialized. // // Before serializing, Quantity will be put in "canonical form". // This means that Exponent/suffix will be adjusted up or down (with a // corresponding increase or decrease in Mantissa) such that: // a. No precision is lost // b. No fractional digits will be emitted // c. The exponent (or suffix) is as large as possible. 
// The sign will be omitted unless the number is negative.
//
// Examples:
//   1.5 will be serialized as "1500m"
//   1.5Gi will be serialized as "1536Mi"
//
// NOTE: We reserve the right to amend this canonical format, perhaps to
//   allow 1.5 to be canonical.
// TODO: Remove above disclaimer after all bikeshedding about format is over,
//   or after March 2015.
//
// Note that the quantity will NEVER be internally represented by a
// floating point number. That is the whole point of this exercise.
//
// Non-canonical values will still parse as long as they are well formed,
// but will be re-emitted in their canonical form. (So always use canonical
// form, or don't diff.)
//
// This format is intended to make it difficult to use these numbers without
// writing some sort of special handling code in the hopes that that will
// cause implementors to also use a fixed point implementation.
//
// +gencopy=false
// +protobuf=true
// +protobuf.embed=string
// +protobuf.options.marshal=false
// +protobuf.options.(gogoproto.goproto_stringer)=false
type Quantity struct {
	// i is the quantity in int64 scaled form, if d.Dec == nil
	i int64Amount
	// d is the quantity in inf.Dec form if d.Dec != nil
	d infDecAmount
	// s is the generated value of this quantity to avoid recalculation
	s string

	// Change Format at will. See the comment for Canonicalize for
	// more details.
	Format
}

// CanonicalValue allows a quantity amount to be converted to a string.
type CanonicalValue interface {
	// AsCanonicalBytes returns a byte array representing the string representation
	// of the value mantissa and an int32 representing its exponent in base-10. Callers may
	// pass a byte slice to the method to avoid allocations.
	AsCanonicalBytes(out []byte) ([]byte, int32)
	// AsCanonicalBase1024Bytes returns a byte array representing the string representation
	// of the value mantissa and an int32 representing its exponent in base-1024. Callers
	// may pass a byte slice to the method to avoid allocations.
	AsCanonicalBase1024Bytes(out []byte) ([]byte, int32)
}

// Format lists the three possible formattings of a quantity.
type Format string

const (
	DecimalExponent = Format("DecimalExponent") // e.g., 12e6
	BinarySI        = Format("BinarySI")        // e.g., 12Mi (12 * 2^20)
	DecimalSI       = Format("DecimalSI")       // e.g., 12M (12 * 10^6)
)

// MustParse turns the given string into a quantity or panics; for tests
// or others cases where you know the string is valid.
func MustParse(str string) Quantity {
	q, err := ParseQuantity(str)
	if err != nil {
		panic(fmt.Errorf("cannot parse '%v': %v", str, err))
	}
	return q
}

const (
	// splitREString is used to separate a number from its suffix; as such,
	// this is overly permissive, but that's OK-- it will be checked later.
	splitREString = "^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$"
)

var (
	// splitRE is used to get the various parts of a number.
	splitRE = regexp.MustCompile(splitREString)

	// Errors that could happen while parsing a string.
	ErrFormatWrong = errors.New("quantities must match the regular expression '" + splitREString + "'")
	ErrNumeric     = errors.New("unable to parse numeric part of quantity")
	ErrSuffix      = errors.New("unable to parse quantity's suffix")
)

// parseQuantityString is a fast scanner for quantity values.
// parseQuantityString splits str into its sign, numerator, denominator
// (fractional digits) and suffix without allocating. value is the numeric
// portion of str (sign plus digits); err is ErrFormatWrong if the suffix
// contains a non-digit after an exponent sign.
func parseQuantityString(str string) (positive bool, value, num, denom, suffix string, err error) {
	positive = true
	pos := 0
	end := len(str)

	// handle leading sign
	if pos < end {
		switch str[0] {
		case '-':
			positive = false
			pos++
		case '+':
			pos++
		}
	}

	// strip leading zeros
Zeroes:
	for i := pos; ; i++ {
		if i >= end {
			// the string was all zeros (or empty after the sign)
			num = "0"
			value = num
			return
		}
		switch str[i] {
		case '0':
			pos++
		default:
			break Zeroes
		}
	}

	// extract the numerator
Num:
	for i := pos; ; i++ {
		if i >= end {
			num = str[pos:end]
			value = str[0:end]
			return
		}
		switch str[i] {
		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
		default:
			num = str[pos:i]
			pos = i
			break Num
		}
	}

	// if we stripped all numerator positions, always return 0
	if len(num) == 0 {
		num = "0"
	}

	// handle a denominator
	if pos < end && str[pos] == '.' {
		pos++
	Denom:
		for i := pos; ; i++ {
			if i >= end {
				denom = str[pos:end]
				value = str[0:end]
				return
			}
			switch str[i] {
			case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			default:
				denom = str[pos:i]
				pos = i
				break Denom
			}
		}
		// TODO: we currently allow 1.G, but we may not want to in the future.
		// if len(denom) == 0 {
		// 	err = ErrFormatWrong
		// 	return
		// }
	}
	value = str[0:pos]

	// grab the elements of the suffix
	suffixStart := pos
	for i := pos; ; i++ {
		if i >= end {
			suffix = str[suffixStart:end]
			return
		}
		if !strings.ContainsAny(str[i:i+1], "eEinumkKMGTP") {
			pos = i
			break
		}
	}
	// an exponent suffix may carry its own sign (e.g. "1e-3")
	if pos < end {
		switch str[pos] {
		case '-', '+':
			pos++
		}
	}
Suffix:
	for i := pos; ; i++ {
		if i >= end {
			suffix = str[suffixStart:end]
			return
		}
		switch str[i] {
		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
		default:
			break Suffix
		}
	}
	// we encountered a non decimal in the Suffix loop, but the last character
	// was not a valid exponent
	err = ErrFormatWrong
	return
}

// ParseQuantity turns str into a Quantity, or returns an error.
func ParseQuantity(str string) (Quantity, error) {
	if len(str) == 0 {
		return Quantity{}, ErrFormatWrong
	}
	if str == "0" {
		return Quantity{Format: DecimalSI, s: str}, nil
	}

	positive, value, num, denom, suf, err := parseQuantityString(str)
	if err != nil {
		return Quantity{}, err
	}

	base, exponent, format, ok := quantitySuffixer.interpret(suffix(suf))
	if !ok {
		return Quantity{}, ErrSuffix
	}

	// precision is the number of decimal digits of headroom left in an int64;
	// a negative precision forces the slow (inf.Dec) path below.
	precision := int32(0)
	scale := int32(0)
	mantissa := int64(1)
	switch format {
	case DecimalExponent, DecimalSI:
		scale = exponent
		precision = maxInt64Factors - int32(len(num)+len(denom))
	case BinarySI:
		scale = 0
		switch {
		case exponent >= 0 && len(denom) == 0:
			// only handle positive binary numbers with the fast path
			mantissa = int64(int64(mantissa) << uint64(exponent))
			// 1Mi (2^20) has ~6 digits of decimal precision, so exponent*3/10 -1 is roughly the precision
			precision = 15 - int32(len(num)) - int32(float32(exponent)*3/10) - 1
		default:
			precision = -1
		}
	}

	if precision >= 0 {
		// if we have a denominator, shift the entire value to the left by the number of places in the
		// denominator
		scale -= int32(len(denom))
		if scale >= int32(Nano) {
			shifted := num + denom

			var value int64
			value, err := strconv.ParseInt(shifted, 10, 64)
			if err != nil {
				return Quantity{}, ErrNumeric
			}
			if result, ok := int64Multiply(value, int64(mantissa)); ok {
				if !positive {
					result = -result
				}
				// if the number is in canonical form, reuse the string
				switch format {
				case BinarySI:
					if exponent%10 == 0 && (value&0x07 != 0) {
						return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil
					}
				default:
					if scale%3 == 0 && !strings.HasSuffix(shifted, "000") && shifted[0] != '0' {
						return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil
					}
				}
				return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format}, nil
			}
		}
	}

	// slow path: arbitrary precision via inf.Dec
	amount := new(inf.Dec)
	if _, ok := amount.SetString(value); !ok {
		return Quantity{}, ErrNumeric
	}

	// So that no one but us has to think about suffixes, remove it.
	if base == 10 {
		amount.SetScale(amount.Scale() + Scale(exponent).infScale())
	} else if base == 2 {
		// numericSuffix = 2 ** exponent
		numericSuffix := big.NewInt(1).Lsh(bigOne, uint(exponent))
		ub := amount.UnscaledBig()
		amount.SetUnscaledBig(ub.Mul(ub, numericSuffix))
	}

	// Cap at min/max bounds.
	sign := amount.Sign()
	if sign == -1 {
		amount.Neg(amount)
	}

	// This rounds non-zero values up to the minimum representable value, under the theory that
	// if you want some resources, you should get some resources, even if you asked for way too small
	// of an amount.  Arguably, this should be inf.RoundHalfUp (normal rounding), but that would have
	// the side effect of rounding values < .5n to zero.
	if v, ok := amount.Unscaled(); v != int64(0) || !ok {
		amount.Round(amount, Nano.infScale(), inf.RoundUp)
	}

	// The max is just a simple cap.
	// TODO: this prevents accumulating quantities greater than int64, for instance quota across a cluster
	if format == BinarySI && amount.Cmp(maxAllowed.Dec) > 0 {
		amount.Set(maxAllowed.Dec)
	}

	if format == BinarySI && amount.Cmp(decOne) < 0 && amount.Cmp(decZero) > 0 {
		// This avoids rounding and hopefully confusion, too.
		format = DecimalSI
	}
	if sign == -1 {
		amount.Neg(amount)
	}

	return Quantity{d: infDecAmount{amount}, Format: format}, nil
}

// CanonicalizeBytes returns the canonical form of q and its suffix (see comment on Quantity).
//
// Note about BinarySI:
// * If q.Format is set to BinarySI and q.Amount represents a non-zero value between
//   -1 and +1, it will be emitted as if q.Format were DecimalSI.
// * Otherwise, if q.Format is set to BinarySI, frational parts of q.Amount will be
//   rounded up. (1.1i becomes 2i.)
func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) {
	if q.IsZero() {
		return zeroBytes, nil
	}

	var rounded CanonicalValue
	format := q.Format
	switch format {
	case DecimalExponent, DecimalSI:
	case BinarySI:
		if q.CmpInt64(-1024) > 0 && q.CmpInt64(1024) < 0 {
			// This avoids rounding and hopefully confusion, too.
			format = DecimalSI
		} else {
			var exact bool
			if rounded, exact = q.AsScale(0); !exact {
				// Don't lose precision-- show as DecimalSI
				format = DecimalSI
			}
		}
	default:
		format = DecimalExponent
	}

	// TODO: If BinarySI formatting is requested but would cause rounding, upgrade to
	// one of the other formats.
	switch format {
	case DecimalExponent, DecimalSI:
		number, exponent := q.AsCanonicalBytes(out)
		suffix, _ := quantitySuffixer.constructBytes(10, exponent, format)
		return number, suffix
	default:
		// format must be BinarySI
		number, exponent := rounded.AsCanonicalBase1024Bytes(out)
		suffix, _ := quantitySuffixer.constructBytes(2, exponent*10, format)
		return number, suffix
	}
}

// AsInt64 returns a representation of the current value as an int64 if a fast conversion
// is possible. If false is returned, callers must use the inf.Dec form of this quantity.
func (q *Quantity) AsInt64() (int64, bool) {
	if q.d.Dec != nil {
		return 0, false
	}
	return q.i.AsInt64()
}

// ToDec promotes the quantity in place to use an inf.Dec representation and returns itself.
func (q *Quantity) ToDec() *Quantity {
	if q.d.Dec == nil {
		q.d.Dec = q.i.AsDec()
		q.i = int64Amount{}
	}
	return q
}

// AsDec returns the quantity as represented by a scaled inf.Dec.
func (q *Quantity) AsDec() *inf.Dec {
	if q.d.Dec != nil {
		return q.d.Dec
	}
	// promote the int64 form lazily, like ToDec
	q.d.Dec = q.i.AsDec()
	q.i = int64Amount{}
	return q.d.Dec
}

// AsCanonicalBytes returns the canonical byte representation of this quantity as a mantissa
// and base 10 exponent. The out byte slice may be passed to the method to avoid an extra
// allocation.
func (q *Quantity) AsCanonicalBytes(out []byte) (result []byte, exponent int32) {
	if q.d.Dec != nil {
		return q.d.AsCanonicalBytes(out)
	}
	return q.i.AsCanonicalBytes(out)
}

// IsZero returns true if the quantity is equal to zero.
func (q *Quantity) IsZero() bool {
	if q.d.Dec != nil {
		return q.d.Dec.Sign() == 0
	}
	return q.i.value == 0
}

// Sign returns 0 if the quantity is zero, -1 if the quantity is less than zero, or 1 if the
// quantity is greater than zero.
func (q *Quantity) Sign() int {
	if q.d.Dec != nil {
		return q.d.Dec.Sign()
	}
	return q.i.Sign()
}

// AsScale returns the current value, rounded up to the provided scale, and returns
// false if the scale resulted in a loss of precision.
func (q *Quantity) AsScale(scale Scale) (CanonicalValue, bool) {
	if q.d.Dec != nil {
		return q.d.AsScale(scale)
	}
	return q.i.AsScale(scale)
}

// RoundUp updates the quantity to the provided scale, ensuring that the value is at
// least 1. False is returned if the rounding operation resulted in a loss of precision.
// Negative numbers are rounded away from zero (-9 scale 1 rounds to -10).
func (q *Quantity) RoundUp(scale Scale) bool {
	if q.d.Dec != nil {
		q.s = ""
		d, exact := q.d.AsScale(scale)
		q.d = d
		return exact
	}
	// avoid clearing the string value if we have already calculated it
	if q.i.scale >= scale {
		return true
	}
	q.s = ""
	i, exact := q.i.AsScale(scale)
	q.i = i
	return exact
}

// Add adds the provide y quantity to the current value. If the current value is zero,
// the format of the quantity will be updated to the format of y.
func (q *Quantity) Add(y Quantity) {
	q.s = ""
	if q.d.Dec == nil && y.d.Dec == nil {
		// fast path: both sides still in int64 form
		if q.i.value == 0 {
			q.Format = y.Format
		}
		if q.i.Add(y.i) {
			return
		}
	} else if q.IsZero() {
		q.Format = y.Format
	}
	// fall back to arbitrary precision on overflow or Dec form
	q.ToDec().d.Dec.Add(q.d.Dec, y.AsDec())
}

// Sub subtracts the provided quantity from the current value in place. If the current
// value is zero, the format of the quantity will be updated to the format of y.
func (q *Quantity) Sub(y Quantity) {
	q.s = ""
	if q.IsZero() {
		q.Format = y.Format
	}
	// fast path: both sides in int64 form and no underflow
	if q.d.Dec == nil && y.d.Dec == nil && q.i.Sub(y.i) {
		return
	}
	q.ToDec().d.Dec.Sub(q.d.Dec, y.AsDec())
}

// Cmp returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the
// quantity is greater than y.
func (q *Quantity) Cmp(y Quantity) int {
	if q.d.Dec == nil && y.d.Dec == nil {
		return q.i.Cmp(y.i)
	}
	return q.AsDec().Cmp(y.AsDec())
}

// CmpInt64 returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the
// quantity is greater than y.
func (q *Quantity) CmpInt64(y int64) int {
	if q.d.Dec != nil {
		return q.d.Dec.Cmp(inf.NewDec(y, inf.Scale(0)))
	}
	return q.i.Cmp(int64Amount{value: y})
}

// Neg sets quantity to be the negative value of itself.
func (q *Quantity) Neg() {
	q.s = ""
	if q.d.Dec == nil {
		q.i.value = -q.i.value
		return
	}
	q.d.Dec.Neg(q.d.Dec)
}

// int64QuantityExpectedBytes is the expected width in bytes of the canonical string representation
// of most Quantity values.
const int64QuantityExpectedBytes = 18

// String formats the Quantity as a string, caching the result if not calculated.
// String is an expensive operation and caching this result significantly reduces the cost of
// normal parse / marshal operations on Quantity.
func (q *Quantity) String() string {
	if len(q.s) == 0 {
		result := make([]byte, 0, int64QuantityExpectedBytes)
		number, suffix := q.CanonicalizeBytes(result)
		number = append(number, suffix...)
		q.s = string(number)
	}
	return q.s
}

// MarshalJSON implements the json.Marshaller interface.
func (q Quantity) MarshalJSON() ([]byte, error) {
	if len(q.s) > 0 {
		// cached string: just wrap it in quotes
		out := make([]byte, len(q.s)+2)
		out[0], out[len(out)-1] = '"', '"'
		copy(out[1:], q.s)
		return out, nil
	}
	result := make([]byte, int64QuantityExpectedBytes, int64QuantityExpectedBytes)
	result[0] = '"'
	number, suffix := q.CanonicalizeBytes(result[1:1])
	// if the same slice was returned to us that we passed in, avoid another allocation by copying number into
	// the source slice and returning that
	if len(number) > 0 && &number[0] == &result[1] && (len(number)+len(suffix)+2) <= int64QuantityExpectedBytes {
		number = append(number, suffix...)
		number = append(number, '"')
		return result[:1+len(number)], nil
	}
	// if CanonicalizeBytes needed more space than our slice provided, we may need to allocate again so use
	// append
	result = result[:1]
	result = append(result, number...)
	result = append(result, suffix...)
	result = append(result, '"')
	return result, nil
}

// UnmarshalJSON implements the json.Unmarshaller interface.
// TODO: Remove support for leading/trailing whitespace
func (q *Quantity) UnmarshalJSON(value []byte) error {
	l := len(value)
	if l == 4 && bytes.Equal(value, []byte("null")) {
		// JSON null resets the quantity to zero
		q.d.Dec = nil
		q.i = int64Amount{}
		return nil
	}
	if l >= 2 && value[0] == '"' && value[l-1] == '"' {
		value = value[1 : l-1]
	}

	parsed, err := ParseQuantity(strings.TrimSpace(string(value)))
	if err != nil {
		return err
	}

	// This copy is safe because parsed will not be referred to again.
	*q = parsed
	return nil
}

// NewQuantity returns a new Quantity representing the given
// value in the given format.
func NewQuantity(value int64, format Format) *Quantity {
	return &Quantity{
		i:      int64Amount{value: value},
		Format: format,
	}
}

// NewMilliQuantity returns a new Quantity representing the given
// value * 1/1000 in the given format. Note that BinarySI formatting
// will round fractional values, and will be changed to DecimalSI for
// values x where (-1 < x < 1) && (x != 0).
func NewMilliQuantity(value int64, format Format) *Quantity {
	return &Quantity{
		i:      int64Amount{value: value, scale: -3},
		Format: format,
	}
}

// NewScaledQuantity returns a new Quantity representing the given
// value * 10^scale in DecimalSI format.
func NewScaledQuantity(value int64, scale Scale) *Quantity {
	return &Quantity{
		i:      int64Amount{value: value, scale: scale},
		Format: DecimalSI,
	}
}

// Value returns the value of q; any fractional part will be lost.
func (q *Quantity) Value() int64 {
	return q.ScaledValue(0)
}

// MilliValue returns the value of ceil(q * 1000); this could overflow an int64;
// if that's a concern, call Value() first to verify the number is small enough.
func (q *Quantity) MilliValue() int64 {
	return q.ScaledValue(Milli)
}

// ScaledValue returns the value of ceil(q * 10^scale); this could overflow an int64.
// To detect overflow, call Value() first and verify the expected magnitude.
func (q *Quantity) ScaledValue(scale Scale) int64 {
	if q.d.Dec == nil {
		i, _ := q.i.AsScaledInt64(scale)
		return i
	}
	dec := q.d.Dec
	return scaledValue(dec.UnscaledBig(), int(dec.Scale()), int(scale.infScale()))
}

// Set sets q's value to be value.
func (q *Quantity) Set(value int64) {
	q.SetScaled(value, 0)
}

// SetMilli sets q's value to be value * 1/1000.
func (q *Quantity) SetMilli(value int64) {
	q.SetScaled(value, Milli)
}

// SetScaled sets q's value to be value * 10^scale
func (q *Quantity) SetScaled(value int64, scale Scale) {
	// invalidate the cached string and any Dec form
	q.s = ""
	q.d.Dec = nil
	q.i = int64Amount{value: value, scale: scale}
}

// Copy is a convenience function that makes a deep copy for you. Non-deep
// copies of quantities share pointers and you will regret that.
func (q *Quantity) Copy() *Quantity {
	if q.d.Dec == nil {
		// Integer-backed quantity: all fields have value semantics, so a
		// struct copy is already deep.
		return &Quantity{
			s:      q.s,
			i:      q.i,
			Format: q.Format,
		}
	}
	// inf.Dec is held by pointer, so clone it to keep the copy independent.
	tmp := &inf.Dec{}
	return &Quantity{
		s:      q.s,
		d:      infDecAmount{tmp.Set(q.d.Dec)},
		Format: q.Format,
	}
}

// qFlag is a helper type for the Flag function
type qFlag struct {
	dest *Quantity
}

// Sets the value of the internal Quantity. (used by flag & pflag)
func (qf qFlag) Set(val string) error {
	q, err := ParseQuantity(val)
	if err != nil {
		return err
	}
	// This copy is OK because q will not be referenced again.
	*qf.dest = q
	return nil
}

// Converts the value of the internal Quantity to a string. (used by flag & pflag)
func (qf qFlag) String() string {
	return qf.dest.String()
}

// States the type of flag this is (Quantity). (used by pflag)
func (qf qFlag) Type() string {
	return "quantity"
}

// QuantityFlag is a helper that makes a quantity flag (using standard flag package).
// Will panic if defaultValue is not a valid quantity.
func QuantityFlag(flagName, defaultValue, description string) *Quantity {
	q := MustParse(defaultValue)
	flag.Var(NewQuantityFlagValue(&q), flagName, description)
	return &q
}

// NewQuantityFlagValue returns an object that can be used to back a flag,
// pointing at the given Quantity variable.
func NewQuantityFlagValue(q *Quantity) flag.Value {
	return qFlag{q}
}

================================================
FILE: vendor/github.com/appc/spec/schema/types/resource/scale_int.go
================================================
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License. */ package resource import ( "math" "math/big" "sync" ) var ( // A sync pool to reduce allocation. intPool sync.Pool maxInt64 = big.NewInt(math.MaxInt64) ) func init() { intPool.New = func() interface{} { return &big.Int{} } } // scaledValue scales given unscaled value from scale to new Scale and returns // an int64. It ALWAYS rounds up the result when scale down. The final result might // overflow. // // scale, newScale represents the scale of the unscaled decimal. // The mathematical value of the decimal is unscaled * 10**(-scale). func scaledValue(unscaled *big.Int, scale, newScale int) int64 { dif := scale - newScale if dif == 0 { return unscaled.Int64() } // Handle scale up // This is an easy case, we do not need to care about rounding and overflow. // If any intermediate operation causes overflow, the result will overflow. if dif < 0 { return unscaled.Int64() * int64(math.Pow10(-dif)) } // Handle scale down // We have to be careful about the intermediate operations. // fast path when unscaled < max.Int64 and exp(10,dif) < max.Int64 const log10MaxInt64 = 19 if unscaled.Cmp(maxInt64) < 0 && dif < log10MaxInt64 { divide := int64(math.Pow10(dif)) result := unscaled.Int64() / divide mod := unscaled.Int64() % divide if mod != 0 { return result + 1 } return result } // We should only convert back to int64 when getting the result. divisor := intPool.Get().(*big.Int) exp := intPool.Get().(*big.Int) result := intPool.Get().(*big.Int) defer func() { intPool.Put(divisor) intPool.Put(exp) intPool.Put(result) }() // divisor = 10^(dif) // TODO: create loop up table if exp costs too much. 
divisor.Exp(bigTen, exp.SetInt64(int64(dif)), nil) // reuse exp remainder := exp // result = unscaled / divisor // remainder = unscaled % divisor result.DivMod(unscaled, divisor, remainder) if remainder.Sign() != 0 { return result.Int64() + 1 } return result.Int64() } ================================================ FILE: vendor/github.com/appc/spec/schema/types/resource/suffix.go ================================================ /* Copyright 2014 The Kubernetes Authors All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package resource import ( "strconv" ) type suffix string // suffixer can interpret and construct suffixes. type suffixer interface { interpret(suffix) (base, exponent int32, fmt Format, ok bool) construct(base, exponent int32, fmt Format) (s suffix, ok bool) constructBytes(base, exponent int32, fmt Format) (s []byte, ok bool) } // quantitySuffixer handles suffixes for all three formats that quantity // can handle. 
var quantitySuffixer = newSuffixer()

// bePair is a (base, exponent) pair identifying the magnitude a suffix denotes.
type bePair struct {
	base, exponent int32
}

// listSuffixer maps between suffix strings and their (base, exponent) pairs,
// in both directions, plus a pre-converted []byte form for fast serialization.
type listSuffixer struct {
	suffixToBE      map[suffix]bePair
	beToSuffix      map[bePair]suffix
	beToSuffixBytes map[bePair][]byte
}

// addSuffix registers a suffix/pair mapping, lazily allocating the maps.
// A later registration for the same suffix or pair overwrites the earlier one.
func (ls *listSuffixer) addSuffix(s suffix, pair bePair) {
	if ls.suffixToBE == nil {
		ls.suffixToBE = map[suffix]bePair{}
	}
	if ls.beToSuffix == nil {
		ls.beToSuffix = map[bePair]suffix{}
	}
	if ls.beToSuffixBytes == nil {
		ls.beToSuffixBytes = map[bePair][]byte{}
	}
	ls.suffixToBE[s] = pair
	ls.beToSuffix[pair] = s
	ls.beToSuffixBytes[pair] = []byte(s)
}

// lookup resolves a suffix to its (base, exponent); ok is false if unknown.
func (ls *listSuffixer) lookup(s suffix) (base, exponent int32, ok bool) {
	pair, ok := ls.suffixToBE[s]
	if !ok {
		return 0, 0, false
	}
	return pair.base, pair.exponent, true
}

// construct resolves a (base, exponent) pair back to its suffix string.
func (ls *listSuffixer) construct(base, exponent int32) (s suffix, ok bool) {
	s, ok = ls.beToSuffix[bePair{base, exponent}]
	return
}

// constructBytes is construct returning the pre-converted []byte form.
func (ls *listSuffixer) constructBytes(base, exponent int32) (s []byte, ok bool) {
	s, ok = ls.beToSuffixBytes[bePair{base, exponent}]
	return
}

// suffixHandler implements suffixer over the decimal and binary suffix tables.
type suffixHandler struct {
	decSuffixes listSuffixer
	binSuffixes listSuffixer
}

// fastLookup wraps suffixHandler with a switch-based fast path for the most
// common decimal suffixes, avoiding map lookups.
type fastLookup struct {
	*suffixHandler
}

func (l fastLookup) interpret(s suffix) (base, exponent int32, format Format, ok bool) {
	switch s {
	case "":
		return 10, 0, DecimalSI, true
	case "n":
		return 10, -9, DecimalSI, true
	case "u":
		return 10, -6, DecimalSI, true
	case "m":
		return 10, -3, DecimalSI, true
	case "k":
		return 10, 3, DecimalSI, true
	case "M":
		return 10, 6, DecimalSI, true
	case "G":
		return 10, 9, DecimalSI, true
	}
	// Fall back to the full table lookup for everything else.
	return l.suffixHandler.interpret(s)
}

// newSuffixer builds the shared suffix tables used by quantitySuffixer.
func newSuffixer() suffixer {
	sh := &suffixHandler{}

	// IMPORTANT: if you change this section you must change fastLookup

	sh.binSuffixes.addSuffix("Ki", bePair{2, 10})
	sh.binSuffixes.addSuffix("Mi", bePair{2, 20})
	sh.binSuffixes.addSuffix("Gi", bePair{2, 30})
	sh.binSuffixes.addSuffix("Ti", bePair{2, 40})
	sh.binSuffixes.addSuffix("Pi", bePair{2, 50})
	sh.binSuffixes.addSuffix("Ei", bePair{2, 60})
	// Don't emit an error when trying to produce
	// a suffix for 2^0.
	// NOTE: this entry is later overwritten in suffixToBE by the {10, 0}
	// registration below; only the pair->suffix direction keeps {2, 0} -> "".
	sh.decSuffixes.addSuffix("", bePair{2, 0})

	sh.decSuffixes.addSuffix("n", bePair{10, -9})
	sh.decSuffixes.addSuffix("u", bePair{10, -6})
	sh.decSuffixes.addSuffix("m", bePair{10, -3})
	sh.decSuffixes.addSuffix("", bePair{10, 0})
	sh.decSuffixes.addSuffix("k", bePair{10, 3})
	sh.decSuffixes.addSuffix("M", bePair{10, 6})
	sh.decSuffixes.addSuffix("G", bePair{10, 9})
	sh.decSuffixes.addSuffix("T", bePair{10, 12})
	sh.decSuffixes.addSuffix("P", bePair{10, 15})
	sh.decSuffixes.addSuffix("E", bePair{10, 18})

	return fastLookup{sh}
}

func (sh *suffixHandler) construct(base, exponent int32, fmt Format) (s suffix, ok bool) {
	switch fmt {
	case DecimalSI:
		return sh.decSuffixes.construct(base, exponent)
	case BinarySI:
		return sh.binSuffixes.construct(base, exponent)
	case DecimalExponent:
		if base != 10 {
			return "", false
		}
		if exponent == 0 {
			return "", true
		}
		return suffix("e" + strconv.FormatInt(int64(exponent), 10)), true
	}
	return "", false
}

func (sh *suffixHandler) constructBytes(base, exponent int32, format Format) (s []byte, ok bool) {
	switch format {
	case DecimalSI:
		return sh.decSuffixes.constructBytes(base, exponent)
	case BinarySI:
		return sh.binSuffixes.constructBytes(base, exponent)
	case DecimalExponent:
		if base != 10 {
			return nil, false
		}
		if exponent == 0 {
			return nil, true
		}
		// Serialize "e<exponent>" into a small scratch buffer; if AppendInt
		// wrote in place (pointer identity check) no extra copy is needed.
		result := make([]byte, 8, 8)
		result[0] = 'e'
		number := strconv.AppendInt(result[1:1], int64(exponent), 10)
		if &result[1] == &number[0] {
			return result[:1+len(number)], true
		}
		result = append(result[:1], number...)
		return result, true
	}
	return nil, false
}

func (sh *suffixHandler) interpret(suffix suffix) (base, exponent int32, fmt Format, ok bool) {
	// Try lookup tables first
	if b, e, ok := sh.decSuffixes.lookup(suffix); ok {
		return b, e, DecimalSI, true
	}
	if b, e, ok := sh.binSuffixes.lookup(suffix); ok {
		return b, e, BinarySI, true
	}

	// Otherwise accept an explicit exponent of the form e<int> or E<int>.
	if len(suffix) > 1 && (suffix[0] == 'E' || suffix[0] == 'e') {
		parsed, err := strconv.ParseInt(string(suffix[1:]), 10, 64)
		if err != nil {
			return 0, 0, DecimalExponent, false
		}
		return 10, int32(parsed), DecimalExponent, true
	}

	return 0, 0, DecimalExponent, false
}

================================================
FILE: vendor/github.com/appc/spec/schema/types/semver.go
================================================
// Copyright 2015 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package types

import (
	"encoding/json"

	"github.com/coreos/go-semver/semver"
)

var (
	ErrNoZeroSemVer = ACVersionError("SemVer cannot be zero")
	ErrBadSemVer    = ACVersionError("SemVer is bad")
)

// SemVer implements the Unmarshaler interface to define a field that must be
// a semantic version string
// TODO(jonboulle): extend upstream instead of wrapping?
type SemVer semver.Version

// NewSemVer generates a new SemVer from a string. If the given string does
// not represent a valid SemVer, nil and an error are returned.
func NewSemVer(s string) (*SemVer, error) { nsv, err := semver.NewVersion(s) if err != nil { return nil, ErrBadSemVer } v := SemVer(*nsv) if v.Empty() { return nil, ErrNoZeroSemVer } return &v, nil } func (sv SemVer) LessThanMajor(versionB SemVer) bool { majorA := semver.Version(sv).Major majorB := semver.Version(versionB).Major if majorA < majorB { return true } return false } func (sv SemVer) LessThanExact(versionB SemVer) bool { vA := semver.Version(sv) vB := semver.Version(versionB) return vA.LessThan(vB) } func (sv SemVer) String() string { s := semver.Version(sv) return s.String() } func (sv SemVer) Empty() bool { return semver.Version(sv) == semver.Version{} } // UnmarshalJSON implements the json.Unmarshaler interface func (sv *SemVer) UnmarshalJSON(data []byte) error { var s string if err := json.Unmarshal(data, &s); err != nil { return err } v, err := NewSemVer(s) if err != nil { return err } *sv = *v return nil } // MarshalJSON implements the json.Marshaler interface func (sv SemVer) MarshalJSON() ([]byte, error) { if sv.Empty() { return nil, ErrNoZeroSemVer } return json.Marshal(sv.String()) } ================================================ FILE: vendor/github.com/appc/spec/schema/types/url.go ================================================ // Copyright 2015 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package types

import (
	"encoding/json"
	"fmt"
	"net/url"
)

// URL wraps url.URL to marshal/unmarshal to/from JSON strings and enforce
// that the scheme is HTTP/HTTPS only
type URL url.URL

// NewURL parses s into a URL, rejecting any scheme other than http or https.
func NewURL(s string) (*URL, error) {
	parsed, err := url.Parse(s)
	if err != nil {
		return nil, fmt.Errorf("bad URL: %v", err)
	}
	wrapped := URL(*parsed)
	if err := wrapped.assertValidScheme(); err != nil {
		return nil, err
	}
	return &wrapped, nil
}

// String renders the URL in its standard textual form.
func (u URL) String() string {
	raw := url.URL(u)
	return raw.String()
}

// assertValidScheme returns an error unless the scheme is http or https.
func (u URL) assertValidScheme() error {
	switch u.Scheme {
	case "http", "https":
		return nil
	}
	return fmt.Errorf("bad URL scheme, must be http/https")
}

func (u *URL) UnmarshalJSON(data []byte) error {
	var raw string
	if err := json.Unmarshal(data, &raw); err != nil {
		return err
	}
	parsed, err := NewURL(raw)
	if err != nil {
		return err
	}
	*u = *parsed
	return nil
}

func (u URL) MarshalJSON() ([]byte, error) {
	if err := u.assertValidScheme(); err != nil {
		return nil, err
	}
	return json.Marshal(u.String())
}

================================================
FILE: vendor/github.com/appc/spec/schema/types/user_annotations.go
================================================
// Copyright 2016 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package types // UserAnnotations are arbitrary key-value pairs, to be supplied and interpreted by the user type UserAnnotations map[string]string ================================================ FILE: vendor/github.com/appc/spec/schema/types/user_labels.go ================================================ // Copyright 2015 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package types // UserLabels are arbitrary key-value pairs, to be supplied and interpreted by the user type UserLabels map[string]string ================================================ FILE: vendor/github.com/appc/spec/schema/types/uuid.go ================================================ // Copyright 2015 The appc Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package types

import (
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"reflect"
	"strings"
)

var (
	ErrNoEmptyUUID = errors.New("UUID cannot be empty")
)

// UUID encodes an RFC4122-compliant UUID, marshaled to/from a string
// TODO(jonboulle): vendor a package for this?
// TODO(jonboulle): consider more flexibility in input string formats.
// Right now, we only accept:
//
//	"6733C088-A507-4694-AABF-EDBE4FC5266F"
//	"6733C088A5074694AABFEDBE4FC5266F"
type UUID [16]byte

// String renders the UUID in the canonical 8-4-4-4-12 hex form.
func (u UUID) String() string {
	return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:16])
}

// Set parses s and, on success, overwrites u with the result.
func (u *UUID) Set(s string) error {
	nu, err := NewUUID(s)
	if err == nil {
		*u = *nu
	}
	return err
}

// NewUUID generates a new UUID from the given string. If the string does not
// represent a valid UUID, nil and an error are returned.
func NewUUID(s string) (*UUID, error) {
	// Accept both dashed and undashed forms by stripping dashes first.
	s = strings.Replace(s, "-", "", -1)
	if len(s) != 32 {
		return nil, errors.New("bad UUID length != 32")
	}
	dec, err := hex.DecodeString(s)
	if err != nil {
		return nil, err
	}
	var u UUID
	copy(u[:], dec)
	return &u, nil
}

// Empty reports whether the UUID is all zero bytes.
func (u UUID) Empty() bool {
	return reflect.DeepEqual(u, UUID{})
}

// UnmarshalJSON implements the json.Unmarshaler interface.
func (u *UUID) UnmarshalJSON(data []byte) error {
	var s string
	if err := json.Unmarshal(data, &s); err != nil {
		return err
	}
	uu, err := NewUUID(s)
	if err != nil {
		// BUGFIX: check the parse error before using uu. The previous code
		// called uu.Empty() first; on malformed input NewUUID returns a nil
		// pointer, so that call dereferenced nil and panicked instead of
		// returning the parse error.
		return err
	}
	if uu.Empty() {
		return ErrNoEmptyUUID
	}
	*u = *uu
	return nil
}

// MarshalJSON implements the json.Marshaler interface.
func (u UUID) MarshalJSON() ([]byte, error) {
	if u.Empty() {
		return nil, ErrNoEmptyUUID
	}
	return json.Marshal(u.String())
}

================================================
FILE: vendor/github.com/appc/spec/schema/types/volume.go
================================================
// Copyright 2015 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package types

import (
	"encoding/json"
	"errors"
	"fmt"
	"net/url"
	"path/filepath"
	"strconv"
	"strings"

	"github.com/appc/spec/schema/common"
)

const (
	emptyVolumeDefaultMode = "0755"
	emptyVolumeDefaultUID  = 0
	emptyVolumeDefaultGID  = 0
)

// Volume encapsulates a volume which should be mounted into the filesystem
// of all apps in a PodManifest
type Volume struct {
	Name ACName `json:"name"`
	Kind string `json:"kind"`

	// currently used only by "host"
	// TODO(jonboulle): factor out?
	Source    string `json:"source,omitempty"`
	ReadOnly  *bool  `json:"readOnly,omitempty"`
	Recursive *bool  `json:"recursive,omitempty"`

	// currently used only by "empty"
	Mode *string `json:"mode,omitempty"`
	UID  *int    `json:"uid,omitempty"`
	GID  *int    `json:"gid,omitempty"`
}

// volume is an alias used to avoid infinite recursion when (un)marshaling.
type volume Volume

// assertValid enforces the per-kind field rules: "empty" volumes must carry
// mode/uid/gid and no source; "host" volumes must carry an absolute source
// and none of the "empty"-only fields.
func (v Volume) assertValid() error {
	if v.Name.Empty() {
		return errors.New("name must be set")
	}

	switch v.Kind {
	case "empty":
		if v.Source != "" {
			return errors.New("source for empty volume must be empty")
		}
		if v.Mode == nil {
			return errors.New("mode for empty volume must be set")
		}
		if v.UID == nil {
			return errors.New("uid for empty volume must be set")
		}
		if v.GID == nil {
			return errors.New("gid for empty volume must be set")
		}
		return nil
	case "host":
		if v.Source == "" {
			return errors.New("source for host volume cannot be empty")
		}
		if v.Mode != nil {
			return errors.New("mode for host volume cannot be set")
		}
		if v.UID != nil {
			return errors.New("uid for host volume cannot be set")
		}
		if v.GID != nil {
			return errors.New("gid for host volume cannot be set")
		}
		if !filepath.IsAbs(v.Source) {
			return errors.New("source for host volume must be absolute path")
		}
		return nil
	default:
		return errors.New(`unrecognized volume kind: should be one of "empty", "host"`)
	}
}

// UnmarshalJSON decodes, applies kind-specific defaults, then validates.
func (v *Volume) UnmarshalJSON(data []byte) error {
	var vv volume
	if err := json.Unmarshal(data, &vv); err != nil {
		return err
	}
	nv := Volume(vv)
	maybeSetDefaults(&nv)
	if err := nv.assertValid(); err != nil {
		return err
	}
	*v = nv
	return nil
}

// MarshalJSON validates before encoding so invalid volumes never serialize.
func (v Volume) MarshalJSON() ([]byte, error) {
	if err := v.assertValid(); err != nil {
		return nil, err
	}
	return json.Marshal(volume(v))
}

// String renders the volume in the command-line parameter form accepted by
// VolumeFromString. Default mode/uid/gid values for "empty" volumes are
// omitted. NOTE(review): for kind "empty" this dereferences Mode/UID/GID
// without nil checks; callers must only String() defaulted/validated volumes.
func (v Volume) String() string {
	s := []string{
		v.Name.String(),
		",kind=",
		v.Kind,
	}
	if v.Source != "" {
		s = append(s, ",source=")
		s = append(s, v.Source)
	}
	if v.ReadOnly != nil {
		s = append(s, ",readOnly=")
		s = append(s, strconv.FormatBool(*v.ReadOnly))
	}
	if v.Recursive != nil {
		s = append(s, ",recursive=")
		s = append(s, strconv.FormatBool(*v.Recursive))
	}
	switch v.Kind {
	case "empty":
		if *v.Mode != emptyVolumeDefaultMode {
			s = append(s, ",mode=")
			s = append(s, *v.Mode)
		}
		if *v.UID != emptyVolumeDefaultUID {
			s = append(s, ",uid=")
			s = append(s, strconv.Itoa(*v.UID))
		}
		if *v.GID != emptyVolumeDefaultGID {
			s = append(s, ",gid=")
			s = append(s, strconv.Itoa(*v.GID))
		}
	}
	return strings.Join(s, "")
}

// VolumeFromString takes a command line volume parameter and returns a volume
//
// Example volume parameters:
//
//	database,kind=host,source=/tmp,readOnly=true,recursive=true
func VolumeFromString(vp string) (*Volume, error) {
	// The leading positional segment is the name; prefix it so the whole
	// string parses as key=value pairs.
	vp = "name=" + vp
	vpQuery, err := common.MakeQueryString(vp)
	if err != nil {
		return nil, err
	}

	v, err := url.ParseQuery(vpQuery)
	if err != nil {
		return nil, err
	}
	return VolumeFromParams(v)
}

// VolumeFromParams builds a Volume from parsed key/value parameters,
// rejecting repeated keys and unknown parameter names, then defaults and
// validates the result.
func VolumeFromParams(params map[string][]string) (*Volume, error) {
	var vol Volume
	for key, val := range params {
		val := val
		if len(val) > 1 {
			return nil, fmt.Errorf("label %s with multiple values %q", key, val)
		}

		switch key {
		case "name":
			acn, err := NewACName(val[0])
			if err != nil {
				return nil, err
			}
			vol.Name = *acn
		case "kind":
			vol.Kind = val[0]
		case "source":
			vol.Source = val[0]
		case "readOnly":
			ro, err := strconv.ParseBool(val[0])
			if err != nil {
				return nil, err
			}
			vol.ReadOnly = &ro
		case "recursive":
			rec, err := strconv.ParseBool(val[0])
			if err != nil {
				return nil, err
			}
			vol.Recursive = &rec
		case "mode":
			vol.Mode = &val[0]
		case "uid":
			u, err := strconv.Atoi(val[0])
			if err != nil {
				return nil, err
			}
			vol.UID = &u
		case "gid":
			g, err := strconv.Atoi(val[0])
			if err != nil {
				return nil, err
			}
			vol.GID = &g
		default:
			return nil, fmt.Errorf("unknown volume parameter %q", key)
		}
	}

	maybeSetDefaults(&vol)
	if err := vol.assertValid(); err != nil {
		return nil, err
	}

	return &vol, nil
}

// maybeSetDefaults sets the correct default values for certain fields on a
// Volume if they are not already been set. These fields are not
// pre-populated on all Volumes as the Volume type is polymorphic.
func maybeSetDefaults(vol *Volume) {
	if vol.Kind == "empty" {
		if vol.Mode == nil {
			m := emptyVolumeDefaultMode
			vol.Mode = &m
		}
		if vol.UID == nil {
			u := emptyVolumeDefaultUID
			vol.UID = &u
		}
		if vol.GID == nil {
			g := emptyVolumeDefaultGID
			vol.GID = &g
		}
	}
}

================================================
FILE: vendor/github.com/appc/spec/schema/version.go
================================================
// Copyright 2015 The appc Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package schema

import (
	"github.com/appc/spec/schema/types"
)

const (
	// version represents the canonical version of the appc spec and tooling.
	// For now, the schema and tooling is coupled with the spec itself, so
	// this must be kept in sync with the VERSION file in the root of the repo.
	version string = "0.8.10"
)

var (
	// AppContainerVersion is the SemVer representation of version
	AppContainerVersion types.SemVer
)

// init parses the compile-time version constant; a malformed constant is a
// programmer error, hence the panic.
func init() {
	v, err := types.NewSemVer(version)
	if err != nil {
		panic(err)
	}
	AppContainerVersion = *v
}

================================================
FILE: vendor/github.com/coreos/go-semver/LICENSE
================================================

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. 
Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.

================================================
FILE: vendor/github.com/coreos/go-semver/example.go
================================================
package main

import (
	"fmt"
	"github.com/coreos/go-semver/semver"
	"os"
)

// main compares the two versions given as command-line arguments and prints
// whether the first is less than the second.
// NOTE(review): on a parse error this only prints the error and continues,
// so the Printf below can dereference a nil *Version — acceptable for an
// example program, but not a pattern to copy.
func main() {
	vA, err := semver.NewVersion(os.Args[1])
	if err != nil {
		fmt.Println(err.Error())
	}
	vB, err := semver.NewVersion(os.Args[2])
	if err != nil {
		fmt.Println(err.Error())
	}

	fmt.Printf("%s < %s == %t\n", vA, vB, vA.LessThan(*vB))
}

================================================
FILE: vendor/github.com/coreos/go-semver/semver/semver.go
================================================
// Copyright 2013-2015 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Semantic Versions http://semver.org package semver import ( "bytes" "errors" "fmt" "strconv" "strings" ) type Version struct { Major int64 Minor int64 Patch int64 PreRelease PreRelease Metadata string } type PreRelease string func splitOff(input *string, delim string) (val string) { parts := strings.SplitN(*input, delim, 2) if len(parts) == 2 { *input = parts[0] val = parts[1] } return val } func NewVersion(version string) (*Version, error) { v := Version{} v.Metadata = splitOff(&version, "+") v.PreRelease = PreRelease(splitOff(&version, "-")) dotParts := strings.SplitN(version, ".", 3) if len(dotParts) != 3 { return nil, errors.New(fmt.Sprintf("%s is not in dotted-tri format", version)) } parsed := make([]int64, 3, 3) for i, v := range dotParts[:3] { val, err := strconv.ParseInt(v, 10, 64) parsed[i] = val if err != nil { return nil, err } } v.Major = parsed[0] v.Minor = parsed[1] v.Patch = parsed[2] return &v, nil } func Must(v *Version, err error) *Version { if err != nil { panic(err) } return v } func (v Version) String() string { var buffer bytes.Buffer fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch) if v.PreRelease != "" { fmt.Fprintf(&buffer, "-%s", v.PreRelease) } if v.Metadata != "" { fmt.Fprintf(&buffer, "+%s", v.Metadata) } return buffer.String() } func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error { var data string if err := unmarshal(&data); err != nil { return err } vv, err := NewVersion(data) if err != nil { return err } *v = *vv return nil } func (v Version) MarshalJSON() ([]byte, error) { return []byte(`"` + v.String() + `"`), nil } func (v *Version) UnmarshalJSON(data []byte) error { l := len(data) if l == 0 || string(data) == `""` { return nil } if l < 2 || data[0] != '"' || data[l-1] != '"' { return errors.New("invalid semver string") } vv, err := NewVersion(string(data[1 : l-1])) if err != nil { return err } *v = *vv return nil } func (v Version) LessThan(versionB Version) bool { versionA := v cmp := 
recursiveCompare(versionA.Slice(), versionB.Slice()) if cmp == 0 { cmp = preReleaseCompare(versionA, versionB) } if cmp == -1 { return true } return false } /* Slice converts the comparable parts of the semver into a slice of strings */ func (v Version) Slice() []int64 { return []int64{v.Major, v.Minor, v.Patch} } func (p PreRelease) Slice() []string { preRelease := string(p) return strings.Split(preRelease, ".") } func preReleaseCompare(versionA Version, versionB Version) int { a := versionA.PreRelease b := versionB.PreRelease /* Handle the case where if two versions are otherwise equal it is the * one without a PreRelease that is greater */ if len(a) == 0 && (len(b) > 0) { return 1 } else if len(b) == 0 && (len(a) > 0) { return -1 } // If there is a prelease, check and compare each part. return recursivePreReleaseCompare(a.Slice(), b.Slice()) } func recursiveCompare(versionA []int64, versionB []int64) int { if len(versionA) == 0 { return 0 } a := versionA[0] b := versionB[0] if a > b { return 1 } else if a < b { return -1 } return recursiveCompare(versionA[1:], versionB[1:]) } func recursivePreReleaseCompare(versionA []string, versionB []string) int { // Handle slice length disparity. if len(versionA) == 0 { // Nothing to compare too, so we return 0 return 0 } else if len(versionB) == 0 { // We're longer than versionB so return 1. 
return 1 } a := versionA[0] b := versionB[0] aInt := false bInt := false aI, err := strconv.Atoi(versionA[0]) if err == nil { aInt = true } bI, err := strconv.Atoi(versionB[0]) if err == nil { bInt = true } // Handle Integer Comparison if aInt && bInt { if aI > bI { return 1 } else if aI < bI { return -1 } } // Handle String Comparison if a > b { return 1 } else if a < b { return -1 } return recursivePreReleaseCompare(versionA[1:], versionB[1:]) } // BumpMajor increments the Major field by 1 and resets all other fields to their default values func (v *Version) BumpMajor() { v.Major += 1 v.Minor = 0 v.Patch = 0 v.PreRelease = PreRelease("") v.Metadata = "" } // BumpMinor increments the Minor field by 1 and resets all other fields to their default values func (v *Version) BumpMinor() { v.Minor += 1 v.Patch = 0 v.PreRelease = PreRelease("") v.Metadata = "" } // BumpPatch increments the Patch field by 1 and resets all other fields to their default values func (v *Version) BumpPatch() { v.Patch += 1 v.PreRelease = PreRelease("") v.Metadata = "" } ================================================ FILE: vendor/github.com/coreos/go-semver/semver/sort.go ================================================ // Copyright 2013-2015 CoreOS, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package semver import ( "sort" ) type Versions []*Version func (s Versions) Len() int { return len(s) } func (s Versions) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s Versions) Less(i, j int) bool { return s[i].LessThan(*s[j]) } // Sort sorts the given slice of Version func Sort(versions []*Version) { sort.Sort(Versions(versions)) } ================================================ FILE: vendor/github.com/coreos/ioprogress/LICENSE ================================================ The MIT License (MIT) Copyright (c) 2014 Mitchell Hashimoto Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: vendor/github.com/coreos/ioprogress/draw.go ================================================ package ioprogress import ( "fmt" "io" "os" "strings" "golang.org/x/crypto/ssh/terminal" ) // DrawFunc is the callback type for drawing progress. 
type DrawFunc func(int64, int64) error // DrawTextFormatFunc is a callback used by DrawFuncs that draw text in // order to format the text into some more human friendly format. type DrawTextFormatFunc func(int64, int64) string var defaultDrawFunc DrawFunc func init() { defaultDrawFunc = DrawTerminal(os.Stdout) } // isTerminal returns True when w is going to a tty, and false otherwise. func isTerminal(w io.Writer) bool { if f, ok := w.(*os.File); ok { return terminal.IsTerminal(int(f.Fd())) } return false } // DrawTerminal returns a DrawFunc that draws a progress bar to an io.Writer // that is assumed to be a terminal (and therefore respects carriage returns). func DrawTerminal(w io.Writer) DrawFunc { return DrawTerminalf(w, func(progress, total int64) string { return fmt.Sprintf("%d/%d", progress, total) }) } // DrawTerminalf returns a DrawFunc that draws a progress bar to an io.Writer // that is formatted with the given formatting function. func DrawTerminalf(w io.Writer, f DrawTextFormatFunc) DrawFunc { var maxLength int return func(progress, total int64) error { if progress == -1 && total == -1 { _, err := fmt.Fprintf(w, "\n") return err } // Make sure we pad it to the max length we've ever drawn so that // we don't have trailing characters. line := f(progress, total) if len(line) < maxLength { line = fmt.Sprintf( "%s%s", line, strings.Repeat(" ", maxLength-len(line))) } maxLength = len(line) terminate := "\r" if !isTerminal(w) { terminate = "\n" } _, err := fmt.Fprint(w, line+terminate) return err } } var byteUnits = []string{"B", "KB", "MB", "GB", "TB", "PB"} // DrawTextFormatBytes is a DrawTextFormatFunc that formats the progress // and total into human-friendly byte formats. func DrawTextFormatBytes(progress, total int64) string { return fmt.Sprintf("%s/%s", ByteUnitStr(progress), ByteUnitStr(total)) } // DrawTextFormatBar returns a DrawTextFormatFunc that draws a progress // bar with the given width (in characters). 
This can be used in conjunction // with another DrawTextFormatFunc to create a progress bar with bytes, for // example: // // bar := DrawTextFormatBar(20) // func(progress, total int64) string { // return fmt.Sprintf( // "%s %s", // bar(progress, total), // DrawTextFormatBytes(progress, total)) // } // func DrawTextFormatBar(width int64) DrawTextFormatFunc { return DrawTextFormatBarForW(width, nil) } // DrawTextFormatBarForW returns a DrawTextFormatFunc as described in the docs // for DrawTextFormatBar, however if the io.Writer passed in is not a tty then // the returned function will always return "". func DrawTextFormatBarForW(width int64, w io.Writer) DrawTextFormatFunc { if w != nil && !isTerminal(w) { return func(progress, total int64) string { return "" } } width -= 2 return func(progress, total int64) string { current := int64((float64(progress) / float64(total)) * float64(width)) if current < 0 || current > width { return fmt.Sprintf("[%s]", strings.Repeat(" ", int(width))) } return fmt.Sprintf( "[%s%s]", strings.Repeat("=", int(current)), strings.Repeat(" ", int(width-current))) } } // ByteUnitStr pretty prints a number of bytes. func ByteUnitStr(n int64) string { var unit string size := float64(n) for i := 1; i < len(byteUnits); i++ { if size < 1000 { unit = byteUnits[i-1] break } size = size / 1000 } return fmt.Sprintf("%.3g %s", size, unit) } ================================================ FILE: vendor/github.com/coreos/ioprogress/reader.go ================================================ package ioprogress import ( "io" "time" ) // Reader is an implementation of io.Reader that draws the progress of // reading some data. type Reader struct { // Reader is the underlying reader to read from Reader io.Reader // Size is the total size of the data coming out of the reader. Size int64 // DrawFunc is the callback to invoke to draw the progress bar. By // default, this will be DrawTerminal(os.Stdout). 
// // DrawInterval is the minimum time to wait between reads to update the // progress bar. DrawFunc DrawFunc DrawInterval time.Duration progress int64 lastDraw time.Time } // Read reads from the underlying reader and invokes the DrawFunc if // appropriate. The DrawFunc is executed when there is data that is // read (progress is made) and at least DrawInterval time has passed. func (r *Reader) Read(p []byte) (int, error) { // If we haven't drawn before, initialize the progress bar if r.lastDraw.IsZero() { r.initProgress() } // Read from the underlying source n, err := r.Reader.Read(p) // Always increment the progress even if there was an error r.progress += int64(n) // If we don't have any errors, then draw the progress. If we are // at the end of the data, then finish the progress. if err == nil { // Only draw if we read data or we've never read data before (to // initialize the progress bar). if n > 0 { r.drawProgress() } } if err == io.EOF { r.finishProgress() } return n, err } func (r *Reader) drawProgress() { // If we've drawn before, then make sure that the draw interval // has passed before we draw again. 
interval := r.DrawInterval if interval == 0 { interval = time.Second } if !r.lastDraw.IsZero() { nextDraw := r.lastDraw.Add(interval) if time.Now().Before(nextDraw) { return } } // Draw f := r.drawFunc() f(r.progress, r.Size) // Record this draw so that we don't draw again really quickly r.lastDraw = time.Now() } func (r *Reader) finishProgress() { f := r.drawFunc() f(r.progress, r.Size) // Print a newline f(-1, -1) // Reset lastDraw so we don't finish again var zeroDraw time.Time r.lastDraw = zeroDraw } func (r *Reader) initProgress() { var zeroDraw time.Time r.lastDraw = zeroDraw r.drawProgress() r.lastDraw = zeroDraw } func (r *Reader) drawFunc() DrawFunc { if r.DrawFunc == nil { return defaultDrawFunc } return r.DrawFunc } ================================================ FILE: vendor/github.com/coreos/pkg/LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. 
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." 
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: vendor/github.com/coreos/pkg/NOTICE ================================================ CoreOS Project Copyright 2014 CoreOS, Inc This product includes software developed at CoreOS, Inc. (http://www.coreos.com/). ================================================ FILE: vendor/github.com/coreos/pkg/progressutil/iocopy.go ================================================ // Copyright 2016 CoreOS Inc // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
package progressutil

import (
	"errors"
	"fmt"
	"io"
	"sync"
	"time"
)

var (
	ErrAlreadyStarted = errors.New("cannot add copies after PrintAndWait has been called")
)

// copyReader wraps an io.Reader and reports cumulative bytes read into an
// associated ProgressBar.
type copyReader struct {
	reader  io.Reader
	current int64 // bytes read so far
	total   int64 // expected total; 0 means unknown
	pb      *ProgressBar
}

// Read delegates to the wrapped reader, counts the bytes (even on error),
// and refreshes the progress bar. A progress-bar error is surfaced only
// when the underlying read itself succeeded.
func (cr *copyReader) Read(p []byte) (int, error) {
	n, err := cr.reader.Read(p)
	cr.current += int64(n)
	err1 := cr.updateProgressBar()
	if err == nil {
		err = err1
	}
	return n, err
}

// updateProgressBar pushes the current byte counts into the bar's "after"
// label and sets its fractional progress, clamping at 1 in case total was
// an underestimate.
func (cr *copyReader) updateProgressBar() error {
	cr.pb.SetPrintAfter(cr.formattedProgress())

	progress := float64(cr.current) / float64(cr.total)
	if progress > 1 {
		progress = 1
	}
	return cr.pb.SetCurrentProgress(progress)
}

// NewCopyProgressPrinter returns a new CopyProgressPrinter
func NewCopyProgressPrinter() *CopyProgressPrinter {
	return &CopyProgressPrinter{results: make(chan error), cancel: make(chan struct{})}
}

// CopyProgressPrinter will perform an arbitrary number of io.Copy calls, while
// continually printing the progress of each copy.
type CopyProgressPrinter struct {
	results chan error    // one completion error (possibly nil) per added copy
	cancel  chan struct{} // closed by PrintAndWait to release copy goroutines

	// `lock` mutex protects all fields below it in CopyProgressPrinter struct
	lock    sync.Mutex
	readers []*copyReader
	started bool
	pbp     *ProgressBarPrinter
}

// AddCopy adds a copy for this CopyProgressPrinter to perform. An io.Copy call
// will be made to copy bytes from reader to dest, and name and size will be
// used to label the progress bar and display how much progress has been made.
// If size is 0, the total size of the reader is assumed to be unknown.
// AddCopy can only be called before PrintAndWait; otherwise, ErrAlreadyStarted
// will be returned.
func (cpp *CopyProgressPrinter) AddCopy(reader io.Reader, name string, size int64, dest io.Writer) error { cpp.lock.Lock() defer cpp.lock.Unlock() if cpp.started { return ErrAlreadyStarted } if cpp.pbp == nil { cpp.pbp = &ProgressBarPrinter{} cpp.pbp.PadToBeEven = true } cr := ©Reader{ reader: reader, current: 0, total: size, pb: cpp.pbp.AddProgressBar(), } cr.pb.SetPrintBefore(name) cr.pb.SetPrintAfter(cr.formattedProgress()) cpp.readers = append(cpp.readers, cr) go func() { _, err := io.Copy(dest, cr) select { case <-cpp.cancel: return case cpp.results <- err: return } }() return nil } // PrintAndWait will print the progress for each copy operation added with // AddCopy to printTo every printInterval. This will continue until every added // copy is finished, or until cancel is written to. // PrintAndWait may only be called once; any subsequent calls will immediately // return ErrAlreadyStarted. After PrintAndWait has been called, no more // copies may be added to the CopyProgressPrinter. func (cpp *CopyProgressPrinter) PrintAndWait(printTo io.Writer, printInterval time.Duration, cancel chan struct{}) error { cpp.lock.Lock() if cpp.started { cpp.lock.Unlock() return ErrAlreadyStarted } cpp.started = true cpp.lock.Unlock() n := len(cpp.readers) if n == 0 { // Nothing to do. return nil } defer close(cpp.cancel) t := time.NewTicker(printInterval) allDone := false for i := 0; i < n; { select { case <-cancel: return nil case <-t.C: _, err := cpp.pbp.Print(printTo) if err != nil { return err } case err := <-cpp.results: i++ // Once completion is signaled, further on this just drains // (unlikely) errors from the channel. if err == nil && !allDone { allDone, err = cpp.pbp.Print(printTo) } if err != nil { return err } } } return nil } func (cr *copyReader) formattedProgress() string { var totalStr string if cr.total == 0 { totalStr = "?" 
} else { totalStr = ByteUnitStr(cr.total) } return fmt.Sprintf("%s / %s", ByteUnitStr(cr.current), totalStr) } var byteUnits = []string{"B", "KB", "MB", "GB", "TB", "PB"} // ByteUnitStr pretty prints a number of bytes. func ByteUnitStr(n int64) string { var unit string size := float64(n) for i := 1; i < len(byteUnits); i++ { if size < 1000 { unit = byteUnits[i-1] break } size = size / 1000 } return fmt.Sprintf("%.3g %s", size, unit) } ================================================ FILE: vendor/github.com/coreos/pkg/progressutil/progressbar.go ================================================ // Copyright 2016 CoreOS Inc // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package progressutil import ( "fmt" "io" "os" "strings" "sync" "golang.org/x/crypto/ssh/terminal" ) var ( // ErrorProgressOutOfBounds is returned if the progress is set to a value // not between 0 and 1. ErrorProgressOutOfBounds = fmt.Errorf("progress is out of bounds (0 to 1)") // ErrorNoBarsAdded is returned when no progress bars have been added to a // ProgressBarPrinter before PrintAndWait is called. ErrorNoBarsAdded = fmt.Errorf("AddProgressBar hasn't been called yet") ) // ProgressBar represents one progress bar in a ProgressBarPrinter. Should not // be created directly, use the AddProgressBar on a ProgressBarPrinter to // create these. 
type ProgressBar struct { lock sync.Mutex currentProgress float64 printBefore string printAfter string done bool } func (pb *ProgressBar) clone() *ProgressBar { pb.lock.Lock() pbClone := &ProgressBar{ currentProgress: pb.currentProgress, printBefore: pb.printBefore, printAfter: pb.printAfter, done: pb.done, } pb.lock.Unlock() return pbClone } func (pb *ProgressBar) GetCurrentProgress() float64 { pb.lock.Lock() val := pb.currentProgress pb.lock.Unlock() return val } // SetCurrentProgress sets the progress of this ProgressBar. The progress must // be between 0 and 1 inclusive. func (pb *ProgressBar) SetCurrentProgress(progress float64) error { if progress < 0 || progress > 1 { return ErrorProgressOutOfBounds } pb.lock.Lock() pb.currentProgress = progress pb.lock.Unlock() return nil } // GetDone returns whether or not this progress bar is done func (pb *ProgressBar) GetDone() bool { pb.lock.Lock() val := pb.done pb.lock.Unlock() return val } // SetDone sets whether or not this progress bar is done func (pb *ProgressBar) SetDone(val bool) { pb.lock.Lock() pb.done = val pb.lock.Unlock() } // GetPrintBefore gets the text printed on the line before the progress bar. func (pb *ProgressBar) GetPrintBefore() string { pb.lock.Lock() val := pb.printBefore pb.lock.Unlock() return val } // SetPrintBefore sets the text printed on the line before the progress bar. func (pb *ProgressBar) SetPrintBefore(before string) { pb.lock.Lock() pb.printBefore = before pb.lock.Unlock() } // GetPrintAfter gets the text printed on the line after the progress bar. func (pb *ProgressBar) GetPrintAfter() string { pb.lock.Lock() val := pb.printAfter pb.lock.Unlock() return val } // SetPrintAfter sets the text printed on the line after the progress bar. func (pb *ProgressBar) SetPrintAfter(after string) { pb.lock.Lock() pb.printAfter = after pb.lock.Unlock() } // ProgressBarPrinter will print out the progress of some number of // ProgressBars. 
type ProgressBarPrinter struct { lock sync.Mutex // DisplayWidth can be set to influence how large the progress bars are. // The bars will be scaled to attempt to produce lines of this number of // characters, but lines of different lengths may still be printed. When // this value is 0 (aka unset), 80 character columns are assumed. DisplayWidth int // PadToBeEven, when set to true, will make Print pad the printBefore text // with trailing spaces and the printAfter text with leading spaces to make // the progress bars the same length. PadToBeEven bool numLinesInLastPrint int progressBars []*ProgressBar maxBefore int maxAfter int } // AddProgressBar will create a new ProgressBar, register it with this // ProgressBarPrinter, and return it. This must be called at least once before // PrintAndWait is called. func (pbp *ProgressBarPrinter) AddProgressBar() *ProgressBar { pb := &ProgressBar{} pbp.lock.Lock() pbp.progressBars = append(pbp.progressBars, pb) pbp.lock.Unlock() return pb } // Print will print out progress information for each ProgressBar that has been // added to this ProgressBarPrinter. The progress will be written to printTo, // and if printTo is a terminal it will draw progress bars. AddProgressBar // must be called at least once before Print is called. If printing to a // terminal, all draws after the first one will move the cursor up to draw over // the previously printed bars. 
func (pbp *ProgressBarPrinter) Print(printTo io.Writer) (bool, error) { pbp.lock.Lock() var bars []*ProgressBar for _, bar := range pbp.progressBars { bars = append(bars, bar.clone()) } numColumns := pbp.DisplayWidth pbp.lock.Unlock() if len(bars) == 0 { return false, ErrorNoBarsAdded } if numColumns == 0 { numColumns = 80 } if isTerminal(printTo) { moveCursorUp(printTo, pbp.numLinesInLastPrint) } for _, bar := range bars { beforeSize := len(bar.GetPrintBefore()) afterSize := len(bar.GetPrintAfter()) if beforeSize > pbp.maxBefore { pbp.maxBefore = beforeSize } if afterSize > pbp.maxAfter { pbp.maxAfter = afterSize } } allDone := true for _, bar := range bars { if isTerminal(printTo) { bar.printToTerminal(printTo, numColumns, pbp.PadToBeEven, pbp.maxBefore, pbp.maxAfter) } else { bar.printToNonTerminal(printTo) } allDone = allDone && bar.GetCurrentProgress() == 1 } pbp.numLinesInLastPrint = len(bars) return allDone, nil } // moveCursorUp moves the cursor up numLines in the terminal func moveCursorUp(printTo io.Writer, numLines int) { if numLines > 0 { fmt.Fprintf(printTo, "\033[%dA", numLines) } } func (pb *ProgressBar) printToTerminal(printTo io.Writer, numColumns int, padding bool, maxBefore, maxAfter int) { before := pb.GetPrintBefore() after := pb.GetPrintAfter() if padding { before = before + strings.Repeat(" ", maxBefore-len(before)) after = strings.Repeat(" ", maxAfter-len(after)) + after } progressBarSize := numColumns - (len(fmt.Sprintf("%s [] %s", before, after))) progressBar := "" if progressBarSize > 0 { currentProgress := int(pb.GetCurrentProgress() * float64(progressBarSize)) progressBar = fmt.Sprintf("[%s%s] ", strings.Repeat("=", currentProgress), strings.Repeat(" ", progressBarSize-currentProgress)) } else { // If we can't fit the progress bar, better to not pad the before/after. 
before = pb.GetPrintBefore() after = pb.GetPrintAfter() } fmt.Fprintf(printTo, "%s %s%s\n", before, progressBar, after) } func (pb *ProgressBar) printToNonTerminal(printTo io.Writer) { if !pb.GetDone() { fmt.Fprintf(printTo, "%s %s\n", pb.printBefore, pb.printAfter) if pb.GetCurrentProgress() == 1 { pb.SetDone(true) } } } // isTerminal returns True when w is going to a tty, and false otherwise. func isTerminal(w io.Writer) bool { if f, ok := w.(*os.File); ok { return terminal.IsTerminal(int(f.Fd())) } return false } ================================================ FILE: vendor/github.com/docker/distribution/LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. 
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. 
Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.


================================================
FILE: vendor/github.com/docker/distribution/blobs.go
================================================
package distribution

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net/http"
	"time"

	"github.com/docker/distribution/reference"
	"github.com/opencontainers/go-digest"
)

var (
	// ErrBlobExists returned when blob already exists
	ErrBlobExists = errors.New("blob exists")

	// ErrBlobDigestUnsupported when blob digest is an unsupported version.
	ErrBlobDigestUnsupported = errors.New("unsupported blob digest")

	// ErrBlobUnknown when blob is not found.
	ErrBlobUnknown = errors.New("unknown blob")

	// ErrBlobUploadUnknown returned when upload is not found.
	ErrBlobUploadUnknown = errors.New("blob upload unknown")

	// ErrBlobInvalidLength returned when the blob has an expected length on
	// commit, meaning mismatched with the descriptor or an invalid value.
	ErrBlobInvalidLength = errors.New("blob invalid length")
)

// ErrBlobInvalidDigest returned when digest check fails.
type ErrBlobInvalidDigest struct {
	Digest digest.Digest
	Reason error
}

// Error implements the error interface.
func (err ErrBlobInvalidDigest) Error() string {
	return fmt.Sprintf("invalid digest for referenced layer: %v, %v",
		err.Digest, err.Reason)
}

// ErrBlobMounted returned when a blob is mounted from another repository
// instead of initiating an upload session.
type ErrBlobMounted struct {
	From       reference.Canonical
	Descriptor Descriptor
}

// Error implements the error interface.
func (err ErrBlobMounted) Error() string {
	return fmt.Sprintf("blob mounted from: %v to: %v", err.From, err.Descriptor)
}

// Descriptor describes targeted content. Used in conjunction with a blob
// store, a descriptor can be used to fetch, store and target any kind of
// blob. The struct also describes the wire protocol format. Fields should
// only be added but never changed.
type Descriptor struct {
	// MediaType describe the type of the content. All text based formats are
	// encoded as utf-8.
	MediaType string `json:"mediaType,omitempty"`

	// Size in bytes of content.
	Size int64 `json:"size,omitempty"`

	// Digest uniquely identifies the content. A byte stream can be verified
	// against this digest.
	Digest digest.Digest `json:"digest,omitempty"`

	// URLs contains the source URLs of this content.
	URLs []string `json:"urls,omitempty"`

	// NOTE: Before adding a field here, please ensure that all
	// other options have been exhausted. Much of the type relationships
	// depend on the simplicity of this type.
}

// Descriptor returns the descriptor, to make it satisfy the Describable
// interface. Note that implementations of Describable are generally objects
// which can be described, not simply descriptors; this exception is in place
// to make it more convenient to pass actual descriptors to functions that
// expect Describable objects.
func (d Descriptor) Descriptor() Descriptor {
	return d
}

// BlobStatter makes blob descriptors available by digest. The service may
// provide a descriptor of a different digest if the provided digest is not
// canonical.
type BlobStatter interface {
	// Stat provides metadata about a blob identified by the digest. If the
	// blob is unknown to the describer, ErrBlobUnknown will be returned.
	Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error)
}

// BlobDeleter enables deleting blobs from storage.
type BlobDeleter interface {
	Delete(ctx context.Context, dgst digest.Digest) error
}

// BlobEnumerator enables iterating over blobs from storage
type BlobEnumerator interface {
	Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error
}

// BlobDescriptorService manages metadata about a blob by digest. Most
// implementations will not expose such an interface explicitly. Such mappings
// should be maintained by interacting with the BlobIngester. Hence, this is
// left off of BlobService and BlobStore.
type BlobDescriptorService interface {
	BlobStatter

	// SetDescriptor assigns the descriptor to the digest. The provided digest and
	// the digest in the descriptor must map to identical content but they may
	// differ on their algorithm. The descriptor must have the canonical
	// digest of the content and the digest algorithm must match the
	// annotator's canonical algorithm.
	//
	// Such a facility can be used to map blobs between digest domains, with
	// the restriction that the algorithm of the descriptor must match the
	// canonical algorithm (ie sha256) of the annotator.
	SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error

	// Clear enables descriptors to be unlinked
	Clear(ctx context.Context, dgst digest.Digest) error
}

// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService.
type BlobDescriptorServiceFactory interface {
	BlobAccessController(svc BlobDescriptorService) BlobDescriptorService
}

// ReadSeekCloser is the primary reader type for blob data, combining
// io.ReadSeeker with io.Closer.
type ReadSeekCloser interface {
	io.ReadSeeker
	io.Closer
}

// BlobProvider describes operations for getting blob data.
type BlobProvider interface {
	// Get returns the entire blob identified by digest along with the descriptor.
	Get(ctx context.Context, dgst digest.Digest) ([]byte, error)

	// Open provides a ReadSeekCloser to the blob identified by the provided
	// descriptor. If the blob is not known to the service, an error will be
	// returned.
	Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error)
}

// BlobServer can serve blobs via http.
type BlobServer interface {
	// ServeBlob attempts to serve the blob, identified by dgst, via http. The
	// service may decide to redirect the client elsewhere or serve the data
	// directly.
	//
	// This handler only issues successful responses, such as 2xx or 3xx,
	// meaning it serves data or issues a redirect. If the blob is not
	// available, an error will be returned and the caller may still issue a
	// response.
	//
	// The implementation may serve the same blob from a different digest
	// domain. The appropriate headers will be set for the blob, unless they
	// have already been set by the caller.
	ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error
}

// BlobIngester ingests blob data.
type BlobIngester interface {
	// Put inserts the content p into the blob service, returning a descriptor
	// or an error.
	Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error)

	// Create allocates a new blob writer to add a blob to this service. The
	// returned handle can be written to and later resumed using an opaque
	// identifier. With this approach, one can Close and Resume a BlobWriter
	// multiple times until the BlobWriter is committed or cancelled.
	Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error)

	// Resume attempts to resume a write to a blob, identified by an id.
	Resume(ctx context.Context, id string) (BlobWriter, error)
}

// BlobCreateOption is a general extensible function argument for blob creation
// methods. A BlobIngester may choose to honor any or none of the given
// BlobCreateOptions, which can be specific to the implementation of the
// BlobIngester receiving them.
// TODO (brianbland): unify this with ManifestServiceOption in the future
type BlobCreateOption interface {
	Apply(interface{}) error
}

// CreateOptions is a collection of blob creation modifiers relevant to general
// blob storage intended to be configured by the BlobCreateOption.Apply method.
type CreateOptions struct {
	Mount struct {
		ShouldMount bool
		From        reference.Canonical
		// Stat allows to pass precalculated descriptor to link and return.
		// Blob access check will be skipped if set.
		Stat *Descriptor
	}
}

// BlobWriter provides a handle for inserting data into a blob store.
// Instances should be obtained from BlobWriteService.Writer and
// BlobWriteService.Resume. If supported by the store, a writer can be
// recovered with the id.
type BlobWriter interface {
	io.WriteCloser
	io.ReaderFrom

	// Size returns the number of bytes written to this blob.
	Size() int64

	// ID returns the identifier for this writer. The ID can be used with the
	// Blob service to later resume the write.
	ID() string

	// StartedAt returns the time this blob write was started.
	StartedAt() time.Time

	// Commit completes the blob writer process. The content is verified
	// against the provided provisional descriptor, which may result in an
	// error. Depending on the implementation, written data may be validated
	// against the provisional descriptor fields. If MediaType is not present,
	// the implementation may reject the commit or assign "application/octet-
	// stream" to the blob. The returned descriptor may have a different
	// digest depending on the blob store, referred to as the canonical
	// descriptor.
	Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error)

	// Cancel ends the blob write without storing any data and frees any
	// associated resources. Any data written thus far will be lost. Cancel
	// implementations should allow multiple calls even after a commit that
	// result in a no-op. This allows use of Cancel in a defer statement,
	// increasing the assurance that it is correctly called.
	Cancel(ctx context.Context) error
}

// BlobService combines the operations to access, read and write blobs. This
// can be used to describe remote blob services.
type BlobService interface {
	BlobStatter
	BlobProvider
	BlobIngester
}

// BlobStore represent the entire suite of blob related operations. Such an
// implementation can access, read, write, delete and serve blobs.
type BlobStore interface {
	BlobService
	BlobServer
	BlobDeleter
}


================================================
FILE: vendor/github.com/docker/distribution/digestset/set.go
================================================
package digestset

import (
	"errors"
	"sort"
	"strings"
	"sync"

	digest "github.com/opencontainers/go-digest"
)

var (
	// ErrDigestNotFound is used when a matching digest
	// could not be found in a set.
	ErrDigestNotFound = errors.New("digest not found")

	// ErrDigestAmbiguous is used when multiple digests
	// are found in a set. None of the matching digests
	// should be considered valid matches.
	ErrDigestAmbiguous = errors.New("ambiguous digest string")
)

// Set is used to hold a unique set of digests which
// may be easily referenced by a string
// representation of the digest as well as short representation.
// The uniqueness of the short representation is based on other
// digests in the set. If digests are omitted from this set,
// collisions in a larger set may not be detected, therefore it
// is important to always do short representation lookups on
// the complete set of digests. To mitigate collisions, an
// appropriately long short code should be used.
type Set struct {
	mutex   sync.RWMutex
	entries digestEntries
}

// NewSet creates an empty set of digests
// which may have digests added.
func NewSet() *Set {
	return &Set{
		entries: digestEntries{},
	}
}

// checkShortMatch checks whether two digests match as either whole
// values or short values.
// This function does not test equality,
// rather whether the second value could match against the first
// value.
func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool {
	if len(hex) == len(shortHex) {
		// Same length: only an exact match (and matching algorithm, when one
		// was supplied) counts.
		if hex != shortHex {
			return false
		}
		if len(shortAlg) > 0 && string(alg) != shortAlg {
			return false
		}
	} else if !strings.HasPrefix(hex, shortHex) {
		return false
	} else if len(shortAlg) > 0 && string(alg) != shortAlg {
		return false
	}
	return true
}

// Lookup looks for a digest matching the given string representation.
// If no digests could be found ErrDigestNotFound will be returned
// with an empty digest value. If multiple matches are found
// ErrDigestAmbiguous will be returned with an empty digest value.
func (dst *Set) Lookup(d string) (digest.Digest, error) {
	dst.mutex.RLock()
	defer dst.mutex.RUnlock()
	if len(dst.entries) == 0 {
		return "", ErrDigestNotFound
	}
	var (
		searchFunc func(int) bool
		alg        digest.Algorithm
		hex        string
	)
	dgst, err := digest.Parse(d)
	if err == digest.ErrDigestInvalidFormat {
		// Not a fully-qualified digest: treat the input as a (possibly short)
		// hex prefix with no algorithm constraint.
		hex = d
		searchFunc = func(i int) bool {
			return dst.entries[i].val >= d
		}
	} else {
		hex = dgst.Hex()
		alg = dgst.Algorithm()
		searchFunc = func(i int) bool {
			if dst.entries[i].val == hex {
				return dst.entries[i].alg >= alg
			}
			return dst.entries[i].val >= hex
		}
	}
	// entries are kept sorted (by val, then alg), so a binary search finds
	// the first candidate match.
	idx := sort.Search(len(dst.entries), searchFunc)
	if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) {
		return "", ErrDigestNotFound
	}
	if dst.entries[idx].alg == alg && dst.entries[idx].val == hex {
		return dst.entries[idx].digest, nil
	}
	// If the next sorted entry also matches the short form, the short form is
	// ambiguous.
	if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) {
		return "", ErrDigestAmbiguous
	}
	return dst.entries[idx].digest, nil
}

// Add adds the given digest to the set. An error will be returned
// if the given digest is invalid. If the digest already exists in the
// set, this operation will be a no-op.
func (dst *Set) Add(d digest.Digest) error {
	if err := d.Validate(); err != nil {
		return err
	}
	dst.mutex.Lock()
	defer dst.mutex.Unlock()
	entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
	searchFunc := func(i int) bool {
		if dst.entries[i].val == entry.val {
			return dst.entries[i].alg >= entry.alg
		}
		return dst.entries[i].val >= entry.val
	}
	idx := sort.Search(len(dst.entries), searchFunc)
	if idx == len(dst.entries) {
		dst.entries = append(dst.entries, entry)
		return nil
	} else if dst.entries[idx].digest == d {
		return nil
	}

	// Insert at idx, preserving the sorted order.
	entries := append(dst.entries, nil)
	copy(entries[idx+1:], entries[idx:len(entries)-1])
	entries[idx] = entry
	dst.entries = entries
	return nil
}

// Remove removes the given digest from the set. An err will be
// returned if the given digest is invalid. If the digest does
// not exist in the set, this operation will be a no-op.
func (dst *Set) Remove(d digest.Digest) error {
	if err := d.Validate(); err != nil {
		return err
	}
	dst.mutex.Lock()
	defer dst.mutex.Unlock()
	entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d}
	searchFunc := func(i int) bool {
		if dst.entries[i].val == entry.val {
			return dst.entries[i].alg >= entry.alg
		}
		return dst.entries[i].val >= entry.val
	}
	idx := sort.Search(len(dst.entries), searchFunc)
	// Not found if idx is after or value at idx is not digest
	if idx == len(dst.entries) || dst.entries[idx].digest != d {
		return nil
	}

	entries := dst.entries
	copy(entries[idx:], entries[idx+1:])
	entries = entries[:len(entries)-1]
	dst.entries = entries
	return nil
}

// All returns all the digests in the set
func (dst *Set) All() []digest.Digest {
	dst.mutex.RLock()
	defer dst.mutex.RUnlock()
	retValues := make([]digest.Digest, len(dst.entries))
	for i := range dst.entries {
		retValues[i] = dst.entries[i].digest
	}
	return retValues
}

// ShortCodeTable returns a map of Digest to unique short codes.
The // length represents the minimum value, the maximum length may be the // entire value of digest if uniqueness cannot be achieved without the // full value. This function will attempt to make short codes as short // as possible to be unique. func ShortCodeTable(dst *Set, length int) map[digest.Digest]string { dst.mutex.RLock() defer dst.mutex.RUnlock() m := make(map[digest.Digest]string, len(dst.entries)) l := length resetIdx := 0 for i := 0; i < len(dst.entries); i++ { var short string extended := true for extended { extended = false if len(dst.entries[i].val) <= l { short = dst.entries[i].digest.String() } else { short = dst.entries[i].val[:l] for j := i + 1; j < len(dst.entries); j++ { if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { if j > resetIdx { resetIdx = j } extended = true } else { break } } if extended { l++ } } } m[dst.entries[i].digest] = short if i >= resetIdx { l = length } } return m } type digestEntry struct { alg digest.Algorithm val string digest digest.Digest } type digestEntries []*digestEntry func (d digestEntries) Len() int { return len(d) } func (d digestEntries) Less(i, j int) bool { if d[i].val != d[j].val { return d[i].val < d[j].val } return d[i].alg < d[j].alg } func (d digestEntries) Swap(i, j int) { d[i], d[j] = d[j], d[i] } ================================================ FILE: vendor/github.com/docker/distribution/doc.go ================================================ // Package distribution will define the interfaces for the components of // docker distribution. The goal is to allow users to reliably package, ship // and store content related to docker images. // // This is currently a work in progress. More details are available in the // README.md. 
package distribution

================================================
FILE: vendor/github.com/docker/distribution/errors.go
================================================

package distribution

import (
	"errors"
	"fmt"
	"strings"

	"github.com/opencontainers/go-digest"
)

// ErrAccessDenied is returned when an access to a requested resource is
// denied.
var ErrAccessDenied = errors.New("access denied")

// ErrManifestNotModified is returned when a conditional manifest GetByTag
// returns nil due to the client indicating it has the latest version.
var ErrManifestNotModified = errors.New("manifest not modified")

// ErrUnsupported is returned when an unimplemented or unsupported action is
// performed.
var ErrUnsupported = errors.New("operation unsupported")

// ErrTagUnknown is returned if the given tag is not known by the tag service.
type ErrTagUnknown struct {
	Tag string
}

func (err ErrTagUnknown) Error() string {
	return fmt.Sprintf("unknown tag=%s", err.Tag)
}

// ErrRepositoryUnknown is returned if the named repository is not known by
// the registry.
type ErrRepositoryUnknown struct {
	Name string
}

func (err ErrRepositoryUnknown) Error() string {
	return fmt.Sprintf("unknown repository name=%s", err.Name)
}

// ErrRepositoryNameInvalid should be used to denote an invalid repository
// name. Reason may be set, indicating the cause of invalidity.
type ErrRepositoryNameInvalid struct {
	Name   string
	Reason error
}

func (err ErrRepositoryNameInvalid) Error() string {
	return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason)
}

// ErrManifestUnknown is returned if the manifest is not known by the
// registry.
type ErrManifestUnknown struct {
	Name string
	Tag  string
}

func (err ErrManifestUnknown) Error() string {
	return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag)
}

// ErrManifestUnknownRevision is returned when a manifest cannot be found by
// revision within a repository.
type ErrManifestUnknownRevision struct {
	Name     string
	Revision digest.Digest
}

func (err ErrManifestUnknownRevision) Error() string {
	return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision)
}

// ErrManifestUnverified is returned when the registry is unable to verify
// the manifest.
type ErrManifestUnverified struct{}

func (ErrManifestUnverified) Error() string {
	return "unverified manifest"
}

// ErrManifestVerification provides a type to collect errors encountered
// during manifest verification. Currently, it accepts errors of all types,
// but it may be narrowed to those involving manifest verification.
type ErrManifestVerification []error

func (errs ErrManifestVerification) Error() string {
	var parts []string
	for _, err := range errs {
		parts = append(parts, err.Error())
	}
	return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ","))
}

// ErrManifestBlobUnknown returned when a referenced blob cannot be found.
type ErrManifestBlobUnknown struct {
	Digest digest.Digest
}

func (err ErrManifestBlobUnknown) Error() string {
	return fmt.Sprintf("unknown blob %v on manifest", err.Digest)
}

// ErrManifestNameInvalid should be used to denote an invalid manifest
// name. Reason may be set, indicating the cause of invalidity.
type ErrManifestNameInvalid struct {
	Name   string
	Reason error
}

func (err ErrManifestNameInvalid) Error() string {
	return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason)
}

================================================
FILE: vendor/github.com/docker/distribution/manifests.go
================================================

package distribution

import (
	"context"
	"fmt"
	"mime"

	"github.com/opencontainers/go-digest"
)

// Manifest represents a registry object specifying a set of
// references and an optional target.
type Manifest interface {
	// References returns a list of objects which make up this manifest.
	// A reference is anything which can be represented by a
	// distribution.Descriptor.
// These can consist of layers, resources or other
	// manifests.
	//
	// While no particular order is required, implementations should return
	// them from highest to lowest priority. For example, one might want to
	// return the base layer before the top layer.
	References() []Descriptor

	// Payload provides the serialized format of the manifest, in addition to
	// the media type.
	Payload() (mediaType string, payload []byte, err error)
}

// ManifestBuilder creates a manifest allowing one to include dependencies.
// Instances can be obtained from a version-specific manifest package. Manifest
// specific data is passed into the function which creates the builder.
type ManifestBuilder interface {
	// Build creates the manifest from this builder.
	Build(ctx context.Context) (Manifest, error)

	// References returns a list of objects which have been added to this
	// builder. The dependencies are returned in the order they were added,
	// which should be from base to head.
	References() []Descriptor

	// AppendReference includes the given object in the manifest after any
	// existing dependencies. If the add fails, such as when adding an
	// unsupported dependency, an error may be returned.
	//
	// The destination of the reference is dependent on the manifest type and
	// the dependency type.
	AppendReference(dependency Describable) error
}

// ManifestService describes operations on image manifests.
type ManifestService interface {
	// Exists returns true if the manifest exists.
	Exists(ctx context.Context, dgst digest.Digest) (bool, error)

	// Get retrieves the manifest specified by the given digest.
	Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error)

	// Put creates or updates the given manifest returning the manifest digest.
	Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error)

	// Delete removes the manifest specified by the given digest.
Deleting // a manifest that doesn't exist will return ErrManifestNotFound Delete(ctx context.Context, dgst digest.Digest) error } // ManifestEnumerator enables iterating over manifests type ManifestEnumerator interface { // Enumerate calls ingester for each manifest. Enumerate(ctx context.Context, ingester func(digest.Digest) error) error } // Describable is an interface for descriptors type Describable interface { Descriptor() Descriptor } // ManifestMediaTypes returns the supported media types for manifests. func ManifestMediaTypes() (mediaTypes []string) { for t := range mappings { if t != "" { mediaTypes = append(mediaTypes, t) } } return } // UnmarshalFunc implements manifest unmarshalling a given MediaType type UnmarshalFunc func([]byte) (Manifest, Descriptor, error) var mappings = make(map[string]UnmarshalFunc, 0) // UnmarshalManifest looks up manifest unmarshal functions based on // MediaType func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) { // Need to look up by the actual media type, not the raw contents of // the header. Strip semicolons and anything following them. var mediaType string if ctHeader != "" { var err error mediaType, _, err = mime.ParseMediaType(ctHeader) if err != nil { return nil, Descriptor{}, err } } unmarshalFunc, ok := mappings[mediaType] if !ok { unmarshalFunc, ok = mappings[""] if !ok { return nil, Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType) } } return unmarshalFunc(p) } // RegisterManifestSchema registers an UnmarshalFunc for a given schema type. 
// This
// should be called from specific manifest packages.
func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error {
	if _, ok := mappings[mediaType]; ok {
		return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType)
	}
	mappings[mediaType] = u
	return nil
}

================================================
FILE: vendor/github.com/docker/distribution/reference/helpers.go
================================================

package reference

import "path"

// IsNameOnly returns true if reference only contains a repo name.
func IsNameOnly(ref Named) bool {
	if _, ok := ref.(NamedTagged); ok {
		return false
	}
	if _, ok := ref.(Canonical); ok {
		return false
	}
	return true
}

// FamiliarName returns the familiar name string
// for the given named, familiarizing if needed.
func FamiliarName(ref Named) string {
	if nn, ok := ref.(normalizedNamed); ok {
		return nn.Familiar().Name()
	}
	return ref.Name()
}

// FamiliarString returns the familiar string representation
// for the given reference, familiarizing if needed.
func FamiliarString(ref Reference) string {
	if nn, ok := ref.(normalizedNamed); ok {
		return nn.Familiar().String()
	}
	return ref.String()
}

// FamiliarMatch reports whether ref matches the specified pattern.
// See https://godoc.org/path#Match for supported patterns.
func FamiliarMatch(pattern string, ref Reference) (bool, error) {
	matched, err := path.Match(pattern, FamiliarString(ref))
	// Retry against the bare name (no tag/digest) when the full string
	// did not match; the error from the first Match is still returned.
	if namedRef, isNamed := ref.(Named); isNamed && !matched {
		matched, _ = path.Match(pattern, FamiliarName(namedRef))
	}
	return matched, err
}

================================================
FILE: vendor/github.com/docker/distribution/reference/normalize.go
================================================

package reference

import (
	"errors"
	"fmt"
	"strings"

	"github.com/docker/distribution/digestset"
	"github.com/opencontainers/go-digest"
)

var (
	legacyDefaultDomain = "index.docker.io"
	defaultDomain       = "docker.io"
	officialRepoName    = "library"
	defaultTag          = "latest"
)

// normalizedNamed represents a name which has been
// normalized and has a familiar form. A familiar name
// is what is used in Docker UI. An example normalized
// name is "docker.io/library/ubuntu" and corresponding
// familiar name of "ubuntu".
type normalizedNamed interface {
	Named
	Familiar() Named
}

// ParseNormalizedNamed parses a string into a named reference
// transforming a familiar name from Docker UI to a fully
// qualified reference. If the value may be an identifier
// use ParseAnyReference.
func ParseNormalizedNamed(s string) (Named, error) { if ok := anchoredIdentifierRegexp.MatchString(s); ok { return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) } domain, remainder := splitDockerDomain(s) var remoteName string if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { remoteName = remainder[:tagSep] } else { remoteName = remainder } if strings.ToLower(remoteName) != remoteName { return nil, errors.New("invalid reference format: repository name must be lowercase") } ref, err := Parse(domain + "/" + remainder) if err != nil { return nil, err } named, isNamed := ref.(Named) if !isNamed { return nil, fmt.Errorf("reference %s has no name", ref.String()) } return named, nil } // splitDockerDomain splits a repository name to domain and remotename string. // If no valid domain is found, the default domain is used. Repository name // needs to be already validated before. func splitDockerDomain(name string) (domain, remainder string) { i := strings.IndexRune(name, '/') if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { domain, remainder = defaultDomain, name } else { domain, remainder = name[:i], name[i+1:] } if domain == legacyDefaultDomain { domain = defaultDomain } if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { remainder = officialRepoName + "/" + remainder } return } // familiarizeName returns a shortened version of the name familiar // to to the Docker UI. Familiar names have the default domain // "docker.io" and "library/" repository prefix removed. // For example, "docker.io/library/redis" will have the familiar // name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". // Returns a familiarized named only reference. 
func familiarizeName(named namedRepository) repository {
	repo := repository{
		domain: named.Domain(),
		path:   named.Path(),
	}

	if repo.domain == defaultDomain {
		repo.domain = ""
		// Handle official repositories which have the pattern "library/"
		if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
			repo.path = split[1]
		}
	}
	return repo
}

// Familiar returns the familiar form of the reference, shortening the
// domain/official prefix while preserving tag and digest.
func (r reference) Familiar() Named {
	return reference{
		namedRepository: familiarizeName(r.namedRepository),
		tag:             r.tag,
		digest:          r.digest,
	}
}

// Familiar returns the familiar form of the repository name.
func (r repository) Familiar() Named {
	return familiarizeName(r)
}

// Familiar returns the familiar form of the tagged reference.
func (t taggedReference) Familiar() Named {
	return taggedReference{
		namedRepository: familiarizeName(t.namedRepository),
		tag:             t.tag,
	}
}

// Familiar returns the familiar form of the canonical (digested) reference.
func (c canonicalReference) Familiar() Named {
	return canonicalReference{
		namedRepository: familiarizeName(c.namedRepository),
		digest:          c.digest,
	}
}

// TagNameOnly adds the default tag "latest" to a reference if it only has
// a repo name.
func TagNameOnly(ref Named) Named {
	if IsNameOnly(ref) {
		namedTagged, err := WithTag(ref, defaultTag)
		if err != nil {
			// Default tag must be valid, to create a NamedTagged
			// type with non-validated input the WithTag function
			// should be used instead.
			panic(err)
		}
		return namedTagged
	}
	return ref
}

// ParseAnyReference parses a reference string as a possible identifier,
// full digest, or familiar name.
func ParseAnyReference(ref string) (Reference, error) {
	// A bare 64-hex-character string is treated as a sha256 identifier.
	if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
		return digestReference("sha256:" + ref), nil
	}
	if dgst, err := digest.Parse(ref); err == nil {
		return digestReference(dgst), nil
	}

	return ParseNormalizedNamed(ref)
}

// ParseAnyReferenceWithSet parses a reference string as a possible short
// identifier to be matched in a digest set, a full digest, or familiar name.
func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) {
	// Short identifiers are resolved against the digest set; on a failed
	// lookup we fall through to normal name parsing.
	if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok {
		dgst, err := ds.Lookup(ref)
		if err == nil {
			return digestReference(dgst), nil
		}
	} else {
		if dgst, err := digest.Parse(ref); err == nil {
			return digestReference(dgst), nil
		}
	}

	return ParseNormalizedNamed(ref)
}

================================================
FILE: vendor/github.com/docker/distribution/reference/reference.go
================================================

// Package reference provides a general type to represent any way of referencing images within the registry.
// Its main purpose is to abstract tags and digests (content-addressable hash).
//
// Grammar
//
// 	reference                       := name [ ":" tag ] [ "@" digest ]
//	name                            := [domain '/'] path-component ['/' path-component]*
//	domain                          := domain-component ['.' domain-component]* [':' port-number]
//	domain-component                := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
//	port-number                     := /[0-9]+/
//	path-component                  := alpha-numeric [separator alpha-numeric]*
// 	alpha-numeric                   := /[a-z0-9]+/
//	separator                       := /[_.]|__|[-]*/
//
//	tag                             := /[\w][\w.-]{0,127}/
//
//	digest                          := digest-algorithm ":" digest-hex
//	digest-algorithm                := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]*
//	digest-algorithm-separator      := /[+.-_]/
//	digest-algorithm-component      := /[A-Za-z][A-Za-z0-9]*/
//	digest-hex                      := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
//
//	identifier                      := /[a-f0-9]{64}/
//	short-identifier                := /[a-f0-9]{6,64}/
package reference

import (
	"errors"
	"fmt"
	"strings"

	"github.com/opencontainers/go-digest"
)

const (
	// NameTotalLengthMax is the maximum total number of characters in a repository name.
	NameTotalLengthMax = 255
)

var (
	// ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
ErrReferenceInvalidFormat = errors.New("invalid reference format")

	// ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
	ErrTagInvalidFormat = errors.New("invalid tag format")

	// ErrDigestInvalidFormat represents an error while trying to parse a string as a digest.
	ErrDigestInvalidFormat = errors.New("invalid digest format")

	// ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
	ErrNameContainsUppercase = errors.New("repository name must be lowercase")

	// ErrNameEmpty is returned for empty, invalid repository names.
	ErrNameEmpty = errors.New("repository name must have at least one component")

	// ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
	ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)

	// ErrNameNotCanonical is returned when a name is not canonical.
	ErrNameNotCanonical = errors.New("repository name must be canonical")
)

// Reference is an opaque object reference identifier that may include
// modifiers such as a hostname, name, tag, and digest.
type Reference interface {
	// String returns the full reference.
	String() string
}

// Field provides a wrapper type for resolving correct reference types when
// working with encoding.
type Field struct {
	reference Reference
}

// AsField wraps a reference in a Field for encoding.
func AsField(reference Reference) Field {
	return Field{reference}
}

// Reference unwraps the reference type from the field to
// return the Reference object. This object should be
// of the appropriate type to further check for different
// reference types.
func (f Field) Reference() Reference {
	return f.reference
}

// MarshalText serializes the field to byte text which
// is the string of the reference.
// NOTE(review): dereferences f.reference without a nil check — confirm
// callers always populate Field before encoding.
func (f Field) MarshalText() (p []byte, err error) {
	return []byte(f.reference.String()), nil
}

// UnmarshalText parses text bytes by invoking the
// reference parser to ensure the appropriately
// typed reference object is wrapped by field.
func (f *Field) UnmarshalText(p []byte) error {
	r, err := Parse(string(p))
	if err != nil {
		return err
	}

	f.reference = r
	return nil
}

// Named is an object with a full name.
type Named interface {
	Reference
	Name() string
}

// Tagged is an object which has a tag.
type Tagged interface {
	Reference
	Tag() string
}

// NamedTagged is an object including a name and tag.
type NamedTagged interface {
	Named
	Tag() string
}

// Digested is an object which has a digest
// in which it can be referenced by.
type Digested interface {
	Reference
	Digest() digest.Digest
}

// Canonical reference is an object with a fully unique
// name including a name with domain and digest.
type Canonical interface {
	Named
	Digest() digest.Digest
}

// namedRepository is a reference to a repository with a name.
// A namedRepository has both domain and path components.
type namedRepository interface {
	Named
	Domain() string
	Path() string
}

// Domain returns the domain part of the Named reference.
func Domain(named Named) string {
	if r, ok := named.(namedRepository); ok {
		return r.Domain()
	}
	domain, _ := splitDomain(named.Name())
	return domain
}

// Path returns the name without the domain part of the Named reference.
func Path(named Named) (name string) {
	if r, ok := named.(namedRepository); ok {
		return r.Path()
	}
	_, path := splitDomain(named.Name())
	return path
}

// splitDomain splits name using the anchored name regexp; when the name
// does not match, the domain is empty and the whole input is the path.
func splitDomain(name string) (string, string) {
	match := anchoredNameRegexp.FindStringSubmatch(name)
	if len(match) != 3 {
		return "", name
	}
	return match[1], match[2]
}

// SplitHostname splits a named reference into a
// hostname and name string.
// If no valid hostname is
// found, the hostname is empty and the full value
// is returned as name.
// DEPRECATED: Use Domain or Path
func SplitHostname(named Named) (string, string) {
	if r, ok := named.(namedRepository); ok {
		return r.Domain(), r.Path()
	}
	return splitDomain(named.Name())
}

// Parse parses s and returns a syntactically valid Reference.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: Parse will not handle short digests.
func Parse(s string) (Reference, error) {
	matches := ReferenceRegexp.FindStringSubmatch(s)
	if matches == nil {
		if s == "" {
			return nil, ErrNameEmpty
		}
		// Distinguish an uppercase-name error from a generic format error:
		// if the lowercased input parses, uppercase was the only problem.
		if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
			return nil, ErrNameContainsUppercase
		}
		return nil, ErrReferenceInvalidFormat
	}

	if len(matches[1]) > NameTotalLengthMax {
		return nil, ErrNameTooLong
	}

	var repo repository

	nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
	if nameMatch != nil && len(nameMatch) == 3 {
		repo.domain = nameMatch[1]
		repo.path = nameMatch[2]
	} else {
		repo.domain = ""
		repo.path = matches[1]
	}

	ref := reference{
		namedRepository: repo,
		tag:             matches[2],
	}
	if matches[3] != "" {
		var err error
		ref.digest, err = digest.Parse(matches[3])
		if err != nil {
			return nil, err
		}
	}

	r := getBestReferenceType(ref)
	if r == nil {
		return nil, ErrNameEmpty
	}

	return r, nil
}

// ParseNamed parses s and returns a syntactically valid reference implementing
// the Named interface. The reference must have a name and be in the canonical
// form, otherwise an error is returned.
// If an error was encountered it is returned, along with a nil Reference.
// NOTE: ParseNamed will not handle short digests.
func ParseNamed(s string) (Named, error) {
	named, err := ParseNormalizedNamed(s)
	if err != nil {
		return nil, err
	}
	// Canonical form requires the normalized string to round-trip exactly.
	if named.String() != s {
		return nil, ErrNameNotCanonical
	}
	return named, nil
}

// WithName returns a named object representing the given string. If the input
// is invalid ErrReferenceInvalidFormat will be returned.
func WithName(name string) (Named, error) {
	if len(name) > NameTotalLengthMax {
		return nil, ErrNameTooLong
	}

	match := anchoredNameRegexp.FindStringSubmatch(name)
	if match == nil || len(match) != 3 {
		return nil, ErrReferenceInvalidFormat
	}
	return repository{
		domain: match[1],
		path:   match[2],
	}, nil
}

// WithTag combines the name from "name" and the tag from "tag" to form a
// reference incorporating both the name and the tag.
func WithTag(name Named, tag string) (NamedTagged, error) {
	if !anchoredTagRegexp.MatchString(tag) {
		return nil, ErrTagInvalidFormat
	}
	var repo repository
	if r, ok := name.(namedRepository); ok {
		repo.domain = r.Domain()
		repo.path = r.Path()
	} else {
		repo.path = name.Name()
	}
	// Preserve an existing digest so the result keeps both tag and digest.
	if canonical, ok := name.(Canonical); ok {
		return reference{
			namedRepository: repo,
			tag:             tag,
			digest:          canonical.Digest(),
		}, nil
	}
	return taggedReference{
		namedRepository: repo,
		tag:             tag,
	}, nil
}

// WithDigest combines the name from "name" and the digest from "digest" to form
// a reference incorporating both the name and the digest.
func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
	if !anchoredDigestRegexp.MatchString(digest.String()) {
		return nil, ErrDigestInvalidFormat
	}
	var repo repository
	if r, ok := name.(namedRepository); ok {
		repo.domain = r.Domain()
		repo.path = r.Path()
	} else {
		repo.path = name.Name()
	}
	// Preserve an existing tag so the result keeps both tag and digest.
	if tagged, ok := name.(Tagged); ok {
		return reference{
			namedRepository: repo,
			tag:             tagged.Tag(),
			digest:          digest,
		}, nil
	}
	return canonicalReference{
		namedRepository: repo,
		digest:          digest,
	}, nil
}

// TrimNamed removes any tag or digest from the named reference.
func TrimNamed(ref Named) Named {
	domain, path := SplitHostname(ref)
	return repository{
		domain: domain,
		path:   path,
	}
}

// getBestReferenceType returns the most specific reference type that can
// represent ref, or nil when ref has neither a name nor a digest.
func getBestReferenceType(ref reference) Reference {
	if ref.Name() == "" {
		// Allow digest only references
		if ref.digest != "" {
			return digestReference(ref.digest)
		}
		return nil
	}
	if ref.tag == "" {
		if ref.digest != "" {
			return canonicalReference{
				namedRepository: ref.namedRepository,
				digest:          ref.digest,
			}
		}
		return ref.namedRepository
	}
	if ref.digest == "" {
		return taggedReference{
			namedRepository: ref.namedRepository,
			tag:             ref.tag,
		}
	}

	return ref
}

// reference carries a name plus both a tag and a digest.
type reference struct {
	namedRepository
	tag    string
	digest digest.Digest
}

func (r reference) String() string {
	return r.Name() + ":" + r.tag + "@" + r.digest.String()
}

func (r reference) Tag() string {
	return r.tag
}

func (r reference) Digest() digest.Digest {
	return r.digest
}

// repository is a name split into domain and path components.
type repository struct {
	domain string
	path   string
}

func (r repository) String() string {
	return r.Name()
}

func (r repository) Name() string {
	if r.domain == "" {
		return r.path
	}
	return r.domain + "/" + r.path
}

func (r repository) Domain() string {
	return r.domain
}

func (r repository) Path() string {
	return r.path
}

// digestReference is a digest-only reference with no name.
type digestReference digest.Digest

func (d digestReference) String() string {
	return digest.Digest(d).String()
}

func (d digestReference) Digest() digest.Digest {
	return digest.Digest(d)
}

// taggedReference is a name plus tag, without a digest.
type taggedReference struct {
	namedRepository
	tag string
}

func (t taggedReference) String() string {
	return t.Name() + ":" + t.tag
}

func (t taggedReference) Tag() string {
	return t.tag
}

// canonicalReference is a name plus digest, without a tag.
type canonicalReference struct {
	namedRepository
	digest digest.Digest
}

func (c canonicalReference) String() string {
	return c.Name() + "@" + c.digest.String()
}

func (c canonicalReference) Digest() digest.Digest {
	return c.digest
}

================================================
FILE: vendor/github.com/docker/distribution/reference/regexp.go
================================================

package reference

import "regexp"

var (
	//
// alphaNumericRegexp defines the alpha numeric atom, typically a
	// component of names. This only allows lower case characters and digits.
	alphaNumericRegexp = match(`[a-z0-9]+`)

	// separatorRegexp defines the separators allowed to be embedded in name
	// components. This allows one period, one or two underscores and
	// multiple dashes.
	separatorRegexp = match(`(?:[._]|__|[-]*)`)

	// nameComponentRegexp restricts registry path component names to start
	// with at least one letter or number, with following parts able to be
	// separated by one period, one or two underscore and multiple dashes.
	nameComponentRegexp = expression(
		alphaNumericRegexp,
		optional(repeated(separatorRegexp, alphaNumericRegexp)))

	// domainComponentRegexp restricts the registry domain component of a
	// repository name to start with a component as defined by DomainRegexp
	// and followed by an optional port.
	domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)

	// DomainRegexp defines the structure of potential domain components
	// that may be part of image names. This is purposely a subset of what is
	// allowed by DNS to ensure backwards compatibility with Docker image
	// names.
	DomainRegexp = expression(
		domainComponentRegexp,
		optional(repeated(literal(`.`), domainComponentRegexp)),
		optional(literal(`:`), match(`[0-9]+`)))

	// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
	TagRegexp = match(`[\w][\w.-]{0,127}`)

	// anchoredTagRegexp matches valid tag names, anchored at the start and
	// end of the matched string.
	anchoredTagRegexp = anchored(TagRegexp)

	// DigestRegexp matches valid digests.
	DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)

	// anchoredDigestRegexp matches valid digests, anchored at the start and
	// end of the matched string.
	anchoredDigestRegexp = anchored(DigestRegexp)

	// NameRegexp is the format for the name component of references.
// The
	// regexp has capturing groups for the domain and name part omitting
	// the separating forward slash from either.
	NameRegexp = expression(
		optional(DomainRegexp, literal(`/`)),
		nameComponentRegexp,
		optional(repeated(literal(`/`), nameComponentRegexp)))

	// anchoredNameRegexp is used to parse a name value, capturing the
	// domain and trailing components.
	anchoredNameRegexp = anchored(
		optional(capture(DomainRegexp), literal(`/`)),
		capture(nameComponentRegexp,
			optional(repeated(literal(`/`), nameComponentRegexp))))

	// ReferenceRegexp is the full supported format of a reference. The regexp
	// is anchored and has capturing groups for name, tag, and digest
	// components.
	ReferenceRegexp = anchored(capture(NameRegexp),
		optional(literal(":"), capture(TagRegexp)),
		optional(literal("@"), capture(DigestRegexp)))

	// IdentifierRegexp is the format for string identifier used as a
	// content addressable identifier using sha256. These identifiers
	// are like digests without the algorithm, since sha256 is used.
	IdentifierRegexp = match(`([a-f0-9]{64})`)

	// ShortIdentifierRegexp is the format used to represent a prefix
	// of an identifier. A prefix may be used to match a sha256 identifier
	// within a list of trusted identifiers.
	ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)

	// anchoredIdentifierRegexp is used to check or match an
	// identifier value, anchored at start and end of string.
	anchoredIdentifierRegexp = anchored(IdentifierRegexp)

	// anchoredShortIdentifierRegexp is used to check if a value
	// is a possible identifier prefix, anchored at start and end
	// of string.
	anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
)

// match compiles the string to a regular expression.
var match = regexp.MustCompile

// literal compiles s into a literal regular expression, escaping any regexp
// reserved characters.
func literal(s string) *regexp.Regexp {
	re := match(regexp.QuoteMeta(s))

	// QuoteMeta must have produced a pure literal; anything else is a bug.
	if _, complete := re.LiteralPrefix(); !complete {
		panic("must be a literal")
	}

	return re
}

// expression defines a full expression, where each regular expression must
// follow the previous.
func expression(res ...*regexp.Regexp) *regexp.Regexp {
	var s string
	for _, re := range res {
		s += re.String()
	}

	return match(s)
}

// optional wraps the expression in a non-capturing group and makes the
// production optional.
func optional(res ...*regexp.Regexp) *regexp.Regexp {
	return match(group(expression(res...)).String() + `?`)
}

// repeated wraps the regexp in a non-capturing group to get one or more
// matches.
func repeated(res ...*regexp.Regexp) *regexp.Regexp {
	return match(group(expression(res...)).String() + `+`)
}

// group wraps the regexp in a non-capturing group.
func group(res ...*regexp.Regexp) *regexp.Regexp {
	return match(`(?:` + expression(res...).String() + `)`)
}

// capture wraps the expression in a capturing group.
func capture(res ...*regexp.Regexp) *regexp.Regexp {
	return match(`(` + expression(res...).String() + `)`)
}

// anchored anchors the regular expression by adding start and end delimiters.
func anchored(res ...*regexp.Regexp) *regexp.Regexp {
	return match(`^` + expression(res...).String() + `$`)
}

================================================
FILE: vendor/github.com/docker/distribution/registry.go
================================================

package distribution

import (
	"context"

	"github.com/docker/distribution/reference"
)

// Scope defines the set of items that match a namespace.
type Scope interface {
	// Contains returns true if the name belongs to the namespace.
	Contains(name string) bool
}

// fullScope matches every name.
type fullScope struct{}

func (f fullScope) Contains(string) bool {
	return true
}

// GlobalScope represents the full namespace scope which contains
// all other scopes.
var GlobalScope = Scope(fullScope{})

// Namespace represents a collection of repositories, addressable by name.
// Generally, a namespace is backed by a set of one or more services,
// providing facilities such as registry access, trust, and indexing.
type Namespace interface {
	// Scope describes the names that can be used with this Namespace. The
	// global namespace will have a scope that matches all names. The scope
	// effectively provides an identity for the namespace.
	Scope() Scope

	// Repository should return a reference to the named repository. The
	// registry may or may not have the repository but should always return a
	// reference.
	Repository(ctx context.Context, name reference.Named) (Repository, error)

	// Repositories fills 'repos' with a lexicographically sorted catalog of repositories
	// up to the size of 'repos' and returns the value 'n' for the number of entries
	// which were filled. 'last' contains an offset in the catalog, and 'err' will be
	// set to io.EOF if there are no more entries to obtain.
	Repositories(ctx context.Context, repos []string, last string) (n int, err error)

	// Blobs returns a blob enumerator to access all blobs.
	Blobs() BlobEnumerator

	// BlobStatter returns a BlobStatter to control
	BlobStatter() BlobStatter
}

// RepositoryEnumerator describes an operation to enumerate repositories.
type RepositoryEnumerator interface {
	Enumerate(ctx context.Context, ingester func(string) error) error
}

// ManifestServiceOption is a function argument for Manifest Service methods.
type ManifestServiceOption interface {
	Apply(ManifestService) error
}

// WithTag allows a tag to be passed into Put.
func WithTag(tag string) ManifestServiceOption {
	return WithTagOption{tag}
}

// WithTagOption holds a tag.
type WithTagOption struct{ Tag string }

// Apply conforms to the ManifestServiceOption interface.
// It is a deliberate no-op here; implementations inspect the option value
// via a type assertion rather than through Apply.
func (o WithTagOption) Apply(m ManifestService) error {
	// no implementation
	return nil
}

// Repository is a named collection of manifests and layers.
type Repository interface {
	// Named returns the name of the repository.
	Named() reference.Named

	// Manifests returns a reference to this repository's manifest service,
	// with the supplied options applied.
	Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error)

	// Blobs returns a reference to this repository's blob service.
	Blobs(ctx context.Context) BlobStore

	// TODO(stevvooe): The above BlobStore return can probably be relaxed to
	// be a BlobService for use with clients. This will allow such
	// implementations to avoid implementing ServeBlob.

	// Tags returns a reference to this repositories tag service.
	Tags(ctx context.Context) TagService
}

// TODO(stevvooe): Must add close methods to all these. May want to change the
// way instances are created to better reflect internal dependency
// relationships.

================================================
FILE: vendor/github.com/docker/distribution/tags.go
================================================

package distribution

import (
	"context"
)

// TagService provides access to information about tagged objects.
type TagService interface {
	// Get retrieves the descriptor identified by the tag. Some
	// implementations may differentiate between "trusted" tags and
	// "untrusted" tags. If a tag is "untrusted", the mapping will be returned
	// as an ErrTagUntrusted error, with the target descriptor.
	Get(ctx context.Context, tag string) (Descriptor, error)

	// Tag associates the tag with the provided descriptor, updating the
	// current association, if needed.
	Tag(ctx context.Context, tag string, desc Descriptor) error

	// Untag removes the given tag association.
	Untag(ctx context.Context, tag string) error

	// All returns the set of tags managed by this tag service.
	All(ctx context.Context) ([]string, error)

	// Lookup returns the set of tags referencing the given digest.
	Lookup(ctx context.Context, digest Descriptor) ([]string, error)
}

================================================
FILE: vendor/github.com/klauspost/compress/LICENSE
================================================

Copyright (c) 2012 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

================================================
FILE: vendor/github.com/klauspost/compress/flate/copy.go
================================================

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate // forwardCopy is like the built-in copy function except that it always goes // forward from the start, even if the dst and src overlap. // It is equivalent to: // for i := 0; i < n; i++ { // mem[dst+i] = mem[src+i] // } func forwardCopy(mem []byte, dst, src, n int) { if dst <= src { copy(mem[dst:dst+n], mem[src:src+n]) return } for { if dst >= src+n { copy(mem[dst:dst+n], mem[src:src+n]) return } // There is some forward overlap. The destination // will be filled with a repeated pattern of mem[src:src+k]. // We copy one instance of the pattern here, then repeat. // Each time around this loop k will double. k := dst - src copy(mem[dst:dst+k], mem[src:src+k]) n -= k dst += k } } ================================================ FILE: vendor/github.com/klauspost/compress/flate/crc32_amd64.go ================================================ //+build !noasm //+build !appengine // Copyright 2015, Klaus Post, see LICENSE for details. package flate import ( "github.com/klauspost/cpuid" ) // crc32sse returns a hash for the first 4 bytes of the slice // len(a) must be >= 4. //go:noescape func crc32sse(a []byte) uint32 // crc32sseAll calculates hashes for each 4-byte set in a. // dst must be east len(a) - 4 in size. // The size is not checked by the assembly. //go:noescape func crc32sseAll(a []byte, dst []uint32) // matchLenSSE4 returns the number of matching bytes in a and b // up to length 'max'. Both slices must be at least 'max' // bytes in size. // // TODO: drop the "SSE4" name, since it doesn't use any SSE instructions. // //go:noescape func matchLenSSE4(a, b []byte, max int) int // histogram accumulates a histogram of b in h. // h must be at least 256 entries in length, // and must be cleared before calling this function. //go:noescape func histogram(b []byte, h []int32) // Detect SSE 4.2 feature. 
func init() { useSSE42 = cpuid.CPU.SSE42() } ================================================ FILE: vendor/github.com/klauspost/compress/flate/crc32_amd64.s ================================================ //+build !noasm //+build !appengine // Copyright 2015, Klaus Post, see LICENSE for details. // func crc32sse(a []byte) uint32 TEXT ·crc32sse(SB), 4, $0 MOVQ a+0(FP), R10 XORQ BX, BX // CRC32 dword (R10), EBX BYTE $0xF2; BYTE $0x41; BYTE $0x0f BYTE $0x38; BYTE $0xf1; BYTE $0x1a MOVL BX, ret+24(FP) RET // func crc32sseAll(a []byte, dst []uint32) TEXT ·crc32sseAll(SB), 4, $0 MOVQ a+0(FP), R8 // R8: src MOVQ a_len+8(FP), R10 // input length MOVQ dst+24(FP), R9 // R9: dst SUBQ $4, R10 JS end JZ one_crc MOVQ R10, R13 SHRQ $2, R10 // len/4 ANDQ $3, R13 // len&3 XORQ BX, BX ADDQ $1, R13 TESTQ R10, R10 JZ rem_loop crc_loop: MOVQ (R8), R11 XORQ BX, BX XORQ DX, DX XORQ DI, DI MOVQ R11, R12 SHRQ $8, R11 MOVQ R12, AX MOVQ R11, CX SHRQ $16, R12 SHRQ $16, R11 MOVQ R12, SI // CRC32 EAX, EBX BYTE $0xF2; BYTE $0x0f BYTE $0x38; BYTE $0xf1; BYTE $0xd8 // CRC32 ECX, EDX BYTE $0xF2; BYTE $0x0f BYTE $0x38; BYTE $0xf1; BYTE $0xd1 // CRC32 ESI, EDI BYTE $0xF2; BYTE $0x0f BYTE $0x38; BYTE $0xf1; BYTE $0xfe MOVL BX, (R9) MOVL DX, 4(R9) MOVL DI, 8(R9) XORQ BX, BX MOVL R11, AX // CRC32 EAX, EBX BYTE $0xF2; BYTE $0x0f BYTE $0x38; BYTE $0xf1; BYTE $0xd8 MOVL BX, 12(R9) ADDQ $16, R9 ADDQ $4, R8 XORQ BX, BX SUBQ $1, R10 JNZ crc_loop rem_loop: MOVL (R8), AX // CRC32 EAX, EBX BYTE $0xF2; BYTE $0x0f BYTE $0x38; BYTE $0xf1; BYTE $0xd8 MOVL BX, (R9) ADDQ $4, R9 ADDQ $1, R8 XORQ BX, BX SUBQ $1, R13 JNZ rem_loop end: RET one_crc: MOVQ $1, R13 XORQ BX, BX JMP rem_loop // func matchLenSSE4(a, b []byte, max int) int TEXT ·matchLenSSE4(SB), 4, $0 MOVQ a_base+0(FP), SI MOVQ b_base+24(FP), DI MOVQ DI, DX MOVQ max+48(FP), CX cmp8: // As long as we are 8 or more bytes before the end of max, we can load and // compare 8 bytes at a time. If those 8 bytes are equal, repeat. 
CMPQ CX, $8 JLT cmp1 MOVQ (SI), AX MOVQ (DI), BX CMPQ AX, BX JNE bsf ADDQ $8, SI ADDQ $8, DI SUBQ $8, CX JMP cmp8 bsf: // If those 8 bytes were not equal, XOR the two 8 byte values, and return // the index of the first byte that differs. The BSF instruction finds the // least significant 1 bit, the amd64 architecture is little-endian, and // the shift by 3 converts a bit index to a byte index. XORQ AX, BX BSFQ BX, BX SHRQ $3, BX ADDQ BX, DI // Subtract off &b[0] to convert from &b[ret] to ret, and return. SUBQ DX, DI MOVQ DI, ret+56(FP) RET cmp1: // In the slices' tail, compare 1 byte at a time. CMPQ CX, $0 JEQ matchLenEnd MOVB (SI), AX MOVB (DI), BX CMPB AX, BX JNE matchLenEnd ADDQ $1, SI ADDQ $1, DI SUBQ $1, CX JMP cmp1 matchLenEnd: // Subtract off &b[0] to convert from &b[ret] to ret, and return. SUBQ DX, DI MOVQ DI, ret+56(FP) RET // func histogram(b []byte, h []int32) TEXT ·histogram(SB), 4, $0 MOVQ b+0(FP), SI // SI: &b MOVQ b_len+8(FP), R9 // R9: len(b) MOVQ h+24(FP), DI // DI: Histogram MOVQ R9, R8 SHRQ $3, R8 JZ hist1 XORQ R11, R11 loop_hist8: MOVQ (SI), R10 MOVB R10, R11 INCL (DI)(R11*4) SHRQ $8, R10 MOVB R10, R11 INCL (DI)(R11*4) SHRQ $8, R10 MOVB R10, R11 INCL (DI)(R11*4) SHRQ $8, R10 MOVB R10, R11 INCL (DI)(R11*4) SHRQ $8, R10 MOVB R10, R11 INCL (DI)(R11*4) SHRQ $8, R10 MOVB R10, R11 INCL (DI)(R11*4) SHRQ $8, R10 MOVB R10, R11 INCL (DI)(R11*4) SHRQ $8, R10 INCL (DI)(R10*4) ADDQ $8, SI DECQ R8 JNZ loop_hist8 hist1: ANDQ $7, R9 JZ end_hist XORQ R10, R10 loop_hist1: MOVB (SI), R10 INCL (DI)(R10*4) INCQ SI DECQ R9 JNZ loop_hist1 end_hist: RET ================================================ FILE: vendor/github.com/klauspost/compress/flate/crc32_noasm.go ================================================ //+build !amd64 noasm appengine // Copyright 2015, Klaus Post, see LICENSE for details. package flate func init() { useSSE42 = false } // crc32sse should never be called. 
func crc32sse(a []byte) uint32 {
	// Unreachable in non-amd64/noasm builds: init sets useSSE42 = false,
	// so callers never dispatch to the SSE variants.
	panic("no assembler")
}

// crc32sseAll should never be called.
func crc32sseAll(a []byte, dst []uint32) {
	panic("no assembler")
}

// matchLenSSE4 should never be called.
func matchLenSSE4(a, b []byte, max int) int {
	panic("no assembler")
	return 0
}

// histogram accumulates a histogram of b in h.
//
// len(h) must be >= 256, and h's elements must be all zeroes.
func histogram(b []byte, h []int32) {
	h = h[:256]
	for _, t := range b {
		h[t]++
	}
}

================================================
FILE: vendor/github.com/klauspost/compress/flate/deflate.go
================================================

// Copyright 2009 The Go Authors. All rights reserved.
// Copyright (c) 2015 Klaus Post
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package flate

import (
	"fmt"
	"io"
	"math"
)

const (
	NoCompression      = 0
	BestSpeed          = 1
	BestCompression    = 9
	DefaultCompression = -1

	// HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman
	// entropy encoding. This mode is useful in compressing data that has
	// already been compressed with an LZ style algorithm (e.g. Snappy or LZ4)
	// that lacks an entropy encoder. Compression gains are achieved when
	// certain bytes in the input stream occur more frequently than others.
	//
	// Note that HuffmanOnly produces a compressed output that is
	// RFC 1951 compliant. That is, any valid DEFLATE decompressor will
	// continue to be able to decompress this output.
	HuffmanOnly         = -2
	ConstantCompression = HuffmanOnly // compatibility alias.

	logWindowSize = 15
	windowSize    = 1 << logWindowSize
	windowMask    = windowSize - 1

	logMaxOffsetSize = 15  // Standard DEFLATE
	minMatchLength   = 4   // The smallest match that the compressor looks for
	maxMatchLength   = 258 // The longest match for the compressor
	minOffsetSize    = 1   // The shortest offset that makes any sense

	// The maximum number of tokens we put into a single flat block, just to
	// stop things from getting too large.
	maxFlateBlockTokens = 1 << 14
	maxStoreBlockSize   = 65535
	hashBits            = 17 // After 17 performance degrades
	hashSize            = 1 << hashBits
	hashMask            = (1 << hashBits) - 1
	hashShift           = (hashBits + minMatchLength - 1) / minMatchLength
	maxHashOffset       = 1 << 24

	skipNever = math.MaxInt32
)

// useSSE42 selects the SSE4.2 code paths; set by init in crc32_amd64.go
// or crc32_noasm.go depending on build tags and CPU detection.
var useSSE42 bool

type compressionLevel struct {
	good, lazy, nice, chain, fastSkipHashing, level int
}

// Compression levels have been rebalanced from zlib deflate defaults
// to give a bigger spread in speed and compression.
// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/
var levels = []compressionLevel{
	{}, // 0
	// Level 1-4 uses specialized algorithm - values not used
	{0, 0, 0, 0, 0, 1},
	{0, 0, 0, 0, 0, 2},
	{0, 0, 0, 0, 0, 3},
	{0, 0, 0, 0, 0, 4},
	// For levels 5-6 we don't bother trying with lazy matches.
	// Lazy matching is at least 30% slower, with 1.5% increase.
	{6, 0, 12, 8, 12, 5},
	{8, 0, 24, 16, 16, 6},
	// Levels 7-9 use increasingly more lazy matching
	// and increasingly stringent conditions for "good enough".
	{8, 8, 24, 16, skipNever, 7},
	{10, 16, 24, 64, skipNever, 8},
	{32, 258, 258, 4096, skipNever, 9},
}

// compressor holds all state for a single DEFLATE stream.
type compressor struct {
	compressionLevel

	w          *huffmanBitWriter
	bulkHasher func([]byte, []uint32)

	// compression algorithm
	fill func(*compressor, []byte) int // copy data to window
	step func(*compressor)             // process window
	sync bool                          // requesting flush

	// Input hash chains
	// hashHead[hashValue] contains the largest inputIndex with the specified hash value
	// If hashHead[hashValue] is within the current window, then
	// hashPrev[hashHead[hashValue] & windowMask] contains the previous index
	// with the same hash value.
	chainHead  int
	hashHead   [hashSize]uint32
	hashPrev   [windowSize]uint32
	hashOffset int

	// input window: unprocessed data is window[index:windowEnd]
	index         int
	window        []byte
	windowEnd     int
	blockStart    int  // window index where current tokens start
	byteAvailable bool // if true, still need to process window[index-1].

	// queued output tokens
	tokens tokens

	// deflate state
	length         int
	offset         int
	hash           uint32
	maxInsertIndex int
	err            error
	ii             uint16 // position of last match, intended to overflow to reset.

	snap      snappyEnc
	hashMatch [maxMatchLength + minMatchLength]uint32
}

// fillDeflate copies b into the sliding window, first shifting the window
// (and all hash-chain indices) down by windowSize when the buffer is nearly
// full. Returns the number of bytes consumed from b.
func (d *compressor) fillDeflate(b []byte) int {
	if d.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
		// shift the window by windowSize
		copy(d.window[:], d.window[windowSize:2*windowSize])
		d.index -= windowSize
		d.windowEnd -= windowSize
		if d.blockStart >= windowSize {
			d.blockStart -= windowSize
		} else {
			d.blockStart = math.MaxInt32
		}
		d.hashOffset += windowSize
		if d.hashOffset > maxHashOffset {
			// Rebase hash indices so hashOffset stays bounded; entries
			// at or below delta are outside the window and reset to 0.
			delta := d.hashOffset - 1
			d.hashOffset -= delta
			d.chainHead -= delta
			for i, v := range d.hashPrev {
				if int(v) > delta {
					d.hashPrev[i] = uint32(int(v) - delta)
				} else {
					d.hashPrev[i] = 0
				}
			}
			for i, v := range d.hashHead {
				if int(v) > delta {
					d.hashHead[i] = uint32(int(v) - delta)
				} else {
					d.hashHead[i] = 0
				}
			}
		}
	}
	n := copy(d.window[d.windowEnd:], b)
	d.windowEnd += n
	return n
}

// writeBlock emits the queued tokens as one DEFLATE block ending at window
// index 'index'. A no-op when there is nothing to write (index == 0 and not
// eof). Returns any writer error.
func (d *compressor) writeBlock(tok tokens, index int, eof bool) error {
	if index > 0 || eof {
		var window []byte
		if d.blockStart <= index {
			window = d.window[d.blockStart:index]
		}
		d.blockStart = index
		d.w.writeBlock(tok.tokens[:tok.n], eof, window)
		return d.w.err
	}
	return nil
}

// writeBlockSkip writes the current block and uses the number of tokens
// to determine if the block should be stored on no matches, or
// only huffman encoded.
func (d *compressor) writeBlockSkip(tok tokens, index int, eof bool) error {
	if index > 0 || eof {
		if d.blockStart <= index {
			window := d.window[d.blockStart:index]
			// If we removed less than a 64th of all literals
			// we huffman compress the block.
			if int(tok.n) > len(window)-int(tok.n>>6) {
				d.w.writeBlockHuff(eof, window)
			} else {
				// Write a dynamic huffman block.
				d.w.writeBlockDynamic(tok.tokens[:tok.n], eof, window)
			}
		} else {
			d.w.writeBlock(tok.tokens[:tok.n], eof, nil)
		}
		d.blockStart = index
		return d.w.err
	}
	return nil
}

// fillWindow will fill the current window with the supplied
// dictionary and calculate all hashes.
// This is much faster than doing a full encode.
// Should only be used after a start/reset. func (d *compressor) fillWindow(b []byte) { // Do not fill window if we are in store-only mode, // use constant or Snappy compression. switch d.compressionLevel.level { case 0, 1, 2: return } // If we are given too much, cut it. if len(b) > windowSize { b = b[len(b)-windowSize:] } // Add all to window. n := copy(d.window[d.windowEnd:], b) // Calculate 256 hashes at the time (more L1 cache hits) loops := (n + 256 - minMatchLength) / 256 for j := 0; j < loops; j++ { startindex := j * 256 end := startindex + 256 + minMatchLength - 1 if end > n { end = n } tocheck := d.window[startindex:end] dstSize := len(tocheck) - minMatchLength + 1 if dstSize <= 0 { continue } dst := d.hashMatch[:dstSize] d.bulkHasher(tocheck, dst) var newH uint32 for i, val := range dst { di := i + startindex newH = val & hashMask // Get previous value with the same hash. // Our chain should point to the previous value. d.hashPrev[di&windowMask] = d.hashHead[newH] // Set the head of the hash chain to us. d.hashHead[newH] = uint32(di + d.hashOffset) } d.hash = newH } // Update window information. d.windowEnd += n d.index = n } // Try to find a match starting at index whose length is greater than prevSize. // We only look at chainCount possibilities before giving up. // pos = d.index, prevHead = d.chainHead-d.hashOffset, prevLength=minMatchLength-1, lookahead func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) { minMatchLook := maxMatchLength if lookahead < minMatchLook { minMatchLook = lookahead } win := d.window[0 : pos+minMatchLook] // We quit when we get a match that's at least nice long nice := len(win) - pos if d.nice < nice { nice = d.nice } // If we've got a match that's good enough, only look in 1/4 the chain. 
tries := d.chain length = prevLength if length >= d.good { tries >>= 2 } wEnd := win[pos+length] wPos := win[pos:] minIndex := pos - windowSize for i := prevHead; tries > 0; tries-- { if wEnd == win[i+length] { n := matchLen(win[i:], wPos, minMatchLook) if n > length && (n > minMatchLength || pos-i <= 4096) { length = n offset = pos - i ok = true if n >= nice { // The match is good enough that we don't try to find a better one. break } wEnd = win[pos+n] } } if i == minIndex { // hashPrev[i & windowMask] has already been overwritten, so stop now. break } i = int(d.hashPrev[i&windowMask]) - d.hashOffset if i < minIndex || i < 0 { break } } return } // Try to find a match starting at index whose length is greater than prevSize. // We only look at chainCount possibilities before giving up. // pos = d.index, prevHead = d.chainHead-d.hashOffset, prevLength=minMatchLength-1, lookahead func (d *compressor) findMatchSSE(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) { minMatchLook := maxMatchLength if lookahead < minMatchLook { minMatchLook = lookahead } win := d.window[0 : pos+minMatchLook] // We quit when we get a match that's at least nice long nice := len(win) - pos if d.nice < nice { nice = d.nice } // If we've got a match that's good enough, only look in 1/4 the chain. tries := d.chain length = prevLength if length >= d.good { tries >>= 2 } wEnd := win[pos+length] wPos := win[pos:] minIndex := pos - windowSize for i := prevHead; tries > 0; tries-- { if wEnd == win[i+length] { n := matchLenSSE4(win[i:], wPos, minMatchLook) if n > length && (n > minMatchLength || pos-i <= 4096) { length = n offset = pos - i ok = true if n >= nice { // The match is good enough that we don't try to find a better one. break } wEnd = win[pos+n] } } if i == minIndex { // hashPrev[i & windowMask] has already been overwritten, so stop now. 
break } i = int(d.hashPrev[i&windowMask]) - d.hashOffset if i < minIndex || i < 0 { break } } return } func (d *compressor) writeStoredBlock(buf []byte) error { if d.w.writeStoredHeader(len(buf), false); d.w.err != nil { return d.w.err } d.w.writeBytes(buf) return d.w.err } const hashmul = 0x1e35a7bd // hash4 returns a hash representation of the first 4 bytes // of the supplied slice. // The caller must ensure that len(b) >= 4. func hash4(b []byte) uint32 { return ((uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24) * hashmul) >> (32 - hashBits) } // bulkHash4 will compute hashes using the same // algorithm as hash4 func bulkHash4(b []byte, dst []uint32) { if len(b) < minMatchLength { return } hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 dst[0] = (hb * hashmul) >> (32 - hashBits) end := len(b) - minMatchLength + 1 for i := 1; i < end; i++ { hb = (hb << 8) | uint32(b[i+3]) dst[i] = (hb * hashmul) >> (32 - hashBits) } } // matchLen returns the number of matching bytes in a and b // up to length 'max'. Both slices must be at least 'max' // bytes in size. func matchLen(a, b []byte, max int) int { a = a[:max] b = b[:len(a)] for i, av := range a { if b[i] != av { return i } } return max } func (d *compressor) initDeflate() { d.window = make([]byte, 2*windowSize) d.hashOffset = 1 d.length = minMatchLength - 1 d.offset = 0 d.byteAvailable = false d.index = 0 d.hash = 0 d.chainHead = -1 d.bulkHasher = bulkHash4 if useSSE42 { d.bulkHasher = crc32sseAll } } // Assumes that d.fastSkipHashing != skipNever, // otherwise use deflateLazy func (d *compressor) deflate() { // Sanity enables additional runtime tests. // It's intended to be used during development // to supplement the currently ad-hoc unit tests. 
const sanity = false if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync { return } d.maxInsertIndex = d.windowEnd - (minMatchLength - 1) if d.index < d.maxInsertIndex { d.hash = hash4(d.window[d.index : d.index+minMatchLength]) } for { if sanity && d.index > d.windowEnd { panic("index > windowEnd") } lookahead := d.windowEnd - d.index if lookahead < minMatchLength+maxMatchLength { if !d.sync { return } if sanity && d.index > d.windowEnd { panic("index > windowEnd") } if lookahead == 0 { if d.tokens.n > 0 { if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil { return } d.tokens.n = 0 } return } } if d.index < d.maxInsertIndex { // Update the hash d.hash = hash4(d.window[d.index : d.index+minMatchLength]) ch := d.hashHead[d.hash&hashMask] d.chainHead = int(ch) d.hashPrev[d.index&windowMask] = ch d.hashHead[d.hash&hashMask] = uint32(d.index + d.hashOffset) } d.length = minMatchLength - 1 d.offset = 0 minIndex := d.index - windowSize if minIndex < 0 { minIndex = 0 } if d.chainHead-d.hashOffset >= minIndex && lookahead > minMatchLength-1 { if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok { d.length = newLength d.offset = newOffset } } if d.length >= minMatchLength { d.ii = 0 // There was a match at the previous step, and the current match is // not better. Output the previous match. // "d.length-3" should NOT be "d.length-minMatchLength", since the format always assume 3 d.tokens.tokens[d.tokens.n] = matchToken(uint32(d.length-3), uint32(d.offset-minOffsetSize)) d.tokens.n++ // Insert in the hash table all strings up to the end of the match. // index and index-1 are already inserted. If there is not enough // lookahead, the last two strings are not inserted into the hash // table. 
if d.length <= d.fastSkipHashing { var newIndex int newIndex = d.index + d.length // Calculate missing hashes end := newIndex if end > d.maxInsertIndex { end = d.maxInsertIndex } end += minMatchLength - 1 startindex := d.index + 1 if startindex > d.maxInsertIndex { startindex = d.maxInsertIndex } tocheck := d.window[startindex:end] dstSize := len(tocheck) - minMatchLength + 1 if dstSize > 0 { dst := d.hashMatch[:dstSize] bulkHash4(tocheck, dst) var newH uint32 for i, val := range dst { di := i + startindex newH = val & hashMask // Get previous value with the same hash. // Our chain should point to the previous value. d.hashPrev[di&windowMask] = d.hashHead[newH] // Set the head of the hash chain to us. d.hashHead[newH] = uint32(di + d.hashOffset) } d.hash = newH } d.index = newIndex } else { // For matches this long, we don't bother inserting each individual // item into the table. d.index += d.length if d.index < d.maxInsertIndex { d.hash = hash4(d.window[d.index : d.index+minMatchLength]) } } if d.tokens.n == maxFlateBlockTokens { // The block includes the current character if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil { return } d.tokens.n = 0 } } else { d.ii++ end := d.index + int(d.ii>>uint(d.fastSkipHashing)) + 1 if end > d.windowEnd { end = d.windowEnd } for i := d.index; i < end; i++ { d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i])) d.tokens.n++ if d.tokens.n == maxFlateBlockTokens { if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil { return } d.tokens.n = 0 } } d.index = end } } } // deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, // meaning it always has lazy matching on. func (d *compressor) deflateLazy() { // Sanity enables additional runtime tests. // It's intended to be used during development // to supplement the currently ad-hoc unit tests. 
const sanity = false if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync { return } d.maxInsertIndex = d.windowEnd - (minMatchLength - 1) if d.index < d.maxInsertIndex { d.hash = hash4(d.window[d.index : d.index+minMatchLength]) } for { if sanity && d.index > d.windowEnd { panic("index > windowEnd") } lookahead := d.windowEnd - d.index if lookahead < minMatchLength+maxMatchLength { if !d.sync { return } if sanity && d.index > d.windowEnd { panic("index > windowEnd") } if lookahead == 0 { // Flush current output block if any. if d.byteAvailable { // There is still one pending token that needs to be flushed d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) d.tokens.n++ d.byteAvailable = false } if d.tokens.n > 0 { if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { return } d.tokens.n = 0 } return } } if d.index < d.maxInsertIndex { // Update the hash d.hash = hash4(d.window[d.index : d.index+minMatchLength]) ch := d.hashHead[d.hash&hashMask] d.chainHead = int(ch) d.hashPrev[d.index&windowMask] = ch d.hashHead[d.hash&hashMask] = uint32(d.index + d.hashOffset) } prevLength := d.length prevOffset := d.offset d.length = minMatchLength - 1 d.offset = 0 minIndex := d.index - windowSize if minIndex < 0 { minIndex = 0 } if d.chainHead-d.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok { d.length = newLength d.offset = newOffset } } if prevLength >= minMatchLength && d.length <= prevLength { // There was a match at the previous step, and the current match is // not better. Output the previous match. d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) d.tokens.n++ // Insert in the hash table all strings up to the end of the match. // index and index-1 are already inserted. 
If there is not enough // lookahead, the last two strings are not inserted into the hash // table. var newIndex int newIndex = d.index + prevLength - 1 // Calculate missing hashes end := newIndex if end > d.maxInsertIndex { end = d.maxInsertIndex } end += minMatchLength - 1 startindex := d.index + 1 if startindex > d.maxInsertIndex { startindex = d.maxInsertIndex } tocheck := d.window[startindex:end] dstSize := len(tocheck) - minMatchLength + 1 if dstSize > 0 { dst := d.hashMatch[:dstSize] bulkHash4(tocheck, dst) var newH uint32 for i, val := range dst { di := i + startindex newH = val & hashMask // Get previous value with the same hash. // Our chain should point to the previous value. d.hashPrev[di&windowMask] = d.hashHead[newH] // Set the head of the hash chain to us. d.hashHead[newH] = uint32(di + d.hashOffset) } d.hash = newH } d.index = newIndex d.byteAvailable = false d.length = minMatchLength - 1 if d.tokens.n == maxFlateBlockTokens { // The block includes the current character if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { return } d.tokens.n = 0 } } else { // Reset, if we got a match this run. if d.length >= minMatchLength { d.ii = 0 } // We have a byte waiting. Emit it. if d.byteAvailable { d.ii++ d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) d.tokens.n++ if d.tokens.n == maxFlateBlockTokens { if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { return } d.tokens.n = 0 } d.index++ // If we have a long run of no matches, skip additional bytes // Resets when d.ii overflows after 64KB. 
if d.ii > 31 { n := int(d.ii >> 5) for j := 0; j < n; j++ { if d.index >= d.windowEnd-1 { break } d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) d.tokens.n++ if d.tokens.n == maxFlateBlockTokens { if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { return } d.tokens.n = 0 } d.index++ } // Flush last byte d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) d.tokens.n++ d.byteAvailable = false // d.length = minMatchLength - 1 // not needed, since d.ii is reset above, so it should never be > minMatchLength if d.tokens.n == maxFlateBlockTokens { if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { return } d.tokens.n = 0 } } } else { d.index++ d.byteAvailable = true } } } } // Assumes that d.fastSkipHashing != skipNever, // otherwise use deflateLazySSE func (d *compressor) deflateSSE() { // Sanity enables additional runtime tests. // It's intended to be used during development // to supplement the currently ad-hoc unit tests. 
const sanity = false if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync { return } d.maxInsertIndex = d.windowEnd - (minMatchLength - 1) if d.index < d.maxInsertIndex { d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask } for { if sanity && d.index > d.windowEnd { panic("index > windowEnd") } lookahead := d.windowEnd - d.index if lookahead < minMatchLength+maxMatchLength { if !d.sync { return } if sanity && d.index > d.windowEnd { panic("index > windowEnd") } if lookahead == 0 { if d.tokens.n > 0 { if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil { return } d.tokens.n = 0 } return } } if d.index < d.maxInsertIndex { // Update the hash d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask ch := d.hashHead[d.hash] d.chainHead = int(ch) d.hashPrev[d.index&windowMask] = ch d.hashHead[d.hash] = uint32(d.index + d.hashOffset) } d.length = minMatchLength - 1 d.offset = 0 minIndex := d.index - windowSize if minIndex < 0 { minIndex = 0 } if d.chainHead-d.hashOffset >= minIndex && lookahead > minMatchLength-1 { if newLength, newOffset, ok := d.findMatchSSE(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok { d.length = newLength d.offset = newOffset } } if d.length >= minMatchLength { d.ii = 0 // There was a match at the previous step, and the current match is // not better. Output the previous match. // "d.length-3" should NOT be "d.length-minMatchLength", since the format always assume 3 d.tokens.tokens[d.tokens.n] = matchToken(uint32(d.length-3), uint32(d.offset-minOffsetSize)) d.tokens.n++ // Insert in the hash table all strings up to the end of the match. // index and index-1 are already inserted. If there is not enough // lookahead, the last two strings are not inserted into the hash // table. 
if d.length <= d.fastSkipHashing { var newIndex int newIndex = d.index + d.length // Calculate missing hashes end := newIndex if end > d.maxInsertIndex { end = d.maxInsertIndex } end += minMatchLength - 1 startindex := d.index + 1 if startindex > d.maxInsertIndex { startindex = d.maxInsertIndex } tocheck := d.window[startindex:end] dstSize := len(tocheck) - minMatchLength + 1 if dstSize > 0 { dst := d.hashMatch[:dstSize] crc32sseAll(tocheck, dst) var newH uint32 for i, val := range dst { di := i + startindex newH = val & hashMask // Get previous value with the same hash. // Our chain should point to the previous value. d.hashPrev[di&windowMask] = d.hashHead[newH] // Set the head of the hash chain to us. d.hashHead[newH] = uint32(di + d.hashOffset) } d.hash = newH } d.index = newIndex } else { // For matches this long, we don't bother inserting each individual // item into the table. d.index += d.length if d.index < d.maxInsertIndex { d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask } } if d.tokens.n == maxFlateBlockTokens { // The block includes the current character if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil { return } d.tokens.n = 0 } } else { d.ii++ end := d.index + int(d.ii>>5) + 1 if end > d.windowEnd { end = d.windowEnd } for i := d.index; i < end; i++ { d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i])) d.tokens.n++ if d.tokens.n == maxFlateBlockTokens { if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil { return } d.tokens.n = 0 } } d.index = end } } } // deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, // meaning it always has lazy matching on. func (d *compressor) deflateLazySSE() { // Sanity enables additional runtime tests. // It's intended to be used during development // to supplement the currently ad-hoc unit tests. 
const sanity = false if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync { return } d.maxInsertIndex = d.windowEnd - (minMatchLength - 1) if d.index < d.maxInsertIndex { d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask } for { if sanity && d.index > d.windowEnd { panic("index > windowEnd") } lookahead := d.windowEnd - d.index if lookahead < minMatchLength+maxMatchLength { if !d.sync { return } if sanity && d.index > d.windowEnd { panic("index > windowEnd") } if lookahead == 0 { // Flush current output block if any. if d.byteAvailable { // There is still one pending token that needs to be flushed d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) d.tokens.n++ d.byteAvailable = false } if d.tokens.n > 0 { if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { return } d.tokens.n = 0 } return } } if d.index < d.maxInsertIndex { // Update the hash d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask ch := d.hashHead[d.hash] d.chainHead = int(ch) d.hashPrev[d.index&windowMask] = ch d.hashHead[d.hash] = uint32(d.index + d.hashOffset) } prevLength := d.length prevOffset := d.offset d.length = minMatchLength - 1 d.offset = 0 minIndex := d.index - windowSize if minIndex < 0 { minIndex = 0 } if d.chainHead-d.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { if newLength, newOffset, ok := d.findMatchSSE(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok { d.length = newLength d.offset = newOffset } } if prevLength >= minMatchLength && d.length <= prevLength { // There was a match at the previous step, and the current match is // not better. Output the previous match. d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) d.tokens.n++ // Insert in the hash table all strings up to the end of the match. // index and index-1 are already inserted. 
If there is not enough // lookahead, the last two strings are not inserted into the hash // table. var newIndex int newIndex = d.index + prevLength - 1 // Calculate missing hashes end := newIndex if end > d.maxInsertIndex { end = d.maxInsertIndex } end += minMatchLength - 1 startindex := d.index + 1 if startindex > d.maxInsertIndex { startindex = d.maxInsertIndex } tocheck := d.window[startindex:end] dstSize := len(tocheck) - minMatchLength + 1 if dstSize > 0 { dst := d.hashMatch[:dstSize] crc32sseAll(tocheck, dst) var newH uint32 for i, val := range dst { di := i + startindex newH = val & hashMask // Get previous value with the same hash. // Our chain should point to the previous value. d.hashPrev[di&windowMask] = d.hashHead[newH] // Set the head of the hash chain to us. d.hashHead[newH] = uint32(di + d.hashOffset) } d.hash = newH } d.index = newIndex d.byteAvailable = false d.length = minMatchLength - 1 if d.tokens.n == maxFlateBlockTokens { // The block includes the current character if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { return } d.tokens.n = 0 } } else { // Reset, if we got a match this run. if d.length >= minMatchLength { d.ii = 0 } // We have a byte waiting. Emit it. if d.byteAvailable { d.ii++ d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) d.tokens.n++ if d.tokens.n == maxFlateBlockTokens { if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { return } d.tokens.n = 0 } d.index++ // If we have a long run of no matches, skip additional bytes // Resets when d.ii overflows after 64KB. 
				if d.ii > 31 {
					n := int(d.ii >> 6)
					for j := 0; j < n; j++ {
						if d.index >= d.windowEnd-1 {
							break
						}

						d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
						d.tokens.n++
						if d.tokens.n == maxFlateBlockTokens {
							if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
								return
							}
							d.tokens.n = 0
						}
						d.index++
					}
					// Flush last byte
					d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
					d.tokens.n++
					d.byteAvailable = false
					// d.length = minMatchLength - 1 // not needed, since d.ii is reset above, so it should never be > minMatchLength
					if d.tokens.n == maxFlateBlockTokens {
						if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
							return
						}
						d.tokens.n = 0
					}
				}
			} else {
				// No byte pending: remember the current one and advance.
				d.index++
				d.byteAvailable = true
			}
		}
	}
}

// store stores the current window as an uncompressed (stored) block when it
// is full or when a flush/close has been requested.
func (d *compressor) store() {
	if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) {
		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
		d.windowEnd = 0
	}
}

// fillWindow will fill the buffer with data for huffman-only compression.
// The number of bytes copied is returned.
func (d *compressor) fillBlock(b []byte) int {
	n := copy(d.window[d.windowEnd:], b)
	d.windowEnd += n
	return n
}

// storeHuff will compress and store the currently added data,
// if enough has been accumulated or we at the end of the stream.
// Any error that occurred will be in d.err
func (d *compressor) storeHuff() {
	if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
		return
	}
	d.w.writeBlockHuff(false, d.window[:d.windowEnd])
	d.err = d.w.err
	d.windowEnd = 0
}

// storeSnappy will compress and store the currently added data,
// if enough has been accumulated or we at the end of the stream.
// Any error that occurred will be in d.err
func (d *compressor) storeSnappy() {
	// We only compress if we have maxStoreBlockSize.
	if d.windowEnd < maxStoreBlockSize {
		if !d.sync {
			return
		}
		// Handle extremely small sizes.
		if d.windowEnd < 128 {
			if d.windowEnd == 0 {
				return
			}
			if d.windowEnd <= 32 {
				// Tiny blocks: raw stored block is cheapest.
				d.err = d.writeStoredBlock(d.window[:d.windowEnd])
				d.tokens.n = 0
				d.windowEnd = 0
			} else {
				d.w.writeBlockHuff(false, d.window[:d.windowEnd])
				d.err = d.w.err
			}
			d.tokens.n = 0
			d.windowEnd = 0
			d.snap.Reset()
			return
		}
	}
	d.snap.Encode(&d.tokens, d.window[:d.windowEnd])
	// If we made zero matches, store the block as is.
	if int(d.tokens.n) == d.windowEnd {
		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
		// If we removed less than 1/16th, huffman compress the block.
	} else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) {
		d.w.writeBlockHuff(false, d.window[:d.windowEnd])
		d.err = d.w.err
	} else {
		d.w.writeBlockDynamic(d.tokens.tokens[:d.tokens.n], false, d.window[:d.windowEnd])
		d.err = d.w.err
	}
	d.tokens.n = 0
	d.windowEnd = 0
}

// write will add input byte to the stream.
// Unless an error occurs all bytes will be consumed.
func (d *compressor) write(b []byte) (n int, err error) {
	if d.err != nil {
		return 0, d.err
	}
	n = len(b)
	for len(b) > 0 {
		d.step(d)
		b = b[d.fill(d, b):]
		if d.err != nil {
			return 0, d.err
		}
	}
	return n, d.err
}

// syncFlush compresses all pending input and emits an empty stored block
// (a sync marker) so a decompressor can reconstruct everything written so far.
func (d *compressor) syncFlush() error {
	d.sync = true
	if d.err != nil {
		return d.err
	}
	d.step(d)
	if d.err == nil {
		d.w.writeStoredHeader(0, false)
		d.w.flush()
		d.err = d.w.err
	}
	d.sync = false
	return d.err
}

// init configures the compressor for the given level, selecting the
// fill and step strategies (store, huffman-only, snappy, or full deflate).
func (d *compressor) init(w io.Writer, level int) (err error) {
	d.w = newHuffmanBitWriter(w)

	switch {
	case level == NoCompression:
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillBlock
		d.step = (*compressor).store
	case level == ConstantCompression:
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillBlock
		d.step = (*compressor).storeHuff
	case level >= 1 && level <= 4:
		d.snap = newSnappy(level)
		d.window = make([]byte, maxStoreBlockSize)
		d.fill = (*compressor).fillBlock
		d.step = (*compressor).storeSnappy
	case level == DefaultCompression:
		level = 5
		fallthrough
	case 5 <= level && level <= 9:
		d.compressionLevel = levels[level]
		d.initDeflate()
		d.fill = (*compressor).fillDeflate
		// Pick the lazy/greedy loop, with SSE4.2 CRC hashing when available.
		if d.fastSkipHashing == skipNever {
			if useSSE42 {
				d.step = (*compressor).deflateLazySSE
			} else {
				d.step = (*compressor).deflateLazy
			}
		} else {
			if useSSE42 {
				d.step = (*compressor).deflateSSE
			} else {
				d.step = (*compressor).deflate
			}
		}
	default:
		return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
	}
	return nil
}

// reset the state of the compressor.
func (d *compressor) reset(w io.Writer) {
	d.w.reset(w)
	d.sync = false
	d.err = nil
	// We only need to reset a few things for Snappy.
	if d.snap != nil {
		d.snap.Reset()
		d.windowEnd = 0
		d.tokens.n = 0
		return
	}
	switch d.compressionLevel.chain {
	case 0:
		// level was NoCompression or ConstantCompression.
		d.windowEnd = 0
	default:
		d.chainHead = -1
		for i := range d.hashHead {
			d.hashHead[i] = 0
		}
		for i := range d.hashPrev {
			d.hashPrev[i] = 0
		}
		d.hashOffset = 1
		d.index, d.windowEnd = 0, 0
		d.blockStart, d.byteAvailable = 0, false
		d.tokens.n = 0
		d.length = minMatchLength - 1
		d.offset = 0
		d.hash = 0
		d.ii = 0
		d.maxInsertIndex = 0
	}
}

// close compresses any pending input and writes the final (EOF) stored header.
func (d *compressor) close() error {
	if d.err != nil {
		return d.err
	}
	d.sync = true
	d.step(d)
	if d.err != nil {
		return d.err
	}
	if d.w.writeStoredHeader(0, true); d.w.err != nil {
		return d.w.err
	}
	d.w.flush()
	return d.w.err
}

// NewWriter returns a new Writer compressing data at the given level.
// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression);
// higher levels typically run slower but compress more.
// Level 0 (NoCompression) does not attempt any compression; it only adds the
// necessary DEFLATE framing.
// Level -1 (DefaultCompression) uses the default compression level.
// Level -2 (ConstantCompression) will use Huffman compression only, giving
// a very fast compression for all types of input, but sacrificing considerable
// compression efficiency.
//
// If level is in the range [-2, 9] then the error returned will be nil.
// Otherwise the error returned will be non-nil.
func NewWriter(w io.Writer, level int) (*Writer, error) {
	var dw Writer
	if err := dw.d.init(w, level); err != nil {
		return nil, err
	}
	return &dw, nil
}

// NewWriterDict is like NewWriter but initializes the new
// Writer with a preset dictionary. The returned Writer behaves
// as if the dictionary had been written to it without producing
// any compressed output. The compressed data written to w
// can only be decompressed by a Reader initialized with the
// same dictionary.
func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) {
	dw := &dictWriter{w}
	zw, err := NewWriter(dw, level)
	if err != nil {
		return nil, err
	}
	zw.d.fillWindow(dict)
	zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method.
	return zw, err
}

// dictWriter wraps the user's writer so Reset can detect that the
// Writer was created with NewWriterDict.
type dictWriter struct {
	w io.Writer
}

func (w *dictWriter) Write(b []byte) (n int, err error) {
	return w.w.Write(b)
}

// A Writer takes data written to it and writes the compressed
// form of that data to an underlying writer (see NewWriter).
type Writer struct {
	d    compressor
	dict []byte // preset dictionary, kept for Reset
}

// Write writes data to w, which will eventually write the
// compressed form of data to its underlying writer.
func (w *Writer) Write(data []byte) (n int, err error) {
	return w.d.write(data)
}

// Flush flushes any pending data to the underlying writer.
// It is useful mainly in compressed network protocols, to ensure that
// a remote reader has enough data to reconstruct a packet.
// Flush does not return until the data has been written.
// Calling Flush when there is no pending data still causes the Writer
// to emit a sync marker of at least 4 bytes.
// If the underlying writer returns an error, Flush returns that error.
//
// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
func (w *Writer) Flush() error {
	// For more about flushing:
	// http://www.bolet.org/~pornin/deflate-flush.html
	return w.d.syncFlush()
}

// Close flushes and closes the writer.
func (w *Writer) Close() error {
	return w.d.close()
}

// Reset discards the writer's state and makes it equivalent to
// the result of NewWriter or NewWriterDict called with dst
// and w's level and dictionary.
func (w *Writer) Reset(dst io.Writer) {
	if dw, ok := w.d.w.writer.(*dictWriter); ok {
		// w was created with NewWriterDict
		dw.w = dst
		w.d.reset(dw)
		w.d.fillWindow(w.dict)
	} else {
		// w was created with NewWriter
		w.d.reset(dst)
	}
}

// ResetDict discards the writer's state and makes it equivalent to
// the result of NewWriter or NewWriterDict called with dst
// and w's level, but sets a specific dictionary.
func (w *Writer) ResetDict(dst io.Writer, dict []byte) {
	w.dict = dict
	w.d.reset(dst)
	w.d.fillWindow(w.dict)
}

================================================
FILE: vendor/github.com/klauspost/compress/flate/dict_decoder.go
================================================

// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package flate

// dictDecoder implements the LZ77 sliding dictionary as used in decompression.
// LZ77 decompresses data through sequences of two forms of commands:
//
// * Literal insertions: Runs of one or more symbols are inserted into the data
// stream as is. This is accomplished through the writeByte method for a
// single symbol, or combinations of writeSlice/writeMark for multiple symbols.
// Any valid stream must start with a literal insertion if no preset dictionary
// is used.
//
// * Backward copies: Runs of one or more symbols are copied from previously
// emitted data. Backward copies come as the tuple (dist, length) where dist
// determines how far back in the stream to copy from and length determines how
// many bytes to copy. Note that it is valid for the length to be greater than
// the distance.
// Since LZ77 uses forward copies, that situation is used to
// perform a form of run-length encoding on repeated runs of symbols.
// The writeCopy and tryWriteCopy are used to implement this command.
//
// For performance reasons, this implementation performs little to no sanity
// checks about the arguments. As such, the invariants documented for each
// method call must be respected.
type dictDecoder struct {
	hist []byte // Sliding window history

	// Invariant: 0 <= rdPos <= wrPos <= len(hist)
	wrPos int  // Current output position in buffer
	rdPos int  // Have emitted hist[:rdPos] already
	full  bool // Has a full window length been written yet?
}

// init initializes dictDecoder to have a sliding window dictionary of the given
// size. If a preset dict is provided, it will initialize the dictionary with
// the contents of dict.
func (dd *dictDecoder) init(size int, dict []byte) {
	*dd = dictDecoder{hist: dd.hist}

	if cap(dd.hist) < size {
		dd.hist = make([]byte, size)
	}
	dd.hist = dd.hist[:size]

	// Only the most recent window-sized suffix of the dictionary matters.
	if len(dict) > len(dd.hist) {
		dict = dict[len(dict)-len(dd.hist):]
	}
	dd.wrPos = copy(dd.hist, dict)
	if dd.wrPos == len(dd.hist) {
		dd.wrPos = 0
		dd.full = true
	}
	dd.rdPos = dd.wrPos
}

// histSize reports the total amount of historical data in the dictionary.
func (dd *dictDecoder) histSize() int {
	if dd.full {
		return len(dd.hist)
	}
	return dd.wrPos
}

// availRead reports the number of bytes that can be flushed by readFlush.
func (dd *dictDecoder) availRead() int {
	return dd.wrPos - dd.rdPos
}

// availWrite reports the available amount of output buffer space.
func (dd *dictDecoder) availWrite() int {
	return len(dd.hist) - dd.wrPos
}

// writeSlice returns a slice of the available buffer to write data to.
//
// This invariant will be kept: len(s) <= availWrite()
func (dd *dictDecoder) writeSlice() []byte {
	return dd.hist[dd.wrPos:]
}

// writeMark advances the writer pointer by cnt.
//
// This invariant must be kept: 0 <= cnt <= availWrite()
func (dd *dictDecoder) writeMark(cnt int) {
	dd.wrPos += cnt
}

// writeByte writes a single byte to the dictionary.
//
// This invariant must be kept: 0 < availWrite()
func (dd *dictDecoder) writeByte(c byte) {
	dd.hist[dd.wrPos] = c
	dd.wrPos++
}

// writeCopy copies a string at a given (dist, length) to the output.
// This returns the number of bytes copied and may be less than the requested
// length if the available space in the output buffer is too small.
//
// This invariant must be kept: 0 < dist <= histSize()
func (dd *dictDecoder) writeCopy(dist, length int) int {
	dstBase := dd.wrPos
	dstPos := dstBase
	srcPos := dstPos - dist
	endPos := dstPos + length
	if endPos > len(dd.hist) {
		endPos = len(dd.hist)
	}

	// Copy non-overlapping section after destination position.
	//
	// This section is non-overlapping in that the copy length for this section
	// is always less than or equal to the backwards distance. This can occur
	// if a distance refers to data that wraps-around in the buffer.
	// Thus, a backwards copy is performed here; that is, the exact bytes in
	// the source prior to the copy is placed in the destination.
	if srcPos < 0 {
		srcPos += len(dd.hist)
		dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:])
		srcPos = 0
	}

	// Copy possibly overlapping section before destination position.
	//
	// This section can overlap if the copy length for this section is larger
	// than the backwards distance. This is allowed by LZ77 so that repeated
	// strings can be succinctly represented using (dist, length) pairs.
	// Thus, a forwards copy is performed here; that is, the bytes copied is
	// possibly dependent on the resulting bytes in the destination as the copy
	// progresses along. This is functionally equivalent to the following:
	//
	//	for i := 0; i < endPos-dstPos; i++ {
	//		dd.hist[dstPos+i] = dd.hist[srcPos+i]
	//	}
	//	dstPos = endPos
	//
	for dstPos < endPos {
		dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
	}

	dd.wrPos = dstPos
	return dstPos - dstBase
}

// tryWriteCopy tries to copy a string at a given (distance, length) to the
// output. This specialized version is optimized for short distances.
//
// This method is designed to be inlined for performance reasons.
//
// This invariant must be kept: 0 < dist <= histSize()
func (dd *dictDecoder) tryWriteCopy(dist, length int) int {
	dstPos := dd.wrPos
	endPos := dstPos + length
	if dstPos < dist || endPos > len(dd.hist) {
		return 0
	}
	dstBase := dstPos
	srcPos := dstPos - dist

	// Copy possibly overlapping section before destination position.
loop:
	dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos])
	if dstPos < endPos {
		goto loop // Avoid for-loop so that this function can be inlined
	}

	dd.wrPos = dstPos
	return dstPos - dstBase
}

// readFlush returns a slice of the historical buffer that is ready to be
// emitted to the user. The data returned by readFlush must be fully consumed
// before calling any other dictDecoder methods.
func (dd *dictDecoder) readFlush() []byte {
	toRead := dd.hist[dd.rdPos:dd.wrPos]
	dd.rdPos = dd.wrPos
	if dd.wrPos == len(dd.hist) {
		dd.wrPos, dd.rdPos = 0, 0
		dd.full = true
	}
	return toRead
}

================================================
FILE: vendor/github.com/klauspost/compress/flate/gen.go
================================================

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore // This program generates fixedhuff.go // Invoke as // // go run gen.go -output fixedhuff.go package main import ( "bytes" "flag" "fmt" "go/format" "io/ioutil" "log" ) var filename = flag.String("output", "fixedhuff.go", "output file name") const maxCodeLen = 16 // Note: the definition of the huffmanDecoder struct is copied from // inflate.go, as it is private to the implementation. // chunk & 15 is number of bits // chunk >> 4 is value, including table link const ( huffmanChunkBits = 9 huffmanNumChunks = 1 << huffmanChunkBits huffmanCountMask = 15 huffmanValueShift = 4 ) type huffmanDecoder struct { min int // the minimum code length chunks [huffmanNumChunks]uint32 // chunks as described above links [][]uint32 // overflow links linkMask uint32 // mask the width of the link table } // Initialize Huffman decoding tables from array of code lengths. // Following this function, h is guaranteed to be initialized into a complete // tree (i.e., neither over-subscribed nor under-subscribed). The exception is a // degenerate case where the tree has only a single symbol with length 1. Empty // trees are permitted. func (h *huffmanDecoder) init(bits []int) bool { // Sanity enables additional runtime tests during Huffman // table construction. It's intended to be used during // development to supplement the currently ad-hoc unit tests. const sanity = false if h.min != 0 { *h = huffmanDecoder{} } // Count number of codes of each length, // compute min and max length. var count [maxCodeLen]int var min, max int for _, n := range bits { if n == 0 { continue } if min == 0 || n < min { min = n } if n > max { max = n } count[n]++ } // Empty tree. The decompressor.huffSym function will fail later if the tree // is used. Technically, an empty tree is only valid for the HDIST tree and // not the HCLEN and HLIT tree. 
However, a stream with an empty HCLEN tree // is guaranteed to fail since it will attempt to use the tree to decode the // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is // guaranteed to fail later since the compressed data section must be // composed of at least one symbol (the end-of-block marker). if max == 0 { return true } code := 0 var nextcode [maxCodeLen]int for i := min; i <= max; i++ { code <<= 1 nextcode[i] = code code += count[i] } // Check that the coding is complete (i.e., that we've // assigned all 2-to-the-max possible bit sequences). // Exception: To be compatible with zlib, we also need to // accept degenerate single-code codings. See also // TestDegenerateHuffmanCoding. if code != 1< huffmanChunkBits { numLinks := 1 << (uint(max) - huffmanChunkBits) h.linkMask = uint32(numLinks - 1) // create link tables link := nextcode[huffmanChunkBits+1] >> 1 h.links = make([][]uint32, huffmanNumChunks-link) for j := uint(link); j < huffmanNumChunks; j++ { reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8 reverse >>= uint(16 - huffmanChunkBits) off := j - uint(link) if sanity && h.chunks[reverse] != 0 { panic("impossible: overwriting existing chunk") } h.chunks[reverse] = uint32(off<>8]) | int(reverseByte[code&0xff])<<8 reverse >>= uint(16 - n) if n <= huffmanChunkBits { for off := reverse; off < len(h.chunks); off += 1 << uint(n) { // We should never need to overwrite // an existing chunk. Also, 0 is // never a valid chunk, because the // lower 4 "count" bits should be // between 1 and 15. if sanity && h.chunks[off] != 0 { panic("impossible: overwriting existing chunk") } h.chunks[off] = chunk } } else { j := reverse & (huffmanNumChunks - 1) if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 { // Longer codes should have been // associated with a link table above. 
panic("impossible: not an indirect chunk") } value := h.chunks[j] >> huffmanValueShift linktab := h.links[value] reverse >>= huffmanChunkBits for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) { if sanity && linktab[off] != 0 { panic("impossible: overwriting existing chunk") } linktab[off] = chunk } } } if sanity { // Above we've sanity checked that we never overwrote // an existing entry. Here we additionally check that // we filled the tables completely. for i, chunk := range h.chunks { if chunk == 0 { // As an exception, in the degenerate // single-code case, we allow odd // chunks to be missing. if code == 1 && i%2 == 1 { continue } panic("impossible: missing chunk") } } for _, linktab := range h.links { for _, chunk := range linktab { if chunk == 0 { panic("impossible: missing chunk") } } } } return true } func main() { flag.Parse() var h huffmanDecoder var bits [288]int initReverseByte() for i := 0; i < 144; i++ { bits[i] = 8 } for i := 144; i < 256; i++ { bits[i] = 9 } for i := 256; i < 280; i++ { bits[i] = 7 } for i := 280; i < 288; i++ { bits[i] = 8 } h.init(bits[:]) if h.links != nil { log.Fatal("Unexpected links table in fixed Huffman decoder") } var buf bytes.Buffer fmt.Fprintf(&buf, `// Copyright 2013 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.`+"\n\n")
	fmt.Fprintln(&buf, "package flate")
	fmt.Fprintln(&buf)
	fmt.Fprintln(&buf, "// autogenerated by go run gen.go -output fixedhuff.go, DO NOT EDIT")
	fmt.Fprintln(&buf)
	fmt.Fprintln(&buf, "var fixedHuffmanDecoder = huffmanDecoder{")
	fmt.Fprintf(&buf, "\t%d,\n", h.min)
	fmt.Fprintln(&buf, "\t[huffmanNumChunks]uint32{")
	// Emit the chunk table eight entries per line.
	for i := 0; i < huffmanNumChunks; i++ {
		if i&7 == 0 {
			fmt.Fprintf(&buf, "\t\t")
		} else {
			fmt.Fprintf(&buf, " ")
		}
		fmt.Fprintf(&buf, "0x%04x,", h.chunks[i])
		if i&7 == 7 {
			fmt.Fprintln(&buf)
		}
	}
	fmt.Fprintln(&buf, "\t},")
	fmt.Fprintln(&buf, "\tnil, 0,")
	fmt.Fprintln(&buf, "}")
	// gofmt the generated source before writing it out.
	data, err := format.Source(buf.Bytes())
	if err != nil {
		log.Fatal(err)
	}
	err = ioutil.WriteFile(*filename, data, 0644)
	if err != nil {
		log.Fatal(err)
	}
}

var reverseByte [256]byte

// initReverseByte fills reverseByte with the bit-reversal of every byte value.
func initReverseByte() {
	for x := 0; x < 256; x++ {
		var result byte
		for i := uint(0); i < 8; i++ {
			result |= byte(((x >> i) & 1) << (7 - i))
		}
		reverseByte[x] = result
	}
}

================================================
FILE: vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go
================================================

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package flate

import (
	"io"
)

const (
	// The largest offset code.
	offsetCodeCount = 30

	// The special code used to mark the end of a block.
	endBlockMarker = 256

	// The first length code.
	lengthCodesStart = 257

	// The number of codegen codes.
	codegenCodeCount = 19
	badCode          = 255

	// bufferFlushSize indicates the buffer size
	// after which bytes are flushed to the writer.
	// Should preferably be a multiple of 6, since
	// we accumulate 6 bytes between writes to the buffer.
	bufferFlushSize = 240

	// bufferSize is the actual output byte buffer size.
	// It must have additional headroom for a flush
	// which can contain up to 8 bytes.
	bufferSize = bufferFlushSize + 8
)

// The number of extra bits needed by length code X - LENGTH_CODES_START.
var lengthExtraBits = []int8{
	/* 257 */ 0, 0, 0,
	/* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2,
	/* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,
	/* 280 */ 4, 5, 5, 5, 5, 0,
}

// The length indicated by length code X - LENGTH_CODES_START.
var lengthBase = []uint32{
	0, 1, 2, 3, 4, 5, 6, 7, 8, 10,
	12, 14, 16, 20, 24, 28, 32, 40, 48, 56,
	64, 80, 96, 112, 128, 160, 192, 224, 255,
}

// offset code word extra bits.
var offsetExtraBits = []int8{
	0, 0, 0, 0, 1, 1, 2, 2, 3, 3,
	4, 4, 5, 5, 6, 6, 7, 7, 8, 8,
	9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
	/* extended window */
	14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20,
}

var offsetBase = []uint32{
	/* normal deflate */
	0x000000, 0x000001, 0x000002, 0x000003, 0x000004,
	0x000006, 0x000008, 0x00000c, 0x000010, 0x000018,
	0x000020, 0x000030, 0x000040, 0x000060, 0x000080,
	0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300,
	0x000400, 0x000600, 0x000800, 0x000c00, 0x001000,
	0x001800, 0x002000, 0x003000, 0x004000, 0x006000,

	/* extended window */
	0x008000, 0x00c000, 0x010000, 0x018000, 0x020000,
	0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000,
	0x100000, 0x180000, 0x200000, 0x300000,
}

// The odd order in which the codegen code sizes are written.
var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}

type huffmanBitWriter struct {
	// writer is the underlying writer.
	// Do not use it directly; use the write method, which ensures
	// that Write errors are sticky.
	writer io.Writer

	// Data waiting to be written is bytes[0:nbytes]
	// and then the low nbits of bits.
	bits            uint64
	nbits           uint
	bytes           [bufferSize]byte
	codegenFreq     [codegenCodeCount]int32
	nbytes          int
	literalFreq     []int32
	offsetFreq      []int32
	codegen         []uint8
	literalEncoding *huffmanEncoder
	offsetEncoding  *huffmanEncoder
	codegenEncoding *huffmanEncoder
	err             error
}

func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter {
	return &huffmanBitWriter{
		writer:          w,
		literalFreq:     make([]int32, maxNumLit),
		offsetFreq:      make([]int32, offsetCodeCount),
		codegen:         make([]uint8, maxNumLit+offsetCodeCount+1),
		literalEncoding: newHuffmanEncoder(maxNumLit),
		codegenEncoding: newHuffmanEncoder(codegenCodeCount),
		offsetEncoding:  newHuffmanEncoder(offsetCodeCount),
	}
}

func (w *huffmanBitWriter) reset(writer io.Writer) {
	w.writer = writer
	w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil
	w.bytes = [bufferSize]byte{}
}

// flush writes any buffered bytes plus the partially-filled bit buffer.
func (w *huffmanBitWriter) flush() {
	if w.err != nil {
		w.nbits = 0
		return
	}
	n := w.nbytes
	for w.nbits != 0 {
		w.bytes[n] = byte(w.bits)
		w.bits >>= 8
		if w.nbits > 8 { // Avoid underflow
			w.nbits -= 8
		} else {
			w.nbits = 0
		}
		n++
	}
	w.bits = 0
	w.write(w.bytes[:n])
	w.nbytes = 0
}

// write forwards b to the underlying writer, keeping the first error sticky.
func (w *huffmanBitWriter) write(b []byte) {
	if w.err != nil {
		return
	}
	_, w.err = w.writer.Write(b)
}

// writeBits appends the low nb bits of b to the bit stream, flushing six
// bytes at a time once at least 48 bits have accumulated.
func (w *huffmanBitWriter) writeBits(b int32, nb uint) {
	if w.err != nil {
		return
	}
	w.bits |= uint64(b) << w.nbits
	w.nbits += nb
	if w.nbits >= 48 {
		bits := w.bits
		w.bits >>= 48
		w.nbits -= 48
		n := w.nbytes
		bytes := w.bytes[n : n+6]
		bytes[0] = byte(bits)
		bytes[1] = byte(bits >> 8)
		bytes[2] = byte(bits >> 16)
		bytes[3] = byte(bits >> 24)
		bytes[4] = byte(bits >> 32)
		bytes[5] = byte(bits >> 40)
		n += 6
		if n >= bufferFlushSize {
			w.write(w.bytes[:n])
			n = 0
		}
		w.nbytes = n
	}
}

// writeBytes writes whole bytes; the bit buffer must be byte-aligned.
func (w *huffmanBitWriter) writeBytes(bytes []byte) {
	if w.err != nil {
		return
	}
	n := w.nbytes
	if w.nbits&7 != 0 {
		w.err = InternalError("writeBytes with unfinished bits")
		return
	}
	for w.nbits != 0 {
		w.bytes[n] = byte(w.bits)
		w.bits >>= 8
		w.nbits -= 8
		n++
	}
	if n != 0 {
		w.write(w.bytes[:n])
	}
	w.nbytes = 0
	w.write(bytes)
}

// RFC 1951 3.2.7 specifies a
// special run-length encoding for specifying
// the literal and offset lengths arrays (which are concatenated into a single
// array). This method generates that run-length encoding.
//
// The result is written into the codegen array, and the frequencies
// of each code is written into the codegenFreq array.
// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional
// information. Code badCode is an end marker
//
//	numLiterals      The number of literals in literalEncoding
//	numOffsets       The number of offsets in offsetEncoding
//	litenc, offenc   The literal and offset encoder to use
func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) {
	for i := range w.codegenFreq {
		w.codegenFreq[i] = 0
	}
	// Note that we are using codegen both as a temporary variable for holding
	// a copy of the frequencies, and as the place where we put the result.
	// This is fine because the output is always shorter than the input used
	// so far.
	codegen := w.codegen // cache
	// Copy the concatenated code sizes to codegen. Put a marker at the end.
	cgnl := codegen[:numLiterals]
	for i := range cgnl {
		cgnl[i] = uint8(litEnc.codes[i].len)
	}

	cgnl = codegen[numLiterals : numLiterals+numOffsets]
	for i := range cgnl {
		cgnl[i] = uint8(offEnc.codes[i].len)
	}
	codegen[numLiterals+numOffsets] = badCode

	size := codegen[0]
	count := 1
	outIndex := 0
	for inIndex := 1; size != badCode; inIndex++ {
		// INVARIANT: We have seen "count" copies of size that have not yet
		// had output generated for them.
		nextSize := codegen[inIndex]
		if nextSize == size {
			count++
			continue
		}
		// We need to generate codegen indicating "count" of size.
		if size != 0 {
			codegen[outIndex] = size
			outIndex++
			w.codegenFreq[size]++
			count--
			// Code 16 repeats the previous length 3-6 times.
			for count >= 3 {
				n := 6
				if n > count {
					n = count
				}
				codegen[outIndex] = 16
				outIndex++
				codegen[outIndex] = uint8(n - 3)
				outIndex++
				w.codegenFreq[16]++
				count -= n
			}
		} else {
			// Code 18 emits 11-138 zero lengths.
			for count >= 11 {
				n := 138
				if n > count {
					n = count
				}
				codegen[outIndex] = 18
				outIndex++
				codegen[outIndex] = uint8(n - 11)
				outIndex++
				w.codegenFreq[18]++
				count -= n
			}
			if count >= 3 {
				// count >= 3 && count <= 10
				codegen[outIndex] = 17
				outIndex++
				codegen[outIndex] = uint8(count - 3)
				outIndex++
				w.codegenFreq[17]++
				count = 0
			}
		}
		count--
		// Any remaining (0-2) repeats are emitted literally.
		for ; count >= 0; count-- {
			codegen[outIndex] = size
			outIndex++
			w.codegenFreq[size]++
		}
		// Set up invariant for next time through the loop.
		size = nextSize
		count = 1
	}
	// Marker indicating the end of the codegen.
	codegen[outIndex] = badCode
}

// dynamicSize returns the size of dynamically encoded data in bits.
func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) {
	numCodegens = len(w.codegenFreq)
	for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 {
		numCodegens--
	}
	header := 3 + 5 + 5 + 4 + (3 * numCodegens) +
		w.codegenEncoding.bitLength(w.codegenFreq[:]) +
		int(w.codegenFreq[16])*2 +
		int(w.codegenFreq[17])*3 +
		int(w.codegenFreq[18])*7
	size = header +
		litEnc.bitLength(w.literalFreq) +
		offEnc.bitLength(w.offsetFreq) +
		extraBits

	return size, numCodegens
}

// fixedSize returns the size of fixed-Huffman encoded data in bits.
func (w *huffmanBitWriter) fixedSize(extraBits int) int {
	return 3 +
		fixedLiteralEncoding.bitLength(w.literalFreq) +
		fixedOffsetEncoding.bitLength(w.offsetFreq) +
		extraBits
}

// storedSize calculates the stored size, including header.
// The function returns the size in bits and whether the block
// fits inside a single block.
func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) {
	if in == nil {
		return 0, false
	}
	if len(in) <= maxStoreBlockSize {
		// 5 header bytes (1 type byte + LEN + NLEN) plus the payload.
		return (len(in) + 5) * 8, true
	}
	return 0, false
}

// writeCode appends a single Huffman code to the bit accumulator,
// flushing 6 whole bytes to the output buffer whenever at least 48
// bits have accumulated.
func (w *huffmanBitWriter) writeCode(c hcode) {
	if w.err != nil {
		return
	}
	w.bits |= uint64(c.code) << w.nbits
	w.nbits += uint(c.len)
	if w.nbits >= 48 {
		bits := w.bits
		w.bits >>= 48
		w.nbits -= 48
		n := w.nbytes
		bytes := w.bytes[n : n+6]
		bytes[0] = byte(bits)
		bytes[1] = byte(bits >> 8)
		bytes[2] = byte(bits >> 16)
		bytes[3] = byte(bits >> 24)
		bytes[4] = byte(bits >> 32)
		bytes[5] = byte(bits >> 40)
		n += 6
		if n >= bufferFlushSize {
			w.write(w.bytes[:n])
			n = 0
		}
		w.nbytes = n
	}
}

// Write the header of a dynamic Huffman block to the output stream.
//
//	numLiterals  The number of literals specified in codegen
//	numOffsets   The number of offsets specified in codegen
//	numCodegens  The number of codegens used in codegen
func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) {
	if w.err != nil {
		return
	}
	// Block type 10 (dynamic); low bit set marks the final block.
	var firstBits int32 = 4
	if isEof {
		firstBits = 5
	}
	w.writeBits(firstBits, 3)
	w.writeBits(int32(numLiterals-257), 5)
	w.writeBits(int32(numOffsets-1), 5)
	w.writeBits(int32(numCodegens-4), 4)

	for i := 0; i < numCodegens; i++ {
		value := uint(w.codegenEncoding.codes[codegenOrder[i]].len)
		w.writeBits(int32(value), 3)
	}

	// Emit the run-length-encoded code lengths; codes 16/17/18 carry
	// 2/3/7 extra repeat-count bits.
	i := 0
	for {
		var codeWord int = int(w.codegen[i])
		i++
		if codeWord == badCode {
			break
		}
		w.writeCode(w.codegenEncoding.codes[uint32(codeWord)])

		switch codeWord {
		case 16:
			w.writeBits(int32(w.codegen[i]), 2)
			i++
			break
		case 17:
			w.writeBits(int32(w.codegen[i]), 3)
			i++
			break
		case 18:
			w.writeBits(int32(w.codegen[i]), 7)
			i++
			break
		}
	}
}

// writeStoredHeader emits the 3-bit stored-block header (flushing to a
// byte boundary, as RFC 1951 requires) followed by LEN and NLEN.
func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) {
	if w.err != nil {
		return
	}
	var flag int32
	if isEof {
		flag = 1
	}
	w.writeBits(flag, 3)
	w.flush()
	w.writeBits(int32(length), 16)
	w.writeBits(int32(^uint16(length)), 16)
}

// writeFixedHeader emits the 3-bit header for a fixed Huffman block.
func (w *huffmanBitWriter) writeFixedHeader(isEof bool) {
	if w.err != nil {
		return
	}
	// Indicate that we are a fixed Huffman block
	var value int32 = 2
	if isEof {
		value = 3
	}
	w.writeBits(value, 3)
}

// writeBlock will write a block of tokens with the smallest encoding.
// The original input can be supplied, and if the huffman encoded data
// is larger than the original bytes, the data will be written as a
// stored block.
// If the input is nil, the tokens will always be Huffman encoded.
func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) {
	if w.err != nil {
		return
	}

	tokens = append(tokens, endBlockMarker)
	numLiterals, numOffsets := w.indexTokens(tokens)

	var extraBits int
	storedSize, storable := w.storedSize(input)
	if storable {
		// We only bother calculating the costs of the extra bits required by
		// the length of offset fields (which will be the same for both fixed
		// and dynamic encoding), if we need to compare those two encodings
		// against stored encoding.
		for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ {
			// First eight length codes have extra size = 0.
			extraBits += int(w.literalFreq[lengthCode]) * int(lengthExtraBits[lengthCode-lengthCodesStart])
		}
		for offsetCode := 4; offsetCode < numOffsets; offsetCode++ {
			// First four offset codes have extra size = 0.
			extraBits += int(w.offsetFreq[offsetCode]) * int(offsetExtraBits[offsetCode])
		}
	}

	// Figure out smallest code.
	// Fixed Huffman baseline.
	var literalEncoding = fixedLiteralEncoding
	var offsetEncoding = fixedOffsetEncoding
	var size = w.fixedSize(extraBits)

	// Dynamic Huffman?
	var numCodegens int

	// Generate codegen and codegenFrequencies, which indicates how to encode
	// the literalEncoding and the offsetEncoding.
	w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
	w.codegenEncoding.generate(w.codegenFreq[:], 7)
	dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits)

	if dynamicSize < size {
		size = dynamicSize
		literalEncoding = w.literalEncoding
		offsetEncoding = w.offsetEncoding
	}

	// Stored bytes?
	if storable && storedSize < size {
		w.writeStoredHeader(len(input), eof)
		w.writeBytes(input)
		return
	}

	// Huffman.
	if literalEncoding == fixedLiteralEncoding {
		w.writeFixedHeader(eof)
	} else {
		w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
	}

	// Write the tokens.
	w.writeTokens(tokens, literalEncoding.codes, offsetEncoding.codes)
}

// writeBlockDynamic encodes a block using a dynamic Huffman table.
// This should be used if the symbols used have a disproportionate
// histogram distribution.
// If input is supplied and the compression savings are below 1/16th of the
// input size the block is stored.
func (w *huffmanBitWriter) writeBlockDynamic(tokens []token, eof bool, input []byte) {
	if w.err != nil {
		return
	}

	tokens = append(tokens, endBlockMarker)
	numLiterals, numOffsets := w.indexTokens(tokens)

	// Generate codegen and codegenFrequencies, which indicates how to encode
	// the literalEncoding and the offsetEncoding.
	w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding)
	w.codegenEncoding.generate(w.codegenFreq[:], 7)
	size, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, 0)

	// Store bytes, if we don't get a reasonable improvement.
	if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
		w.writeStoredHeader(len(input), eof)
		w.writeBytes(input)
		return
	}

	// Write Huffman table.
	w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)

	// Write the tokens.
	w.writeTokens(tokens, w.literalEncoding.codes, w.offsetEncoding.codes)
}

// indexTokens indexes a slice of tokens, and updates
// literalFreq and offsetFreq, and generates literalEncoding
// and offsetEncoding.
// The number of literal and offset tokens is returned.
func (w *huffmanBitWriter) indexTokens(tokens []token) (numLiterals, numOffsets int) {
	for i := range w.literalFreq {
		w.literalFreq[i] = 0
	}
	for i := range w.offsetFreq {
		w.offsetFreq[i] = 0
	}

	for _, t := range tokens {
		if t < matchType {
			w.literalFreq[t.literal()]++
			continue
		}
		length := t.length()
		offset := t.offset()
		w.literalFreq[lengthCodesStart+lengthCode(length)]++
		w.offsetFreq[offsetCode(offset)]++
	}

	// get the number of literals
	numLiterals = len(w.literalFreq)
	for w.literalFreq[numLiterals-1] == 0 {
		numLiterals--
	}
	// get the number of offsets
	numOffsets = len(w.offsetFreq)
	for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 {
		numOffsets--
	}
	if numOffsets == 0 {
		// We haven't found a single match. If we want to go with the dynamic encoding,
		// we should count at least one offset to be sure that the offset huffman tree could be encoded.
		w.offsetFreq[0] = 1
		numOffsets = 1
	}
	w.literalEncoding.generate(w.literalFreq, 15)
	w.offsetEncoding.generate(w.offsetFreq, 15)
	return
}

// writeTokens writes a slice of tokens to the output.
// codes for literal and offset encoding must be supplied.
func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) {
	if w.err != nil {
		return
	}
	for _, t := range tokens {
		if t < matchType {
			w.writeCode(leCodes[t.literal()])
			continue
		}
		// Write the length
		length := t.length()
		lengthCode := lengthCode(length)
		w.writeCode(leCodes[lengthCode+lengthCodesStart])
		extraLengthBits := uint(lengthExtraBits[lengthCode])
		if extraLengthBits > 0 {
			extraLength := int32(length - lengthBase[lengthCode])
			w.writeBits(extraLength, extraLengthBits)
		}
		// Write the offset
		offset := t.offset()
		offsetCode := offsetCode(offset)
		w.writeCode(oeCodes[offsetCode])
		extraOffsetBits := uint(offsetExtraBits[offsetCode])
		if extraOffsetBits > 0 {
			extraOffset := int32(offset - offsetBase[offsetCode])
			w.writeBits(extraOffset, extraOffsetBits)
		}
	}
}

// huffOffset is a static offset encoder used for huffman only encoding.
// It can be reused since we will not be encoding offset values.
var huffOffset *huffmanEncoder

func init() {
	w := newHuffmanBitWriter(nil)
	w.offsetFreq[0] = 1
	huffOffset = newHuffmanEncoder(offsetCodeCount)
	huffOffset.generate(w.offsetFreq, 15)
}

// writeBlockHuff encodes a block of bytes as either
// Huffman encoded literals or uncompressed bytes if the
// results only gains very little from compression.
func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte) {
	if w.err != nil {
		return
	}

	// Clear histogram
	for i := range w.literalFreq {
		w.literalFreq[i] = 0
	}

	// Add everything as literals
	histogram(input, w.literalFreq)

	w.literalFreq[endBlockMarker] = 1

	const numLiterals = endBlockMarker + 1
	const numOffsets = 1

	w.literalEncoding.generate(w.literalFreq, 15)

	// Figure out smallest code.
	// Always use dynamic Huffman or Store
	var numCodegens int

	// Generate codegen and codegenFrequencies, which indicates how to encode
	// the literalEncoding and the offsetEncoding.
	w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset)
	w.codegenEncoding.generate(w.codegenFreq[:], 7)
	size, numCodegens := w.dynamicSize(w.literalEncoding, huffOffset, 0)

	// Store bytes, if we don't get a reasonable improvement.
	if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) {
		w.writeStoredHeader(len(input), eof)
		w.writeBytes(input)
		return
	}

	// Huffman.
	w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof)
	encoding := w.literalEncoding.codes[:257]
	n := w.nbytes
	for _, t := range input {
		// Bitwriting inlined, ~30% speedup
		c := encoding[t]
		w.bits |= uint64(c.code) << w.nbits
		w.nbits += uint(c.len)
		if w.nbits < 48 {
			continue
		}
		// Store 6 bytes
		bits := w.bits
		w.bits >>= 48
		w.nbits -= 48
		bytes := w.bytes[n : n+6]
		bytes[0] = byte(bits)
		bytes[1] = byte(bits >> 8)
		bytes[2] = byte(bits >> 16)
		bytes[3] = byte(bits >> 24)
		bytes[4] = byte(bits >> 32)
		bytes[5] = byte(bits >> 40)
		n += 6
		if n < bufferFlushSize {
			continue
		}
		w.write(w.bytes[:n])
		if w.err != nil {
			return // Return early in the event of write failures
		}
		n = 0
	}
	w.nbytes = n
	w.writeCode(encoding[endBlockMarker])
}


================================================
FILE: vendor/github.com/klauspost/compress/flate/huffman_code.go
================================================
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package flate

import (
	"math"
	"sort"
)

// hcode is a huffman code with a bit code and bit length.
type hcode struct {
	code, len uint16
}

type huffmanEncoder struct {
	codes     []hcode
	freqcache []literalNode
	bitCount  [17]int32
	lns       byLiteral // stored to avoid repeated allocation in generate
	lfs       byFreq    // stored to avoid repeated allocation in generate
}

type literalNode struct {
	literal uint16
	freq    int32
}

// A levelInfo describes the state of the constructed tree for a given depth.
type levelInfo struct {
	// Our level. for better printing
	level int32

	// The frequency of the last node at this level
	lastFreq int32

	// The frequency of the next character to add to this level
	nextCharFreq int32

	// The frequency of the next pair (from level below) to add to this level.
	// Only valid if the "needed" value of the next lower level is 0.
	nextPairFreq int32

	// The number of chains remaining to generate for this level before moving
	// up to the next level
	needed int32
}

// set sets the code and length of an hcode.
func (h *hcode) set(code uint16, length uint16) {
	h.len = length
	h.code = code
}

// maxNode returns a sentinel literalNode with maximal literal and frequency.
func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxInt32} }

// newHuffmanEncoder returns an encoder with room for size codes.
func newHuffmanEncoder(size int) *huffmanEncoder {
	return &huffmanEncoder{codes: make([]hcode, size)}
}

// Generates a HuffmanCode corresponding to the fixed literal table
func generateFixedLiteralEncoding() *huffmanEncoder {
	h := newHuffmanEncoder(maxNumLit)
	codes := h.codes
	var ch uint16
	for ch = 0; ch < maxNumLit; ch++ {
		var bits uint16
		var size uint16
		switch {
		case ch < 144:
			// size 8, 00110000 .. 10111111
			bits = ch + 48
			size = 8
			break
		case ch < 256:
			// size 9, 110010000 .. 111111111
			bits = ch + 400 - 144
			size = 9
			break
		case ch < 280:
			// size 7, 0000000 .. 0010111
			bits = ch - 256
			size = 7
			break
		default:
			// size 8, 11000000 .. 11000111
			bits = ch + 192 - 280
			size = 8
		}
		codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size}
	}
	return h
}

// generateFixedOffsetEncoding builds the fixed 5-bit offset table.
func generateFixedOffsetEncoding() *huffmanEncoder {
	h := newHuffmanEncoder(30)
	codes := h.codes
	for ch := range codes {
		codes[ch] = hcode{code: reverseBits(uint16(ch), 5), len: 5}
	}
	return h
}

var fixedLiteralEncoding *huffmanEncoder = generateFixedLiteralEncoding()
var fixedOffsetEncoding *huffmanEncoder = generateFixedOffsetEncoding()

// bitLength returns the total encoded size, in bits, of the given
// frequency table under this encoder's current codes.
func (h *huffmanEncoder) bitLength(freq []int32) int {
	var total int
	for i, f := range freq {
		if f != 0 {
			total += int(f) * int(h.codes[i].len)
		}
	}
	return total
}

const maxBitsLimit = 16

// Return the number of literals assigned to each bit size in the Huffman encoding
//
// This method is only called when list.length >= 3
// The cases of 0, 1, and 2 literals are handled by special case code.
//
// list  An array of the literals with non-zero frequencies
// and their associated frequencies. The array is in order of increasing
// frequency, and has as its last element a special element with frequency
// MaxInt32
// maxBits  The maximum number of bits that should be used to encode any literal.
// Must be less than 16.
// return  An integer array in which array[i] indicates the number of literals
// that should be encoded in i bits.
func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 {
	if maxBits >= maxBitsLimit {
		panic("flate: maxBits too large")
	}
	n := int32(len(list))
	list = list[0 : n+1]
	list[n] = maxNode()

	// The tree can't have greater depth than n - 1, no matter what. This
	// saves a little bit of work in some small cases
	if maxBits > n-1 {
		maxBits = n - 1
	}

	// Create information about each of the levels.
	// A bogus "Level 0" whose sole purpose is so that
	// level1.prev.needed==0.  This makes level1.nextPairFreq
	// be a legitimate value that never gets chosen.
	var levels [maxBitsLimit]levelInfo
	// leafCounts[i] counts the number of literals at the left
	// of ancestors of the rightmost node at level i.
	// leafCounts[i][j] is the number of literals at the left
	// of the level j ancestor.
	var leafCounts [maxBitsLimit][maxBitsLimit]int32

	for level := int32(1); level <= maxBits; level++ {
		// For every level, the first two items are the first two characters.
		// We initialize the levels as if we had already figured this out.
		levels[level] = levelInfo{
			level:        level,
			lastFreq:     list[1].freq,
			nextCharFreq: list[2].freq,
			nextPairFreq: list[0].freq + list[1].freq,
		}
		leafCounts[level][level] = 2
		if level == 1 {
			levels[level].nextPairFreq = math.MaxInt32
		}
	}

	// We need a total of 2*n - 2 items at top level and have already generated 2.
	levels[maxBits].needed = 2*n - 4

	level := maxBits
	for {
		l := &levels[level]
		if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 {
			// We've run out of both leafs and pairs.
			// End all calculations for this level.
			// To make sure we never come back to this level or any lower level,
			// set nextPairFreq impossibly large.
			l.needed = 0
			levels[level+1].nextPairFreq = math.MaxInt32
			level++
			continue
		}

		prevFreq := l.lastFreq
		if l.nextCharFreq < l.nextPairFreq {
			// The next item on this row is a leaf node.
			n := leafCounts[level][level] + 1
			l.lastFreq = l.nextCharFreq
			// Lower leafCounts are the same of the previous node.
			leafCounts[level][level] = n
			l.nextCharFreq = list[n].freq
		} else {
			// The next item on this row is a pair from the previous row.
			// nextPairFreq isn't valid until we generate two
			// more values in the level below
			l.lastFreq = l.nextPairFreq
			// Take leaf counts from the lower level, except counts[level] remains the same.
			copy(leafCounts[level][:level], leafCounts[level-1][:level])
			levels[l.level-1].needed = 2
		}

		if l.needed--; l.needed == 0 {
			// We've done everything we need to do for this level.
			// Continue calculating one level up. Fill in nextPairFreq
			// of that level with the sum of the two nodes we've just calculated on
			// this level.
			if l.level == maxBits {
				// All done!
				break
			}
			levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq
			level++
		} else {
			// If we stole from below, move down temporarily to replenish it.
			for levels[level-1].needed > 0 {
				level--
			}
		}
	}

	// Somethings is wrong if at the end, the top level is null or hasn't used
	// all of the leaves.
	if leafCounts[maxBits][maxBits] != n {
		panic("leafCounts[maxBits][maxBits] != n")
	}

	bitCount := h.bitCount[:maxBits+1]
	bits := 1
	counts := &leafCounts[maxBits]
	for level := maxBits; level > 0; level-- {
		// chain.leafCount gives the number of literals requiring at least "bits"
		// bits to encode.
		bitCount[bits] = counts[level] - counts[level-1]
		bits++
	}
	return bitCount
}

// Look at the leaves and assign them a bit count and an encoding as specified
// in RFC 1951 3.2.2
func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) {
	code := uint16(0)
	for n, bits := range bitCount {
		code <<= 1
		if n == 0 || bits == 0 {
			continue
		}
		// The literals list[len(list)-bits] .. list[len(list)-bits]
		// are encoded using "bits" bits, and get the values
		// code, code + 1, ....  The code values are
		// assigned in literal order (not frequency order).
		chunk := list[len(list)-int(bits):]

		h.lns.sort(chunk)
		for _, node := range chunk {
			h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)}
			code++
		}
		list = list[0 : len(list)-int(bits)]
	}
}

// Update this Huffman Code object to be the minimum code for the specified frequency count.
//
// freq  An array of frequencies, in which frequency[i] gives the frequency of literal i.
// maxBits  The maximum number of bits to use for any literal.
func (h *huffmanEncoder) generate(freq []int32, maxBits int32) {
	if h.freqcache == nil {
		// Allocate a reusable buffer with the longest possible frequency table.
		// Possible lengths are codegenCodeCount, offsetCodeCount and maxNumLit.
		// The largest of these is maxNumLit, so we allocate for that case.
		h.freqcache = make([]literalNode, maxNumLit+1)
	}
	list := h.freqcache[:len(freq)+1]
	// Number of non-zero literals
	count := 0
	// Set list to be the set of all non-zero literals and their frequencies
	for i, f := range freq {
		if f != 0 {
			list[count] = literalNode{uint16(i), f}
			count++
		} else {
			list[count] = literalNode{}
			h.codes[i].len = 0
		}
	}
	list[len(freq)] = literalNode{}

	list = list[:count]
	if count <= 2 {
		// Handle the small cases here, because they are awkward for the general case code. With
		// two or fewer literals, everything has bit length 1.
		for i, node := range list {
			// "list" is in order of increasing literal value.
			h.codes[node.literal].set(uint16(i), 1)
		}
		return
	}
	h.lfs.sort(list)

	// Get the number of literals for each bit count
	bitCount := h.bitCounts(list, maxBits)
	// And do the assignment
	h.assignEncodingAndSize(bitCount, list)
}

type byLiteral []literalNode

func (s *byLiteral) sort(a []literalNode) {
	*s = byLiteral(a)
	sort.Sort(s)
}

func (s byLiteral) Len() int { return len(s) }

func (s byLiteral) Less(i, j int) bool {
	return s[i].literal < s[j].literal
}

func (s byLiteral) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

type byFreq []literalNode

func (s *byFreq) sort(a []literalNode) {
	*s = byFreq(a)
	sort.Sort(s)
}

func (s byFreq) Len() int { return len(s) }

func (s byFreq) Less(i, j int) bool {
	if s[i].freq == s[j].freq {
		return s[i].literal < s[j].literal
	}
	return s[i].freq < s[j].freq
}

func (s byFreq) Swap(i, j int) { s[i], s[j] = s[j], s[i] }


================================================
FILE: vendor/github.com/klauspost/compress/flate/inflate.go
================================================
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package flate implements the DEFLATE compressed data format, described in
// RFC 1951.
The gzip and zlib packages implement access to DEFLATE-based file // formats. package flate import ( "bufio" "io" "strconv" "sync" ) const ( maxCodeLen = 16 // max length of Huffman code // The next three numbers come from the RFC section 3.2.7, with the // additional proviso in section 3.2.5 which implies that distance codes // 30 and 31 should never occur in compressed data. maxNumLit = 286 maxNumDist = 30 numCodes = 19 // number of codes in Huffman meta-code ) // Initialize the fixedHuffmanDecoder only once upon first use. var fixedOnce sync.Once var fixedHuffmanDecoder huffmanDecoder // A CorruptInputError reports the presence of corrupt input at a given offset. type CorruptInputError int64 func (e CorruptInputError) Error() string { return "flate: corrupt input before offset " + strconv.FormatInt(int64(e), 10) } // An InternalError reports an error in the flate code itself. type InternalError string func (e InternalError) Error() string { return "flate: internal error: " + string(e) } // A ReadError reports an error encountered while reading input. // // Deprecated: No longer returned. type ReadError struct { Offset int64 // byte offset where error occurred Err error // error returned by underlying Read } func (e *ReadError) Error() string { return "flate: read error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error() } // A WriteError reports an error encountered while writing output. // // Deprecated: No longer returned. type WriteError struct { Offset int64 // byte offset where error occurred Err error // error returned by underlying Write } func (e *WriteError) Error() string { return "flate: write error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error() } // Resetter resets a ReadCloser returned by NewReader or NewReaderDict to // to switch to a new underlying Reader. This permits reusing a ReadCloser // instead of allocating a new one. 
type Resetter interface { // Reset discards any buffered data and resets the Resetter as if it was // newly initialized with the given reader. Reset(r io.Reader, dict []byte) error } // The data structure for decoding Huffman tables is based on that of // zlib. There is a lookup table of a fixed bit width (huffmanChunkBits), // For codes smaller than the table width, there are multiple entries // (each combination of trailing bits has the same value). For codes // larger than the table width, the table contains a link to an overflow // table. The width of each entry in the link table is the maximum code // size minus the chunk width. // // Note that you can do a lookup in the table even without all bits // filled. Since the extra bits are zero, and the DEFLATE Huffman codes // have the property that shorter codes come before longer ones, the // bit length estimate in the result is a lower bound on the actual // number of bits. // // See the following: // http://www.gzip.org/algorithm.txt // chunk & 15 is number of bits // chunk >> 4 is value, including table link const ( huffmanChunkBits = 9 huffmanNumChunks = 1 << huffmanChunkBits huffmanCountMask = 15 huffmanValueShift = 4 ) type huffmanDecoder struct { min int // the minimum code length chunks [huffmanNumChunks]uint32 // chunks as described above links [][]uint32 // overflow links linkMask uint32 // mask the width of the link table } // Initialize Huffman decoding tables from array of code lengths. // Following this function, h is guaranteed to be initialized into a complete // tree (i.e., neither over-subscribed nor under-subscribed). The exception is a // degenerate case where the tree has only a single symbol with length 1. Empty // trees are permitted. func (h *huffmanDecoder) init(bits []int) bool { // Sanity enables additional runtime tests during Huffman // table construction. It's intended to be used during // development to supplement the currently ad-hoc unit tests. 
const sanity = false if h.min != 0 { *h = huffmanDecoder{} } // Count number of codes of each length, // compute min and max length. var count [maxCodeLen]int var min, max int for _, n := range bits { if n == 0 { continue } if min == 0 || n < min { min = n } if n > max { max = n } count[n]++ } // Empty tree. The decompressor.huffSym function will fail later if the tree // is used. Technically, an empty tree is only valid for the HDIST tree and // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree // is guaranteed to fail since it will attempt to use the tree to decode the // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is // guaranteed to fail later since the compressed data section must be // composed of at least one symbol (the end-of-block marker). if max == 0 { return true } code := 0 var nextcode [maxCodeLen]int for i := min; i <= max; i++ { code <<= 1 nextcode[i] = code code += count[i] } // Check that the coding is complete (i.e., that we've // assigned all 2-to-the-max possible bit sequences). // Exception: To be compatible with zlib, we also need to // accept degenerate single-code codings. See also // TestDegenerateHuffmanCoding. if code != 1< huffmanChunkBits { numLinks := 1 << (uint(max) - huffmanChunkBits) h.linkMask = uint32(numLinks - 1) // create link tables link := nextcode[huffmanChunkBits+1] >> 1 h.links = make([][]uint32, huffmanNumChunks-link) for j := uint(link); j < huffmanNumChunks; j++ { reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8 reverse >>= uint(16 - huffmanChunkBits) off := j - uint(link) if sanity && h.chunks[reverse] != 0 { panic("impossible: overwriting existing chunk") } h.chunks[reverse] = uint32(off<>8]) | int(reverseByte[code&0xff])<<8 reverse >>= uint(16 - n) if n <= huffmanChunkBits { for off := reverse; off < len(h.chunks); off += 1 << uint(n) { // We should never need to overwrite // an existing chunk. 
Also, 0 is // never a valid chunk, because the // lower 4 "count" bits should be // between 1 and 15. if sanity && h.chunks[off] != 0 { panic("impossible: overwriting existing chunk") } h.chunks[off] = chunk } } else { j := reverse & (huffmanNumChunks - 1) if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 { // Longer codes should have been // associated with a link table above. panic("impossible: not an indirect chunk") } value := h.chunks[j] >> huffmanValueShift linktab := h.links[value] reverse >>= huffmanChunkBits for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) { if sanity && linktab[off] != 0 { panic("impossible: overwriting existing chunk") } linktab[off] = chunk } } } if sanity { // Above we've sanity checked that we never overwrote // an existing entry. Here we additionally check that // we filled the tables completely. for i, chunk := range h.chunks { if chunk == 0 { // As an exception, in the degenerate // single-code case, we allow odd // chunks to be missing. if code == 1 && i%2 == 1 { continue } panic("impossible: missing chunk") } } for _, linktab := range h.links { for _, chunk := range linktab { if chunk == 0 { panic("impossible: missing chunk") } } } } return true } // The actual read interface needed by NewReader. // If the passed in io.Reader does not also have ReadByte, // the NewReader will introduce its own buffering. type Reader interface { io.Reader io.ByteReader } // Decompress state. type decompressor struct { // Input source. r Reader roffset int64 // Input bits, in top of b. b uint32 nb uint // Huffman decoders for literal/length, distance. h1, h2 huffmanDecoder // Length arrays used to define Huffman codes. bits *[maxNumLit + maxNumDist]int codebits *[numCodes]int // Output history, buffer. dict dictDecoder // Temporary buffer (avoids repeated allocation). buf [4]byte // Next step in the decompression, // and decompression state. 
step func(*decompressor) stepState int final bool err error toRead []byte hl, hd *huffmanDecoder copyLen int copyDist int } func (f *decompressor) nextBlock() { for f.nb < 1+2 { if f.err = f.moreBits(); f.err != nil { return } } f.final = f.b&1 == 1 f.b >>= 1 typ := f.b & 3 f.b >>= 2 f.nb -= 1 + 2 switch typ { case 0: f.dataBlock() case 1: // compressed, fixed Huffman tables f.hl = &fixedHuffmanDecoder f.hd = nil f.huffmanBlock() case 2: // compressed, dynamic Huffman tables if f.err = f.readHuffman(); f.err != nil { break } f.hl = &f.h1 f.hd = &f.h2 f.huffmanBlock() default: // 3 is reserved. f.err = CorruptInputError(f.roffset) } } func (f *decompressor) Read(b []byte) (int, error) { for { if len(f.toRead) > 0 { n := copy(b, f.toRead) f.toRead = f.toRead[n:] if len(f.toRead) == 0 { return n, f.err } return n, nil } if f.err != nil { return 0, f.err } f.step(f) if f.err != nil && len(f.toRead) == 0 { f.toRead = f.dict.readFlush() // Flush what's left in case of error } } } // Support the io.WriteTo interface for io.Copy and friends. func (f *decompressor) WriteTo(w io.Writer) (int64, error) { total := int64(0) flushed := false for { if len(f.toRead) > 0 { n, err := w.Write(f.toRead) total += int64(n) if err != nil { f.err = err return total, err } if n != len(f.toRead) { return total, io.ErrShortWrite } f.toRead = f.toRead[:0] } if f.err != nil && flushed { if f.err == io.EOF { return total, nil } return total, f.err } if f.err == nil { f.step(f) } if len(f.toRead) == 0 && f.err != nil && !flushed { f.toRead = f.dict.readFlush() // Flush what's left in case of error flushed = true } } } func (f *decompressor) Close() error { if f.err == io.EOF { return nil } return f.err } // RFC 1951 section 3.2.7. // Compression with dynamic Huffman codes var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} func (f *decompressor) readHuffman() error { // HLIT[5], HDIST[5], HCLEN[4]. 
for f.nb < 5+5+4 { if err := f.moreBits(); err != nil { return err } } nlit := int(f.b&0x1F) + 257 if nlit > maxNumLit { return CorruptInputError(f.roffset) } f.b >>= 5 ndist := int(f.b&0x1F) + 1 if ndist > maxNumDist { return CorruptInputError(f.roffset) } f.b >>= 5 nclen := int(f.b&0xF) + 4 // numCodes is 19, so nclen is always valid. f.b >>= 4 f.nb -= 5 + 5 + 4 // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order. for i := 0; i < nclen; i++ { for f.nb < 3 { if err := f.moreBits(); err != nil { return err } } f.codebits[codeOrder[i]] = int(f.b & 0x7) f.b >>= 3 f.nb -= 3 } for i := nclen; i < len(codeOrder); i++ { f.codebits[codeOrder[i]] = 0 } if !f.h1.init(f.codebits[0:]) { return CorruptInputError(f.roffset) } // HLIT + 257 code lengths, HDIST + 1 code lengths, // using the code length Huffman code. for i, n := 0, nlit+ndist; i < n; { x, err := f.huffSym(&f.h1) if err != nil { return err } if x < 16 { // Actual length. f.bits[i] = x i++ continue } // Repeat previous length or zero. var rep int var nb uint var b int switch x { default: return InternalError("unexpected length code") case 16: rep = 3 nb = 2 if i == 0 { return CorruptInputError(f.roffset) } b = f.bits[i-1] case 17: rep = 3 nb = 3 b = 0 case 18: rep = 11 nb = 7 b = 0 } for f.nb < nb { if err := f.moreBits(); err != nil { return err } } rep += int(f.b & uint32(1<>= nb f.nb -= nb if i+rep > n { return CorruptInputError(f.roffset) } for j := 0; j < rep; j++ { f.bits[i] = b i++ } } if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) { return CorruptInputError(f.roffset) } // As an optimization, we can initialize the min bits to read at a time // for the HLIT tree to the length of the EOB marker since we know that // every block must terminate with one. This preserves the property that // we never read any extra bytes after the end of the DEFLATE stream. 
if f.h1.min < f.bits[endBlockMarker] { f.h1.min = f.bits[endBlockMarker] } return nil } // Decode a single Huffman block from f. // hl and hd are the Huffman states for the lit/length values // and the distance values, respectively. If hd == nil, using the // fixed distance encoding associated with fixed Huffman blocks. func (f *decompressor) huffmanBlock() { const ( stateInit = iota // Zero value must be stateInit stateDict ) switch f.stepState { case stateInit: goto readLiteral case stateDict: goto copyHistory } readLiteral: // Read literal and/or (length, distance) according to RFC section 3.2.3. { v, err := f.huffSym(f.hl) if err != nil { f.err = err return } var n uint // number of bits extra var length int switch { case v < 256: f.dict.writeByte(byte(v)) if f.dict.availWrite() == 0 { f.toRead = f.dict.readFlush() f.step = (*decompressor).huffmanBlock f.stepState = stateInit return } goto readLiteral case v == 256: f.finishBlock() return // otherwise, reference to older data case v < 265: length = v - (257 - 3) n = 0 case v < 269: length = v*2 - (265*2 - 11) n = 1 case v < 273: length = v*4 - (269*4 - 19) n = 2 case v < 277: length = v*8 - (273*8 - 35) n = 3 case v < 281: length = v*16 - (277*16 - 67) n = 4 case v < 285: length = v*32 - (281*32 - 131) n = 5 case v < maxNumLit: length = 258 n = 0 default: f.err = CorruptInputError(f.roffset) return } if n > 0 { for f.nb < n { if err = f.moreBits(); err != nil { f.err = err return } } length += int(f.b & uint32(1<>= n f.nb -= n } var dist int if f.hd == nil { for f.nb < 5 { if err = f.moreBits(); err != nil { f.err = err return } } dist = int(reverseByte[(f.b&0x1F)<<3]) f.b >>= 5 f.nb -= 5 } else { if dist, err = f.huffSym(f.hd); err != nil { f.err = err return } } switch { case dist < 4: dist++ case dist < maxNumDist: nb := uint(dist-2) >> 1 // have 1 bit in bottom of dist, need nb more. 
extra := (dist & 1) << nb for f.nb < nb { if err = f.moreBits(); err != nil { f.err = err return } } extra |= int(f.b & uint32(1<>= nb f.nb -= nb dist = 1<<(nb+1) + 1 + extra default: f.err = CorruptInputError(f.roffset) return } // No check on length; encoding can be prescient. if dist > f.dict.histSize() { f.err = CorruptInputError(f.roffset) return } f.copyLen, f.copyDist = length, dist goto copyHistory } copyHistory: // Perform a backwards copy according to RFC section 3.2.3. { cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) if cnt == 0 { cnt = f.dict.writeCopy(f.copyDist, f.copyLen) } f.copyLen -= cnt if f.dict.availWrite() == 0 || f.copyLen > 0 { f.toRead = f.dict.readFlush() f.step = (*decompressor).huffmanBlock // We need to continue this work f.stepState = stateDict return } goto readLiteral } } // Copy a single uncompressed data block from input to output. func (f *decompressor) dataBlock() { // Uncompressed. // Discard current half-byte. f.nb = 0 f.b = 0 // Length then ones-complement of length. nr, err := io.ReadFull(f.r, f.buf[0:4]) f.roffset += int64(nr) if err != nil { if err == io.EOF { err = io.ErrUnexpectedEOF } f.err = err return } n := int(f.buf[0]) | int(f.buf[1])<<8 nn := int(f.buf[2]) | int(f.buf[3])<<8 if uint16(nn) != uint16(^n) { f.err = CorruptInputError(f.roffset) return } if n == 0 { f.toRead = f.dict.readFlush() f.finishBlock() return } f.copyLen = n f.copyData() } // copyData copies f.copyLen bytes from the underlying reader into f.hist. // It pauses for reads when f.hist is full. 
func (f *decompressor) copyData() {
	buf := f.dict.writeSlice()
	if len(buf) > f.copyLen {
		buf = buf[:f.copyLen]
	}

	cnt, err := io.ReadFull(f.r, buf)
	f.roffset += int64(cnt)
	f.copyLen -= cnt
	f.dict.writeMark(cnt)
	if err != nil {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		f.err = err
		return
	}

	if f.dict.availWrite() == 0 || f.copyLen > 0 {
		// Window full or copy unfinished: flush and resume here next call.
		f.toRead = f.dict.readFlush()
		f.step = (*decompressor).copyData
		return
	}
	f.finishBlock()
}

// finishBlock ends the current block: on the final block it flushes any
// buffered output and records io.EOF; otherwise it schedules nextBlock.
func (f *decompressor) finishBlock() {
	if f.final {
		if f.dict.availRead() > 0 {
			f.toRead = f.dict.readFlush()
		}
		f.err = io.EOF
	}
	f.step = (*decompressor).nextBlock
}

// moreBits reads one more byte from the input into the bit buffer f.b,
// translating EOF into io.ErrUnexpectedEOF (mid-stream EOF is an error).
func (f *decompressor) moreBits() error {
	c, err := f.r.ReadByte()
	if err != nil {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		return err
	}
	f.roffset++
	f.b |= uint32(c) << f.nb
	f.nb += 8
	return nil
}

// Read the next Huffman-encoded symbol from f according to h.
func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) {
	// Since a huffmanDecoder can be empty or be composed of a degenerate tree
	// with single element, huffSym must error on these two edge cases. In both
	// cases, the chunks slice will be 0 for the invalid sequence, leading it
	// satisfy the n == 0 check below.
	n := uint(h.min)
	for {
		for f.nb < n {
			if err := f.moreBits(); err != nil {
				return 0, err
			}
		}
		// Primary table lookup; long codes indirect through h.links.
		chunk := h.chunks[f.b&(huffmanNumChunks-1)]
		n = uint(chunk & huffmanCountMask)
		if n > huffmanChunkBits {
			chunk = h.links[chunk>>huffmanValueShift][(f.b>>huffmanChunkBits)&h.linkMask]
			n = uint(chunk & huffmanCountMask)
		}
		if n <= f.nb {
			if n == 0 {
				f.err = CorruptInputError(f.roffset)
				return 0, f.err
			}
			f.b >>= n
			f.nb -= n
			return int(chunk >> huffmanValueShift), nil
		}
	}
}

// makeReader wraps r in a bufio.Reader unless it already satisfies the
// package's Reader (byte-reader) interface.
func makeReader(r io.Reader) Reader {
	if rr, ok := r.(Reader); ok {
		return rr
	}
	return bufio.NewReader(r)
}

// fixedHuffmanDecoderInit lazily builds the fixed Huffman table, once.
func fixedHuffmanDecoderInit() {
	fixedOnce.Do(func() {
		// These come from the RFC section 3.2.6.
		var bits [288]int
		for i := 0; i < 144; i++ {
			bits[i] = 8
		}
		for i := 144; i < 256; i++ {
			bits[i] = 9
		}
		for i := 256; i < 280; i++ {
			bits[i] = 7
		}
		for i := 280; i < 288; i++ {
			bits[i] = 8
		}
		fixedHuffmanDecoder.init(bits[:])
	})
}

// Reset discards the decompressor's state and makes it equivalent to the
// result of NewReaderDict(r, dict), reusing the existing allocations.
func (f *decompressor) Reset(r io.Reader, dict []byte) error {
	*f = decompressor{
		r:        makeReader(r),
		bits:     f.bits,
		codebits: f.codebits,
		dict:     f.dict,
		step:     (*decompressor).nextBlock,
	}
	f.dict.init(maxMatchOffset, dict)
	return nil
}

// NewReader returns a new ReadCloser that can be used
// to read the uncompressed version of r.
// If r does not also implement io.ByteReader,
// the decompressor may read more data than necessary from r.
// It is the caller's responsibility to call Close on the ReadCloser
// when finished reading.
//
// The ReadCloser returned by NewReader also implements Resetter.
func NewReader(r io.Reader) io.ReadCloser {
	fixedHuffmanDecoderInit()

	var f decompressor
	f.r = makeReader(r)
	f.bits = new([maxNumLit + maxNumDist]int)
	f.codebits = new([numCodes]int)
	f.step = (*decompressor).nextBlock
	f.dict.init(maxMatchOffset, nil)
	return &f
}

// NewReaderDict is like NewReader but initializes the reader
// with a preset dictionary. The returned Reader behaves as if
// the uncompressed data stream started with the given dictionary,
// which has already been read. NewReaderDict is typically used
// to read data compressed by NewWriterDict.
//
// The ReadCloser returned by NewReader also implements Resetter.
func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser {
	fixedHuffmanDecoderInit()

	var f decompressor
	f.r = makeReader(r)
	f.bits = new([maxNumLit + maxNumDist]int)
	f.codebits = new([numCodes]int)
	f.step = (*decompressor).nextBlock
	f.dict.init(maxMatchOffset, dict)
	return &f
}

================================================
FILE: vendor/github.com/klauspost/compress/flate/reverse_bits.go
================================================
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package flate var reverseByte = [256]byte{ 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0, 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8, 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4, 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4, 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc, 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2, 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea, 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa, 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6, 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee, 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1, 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1, 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9, 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5, 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed, 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd, 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3, 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb, 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7, 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7, 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff, } func reverseUint16(v uint16) uint16 { return uint16(reverseByte[v>>8]) | uint16(reverseByte[v&0xFF])<<8 } func reverseBits(number uint16, bitLength byte) uint16 { return reverseUint16(number << uint8(16-bitLength)) } ================================================ FILE: 
vendor/github.com/klauspost/compress/flate/snappy.go
================================================
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Modified for deflate by Klaus Post (c) 2015.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package flate

// emitLiteral writes a literal chunk and returns the number of bytes written.
func emitLiteral(dst *tokens, lit []byte) {
	ol := int(dst.n)
	for i, v := range lit {
		// NOTE(review): the &maxStoreBlockSize mask wraps the index rather
		// than bounds-checking it — presumably callers guarantee the token
		// buffer never overflows; TODO confirm against upstream.
		dst.tokens[(i+ol)&maxStoreBlockSize] = token(v)
	}
	dst.n += uint16(len(lit))
}

// emitCopy writes a copy chunk and returns the number of bytes written.
func emitCopy(dst *tokens, offset, length int) {
	dst.tokens[dst.n] = matchToken(uint32(length-3), uint32(offset-minOffsetSize))
	dst.n++
}

// snappyEnc is the interface satisfied by the per-level snappy-style
// token encoders below.
type snappyEnc interface {
	Encode(dst *tokens, src []byte)
	Reset()
}

// newSnappy returns the encoder for compression levels 1-4; any other
// level is a programmer error and panics.
func newSnappy(level int) snappyEnc {
	switch level {
	case 1:
		return &snappyL1{}
	case 2:
		return &snappyL2{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}
	case 3:
		return &snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}
	case 4:
		return &snappyL4{snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}}
	default:
		panic("invalid level specified")
	}
}

const (
	tableBits       = 14             // Bits used in the table
	tableSize       = 1 << tableBits // Size of the table
	tableMask       = tableSize - 1  // Mask for table indices. Redundant, but can eliminate bounds checks.
	tableShift      = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32.
	baseMatchOffset = 1              // The smallest match offset
	baseMatchLength = 3              // The smallest match length per the RFC section 3.2.5
	maxMatchOffset  = 1 << 15        // The largest match offset
)

// load32 reads a little-endian uint32 from b at offset i.
func load32(b []byte, i int) uint32 {
	b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}

// load64 reads a little-endian uint64 from b at offset i.
func load64(b []byte, i int) uint64 {
	b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}

// hash maps a 4-byte sequence (as uint32) to a tableBits-bit table index.
func hash(u uint32) uint32 {
	return (u * 0x1e35a7bd) >> tableShift
}

// snappyL1 encapsulates level 1 compression
type snappyL1 struct{}

func (e *snappyL1) Reset() {}

// Encode emits literal/match tokens for src using a single-candidate hash
// table, with no carry-over of state between blocks.
func (e *snappyL1) Encode(dst *tokens, src []byte) {
	const (
		inputMargin            = 16 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
	)

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		return
	}

	// Initialize the hash table.
	//
	// The table element type is uint16, as s < sLimit and sLimit < len(src)
	// and len(src) <= maxStoreBlockSize and maxStoreBlockSize == 65535.
	var table [tableSize]uint16

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := len(src) - inputMargin

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := 0

	// The encoded form must start with a literal, as there are no previous
	// bytes to copy, so we start looking for hash matches at s == 1.
	s := 1
	nextHash := hash(load32(src, s))

	for {
		// Copied from the C++ snappy implementation:
		//
		// Heuristic match skipping: If 32 bytes are scanned with no matches
		// found, start looking only at every other byte. If 32 more bytes are
		// scanned (or skipped), look at every third byte, etc.. When a match
		// is found, immediately go back to looking at every byte. This is a
		// small loss (~5% performance, ~0.1% density) for compressible data
		// due to more bookkeeping, but for non-compressible data (such as
		// JPEG) it's a huge win since the compressor quickly "realizes" the
		// data is incompressible and doesn't bother looking for matches
		// everywhere.
		//
		// The "skip" variable keeps track of how many bytes there are since
		// the last match; dividing it by 32 (ie. right-shifting by five) gives
		// the number of bytes to move ahead for each iteration.
		skip := 32

		nextS := s
		candidate := 0
		for {
			s = nextS
			bytesBetweenHashLookups := skip >> 5
			nextS = s + bytesBetweenHashLookups
			skip += bytesBetweenHashLookups
			if nextS > sLimit {
				goto emitRemainder
			}
			candidate = int(table[nextHash&tableMask])
			table[nextHash&tableMask] = uint16(s)
			nextHash = hash(load32(src, nextS))
			if s-candidate <= maxMatchOffset && load32(src, s) == load32(src, candidate) {
				break
			}
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		emitLiteral(dst, src[nextEmit:s])

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.
			base := s

			// Extend the 4-byte match as long as possible.
			//
			// This is an inlined version of Snappy's:
			//	s = extendMatch(src, candidate+4, s+4)
			s += 4
			s1 := base + maxMatchLength
			if s1 > len(src) {
				s1 = len(src)
			}
			a := src[s:s1]
			b := src[candidate+4:]
			b = b[:len(a)]
			l := len(a)
			for i := range a {
				if a[i] != b[i] {
					l = i
					break
				}
			}
			s += l

			// matchToken is flate's equivalent of Snappy's emitCopy.
			dst.tokens[dst.n] = matchToken(uint32(s-base-baseMatchLength), uint32(base-candidate-baseMatchOffset))
			dst.n++
			nextEmit = s
			if s >= sLimit {
				goto emitRemainder
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-1 and at s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load64(src, s-1)
			prevHash := hash(uint32(x >> 0))
			table[prevHash&tableMask] = uint16(s - 1)
			currHash := hash(uint32(x >> 8))
			candidate = int(table[currHash&tableMask])
			table[currHash&tableMask] = uint16(s)
			if s-candidate > maxMatchOffset || uint32(x>>8) != load32(src, candidate) {
				nextHash = hash(uint32(x >> 16))
				s++
				break
			}
		}
	}

emitRemainder:
	if nextEmit < len(src) {
		emitLiteral(dst, src[nextEmit:])
	}
}

// tableEntry pairs a hashed 4-byte value with its (cur-relative) offset.
type tableEntry struct {
	val    uint32
	offset int32
}

// load3232 is load32 with an int32 index.
func load3232(b []byte, i int32) uint32 {
	b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
}

// load6432 is load64 with an int32 index.
func load6432(b []byte, i int32) uint64 {
	b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
}

// snappyGen maintains the table for matches,
// and the previous byte block for level 2.
// This is the generic implementation.
type snappyGen struct {
	prev []byte
	cur  int32
}

// snappyL2 is the level 2 encoder: a single-candidate hash table that,
// unlike level 1, can match across block boundaries via snappyGen.prev.
type snappyL2 struct {
	snappyGen
	table [tableSize]tableEntry
}

// Encode uses a similar algorithm to level 1, but is capable
// of matching across blocks giving better compression at a small slowdown.
func (e *snappyL2) Encode(dst *tokens, src []byte) {
	const (
		inputMargin            = 8 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
	)

	// Protect against e.cur wraparound.
	if e.cur > 1<<30 {
		for i := range e.table {
			e.table[i] = tableEntry{}
		}
		e.cur = maxStoreBlockSize
	}

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		e.cur += maxStoreBlockSize
		e.prev = e.prev[:0]
		return
	}

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := int32(0)
	s := int32(0)
	cv := load3232(src, s)
	nextHash := hash(cv)

	for {
		// Copied from the C++ snappy implementation:
		//
		// Heuristic match skipping: If 32 bytes are scanned with no matches
		// found, start looking only at every other byte. If 32 more bytes are
		// scanned (or skipped), look at every third byte, etc.. When a match
		// is found, immediately go back to looking at every byte. This is a
		// small loss (~5% performance, ~0.1% density) for compressible data
		// due to more bookkeeping, but for non-compressible data (such as
		// JPEG) it's a huge win since the compressor quickly "realizes" the
		// data is incompressible and doesn't bother looking for matches
		// everywhere.
		//
		// The "skip" variable keeps track of how many bytes there are since
		// the last match; dividing it by 32 (ie. right-shifting by five) gives
		// the number of bytes to move ahead for each iteration.
		skip := int32(32)

		nextS := s
		var candidate tableEntry
		for {
			s = nextS
			bytesBetweenHashLookups := skip >> 5
			nextS = s + bytesBetweenHashLookups
			skip += bytesBetweenHashLookups
			if nextS > sLimit {
				goto emitRemainder
			}
			candidate = e.table[nextHash&tableMask]
			now := load3232(src, nextS)
			e.table[nextHash&tableMask] = tableEntry{offset: s + e.cur, val: cv}
			nextHash = hash(now)

			offset := s - (candidate.offset - e.cur)
			if offset > maxMatchOffset || cv != candidate.val {
				// Out of range or not matched.
				cv = now
				continue
			}
			break
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		emitLiteral(dst, src[nextEmit:s])

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.

			// Extend the 4-byte match as long as possible.
			//
			s += 4
			t := candidate.offset - e.cur + 4
			l := e.matchlen(s, t, src)

			// matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
			dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))
			dst.n++
			s += l
			nextEmit = s
			if s >= sLimit {
				t += l
				// Index first pair after match end.
				if int(t+4) < len(src) && t > 0 {
					cv := load3232(src, t)
					e.table[hash(cv)&tableMask] = tableEntry{offset: t + e.cur, val: cv}
				}
				goto emitRemainder
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-1 and at s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load6432(src, s-1)
			prevHash := hash(uint32(x))
			e.table[prevHash&tableMask] = tableEntry{offset: e.cur + s - 1, val: uint32(x)}
			x >>= 8
			currHash := hash(uint32(x))
			candidate = e.table[currHash&tableMask]
			e.table[currHash&tableMask] = tableEntry{offset: e.cur + s, val: uint32(x)}

			offset := s - (candidate.offset - e.cur)
			if offset > maxMatchOffset || uint32(x) != candidate.val {
				cv = uint32(x >> 8)
				nextHash = hash(cv)
				s++
				break
			}
		}
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		emitLiteral(dst, src[nextEmit:])
	}
	e.cur += int32(len(src))
	e.prev = e.prev[:len(src)]
	copy(e.prev, src)
}

// tableEntryPrev keeps the two most recent entries for a hash bucket.
type tableEntryPrev struct {
	Cur  tableEntry
	Prev tableEntry
}

// snappyL3 is the level 3 encoder: like level 2, but each hash bucket
// holds two candidates.
type snappyL3 struct {
	snappyGen
	table [tableSize]tableEntryPrev
}

// Encode uses a similar algorithm to level 2, will check up to two candidates.
func (e *snappyL3) Encode(dst *tokens, src []byte) {
	const (
		inputMargin            = 8 - 1
		minNonLiteralBlockSize = 1 + 1 + inputMargin
	)

	// Protect against e.cur wraparound.
	if e.cur > 1<<30 {
		for i := range e.table {
			e.table[i] = tableEntryPrev{}
		}
		e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]}
	}

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		e.cur += maxStoreBlockSize
		e.prev = e.prev[:0]
		return
	}

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := int32(0)
	s := int32(0)
	cv := load3232(src, s)
	nextHash := hash(cv)

	for {
		// Copied from the C++ snappy implementation:
		//
		// Heuristic match skipping: If 32 bytes are scanned with no matches
		// found, start looking only at every other byte. If 32 more bytes are
		// scanned (or skipped), look at every third byte, etc.. When a match
		// is found, immediately go back to looking at every byte. This is a
		// small loss (~5% performance, ~0.1% density) for compressible data
		// due to more bookkeeping, but for non-compressible data (such as
		// JPEG) it's a huge win since the compressor quickly "realizes" the
		// data is incompressible and doesn't bother looking for matches
		// everywhere.
		//
		// The "skip" variable keeps track of how many bytes there are since
		// the last match; dividing it by 32 (ie. right-shifting by five) gives
		// the number of bytes to move ahead for each iteration.
		skip := int32(32)

		nextS := s
		var candidate tableEntry
		for {
			s = nextS
			bytesBetweenHashLookups := skip >> 5
			nextS = s + bytesBetweenHashLookups
			skip += bytesBetweenHashLookups
			if nextS > sLimit {
				goto emitRemainder
			}
			candidates := e.table[nextHash&tableMask]
			now := load3232(src, nextS)
			e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}}
			nextHash = hash(now)

			// Check both candidates
			candidate = candidates.Cur
			if cv == candidate.val {
				offset := s - (candidate.offset - e.cur)
				if offset <= maxMatchOffset {
					break
				}
			} else {
				// We only check if value mismatches.
				// Offset will always be invalid in other cases.
				candidate = candidates.Prev
				if cv == candidate.val {
					offset := s - (candidate.offset - e.cur)
					if offset <= maxMatchOffset {
						break
					}
				}
			}
			cv = now
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		emitLiteral(dst, src[nextEmit:s])

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.

			// Extend the 4-byte match as long as possible.
			//
			s += 4
			t := candidate.offset - e.cur + 4
			l := e.matchlen(s, t, src)

			// matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
			dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))
			dst.n++
			s += l
			nextEmit = s
			if s >= sLimit {
				t += l
				// Index first pair after match end.
				if int(t+4) < len(src) && t > 0 {
					cv := load3232(src, t)
					nextHash = hash(cv)
					e.table[nextHash&tableMask] = tableEntryPrev{
						Prev: e.table[nextHash&tableMask].Cur,
						Cur:  tableEntry{offset: e.cur + t, val: cv},
					}
				}
				goto emitRemainder
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-3 to s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load6432(src, s-3)
			prevHash := hash(uint32(x))
			e.table[prevHash&tableMask] = tableEntryPrev{
				Prev: e.table[prevHash&tableMask].Cur,
				Cur:  tableEntry{offset: e.cur + s - 3, val: uint32(x)},
			}
			x >>= 8
			prevHash = hash(uint32(x))
			e.table[prevHash&tableMask] = tableEntryPrev{
				Prev: e.table[prevHash&tableMask].Cur,
				Cur:  tableEntry{offset: e.cur + s - 2, val: uint32(x)},
			}
			x >>= 8
			prevHash = hash(uint32(x))
			e.table[prevHash&tableMask] = tableEntryPrev{
				Prev: e.table[prevHash&tableMask].Cur,
				Cur:  tableEntry{offset: e.cur + s - 1, val: uint32(x)},
			}
			x >>= 8
			currHash := hash(uint32(x))
			candidates := e.table[currHash&tableMask]
			cv = uint32(x)
			e.table[currHash&tableMask] = tableEntryPrev{
				Prev: candidates.Cur,
				Cur:  tableEntry{offset: s + e.cur, val: cv},
			}

			// Check both candidates
			candidate = candidates.Cur
			if cv == candidate.val {
				offset := s - (candidate.offset - e.cur)
				if offset <= maxMatchOffset {
					continue
				}
			} else {
				// We only check if value mismatches.
				// Offset will always be invalid in other cases.
				candidate = candidates.Prev
				if cv == candidate.val {
					offset := s - (candidate.offset - e.cur)
					if offset <= maxMatchOffset {
						continue
					}
				}
			}
			cv = uint32(x >> 8)
			nextHash = hash(cv)
			s++
			break
		}
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		emitLiteral(dst, src[nextEmit:])
	}
	e.cur += int32(len(src))
	e.prev = e.prev[:len(src)]
	copy(e.prev, src)
}

// snappyL4
type snappyL4 struct {
	snappyL3
}

// Encode uses a similar algorithm to level 3,
// but will check up to two candidates if first isn't long enough.
func (e *snappyL4) Encode(dst *tokens, src []byte) {
	const (
		inputMargin            = 8 - 3
		minNonLiteralBlockSize = 1 + 1 + inputMargin
		matchLenGood           = 12
	)

	// Protect against e.cur wraparound.
	if e.cur > 1<<30 {
		for i := range e.table {
			e.table[i] = tableEntryPrev{}
		}
		e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]}
	}

	// This check isn't in the Snappy implementation, but there, the caller
	// instead of the callee handles this case.
	if len(src) < minNonLiteralBlockSize {
		// We do not fill the token table.
		// This will be picked up by caller.
		dst.n = uint16(len(src))
		e.cur += maxStoreBlockSize
		e.prev = e.prev[:0]
		return
	}

	// sLimit is when to stop looking for offset/length copies. The inputMargin
	// lets us use a fast path for emitLiteral in the main loop, while we are
	// looking for copies.
	sLimit := int32(len(src) - inputMargin)

	// nextEmit is where in src the next emitLiteral should start from.
	nextEmit := int32(0)
	s := int32(0)
	cv := load3232(src, s)
	nextHash := hash(cv)

	for {
		// Copied from the C++ snappy implementation:
		//
		// Heuristic match skipping: If 32 bytes are scanned with no matches
		// found, start looking only at every other byte. If 32 more bytes are
		// scanned (or skipped), look at every third byte, etc.. When a match
		// is found, immediately go back to looking at every byte. This is a
		// small loss (~5% performance, ~0.1% density) for compressible data
		// due to more bookkeeping, but for non-compressible data (such as
		// JPEG) it's a huge win since the compressor quickly "realizes" the
		// data is incompressible and doesn't bother looking for matches
		// everywhere.
		//
		// The "skip" variable keeps track of how many bytes there are since
		// the last match; dividing it by 32 (ie. right-shifting by five) gives
		// the number of bytes to move ahead for each iteration.
		skip := int32(32)

		nextS := s
		var candidate tableEntry
		var candidateAlt tableEntry
		for {
			s = nextS
			bytesBetweenHashLookups := skip >> 5
			nextS = s + bytesBetweenHashLookups
			skip += bytesBetweenHashLookups
			if nextS > sLimit {
				goto emitRemainder
			}
			candidates := e.table[nextHash&tableMask]
			now := load3232(src, nextS)
			e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}}
			nextHash = hash(now)

			// Check both candidates
			candidate = candidates.Cur
			if cv == candidate.val {
				offset := s - (candidate.offset - e.cur)
				if offset < maxMatchOffset {
					offset = s - (candidates.Prev.offset - e.cur)
					if cv == candidates.Prev.val && offset < maxMatchOffset {
						candidateAlt = candidates.Prev
					}
					break
				}
			} else {
				// We only check if value mismatches.
				// Offset will always be invalid in other cases.
				candidate = candidates.Prev
				if cv == candidate.val {
					offset := s - (candidate.offset - e.cur)
					if offset < maxMatchOffset {
						break
					}
				}
			}
			cv = now
		}

		// A 4-byte match has been found. We'll later see if more than 4 bytes
		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
		// them as literal bytes.
		emitLiteral(dst, src[nextEmit:s])

		// Call emitCopy, and then see if another emitCopy could be our next
		// move. Repeat until we find no match for the input immediately after
		// what was consumed by the last emitCopy call.
		//
		// If we exit this loop normally then we need to call emitLiteral next,
		// though we don't yet know how big the literal will be. We handle that
		// by proceeding to the next iteration of the main loop. We also can
		// exit this loop via goto if we get close to exhausting the input.
		for {
			// Invariant: we have a 4-byte match at s, and no need to emit any
			// literal bytes prior to s.

			// Extend the 4-byte match as long as possible.
			//
			s += 4
			t := candidate.offset - e.cur + 4
			l := e.matchlen(s, t, src)

			// Try alternative candidate if match length < matchLenGood.
			if l < matchLenGood-4 && candidateAlt.offset != 0 {
				t2 := candidateAlt.offset - e.cur + 4
				l2 := e.matchlen(s, t2, src)
				if l2 > l {
					l = l2
					t = t2
				}
			}
			// matchToken is flate's equivalent of Snappy's emitCopy. (length,offset)
			dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset))
			dst.n++
			s += l
			nextEmit = s
			if s >= sLimit {
				t += l
				// Index first pair after match end.
				if int(t+4) < len(src) && t > 0 {
					cv := load3232(src, t)
					nextHash = hash(cv)
					e.table[nextHash&tableMask] = tableEntryPrev{
						Prev: e.table[nextHash&tableMask].Cur,
						Cur:  tableEntry{offset: e.cur + t, val: cv},
					}
				}
				goto emitRemainder
			}

			// We could immediately start working at s now, but to improve
			// compression we first update the hash table at s-3 to s. If
			// another emitCopy is not our next move, also calculate nextHash
			// at s+1. At least on GOARCH=amd64, these three hash calculations
			// are faster as one load64 call (with some shifts) instead of
			// three load32 calls.
			x := load6432(src, s-3)
			prevHash := hash(uint32(x))
			e.table[prevHash&tableMask] = tableEntryPrev{
				Prev: e.table[prevHash&tableMask].Cur,
				Cur:  tableEntry{offset: e.cur + s - 3, val: uint32(x)},
			}
			x >>= 8
			prevHash = hash(uint32(x))
			e.table[prevHash&tableMask] = tableEntryPrev{
				Prev: e.table[prevHash&tableMask].Cur,
				Cur:  tableEntry{offset: e.cur + s - 2, val: uint32(x)},
			}
			x >>= 8
			prevHash = hash(uint32(x))
			e.table[prevHash&tableMask] = tableEntryPrev{
				Prev: e.table[prevHash&tableMask].Cur,
				Cur:  tableEntry{offset: e.cur + s - 1, val: uint32(x)},
			}
			x >>= 8
			currHash := hash(uint32(x))
			candidates := e.table[currHash&tableMask]
			cv = uint32(x)
			e.table[currHash&tableMask] = tableEntryPrev{
				Prev: candidates.Cur,
				Cur:  tableEntry{offset: s + e.cur, val: cv},
			}

			// Check both candidates
			candidate = candidates.Cur
			candidateAlt = tableEntry{}
			if cv == candidate.val {
				offset := s - (candidate.offset - e.cur)
				if offset <= maxMatchOffset {
					offset = s - (candidates.Prev.offset - e.cur)
					if cv == candidates.Prev.val && offset <= maxMatchOffset {
						candidateAlt = candidates.Prev
					}
					continue
				}
			} else {
				// We only check if value mismatches.
				// Offset will always be invalid in other cases.
				candidate = candidates.Prev
				if cv == candidate.val {
					offset := s - (candidate.offset - e.cur)
					if offset <= maxMatchOffset {
						continue
					}
				}
			}
			cv = uint32(x >> 8)
			nextHash = hash(cv)
			s++
			break
		}
	}

emitRemainder:
	if int(nextEmit) < len(src) {
		emitLiteral(dst, src[nextEmit:])
	}
	e.cur += int32(len(src))
	e.prev = e.prev[:len(src)]
	copy(e.prev, src)
}

// matchlen returns how many bytes after the initial 4-byte match at
// src[s:] equal the data starting at offset t; a negative t refers into
// the previous block (e.prev), and the match may continue into src.
func (e *snappyGen) matchlen(s, t int32, src []byte) int32 {
	s1 := int(s) + maxMatchLength - 4
	if s1 > len(src) {
		s1 = len(src)
	}

	// If we are inside the current block
	if t >= 0 {
		b := src[t:]
		a := src[s:s1]
		b = b[:len(a)]
		// Extend the match to be as long as possible.
		for i := range a {
			if a[i] != b[i] {
				return int32(i)
			}
		}
		return int32(len(a))
	}

	// We found a match in the previous block.
	tp := int32(len(e.prev)) + t
	if tp < 0 {
		return 0
	}

	// Extend the match to be as long as possible.
	a := src[s:s1]
	b := e.prev[tp:]
	if len(b) > len(a) {
		b = b[:len(a)]
	}
	a = a[:len(b)]
	for i := range b {
		if a[i] != b[i] {
			return int32(i)
		}
	}

	// If we reached our limit, we matched everything we are
	// allowed to in the previous block and we return.
	n := int32(len(b))
	if int(s+n) == s1 {
		return n
	}

	// Continue looking for more matches in the current block.
	a = src[s+n : s1]
	b = src[:len(a)]
	for i := range a {
		if a[i] != b[i] {
			return int32(i) + n
		}
	}
	return int32(len(a)) + n
}

// Reset the encoding table.
func (e *snappyGen) Reset() {
	e.prev = e.prev[:0]
	e.cur += maxMatchOffset
}

================================================
FILE: vendor/github.com/klauspost/compress/flate/token.go
================================================
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package flate import "fmt" const ( // 2 bits: type 0 = literal 1=EOF 2=Match 3=Unused // 8 bits: xlength = length - MIN_MATCH_LENGTH // 22 bits xoffset = offset - MIN_OFFSET_SIZE, or literal lengthShift = 22 offsetMask = 1< pair into a match token. func matchToken(xlength uint32, xoffset uint32) token { return token(matchType + xlength< maxMatchLength || xoffset > maxMatchOffset { panic(fmt.Sprintf("Invalid match: len: %d, offset: %d\n", xlength, xoffset)) return token(matchType) } return token(matchType + xlength<> lengthShift) } func lengthCode(len uint32) uint32 { return lengthCodes[len] } // Returns the offset code corresponding to a specific offset func offsetCode(off uint32) uint32 { if off < uint32(len(offsetCodes)) { return offsetCodes[off] } else if off>>7 < uint32(len(offsetCodes)) { return offsetCodes[off>>7] + 14 } else { return offsetCodes[off>>14] + 28 } } ================================================ FILE: vendor/github.com/klauspost/cpuid/LICENSE ================================================ The MIT License (MIT) Copyright (c) 2015 Klaus Post Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.

================================================
FILE: vendor/github.com/klauspost/cpuid/cpuid.go
================================================
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.

// Package cpuid provides information about the CPU running the current program.
//
// CPU features are detected on startup, and kept for fast access through the life of the application.
// Currently x86 / x64 (AMD64) is supported.
//
// You can access the CPU information by accessing the shared CPU variable of the cpuid library.
//
// Package home: https://github.com/klauspost/cpuid
package cpuid

import "strings"

// Vendor is a representation of a CPU vendor.
type Vendor int

const (
	Other Vendor = iota
	Intel
	AMD
	VIA
	Transmeta
	NSC
	KVM  // Kernel-based Virtual Machine
	MSVM // Microsoft Hyper-V or Windows Virtual PC
	VMware
	XenHVM
)

// Feature flag bits. Each constant is a single bit in a Flags value;
// test with CPU.Features&FLAG != 0 or the matching CPUInfo method.
const (
	CMOV        = 1 << iota // i686 CMOV
	NX                      // NX (No-Execute) bit
	AMD3DNOW                // AMD 3DNOW
	AMD3DNOWEXT             // AMD 3DNowExt
	MMX                     // standard MMX
	MMXEXT                  // SSE integer functions or AMD MMX ext
	SSE                     // SSE functions
	SSE2                    // P4 SSE functions
	SSE3                    // Prescott SSE3 functions
	SSSE3                   // Conroe SSSE3 functions
	SSE4                    // Penryn SSE4.1 functions
	SSE4A                   // AMD Barcelona microarchitecture SSE4a instructions
	SSE42                   // Nehalem SSE4.2 functions
	AVX                     // AVX functions
	AVX2                    // AVX2 functions
	FMA3                    // Intel FMA 3
	FMA4                    // Bulldozer FMA4 functions
	XOP                     // Bulldozer XOP functions
	F16C                    // Half-precision floating-point conversion
	BMI1                    // Bit Manipulation Instruction Set 1
	BMI2                    // Bit Manipulation Instruction Set 2
	TBM                     // AMD Trailing Bit Manipulation
	LZCNT                   // LZCNT instruction
	POPCNT                  // POPCNT instruction
	AESNI                   // Advanced Encryption Standard New Instructions
	CLMUL                   // Carry-less Multiplication
	HTT                     // Hyperthreading (enabled)
	HLE                     // Hardware Lock Elision
	RTM                     // Restricted Transactional Memory
	RDRAND                  // RDRAND instruction is available
	RDSEED                  // RDSEED instruction is available
	ADX                     // Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
	SHA                     // Intel SHA Extensions
	AVX512F                 // AVX-512 Foundation
	AVX512DQ                // AVX-512 Doubleword and Quadword Instructions
	AVX512IFMA              // AVX-512 Integer Fused Multiply-Add Instructions
	AVX512PF                // AVX-512 Prefetch Instructions
	AVX512ER                // AVX-512 Exponential and Reciprocal Instructions
	AVX512CD                // AVX-512 Conflict Detection Instructions
	AVX512BW                // AVX-512 Byte and Word Instructions
	AVX512VL                // AVX-512 Vector Length Extensions
	AVX512VBMI              // AVX-512 Vector Bit Manipulation Instructions
	MPX                     // Intel MPX (Memory Protection Extensions)
	ERMS                    // Enhanced REP MOVSB/STOSB
	RDTSCP                  // RDTSCP Instruction
	CX16                    // CMPXCHG16B Instruction
	SGX                     // Software Guard Extensions

	// Performance indicators
	SSE2SLOW // SSE2 is supported, but usually not faster
	SSE3SLOW // SSE3 is supported, but usually not faster
	ATOM     // Atom processor, some SSSE3 instructions are slower
)

// flagNames maps each feature bit to its display name (used by Flags.Strings).
var flagNames = map[Flags]string{
	CMOV:        "CMOV",        // i686 CMOV
	NX:          "NX",          // NX (No-Execute) bit
	AMD3DNOW:    "AMD3DNOW",    // AMD 3DNOW
	AMD3DNOWEXT: "AMD3DNOWEXT", // AMD 3DNowExt
	MMX:         "MMX",         // Standard MMX
	MMXEXT:      "MMXEXT",      // SSE integer functions or AMD MMX ext
	SSE:         "SSE",         // SSE functions
	SSE2:        "SSE2",        // P4 SSE2 functions
	SSE3:        "SSE3",        // Prescott SSE3 functions
	SSSE3:       "SSSE3",       // Conroe SSSE3 functions
	SSE4:        "SSE4.1",      // Penryn SSE4.1 functions
	SSE4A:       "SSE4A",       // AMD Barcelona microarchitecture SSE4a instructions
	SSE42:       "SSE4.2",      // Nehalem SSE4.2 functions
	AVX:         "AVX",         // AVX functions
	AVX2:        "AVX2",        // AVX functions
	FMA3:        "FMA3",        // Intel FMA 3
	FMA4:        "FMA4",        // Bulldozer FMA4 functions
	XOP:         "XOP",         // Bulldozer XOP functions
	F16C:        "F16C",        // Half-precision floating-point conversion
	BMI1:        "BMI1",        // Bit Manipulation Instruction Set 1
	BMI2:        "BMI2",        // Bit Manipulation Instruction Set 2
	TBM:         "TBM",         // AMD Trailing Bit Manipulation
	LZCNT:       "LZCNT",       // LZCNT instruction
	POPCNT:      "POPCNT",      // POPCNT instruction
	AESNI:       "AESNI",       // Advanced Encryption Standard New Instructions
	CLMUL:       "CLMUL",       // Carry-less Multiplication
	HTT:         "HTT",         // Hyperthreading (enabled)
	HLE:         "HLE",         // Hardware Lock Elision
	RTM:         "RTM",         // Restricted Transactional Memory
	RDRAND:      "RDRAND",      // RDRAND instruction is available
	RDSEED:      "RDSEED",      // RDSEED instruction is available
	ADX:         "ADX",         // Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
	SHA:         "SHA",         // Intel SHA Extensions
	AVX512F:     "AVX512F",     // AVX-512 Foundation
	AVX512DQ:    "AVX512DQ",    // AVX-512 Doubleword and Quadword Instructions
	AVX512IFMA:  "AVX512IFMA",  // AVX-512 Integer Fused Multiply-Add Instructions
	AVX512PF:    "AVX512PF",    // AVX-512 Prefetch Instructions
	AVX512ER:    "AVX512ER",    // AVX-512 Exponential and Reciprocal Instructions
	AVX512CD:    "AVX512CD",    // AVX-512 Conflict Detection Instructions
	AVX512BW:    "AVX512BW",    // AVX-512 Byte and Word Instructions
	AVX512VL:    "AVX512VL",    // AVX-512 Vector Length Extensions
	AVX512VBMI:  "AVX512VBMI",  // AVX-512 Vector Bit Manipulation Instructions
	MPX:         "MPX",         // Intel MPX (Memory Protection Extensions)
	ERMS:        "ERMS",        // Enhanced REP MOVSB/STOSB
	RDTSCP:      "RDTSCP",      // RDTSCP Instruction
	CX16:        "CX16",        // CMPXCHG16B Instruction
	SGX:         "SGX",         // Software Guard Extensions

	// Performance indicators
	SSE2SLOW: "SSE2SLOW", // SSE2 supported, but usually not faster
	SSE3SLOW: "SSE3SLOW", // SSE3 supported, but usually not faster
	ATOM:     "ATOM",     // Atom processor, some SSSE3 instructions are slower
}

// CPUInfo contains information about the detected system CPU.
type CPUInfo struct {
	BrandName      string // Brand name reported by the CPU
	VendorID       Vendor // Comparable CPU vendor ID
	Features       Flags  // Features of the CPU
	PhysicalCores  int    // Number of physical processor cores in your CPU. Will be 0 if undetectable.
	ThreadsPerCore int    // Number of threads per physical core. Will be 1 if undetectable.
	LogicalCores   int    // Number of physical cores times threads that can run on each core through the use of hyperthreading. Will be 0 if undetectable.
	Family         int    // CPU family number
	Model          int    // CPU model number
	CacheLine      int    // Cache line size in bytes. Will be 0 if undetectable.
	Cache          struct {
		L1I int // L1 Instruction Cache (per core or shared). Will be -1 if undetected
		L1D int // L1 Data Cache (per core or shared). Will be -1 if undetected
		L2  int // L2 Cache (per core or shared). Will be -1 if undetected
		L3  int // L3 Instruction Cache (per core or shared). Will be -1 if undetected
	}
	SGX       SGXSupport
	maxFunc   uint32 // highest standard CPUID function supported
	maxExFunc uint32 // highest extended CPUID function supported
}

// Implementations are installed by initCPU (detect_intel.go on x86/amd64,
// no-op stubs from detect_ref.go elsewhere); they wrap the CPUID, XGETBV
// and RDTSCP instructions.
var cpuid func(op uint32) (eax, ebx, ecx, edx uint32)
var cpuidex func(op, op2 uint32) (eax, ebx, ecx, edx uint32)
var xgetbv func(index uint32) (eax, edx uint32)
var rdtscpAsm func() (eax, ebx, ecx, edx uint32)

// CPU contains information about the CPU as detected on startup,
// or when Detect last was called.
//
// Use this as the primary entry point to your data;
// this way queries are answered from results cached at detection time
// instead of re-executing CPUID on every call.
// (NOTE(review): the original comment was truncated mid-sentence in the
// extract; wording of the second sentence reconstructed from the code.)
var CPU CPUInfo

func init() {
	initCPU()
	Detect()
}

// Detect will re-detect current CPU info.
// This will replace the content of the exported CPU variable.
//
// Unless you expect the CPU to change while you are running your program
// you should not need to call this function.
// If you call this, you must ensure that no other goroutine is accessing the
// exported CPU variable.
func Detect() {
	CPU.maxFunc = maxFunctionID()
	CPU.maxExFunc = maxExtendedFunction()
	CPU.BrandName = brandName()
	CPU.CacheLine = cacheLine()
	CPU.Family, CPU.Model = familyModel()
	CPU.Features = support()
	CPU.SGX = sgx(CPU.Features&SGX != 0)
	CPU.ThreadsPerCore = threadsPerCore()
	CPU.LogicalCores = logicalCores()
	CPU.PhysicalCores = physicalCores()
	CPU.VendorID = vendorID()
	CPU.cacheSize()
}

// Generated here: http://play.golang.org/p/BxFH2Gdc0G

// Cmov indicates support of CMOV instructions
func (c CPUInfo) Cmov() bool {
	return c.Features&CMOV != 0
}

// Amd3dnow indicates support of AMD 3DNOW! instructions
func (c CPUInfo) Amd3dnow() bool {
	return c.Features&AMD3DNOW != 0
}

// Amd3dnowExt indicates support of AMD 3DNOW! Extended instructions
func (c CPUInfo) Amd3dnowExt() bool {
	return c.Features&AMD3DNOWEXT != 0
}

// MMX indicates support of MMX instructions
func (c CPUInfo) MMX() bool {
	return c.Features&MMX != 0
}

// MMXExt indicates support of MMXEXT instructions
// (SSE integer functions or AMD MMX ext)
func (c CPUInfo) MMXExt() bool {
	return c.Features&MMXEXT != 0
}

// SSE indicates support of SSE instructions
func (c CPUInfo) SSE() bool {
	return c.Features&SSE != 0
}

// SSE2 indicates support of SSE 2 instructions
func (c CPUInfo) SSE2() bool {
	return c.Features&SSE2 != 0
}

// SSE3 indicates support of SSE 3 instructions
func (c CPUInfo) SSE3() bool {
	return c.Features&SSE3 != 0
}

// SSSE3 indicates support of SSSE 3 instructions
func (c CPUInfo) SSSE3() bool {
	return c.Features&SSSE3 != 0
}

// SSE4 indicates support of SSE 4 (also called SSE 4.1) instructions
func (c CPUInfo) SSE4() bool {
	return c.Features&SSE4 != 0
}

// SSE42 indicates support of SSE4.2 instructions
func (c CPUInfo) SSE42() bool {
	return c.Features&SSE42 != 0
}

// AVX indicates support of AVX instructions
// and operating system support of AVX instructions
func (c CPUInfo) AVX() bool {
	return c.Features&AVX != 0
}

// AVX2 indicates support of AVX2 instructions
func (c CPUInfo) AVX2() bool {
	return c.Features&AVX2 != 0
}

// FMA3 indicates support of FMA3 instructions
func (c CPUInfo) FMA3() bool {
	return c.Features&FMA3 != 0
}

// FMA4 indicates support of FMA4 instructions
func (c CPUInfo) FMA4() bool {
	return c.Features&FMA4 != 0
}

// XOP indicates support of XOP instructions
func (c CPUInfo) XOP() bool {
	return c.Features&XOP != 0
}

// F16C indicates support of F16C instructions
func (c CPUInfo) F16C() bool {
	return c.Features&F16C != 0
}

// BMI1 indicates support of BMI1 instructions
func (c CPUInfo) BMI1() bool {
	return c.Features&BMI1 != 0
}

// BMI2 indicates support of BMI2 instructions
func (c CPUInfo) BMI2() bool {
	return c.Features&BMI2 != 0
}

// TBM indicates support of TBM instructions
// (AMD Trailing Bit Manipulation)
func (c CPUInfo) TBM() bool {
	return c.Features&TBM != 0
}

// Lzcnt indicates support of LZCNT instruction
func (c CPUInfo) Lzcnt() bool {
	return c.Features&LZCNT != 0
}

// Popcnt indicates support of POPCNT instruction
func (c CPUInfo) Popcnt() bool {
	return c.Features&POPCNT != 0
}

// HTT indicates the processor has Hyperthreading enabled
func (c CPUInfo) HTT() bool {
	return c.Features&HTT != 0
}

// SSE2Slow indicates that SSE2 may be slow on this processor
func (c CPUInfo) SSE2Slow() bool {
	return c.Features&SSE2SLOW != 0
}

// SSE3Slow indicates that SSE3 may be slow on this processor
func (c CPUInfo) SSE3Slow() bool {
	return c.Features&SSE3SLOW != 0
}

// AesNi indicates support of AES-NI instructions
// (Advanced Encryption Standard New Instructions)
func (c CPUInfo) AesNi() bool {
	return c.Features&AESNI != 0
}

// Clmul indicates support of CLMUL instructions
// (Carry-less Multiplication)
func (c CPUInfo) Clmul() bool {
	return c.Features&CLMUL != 0
}

// NX indicates support of NX (No-Execute) bit
func (c CPUInfo) NX() bool {
	return c.Features&NX != 0
}

// SSE4A indicates support of AMD Barcelona microarchitecture SSE4a instructions
func (c CPUInfo) SSE4A() bool {
	return c.Features&SSE4A != 0
}

// HLE indicates support of Hardware Lock Elision
func (c CPUInfo) HLE() bool {
	return c.Features&HLE != 0
}

// RTM indicates support of Restricted Transactional Memory
func (c CPUInfo) RTM() bool {
	return c.Features&RTM != 0
}

// Rdrand indicates support of RDRAND instruction is available
func (c CPUInfo) Rdrand() bool {
	return c.Features&RDRAND != 0
}

// Rdseed indicates support of RDSEED instruction is available
func (c CPUInfo) Rdseed() bool {
	return c.Features&RDSEED != 0
}

// ADX indicates support of Intel ADX (Multi-Precision Add-Carry Instruction Extensions)
func (c CPUInfo) ADX() bool {
	return c.Features&ADX != 0
}

// SHA indicates support of Intel SHA Extensions
func (c CPUInfo) SHA() bool {
	return c.Features&SHA != 0
}

// AVX512F indicates support of AVX-512 Foundation
func (c CPUInfo) AVX512F() bool {
	return c.Features&AVX512F != 0
}

// AVX512DQ indicates support of AVX-512 Doubleword and Quadword Instructions
func (c CPUInfo) AVX512DQ() bool {
	return c.Features&AVX512DQ != 0
}

// AVX512IFMA indicates support of AVX-512 Integer Fused Multiply-Add Instructions
func (c CPUInfo) AVX512IFMA() bool {
	return c.Features&AVX512IFMA != 0
}

// AVX512PF indicates support of AVX-512 Prefetch Instructions
func (c CPUInfo) AVX512PF() bool {
	return c.Features&AVX512PF != 0
}

// AVX512ER indicates support of AVX-512 Exponential and Reciprocal Instructions
func (c CPUInfo) AVX512ER() bool {
	return c.Features&AVX512ER != 0
}

// AVX512CD indicates support of AVX-512 Conflict Detection Instructions
func (c CPUInfo) AVX512CD() bool {
	return c.Features&AVX512CD != 0
}

// AVX512BW indicates support of AVX-512 Byte and Word Instructions
func (c CPUInfo) AVX512BW() bool {
	return c.Features&AVX512BW != 0
}

// AVX512VL indicates support of AVX-512 Vector Length Extensions
func (c CPUInfo) AVX512VL() bool {
	return c.Features&AVX512VL != 0
}

// AVX512VBMI indicates support of AVX-512 Vector Bit Manipulation Instructions
func (c CPUInfo) AVX512VBMI() bool {
	return c.Features&AVX512VBMI != 0
}

// MPX indicates support of Intel MPX (Memory Protection Extensions)
func (c CPUInfo) MPX() bool {
	return c.Features&MPX != 0
}

// ERMS indicates support of Enhanced REP MOVSB/STOSB
func (c CPUInfo) ERMS() bool {
	return c.Features&ERMS != 0
}

// RDTSCP indicates support of the RDTSCP instruction
func (c CPUInfo) RDTSCP() bool {
	return c.Features&RDTSCP != 0
}

// CX16 indicates support of the CMPXCHG16B instruction
func (c CPUInfo) CX16() bool {
	return c.Features&CX16 != 0
}

// Atom indicates an Atom processor
func (c CPUInfo) Atom() bool {
	return c.Features&ATOM != 0
}

// Intel returns true if vendor is recognized as Intel
func (c CPUInfo) Intel() bool {
	return c.VendorID == Intel
}

// AMD returns true if vendor is recognized as AMD
func (c CPUInfo) AMD() bool {
	return c.VendorID == AMD
}

// Transmeta returns true if vendor is recognized as Transmeta
func (c CPUInfo) Transmeta() bool {
	return c.VendorID == Transmeta
}

// NSC returns true if vendor is recognized as National Semiconductor
func (c CPUInfo) NSC() bool {
	return c.VendorID == NSC
}

// VIA returns true if vendor is recognized as VIA
func (c CPUInfo) VIA() bool {
	return c.VendorID == VIA
}

// RTCounter returns the 64-bit time-stamp counter
// Uses the RDTSCP instruction. The value 0 is returned
// if the CPU does not support the instruction.
func (c CPUInfo) RTCounter() uint64 {
	if !c.RDTSCP() {
		return 0
	}
	a, _, _, d := rdtscpAsm()
	return uint64(a) | (uint64(d) << 32)
}

// Ia32TscAux returns the IA32_TSC_AUX part of the RDTSCP.
// This variable is OS dependent, but on Linux contains information
// about the current cpu/core the code is running on.
// If the RDTSCP instruction isn't supported on the CPU, the value 0 is returned.
func (c CPUInfo) Ia32TscAux() uint32 {
	if !c.RDTSCP() {
		return 0
	}
	_, _, ecx, _ := rdtscpAsm()
	return ecx
}

// LogicalCPU will return the Logical CPU the code is currently executing on.
// This is likely to change when the OS re-schedules the running thread
// to another CPU.
// If the current core cannot be detected, -1 will be returned.
func (c CPUInfo) LogicalCPU() int { if c.maxFunc < 1 { return -1 } _, ebx, _, _ := cpuid(1) return int(ebx >> 24) } // VM Will return true if the cpu id indicates we are in // a virtual machine. This is only a hint, and will very likely // have many false negatives. func (c CPUInfo) VM() bool { switch c.VendorID { case MSVM, KVM, VMware, XenHVM: return true } return false } // Flags contains detected cpu features and caracteristics type Flags uint64 // String returns a string representation of the detected // CPU features. func (f Flags) String() string { return strings.Join(f.Strings(), ",") } // Strings returns and array of the detected features. func (f Flags) Strings() []string { s := support() r := make([]string, 0, 20) for i := uint(0); i < 64; i++ { key := Flags(1 << i) val := flagNames[key] if s&key != 0 { r = append(r, val) } } return r } func maxExtendedFunction() uint32 { eax, _, _, _ := cpuid(0x80000000) return eax } func maxFunctionID() uint32 { a, _, _, _ := cpuid(0) return a } func brandName() string { if maxExtendedFunction() >= 0x80000004 { v := make([]uint32, 0, 48) for i := uint32(0); i < 3; i++ { a, b, c, d := cpuid(0x80000002 + i) v = append(v, a, b, c, d) } return strings.Trim(string(valAsString(v...)), " ") } return "unknown" } func threadsPerCore() int { mfi := maxFunctionID() if mfi < 0x4 || vendorID() != Intel { return 1 } if mfi < 0xb { _, b, _, d := cpuid(1) if (d & (1 << 28)) != 0 { // v will contain logical core count v := (b >> 16) & 255 if v > 1 { a4, _, _, _ := cpuid(4) // physical cores v2 := (a4 >> 26) + 1 if v2 > 0 { return int(v) / int(v2) } } } return 1 } _, b, _, _ := cpuidex(0xb, 0) if b&0xffff == 0 { return 1 } return int(b & 0xffff) } func logicalCores() int { mfi := maxFunctionID() switch vendorID() { case Intel: // Use this on old Intel processors if mfi < 0xb { if mfi < 1 { return 0 } // CPUID.1:EBX[23:16] represents the maximum number of addressable IDs (initial APIC ID) // that can be assigned to logical processors in 
a physical package. // The value may not be the same as the number of logical processors that are present in the hardware of a physical package. _, ebx, _, _ := cpuid(1) logical := (ebx >> 16) & 0xff return int(logical) } _, b, _, _ := cpuidex(0xb, 1) return int(b & 0xffff) case AMD: _, b, _, _ := cpuid(1) return int((b >> 16) & 0xff) default: return 0 } } func familyModel() (int, int) { if maxFunctionID() < 0x1 { return 0, 0 } eax, _, _, _ := cpuid(1) family := ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff) model := ((eax >> 4) & 0xf) + ((eax >> 12) & 0xf0) return int(family), int(model) } func physicalCores() int { switch vendorID() { case Intel: return logicalCores() / threadsPerCore() case AMD: if maxExtendedFunction() >= 0x80000008 { _, _, c, _ := cpuid(0x80000008) return int(c&0xff) + 1 } } return 0 } // Except from http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID var vendorMapping = map[string]Vendor{ "AMDisbetter!": AMD, "AuthenticAMD": AMD, "CentaurHauls": VIA, "GenuineIntel": Intel, "TransmetaCPU": Transmeta, "GenuineTMx86": Transmeta, "Geode by NSC": NSC, "VIA VIA VIA ": VIA, "KVMKVMKVMKVM": KVM, "Microsoft Hv": MSVM, "VMwareVMware": VMware, "XenVMMXenVMM": XenHVM, } func vendorID() Vendor { _, b, c, d := cpuid(0) v := valAsString(b, d, c) vend, ok := vendorMapping[string(v)] if !ok { return Other } return vend } func cacheLine() int { if maxFunctionID() < 0x1 { return 0 } _, ebx, _, _ := cpuid(1) cache := (ebx & 0xff00) >> 5 // cflush size if cache == 0 && maxExtendedFunction() >= 0x80000006 { _, _, ecx, _ := cpuid(0x80000006) cache = ecx & 0xff // cacheline size } // TODO: Read from Cache and TLB Information return int(cache) } func (c *CPUInfo) cacheSize() { c.Cache.L1D = -1 c.Cache.L1I = -1 c.Cache.L2 = -1 c.Cache.L3 = -1 vendor := vendorID() switch vendor { case Intel: if maxFunctionID() < 4 { return } for i := uint32(0); ; i++ { eax, ebx, ecx, _ := cpuidex(4, i) cacheType := eax & 15 if cacheType == 0 { break } cacheLevel := (eax >> 5) & 7 
coherency := int(ebx&0xfff) + 1 partitions := int((ebx>>12)&0x3ff) + 1 associativity := int((ebx>>22)&0x3ff) + 1 sets := int(ecx) + 1 size := associativity * partitions * coherency * sets switch cacheLevel { case 1: if cacheType == 1 { // 1 = Data Cache c.Cache.L1D = size } else if cacheType == 2 { // 2 = Instruction Cache c.Cache.L1I = size } else { if c.Cache.L1D < 0 { c.Cache.L1I = size } if c.Cache.L1I < 0 { c.Cache.L1I = size } } case 2: c.Cache.L2 = size case 3: c.Cache.L3 = size } } case AMD: // Untested. if maxExtendedFunction() < 0x80000005 { return } _, _, ecx, edx := cpuid(0x80000005) c.Cache.L1D = int(((ecx >> 24) & 0xFF) * 1024) c.Cache.L1I = int(((edx >> 24) & 0xFF) * 1024) if maxExtendedFunction() < 0x80000006 { return } _, _, ecx, _ = cpuid(0x80000006) c.Cache.L2 = int(((ecx >> 16) & 0xFFFF) * 1024) } return } type SGXSupport struct { Available bool SGX1Supported bool SGX2Supported bool MaxEnclaveSizeNot64 int64 MaxEnclaveSize64 int64 } func sgx(available bool) (rval SGXSupport) { rval.Available = available if !available { return } a, _, _, d := cpuidex(0x12, 0) rval.SGX1Supported = a&0x01 != 0 rval.SGX2Supported = a&0x02 != 0 rval.MaxEnclaveSizeNot64 = 1 << (d & 0xFF) // pow 2 rval.MaxEnclaveSize64 = 1 << ((d >> 8) & 0xFF) // pow 2 return } func support() Flags { mfi := maxFunctionID() vend := vendorID() if mfi < 0x1 { return 0 } rval := uint64(0) _, _, c, d := cpuid(1) if (d & (1 << 15)) != 0 { rval |= CMOV } if (d & (1 << 23)) != 0 { rval |= MMX } if (d & (1 << 25)) != 0 { rval |= MMXEXT } if (d & (1 << 25)) != 0 { rval |= SSE } if (d & (1 << 26)) != 0 { rval |= SSE2 } if (c & 1) != 0 { rval |= SSE3 } if (c & 0x00000200) != 0 { rval |= SSSE3 } if (c & 0x00080000) != 0 { rval |= SSE4 } if (c & 0x00100000) != 0 { rval |= SSE42 } if (c & (1 << 25)) != 0 { rval |= AESNI } if (c & (1 << 1)) != 0 { rval |= CLMUL } if c&(1<<23) != 0 { rval |= POPCNT } if c&(1<<30) != 0 { rval |= RDRAND } if c&(1<<29) != 0 { rval |= F16C } if c&(1<<13) != 0 { rval |= 
CX16 } if vend == Intel && (d&(1<<28)) != 0 && mfi >= 4 { if threadsPerCore() > 1 { rval |= HTT } } // Check XGETBV, OXSAVE and AVX bits if c&(1<<26) != 0 && c&(1<<27) != 0 && c&(1<<28) != 0 { // Check for OS support eax, _ := xgetbv(0) if (eax & 0x6) == 0x6 { rval |= AVX if (c & 0x00001000) != 0 { rval |= FMA3 } } } // Check AVX2, AVX2 requires OS support, but BMI1/2 don't. if mfi >= 7 { _, ebx, ecx, _ := cpuidex(7, 0) if (rval&AVX) != 0 && (ebx&0x00000020) != 0 { rval |= AVX2 } if (ebx & 0x00000008) != 0 { rval |= BMI1 if (ebx & 0x00000100) != 0 { rval |= BMI2 } } if ebx&(1<<2) != 0 { rval |= SGX } if ebx&(1<<4) != 0 { rval |= HLE } if ebx&(1<<9) != 0 { rval |= ERMS } if ebx&(1<<11) != 0 { rval |= RTM } if ebx&(1<<14) != 0 { rval |= MPX } if ebx&(1<<18) != 0 { rval |= RDSEED } if ebx&(1<<19) != 0 { rval |= ADX } if ebx&(1<<29) != 0 { rval |= SHA } // Only detect AVX-512 features if XGETBV is supported if c&((1<<26)|(1<<27)) == (1<<26)|(1<<27) { // Check for OS support eax, _ := xgetbv(0) // Verify that XCR0[7:5] = ‘111b’ (OPMASK state, upper 256-bit of ZMM0-ZMM15 and // ZMM16-ZMM31 state are enabled by OS) /// and that XCR0[2:1] = ‘11b’ (XMM state and YMM state are enabled by OS). 
if (eax>>5)&7 == 7 && (eax>>1)&3 == 3 { if ebx&(1<<16) != 0 { rval |= AVX512F } if ebx&(1<<17) != 0 { rval |= AVX512DQ } if ebx&(1<<21) != 0 { rval |= AVX512IFMA } if ebx&(1<<26) != 0 { rval |= AVX512PF } if ebx&(1<<27) != 0 { rval |= AVX512ER } if ebx&(1<<28) != 0 { rval |= AVX512CD } if ebx&(1<<30) != 0 { rval |= AVX512BW } if ebx&(1<<31) != 0 { rval |= AVX512VL } // ecx if ecx&(1<<1) != 0 { rval |= AVX512VBMI } } } } if maxExtendedFunction() >= 0x80000001 { _, _, c, d := cpuid(0x80000001) if (c & (1 << 5)) != 0 { rval |= LZCNT rval |= POPCNT } if (d & (1 << 31)) != 0 { rval |= AMD3DNOW } if (d & (1 << 30)) != 0 { rval |= AMD3DNOWEXT } if (d & (1 << 23)) != 0 { rval |= MMX } if (d & (1 << 22)) != 0 { rval |= MMXEXT } if (c & (1 << 6)) != 0 { rval |= SSE4A } if d&(1<<20) != 0 { rval |= NX } if d&(1<<27) != 0 { rval |= RDTSCP } /* Allow for selectively disabling SSE2 functions on AMD processors with SSE2 support but not SSE4a. This includes Athlon64, some Opteron, and some Sempron processors. MMX, SSE, or 3DNow! are faster than SSE2 often enough to utilize this special-case flag. AV_CPU_FLAG_SSE2 and AV_CPU_FLAG_SSE2SLOW are both set in this case so that SSE2 is used unless explicitly disabled by checking AV_CPU_FLAG_SSE2SLOW. */ if vendorID() != Intel && rval&SSE2 != 0 && (c&0x00000040) == 0 { rval |= SSE2SLOW } /* XOP and FMA4 use the AVX instruction coding scheme, so they can't be * used unless the OS has AVX support. */ if (rval & AVX) != 0 { if (c & 0x00000800) != 0 { rval |= XOP } if (c & 0x00010000) != 0 { rval |= FMA4 } } if vendorID() == Intel { family, model := familyModel() if family == 6 && (model == 9 || model == 13 || model == 14) { /* 6/9 (pentium-m "banias"), 6/13 (pentium-m "dothan"), and * 6/14 (core1 "yonah") theoretically support sse2, but it's * usually slower than mmx. 
*/ if (rval & SSE2) != 0 { rval |= SSE2SLOW } if (rval & SSE3) != 0 { rval |= SSE3SLOW } } /* The Atom processor has SSSE3 support, which is useful in many cases, * but sometimes the SSSE3 version is slower than the SSE2 equivalent * on the Atom, but is generally faster on other processors supporting * SSSE3. This flag allows for selectively disabling certain SSSE3 * functions on the Atom. */ if family == 6 && model == 28 { rval |= ATOM } } } return Flags(rval) } func valAsString(values ...uint32) []byte { r := make([]byte, 4*len(values)) for i, v := range values { dst := r[i*4:] dst[0] = byte(v & 0xff) dst[1] = byte((v >> 8) & 0xff) dst[2] = byte((v >> 16) & 0xff) dst[3] = byte((v >> 24) & 0xff) switch { case dst[0] == 0: return r[:i*4] case dst[1] == 0: return r[:i*4+1] case dst[2] == 0: return r[:i*4+2] case dst[3] == 0: return r[:i*4+3] } } return r } ================================================ FILE: vendor/github.com/klauspost/cpuid/cpuid_386.s ================================================ // Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file. 
// +build 386,!gccgo

// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuid(SB), 7, $0
	XORL CX, CX
	MOVL op+0(FP), AX
	CPUID
	MOVL AX, eax+4(FP)
	MOVL BX, ebx+8(FP)
	MOVL CX, ecx+12(FP)
	MOVL DX, edx+16(FP)
	RET

// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuidex(SB), 7, $0
	MOVL op+0(FP), AX
	MOVL op2+4(FP), CX
	CPUID
	MOVL AX, eax+8(FP)
	MOVL BX, ebx+12(FP)
	MOVL CX, ecx+16(FP)
	MOVL DX, edx+20(FP)
	RET

// func asmXgetbv(index uint32) (eax, edx uint32)
// (NOTE(review): comment previously said "xgetbv", not matching the symbol.)
TEXT ·asmXgetbv(SB), 7, $0
	MOVL index+0(FP), CX
	BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
	MOVL AX, eax+4(FP)
	MOVL DX, edx+8(FP)
	RET

// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
TEXT ·asmRdtscpAsm(SB), 7, $0
	BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
	MOVL AX, eax+0(FP)
	MOVL BX, ebx+4(FP)
	MOVL CX, ecx+8(FP)
	MOVL DX, edx+12(FP)
	RET

================================================
FILE: vendor/github.com/klauspost/cpuid/cpuid_amd64.s
================================================
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.

//+build amd64,!gccgo

// func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuid(SB), 7, $0
	XORQ CX, CX
	MOVL op+0(FP), AX
	CPUID
	MOVL AX, eax+8(FP)
	MOVL BX, ebx+12(FP)
	MOVL CX, ecx+16(FP)
	MOVL DX, edx+20(FP)
	RET

// func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
TEXT ·asmCpuidex(SB), 7, $0
	MOVL op+0(FP), AX
	MOVL op2+4(FP), CX
	CPUID
	MOVL AX, eax+8(FP)
	MOVL BX, ebx+12(FP)
	MOVL CX, ecx+16(FP)
	MOVL DX, edx+20(FP)
	RET

// func asmXgetbv(index uint32) (eax, edx uint32)
TEXT ·asmXgetbv(SB), 7, $0
	MOVL index+0(FP), CX
	BYTE $0x0f; BYTE $0x01; BYTE $0xd0 // XGETBV
	MOVL AX, eax+8(FP)
	MOVL DX, edx+12(FP)
	RET

// func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)
TEXT ·asmRdtscpAsm(SB), 7, $0
	BYTE $0x0F; BYTE $0x01; BYTE $0xF9 // RDTSCP
	MOVL AX, eax+0(FP)
	MOVL BX, ebx+4(FP)
	MOVL CX, ecx+8(FP)
	MOVL DX, edx+12(FP)
	RET

================================================
FILE: vendor/github.com/klauspost/cpuid/detect_intel.go
================================================
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.

// +build 386,!gccgo amd64,!gccgo

package cpuid

// Implemented in cpuid_386.s / cpuid_amd64.s.
func asmCpuid(op uint32) (eax, ebx, ecx, edx uint32)
func asmCpuidex(op, op2 uint32) (eax, ebx, ecx, edx uint32)
func asmXgetbv(index uint32) (eax, edx uint32)
func asmRdtscpAsm() (eax, ebx, ecx, edx uint32)

// initCPU installs the real assembly implementations on x86/amd64.
func initCPU() {
	cpuid = asmCpuid
	cpuidex = asmCpuidex
	xgetbv = asmXgetbv
	rdtscpAsm = asmRdtscpAsm
}

================================================
FILE: vendor/github.com/klauspost/cpuid/detect_ref.go
================================================
// Copyright (c) 2015 Klaus Post, released under MIT License. See LICENSE file.
// +build !amd64,!386 gccgo package cpuid func initCPU() { cpuid = func(op uint32) (eax, ebx, ecx, edx uint32) { return 0, 0, 0, 0 } cpuidex = func(op, op2 uint32) (eax, ebx, ecx, edx uint32) { return 0, 0, 0, 0 } xgetbv = func(index uint32) (eax, edx uint32) { return 0, 0 } rdtscpAsm = func() (eax, ebx, ecx, edx uint32) { return 0, 0, 0, 0 } } ================================================ FILE: vendor/github.com/klauspost/cpuid/generate.go ================================================ package cpuid //go:generate go run private-gen.go ================================================ FILE: vendor/github.com/klauspost/cpuid/private-gen.go ================================================ // +build ignore package main import ( "bytes" "fmt" "go/ast" "go/parser" "go/printer" "go/token" "io" "io/ioutil" "log" "os" "reflect" "strings" "unicode" "unicode/utf8" ) var inFiles = []string{"cpuid.go", "cpuid_test.go"} var copyFiles = []string{"cpuid_amd64.s", "cpuid_386.s", "detect_ref.go", "detect_intel.go"} var fileSet = token.NewFileSet() var reWrites = []rewrite{ initRewrite("CPUInfo -> cpuInfo"), initRewrite("Vendor -> vendor"), initRewrite("Flags -> flags"), initRewrite("Detect -> detect"), initRewrite("CPU -> cpu"), } var excludeNames = map[string]bool{"string": true, "join": true, "trim": true, // cpuid_test.go "t": true, "println": true, "logf": true, "log": true, "fatalf": true, "fatal": true, } var excludePrefixes = []string{"test", "benchmark"} func main() { Package := "private" parserMode := parser.ParseComments exported := make(map[string]rewrite) for _, file := range inFiles { in, err := os.Open(file) if err != nil { log.Fatalf("opening input", err) } src, err := ioutil.ReadAll(in) if err != nil { log.Fatalf("reading input", err) } astfile, err := parser.ParseFile(fileSet, file, src, parserMode) if err != nil { log.Fatalf("parsing input", err) } for _, rw := range reWrites { astfile = rw(astfile) } // Inspect the AST and print all identifiers and literals. 
var startDecl token.Pos var endDecl token.Pos ast.Inspect(astfile, func(n ast.Node) bool { var s string switch x := n.(type) { case *ast.Ident: if x.IsExported() { t := strings.ToLower(x.Name) for _, pre := range excludePrefixes { if strings.HasPrefix(t, pre) { return true } } if excludeNames[t] != true { //if x.Pos() > startDecl && x.Pos() < endDecl { exported[x.Name] = initRewrite(x.Name + " -> " + t) } } case *ast.GenDecl: if x.Tok == token.CONST && x.Lparen > 0 { startDecl = x.Lparen endDecl = x.Rparen // fmt.Printf("Decl:%s -> %s\n", fileSet.Position(startDecl), fileSet.Position(endDecl)) } } if s != "" { fmt.Printf("%s:\t%s\n", fileSet.Position(n.Pos()), s) } return true }) for _, rw := range exported { astfile = rw(astfile) } var buf bytes.Buffer printer.Fprint(&buf, fileSet, astfile) // Remove package documentation and insert information s := buf.String() ind := strings.Index(buf.String(), "\npackage cpuid") s = s[ind:] s = "// Generated, DO NOT EDIT,\n" + "// but copy it to your own project and rename the package.\n" + "// See more at http://github.com/klauspost/cpuid\n" + s outputName := Package + string(os.PathSeparator) + file err = ioutil.WriteFile(outputName, []byte(s), 0644) if err != nil { log.Fatalf("writing output: %s", err) } log.Println("Generated", outputName) } for _, file := range copyFiles { dst := "" if strings.HasPrefix(file, "cpuid") { dst = Package + string(os.PathSeparator) + file } else { dst = Package + string(os.PathSeparator) + "cpuid_" + file } err := copyFile(file, dst) if err != nil { log.Fatalf("copying file: %s", err) } log.Println("Copied", dst) } } // CopyFile copies a file from src to dst. If src and dst files exist, and are // the same, then return success. Copy the file contents from src to dst. func copyFile(src, dst string) (err error) { sfi, err := os.Stat(src) if err != nil { return } if !sfi.Mode().IsRegular() { // cannot copy non-regular files (e.g., directories, // symlinks, devices, etc.) 
return fmt.Errorf("CopyFile: non-regular source file %s (%q)", sfi.Name(), sfi.Mode().String()) } dfi, err := os.Stat(dst) if err != nil { if !os.IsNotExist(err) { return } } else { if !(dfi.Mode().IsRegular()) { return fmt.Errorf("CopyFile: non-regular destination file %s (%q)", dfi.Name(), dfi.Mode().String()) } if os.SameFile(sfi, dfi) { return } } err = copyFileContents(src, dst) return } // copyFileContents copies the contents of the file named src to the file named // by dst. The file will be created if it does not already exist. If the // destination file exists, all it's contents will be replaced by the contents // of the source file. func copyFileContents(src, dst string) (err error) { in, err := os.Open(src) if err != nil { return } defer in.Close() out, err := os.Create(dst) if err != nil { return } defer func() { cerr := out.Close() if err == nil { err = cerr } }() if _, err = io.Copy(out, in); err != nil { return } err = out.Sync() return } type rewrite func(*ast.File) *ast.File // Mostly copied from gofmt func initRewrite(rewriteRule string) rewrite { f := strings.Split(rewriteRule, "->") if len(f) != 2 { fmt.Fprintf(os.Stderr, "rewrite rule must be of the form 'pattern -> replacement'\n") os.Exit(2) } pattern := parseExpr(f[0], "pattern") replace := parseExpr(f[1], "replacement") return func(p *ast.File) *ast.File { return rewriteFile(pattern, replace, p) } } // parseExpr parses s as an expression. // It might make sense to expand this to allow statement patterns, // but there are problems with preserving formatting and also // with what a wildcard for a statement looks like. func parseExpr(s, what string) ast.Expr { x, err := parser.ParseExpr(s) if err != nil { fmt.Fprintf(os.Stderr, "parsing %s %s at %s\n", what, s, err) os.Exit(2) } return x } // Keep this function for debugging. 
/* func dump(msg string, val reflect.Value) { fmt.Printf("%s:\n", msg) ast.Print(fileSet, val.Interface()) fmt.Println() } */ // rewriteFile applies the rewrite rule 'pattern -> replace' to an entire file. func rewriteFile(pattern, replace ast.Expr, p *ast.File) *ast.File { cmap := ast.NewCommentMap(fileSet, p, p.Comments) m := make(map[string]reflect.Value) pat := reflect.ValueOf(pattern) repl := reflect.ValueOf(replace) var rewriteVal func(val reflect.Value) reflect.Value rewriteVal = func(val reflect.Value) reflect.Value { // don't bother if val is invalid to start with if !val.IsValid() { return reflect.Value{} } for k := range m { delete(m, k) } val = apply(rewriteVal, val) if match(m, pat, val) { val = subst(m, repl, reflect.ValueOf(val.Interface().(ast.Node).Pos())) } return val } r := apply(rewriteVal, reflect.ValueOf(p)).Interface().(*ast.File) r.Comments = cmap.Filter(r).Comments() // recreate comments list return r } // set is a wrapper for x.Set(y); it protects the caller from panics if x cannot be changed to y. func set(x, y reflect.Value) { // don't bother if x cannot be set or y is invalid if !x.CanSet() || !y.IsValid() { return } defer func() { if x := recover(); x != nil { if s, ok := x.(string); ok && (strings.Contains(s, "type mismatch") || strings.Contains(s, "not assignable")) { // x cannot be set to y - ignore this rewrite return } panic(x) } }() x.Set(y) } // Values/types for special cases. var ( objectPtrNil = reflect.ValueOf((*ast.Object)(nil)) scopePtrNil = reflect.ValueOf((*ast.Scope)(nil)) identType = reflect.TypeOf((*ast.Ident)(nil)) objectPtrType = reflect.TypeOf((*ast.Object)(nil)) positionType = reflect.TypeOf(token.NoPos) callExprType = reflect.TypeOf((*ast.CallExpr)(nil)) scopePtrType = reflect.TypeOf((*ast.Scope)(nil)) ) // apply replaces each AST field x in val with f(x), returning val. // To avoid extra conversions, f operates on the reflect.Value form. 
func apply(f func(reflect.Value) reflect.Value, val reflect.Value) reflect.Value { if !val.IsValid() { return reflect.Value{} } // *ast.Objects introduce cycles and are likely incorrect after // rewrite; don't follow them but replace with nil instead if val.Type() == objectPtrType { return objectPtrNil } // similarly for scopes: they are likely incorrect after a rewrite; // replace them with nil if val.Type() == scopePtrType { return scopePtrNil } switch v := reflect.Indirect(val); v.Kind() { case reflect.Slice: for i := 0; i < v.Len(); i++ { e := v.Index(i) set(e, f(e)) } case reflect.Struct: for i := 0; i < v.NumField(); i++ { e := v.Field(i) set(e, f(e)) } case reflect.Interface: e := v.Elem() set(v, f(e)) } return val } func isWildcard(s string) bool { rune, size := utf8.DecodeRuneInString(s) return size == len(s) && unicode.IsLower(rune) } // match returns true if pattern matches val, // recording wildcard submatches in m. // If m == nil, match checks whether pattern == val. func match(m map[string]reflect.Value, pattern, val reflect.Value) bool { // Wildcard matches any expression. If it appears multiple // times in the pattern, it must match the same expression // each time. if m != nil && pattern.IsValid() && pattern.Type() == identType { name := pattern.Interface().(*ast.Ident).Name if isWildcard(name) && val.IsValid() { // wildcards only match valid (non-nil) expressions. if _, ok := val.Interface().(ast.Expr); ok && !val.IsNil() { if old, ok := m[name]; ok { return match(nil, old, val) } m[name] = val return true } } } // Otherwise, pattern and val must match recursively. if !pattern.IsValid() || !val.IsValid() { return !pattern.IsValid() && !val.IsValid() } if pattern.Type() != val.Type() { return false } // Special cases. switch pattern.Type() { case identType: // For identifiers, only the names need to match // (and none of the other *ast.Object information). 
// This is a common case, handle it all here instead // of recursing down any further via reflection. p := pattern.Interface().(*ast.Ident) v := val.Interface().(*ast.Ident) return p == nil && v == nil || p != nil && v != nil && p.Name == v.Name case objectPtrType, positionType: // object pointers and token positions always match return true case callExprType: // For calls, the Ellipsis fields (token.Position) must // match since that is how f(x) and f(x...) are different. // Check them here but fall through for the remaining fields. p := pattern.Interface().(*ast.CallExpr) v := val.Interface().(*ast.CallExpr) if p.Ellipsis.IsValid() != v.Ellipsis.IsValid() { return false } } p := reflect.Indirect(pattern) v := reflect.Indirect(val) if !p.IsValid() || !v.IsValid() { return !p.IsValid() && !v.IsValid() } switch p.Kind() { case reflect.Slice: if p.Len() != v.Len() { return false } for i := 0; i < p.Len(); i++ { if !match(m, p.Index(i), v.Index(i)) { return false } } return true case reflect.Struct: for i := 0; i < p.NumField(); i++ { if !match(m, p.Field(i), v.Field(i)) { return false } } return true case reflect.Interface: return match(m, p.Elem(), v.Elem()) } // Handle token integers, etc. return p.Interface() == v.Interface() } // subst returns a copy of pattern with values from m substituted in place // of wildcards and pos used as the position of tokens from the pattern. // if m == nil, subst returns a copy of pattern and doesn't change the line // number information. func subst(m map[string]reflect.Value, pattern reflect.Value, pos reflect.Value) reflect.Value { if !pattern.IsValid() { return reflect.Value{} } // Wildcard gets replaced with map value. 
if m != nil && pattern.Type() == identType { name := pattern.Interface().(*ast.Ident).Name if isWildcard(name) { if old, ok := m[name]; ok { return subst(nil, old, reflect.Value{}) } } } if pos.IsValid() && pattern.Type() == positionType { // use new position only if old position was valid in the first place if old := pattern.Interface().(token.Pos); !old.IsValid() { return pattern } return pos } // Otherwise copy. switch p := pattern; p.Kind() { case reflect.Slice: v := reflect.MakeSlice(p.Type(), p.Len(), p.Len()) for i := 0; i < p.Len(); i++ { v.Index(i).Set(subst(m, p.Index(i), pos)) } return v case reflect.Struct: v := reflect.New(p.Type()).Elem() for i := 0; i < p.NumField(); i++ { v.Field(i).Set(subst(m, p.Field(i), pos)) } return v case reflect.Ptr: v := reflect.New(p.Type()).Elem() if elem := p.Elem(); elem.IsValid() { v.Set(subst(m, elem, pos).Addr()) } return v case reflect.Interface: v := reflect.New(p.Type()).Elem() if elem := p.Elem(); elem.IsValid() { v.Set(subst(m, elem, pos)) } return v } return pattern } ================================================ FILE: vendor/github.com/klauspost/crc32/LICENSE ================================================ Copyright (c) 2012 The Go Authors. All rights reserved. Copyright (c) 2015 Klaus Post Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================ FILE: vendor/github.com/klauspost/crc32/crc32.go ================================================ // Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package crc32 implements the 32-bit cyclic redundancy check, or CRC-32, // checksum. See http://en.wikipedia.org/wiki/Cyclic_redundancy_check for // information. // // Polynomials are represented in LSB-first form also known as reversed representation. // // See http://en.wikipedia.org/wiki/Mathematics_of_cyclic_redundancy_checks#Reversed_representations_and_reciprocal_polynomials // for information. package crc32 import ( "hash" "sync" ) // The size of a CRC-32 checksum in bytes. const Size = 4 // Predefined polynomials. const ( // IEEE is by far and away the most common CRC-32 polynomial. // Used by ethernet (IEEE 802.3), v.42, fddi, gzip, zip, png, ... IEEE = 0xedb88320 // Castagnoli's polynomial, used in iSCSI. // Has better error detection characteristics than IEEE. // http://dx.doi.org/10.1109/26.231911 Castagnoli = 0x82f63b78 // Koopman's polynomial. // Also has better error detection characteristics than IEEE. 
// http://dx.doi.org/10.1109/DSN.2002.1028931
	Koopman = 0xeb31d82e
)

// Table is a 256-word table representing the polynomial for efficient processing.
type Table [256]uint32

// castagnoliTable points to a lazily initialized Table for the Castagnoli
// polynomial. MakeTable will always return this value when asked to make a
// Castagnoli table so we can compare against it to find when the caller is
// using this polynomial.
var castagnoliTable *Table
var castagnoliTable8 *slicing8Table
var castagnoliOnce sync.Once

// castagnoliInit builds both the plain and the slicing-by-8 Castagnoli
// tables; run exactly once via castagnoliOnce (see MakeTable).
func castagnoliInit() {
	castagnoliTable = makeTable(Castagnoli)
	castagnoliTable8 = makeTable8(Castagnoli)
}

// IEEETable is the table for the IEEE polynomial.
var IEEETable = makeTable(IEEE)

// slicing8Table is array of 8 Tables
type slicing8Table [8]Table

// ieeeTable8 is the slicing8Table for IEEE
// (built lazily by updateIEEE via ieeeTable8Once).
var ieeeTable8 *slicing8Table
var ieeeTable8Once sync.Once

// MakeTable returns a Table constructed from the specified polynomial.
// The contents of this Table must not be modified.
func MakeTable(poly uint32) *Table {
	switch poly {
	case IEEE:
		return IEEETable
	case Castagnoli:
		// Lazily build and hand out the canonical Castagnoli table so
		// Update can recognize it by pointer identity.
		castagnoliOnce.Do(castagnoliInit)
		return castagnoliTable
	}
	return makeTable(poly)
}

// makeTable returns the Table constructed from the specified polynomial.
// Standard bitwise table generation: 8 shift/xor rounds per byte value.
func makeTable(poly uint32) *Table {
	t := new(Table)
	for i := 0; i < 256; i++ {
		crc := uint32(i)
		for j := 0; j < 8; j++ {
			if crc&1 == 1 {
				crc = (crc >> 1) ^ poly
			} else {
				crc >>= 1
			}
		}
		t[i] = crc
	}
	return t
}

// makeTable8 returns slicing8Table constructed from the specified polynomial.
// Table k maps a byte to its CRC contribution when it sits k bytes ahead,
// derived by repeatedly folding table 0 into itself.
func makeTable8(poly uint32) *slicing8Table {
	t := new(slicing8Table)
	t[0] = *makeTable(poly)
	for i := 0; i < 256; i++ {
		crc := t[0][i]
		for j := 1; j < 8; j++ {
			crc = t[0][crc&0xFF] ^ (crc >> 8)
			t[j][i] = crc
		}
	}
	return t
}

// digest represents the partial evaluation of a checksum.
type digest struct {
	crc uint32 // running (non-inverted) CRC value
	tab *Table // polynomial table selected at construction
}

// New creates a new hash.Hash32 computing the CRC-32 checksum
// using the polynomial represented by the Table.
// Its Sum method will lay the value out in big-endian byte order.
func New(tab *Table) hash.Hash32 { return &digest{0, tab} }

// NewIEEE creates a new hash.Hash32 computing the CRC-32 checksum
// using the IEEE polynomial.
// Its Sum method will lay the value out in big-endian byte order.
func NewIEEE() hash.Hash32 { return New(IEEETable) }

func (d *digest) Size() int { return Size }

func (d *digest) BlockSize() int { return 1 }

func (d *digest) Reset() { d.crc = 0 }

// update is the generic byte-at-a-time CRC kernel. The CRC is kept
// inverted between the leading and trailing ^crc, per convention.
func update(crc uint32, tab *Table, p []byte) uint32 {
	crc = ^crc
	for _, v := range p {
		crc = tab[byte(crc)^v] ^ (crc >> 8)
	}
	return ^crc
}

// updateSlicingBy8 updates CRC using Slicing-by-8
func updateSlicingBy8(crc uint32, tab *slicing8Table, p []byte) uint32 {
	crc = ^crc
	// NOTE(review): condition is > 8, not >= 8, so an exactly-8-byte tail
	// falls through to the byte-at-a-time fallback below. This matches
	// upstream klauspost/crc32 — confirm against upstream before "fixing".
	for len(p) > 8 {
		crc ^= uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
		crc = tab[0][p[7]] ^ tab[1][p[6]] ^ tab[2][p[5]] ^ tab[3][p[4]] ^
			tab[4][crc>>24] ^ tab[5][(crc>>16)&0xFF] ^
			tab[6][(crc>>8)&0xFF] ^ tab[7][crc&0xFF]
		p = p[8:]
	}
	crc = ^crc
	if len(p) == 0 {
		return crc
	}
	// Leftover bytes: delegate to the generic kernel with table 0.
	return update(crc, &tab[0], p)
}

// Update returns the result of adding the bytes in p to the crc.
// The canonical IEEE/Castagnoli tables are recognized by pointer
// identity and dispatched to the accelerated implementations.
func Update(crc uint32, tab *Table, p []byte) uint32 {
	if tab == castagnoliTable {
		return updateCastagnoli(crc, p)
	}
	if tab == IEEETable {
		return updateIEEE(crc, p)
	}
	return update(crc, tab, p)
}

func (d *digest) Write(p []byte) (n int, err error) {
	d.crc = Update(d.crc, d.tab, p)
	return len(p), nil
}

func (d *digest) Sum32() uint32 { return d.crc }

// Sum appends the big-endian CRC-32 to in, per hash.Hash.
func (d *digest) Sum(in []byte) []byte {
	s := d.Sum32()
	return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
}

// Checksum returns the CRC-32 checksum of data
// using the polynomial represented by the Table.
func Checksum(data []byte, tab *Table) uint32 { return Update(0, tab, data) }

// ChecksumIEEE returns the CRC-32 checksum of data
// using the IEEE polynomial.
func ChecksumIEEE(data []byte) uint32 { return updateIEEE(0, data) }

================================================
FILE: vendor/github.com/klauspost/crc32/crc32_amd64.go
================================================

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !appengine,!gccgo

package crc32

// This file contains the code to call the SSE 4.2 version of the Castagnoli
// and IEEE CRC.

// haveSSE41/haveSSE42/haveCLMUL are defined in crc_amd64.s and use
// CPUID to test for SSE 4.1, 4.2 and CLMUL support.
func haveSSE41() bool
func haveSSE42() bool
func haveCLMUL() bool

// castagnoliSSE42 is defined in crc_amd64.s and uses the SSE4.2 CRC32
// instruction.
//go:noescape
func castagnoliSSE42(crc uint32, p []byte) uint32

// ieeeCLMUL is defined in crc_amd64.s and uses the PCLMULQDQ
// instruction as well as SSE 4.1.
//go:noescape
func ieeeCLMUL(crc uint32, p []byte) uint32

// CPU feature detection runs once at package init.
var sse42 = haveSSE42()
var useFastIEEE = haveCLMUL() && haveSSE41()

// updateCastagnoli dispatches to the SSE4.2 CRC32 instruction when
// available, otherwise to slicing-by-8 or the byte-wise fallback.
func updateCastagnoli(crc uint32, p []byte) uint32 {
	if sse42 {
		return castagnoliSSE42(crc, p)
	}
	// only use slicing-by-8 when input is >= 16 Bytes
	if len(p) >= 16 {
		return updateSlicingBy8(crc, castagnoliTable8, p)
	}
	return update(crc, castagnoliTable, p)
}

// updateIEEE uses the PCLMULQDQ-based routine for large inputs when the
// CPU supports it, processing the 16-byte-aligned prefix there and the
// remainder with the table-driven kernel.
func updateIEEE(crc uint32, p []byte) uint32 {
	if useFastIEEE && len(p) >= 64 {
		left := len(p) & 15
		do := len(p) - left
		// ieeeCLMUL works on the pre-inverted CRC, so invert around the
		// call to match update's convention (see update in crc32.go).
		crc = ^ieeeCLMUL(^crc, p[:do])
		if left > 0 {
			crc = update(crc, IEEETable, p[do:])
		}
		return crc
	}
	// only use slicing-by-8 when input is >= 16 Bytes
	if len(p) >= 16 {
		ieeeTable8Once.Do(func() {
			ieeeTable8 = makeTable8(IEEE)
		})
		return updateSlicingBy8(crc, ieeeTable8, p)
	}
	return update(crc, IEEETable, p)
}

================================================
FILE: vendor/github.com/klauspost/crc32/crc32_amd64.s
================================================

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build gc #define NOSPLIT 4 #define RODATA 8 // func castagnoliSSE42(crc uint32, p []byte) uint32 TEXT ·castagnoliSSE42(SB), NOSPLIT, $0 MOVL crc+0(FP), AX // CRC value MOVQ p+8(FP), SI // data pointer MOVQ p_len+16(FP), CX // len(p) NOTL AX // If there's less than 8 bytes to process, we do it byte-by-byte. CMPQ CX, $8 JL cleanup // Process individual bytes until the input is 8-byte aligned. startup: MOVQ SI, BX ANDQ $7, BX JZ aligned CRC32B (SI), AX DECQ CX INCQ SI JMP startup aligned: // The input is now 8-byte aligned and we can process 8-byte chunks. CMPQ CX, $8 JL cleanup CRC32Q (SI), AX ADDQ $8, SI SUBQ $8, CX JMP aligned cleanup: // We may have some bytes left over that we process one at a time. CMPQ CX, $0 JE done CRC32B (SI), AX INCQ SI DECQ CX JMP cleanup done: NOTL AX MOVL AX, ret+32(FP) RET // func haveSSE42() bool TEXT ·haveSSE42(SB), NOSPLIT, $0 XORQ AX, AX INCL AX CPUID SHRQ $20, CX ANDQ $1, CX MOVB CX, ret+0(FP) RET // func haveCLMUL() bool TEXT ·haveCLMUL(SB), NOSPLIT, $0 XORQ AX, AX INCL AX CPUID SHRQ $1, CX ANDQ $1, CX MOVB CX, ret+0(FP) RET // func haveSSE41() bool TEXT ·haveSSE41(SB), NOSPLIT, $0 XORQ AX, AX INCL AX CPUID SHRQ $19, CX ANDQ $1, CX MOVB CX, ret+0(FP) RET // CRC32 polynomial data // // These constants are lifted from the // Linux kernel, since they avoid the costly // PSHUFB 16 byte reversal proposed in the // original Intel paper. 
DATA r2r1kp<>+0(SB)/8, $0x154442bd4 DATA r2r1kp<>+8(SB)/8, $0x1c6e41596 DATA r4r3kp<>+0(SB)/8, $0x1751997d0 DATA r4r3kp<>+8(SB)/8, $0x0ccaa009e DATA rupolykp<>+0(SB)/8, $0x1db710641 DATA rupolykp<>+8(SB)/8, $0x1f7011641 DATA r5kp<>+0(SB)/8, $0x163cd6124 GLOBL r2r1kp<>(SB), RODATA, $16 GLOBL r4r3kp<>(SB), RODATA, $16 GLOBL rupolykp<>(SB), RODATA, $16 GLOBL r5kp<>(SB), RODATA, $8 // Based on http://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf // len(p) must be at least 64, and must be a multiple of 16. // func ieeeCLMUL(crc uint32, p []byte) uint32 TEXT ·ieeeCLMUL(SB), NOSPLIT, $0 MOVL crc+0(FP), X0 // Initial CRC value MOVQ p+8(FP), SI // data pointer MOVQ p_len+16(FP), CX // len(p) MOVOU (SI), X1 MOVOU 16(SI), X2 MOVOU 32(SI), X3 MOVOU 48(SI), X4 PXOR X0, X1 ADDQ $64, SI // buf+=64 SUBQ $64, CX // len-=64 CMPQ CX, $64 // Less than 64 bytes left JB remain64 MOVOA r2r1kp<>+0(SB), X0 loopback64: MOVOA X1, X5 MOVOA X2, X6 MOVOA X3, X7 MOVOA X4, X8 PCLMULQDQ $0, X0, X1 PCLMULQDQ $0, X0, X2 PCLMULQDQ $0, X0, X3 PCLMULQDQ $0, X0, X4 // Load next early MOVOU (SI), X11 MOVOU 16(SI), X12 MOVOU 32(SI), X13 MOVOU 48(SI), X14 PCLMULQDQ $0x11, X0, X5 PCLMULQDQ $0x11, X0, X6 PCLMULQDQ $0x11, X0, X7 PCLMULQDQ $0x11, X0, X8 PXOR X5, X1 PXOR X6, X2 PXOR X7, X3 PXOR X8, X4 PXOR X11, X1 PXOR X12, X2 PXOR X13, X3 PXOR X14, X4 ADDQ $0x40, DI ADDQ $64, SI // buf+=64 SUBQ $64, CX // len-=64 CMPQ CX, $64 // Less than 64 bytes left? JGE loopback64 // Fold result into a single register (X1) remain64: MOVOA r4r3kp<>+0(SB), X0 MOVOA X1, X5 PCLMULQDQ $0, X0, X1 PCLMULQDQ $0x11, X0, X5 PXOR X5, X1 PXOR X2, X1 MOVOA X1, X5 PCLMULQDQ $0, X0, X1 PCLMULQDQ $0x11, X0, X5 PXOR X5, X1 PXOR X3, X1 MOVOA X1, X5 PCLMULQDQ $0, X0, X1 PCLMULQDQ $0x11, X0, X5 PXOR X5, X1 PXOR X4, X1 // More than 16 bytes left? 
CMPQ CX, $16 JB finish // Encode 16 bytes remain16: MOVOU (SI), X10 MOVOA X1, X5 PCLMULQDQ $0, X0, X1 PCLMULQDQ $0x11, X0, X5 PXOR X5, X1 PXOR X10, X1 SUBQ $16, CX ADDQ $16, SI CMPQ CX, $16 JGE remain16 finish: // Fold final result into 32 bits and return it PCMPEQB X3, X3 PCLMULQDQ $1, X1, X0 PSRLDQ $8, X1 PXOR X0, X1 MOVOA X1, X2 MOVQ r5kp<>+0(SB), X0 // Creates 32 bit mask. Note that we don't care about upper half. PSRLQ $32, X3 PSRLDQ $4, X2 PAND X3, X1 PCLMULQDQ $0, X0, X1 PXOR X2, X1 MOVOA rupolykp<>+0(SB), X0 MOVOA X1, X2 PAND X3, X1 PCLMULQDQ $0x10, X0, X1 PAND X3, X1 PCLMULQDQ $0, X0, X1 PXOR X2, X1 // PEXTRD $1, X1, AX (SSE 4.1) BYTE $0x66; BYTE $0x0f; BYTE $0x3a BYTE $0x16; BYTE $0xc8; BYTE $0x01 MOVL AX, ret+32(FP) RET ================================================ FILE: vendor/github.com/klauspost/crc32/crc32_amd64p32.go ================================================ // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build !appengine,!gccgo package crc32 // This file contains the code to call the SSE 4.2 version of the Castagnoli // CRC. // haveSSE42 is defined in crc_amd64p32.s and uses CPUID to test for SSE 4.2 // support. func haveSSE42() bool // castagnoliSSE42 is defined in crc_amd64.s and uses the SSE4.2 CRC32 // instruction. 
//go:noescape func castagnoliSSE42(crc uint32, p []byte) uint32 var sse42 = haveSSE42() func updateCastagnoli(crc uint32, p []byte) uint32 { if sse42 { return castagnoliSSE42(crc, p) } return update(crc, castagnoliTable, p) } func updateIEEE(crc uint32, p []byte) uint32 { // only use slicing-by-8 when input is >= 4KB if len(p) >= 4096 { ieeeTable8Once.Do(func() { ieeeTable8 = makeTable8(IEEE) }) return updateSlicingBy8(crc, ieeeTable8, p) } return update(crc, IEEETable, p) } ================================================ FILE: vendor/github.com/klauspost/crc32/crc32_amd64p32.s ================================================ // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build gc #define NOSPLIT 4 #define RODATA 8 // func castagnoliSSE42(crc uint32, p []byte) uint32 TEXT ·castagnoliSSE42(SB), NOSPLIT, $0 MOVL crc+0(FP), AX // CRC value MOVL p+4(FP), SI // data pointer MOVL p_len+8(FP), CX // len(p) NOTL AX // If there's less than 8 bytes to process, we do it byte-by-byte. CMPQ CX, $8 JL cleanup // Process individual bytes until the input is 8-byte aligned. startup: MOVQ SI, BX ANDQ $7, BX JZ aligned CRC32B (SI), AX DECQ CX INCQ SI JMP startup aligned: // The input is now 8-byte aligned and we can process 8-byte chunks. CMPQ CX, $8 JL cleanup CRC32Q (SI), AX ADDQ $8, SI SUBQ $8, CX JMP aligned cleanup: // We may have some bytes left over that we process one at a time. CMPQ CX, $0 JE done CRC32B (SI), AX INCQ SI DECQ CX JMP cleanup done: NOTL AX MOVL AX, ret+16(FP) RET // func haveSSE42() bool TEXT ·haveSSE42(SB), NOSPLIT, $0 XORQ AX, AX INCL AX CPUID SHRQ $20, CX ANDQ $1, CX MOVB CX, ret+0(FP) RET ================================================ FILE: vendor/github.com/klauspost/crc32/crc32_generic.go ================================================ // Copyright 2011 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build !amd64,!amd64p32 appengine gccgo package crc32 // This file contains the generic version of updateCastagnoli which does // slicing-by-8, or uses the fallback for very small sizes. func updateCastagnoli(crc uint32, p []byte) uint32 { // only use slicing-by-8 when input is >= 16 Bytes if len(p) >= 16 { return updateSlicingBy8(crc, castagnoliTable8, p) } return update(crc, castagnoliTable, p) } func updateIEEE(crc uint32, p []byte) uint32 { // only use slicing-by-8 when input is >= 16 Bytes if len(p) >= 16 { ieeeTable8Once.Do(func() { ieeeTable8 = makeTable8(IEEE) }) return updateSlicingBy8(crc, ieeeTable8, p) } return update(crc, IEEETable, p) } ================================================ FILE: vendor/github.com/klauspost/pgzip/LICENSE ================================================ The MIT License (MIT) Copyright (c) 2014 Klaus Post Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
================================================
FILE: vendor/github.com/klauspost/pgzip/gunzip.go
================================================

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package pgzip implements reading and writing of gzip format compressed files,
// as specified in RFC 1952.
//
// This is a drop in replacement for "compress/gzip".
// This will split compression into blocks that are compressed in parallel.
// This can be useful for compressing big amounts of data.
// The gzip decompression has not been modified, but remains in the package,
// so you can use it as a complete replacement for "compress/gzip".
//
// See more at https://github.com/klauspost/pgzip
package pgzip

import (
	"bufio"
	"errors"
	"hash"
	"io"
	"sync"
	"time"

	"github.com/klauspost/compress/flate"
	"github.com/klauspost/crc32"
)

// gzip member header constants (RFC 1952, section 2.3).
const (
	gzipID1     = 0x1f // first magic byte
	gzipID2     = 0x8b // second magic byte
	gzipDeflate = 8    // CM: deflate is the only supported compression method

	// FLG bits.
	flagText    = 1 << 0
	flagHdrCrc  = 1 << 1
	flagExtra   = 1 << 2
	flagName    = 1 << 3
	flagComment = 1 << 4
)

// makeReader adapts r to a flate.Reader, wrapping it in a bufio.Reader
// only when it does not already provide byte-level reads.
func makeReader(r io.Reader) flate.Reader {
	if rr, ok := r.(flate.Reader); ok {
		return rr
	}
	return bufio.NewReader(r)
}

var (
	// ErrChecksum is returned when reading GZIP data that has an invalid checksum.
	ErrChecksum = errors.New("gzip: invalid checksum")
	// ErrHeader is returned when reading GZIP data that has an invalid header.
	ErrHeader = errors.New("gzip: invalid header")
)

// The gzip file stores a header giving metadata about the compressed file.
// That header is exposed as the fields of the Writer and Reader structs.
type Header struct {
	Comment string    // comment
	Extra   []byte    // "extra data"
	ModTime time.Time // modification time
	Name    string    // file name
	OS      byte      // operating system type
}

// A Reader is an io.Reader that can be read to retrieve
// uncompressed data from a gzip-format compressed file.
//
// In general, a gzip file can be a concatenation of gzip files,
// each with its own header. Reads from the Reader
// return the concatenation of the uncompressed data of each.
// Only the first header is recorded in the Reader fields.
//
// Gzip files store a length and checksum of the uncompressed data.
// The Reader will return a ErrChecksum when Read
// reaches the end of the uncompressed data if it does not
// have the expected length or checksum. Clients should treat data
// returned by Read as tentative until they receive the io.EOF
// marking the end of the data.
type Reader struct {
	Header
	r            flate.Reader
	decompressor io.ReadCloser
	digest       hash.Hash32 // running CRC-32 (IEEE) of the uncompressed data
	size         uint32
	flg          byte      // FLG byte from the current gzip header
	buf          [512]byte // scratch buffer for header parsing
	err          error
	closeErr     chan error // presumably carries the readahead goroutine's close error; see killReadAhead
	multistream  bool

	readAhead   chan read // decompressed blocks delivered by the readahead goroutine
	roff        int       // read offset
	current     []byte
	closeReader chan struct{} // closed to stop the readahead goroutine
	lastBlock   bool
	blockSize   int
	blocks      int

	activeRA bool       // Indication if readahead is active
	mu       sync.Mutex // Lock for above

	blockPool chan []byte // recycled block buffers, each blockSize bytes
}

// read is one unit delivered over the readAhead channel: a decompressed
// block and the error (if any) that ended it.
type read struct {
	b   []byte
	err error
}

// NewReader creates a new Reader reading the given reader.
// The implementation buffers input and may read more data than necessary from r.
// It is the caller's responsibility to call Close on the Reader when done.
func NewReader(r io.Reader) (*Reader, error) {
	z := new(Reader)
	z.blocks = defaultBlocks
	z.blockSize = defaultBlockSize
	z.r = makeReader(r)
	z.digest = crc32.NewIEEE()
	z.multistream = true
	// Pre-fill the pool so readahead never allocates per block.
	z.blockPool = make(chan []byte, z.blocks)
	for i := 0; i < z.blocks; i++ {
		z.blockPool <- make([]byte, z.blockSize)
	}
	if err := z.readHeader(true); err != nil {
		return nil, err
	}
	return z, nil
}

// NewReaderN creates a new Reader reading the given reader.
// The implementation buffers input and may read more data than necessary from r.
// It is the caller's responsibility to call Close on the Reader when done.
//
// With this you can control the approximate size of your blocks,
// as well as how many blocks you want to have prefetched.
//
// Default values for this is blockSize = 250000, blocks = 16,
// meaning up to 16 blocks of maximum 250000 bytes will be
// prefetched.
func NewReaderN(r io.Reader, blockSize, blocks int) (*Reader, error) {
	z := new(Reader)
	z.blocks = blocks
	z.blockSize = blockSize
	z.r = makeReader(r)
	z.digest = crc32.NewIEEE()
	z.multistream = true

	// Account for too small values
	if z.blocks <= 0 {
		z.blocks = defaultBlocks
	}
	if z.blockSize <= 512 {
		z.blockSize = defaultBlockSize
	}
	z.blockPool = make(chan []byte, z.blocks)
	for i := 0; i < z.blocks; i++ {
		z.blockPool <- make([]byte, z.blockSize)
	}
	if err := z.readHeader(true); err != nil {
		return nil, err
	}
	return z, nil
}

// Reset discards the Reader z's state and makes it equivalent to the
// result of its original state from NewReader, but reading from r instead.
// This permits reusing a Reader rather than allocating a new one.
func (z *Reader) Reset(r io.Reader) error {
	// Stop any in-flight readahead before rewiring the source.
	z.killReadAhead()
	z.r = makeReader(r)
	z.digest = crc32.NewIEEE()
	z.size = 0
	z.err = nil
	z.multistream = true

	// Account for uninitialized values
	if z.blocks <= 0 {
		z.blocks = defaultBlocks
	}
	if z.blockSize <= 512 {
		z.blockSize = defaultBlockSize
	}

	// Keep the existing pool when present; the buffers remain valid.
	if z.blockPool == nil {
		z.blockPool = make(chan []byte, z.blocks)
		for i := 0; i < z.blocks; i++ {
			z.blockPool <- make([]byte, z.blockSize)
		}
	}

	return z.readHeader(true)
}

// Multistream controls whether the reader supports multistream files.
//
// If enabled (the default), the Reader expects the input to be a sequence
// of individually gzipped data streams, each with its own header and
// trailer, ending at EOF. The effect is that the concatenation of a sequence
// of gzipped files is treated as equivalent to the gzip of the concatenation
// of the sequence. This is standard behavior for gzip readers.
//
// Calling Multistream(false) disables this behavior; disabling the behavior
// can be useful when reading file formats that distinguish individual gzip
// data streams or mix gzip data streams with other data streams.
// In this mode, when the Reader reaches the end of the data stream,
// Read returns io.EOF. If the underlying reader implements io.ByteReader,
// it will be left positioned just after the gzip stream.
// To start the next stream, call z.Reset(r) followed by z.Multistream(false).
// If there is no next stream, z.Reset(r) will return io.EOF.
func (z *Reader) Multistream(ok bool) {
	z.multistream = ok
}

// GZIP (RFC 1952) is little-endian, unlike ZLIB (RFC 1950).
func get4(p []byte) uint32 {
	return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
}

// readString reads a NUL-terminated string from the gzip header,
// using z.buf as scratch space. Strings longer than the scratch buffer
// are rejected with ErrHeader.
func (z *Reader) readString() (string, error) {
	var err error
	needconv := false
	for i := 0; ; i++ {
		if i >= len(z.buf) {
			return "", ErrHeader
		}
		z.buf[i], err = z.r.ReadByte()
		if err != nil {
			return "", err
		}
		// Any byte above 0x7f needs a Latin-1 -> UTF-8 conversion.
		if z.buf[i] > 0x7f {
			needconv = true
		}
		if z.buf[i] == 0 {
			// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1).
			if needconv {
				// Each Latin-1 byte maps directly to the rune with the same value.
				s := make([]rune, 0, i)
				for _, v := range z.buf[0:i] {
					s = append(s, rune(v))
				}
				return string(s), nil
			}
			return string(z.buf[0:i]), nil
		}
	}
}

// read2 reads a little-endian uint16 from the input into a uint32.
func (z *Reader) read2() (uint32, error) {
	_, err := io.ReadFull(z.r, z.buf[0:2])
	if err != nil {
		return 0, err
	}
	return uint32(z.buf[0]) | uint32(z.buf[1])<<8, nil
}

// readHeader parses one gzip member header (RFC 1952 section 2.3).
// If save is true, the parsed fields are recorded in z.Header; on a
// multistream continuation save is false and fields are discarded.
// On success it installs a fresh flate decompressor and (re)starts
// the readahead goroutine.
func (z *Reader) readHeader(save bool) error {
	// Make sure a previous stream's readahead goroutine is stopped first.
	z.killReadAhead()

	_, err := io.ReadFull(z.r, z.buf[0:10])
	if err != nil {
		return err
	}
	if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate {
		return ErrHeader
	}
	z.flg = z.buf[3]
	if save {
		z.ModTime = time.Unix(int64(get4(z.buf[4:8])), 0)
		// z.buf[8] is xfl, ignored
		z.OS = z.buf[9]
	}
	// The header bytes participate in the optional FHCRC header checksum.
	z.digest.Reset()
	z.digest.Write(z.buf[0:10])

	if z.flg&flagExtra != 0 {
		n, err := z.read2()
		if err != nil {
			return err
		}
		data := make([]byte, n)
		if _, err = io.ReadFull(z.r, data); err != nil {
			return err
		}
		if save {
			z.Extra = data
		}
	}

	var s string
	if z.flg&flagName != 0 {
		if s, err = z.readString(); err != nil {
			return err
		}
		if save {
			z.Name = s
		}
	}

	if z.flg&flagComment != 0 {
		if s, err = z.readString(); err != nil {
			return err
		}
		if save {
			z.Comment = s
		}
	}

	if z.flg&flagHdrCrc != 0 {
		n, err := z.read2()
		if err != nil {
			return err
		}
		// FHCRC is only the low 16 bits of the CRC-32 over the header.
		sum := z.digest.Sum32() & 0xFFFF
		if n != sum {
			return ErrHeader
		}
	}

	// From here on the digest covers the uncompressed payload only.
	z.digest.Reset()
	z.decompressor = flate.NewReader(z.r)
	z.doReadAhead()
	return nil
}

// killReadAhead stops the readahead goroutine (if running) and returns
// the error from closing the decompressor, if any.
func (z *Reader) killReadAhead() error {
	z.mu.Lock()
	defer z.mu.Unlock()
	if z.activeRA {
		if z.closeReader != nil {
			close(z.closeReader)
		}

		// Wait for decompressor to be closed and return error, if any.
		e, ok := <-z.closeErr
		z.activeRA = false

		if !ok {
			// Channel is closed, so if there was any error it has already been returned.
			return nil
		}
		return e
	}
	return nil
}

// Starts readahead.
// Will return on error (including io.EOF)
// or when z.closeReader is closed.
func (z *Reader) doReadAhead() { z.mu.Lock() defer z.mu.Unlock() z.activeRA = true if z.blocks <= 0 { z.blocks = defaultBlocks } if z.blockSize <= 512 { z.blockSize = defaultBlockSize } ra := make(chan read, z.blocks) z.readAhead = ra closeReader := make(chan struct{}, 0) z.closeReader = closeReader z.lastBlock = false closeErr := make(chan error, 1) z.closeErr = closeErr z.size = 0 z.roff = 0 z.current = nil decomp := z.decompressor go func() { defer func() { closeErr <- decomp.Close() close(closeErr) close(ra) }() // We hold a local reference to digest, since // it way be changed by reset. digest := z.digest var wg sync.WaitGroup for { var buf []byte select { case buf = <-z.blockPool: case <-closeReader: return } buf = buf[0:z.blockSize] // Try to fill the buffer n, err := io.ReadFull(decomp, buf) if err == io.ErrUnexpectedEOF { err = nil } if n < len(buf) { buf = buf[0:n] } wg.Wait() wg.Add(1) go func() { digest.Write(buf) wg.Done() }() z.size += uint32(n) // If we return any error, out digest must be ready if err != nil { wg.Wait() } select { case z.readAhead <- read{b: buf, err: err}: case <-closeReader: // Sent on close, we don't care about the next results return } if err != nil { return } } }() } func (z *Reader) Read(p []byte) (n int, err error) { if z.err != nil { return 0, z.err } if len(p) == 0 { return 0, nil } for { if len(z.current) == 0 && !z.lastBlock { read := <-z.readAhead if read.err != nil { // If not nil, the reader will have exited z.closeReader = nil if read.err != io.EOF { z.err = read.err return } if read.err == io.EOF { z.lastBlock = true err = nil } } z.current = read.b z.roff = 0 } avail := z.current[z.roff:] if len(p) >= len(avail) { // If len(p) >= len(current), return all content of current n = copy(p, avail) z.blockPool <- z.current z.current = nil if z.lastBlock { err = io.EOF break } } else { // We copy as much as there is space for n = copy(p, avail) z.roff += n } return } // Finished file; check checksum + size. 
if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil { z.err = err return 0, err } crc32, isize := get4(z.buf[0:4]), get4(z.buf[4:8]) sum := z.digest.Sum32() if sum != crc32 || isize != z.size { z.err = ErrChecksum return 0, z.err } // File is ok; should we attempt reading one more? if !z.multistream { return 0, io.EOF } // Is there another? if err = z.readHeader(false); err != nil { z.err = err return } // Yes. Reset and read from it. return z.Read(p) } func (z *Reader) WriteTo(w io.Writer) (n int64, err error) { total := int64(0) for { if z.err != nil { return total, z.err } // We write both to output and digest. for { // Read from input read := <-z.readAhead if read.err != nil { // If not nil, the reader will have exited z.closeReader = nil if read.err != io.EOF { z.err = read.err return total, z.err } if read.err == io.EOF { z.lastBlock = true err = nil } } // Write what we got n, err := w.Write(read.b) if n != len(read.b) { return total, io.ErrShortWrite } total += int64(n) if err != nil { return total, err } // Put block back z.blockPool <- read.b if z.lastBlock { break } } // Finished file; check checksum + size. if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil { z.err = err return total, err } crc32, isize := get4(z.buf[0:4]), get4(z.buf[4:8]) sum := z.digest.Sum32() if sum != crc32 || isize != z.size { z.err = ErrChecksum return total, z.err } // File is ok; should we attempt reading one more? if !z.multistream { return total, nil } // Is there another? err = z.readHeader(false) if err == io.EOF { return total, nil } if err != nil { z.err = err return total, err } } } // Close closes the Reader. It does not close the underlying io.Reader. func (z *Reader) Close() error { return z.killReadAhead() } ================================================ FILE: vendor/github.com/klauspost/pgzip/gzip.go ================================================ // Copyright 2010 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package pgzip

import (
	"bytes"
	"errors"
	"fmt"
	"hash"
	"io"
	"sync"

	"github.com/klauspost/compress/flate"
	"github.com/klauspost/crc32"
)

const (
	defaultBlockSize = 250000
	tailSize         = 16384
	defaultBlocks    = 16
)

// These constants are copied from the flate package, so that code that imports
// "compress/gzip" does not also have to import "compress/flate".
const (
	NoCompression       = flate.NoCompression
	BestSpeed           = flate.BestSpeed
	BestCompression     = flate.BestCompression
	DefaultCompression  = flate.DefaultCompression
	ConstantCompression = flate.ConstantCompression
)

// A Writer is an io.WriteCloser.
// Writes to a Writer are compressed and written to w.
type Writer struct {
	Header
	w             io.Writer
	level         int
	wroteHeader   bool
	blockSize     int     // split point for parallel compression blocks
	blocks        int     // max number of blocks compressing concurrently
	currentBuffer []byte  // uncompressed data accumulated for the next block
	prevTail      []byte  // last tailSize bytes of the previous block, used as flate dictionary
	digest        hash.Hash32 // CRC-32 (IEEE) of all uncompressed input
	size          int         // total uncompressed bytes written
	closed        bool
	buf           [10]byte   // scratch for the gzip header/trailer
	err           error      // sticky error, owned by the caller's goroutine
	pushedErr     chan error // errors reported by worker goroutines (see pushError)
	results       chan result // ordered queue of pending compressed blocks
	dictFlatePool *sync.Pool  // reusable *flate.Writer instances
	dstPool       *sync.Pool  // reusable destination buffers
}

// result links one compressed block to the single writer goroutine:
// the block's bytes arrive on result, and notifyWritten is closed once
// they have been written to the underlying writer.
type result struct {
	result        chan []byte
	notifyWritten chan struct{}
}

// Use SetConcurrency to finetune the concurrency level if needed.
//
// With this you can control the approximate size of your blocks,
// as well as how many you want to be processing in parallel.
//
// Default values for this is SetConcurrency(250000, 16),
// meaning blocks are split at 250000 bytes and up to 16 blocks
// can be processing at once before the writer blocks.
func (z *Writer) SetConcurrency(blockSize, blocks int) error {
	// A block must be larger than the dictionary tail carried between blocks.
	if blockSize <= tailSize {
		return fmt.Errorf("gzip: block size cannot be less than or equal to %d", tailSize)
	}
	if blocks <= 0 {
		return errors.New("gzip: blocks cannot be zero or less")
	}
	z.blockSize = blockSize
	z.results = make(chan result, blocks)
	z.blocks = blocks
	return nil
}

// NewWriter returns a new Writer.
// Writes to the returned writer are compressed and written to w.
//
// It is the caller's responsibility to call Close on the WriteCloser when done.
// Writes may be buffered and not flushed until Close.
//
// Callers that wish to set the fields in Writer.Header must do so before
// the first call to Write or Close. The Comment and Name header fields are
// UTF-8 strings in Go, but the underlying format requires NUL-terminated ISO
// 8859-1 (Latin-1). NUL or non-Latin-1 runes in those strings will lead to an
// error on Write.
func NewWriter(w io.Writer) *Writer {
	z, _ := NewWriterLevel(w, DefaultCompression)
	return z
}

// NewWriterLevel is like NewWriter but specifies the compression level instead
// of assuming DefaultCompression.
//
// The compression level can be DefaultCompression, NoCompression, or any
// integer value between BestSpeed and BestCompression inclusive. The error
// returned will be nil if the level is valid.
func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
	if level < ConstantCompression || level > BestCompression {
		return nil, fmt.Errorf("gzip: invalid compression level: %d", level)
	}
	z := new(Writer)
	z.SetConcurrency(defaultBlockSize, defaultBlocks)
	z.init(w, level)
	return z, nil
}

// This function must be used by goroutines to set an
// error condition, since z.err access is restricted
// to the caller's goroutine.
func (z *Writer) pushError(err error) {
	z.pushedErr <- err
	close(z.pushedErr)
}

// init (re)initializes z to write to w at the given level, reusing the
// existing digest if one is present.
func (z *Writer) init(w io.Writer, level int) {
	digest := z.digest
	if digest != nil {
		digest.Reset()
	} else {
		digest = crc32.NewIEEE()
	}

	// NOTE: the right-hand side reads z.blocks/z.blockSize from the OLD
	// value of *z before the struct is overwritten.
	*z = Writer{
		Header: Header{
			OS: 255, // unknown
		},
		w:         w,
		level:     level,
		digest:    digest,
		pushedErr: make(chan error, 1),
		results:   make(chan result, z.blocks),
		blockSize: z.blockSize,
		blocks:    z.blocks,
	}
	z.dictFlatePool = &sync.Pool{
		New: func() interface{} {
			f, _ := flate.NewWriterDict(w, level, nil)
			return f
		},
	}
	z.dstPool = &sync.Pool{New: func() interface{} { return make([]byte, 0, z.blockSize) }}
}

// Reset discards the Writer z's state and makes it equivalent to the
// result of its original state from NewWriter or NewWriterLevel, but
// writing to w instead. This permits reusing a Writer rather than
// allocating a new one.
func (z *Writer) Reset(w io.Writer) {
	// Shut down the writer goroutine from a previous use, if still running.
	if z.results != nil && !z.closed {
		close(z.results)
	}
	z.SetConcurrency(defaultBlockSize, defaultBlocks)
	z.init(w, z.level)
}

// GZIP (RFC 1952) is little-endian, unlike ZLIB (RFC 1950).
func put2(p []byte, v uint16) {
	p[0] = uint8(v >> 0)
	p[1] = uint8(v >> 8)
}

func put4(p []byte, v uint32) {
	p[0] = uint8(v >> 0)
	p[1] = uint8(v >> 8)
	p[2] = uint8(v >> 16)
	p[3] = uint8(v >> 24)
}

// writeBytes writes a length-prefixed byte slice to z.w.
func (z *Writer) writeBytes(b []byte) error {
	if len(b) > 0xffff {
		return errors.New("gzip.Write: Extra data is too large")
	}
	put2(z.buf[0:2], uint16(len(b)))
	_, err := z.w.Write(z.buf[0:2])
	if err != nil {
		return err
	}
	_, err = z.w.Write(b)
	return err
}

// writeString writes a UTF-8 string s in GZIP's format to z.w.
// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1).
func (z *Writer) writeString(s string) (err error) {
	// GZIP stores Latin-1 strings; error if non-Latin-1; convert if non-ASCII.
	needconv := false
	for _, v := range s {
		if v == 0 || v > 0xff {
			return errors.New("gzip.Write: non-Latin-1 header string")
		}
		if v > 0x7f {
			needconv = true
		}
	}
	if needconv {
		// Narrow each rune (already validated <= 0xff) to a Latin-1 byte.
		b := make([]byte, 0, len(s))
		for _, v := range s {
			b = append(b, byte(v))
		}
		_, err = z.w.Write(b)
	} else {
		_, err = io.WriteString(z.w, s)
	}
	if err != nil {
		return err
	}
	// GZIP strings are NUL-terminated.
	z.buf[0] = 0
	_, err = z.w.Write(z.buf[0:1])
	return err
}

// compressCurrent will compress the data currently buffered
// This should only be called from the main writer/flush/closer
func (z *Writer) compressCurrent(flush bool) {
	r := result{}
	r.result = make(chan []byte, 1)
	r.notifyWritten = make(chan struct{}, 0)
	// Enqueue before spawning so results keep write order; this also
	// blocks when z.blocks compressions are already in flight.
	z.results <- r

	// If block given is more than twice the block size, split it.
	c := z.currentBuffer
	if len(c) > z.blockSize*2 {
		c = c[:z.blockSize]
		// z is passed by value: compressBlock gets a consistent snapshot.
		go compressBlock(c, z.prevTail, *z, r)
		z.prevTail = c[len(c)-tailSize:]
		z.currentBuffer = z.currentBuffer[z.blockSize:]
		z.compressCurrent(flush)
		// Last one flushes if needed
		return
	}

	go compressBlock(c, z.prevTail, *z, r)
	if len(c) > tailSize {
		z.prevTail = c[len(c)-tailSize:]
	} else {
		z.prevTail = nil
	}
	z.currentBuffer = make([]byte, 0, z.blockSize+(z.blockSize/4))

	// Wait if flushing
	if flush {
		_ = <-r.notifyWritten
	}
}

// Returns an error if it has been set.
// Cannot be used by functions that are from internal goroutines.
func (z *Writer) checkError() error {
	if z.err != nil {
		return z.err
	}
	select {
	case err := <-z.pushedErr:
		z.err = err
	default:
	}
	return z.err
}

// Write writes a compressed form of p to the underlying io.Writer. The
// compressed bytes are not necessarily flushed to output until
// the Writer is closed or Flush() is called.
//
// The function will return quickly, if there are unused buffers.
// The sent slice (p) is copied, and the caller is free to re-use the buffer
// when the function returns.
//
// Errors that occur during compression will be reported later, and a nil error
// does not signify that the compression succeeded (since it is most likely still running)
// That means that the call that returns an error may not be the call that caused it.
// Only Flush and Close functions are guaranteed to return any errors up to that point.
func (z *Writer) Write(p []byte) (int, error) {
	if z.checkError() != nil {
		return 0, z.err
	}
	// Write the GZIP header lazily.
	if !z.wroteHeader {
		z.wroteHeader = true
		z.buf[0] = gzipID1
		z.buf[1] = gzipID2
		z.buf[2] = gzipDeflate
		z.buf[3] = 0
		if z.Extra != nil {
			z.buf[3] |= 0x04
		}
		if z.Name != "" {
			z.buf[3] |= 0x08
		}
		if z.Comment != "" {
			z.buf[3] |= 0x10
		}
		put4(z.buf[4:8], uint32(z.ModTime.Unix()))
		// XFL hints at the compression effort used.
		if z.level == BestCompression {
			z.buf[8] = 2
		} else if z.level == BestSpeed {
			z.buf[8] = 4
		} else {
			z.buf[8] = 0
		}
		z.buf[9] = z.OS
		var n int
		n, z.err = z.w.Write(z.buf[0:10])
		if z.err != nil {
			return n, z.err
		}
		if z.Extra != nil {
			z.err = z.writeBytes(z.Extra)
			if z.err != nil {
				return n, z.err
			}
		}
		if z.Name != "" {
			z.err = z.writeString(z.Name)
			if z.err != nil {
				return n, z.err
			}
		}
		if z.Comment != "" {
			z.err = z.writeString(z.Comment)
			if z.err != nil {
				return n, z.err
			}
		}
		// Start receiving data from compressors
		// This single goroutine serializes compressed blocks to z.w in
		// queue order; it exits when z.results is closed (Reset/Close).
		go func() {
			listen := z.results
			for {
				r, ok := <-listen
				// If closed, we are finished.
				if !ok {
					return
				}
				buf := <-r.result
				n, err := z.w.Write(buf)
				if err != nil {
					z.pushError(err)
					close(r.notifyWritten)
					return
				}
				if n != len(buf) {
					z.pushError(fmt.Errorf("gzip: short write %d should be %d", n, len(buf)))
					close(r.notifyWritten)
					return
				}
				z.dstPool.Put(buf)
				close(r.notifyWritten)
			}
		}()
		z.currentBuffer = make([]byte, 0, z.blockSize+(z.blockSize/4))
	}
	// Handle very large writes in a loop
	if len(p) > z.blockSize*z.blocks {
		q := p
		for len(q) > 0 {
			length := len(q)
			if length > z.blockSize {
				length = z.blockSize
			}
			z.digest.Write(q[:length])
			z.currentBuffer = append(z.currentBuffer, q[:length]...)
			if len(z.currentBuffer) >= z.blockSize {
				z.compressCurrent(false)
				// NOTE(review): this branch is effectively dead — compressCurrent
				// reports errors asynchronously via pushedErr, not by setting z.err.
				// The byte count also looks off (consumed so far is
				// len(p)-len(q)+length); confirm against upstream before relying on it.
				if z.err != nil {
					return len(p) - len(q) - length, z.err
				}
			}
			z.size += length
			q = q[length:]
		}
		return len(p), z.err
	} else {
		z.size += len(p)
		z.digest.Write(p)
		z.currentBuffer = append(z.currentBuffer, p...)
		if len(z.currentBuffer) >= z.blockSize {
			z.compressCurrent(false)
		}
		return len(p), z.err
	}
}

// Step 1: compresses buffer to buffer
// Step 2: send writer to channel
// Step 3: Close result channel to indicate we are done
// z is passed by value so the worker reads a consistent snapshot of the
// Writer's configuration and pools.
func compressBlock(p, prevTail []byte, z Writer, r result) {
	defer close(r.result)
	buf := z.dstPool.Get().([]byte)
	dest := bytes.NewBuffer(buf[:0])

	compressor := z.dictFlatePool.Get().(*flate.Writer)
	// Prime the compressor with the tail of the previous block so
	// cross-block matches are still found despite parallel compression.
	compressor.ResetDict(dest, prevTail)
	compressor.Write(p)

	err := compressor.Flush()
	if err != nil {
		z.pushError(err)
		return
	}
	if z.closed {
		// Final block: emit the deflate stream terminator.
		err = compressor.Close()
		if err != nil {
			z.pushError(err)
			return
		}
	}
	z.dictFlatePool.Put(compressor)
	// Read back buffer
	buf = dest.Bytes()
	r.result <- buf
}

// Flush flushes any pending compressed data to the underlying writer.
//
// It is useful mainly in compressed network protocols, to ensure that
// a remote reader has enough data to reconstruct a packet. Flush does
// not return until the data has been written. If the underlying
// writer returns an error, Flush returns that error.
//
// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
func (z *Writer) Flush() error {
	if z.checkError() != nil {
		return z.err
	}
	if z.closed {
		return nil
	}
	if !z.wroteHeader {
		// Writing the header also starts the writer goroutine.
		_, err := z.Write(nil)
		if err != nil {
			return err
		}
	}
	// We send current block to compression
	z.compressCurrent(true)
	if z.checkError() != nil {
		return z.err
	}
	return nil
}

// UncompressedSize will return the number of bytes written.
// pgzip only, not a function in the official gzip package.
// NOTE(review): value receiver — reads z.size from a copy of the Writer;
// only meaningful once preceding writes have completed.
func (z Writer) UncompressedSize() int {
	return z.size
}

// Close closes the Writer, flushing any unwritten data to the underlying
// io.Writer, but does not close the underlying io.Writer.
func (z *Writer) Close() error {
	if z.checkError() != nil {
		return z.err
	}
	if z.closed {
		return nil
	}
	// Mark closed first so the final compressBlock terminates the
	// deflate stream (see the z.closed check in compressBlock).
	z.closed = true
	if !z.wroteHeader {
		z.Write(nil)
		if z.err != nil {
			return z.err
		}
	}
	// Flush remaining buffered data and wait until it has been written.
	z.compressCurrent(true)
	if z.checkError() != nil {
		return z.err
	}
	// Stop the writer goroutine, then append the gzip trailer:
	// CRC-32 of the uncompressed data followed by ISIZE.
	close(z.results)
	put4(z.buf[0:4], z.digest.Sum32())
	put4(z.buf[4:8], uint32(z.size))
	_, z.err = z.w.Write(z.buf[0:8])
	return z.err
}



================================================
FILE: vendor/github.com/opencontainers/go-digest/LICENSE.code
================================================

                                 Apache License
                           Version 2.0, January 2004
                        https://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. 
Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS Copyright 2016 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
================================================ FILE: vendor/github.com/opencontainers/go-digest/LICENSE.docs ================================================ Attribution-ShareAlike 4.0 International ======================================================================= Creative Commons Corporation ("Creative Commons") is not a law firm and does not provide legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an "as-is" basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible. Using Creative Commons Public Licenses Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses. Considerations for licensors: Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC- licensed material, or material used under an exception or limitation to copyright. 
More considerations for licensors: wiki.creativecommons.org/Considerations_for_licensors Considerations for the public: By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor's permission is not necessary for any reason--for example, because of any applicable exception or limitation to copyright--then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. More_considerations for the public: wiki.creativecommons.org/Considerations_for_licensees ======================================================================= Creative Commons Attribution-ShareAlike 4.0 International Public License By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-ShareAlike 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions. Section 1 -- Definitions. a. 
Adapted Material means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image. b. Adapter's License means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License. c. BY-SA Compatible License means a license listed at creativecommons.org/compatiblelicenses, approved by Creative Commons as essentially the equivalent of this Public License. d. Copyright and Similar Rights means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights. e. Effective Technological Measures means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements. f. Exceptions and Limitations means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material. g. License Elements means the license attributes listed in the name of a Creative Commons Public License. The License Elements of this Public License are Attribution and ShareAlike. h. 
Licensed Material means the artistic or literary work, database, or other material to which the Licensor applied this Public License. i. Licensed Rights means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license. j. Licensor means the individual(s) or entity(ies) granting rights under this Public License. k. Share means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them. l. Sui Generis Database Rights means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world. m. You means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning. Section 2 -- Scope. a. License grant. 1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to: a. reproduce and Share the Licensed Material, in whole or in part; and b. produce, reproduce, and Share Adapted Material. 2. Exceptions and Limitations. For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions. 3. Term. 
The term of this Public License is specified in Section 6(a). 4. Media and formats; technical modifications allowed. The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a) (4) never produces Adapted Material. 5. Downstream recipients. a. Offer from the Licensor -- Licensed Material. Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License. b. Additional offer from the Licensor -- Adapted Material. Every recipient of Adapted Material from You automatically receives an offer from the Licensor to exercise the Licensed Rights in the Adapted Material under the conditions of the Adapter's License You apply. c. No downstream restrictions. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material. 6. No endorsement. Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i). b. Other rights. 1. 
Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise. 2. Patent and trademark rights are not licensed under this Public License. 3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties. Section 3 -- License Conditions. Your exercise of the Licensed Rights is expressly made subject to the following conditions. a. Attribution. 1. If You Share the Licensed Material (including in modified form), You must: a. retain the following if it is supplied by the Licensor with the Licensed Material: i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated); ii. a copyright notice; iii. a notice that refers to this Public License; iv. a notice that refers to the disclaimer of warranties; v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable; b. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and c. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License. 2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. 
For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information. 3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable. b. ShareAlike. In addition to the conditions in Section 3(a), if You Share Adapted Material You produce, the following conditions also apply. 1. The Adapter's License You apply must be a Creative Commons license with the same License Elements, this version or later, or a BY-SA Compatible License. 2. You must include the text of, or the URI or hyperlink to, the Adapter's License You apply. You may satisfy this condition in any reasonable manner based on the medium, means, and context in which You Share Adapted Material. 3. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, Adapted Material that restrict exercise of the rights granted under the Adapter's License You apply. Section 4 -- Sui Generis Database Rights. Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material: a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database; b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material, including for purposes of Section 3(b); and c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database. For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights. 
Section 5 -- Disclaimer of Warranties and Limitation of Liability. a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability. Section 6 -- Term and Termination. a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically. b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates: 1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or 2. 
upon express reinstatement by the Licensor. For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License. c. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License. d. Sections 1, 5, 6, 7, and 8 survive termination of this Public License. Section 7 -- Other Terms and Conditions. a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed. b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License. Section 8 -- Interpretation. a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License. b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions. c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor. d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority. ======================================================================= Creative Commons is not a party to its public licenses. 
Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the "Licensor." Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at creativecommons.org/policies, Creative Commons does not authorize the use of the trademark "Creative Commons" or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses. Creative Commons may be contacted at creativecommons.org. ================================================ FILE: vendor/github.com/opencontainers/go-digest/algorithm.go ================================================ package digest import ( "crypto" "fmt" "hash" "io" ) // Algorithm identifies and implementation of a digester by an identifier. // Note the that this defines both the hash algorithm used and the string // encoding. type Algorithm string // supported digest types const ( SHA256 Algorithm = "sha256" // sha256 with hex encoding SHA384 Algorithm = "sha384" // sha384 with hex encoding SHA512 Algorithm = "sha512" // sha512 with hex encoding // Canonical is the primary digest algorithm used with the distribution // project. Other digests may be used but this one is the primary storage // digest. Canonical = SHA256 ) var ( // TODO(stevvooe): Follow the pattern of the standard crypto package for // registration of digests. Effectively, we are a registerable set and // common symbol access. // algorithms maps values to hash.Hash implementations. Other algorithms // may be available but they cannot be calculated by the digest package. 
algorithms = map[Algorithm]crypto.Hash{ SHA256: crypto.SHA256, SHA384: crypto.SHA384, SHA512: crypto.SHA512, } ) // Available returns true if the digest type is available for use. If this // returns false, Digester and Hash will return nil. func (a Algorithm) Available() bool { h, ok := algorithms[a] if !ok { return false } // check availability of the hash, as well return h.Available() } func (a Algorithm) String() string { return string(a) } // Size returns number of bytes returned by the hash. func (a Algorithm) Size() int { h, ok := algorithms[a] if !ok { return 0 } return h.Size() } // Set implemented to allow use of Algorithm as a command line flag. func (a *Algorithm) Set(value string) error { if value == "" { *a = Canonical } else { // just do a type conversion, support is queried with Available. *a = Algorithm(value) } if !a.Available() { return ErrDigestUnsupported } return nil } // Digester returns a new digester for the specified algorithm. If the algorithm // does not have a digester implementation, nil will be returned. This can be // checked by calling Available before calling Digester. func (a Algorithm) Digester() Digester { return &digester{ alg: a, hash: a.Hash(), } } // Hash returns a new hash as used by the algorithm. If not available, the // method will panic. Check Algorithm.Available() before calling. func (a Algorithm) Hash() hash.Hash { if !a.Available() { // Empty algorithm string is invalid if a == "" { panic(fmt.Sprintf("empty digest algorithm, validate before calling Algorithm.Hash()")) } // NOTE(stevvooe): A missing hash is usually a programming error that // must be resolved at compile time. We don't import in the digest // package to allow users to choose their hash implementation (such as // when using stevvooe/resumable or a hardware accelerated package). // // Applications that may want to resolve the hash at runtime should // call Algorithm.Available before call Algorithm.Hash(). 
panic(fmt.Sprintf("%v not available (make sure it is imported)", a)) } return algorithms[a].New() } // FromReader returns the digest of the reader using the algorithm. func (a Algorithm) FromReader(rd io.Reader) (Digest, error) { digester := a.Digester() if _, err := io.Copy(digester.Hash(), rd); err != nil { return "", err } return digester.Digest(), nil } // FromBytes digests the input and returns a Digest. func (a Algorithm) FromBytes(p []byte) Digest { digester := a.Digester() if _, err := digester.Hash().Write(p); err != nil { // Writes to a Hash should never fail. None of the existing // hash implementations in the stdlib or hashes vendored // here can return errors from Write. Having a panic in this // condition instead of having FromBytes return an error value // avoids unnecessary error handling paths in all callers. panic("write to hash function returned error: " + err.Error()) } return digester.Digest() } // FromString digests the string input and returns a Digest. func (a Algorithm) FromString(s string) Digest { return a.FromBytes([]byte(s)) } ================================================ FILE: vendor/github.com/opencontainers/go-digest/digest.go ================================================ package digest import ( "fmt" "hash" "io" "regexp" "strings" ) // Digest allows simple protection of hex formatted digest strings, prefixed // by their algorithm. Strings of type Digest have some guarantee of being in // the correct format and it provides quick access to the components of a // digest string. // // The following is an example of the contents of Digest types: // // sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc // // This allows to abstract the digest behind this type and work only in those // terms. type Digest string // NewDigest returns a Digest from alg and a hash.Hash object. 
func NewDigest(alg Algorithm, h hash.Hash) Digest { return NewDigestFromBytes(alg, h.Sum(nil)) } // NewDigestFromBytes returns a new digest from the byte contents of p. // Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...) // functions. This is also useful for rebuilding digests from binary // serializations. func NewDigestFromBytes(alg Algorithm, p []byte) Digest { return Digest(fmt.Sprintf("%s:%x", alg, p)) } // NewDigestFromHex returns a Digest from alg and a the hex encoded digest. func NewDigestFromHex(alg, hex string) Digest { return Digest(fmt.Sprintf("%s:%s", alg, hex)) } // DigestRegexp matches valid digest types. var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`) // DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match. var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`) var ( // ErrDigestInvalidFormat returned when digest format invalid. ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format") // ErrDigestInvalidLength returned when digest has invalid length. ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length") // ErrDigestUnsupported returned when the digest algorithm is unsupported. ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm") ) // Parse parses s and returns the validated digest object. An error will // be returned if the format is invalid. func Parse(s string) (Digest, error) { d := Digest(s) return d, d.Validate() } // FromReader consumes the content of rd until io.EOF, returning canonical digest. func FromReader(rd io.Reader) (Digest, error) { return Canonical.FromReader(rd) } // FromBytes digests the input and returns a Digest. func FromBytes(p []byte) Digest { return Canonical.FromBytes(p) } // FromString digests the input and returns a Digest. 
func FromString(s string) Digest { return Canonical.FromString(s) } // Validate checks that the contents of d is a valid digest, returning an // error if not. func (d Digest) Validate() error { s := string(d) i := strings.Index(s, ":") // validate i then run through regexp if i < 0 || i+1 == len(s) || !DigestRegexpAnchored.MatchString(s) { return ErrDigestInvalidFormat } algorithm := Algorithm(s[:i]) if !algorithm.Available() { return ErrDigestUnsupported } // Digests much always be hex-encoded, ensuring that their hex portion will // always be size*2 if algorithm.Size()*2 != len(s[i+1:]) { return ErrDigestInvalidLength } return nil } // Algorithm returns the algorithm portion of the digest. This will panic if // the underlying digest is not in a valid format. func (d Digest) Algorithm() Algorithm { return Algorithm(d[:d.sepIndex()]) } // Verifier returns a writer object that can be used to verify a stream of // content against the digest. If the digest is invalid, the method will panic. func (d Digest) Verifier() Verifier { return hashVerifier{ hash: d.Algorithm().Hash(), digest: d, } } // Hex returns the hex digest portion of the digest. This will panic if the // underlying digest is not in a valid format. func (d Digest) Hex() string { return string(d[d.sepIndex()+1:]) } func (d Digest) String() string { return string(d) } func (d Digest) sepIndex() int { i := strings.Index(string(d), ":") if i < 0 { panic(fmt.Sprintf("no ':' separator in digest %q", d)) } return i } ================================================ FILE: vendor/github.com/opencontainers/go-digest/digester.go ================================================ package digest import "hash" // Digester calculates the digest of written data. Writes should go directly // to the return value of Hash, while calling Digest will return the current // value of the digest. type Digester interface { Hash() hash.Hash // provides direct access to underlying hash instance. 
Digest() Digest } // digester provides a simple digester definition that embeds a hasher. type digester struct { alg Algorithm hash hash.Hash } func (d *digester) Hash() hash.Hash { return d.hash } func (d *digester) Digest() Digest { return NewDigest(d.alg, d.hash) } ================================================ FILE: vendor/github.com/opencontainers/go-digest/doc.go ================================================ // Package digest provides a generalized type to opaquely represent message // digests and their operations within the registry. The Digest type is // designed to serve as a flexible identifier in a content-addressable system. // More importantly, it provides tools and wrappers to work with // hash.Hash-based digests with little effort. // // Basics // // The format of a digest is simply a string with two parts, dubbed the // "algorithm" and the "digest", separated by a colon: // // : // // An example of a sha256 digest representation follows: // // sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc // // In this case, the string "sha256" is the algorithm and the hex bytes are // the "digest". // // Because the Digest type is simply a string, once a valid Digest is // obtained, comparisons are cheap, quick and simple to express with the // standard equality operator. // // Verification // // The main benefit of using the Digest type is simple verification against a // given digest. The Verifier interface, modeled after the stdlib hash.Hash // interface, provides a common write sink for digest verification. After // writing is complete, calling the Verifier.Verified method will indicate // whether or not the stream of bytes matches the target digest. // // Missing Features // // In addition to the above, we intend to add the following features to this // package: // // 1. A Digester type that supports write sink digest calculation. // // 2. 
Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry. // package digest ================================================ FILE: vendor/github.com/opencontainers/go-digest/verifiers.go ================================================ package digest import ( "hash" "io" ) // Verifier presents a general verification interface to be used with message // digests and other byte stream verifications. Users instantiate a Verifier // from one of the various methods, write the data under test to it then check // the result with the Verified method. type Verifier interface { io.Writer // Verified will return true if the content written to Verifier matches // the digest. Verified() bool } type hashVerifier struct { digest Digest hash hash.Hash } func (hv hashVerifier) Write(p []byte) (n int, err error) { return hv.hash.Write(p) } func (hv hashVerifier) Verified() bool { return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash) } ================================================ FILE: vendor/github.com/opencontainers/image-spec/LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS Copyright 2016 The Linux Foundation. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
================================================ FILE: vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go ================================================
// Copyright 2016 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v1

// ImageConfig defines the execution parameters which should be used as a base when running a container using an image.
type ImageConfig struct {
	// User defines the username or UID which the process in the container should run as.
	User string `json:"User"`

	// Memory defines the memory limit.
	Memory int64 `json:"Memory"`

	// MemorySwap defines the total memory usage limit (memory + swap).
	MemorySwap int64 `json:"MemorySwap"`

	// CPUShares is the CPU shares (relative weight vs. other containers).
	// Note: the JSON key is "CpuShares" (Docker-compatible casing), not "CPUShares".
	CPUShares int64 `json:"CpuShares"`

	// ExposedPorts a set of ports to expose from a container running this image.
	ExposedPorts map[string]struct{} `json:"ExposedPorts"`

	// Env is a list of environment variables to be used in a container.
	Env []string `json:"Env"`

	// Entrypoint defines a list of arguments to use as the command to execute when the container starts.
	Entrypoint []string `json:"Entrypoint"`

	// Cmd defines the default arguments to the entrypoint of the container.
	Cmd []string `json:"Cmd"`

	// Volumes is a set of directories which should be created as data volumes in a container running this image.
	Volumes map[string]struct{} `json:"Volumes"`

	// WorkingDir sets the current working directory of the entrypoint process in the container.
	WorkingDir string `json:"WorkingDir"`
}

// RootFS describes the layer content addresses of a root filesystem.
type RootFS struct {
	// Type is the type of the rootfs.
	Type string `json:"type"`

	// DiffIDs is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most.
	DiffIDs []string `json:"diff_ids"`
}

// History describes the history of a layer.
type History struct {
	// Created is the creation time.
	Created string `json:"created"`

	// CreatedBy is the command which created the layer.
	CreatedBy string `json:"created_by"`

	// Author is the author of the build point.
	Author string `json:"author"`

	// Comment is a custom message set when creating the layer.
	Comment string `json:"comment"`

	// EmptyLayer is used to mark if the history item created a filesystem diff.
	EmptyLayer bool `json:"empty_layer"`
}

// Image is the JSON structure which describes some basic information about the image.
type Image struct {
	// Created defines an ISO-8601 formatted combined date and time at which the image was created.
	Created string `json:"created"`

	// Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image.
	Author string `json:"author"`

	// Architecture is the CPU architecture which the binaries in this image are built to run on.
	Architecture string `json:"architecture"`

	// OS is the name of the operating system which the image is built to run on.
	OS string `json:"os"`

	// Config defines the execution parameters which should be used as a base when running a container using the image.
	Config ImageConfig `json:"config"`

	// RootFS references the layer content addresses used by the image.
	RootFS RootFS `json:"rootfs"`

	// History describes the history of each layer.
	History []History `json:"history"`
}

================================================ FILE: vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go ================================================
// Copyright 2016 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v1

// Descriptor describes the disposition of targeted content.
type Descriptor struct {
	// MediaType contains the MIME type of the referenced object.
	MediaType string `json:"mediaType"`

	// Digest is the digest of the targeted content.
	Digest string `json:"digest"`

	// Size specifies the size in bytes of the blob.
	Size int64 `json:"size"`
}

================================================ FILE: vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go ================================================
// Copyright 2016 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1

import "github.com/opencontainers/image-spec/specs-go"

// Manifest defines a schema2 manifest
type Manifest struct {
	specs.Versioned

	// Config references a configuration object for a container, by digest.
	// The referenced configuration object is a JSON blob that the runtime uses to set up the container.
	Config Descriptor `json:"config"`

	// Layers is an indexed list of layers referenced by the manifest.
	Layers []Descriptor `json:"layers"`

	// Annotations contains arbitrary metadata for the manifest.
	Annotations map[string]string `json:"annotations"`
}

================================================ FILE: vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest_list.go ================================================
// Copyright 2016 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v1

import "github.com/opencontainers/image-spec/specs-go"

// Platform describes the platform which the image in the manifest runs on.
type Platform struct {
	// Architecture field specifies the CPU architecture, for example
	// `amd64` or `ppc64`.
	Architecture string `json:"architecture"`

	// OS specifies the operating system, for example `linux` or `windows`.
	OS string `json:"os"`

	// OSVersion is an optional field specifying the operating system
	// version, for example `10.0.10586`.
	OSVersion string `json:"os.version,omitempty"`

	// OSFeatures is an optional field specifying an array of strings,
	// each listing a required OS feature (for example on Windows `win32k`).
	OSFeatures []string `json:"os.features,omitempty"`

	// Variant is an optional field specifying a variant of the CPU, for
	// example `ppc64le` to specify a little-endian version of a PowerPC CPU.
	Variant string `json:"variant,omitempty"`

	// Features is an optional field specifying an array of strings, each
	// listing a required CPU feature (for example `sse4` or `aes`).
	Features []string `json:"features,omitempty"`
}

// ManifestDescriptor describes a platform specific manifest.
type ManifestDescriptor struct {
	Descriptor

	// Platform describes the platform which the image in the manifest runs on.
	Platform Platform `json:"platform"`
}

// ManifestList references manifests for various platforms.
type ManifestList struct {
	specs.Versioned

	// Manifests references platform specific manifests.
	Manifests []ManifestDescriptor `json:"manifests"`

	// Annotations contains arbitrary metadata for the manifest list.
	Annotations map[string]string `json:"annotations"`
}

================================================ FILE: vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go ================================================
// Copyright 2016 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1

const (
	// MediaTypeDescriptor specifies the media type for a content descriptor.
	MediaTypeDescriptor = "application/vnd.oci.descriptor.v1+json"

	// MediaTypeImageManifest specifies the media type for an image manifest.
	MediaTypeImageManifest = "application/vnd.oci.image.manifest.v1+json"

	// MediaTypeImageManifestList specifies the media type for an image manifest list.
	MediaTypeImageManifestList = "application/vnd.oci.image.manifest.list.v1+json"

	// MediaTypeImageLayer is the media type used for layers referenced by the manifest.
	MediaTypeImageLayer = "application/vnd.oci.image.layer.v1.tar+gzip"

	// MediaTypeImageLayerNonDistributable is the media type for layers referenced by
	// the manifest but with distribution restrictions.
	MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip"

	// MediaTypeImageConfig specifies the media type for the image configuration.
	MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json"
)

================================================ FILE: vendor/github.com/opencontainers/image-spec/specs-go/version.go ================================================
// Copyright 2016 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package specs

import "fmt"

const (
	// VersionMajor is incremented for incompatible API changes.
	VersionMajor = 1
	// VersionMinor is incremented for functionality added in a backwards-compatible manner.
	VersionMinor = 0
	// VersionPatch is incremented for backwards-compatible bug fixes.
	VersionPatch = 0

	// VersionDev indicates development branch. Releases will be empty string.
	VersionDev = "-rc2"
)

// Version is the specification version that the package types support.
var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev)

================================================ FILE: vendor/github.com/opencontainers/image-spec/specs-go/versioned.go ================================================
// Copyright 2016 The Linux Foundation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package specs

// Versioned provides a struct with the manifest schemaVersion and mediaType.
// Incoming content with unknown schema version can be decoded against this
// struct to check the version.
type Versioned struct {
	// SchemaVersion is the image manifest schema that this image follows
	SchemaVersion int `json:"schemaVersion"`

	// MediaType is the media type of this schema.
	MediaType string `json:"mediaType,omitempty"`
}

================================================ FILE: vendor/github.com/spf13/pflag/LICENSE ================================================
Copyright (c) 2012 Alex Ogier. All rights reserved.
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:

   * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
   * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

================================================ FILE: vendor/github.com/spf13/pflag/flag.go ================================================
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

/*
pflag is a drop-in replacement for Go's flag package, implementing POSIX/GNU-style --flags.

pflag is compatible with the GNU extensions to the POSIX recommendations for command-line options. See http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html

Usage:

pflag is a drop-in replacement of Go's native flag package. If you import pflag under the name "flag" then all code should continue to function with no changes.

	import flag "github.com/ogier/pflag"

There is one exception to this: if you directly instantiate the Flag struct there is one more field "Shorthand" that you will need to set. Most code never instantiates this struct directly, and instead uses functions such as String(), BoolVar(), and Var(), and is therefore unaffected.

Define flags using flag.String(), Bool(), Int(), etc. This declares an integer flag, -flagname, stored in the pointer ip, with type *int.

	var ip = flag.Int("flagname", 1234, "help message for flagname")

If you like, you can bind the flag to a variable using the Var() functions.

	var flagvar int
	func init() {
		flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname")
	}

Or you can create custom flags that satisfy the Value interface (with pointer receivers) and couple them to flag parsing by

	flag.Var(&flagVal, "name", "help message for flagname")

For such flags, the default value is just the initial value of the variable.

After all flags are defined, call

	flag.Parse()

to parse the command line into the defined flags.

Flags may then be used directly. If you're using the flags themselves, they are all pointers; if you bind to variables, they're values.

	fmt.Println("ip has value ", *ip)
	fmt.Println("flagvar has value ", flagvar)

After parsing, the arguments after the flag are available as the slice flag.Args() or individually as flag.Arg(i). The arguments are indexed from 0 through flag.NArg()-1.

The pflag package also defines some new functions that are not in flag, that give one-letter shorthands for flags. You can use these by appending 'P' to the name of any function that defines a flag.

	var ip = flag.IntP("flagname", "f", 1234, "help message")
	var flagvar bool
	func init() {
		flag.BoolVarP(&flagvar, "boolname", "b", true, "help message")
	}
	flag.VarP(&flagVar, "varname", "v", "help message")

(NOTE(review): the two examples above were corrected — BoolVarP takes a *bool as its first
argument, and VarP takes exactly (value, name, shorthand, usage); the original doc omitted
the pointer and passed a spurious default value. See the function signatures below.)

Shorthand letters can be used with single dashes on the command line. Boolean shorthand flags can be combined with other shorthand flags.

Command line flag syntax:

	--flag    // boolean flags only
	--flag=x

Unlike the flag package, a single dash before an option means something different than a double dash. Single dashes signify a series of shorthand letters for flags. All but the last shorthand letter must be boolean flags.

	// boolean flags
	-f
	-abc
	// non-boolean flags
	-n 1234
	-Ifile
	// mixed
	-abcs "hello"
	-abcn1234

Flag parsing stops after the terminator "--". Unlike the flag package, flags can be interspersed with arguments anywhere on the command line before this terminator.

Integer flags accept 1234, 0664, 0x1234 and may be negative. Boolean flags (in their long form) accept 1, 0, t, f, true, false, TRUE, FALSE, True, False. Duration flags accept any input valid for time.ParseDuration.

The default set of command-line flags is controlled by top-level functions. The FlagSet type allows one to define independent sets of flags, such as to implement subcommands in a command-line interface. The methods of FlagSet are analogous to the top-level functions for the command-line flag set.
*/
package pflag

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"os"
	"sort"
	"strconv"
	"strings"
	"time"
)

// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined.
var ErrHelp = errors.New("pflag: help requested")

// -- bool Value
type boolValue bool

// newBoolValue stores def in target and returns target viewed as a flag Value.
func newBoolValue(def bool, target *bool) *boolValue {
	*target = def
	return (*boolValue)(target)
}

// Set parses s as a boolean; on parse failure the value becomes false and the
// parse error is returned.
func (v *boolValue) Set(s string) error {
	parsed, err := strconv.ParseBool(s)
	*v = boolValue(parsed)
	return err
}

// String renders the current value as "true" or "false".
func (v *boolValue) String() string {
	return strconv.FormatBool(bool(*v))
}

// -- int Value
type intValue int

// newIntValue stores def in target and returns target viewed as a flag Value.
func newIntValue(def int, target *int) *intValue {
	*target = def
	return (*intValue)(target)
}

// Set parses s in any Go integer syntax (decimal, 0octal, 0xhex); on failure
// the value becomes zero and the parse error is returned.
func (v *intValue) Set(s string) error {
	parsed, err := strconv.ParseInt(s, 0, 64)
	*v = intValue(parsed)
	return err
}

// String renders the current value in base 10.
func (v *intValue) String() string {
	return strconv.Itoa(int(*v))
}

// -- int64 Value
type int64Value int64

// newInt64Value stores def in target and returns target viewed as a flag Value.
func newInt64Value(def int64, target *int64) *int64Value {
	*target = def
	return (*int64Value)(target)
}

// Set parses s in any Go integer syntax; on failure the value becomes zero
// and the parse error is returned.
func (v *int64Value) Set(s string) error {
	parsed, err := strconv.ParseInt(s, 0, 64)
	*v = int64Value(parsed)
	return err
}

// String renders the current value in base 10.
func (v *int64Value) String() string {
	return strconv.FormatInt(int64(*v), 10)
}

// -- uint Value
type uintValue uint

// newUintValue stores def in target and returns target viewed as a flag Value.
func newUintValue(def uint, target *uint) *uintValue {
	*target = def
	return (*uintValue)(target)
}

// Set parses s as an unsigned integer in any Go integer syntax; on failure
// the value becomes zero and the parse error is returned.
func (v *uintValue) Set(s string) error {
	parsed, err := strconv.ParseUint(s, 0, 64)
	*v = uintValue(parsed)
	return err
}

// String renders the current value in base 10.
func (v *uintValue) String() string {
	return strconv.FormatUint(uint64(*v), 10)
}

// -- uint64 Value
type uint64Value uint64

// newUint64Value stores def in target and returns target viewed as a flag Value.
func newUint64Value(def uint64, target *uint64) *uint64Value {
	*target = def
	return (*uint64Value)(target)
}

// Set parses s as an unsigned integer in any Go integer syntax; on failure
// the value becomes zero and the parse error is returned.
func (v *uint64Value) Set(s string) error {
	parsed, err := strconv.ParseUint(s, 0, 64)
	*v = uint64Value(parsed)
	return err
}

// String renders the current value in base 10.
func (v *uint64Value) String() string {
	return strconv.FormatUint(uint64(*v), 10)
}

// -- string Value
type stringValue string

// newStringValue stores def in target and returns target viewed as a flag Value.
func newStringValue(def string, target *string) *stringValue {
	*target = def
	return (*stringValue)(target)
}

// Set stores s verbatim; string flags never fail to parse.
func (v *stringValue) Set(s string) error {
	*v = stringValue(s)
	return nil
}

// String returns the stored string.
func (v *stringValue) String() string {
	return string(*v)
}

// -- float64 Value
type float64Value float64

// newFloat64Value stores def in target and returns target viewed as a flag Value.
func newFloat64Value(def float64, target *float64) *float64Value {
	*target = def
	return (*float64Value)(target)
}

// Set parses s as a 64-bit float; on failure the value becomes zero and the
// parse error is returned.
func (v *float64Value) Set(s string) error {
	parsed, err := strconv.ParseFloat(s, 64)
	*v = float64Value(parsed)
	return err
}

// String renders the current value using fmt's default float formatting.
func (v *float64Value) String() string {
	return fmt.Sprintf("%v", *v)
}

// -- time.Duration Value
type durationValue time.Duration

// newDurationValue stores def in target and returns target viewed as a flag Value.
func newDurationValue(def time.Duration, target *time.Duration) *durationValue {
	*target = def
	return (*durationValue)(target)
}

// Set parses s with time.ParseDuration; on failure the value becomes zero and
// the parse error is returned.
func (v *durationValue) Set(s string) error {
	parsed, err := time.ParseDuration(s)
	*v = durationValue(parsed)
	return err
}

// String renders the current value in time.Duration's canonical form.
func (v *durationValue) String() string {
	return (*time.Duration)(v).String()
}

// Value is the interface to the dynamic value stored in a flag.
// (The default value is represented as a string.)
type Value interface {
	String() string
	Set(string) error
}

// ErrorHandling defines how to handle flag parsing errors.
type ErrorHandling int

const (
	ContinueOnError ErrorHandling = iota
	ExitOnError
	PanicOnError
)

// A FlagSet represents a set of defined flags.
type FlagSet struct {
	// Usage is the function called when an error occurs while parsing flags.
	// The field is a function (not a method) that may be changed to point to
	// a custom error handler.
	Usage func()

	name          string
	parsed        bool
	actual        map[string]*Flag
	formal        map[string]*Flag
	shorthands    map[byte]*Flag
	args          []string // arguments after flags
	exitOnError   bool     // does the program exit if there's an error?
	errorHandling ErrorHandling
	output        io.Writer // nil means stderr; use out() accessor
	interspersed  bool      // allow interspersed option/non-option args
}

// A Flag represents the state of a flag.
type Flag struct {
	Name      string // name as it appears on command line
	Shorthand string // one-letter abbreviated flag
	Usage     string // help message
	Value     Value  // value as set
	DefValue  string // default value (as text); for usage message
	Changed   bool   // If the user set the value (or if left to default)
}

// sortFlags returns the flags as a slice in lexicographical sorted order.
// sortFlags returns the given flags as a slice ordered lexicographically by name.
func sortFlags(flags map[string]*Flag) []*Flag {
	// Collect the names, sort them, then map back to *Flag in sorted order.
	list := make(sort.StringSlice, len(flags))
	i := 0
	for _, f := range flags {
		list[i] = f.Name
		i++
	}
	list.Sort()
	result := make([]*Flag, len(list))
	for i, name := range list {
		result[i] = flags[name]
	}
	return result
}

// out returns the destination for usage and error messages, defaulting to
// os.Stderr when no output has been set.
func (f *FlagSet) out() io.Writer {
	if f.output == nil {
		return os.Stderr
	}
	return f.output
}

// SetOutput sets the destination for usage and error messages.
// If output is nil, os.Stderr is used.
func (f *FlagSet) SetOutput(output io.Writer) {
	f.output = output
}

// VisitAll visits the flags in lexicographical order, calling fn for each.
// It visits all flags, even those not set.
func (f *FlagSet) VisitAll(fn func(*Flag)) {
	for _, flag := range sortFlags(f.formal) {
		fn(flag)
	}
}

// HasFlags reports whether the set contains at least one defined flag.
func (f *FlagSet) HasFlags() bool {
	return len(f.formal) > 0
}

// VisitAll visits the command-line flags in lexicographical order, calling
// fn for each. It visits all flags, even those not set.
func VisitAll(fn func(*Flag)) {
	commandLine.VisitAll(fn)
}

// Visit visits the flags in lexicographical order, calling fn for each.
// It visits only those flags that have been set.
func (f *FlagSet) Visit(fn func(*Flag)) {
	for _, flag := range sortFlags(f.actual) {
		fn(flag)
	}
}

// Visit visits the command-line flags in lexicographical order, calling fn
// for each. It visits only those flags that have been set.
func Visit(fn func(*Flag)) {
	commandLine.Visit(fn)
}

// Lookup returns the Flag structure of the named flag, returning nil if none exists.
func (f *FlagSet) Lookup(name string) *Flag {
	return f.formal[name]
}

// Lookup returns the Flag structure of the named command-line flag,
// returning nil if none exists.
func Lookup(name string) *Flag {
	return commandLine.formal[name]
}

// Set sets the value of the named flag. It returns an error if the flag is
// unknown or if the value fails to parse; on success the flag is recorded as
// set (actual) and marked Changed.
func (f *FlagSet) Set(name, value string) error {
	flag, ok := f.formal[name]
	if !ok {
		return fmt.Errorf("no such flag -%v", name)
	}
	err := flag.Value.Set(value)
	if err != nil {
		return err
	}
	// actual is allocated lazily: it only exists once a flag has been set.
	if f.actual == nil {
		f.actual = make(map[string]*Flag)
	}
	f.actual[name] = flag
	f.Lookup(name).Changed = true
	return nil
}

// Set sets the value of the named command-line flag.
func Set(name, value string) error {
	return commandLine.Set(name, value)
}

// PrintDefaults prints, to standard error unless configured
// otherwise, the default values of all defined flags in the set.
func (f *FlagSet) PrintDefaults() {
	f.VisitAll(func(flag *Flag) {
		format := "--%s=%s: %s\n"
		if _, ok := flag.Value.(*stringValue); ok {
			// put quotes on the value
			format = "--%s=%q: %s\n"
		}
		// Flags with a shorthand get the "-x, --long" form; others are padded.
		if len(flag.Shorthand) > 0 {
			format = " -%s, " + format
		} else {
			format = " %s " + format
		}
		fmt.Fprintf(f.out(), format, flag.Shorthand, flag.Name, flag.DefValue, flag.Usage)
	})
}

// FlagUsages returns the usage text for all flags in the set as a single
// string, formatted the same way as PrintDefaults.
func (f *FlagSet) FlagUsages() string {
	x := new(bytes.Buffer)
	f.VisitAll(func(flag *Flag) {
		format := "--%s=%s: %s\n"
		if _, ok := flag.Value.(*stringValue); ok {
			// put quotes on the value
			format = "--%s=%q: %s\n"
		}
		if len(flag.Shorthand) > 0 {
			format = " -%s, " + format
		} else {
			format = " %s " + format
		}
		fmt.Fprintf(x, format, flag.Shorthand, flag.Name, flag.DefValue, flag.Usage)
	})
	return x.String()
}

// PrintDefaults prints to standard error the default values of all defined command-line flags.
func PrintDefaults() {
	commandLine.PrintDefaults()
}

// defaultUsage is the default function to print a usage message.
func defaultUsage(f *FlagSet) {
	fmt.Fprintf(f.out(), "Usage of %s:\n", f.name)
	f.PrintDefaults()
}

// NOTE: Usage is not just defaultUsage(commandLine)
// because it serves (via godoc flag Usage) as the example
// for how to write your own usage function.

// Usage prints to standard error a usage message documenting all defined command-line flags.
// The function is a variable that may be changed to point to a custom function.
var Usage = func() { fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) PrintDefaults() } // NFlag returns the number of flags that have been set. func (f *FlagSet) NFlag() int { return len(f.actual) } // NFlag returns the number of command-line flags that have been set. func NFlag() int { return len(commandLine.actual) } // Arg returns the i'th argument. Arg(0) is the first remaining argument // after flags have been processed. func (f *FlagSet) Arg(i int) string { if i < 0 || i >= len(f.args) { return "" } return f.args[i] } // Arg returns the i'th command-line argument. Arg(0) is the first remaining argument // after flags have been processed. func Arg(i int) string { return commandLine.Arg(i) } // NArg is the number of arguments remaining after flags have been processed. func (f *FlagSet) NArg() int { return len(f.args) } // NArg is the number of arguments remaining after flags have been processed. func NArg() int { return len(commandLine.args) } // Args returns the non-flag arguments. func (f *FlagSet) Args() []string { return f.args } // Args returns the non-flag command-line arguments. func Args() []string { return commandLine.args } // BoolVar defines a bool flag with specified name, default value, and usage string. // The argument p points to a bool variable in which to store the value of the flag. func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) { f.VarP(newBoolValue(value, p), name, "", usage) } // Like BoolVar, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) { f.VarP(newBoolValue(value, p), name, shorthand, usage) } // BoolVar defines a bool flag with specified name, default value, and usage string. // The argument p points to a bool variable in which to store the value of the flag. 
func BoolVar(p *bool, name string, value bool, usage string) {
	commandLine.VarP(newBoolValue(value, p), name, "", usage)
}

// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash.
func BoolVarP(p *bool, name, shorthand string, value bool, usage string) {
	commandLine.VarP(newBoolValue(value, p), name, shorthand, usage)
}

// Bool defines a bool flag with specified name, default value, and usage string.
// The return value is the address of a bool variable that stores the value of the flag.
func (f *FlagSet) Bool(name string, value bool, usage string) *bool {
	ptr := new(bool)
	f.BoolVarP(ptr, name, "", value, usage)
	return ptr
}

// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool {
	ptr := new(bool)
	f.BoolVarP(ptr, name, shorthand, value, usage)
	return ptr
}

// Bool defines a bool flag with specified name, default value, and usage string.
// The return value is the address of a bool variable that stores the value of the flag.
func Bool(name string, value bool, usage string) *bool {
	return commandLine.BoolP(name, "", value, usage)
}

// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash.
func BoolP(name, shorthand string, value bool, usage string) *bool {
	return commandLine.BoolP(name, shorthand, value, usage)
}

// IntVar defines an int flag with specified name, default value, and usage string.
// The argument p points to an int variable in which to store the value of the flag.
func (f *FlagSet) IntVar(p *int, name string, value int, usage string) {
	f.VarP(newIntValue(value, p), name, "", usage)
}

// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) {
	f.VarP(newIntValue(value, p), name, shorthand, usage)
}

// IntVar defines an int flag with specified name, default value, and usage string.
// The argument p points to an int variable in which to store the value of the flag.
func IntVar(p *int, name string, value int, usage string) {
	commandLine.VarP(newIntValue(value, p), name, "", usage)
}

// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash.
func IntVarP(p *int, name, shorthand string, value int, usage string) {
	commandLine.VarP(newIntValue(value, p), name, shorthand, usage)
}

// Int defines an int flag with specified name, default value, and usage string.
// The return value is the address of an int variable that stores the value of the flag.
func (f *FlagSet) Int(name string, value int, usage string) *int {
	ptr := new(int)
	f.IntVarP(ptr, name, "", value, usage)
	return ptr
}

// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int {
	ptr := new(int)
	f.IntVarP(ptr, name, shorthand, value, usage)
	return ptr
}

// Int defines an int flag with specified name, default value, and usage string.
// The return value is the address of an int variable that stores the value of the flag.
func Int(name string, value int, usage string) *int {
	return commandLine.IntP(name, "", value, usage)
}

// IntP is like Int, but accepts a shorthand letter that can be used after a single dash.
func IntP(name, shorthand string, value int, usage string) *int {
	return commandLine.IntP(name, shorthand, value, usage)
}

// Int64Var defines an int64 flag with specified name, default value, and usage string.
// The argument p points to an int64 variable in which to store the value of the flag.
func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) {
	f.VarP(newInt64Value(value, p), name, "", usage)
}

// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
	f.VarP(newInt64Value(value, p), name, shorthand, usage)
}

// Int64Var defines an int64 flag with specified name, default value, and usage string.
// The argument p points to an int64 variable in which to store the value of the flag.
func Int64Var(p *int64, name string, value int64, usage string) {
	commandLine.VarP(newInt64Value(value, p), name, "", usage)
}

// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash.
func Int64VarP(p *int64, name, shorthand string, value int64, usage string) {
	commandLine.VarP(newInt64Value(value, p), name, shorthand, usage)
}

// Int64 defines an int64 flag with specified name, default value, and usage string.
// The return value is the address of an int64 variable that stores the value of the flag.
func (f *FlagSet) Int64(name string, value int64, usage string) *int64 {
	ptr := new(int64)
	f.Int64VarP(ptr, name, "", value, usage)
	return ptr
}

// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 {
	ptr := new(int64)
	f.Int64VarP(ptr, name, shorthand, value, usage)
	return ptr
}

// Int64 defines an int64 flag with specified name, default value, and usage string.
// The return value is the address of an int64 variable that stores the value of the flag.
func Int64(name string, value int64, usage string) *int64 {
	return commandLine.Int64P(name, "", value, usage)
}

// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash.
func Int64P(name, shorthand string, value int64, usage string) *int64 {
	return commandLine.Int64P(name, shorthand, value, usage)
}

// UintVar defines a uint flag with specified name, default value, and usage string.
// The argument p points to a uint variable in which to store the value of the flag.
func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) {
	f.VarP(newUintValue(value, p), name, "", usage)
}

// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) {
	f.VarP(newUintValue(value, p), name, shorthand, usage)
}

// UintVar defines a uint flag with specified name, default value, and usage string.
// The argument p points to a uint variable in which to store the value of the flag.
func UintVar(p *uint, name string, value uint, usage string) {
	commandLine.VarP(newUintValue(value, p), name, "", usage)
}

// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash.
func UintVarP(p *uint, name, shorthand string, value uint, usage string) {
	commandLine.VarP(newUintValue(value, p), name, shorthand, usage)
}

// Uint defines a uint flag with specified name, default value, and usage string.
// The return value is the address of a uint variable that stores the value of the flag.
func (f *FlagSet) Uint(name string, value uint, usage string) *uint {
	ptr := new(uint)
	f.UintVarP(ptr, name, "", value, usage)
	return ptr
}

// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint {
	ptr := new(uint)
	f.UintVarP(ptr, name, shorthand, value, usage)
	return ptr
}

// Uint defines a uint flag with specified name, default value, and usage string.
// The return value is the address of a uint variable that stores the value of the flag.
func Uint(name string, value uint, usage string) *uint {
	return commandLine.UintP(name, "", value, usage)
}

// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash.
func UintP(name, shorthand string, value uint, usage string) *uint {
	return commandLine.UintP(name, shorthand, value, usage)
}

// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
// The argument p points to a uint64 variable in which to store the value of the flag.
func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) {
	f.VarP(newUint64Value(value, p), name, "", usage)
}

// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
	f.VarP(newUint64Value(value, p), name, shorthand, usage)
}

// Uint64Var defines a uint64 flag with specified name, default value, and usage string.
// The argument p points to a uint64 variable in which to store the value of the flag.
func Uint64Var(p *uint64, name string, value uint64, usage string) {
	commandLine.VarP(newUint64Value(value, p), name, "", usage)
}

// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash.
func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) {
	commandLine.VarP(newUint64Value(value, p), name, shorthand, usage)
}

// Uint64 defines a uint64 flag with specified name, default value, and usage string.
// The return value is the address of a uint64 variable that stores the value of the flag.
func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 {
	ptr := new(uint64)
	f.Uint64VarP(ptr, name, "", value, usage)
	return ptr
}

// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
	ptr := new(uint64)
	f.Uint64VarP(ptr, name, shorthand, value, usage)
	return ptr
}

// Uint64 defines a uint64 flag with specified name, default value, and usage string.
// The return value is the address of a uint64 variable that stores the value of the flag.
func Uint64(name string, value uint64, usage string) *uint64 {
	return commandLine.Uint64P(name, "", value, usage)
}

// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash.
func Uint64P(name, shorthand string, value uint64, usage string) *uint64 {
	return commandLine.Uint64P(name, shorthand, value, usage)
}

// StringVar defines a string flag with specified name, default value, and usage string.
// The argument p points to a string variable in which to store the value of the flag.
func (f *FlagSet) StringVar(p *string, name string, value string, usage string) {
	f.VarP(newStringValue(value, p), name, "", usage)
}

// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) {
	f.VarP(newStringValue(value, p), name, shorthand, usage)
}

// StringVar defines a string flag with specified name, default value, and usage string.
// The argument p points to a string variable in which to store the value of the flag.
func StringVar(p *string, name string, value string, usage string) {
	commandLine.VarP(newStringValue(value, p), name, "", usage)
}

// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash.
func StringVarP(p *string, name, shorthand string, value string, usage string) {
	commandLine.VarP(newStringValue(value, p), name, shorthand, usage)
}

// String defines a string flag with specified name, default value, and usage string.
// The return value is the address of a string variable that stores the value of the flag.
func (f *FlagSet) String(name string, value string, usage string) *string {
	ptr := new(string)
	f.StringVarP(ptr, name, "", value, usage)
	return ptr
}

// StringP is like String, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string {
	ptr := new(string)
	f.StringVarP(ptr, name, shorthand, value, usage)
	return ptr
}

// String defines a string flag with specified name, default value, and usage string.
// The return value is the address of a string variable that stores the value of the flag.
func String(name string, value string, usage string) *string { return commandLine.StringP(name, "", value, usage) } // Like String, but accepts a shorthand letter that can be used after a single dash. func StringP(name, shorthand string, value string, usage string) *string { return commandLine.StringP(name, shorthand, value, usage) } // Float64Var defines a float64 flag with specified name, default value, and usage string. // The argument p points to a float64 variable in which to store the value of the flag. func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) { f.VarP(newFloat64Value(value, p), name, "", usage) } // Like Float64Var, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) { f.VarP(newFloat64Value(value, p), name, shorthand, usage) } // Float64Var defines a float64 flag with specified name, default value, and usage string. // The argument p points to a float64 variable in which to store the value of the flag. func Float64Var(p *float64, name string, value float64, usage string) { commandLine.VarP(newFloat64Value(value, p), name, "", usage) } // Like Float64Var, but accepts a shorthand letter that can be used after a single dash. func Float64VarP(p *float64, name, shorthand string, value float64, usage string) { commandLine.VarP(newFloat64Value(value, p), name, shorthand, usage) } // Float64 defines a float64 flag with specified name, default value, and usage string. // The return value is the address of a float64 variable that stores the value of the flag. func (f *FlagSet) Float64(name string, value float64, usage string) *float64 { p := new(float64) f.Float64VarP(p, name, "", value, usage) return p } // Like Float64, but accepts a shorthand letter that can be used after a single dash. 
func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 { p := new(float64) f.Float64VarP(p, name, shorthand, value, usage) return p } // Float64 defines a float64 flag with specified name, default value, and usage string. // The return value is the address of a float64 variable that stores the value of the flag. func Float64(name string, value float64, usage string) *float64 { return commandLine.Float64P(name, "", value, usage) } // Like Float64, but accepts a shorthand letter that can be used after a single dash. func Float64P(name, shorthand string, value float64, usage string) *float64 { return commandLine.Float64P(name, shorthand, value, usage) } // DurationVar defines a time.Duration flag with specified name, default value, and usage string. // The argument p points to a time.Duration variable in which to store the value of the flag. func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) { f.VarP(newDurationValue(value, p), name, "", usage) } // Like DurationVar, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { f.VarP(newDurationValue(value, p), name, shorthand, usage) } // DurationVar defines a time.Duration flag with specified name, default value, and usage string. // The argument p points to a time.Duration variable in which to store the value of the flag. func DurationVar(p *time.Duration, name string, value time.Duration, usage string) { commandLine.VarP(newDurationValue(value, p), name, "", usage) } // Like DurationVar, but accepts a shorthand letter that can be used after a single dash. func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { commandLine.VarP(newDurationValue(value, p), name, shorthand, usage) } // Duration defines a time.Duration flag with specified name, default value, and usage string. 
// The return value is the address of a time.Duration variable that stores the value of the flag. func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration { p := new(time.Duration) f.DurationVarP(p, name, "", value, usage) return p } // Like Duration, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { p := new(time.Duration) f.DurationVarP(p, name, shorthand, value, usage) return p } // Duration defines a time.Duration flag with specified name, default value, and usage string. // The return value is the address of a time.Duration variable that stores the value of the flag. func Duration(name string, value time.Duration, usage string) *time.Duration { return commandLine.DurationP(name, "", value, usage) } // Like Duration, but accepts a shorthand letter that can be used after a single dash. func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { return commandLine.DurationP(name, shorthand, value, usage) } // Var defines a flag with the specified name and usage string. The type and // value of the flag are represented by the first argument, of type Value, which // typically holds a user-defined implementation of Value. For instance, the // caller could create a flag that turns a comma-separated string into a slice // of strings by giving the slice the methods of Value; in particular, Set would // decompose the comma-separated string into the slice. func (f *FlagSet) Var(value Value, name string, usage string) { f.VarP(value, name, "", usage) } // Like Var, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) VarP(value Value, name, shorthand, usage string) { // Remember the default value as a string; it won't change. 
flag := &Flag{name, shorthand, usage, value, value.String(), false} f.AddFlag(flag) } func (f *FlagSet) AddFlag(flag *Flag) { _, alreadythere := f.formal[flag.Name] if alreadythere { msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name) fmt.Fprintln(f.out(), msg) panic(msg) // Happens only if flags are declared with identical names } if f.formal == nil { f.formal = make(map[string]*Flag) } f.formal[flag.Name] = flag if len(flag.Shorthand) == 0 { return } if len(flag.Shorthand) > 1 { fmt.Fprintf(f.out(), "%s shorthand more than ASCII character: %s\n", f.name, flag.Shorthand) panic("shorthand is more than one character") } if f.shorthands == nil { f.shorthands = make(map[byte]*Flag) } c := flag.Shorthand[0] old, alreadythere := f.shorthands[c] if alreadythere { fmt.Fprintf(f.out(), "%s shorthand reused: %q for %s and %s\n", f.name, c, flag.Name, old.Name) panic("shorthand redefinition") } f.shorthands[c] = flag } // Var defines a flag with the specified name and usage string. The type and // value of the flag are represented by the first argument, of type Value, which // typically holds a user-defined implementation of Value. For instance, the // caller could create a flag that turns a comma-separated string into a slice // of strings by giving the slice the methods of Value; in particular, Set would // decompose the comma-separated string into the slice. func Var(value Value, name string, usage string) { commandLine.VarP(value, name, "", usage) } // Like Var, but accepts a shorthand letter that can be used after a single dash. func VarP(value Value, name, shorthand, usage string) { commandLine.VarP(value, name, shorthand, usage) } // failf prints to standard error a formatted error and usage message and // returns the error. func (f *FlagSet) failf(format string, a ...interface{}) error { err := fmt.Errorf(format, a...) 
fmt.Fprintln(f.out(), err) f.usage() return err } // usage calls the Usage method for the flag set, or the usage function if // the flag set is commandLine. func (f *FlagSet) usage() { if f == commandLine { Usage() } else if f.Usage == nil { defaultUsage(f) } else { f.Usage() } } func (f *FlagSet) setFlag(flag *Flag, value string, origArg string) error { if err := flag.Value.Set(value); err != nil { return f.failf("invalid argument %q for %s: %v", value, origArg, err) } // mark as visited for Visit() if f.actual == nil { f.actual = make(map[string]*Flag) } f.actual[flag.Name] = flag flag.Changed = true return nil } func (f *FlagSet) parseLongArg(s string, args []string) (a []string, err error) { a = args if len(s) == 2 { // "--" terminates the flags f.args = append(f.args, args...) return } name := s[2:] if len(name) == 0 || name[0] == '-' || name[0] == '=' { err = f.failf("bad flag syntax: %s", s) return } split := strings.SplitN(name, "=", 2) name = split[0] m := f.formal flag, alreadythere := m[name] // BUG if !alreadythere { if name == "help" { // special case for nice help message. f.usage() return args, ErrHelp } err = f.failf("unknown flag: --%s", name) return } if len(split) == 1 { if _, ok := flag.Value.(*boolValue); !ok { err = f.failf("flag needs an argument: %s", s) return } f.setFlag(flag, "true", s) } else { if e := f.setFlag(flag, split[1], s); e != nil { err = e return } } return args, nil } func (f *FlagSet) parseShortArg(s string, args []string) (a []string, err error) { a = args shorthands := s[1:] for i := 0; i < len(shorthands); i++ { c := shorthands[i] flag, alreadythere := f.shorthands[c] if !alreadythere { if c == 'h' { // special case for nice help message. 
f.usage() err = ErrHelp return } //TODO continue on error err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands) if len(args) == 0 { return } } if alreadythere { if _, ok := flag.Value.(*boolValue); ok { f.setFlag(flag, "true", s) continue } if i < len(shorthands)-1 { if e := f.setFlag(flag, shorthands[i+1:], s); e != nil { err = e return } break } if len(args) == 0 { err = f.failf("flag needs an argument: %q in -%s", c, shorthands) return } if e := f.setFlag(flag, args[0], s); e != nil { err = e return } } a = args[1:] break // should be unnecessary } return } func (f *FlagSet) parseArgs(args []string) (err error) { for len(args) > 0 { s := args[0] args = args[1:] if len(s) == 0 || s[0] != '-' || len(s) == 1 { if !f.interspersed { f.args = append(f.args, s) f.args = append(f.args, args...) return nil } f.args = append(f.args, s) continue } if s[1] == '-' { args, err = f.parseLongArg(s, args) } else { args, err = f.parseShortArg(s, args) } } return } // Parse parses flag definitions from the argument list, which should not // include the command name. Must be called after all flags in the FlagSet // are defined and before flags are accessed by the program. // The return value will be ErrHelp if -help was set but not defined. func (f *FlagSet) Parse(arguments []string) error { f.parsed = true f.args = make([]string, 0, len(arguments)) err := f.parseArgs(arguments) if err != nil { switch f.errorHandling { case ContinueOnError: return err case ExitOnError: os.Exit(2) case PanicOnError: panic(err) } } return nil } // Parsed reports whether f.Parse has been called. func (f *FlagSet) Parsed() bool { return f.parsed } // Parse parses the command-line flags from os.Args[1:]. Must be called // after all flags are defined and before flags are accessed by the program. func Parse() { // Ignore errors; commandLine is set for ExitOnError. commandLine.Parse(os.Args[1:]) } // Whether to support interspersed option/non-option arguments. 
func SetInterspersed(interspersed bool) { commandLine.SetInterspersed(interspersed) } // Parsed returns true if the command-line flags have been parsed. func Parsed() bool { return commandLine.Parsed() } // The default set of command-line flags, parsed from os.Args. var commandLine = NewFlagSet(os.Args[0], ExitOnError) // NewFlagSet returns a new, empty flag set with the specified name and // error handling property. func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { f := &FlagSet{ name: name, errorHandling: errorHandling, interspersed: true, } return f } // Whether to support interspersed option/non-option arguments. func (f *FlagSet) SetInterspersed(interspersed bool) { f.interspersed = interspersed } // Init sets the name and error handling property for a flag set. // By default, the zero FlagSet uses an empty name and the // ContinueOnError error handling policy. func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { f.name = name f.errorHandling = errorHandling } ================================================ FILE: vendor/go4.org/LICENSE ================================================ Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "{}" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. 
Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================ FILE: vendor/go4.org/errorutil/highlight.go ================================================ /* Copyright 2011 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Package errorutil helps make better error messages. package errorutil // import "go4.org/errorutil" import ( "bufio" "bytes" "fmt" "io" "strings" ) // HighlightBytePosition takes a reader and the location in bytes of a parse // error (for instance, from json.SyntaxError.Offset) and returns the line, column, // and pretty-printed context around the error with an arrow indicating the exact // position of the syntax error. 
func HighlightBytePosition(f io.Reader, pos int64) (line, col int, highlight string) { line = 1 br := bufio.NewReader(f) lastLine := "" thisLine := new(bytes.Buffer) for n := int64(0); n < pos; n++ { b, err := br.ReadByte() if err != nil { break } if b == '\n' { lastLine = thisLine.String() thisLine.Reset() line++ col = 1 } else { col++ thisLine.WriteByte(b) } } if line > 1 { highlight += fmt.Sprintf("%5d: %s\n", line-1, lastLine) } highlight += fmt.Sprintf("%5d: %s\n", line, thisLine.String()) highlight += fmt.Sprintf("%s^\n", strings.Repeat(" ", col+5)) return } ================================================ FILE: vendor/golang.org/x/crypto/LICENSE ================================================ Copyright (c) 2009 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================ FILE: vendor/golang.org/x/crypto/PATENTS ================================================ Additional IP Rights Grant (Patents) "This implementation" means the copyrightable works distributed by Google as part of the Go project. Google hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, transfer and otherwise run, modify and propagate the contents of this implementation of Go, where such license applies only to those patent claims, both currently owned or controlled by Google and acquired in the future, licensable by Google that are necessarily infringed by this implementation of Go. This grant does not include claims that would be infringed only as a consequence of further modification of this implementation. If you or your agent or exclusive licensee institute or order or agree to the institution of patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that this implementation of Go or any code incorporated within this implementation of Go constitutes direct or contributory patent infringement, or inducement of patent infringement, then any patent rights granted to you under this License for this implementation of Go shall terminate as of the date such litigation is filed. 
================================================ FILE: vendor/golang.org/x/crypto/ssh/terminal/terminal.go ================================================ // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package terminal import ( "bytes" "io" "sync" "unicode/utf8" ) // EscapeCodes contains escape sequences that can be written to the terminal in // order to achieve different styles of text. type EscapeCodes struct { // Foreground colors Black, Red, Green, Yellow, Blue, Magenta, Cyan, White []byte // Reset all attributes Reset []byte } var vt100EscapeCodes = EscapeCodes{ Black: []byte{keyEscape, '[', '3', '0', 'm'}, Red: []byte{keyEscape, '[', '3', '1', 'm'}, Green: []byte{keyEscape, '[', '3', '2', 'm'}, Yellow: []byte{keyEscape, '[', '3', '3', 'm'}, Blue: []byte{keyEscape, '[', '3', '4', 'm'}, Magenta: []byte{keyEscape, '[', '3', '5', 'm'}, Cyan: []byte{keyEscape, '[', '3', '6', 'm'}, White: []byte{keyEscape, '[', '3', '7', 'm'}, Reset: []byte{keyEscape, '[', '0', 'm'}, } // Terminal contains the state for running a VT100 terminal that is capable of // reading lines of input. type Terminal struct { // AutoCompleteCallback, if non-null, is called for each keypress with // the full input line and the current position of the cursor (in // bytes, as an index into |line|). If it returns ok=false, the key // press is processed normally. Otherwise it returns a replacement line // and the new cursor position. AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool) // Escape contains a pointer to the escape codes for this terminal. // It's always a valid pointer, although the escape codes themselves // may be empty if the terminal doesn't support them. Escape *EscapeCodes // lock protects the terminal and the state in this object from // concurrent processing of a key press and a Write() call. 
lock sync.Mutex c io.ReadWriter prompt []rune // line is the current line being entered. line []rune // pos is the logical position of the cursor in line pos int // echo is true if local echo is enabled echo bool // pasteActive is true iff there is a bracketed paste operation in // progress. pasteActive bool // cursorX contains the current X value of the cursor where the left // edge is 0. cursorY contains the row number where the first row of // the current line is 0. cursorX, cursorY int // maxLine is the greatest value of cursorY so far. maxLine int termWidth, termHeight int // outBuf contains the terminal data to be sent. outBuf []byte // remainder contains the remainder of any partial key sequences after // a read. It aliases into inBuf. remainder []byte inBuf [256]byte // history contains previously entered commands so that they can be // accessed with the up and down keys. history stRingBuffer // historyIndex stores the currently accessed history entry, where zero // means the immediately previous entry. historyIndex int // When navigating up and down the history it's possible to return to // the incomplete, initial line. That value is stored in // historyPending. historyPending string } // NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is // a local terminal, that terminal must first have been put into raw mode. // prompt is a string that is written at the start of each input line (i.e. // "> "). 
func NewTerminal(c io.ReadWriter, prompt string) *Terminal { return &Terminal{ Escape: &vt100EscapeCodes, c: c, prompt: []rune(prompt), termWidth: 80, termHeight: 24, echo: true, historyIndex: -1, } } const ( keyCtrlD = 4 keyCtrlU = 21 keyEnter = '\r' keyEscape = 27 keyBackspace = 127 keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota keyUp keyDown keyLeft keyRight keyAltLeft keyAltRight keyHome keyEnd keyDeleteWord keyDeleteLine keyClearScreen keyPasteStart keyPasteEnd ) var pasteStart = []byte{keyEscape, '[', '2', '0', '0', '~'} var pasteEnd = []byte{keyEscape, '[', '2', '0', '1', '~'} // bytesToKey tries to parse a key sequence from b. If successful, it returns // the key and the remainder of the input. Otherwise it returns utf8.RuneError. func bytesToKey(b []byte, pasteActive bool) (rune, []byte) { if len(b) == 0 { return utf8.RuneError, nil } if !pasteActive { switch b[0] { case 1: // ^A return keyHome, b[1:] case 5: // ^E return keyEnd, b[1:] case 8: // ^H return keyBackspace, b[1:] case 11: // ^K return keyDeleteLine, b[1:] case 12: // ^L return keyClearScreen, b[1:] case 23: // ^W return keyDeleteWord, b[1:] } } if b[0] != keyEscape { if !utf8.FullRune(b) { return utf8.RuneError, b } r, l := utf8.DecodeRune(b) return r, b[l:] } if !pasteActive && len(b) >= 3 && b[0] == keyEscape && b[1] == '[' { switch b[2] { case 'A': return keyUp, b[3:] case 'B': return keyDown, b[3:] case 'C': return keyRight, b[3:] case 'D': return keyLeft, b[3:] case 'H': return keyHome, b[3:] case 'F': return keyEnd, b[3:] } } if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' { switch b[5] { case 'C': return keyAltRight, b[6:] case 'D': return keyAltLeft, b[6:] } } if !pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteStart) { return keyPasteStart, b[6:] } if pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteEnd) { return keyPasteEnd, b[6:] } // If we get here then we have a key that we don't recognise, or a // 
partial sequence. It's not clear how one should find the end of a // sequence without knowing them all, but it seems that [a-zA-Z~] only // appears at the end of a sequence. for i, c := range b[0:] { if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '~' { return keyUnknown, b[i+1:] } } return utf8.RuneError, b } // queue appends data to the end of t.outBuf func (t *Terminal) queue(data []rune) { t.outBuf = append(t.outBuf, []byte(string(data))...) } var eraseUnderCursor = []rune{' ', keyEscape, '[', 'D'} var space = []rune{' '} func isPrintable(key rune) bool { isInSurrogateArea := key >= 0xd800 && key <= 0xdbff return key >= 32 && !isInSurrogateArea } // moveCursorToPos appends data to t.outBuf which will move the cursor to the // given, logical position in the text. func (t *Terminal) moveCursorToPos(pos int) { if !t.echo { return } x := visualLength(t.prompt) + pos y := x / t.termWidth x = x % t.termWidth up := 0 if y < t.cursorY { up = t.cursorY - y } down := 0 if y > t.cursorY { down = y - t.cursorY } left := 0 if x < t.cursorX { left = t.cursorX - x } right := 0 if x > t.cursorX { right = x - t.cursorX } t.cursorX = x t.cursorY = y t.move(up, down, left, right) } func (t *Terminal) move(up, down, left, right int) { movement := make([]rune, 3*(up+down+left+right)) m := movement for i := 0; i < up; i++ { m[0] = keyEscape m[1] = '[' m[2] = 'A' m = m[3:] } for i := 0; i < down; i++ { m[0] = keyEscape m[1] = '[' m[2] = 'B' m = m[3:] } for i := 0; i < left; i++ { m[0] = keyEscape m[1] = '[' m[2] = 'D' m = m[3:] } for i := 0; i < right; i++ { m[0] = keyEscape m[1] = '[' m[2] = 'C' m = m[3:] } t.queue(movement) } func (t *Terminal) clearLineToRight() { op := []rune{keyEscape, '[', 'K'} t.queue(op) } const maxLineLength = 4096 func (t *Terminal) setLine(newLine []rune, newPos int) { if t.echo { t.moveCursorToPos(0) t.writeLine(newLine) for i := len(newLine); i < len(t.line); i++ { t.writeLine(space) } t.moveCursorToPos(newPos) } t.line = newLine t.pos = newPos } 
// advanceCursor updates the tracked cursor position (cursorX, cursorY) after
// writing the given number of printable places, wrapping onto following rows
// at termWidth and growing maxLine as the cursor reaches new rows.
func (t *Terminal) advanceCursor(places int) {
	t.cursorX += places
	t.cursorY += t.cursorX / t.termWidth
	if t.cursorY > t.maxLine {
		t.maxLine = t.cursorY
	}
	t.cursorX = t.cursorX % t.termWidth
	if places > 0 && t.cursorX == 0 {
		// Normally terminals will advance the current position
		// when writing a character. But that doesn't happen
		// for the last character in a line. However, when
		// writing a character (except a new line) that causes
		// a line wrap, the position will be advanced two
		// places.
		//
		// So, if we are stopping at the end of a line, we
		// need to write a newline so that our cursor can be
		// advanced to the next line.
		t.outBuf = append(t.outBuf, '\n')
	}
}

// eraseNPreviousChars deletes the n characters before the cursor from t.line
// (clamped to the number of characters actually available) and, when echo is
// on, repaints the tail of the line and blanks the vacated cells on screen.
func (t *Terminal) eraseNPreviousChars(n int) {
	if n == 0 {
		return
	}

	// Never erase past the start of the line.
	if t.pos < n {
		n = t.pos
	}
	t.pos -= n
	t.moveCursorToPos(t.pos)

	// Shift the remainder of the line left over the erased characters.
	copy(t.line[t.pos:], t.line[n+t.pos:])
	t.line = t.line[:len(t.line)-n]
	if t.echo {
		// Redraw the shifted tail, then overwrite the n now-stale
		// trailing cells with spaces before returning the cursor.
		t.writeLine(t.line[t.pos:])
		for i := 0; i < n; i++ {
			t.queue(space)
		}
		t.advanceCursor(n)
		t.moveCursorToPos(t.pos)
	}
}

// countToLeftWord returns the number of characters from the cursor to the
// start of the previous word.
func (t *Terminal) countToLeftWord() int {
	if t.pos == 0 {
		return 0
	}

	pos := t.pos - 1
	// Skip any spaces immediately left of the cursor.
	for pos > 0 {
		if t.line[pos] != ' ' {
			break
		}
		pos--
	}
	// Then walk left over the word to its first character.
	for pos > 0 {
		if t.line[pos] == ' ' {
			pos++
			break
		}
		pos--
	}

	return t.pos - pos
}

// countToRightWord returns the number of characters from the cursor to the
// start of the next word.
func (t *Terminal) countToRightWord() int {
	pos := t.pos
	// Skip over the remainder of the current word.
	for pos < len(t.line) {
		if t.line[pos] == ' ' {
			break
		}
		pos++
	}
	// Then skip the spaces to reach the start of the next word.
	for pos < len(t.line) {
		if t.line[pos] != ' ' {
			break
		}
		pos++
	}
	return pos - t.pos
}

// visualLength returns the number of visible glyphs in runes.
func visualLength(runes []rune) int { inEscapeSeq := false length := 0 for _, r := range runes { switch { case inEscapeSeq: if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') { inEscapeSeq = false } case r == '\x1b': inEscapeSeq = true default: length++ } } return length } // handleKey processes the given key and, optionally, returns a line of text // that the user has entered. func (t *Terminal) handleKey(key rune) (line string, ok bool) { if t.pasteActive && key != keyEnter { t.addKeyToLine(key) return } switch key { case keyBackspace: if t.pos == 0 { return } t.eraseNPreviousChars(1) case keyAltLeft: // move left by a word. t.pos -= t.countToLeftWord() t.moveCursorToPos(t.pos) case keyAltRight: // move right by a word. t.pos += t.countToRightWord() t.moveCursorToPos(t.pos) case keyLeft: if t.pos == 0 { return } t.pos-- t.moveCursorToPos(t.pos) case keyRight: if t.pos == len(t.line) { return } t.pos++ t.moveCursorToPos(t.pos) case keyHome: if t.pos == 0 { return } t.pos = 0 t.moveCursorToPos(t.pos) case keyEnd: if t.pos == len(t.line) { return } t.pos = len(t.line) t.moveCursorToPos(t.pos) case keyUp: entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1) if !ok { return "", false } if t.historyIndex == -1 { t.historyPending = string(t.line) } t.historyIndex++ runes := []rune(entry) t.setLine(runes, len(runes)) case keyDown: switch t.historyIndex { case -1: return case 0: runes := []rune(t.historyPending) t.setLine(runes, len(runes)) t.historyIndex-- default: entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1) if ok { t.historyIndex-- runes := []rune(entry) t.setLine(runes, len(runes)) } } case keyEnter: t.moveCursorToPos(len(t.line)) t.queue([]rune("\r\n")) line = string(t.line) ok = true t.line = t.line[:0] t.pos = 0 t.cursorX = 0 t.cursorY = 0 t.maxLine = 0 case keyDeleteWord: // Delete zero or more spaces and then one or more characters. 
t.eraseNPreviousChars(t.countToLeftWord()) case keyDeleteLine: // Delete everything from the current cursor position to the // end of line. for i := t.pos; i < len(t.line); i++ { t.queue(space) t.advanceCursor(1) } t.line = t.line[:t.pos] t.moveCursorToPos(t.pos) case keyCtrlD: // Erase the character under the current position. // The EOF case when the line is empty is handled in // readLine(). if t.pos < len(t.line) { t.pos++ t.eraseNPreviousChars(1) } case keyCtrlU: t.eraseNPreviousChars(t.pos) case keyClearScreen: // Erases the screen and moves the cursor to the home position. t.queue([]rune("\x1b[2J\x1b[H")) t.queue(t.prompt) t.cursorX, t.cursorY = 0, 0 t.advanceCursor(visualLength(t.prompt)) t.setLine(t.line, t.pos) default: if t.AutoCompleteCallback != nil { prefix := string(t.line[:t.pos]) suffix := string(t.line[t.pos:]) t.lock.Unlock() newLine, newPos, completeOk := t.AutoCompleteCallback(prefix+suffix, len(prefix), key) t.lock.Lock() if completeOk { t.setLine([]rune(newLine), utf8.RuneCount([]byte(newLine)[:newPos])) return } } if !isPrintable(key) { return } if len(t.line) == maxLineLength { return } t.addKeyToLine(key) } return } // addKeyToLine inserts the given key at the current position in the current // line. 
func (t *Terminal) addKeyToLine(key rune) { if len(t.line) == cap(t.line) { newLine := make([]rune, len(t.line), 2*(1+len(t.line))) copy(newLine, t.line) t.line = newLine } t.line = t.line[:len(t.line)+1] copy(t.line[t.pos+1:], t.line[t.pos:]) t.line[t.pos] = key if t.echo { t.writeLine(t.line[t.pos:]) } t.pos++ t.moveCursorToPos(t.pos) } func (t *Terminal) writeLine(line []rune) { for len(line) != 0 { remainingOnLine := t.termWidth - t.cursorX todo := len(line) if todo > remainingOnLine { todo = remainingOnLine } t.queue(line[:todo]) t.advanceCursor(visualLength(line[:todo])) line = line[todo:] } } func (t *Terminal) Write(buf []byte) (n int, err error) { t.lock.Lock() defer t.lock.Unlock() if t.cursorX == 0 && t.cursorY == 0 { // This is the easy case: there's nothing on the screen that we // have to move out of the way. return t.c.Write(buf) } // We have a prompt and possibly user input on the screen. We // have to clear it first. t.move(0 /* up */, 0 /* down */, t.cursorX /* left */, 0 /* right */) t.cursorX = 0 t.clearLineToRight() for t.cursorY > 0 { t.move(1 /* up */, 0, 0, 0) t.cursorY-- t.clearLineToRight() } if _, err = t.c.Write(t.outBuf); err != nil { return } t.outBuf = t.outBuf[:0] if n, err = t.c.Write(buf); err != nil { return } t.writeLine(t.prompt) if t.echo { t.writeLine(t.line) } t.moveCursorToPos(t.pos) if _, err = t.c.Write(t.outBuf); err != nil { return } t.outBuf = t.outBuf[:0] return } // ReadPassword temporarily changes the prompt and reads a password, without // echo, from the terminal. func (t *Terminal) ReadPassword(prompt string) (line string, err error) { t.lock.Lock() defer t.lock.Unlock() oldPrompt := t.prompt t.prompt = []rune(prompt) t.echo = false line, err = t.readLine() t.prompt = oldPrompt t.echo = true return } // ReadLine returns a line of input from the terminal. 
func (t *Terminal) ReadLine() (line string, err error) { t.lock.Lock() defer t.lock.Unlock() return t.readLine() } func (t *Terminal) readLine() (line string, err error) { // t.lock must be held at this point if t.cursorX == 0 && t.cursorY == 0 { t.writeLine(t.prompt) t.c.Write(t.outBuf) t.outBuf = t.outBuf[:0] } lineIsPasted := t.pasteActive for { rest := t.remainder lineOk := false for !lineOk { var key rune key, rest = bytesToKey(rest, t.pasteActive) if key == utf8.RuneError { break } if !t.pasteActive { if key == keyCtrlD { if len(t.line) == 0 { return "", io.EOF } } if key == keyPasteStart { t.pasteActive = true if len(t.line) == 0 { lineIsPasted = true } continue } } else if key == keyPasteEnd { t.pasteActive = false continue } if !t.pasteActive { lineIsPasted = false } line, lineOk = t.handleKey(key) } if len(rest) > 0 { n := copy(t.inBuf[:], rest) t.remainder = t.inBuf[:n] } else { t.remainder = nil } t.c.Write(t.outBuf) t.outBuf = t.outBuf[:0] if lineOk { if t.echo { t.historyIndex = -1 t.history.Add(line) } if lineIsPasted { err = ErrPasteIndicator } return } // t.remainder is a slice at the beginning of t.inBuf // containing a partial key sequence readBuf := t.inBuf[len(t.remainder):] var n int t.lock.Unlock() n, err = t.c.Read(readBuf) t.lock.Lock() if err != nil { return } t.remainder = t.inBuf[:n+len(t.remainder)] } panic("unreachable") // for Go 1.0. } // SetPrompt sets the prompt to be used when reading subsequent lines. func (t *Terminal) SetPrompt(prompt string) { t.lock.Lock() defer t.lock.Unlock() t.prompt = []rune(prompt) } func (t *Terminal) clearAndRepaintLinePlusNPrevious(numPrevLines int) { // Move cursor to column zero at the start of the line. t.move(t.cursorY, 0, t.cursorX, 0) t.cursorX, t.cursorY = 0, 0 t.clearLineToRight() for t.cursorY < numPrevLines { // Move down a line t.move(0, 1, 0, 0) t.cursorY++ t.clearLineToRight() } // Move back to beginning. 
t.move(t.cursorY, 0, 0, 0) t.cursorX, t.cursorY = 0, 0 t.queue(t.prompt) t.advanceCursor(visualLength(t.prompt)) t.writeLine(t.line) t.moveCursorToPos(t.pos) } func (t *Terminal) SetSize(width, height int) error { t.lock.Lock() defer t.lock.Unlock() if width == 0 { width = 1 } oldWidth := t.termWidth t.termWidth, t.termHeight = width, height switch { case width == oldWidth: // If the width didn't change then nothing else needs to be // done. return nil case len(t.line) == 0 && t.cursorX == 0 && t.cursorY == 0: // If there is nothing on current line and no prompt printed, // just do nothing return nil case width < oldWidth: // Some terminals (e.g. xterm) will truncate lines that were // too long when shinking. Others, (e.g. gnome-terminal) will // attempt to wrap them. For the former, repainting t.maxLine // works great, but that behaviour goes badly wrong in the case // of the latter because they have doubled every full line. // We assume that we are working on a terminal that wraps lines // and adjust the cursor position based on every previous line // wrapping and turning into two. This causes the prompt on // xterms to move upwards, which isn't great, but it avoids a // huge mess with gnome-terminal. if t.cursorX >= t.termWidth { t.cursorX = t.termWidth - 1 } t.cursorY *= 2 t.clearAndRepaintLinePlusNPrevious(t.maxLine * 2) case width > oldWidth: // If the terminal expands then our position calculations will // be wrong in the future because we think the cursor is // |t.pos| chars into the string, but there will be a gap at // the end of any wrapped line. // // But the position will actually be correct until we move, so // we can move back to the beginning and repaint everything. 
t.clearAndRepaintLinePlusNPrevious(t.maxLine) } _, err := t.c.Write(t.outBuf) t.outBuf = t.outBuf[:0] return err } type pasteIndicatorError struct{} func (pasteIndicatorError) Error() string { return "terminal: ErrPasteIndicator not correctly handled" } // ErrPasteIndicator may be returned from ReadLine as the error, in addition // to valid line data. It indicates that bracketed paste mode is enabled and // that the returned line consists only of pasted data. Programs may wish to // interpret pasted data more literally than typed data. var ErrPasteIndicator = pasteIndicatorError{} // SetBracketedPasteMode requests that the terminal bracket paste operations // with markers. Not all terminals support this but, if it is supported, then // enabling this mode will stop any autocomplete callback from running due to // pastes. Additionally, any lines that are completely pasted will be returned // from ReadLine with the error set to ErrPasteIndicator. func (t *Terminal) SetBracketedPasteMode(on bool) { if on { io.WriteString(t.c, "\x1b[?2004h") } else { io.WriteString(t.c, "\x1b[?2004l") } } // stRingBuffer is a ring buffer of strings. type stRingBuffer struct { // entries contains max elements. entries []string max int // head contains the index of the element most recently added to the ring. head int // size contains the number of elements in the ring. size int } func (s *stRingBuffer) Add(a string) { if s.entries == nil { const defaultNumEntries = 100 s.entries = make([]string, defaultNumEntries) s.max = defaultNumEntries } s.head = (s.head + 1) % s.max s.entries[s.head] = a if s.size < s.max { s.size++ } } // NthPreviousEntry returns the value passed to the nth previous call to Add. // If n is zero then the immediately prior value is returned, if one, then the // next most recent, and so on. If such an element doesn't exist then ok is // false. 
func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) { if n >= s.size { return "", false } index := s.head - n if index < 0 { index += s.max } return s.entries[index], true } ================================================ FILE: vendor/golang.org/x/crypto/ssh/terminal/util.go ================================================ // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build darwin dragonfly freebsd linux,!appengine netbsd openbsd // Package terminal provides support functions for dealing with terminals, as // commonly found on UNIX systems. // // Putting a terminal into raw mode is the most common requirement: // // oldState, err := terminal.MakeRaw(0) // if err != nil { // panic(err) // } // defer terminal.Restore(0, oldState) package terminal // import "golang.org/x/crypto/ssh/terminal" import ( "io" "syscall" "unsafe" ) // State contains the state of a terminal. type State struct { termios syscall.Termios } // IsTerminal returns true if the given file descriptor is a terminal. func IsTerminal(fd int) bool { var termios syscall.Termios _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) return err == 0 } // MakeRaw put the terminal connected to the given file descriptor into raw // mode and returns the previous state of the terminal so that it can be // restored. 
func MakeRaw(fd int) (*State, error) {
	var oldState State
	// Read the current termios settings so they can be returned for a
	// later Restore. Syscall6 returns a syscall.Errno; nonzero means the
	// ioctl failed.
	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {
		return nil, err
	}

	// Work on a copy so oldState stays pristine: disable input translation
	// (strip, CR/NL mapping, XON/XOFF flow control) and local echo,
	// canonical line editing, and signal generation.
	newState := oldState.termios
	newState.Iflag &^= syscall.ISTRIP | syscall.INLCR | syscall.ICRNL | syscall.IGNCR | syscall.IXON | syscall.IXOFF
	newState.Lflag &^= syscall.ECHO | syscall.ICANON | syscall.ISIG
	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 {
		return nil, err
	}

	return &oldState, nil
}

// GetState returns the current state of a terminal which may be useful to
// restore the terminal after a signal.
func GetState(fd int) (*State, error) {
	var oldState State
	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {
		return nil, err
	}

	return &oldState, nil
}

// Restore restores the terminal connected to the given file descriptor to a
// previous state.
func Restore(fd int, state *State) error {
	// err is a syscall.Errno; its zero value satisfies the error interface
	// as nil-equivalent only via this direct return of the Errno type.
	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&state.termios)), 0, 0, 0)
	return err
}

// GetSize returns the dimensions of the given terminal.
func GetSize(fd int) (width, height int, err error) {
	// TIOCGWINSZ fills a winsize struct: rows, cols, xpixel, ypixel —
	// hence dimensions[0] is the height and dimensions[1] the width.
	var dimensions [4]uint16

	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 {
		return -1, -1, err
	}
	return int(dimensions[1]), int(dimensions[0]), nil
}

// ReadPassword reads a line of input from a terminal without local echo. This
// is commonly used for inputting passwords and other sensitive data. The slice
// returned does not include the \n.
func ReadPassword(fd int) ([]byte, error) { var oldState syscall.Termios if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); err != 0 { return nil, err } newState := oldState newState.Lflag &^= syscall.ECHO newState.Lflag |= syscall.ICANON | syscall.ISIG newState.Iflag |= syscall.ICRNL if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 { return nil, err } defer func() { syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0) }() var buf [16]byte var ret []byte for { n, err := syscall.Read(fd, buf[:]) if err != nil { return nil, err } if n == 0 { if len(ret) == 0 { return nil, io.EOF } break } if buf[n-1] == '\n' { n-- } ret = append(ret, buf[:n]...) if n < len(buf) { break } } return ret, nil } ================================================ FILE: vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go ================================================ // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build darwin dragonfly freebsd netbsd openbsd package terminal import "syscall" const ioctlReadTermios = syscall.TIOCGETA const ioctlWriteTermios = syscall.TIOCSETA ================================================ FILE: vendor/golang.org/x/crypto/ssh/terminal/util_linux.go ================================================ // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package terminal // These constants are declared here, rather than importing // them from the syscall package as some syscall packages, even // on linux, for example gccgo, do not declare them. 
const ioctlReadTermios = 0x5401 // syscall.TCGETS const ioctlWriteTermios = 0x5402 // syscall.TCSETS ================================================ FILE: vendor/golang.org/x/crypto/ssh/terminal/util_windows.go ================================================ // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build windows // Package terminal provides support functions for dealing with terminals, as // commonly found on UNIX systems. // // Putting a terminal into raw mode is the most common requirement: // // oldState, err := terminal.MakeRaw(0) // if err != nil { // panic(err) // } // defer terminal.Restore(0, oldState) package terminal import ( "io" "syscall" "unsafe" ) const ( enableLineInput = 2 enableEchoInput = 4 enableProcessedInput = 1 enableWindowInput = 8 enableMouseInput = 16 enableInsertMode = 32 enableQuickEditMode = 64 enableExtendedFlags = 128 enableAutoPosition = 256 enableProcessedOutput = 1 enableWrapAtEolOutput = 2 ) var kernel32 = syscall.NewLazyDLL("kernel32.dll") var ( procGetConsoleMode = kernel32.NewProc("GetConsoleMode") procSetConsoleMode = kernel32.NewProc("SetConsoleMode") procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") ) type ( short int16 word uint16 coord struct { x short y short } smallRect struct { left short top short right short bottom short } consoleScreenBufferInfo struct { size coord cursorPosition coord attributes word window smallRect maximumWindowSize coord } ) type State struct { mode uint32 } // IsTerminal returns true if the given file descriptor is a terminal. 
func IsTerminal(fd int) bool { var st uint32 r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) return r != 0 && e == 0 } // MakeRaw put the terminal connected to the given file descriptor into raw // mode and returns the previous state of the terminal so that it can be // restored. func MakeRaw(fd int) (*State, error) { var st uint32 _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) if e != 0 { return nil, error(e) } st &^= (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput) _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0) if e != 0 { return nil, error(e) } return &State{st}, nil } // GetState returns the current state of a terminal which may be useful to // restore the terminal after a signal. func GetState(fd int) (*State, error) { var st uint32 _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) if e != 0 { return nil, error(e) } return &State{st}, nil } // Restore restores the terminal connected to the given file descriptor to a // previous state. func Restore(fd int, state *State) error { _, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0) return err } // GetSize returns the dimensions of the given terminal. func GetSize(fd int) (width, height int, err error) { var info consoleScreenBufferInfo _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0) if e != 0 { return 0, 0, error(e) } return int(info.size.x), int(info.size.y), nil } // ReadPassword reads a line of input from a terminal without local echo. This // is commonly used for inputting passwords and other sensitive data. The slice // returned does not include the \n. 
func ReadPassword(fd int) ([]byte, error) {
	var st uint32
	_, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
	if e != 0 {
		return nil, error(e)
	}
	old := st

	// Turn echo off, but keep line buffering and input processing on so the
	// user can still edit the line and end it with Enter.
	st &^= (enableEchoInput)
	st |= (enableProcessedInput | enableLineInput | enableProcessedOutput)
	_, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0)
	if e != 0 {
		return nil, error(e)
	}

	// Restore the caller's console mode on all return paths.
	defer func() {
		syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0)
	}()

	var buf [16]byte
	var ret []byte
	for {
		n, err := syscall.Read(syscall.Handle(fd), buf[:])
		if err != nil {
			return nil, err
		}
		if n == 0 {
			// EOF before any input is an error; EOF after some input just
			// ends the line.
			if len(ret) == 0 {
				return nil, io.EOF
			}
			break
		}
		// Strip a trailing newline; the console reports "\r\n", so drop the
		// '\r' as well when present.
		if buf[n-1] == '\n' {
			n--
		}
		if n > 0 && buf[n-1] == '\r' {
			n--
		}
		ret = append(ret, buf[:n]...)
		// A short read means the line is complete.
		if n < len(buf) {
			break
		}
	}

	return ret, nil
}

================================================
FILE: vendor/gopkg.in/inf.v0/LICENSE
================================================

Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The
Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================ FILE: vendor/gopkg.in/inf.v0/dec.go ================================================ // Package inf (type inf.Dec) implements "infinite-precision" decimal // arithmetic. // "Infinite precision" describes two characteristics: practically unlimited // precision for decimal number representation and no support for calculating // with any specific fixed precision. // (Although there is no practical limit on precision, inf.Dec can only // represent finite decimals.) // // This package is currently in experimental stage and the API may change. // // This package does NOT support: // - rounding to specific precisions (as opposed to specific decimal positions) // - the notion of context (each rounding must be explicit) // - NaN and Inf values, and distinguishing between positive and negative zero // - conversions to and from float32/64 types // // Features considered for possible addition: // + formatting options // + Exp method // + combined operations such as AddRound/MulAdd etc // + exchanging data in decimal32/64/128 formats // package inf // import "gopkg.in/inf.v0" // TODO: // - avoid excessive deep copying (quo and rounders) import ( "fmt" "io" "math/big" "strings" ) // A Dec represents a signed arbitrary-precision decimal. // It is a combination of a sign, an arbitrary-precision integer coefficient // value, and a signed fixed-precision exponent value. 
// The sign and the coefficient value are handled together as a signed value
// and referred to as the unscaled value.
// (Positive and negative zero values are not distinguished.)
// Since the exponent is most commonly non-positive, it is handled in negated
// form and referred to as scale.
//
// The mathematical value of a Dec equals:
//
//     unscaled * 10**(-scale)
//
// Note that different Dec representations may have equal mathematical values.
//
//     unscaled  scale  String()
//     -------------------------
//            0      0    "0"
//            0      2    "0.00"
//            0     -2    "0"
//            1      0    "1"
//          100      2    "1.00"
//           10      0    "10"
//            1     -1    "10"
//
// The zero value for a Dec represents the value 0 with scale 0.
//
// Operations are typically performed through the *Dec type.
// The semantics of the assignment operation "=" for "bare" Dec values is
// undefined and should not be relied on.
//
// Methods are typically of the form:
//
//	func (z *Dec) Op(x, y *Dec) *Dec
//
// and implement operations z = x Op y with the result as receiver; if it
// is one of the operands it may be overwritten (and its memory reused).
// To enable chaining of operations, the result is also returned. Methods
// returning a result other than *Dec take one of the operands as the receiver.
//
// A "bare" Quo method (quotient / division operation) is not provided, as the
// result is not always a finite decimal and thus in general cannot be
// represented as a Dec.
// Instead, in the common case when rounding is (potentially) necessary,
// QuoRound should be used with a Scale and a Rounder.
// QuoExact or QuoRound with RoundExact can be used in the special cases when it
// is known that the result is always a finite decimal.
//
type Dec struct {
	unscaled big.Int // signed coefficient ("unscaled" value)
	scale    Scale   // negated exponent: value = unscaled * 10**(-scale)
}

// Scale represents the type used for the scale of a Dec.
type Scale int32

const scaleSize = 4 // bytes in a Scale value (used by the gob encoding)

// Scaler represents a method for obtaining the scale to use for the result of
// an operation on x and y.
type scaler interface { Scale(x *Dec, y *Dec) Scale } var bigInt = [...]*big.Int{ big.NewInt(0), big.NewInt(1), big.NewInt(2), big.NewInt(3), big.NewInt(4), big.NewInt(5), big.NewInt(6), big.NewInt(7), big.NewInt(8), big.NewInt(9), big.NewInt(10), } var exp10cache [64]big.Int = func() [64]big.Int { e10, e10i := [64]big.Int{}, bigInt[1] for i, _ := range e10 { e10[i].Set(e10i) e10i = new(big.Int).Mul(e10i, bigInt[10]) } return e10 }() // NewDec allocates and returns a new Dec set to the given int64 unscaled value // and scale. func NewDec(unscaled int64, scale Scale) *Dec { return new(Dec).SetUnscaled(unscaled).SetScale(scale) } // NewDecBig allocates and returns a new Dec set to the given *big.Int unscaled // value and scale. func NewDecBig(unscaled *big.Int, scale Scale) *Dec { return new(Dec).SetUnscaledBig(unscaled).SetScale(scale) } // Scale returns the scale of x. func (x *Dec) Scale() Scale { return x.scale } // Unscaled returns the unscaled value of x for u and true for ok when the // unscaled value can be represented as int64; otherwise it returns an undefined // int64 value for u and false for ok. Use x.UnscaledBig().Int64() to avoid // checking the validity of the value when the check is known to be redundant. func (x *Dec) Unscaled() (u int64, ok bool) { u = x.unscaled.Int64() var i big.Int ok = i.SetInt64(u).Cmp(&x.unscaled) == 0 return } // UnscaledBig returns the unscaled value of x as *big.Int. func (x *Dec) UnscaledBig() *big.Int { return &x.unscaled } // SetScale sets the scale of z, with the unscaled value unchanged, and returns // z. // The mathematical value of the Dec changes as if it was multiplied by // 10**(oldscale-scale). func (z *Dec) SetScale(scale Scale) *Dec { z.scale = scale return z } // SetUnscaled sets the unscaled value of z, with the scale unchanged, and // returns z. 
func (z *Dec) SetUnscaled(unscaled int64) *Dec { z.unscaled.SetInt64(unscaled) return z } // SetUnscaledBig sets the unscaled value of z, with the scale unchanged, and // returns z. func (z *Dec) SetUnscaledBig(unscaled *big.Int) *Dec { z.unscaled.Set(unscaled) return z } // Set sets z to the value of x and returns z. // It does nothing if z == x. func (z *Dec) Set(x *Dec) *Dec { if z != x { z.SetUnscaledBig(x.UnscaledBig()) z.SetScale(x.Scale()) } return z } // Sign returns: // // -1 if x < 0 // 0 if x == 0 // +1 if x > 0 // func (x *Dec) Sign() int { return x.UnscaledBig().Sign() } // Neg sets z to -x and returns z. func (z *Dec) Neg(x *Dec) *Dec { z.SetScale(x.Scale()) z.UnscaledBig().Neg(x.UnscaledBig()) return z } // Cmp compares x and y and returns: // // -1 if x < y // 0 if x == y // +1 if x > y // func (x *Dec) Cmp(y *Dec) int { xx, yy := upscale(x, y) return xx.UnscaledBig().Cmp(yy.UnscaledBig()) } // Abs sets z to |x| (the absolute value of x) and returns z. func (z *Dec) Abs(x *Dec) *Dec { z.SetScale(x.Scale()) z.UnscaledBig().Abs(x.UnscaledBig()) return z } // Add sets z to the sum x+y and returns z. // The scale of z is the greater of the scales of x and y. func (z *Dec) Add(x, y *Dec) *Dec { xx, yy := upscale(x, y) z.SetScale(xx.Scale()) z.UnscaledBig().Add(xx.UnscaledBig(), yy.UnscaledBig()) return z } // Sub sets z to the difference x-y and returns z. // The scale of z is the greater of the scales of x and y. func (z *Dec) Sub(x, y *Dec) *Dec { xx, yy := upscale(x, y) z.SetScale(xx.Scale()) z.UnscaledBig().Sub(xx.UnscaledBig(), yy.UnscaledBig()) return z } // Mul sets z to the product x*y and returns z. // The scale of z is the sum of the scales of x and y. func (z *Dec) Mul(x, y *Dec) *Dec { z.SetScale(x.Scale() + y.Scale()) z.UnscaledBig().Mul(x.UnscaledBig(), y.UnscaledBig()) return z } // Round sets z to the value of x rounded to Scale s using Rounder r, and // returns z. 
func (z *Dec) Round(x *Dec, s Scale, r Rounder) *Dec {
	// Rounding is implemented as division by 1 at the requested scale.
	return z.QuoRound(x, NewDec(1, 0), s, r)
}

// QuoRound sets z to the quotient x/y, rounded using the given Rounder to the
// specified scale.
//
// If the rounder is RoundExact but the result can not be expressed exactly at
// the specified scale, QuoRound returns nil, and the value of z is undefined.
//
// There is no corresponding Div method; the equivalent can be achieved through
// the choice of Rounder used.
//
func (z *Dec) QuoRound(x, y *Dec, s Scale, r Rounder) *Dec {
	return z.quo(x, y, sclr{s}, r)
}

// quo implements division with the result scale chosen by s and rounding
// performed by r. It computes into fresh temporaries so that z may alias x
// or y, and returns nil when r (RoundExact) reports the result cannot be
// represented at the chosen scale.
func (z *Dec) quo(x, y *Dec, s scaler, r Rounder) *Dec {
	scl := s.Scale(x, y)
	var zzz *Dec
	if r.UseRemainder() {
		// This rounder needs the remainder, expressed as numerator rA and
		// denominator rB of a rational.
		zz, rA, rB := new(Dec).quoRem(x, y, scl, true, new(big.Int), new(big.Int))
		zzz = r.Round(new(Dec), zz, rA, rB)
	} else {
		zz, _, _ := new(Dec).quoRem(x, y, scl, false, nil, nil)
		zzz = r.Round(new(Dec), zz, nil, nil)
	}
	if zzz == nil {
		// Rounder declined (result not representable exactly).
		return nil
	}
	return z.Set(zzz)
}

// QuoExact sets z to the quotient x/y and returns z when x/y is a finite
// decimal. Otherwise it returns nil and the value of z is undefined.
//
// The scale of a non-nil result is "x.Scale() - y.Scale()" or greater; it is
// calculated so that the remainder will be zero whenever x/y is a finite
// decimal.
func (z *Dec) QuoExact(x, y *Dec) *Dec {
	return z.quo(x, y, scaleQuoExact{}, RoundExact)
}

// quoRem sets z to the quotient x/y with the scale s, and if useRem is true,
// it sets remNum and remDen to the numerator and denominator of the remainder.
// It returns z, remNum and remDen.
//
// The remainder is normalized to the range -1 < r < 1 to simplify rounding;
// that is, the results satisfy the following equation:
//
//	x / y = z + (remNum/remDen) * 10**(-z.Scale())
//
// See Rounder for more details about rounding.
//
func (z *Dec) quoRem(x, y *Dec, s Scale, useRem bool,
	remNum, remDen *big.Int) (*Dec, *big.Int, *big.Int) {
	// difference (required adjustment) compared to "canonical" result scale
	shift := s - (x.Scale() - y.Scale())
	// pointers to adjusted unscaled dividend and divisor
	var ix, iy *big.Int
	switch {
	case shift > 0:
		// increased scale: decimal-shift dividend left
		ix = new(big.Int).Mul(x.UnscaledBig(), exp10(shift))
		iy = y.UnscaledBig()
	case shift < 0:
		// decreased scale: decimal-shift divisor left
		ix = x.UnscaledBig()
		iy = new(big.Int).Mul(y.UnscaledBig(), exp10(-shift))
	default:
		ix = x.UnscaledBig()
		iy = y.UnscaledBig()
	}
	// save a copy of iy in case it is about to be overwritten with the
	// result (possible when z aliases y)
	iy2 := iy
	if iy == z.UnscaledBig() {
		iy2 = new(big.Int).Set(iy)
	}
	// set scale
	z.SetScale(s)
	// set unscaled
	if useRem {
		// Int division, keeping the remainder
		_, intr := z.UnscaledBig().QuoRem(ix, iy, new(big.Int))
		// set remainder
		remNum.Set(intr)
		remDen.Set(iy2)
	} else {
		z.UnscaledBig().Quo(ix, iy)
	}
	return z, remNum, remDen
}

// sclr is a scaler that returns a fixed, caller-supplied scale.
type sclr struct{ s Scale }

func (s sclr) Scale(x, y *Dec) Scale {
	return s.s
}

// scaleQuoExact chooses the minimal scale at which x/y is exact: the larger
// of the multiplicities of 2 and 5 in the reduced denominator determines how
// many extra decimal digits are needed.
type scaleQuoExact struct{}

func (sqe scaleQuoExact) Scale(x, y *Dec) Scale {
	rem := new(big.Rat).SetFrac(x.UnscaledBig(), y.UnscaledBig())
	f2, f5 := factor2(rem.Denom()), factor(rem.Denom(), bigInt[5])
	var f10 Scale
	if f2 > f5 {
		f10 = Scale(f2)
	} else {
		f10 = Scale(f5)
	}
	return x.Scale() - y.Scale() + f10
}

// factor returns the multiplicity of the factor p in n (repeated exact
// division).
func factor(n *big.Int, p *big.Int) int {
	// could be improved for large factors
	d, f := n, 0
	for {
		dd, dm := new(big.Int).DivMod(d, p, new(big.Int))
		if dm.Sign() == 0 {
			f++
			d = dd
		} else {
			break
		}
	}
	return f
}

// factor2 returns the multiplicity of the factor 2 in n, counted as the
// number of trailing zero bits.
func factor2(n *big.Int) int {
	// could be improved for large factors
	f := 0
	for ; n.Bit(f) == 0; f++ {
	}
	return f
}

// upscale brings a and b to a common (the larger) scale; the operand with
// the smaller scale is rescaled, the other is returned unchanged.
func upscale(a, b *Dec) (*Dec, *Dec) {
	if a.Scale() == b.Scale() {
		return a, b
	}
	if a.Scale() > b.Scale() {
		bb := b.rescale(a.Scale())
		return a, bb
	}
	aa := a.rescale(b.Scale())
	return aa, b
}

// exp10 returns 10**x, served from exp10cache when x < 64.
func exp10(x Scale) *big.Int {
	if int(x) < len(exp10cache) {
		return &exp10cache[int(x)]
	}
	return new(big.Int).Exp(bigInt[10], big.NewInt(int64(x)), nil)
}

// rescale returns x expressed with scale newScale: increasing the scale
// multiplies the unscaled value by a power of ten, decreasing it truncates
// (Quo) by a power of ten; x itself is returned when the scale already
// matches.
func (x *Dec) rescale(newScale Scale) *Dec {
	shift := newScale - x.Scale()
	switch {
	case shift < 0:
		e := exp10(-shift)
		return NewDecBig(new(big.Int).Quo(x.UnscaledBig(), e), newScale)
	case shift > 0:
		e := exp10(shift)
		return NewDecBig(new(big.Int).Mul(x.UnscaledBig(), e), newScale)
	}
	return x
}

// zeros is a reusable run of '0' digits; appendZeros copies from it in
// chunks of at most lzeros bytes.
var zeros = []byte("00000000000000000000000000000000" +
	"00000000000000000000000000000000")
var lzeros = Scale(len(zeros))

// appendZeros appends n '0' bytes to s and returns the extended slice.
func appendZeros(s []byte, n Scale) []byte {
	for i := Scale(0); i < n; i += lzeros {
		if n > i+lzeros {
			s = append(s, zeros...)
		} else {
			s = append(s, zeros[0:n-i]...)
		}
	}
	return s
}

// String formats x as a plain decimal string ("-12.345" style); the scale
// gives the number of digits after the decimal point. A nil receiver
// formats as "".
func (x *Dec) String() string {
	if x == nil {
		return ""
	}
	scale := x.Scale()
	s := []byte(x.UnscaledBig().String())
	if scale <= 0 {
		if scale != 0 && x.unscaled.Sign() != 0 {
			// negative scale: the value is unscaled * 10**(-scale), so pad
			// with trailing zeros
			s = appendZeros(s, -scale)
		}
		return string(s)
	}
	// negbit is 1 when x is negative, accounting for the leading '-' in s
	negbit := Scale(-((x.Sign() - 1) / 2))
	// scale > 0
	lens := Scale(len(s))
	if lens-negbit <= scale {
		// too few digits: format as "0.00…digits" (with sign)
		ss := make([]byte, 0, scale+2)
		if negbit == 1 {
			ss = append(ss, '-')
		}
		ss = append(ss, '0', '.')
		ss = appendZeros(ss, scale-lens+negbit)
		ss = append(ss, s[negbit:]...)
		return string(ss)
	}
	// lens > scale: insert the decimal point `scale` digits from the right
	ss := make([]byte, 0, lens+1)
	ss = append(ss, s[:lens-scale]...)
	ss = append(ss, '.')
	ss = append(ss, s[lens-scale:]...)
	return string(ss)
}

// Format is a support routine for fmt.Formatter. It accepts the decimal
// formats 'd' and 'f', and handles both equivalently.
// Width, precision, flags and bases 2, 8, 16 are not supported.
func (x *Dec) Format(s fmt.State, ch rune) { if ch != 'd' && ch != 'f' && ch != 'v' && ch != 's' { fmt.Fprintf(s, "%%!%c(dec.Dec=%s)", ch, x.String()) return } fmt.Fprintf(s, x.String()) } func (z *Dec) scan(r io.RuneScanner) (*Dec, error) { unscaled := make([]byte, 0, 256) // collects chars of unscaled as bytes dp, dg := -1, -1 // indexes of decimal point, first digit loop: for { ch, _, err := r.ReadRune() if err == io.EOF { break loop } if err != nil { return nil, err } switch { case ch == '+' || ch == '-': if len(unscaled) > 0 || dp >= 0 { // must be first character r.UnreadRune() break loop } case ch == '.': if dp >= 0 { r.UnreadRune() break loop } dp = len(unscaled) continue // don't add to unscaled case ch >= '0' && ch <= '9': if dg == -1 { dg = len(unscaled) } default: r.UnreadRune() break loop } unscaled = append(unscaled, byte(ch)) } if dg == -1 { return nil, fmt.Errorf("no digits read") } if dp >= 0 { z.SetScale(Scale(len(unscaled) - dp)) } else { z.SetScale(0) } _, ok := z.UnscaledBig().SetString(string(unscaled), 10) if !ok { return nil, fmt.Errorf("invalid decimal: %s", string(unscaled)) } return z, nil } // SetString sets z to the value of s, interpreted as a decimal (base 10), // and returns z and a boolean indicating success. The scale of z is the // number of digits after the decimal point (including any trailing 0s), // or 0 if there is no decimal point. If SetString fails, the value of z // is undefined but the returned value is nil. func (z *Dec) SetString(s string) (*Dec, bool) { r := strings.NewReader(s) _, err := z.scan(r) if err != nil { return nil, false } _, _, err = r.ReadRune() if err != io.EOF { return nil, false } // err == io.EOF => scan consumed all of s return z, true } // Scan is a support routine for fmt.Scanner; it sets z to the value of // the scanned number. It accepts the decimal formats 'd' and 'f', and // handles both equivalently. Bases 2, 8, 16 are not supported. 
// The scale of z is the number of digits after the decimal point // (including any trailing 0s), or 0 if there is no decimal point. func (z *Dec) Scan(s fmt.ScanState, ch rune) error { if ch != 'd' && ch != 'f' && ch != 's' && ch != 'v' { return fmt.Errorf("Dec.Scan: invalid verb '%c'", ch) } s.SkipSpace() _, err := z.scan(s) return err } // Gob encoding version const decGobVersion byte = 1 func scaleBytes(s Scale) []byte { buf := make([]byte, scaleSize) i := scaleSize for j := 0; j < scaleSize; j++ { i-- buf[i] = byte(s) s >>= 8 } return buf } func scale(b []byte) (s Scale) { for j := 0; j < scaleSize; j++ { s <<= 8 s |= Scale(b[j]) } return } // GobEncode implements the gob.GobEncoder interface. func (x *Dec) GobEncode() ([]byte, error) { buf, err := x.UnscaledBig().GobEncode() if err != nil { return nil, err } buf = append(append(buf, scaleBytes(x.Scale())...), decGobVersion) return buf, nil } // GobDecode implements the gob.GobDecoder interface. func (z *Dec) GobDecode(buf []byte) error { if len(buf) == 0 { return fmt.Errorf("Dec.GobDecode: no data") } b := buf[len(buf)-1] if b != decGobVersion { return fmt.Errorf("Dec.GobDecode: encoding version %d not supported", b) } l := len(buf) - scaleSize - 1 err := z.UnscaledBig().GobDecode(buf[:l]) if err != nil { return err } z.SetScale(scale(buf[l : l+scaleSize])) return nil } // MarshalText implements the encoding.TextMarshaler interface. func (x *Dec) MarshalText() ([]byte, error) { return []byte(x.String()), nil } // UnmarshalText implements the encoding.TextUnmarshaler interface. func (z *Dec) UnmarshalText(data []byte) error { _, ok := z.SetString(string(data)) if !ok { return fmt.Errorf("invalid inf.Dec") } return nil } ================================================ FILE: vendor/gopkg.in/inf.v0/rounder.go ================================================ package inf import ( "math/big" ) // Rounder represents a method for rounding the (possibly infinite decimal) // result of a division to a finite Dec. 
// It is used by Dec.Round() and
// Dec.Quo().
//
// See the Example for results of using each Rounder with some sample values.
//
type Rounder rounder

// See http://speleotrove.com/decimal/damodel.html#refround for more detailed
// definitions of these rounding modes.
var (
	RoundDown     Rounder // towards 0
	RoundUp       Rounder // away from 0
	RoundFloor    Rounder // towards -infinity
	RoundCeil     Rounder // towards +infinity
	RoundHalfDown Rounder // to nearest; towards 0 if same distance
	RoundHalfUp   Rounder // to nearest; away from 0 if same distance
	RoundHalfEven Rounder // to nearest; even last digit if same distance
)

// RoundExact is to be used in the case when rounding is not necessary.
// When used with Quo or Round, it returns the result verbatim when it can be
// expressed exactly with the given precision, and it returns nil otherwise.
// QuoExact is a shorthand for using Quo with RoundExact.
var RoundExact Rounder

type rounder interface {

	// When UseRemainder() returns true, the Round() method is passed the
	// remainder of the division, expressed as the numerator and denominator of
	// a rational.
	UseRemainder() bool

	// Round sets the rounded value of a quotient to z, and returns z.
	// quo is rounded down (truncated towards zero) to the scale obtained from
	// the Scaler in Quo().
	//
	// When the remainder is not used, remNum and remDen are nil.
	// When used, the remainder is normalized between -1 and 1; that is:
	//
	//	-|remDen| < remNum < |remDen|
	//
	// remDen has the same sign as y, and remNum is zero or has the same sign
	// as x.
	Round(z, quo *Dec, remNum, remDen *big.Int) *Dec
}

// rndr is the concrete rounder implementation: a flag saying whether the
// remainder is required plus the rounding function itself.
type rndr struct {
	useRem bool
	round  func(z, quo *Dec, remNum, remDen *big.Int) *Dec
}

func (r rndr) UseRemainder() bool {
	return r.useRem
}

func (r rndr) Round(z, quo *Dec, remNum, remDen *big.Int) *Dec {
	return r.round(z, quo, remNum, remDen)
}

// intSign[s+1] maps a sign s in {-1, 0, 1} to a *big.Int of the same value;
// used to adjust the truncated quotient by one unit in the right direction.
var intSign = []*big.Int{big.NewInt(-1), big.NewInt(0), big.NewInt(1)}

// roundHalf builds a round-to-nearest rounding function; f decides ties
// given c (the comparison of twice the remainder against the denominator)
// and odd (the parity of the truncated quotient's last bit).
func roundHalf(f func(c int, odd uint) (roundUp bool)) func(z, q *Dec, rA, rB *big.Int) *Dec {
	return func(z, q *Dec, rA, rB *big.Int) *Dec {
		z.Set(q)
		brA, brB := rA.BitLen(), rB.BitLen()
		if brA < brB-1 {
			// brA < brB-1 => |rA| < |rB/2|: no adjustment needed
			return z
		}
		roundUp := false
		srA, srB := rA.Sign(), rB.Sign()
		s := srA * srB
		if brA == brB-1 {
			// |rA| may be exactly |rB/2|: compare 2*|rA| against |rB| and
			// let f break the tie.
			rA2 := new(big.Int).Lsh(rA, 1)
			if s < 0 {
				rA2.Neg(rA2)
			}
			roundUp = f(rA2.Cmp(rB)*srB, z.UnscaledBig().Bit(0))
		} else {
			// brA > brB-1 => |rA| > |rB/2|
			roundUp = true
		}
		if roundUp {
			// step the truncated quotient one unit away from zero
			z.UnscaledBig().Add(z.UnscaledBig(), intSign[s+1])
		}
		return z
	}
}

// init wires the exported Rounder variables to their rounding functions.
func init() {
	RoundExact = rndr{true,
		func(z, q *Dec, rA, rB *big.Int) *Dec {
			// any nonzero remainder means the result is inexact: refuse
			if rA.Sign() != 0 {
				return nil
			}
			return z.Set(q)
		}}
	RoundDown = rndr{false,
		func(z, q *Dec, rA, rB *big.Int) *Dec {
			return z.Set(q)
		}}
	RoundUp = rndr{true,
		func(z, q *Dec, rA, rB *big.Int) *Dec {
			z.Set(q)
			if rA.Sign() != 0 {
				z.UnscaledBig().Add(z.UnscaledBig(),
					intSign[rA.Sign()*rB.Sign()+1])
			}
			return z
		}}
	RoundFloor = rndr{true,
		func(z, q *Dec, rA, rB *big.Int) *Dec {
			z.Set(q)
			if rA.Sign()*rB.Sign() < 0 {
				// negative remainder: truncation rounded up, step down one
				z.UnscaledBig().Add(z.UnscaledBig(), intSign[0])
			}
			return z
		}}
	RoundCeil = rndr{true,
		func(z, q *Dec, rA, rB *big.Int) *Dec {
			z.Set(q)
			if rA.Sign()*rB.Sign() > 0 {
				// positive remainder: truncation rounded down, step up one
				z.UnscaledBig().Add(z.UnscaledBig(), intSign[2])
			}
			return z
		}}
	RoundHalfDown = rndr{true, roundHalf(
		func(c int, odd uint) bool {
			return c > 0
		})}
	RoundHalfUp = rndr{true, roundHalf(
		func(c int, odd uint) bool {
			return c >= 0
		})}
	RoundHalfEven = rndr{true, roundHalf(
		func(c int, odd uint) bool {
			return c > 0 || c == 0 && odd == 1
		})}
}