Repository: fabric8io/kansible
Branch: master
Commit: 457ddca2ac23
Files: 3439
Total size: 22.4 MB
Directory structure:
gitextract_2u8aug_v/
├── .dockerignore
├── .gitignore
├── BUILDING.md
├── Dockerfile
├── Dockerfile.scratch
├── Jenkinsfile
├── LICENSE
├── Makefile
├── README.md
├── add-headers.sh
├── ansible/
│ ├── ansible.go
│ └── variables.go
├── circle.yml
├── cmd/
│ ├── kill.go
│ ├── pod.go
│ ├── rc.go
│ ├── root.go
│ ├── run.go
│ └── version.go
├── glide.yaml
├── header.txt
├── headers.yml
├── k8s/
│ └── k8s.go
├── kansible.go
├── log/
│ └── log.go
├── ssh/
│ └── ssh.go
├── tools/
│ └── create-intellij-idea-golib.sh
├── vendor/
│ ├── bitbucket.org/
│ │ └── ww/
│ │ └── goautoneg/
│ │ ├── Makefile
│ │ ├── README.txt
│ │ ├── autoneg.go
│ │ └── autoneg_test.go
│ ├── github.com/
│ │ ├── Masterminds/
│ │ │ ├── semver/
│ │ │ │ ├── .travis.yml
│ │ │ │ ├── CHANGELOG.md
│ │ │ │ ├── LICENSE.txt
│ │ │ │ ├── README.md
│ │ │ │ ├── appveyor.yml
│ │ │ │ ├── benchmark_test.go
│ │ │ │ ├── collection.go
│ │ │ │ ├── collection_test.go
│ │ │ │ ├── constraints.go
│ │ │ │ ├── constraints_test.go
│ │ │ │ ├── doc.go
│ │ │ │ ├── version.go
│ │ │ │ └── version_test.go
│ │ │ └── vcs/
│ │ │ ├── .gitignore
│ │ │ ├── .travis.yml
│ │ │ ├── CHANGELOG.md
│ │ │ ├── LICENSE.txt
│ │ │ ├── README.md
│ │ │ ├── bzr.go
│ │ │ ├── bzr_test.go
│ │ │ ├── git.go
│ │ │ ├── git_test.go
│ │ │ ├── hg.go
│ │ │ ├── hg_test.go
│ │ │ ├── repo.go
│ │ │ ├── repo_test.go
│ │ │ ├── svn.go
│ │ │ ├── svn_test.go
│ │ │ ├── vcs_local_lookup.go
│ │ │ ├── vcs_remote_lookup.go
│ │ │ └── vcs_remote_lookup_test.go
│ │ ├── beorn7/
│ │ │ └── perks/
│ │ │ ├── .gitignore
│ │ │ ├── README.md
│ │ │ ├── histogram/
│ │ │ │ ├── bench_test.go
│ │ │ │ ├── histogram.go
│ │ │ │ └── histogram_test.go
│ │ │ ├── quantile/
│ │ │ │ ├── bench_test.go
│ │ │ │ ├── example_test.go
│ │ │ │ ├── exampledata.txt
│ │ │ │ ├── stream.go
│ │ │ │ └── stream_test.go
│ │ │ └── topk/
│ │ │ ├── topk.go
│ │ │ └── topk_test.go
│ │ ├── blang/
│ │ │ └── semver/
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ ├── examples/
│ │ │ │ └── main.go
│ │ │ ├── json.go
│ │ │ ├── json_test.go
│ │ │ ├── semver.go
│ │ │ ├── semver_test.go
│ │ │ ├── sort.go
│ │ │ ├── sort_test.go
│ │ │ ├── sql.go
│ │ │ └── sql_test.go
│ │ ├── cloudfoundry-incubator/
│ │ │ └── candiedyaml/
│ │ │ ├── .gitignore
│ │ │ ├── .travis.yml
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ ├── api.go
│ │ │ ├── candiedyaml_suite_test.go
│ │ │ ├── decode.go
│ │ │ ├── decode_test.go
│ │ │ ├── emitter.go
│ │ │ ├── encode.go
│ │ │ ├── encode_test.go
│ │ │ ├── fixtures/
│ │ │ │ └── specification/
│ │ │ │ ├── example2_1.yaml
│ │ │ │ ├── example2_10.yaml
│ │ │ │ ├── example2_11.yaml
│ │ │ │ ├── example2_12.yaml
│ │ │ │ ├── example2_13.yaml
│ │ │ │ ├── example2_14.yaml
│ │ │ │ ├── example2_15.yaml
│ │ │ │ ├── example2_15_dumped.yaml
│ │ │ │ ├── example2_16.yaml
│ │ │ │ ├── example2_17.yaml
│ │ │ │ ├── example2_17_control.yaml
│ │ │ │ ├── example2_17_hexesc.yaml
│ │ │ │ ├── example2_17_quoted.yaml
│ │ │ │ ├── example2_17_single.yaml
│ │ │ │ ├── example2_17_tie_fighter.yaml
│ │ │ │ ├── example2_17_unicode.yaml
│ │ │ │ ├── example2_18.yaml
│ │ │ │ ├── example2_19.yaml
│ │ │ │ ├── example2_2.yaml
│ │ │ │ ├── example2_20.yaml
│ │ │ │ ├── example2_21.yaml
│ │ │ │ ├── example2_22.yaml
│ │ │ │ ├── example2_23.yaml
│ │ │ │ ├── example2_23_application.yaml
│ │ │ │ ├── example2_23_non_date.yaml
│ │ │ │ ├── example2_23_picture.yaml
│ │ │ │ ├── example2_24.yaml
│ │ │ │ ├── example2_24_dumped.yaml
│ │ │ │ ├── example2_25.yaml
│ │ │ │ ├── example2_26.yaml
│ │ │ │ ├── example2_27.yaml
│ │ │ │ ├── example2_27_dumped.yaml
│ │ │ │ ├── example2_28.yaml
│ │ │ │ ├── example2_3.yaml
│ │ │ │ ├── example2_4.yaml
│ │ │ │ ├── example2_5.yaml
│ │ │ │ ├── example2_6.yaml
│ │ │ │ ├── example2_7.yaml
│ │ │ │ ├── example2_8.yaml
│ │ │ │ ├── example2_9.yaml
│ │ │ │ ├── example_empty.yaml
│ │ │ │ └── types/
│ │ │ │ ├── map.yaml
│ │ │ │ ├── map_mixed_tags.yaml
│ │ │ │ ├── merge.yaml
│ │ │ │ ├── omap.yaml
│ │ │ │ ├── pairs.yaml
│ │ │ │ ├── seq.yaml
│ │ │ │ ├── set.yaml
│ │ │ │ ├── v.yaml
│ │ │ │ └── value.yaml
│ │ │ ├── libyaml-LICENSE
│ │ │ ├── parser.go
│ │ │ ├── parser_test.go
│ │ │ ├── reader.go
│ │ │ ├── reader_test.go
│ │ │ ├── resolver.go
│ │ │ ├── resolver_test.go
│ │ │ ├── run_parser.go
│ │ │ ├── scanner.go
│ │ │ ├── scanner_test.go
│ │ │ ├── tags.go
│ │ │ ├── writer.go
│ │ │ ├── yaml_definesh.go
│ │ │ ├── yaml_privateh.go
│ │ │ └── yamlh.go
│ │ ├── davecgh/
│ │ │ └── go-spew/
│ │ │ ├── .gitignore
│ │ │ ├── .travis.yml
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ ├── cov_report.sh
│ │ │ ├── spew/
│ │ │ │ ├── common.go
│ │ │ │ ├── common_test.go
│ │ │ │ ├── config.go
│ │ │ │ ├── doc.go
│ │ │ │ ├── dump.go
│ │ │ │ ├── dump_test.go
│ │ │ │ ├── dumpcgo_test.go
│ │ │ │ ├── dumpnocgo_test.go
│ │ │ │ ├── example_test.go
│ │ │ │ ├── format.go
│ │ │ │ ├── format_test.go
│ │ │ │ ├── internal_test.go
│ │ │ │ ├── spew.go
│ │ │ │ ├── spew_test.go
│ │ │ │ └── testdata/
│ │ │ │ └── dumpcgo.go
│ │ │ └── test_coverage.txt
│ │ ├── docker/
│ │ │ ├── docker/
│ │ │ │ ├── .dockerignore
│ │ │ │ ├── .gitignore
│ │ │ │ ├── .mailmap
│ │ │ │ ├── AUTHORS
│ │ │ │ ├── CHANGELOG.md
│ │ │ │ ├── CONTRIBUTING.md
│ │ │ │ ├── Dockerfile
│ │ │ │ ├── Dockerfile.simple
│ │ │ │ ├── LICENSE
│ │ │ │ ├── MAINTAINERS
│ │ │ │ ├── Makefile
│ │ │ │ ├── NOTICE
│ │ │ │ ├── README.md
│ │ │ │ ├── ROADMAP.md
│ │ │ │ ├── VERSION
│ │ │ │ ├── api/
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── api_unit_test.go
│ │ │ │ │ ├── client/
│ │ │ │ │ │ ├── attach.go
│ │ │ │ │ │ ├── build.go
│ │ │ │ │ │ ├── cli.go
│ │ │ │ │ │ ├── client.go
│ │ │ │ │ │ ├── commit.go
│ │ │ │ │ │ ├── cp.go
│ │ │ │ │ │ ├── create.go
│ │ │ │ │ │ ├── diff.go
│ │ │ │ │ │ ├── events.go
│ │ │ │ │ │ ├── exec.go
│ │ │ │ │ │ ├── export.go
│ │ │ │ │ │ ├── help.go
│ │ │ │ │ │ ├── hijack.go
│ │ │ │ │ │ ├── history.go
│ │ │ │ │ │ ├── images.go
│ │ │ │ │ │ ├── import.go
│ │ │ │ │ │ ├── info.go
│ │ │ │ │ │ ├── inspect.go
│ │ │ │ │ │ ├── kill.go
│ │ │ │ │ │ ├── load.go
│ │ │ │ │ │ ├── login.go
│ │ │ │ │ │ ├── logout.go
│ │ │ │ │ │ ├── logs.go
│ │ │ │ │ │ ├── network.go
│ │ │ │ │ │ ├── pause.go
│ │ │ │ │ │ ├── port.go
│ │ │ │ │ │ ├── ps.go
│ │ │ │ │ │ ├── pull.go
│ │ │ │ │ │ ├── push.go
│ │ │ │ │ │ ├── rename.go
│ │ │ │ │ │ ├── restart.go
│ │ │ │ │ │ ├── rm.go
│ │ │ │ │ │ ├── rmi.go
│ │ │ │ │ │ ├── run.go
│ │ │ │ │ │ ├── save.go
│ │ │ │ │ │ ├── search.go
│ │ │ │ │ │ ├── service.go
│ │ │ │ │ │ ├── start.go
│ │ │ │ │ │ ├── stats.go
│ │ │ │ │ │ ├── stats_unit_test.go
│ │ │ │ │ │ ├── stop.go
│ │ │ │ │ │ ├── tag.go
│ │ │ │ │ │ ├── top.go
│ │ │ │ │ │ ├── unpause.go
│ │ │ │ │ │ ├── utils.go
│ │ │ │ │ │ ├── version.go
│ │ │ │ │ │ └── wait.go
│ │ │ │ │ ├── common.go
│ │ │ │ │ ├── server/
│ │ │ │ │ │ ├── form.go
│ │ │ │ │ │ ├── form_test.go
│ │ │ │ │ │ ├── profiler.go
│ │ │ │ │ │ ├── server.go
│ │ │ │ │ │ ├── server_experimental.go
│ │ │ │ │ │ ├── server_linux.go
│ │ │ │ │ │ ├── server_linux_test.go
│ │ │ │ │ │ ├── server_stub.go
│ │ │ │ │ │ └── server_windows.go
│ │ │ │ │ └── types/
│ │ │ │ │ ├── stats.go
│ │ │ │ │ └── types.go
│ │ │ │ ├── builder/
│ │ │ │ │ ├── bflag.go
│ │ │ │ │ ├── bflag_test.go
│ │ │ │ │ ├── command/
│ │ │ │ │ │ └── command.go
│ │ │ │ │ ├── dispatchers.go
│ │ │ │ │ ├── evaluator.go
│ │ │ │ │ ├── internals.go
│ │ │ │ │ ├── internals_linux.go
│ │ │ │ │ ├── internals_windows.go
│ │ │ │ │ ├── job.go
│ │ │ │ │ ├── job_test.go
│ │ │ │ │ ├── parser/
│ │ │ │ │ │ ├── dumper/
│ │ │ │ │ │ │ └── main.go
│ │ │ │ │ │ ├── json_test.go
│ │ │ │ │ │ ├── line_parsers.go
│ │ │ │ │ │ ├── parser.go
│ │ │ │ │ │ ├── parser_test.go
│ │ │ │ │ │ ├── testfiles/
│ │ │ │ │ │ │ ├── ADD-COPY-with-JSON/
│ │ │ │ │ │ │ │ ├── Dockerfile
│ │ │ │ │ │ │ │ └── result
│ │ │ │ │ │ │ ├── brimstone-consuldock/
│ │ │ │ │ │ │ │ ├── Dockerfile
│ │ │ │ │ │ │ │ └── result
│ │ │ │ │ │ │ ├── brimstone-docker-consul/
│ │ │ │ │ │ │ │ ├── Dockerfile
│ │ │ │ │ │ │ │ └── result
│ │ │ │ │ │ │ ├── continueIndent/
│ │ │ │ │ │ │ │ ├── Dockerfile
│ │ │ │ │ │ │ │ └── result
│ │ │ │ │ │ │ ├── cpuguy83-nagios/
│ │ │ │ │ │ │ │ ├── Dockerfile
│ │ │ │ │ │ │ │ └── result
│ │ │ │ │ │ │ ├── docker/
│ │ │ │ │ │ │ │ ├── Dockerfile
│ │ │ │ │ │ │ │ └── result
│ │ │ │ │ │ │ ├── env/
│ │ │ │ │ │ │ │ ├── Dockerfile
│ │ │ │ │ │ │ │ └── result
│ │ │ │ │ │ │ ├── escapes/
│ │ │ │ │ │ │ │ ├── Dockerfile
│ │ │ │ │ │ │ │ └── result
│ │ │ │ │ │ │ ├── flags/
│ │ │ │ │ │ │ │ ├── Dockerfile
│ │ │ │ │ │ │ │ └── result
│ │ │ │ │ │ │ ├── influxdb/
│ │ │ │ │ │ │ │ ├── Dockerfile
│ │ │ │ │ │ │ │ └── result
│ │ │ │ │ │ │ ├── jeztah-invalid-json-json-inside-string/
│ │ │ │ │ │ │ │ ├── Dockerfile
│ │ │ │ │ │ │ │ └── result
│ │ │ │ │ │ │ ├── jeztah-invalid-json-json-inside-string-double/
│ │ │ │ │ │ │ │ ├── Dockerfile
│ │ │ │ │ │ │ │ └── result
│ │ │ │ │ │ │ ├── jeztah-invalid-json-single-quotes/
│ │ │ │ │ │ │ │ ├── Dockerfile
│ │ │ │ │ │ │ │ └── result
│ │ │ │ │ │ │ ├── jeztah-invalid-json-unterminated-bracket/
│ │ │ │ │ │ │ │ ├── Dockerfile
│ │ │ │ │ │ │ │ └── result
│ │ │ │ │ │ │ ├── jeztah-invalid-json-unterminated-string/
│ │ │ │ │ │ │ │ ├── Dockerfile
│ │ │ │ │ │ │ │ └── result
│ │ │ │ │ │ │ ├── json/
│ │ │ │ │ │ │ │ ├── Dockerfile
│ │ │ │ │ │ │ │ └── result
│ │ │ │ │ │ │ ├── kartar-entrypoint-oddities/
│ │ │ │ │ │ │ │ ├── Dockerfile
│ │ │ │ │ │ │ │ └── result
│ │ │ │ │ │ │ ├── lk4d4-the-edge-case-generator/
│ │ │ │ │ │ │ │ ├── Dockerfile
│ │ │ │ │ │ │ │ └── result
│ │ │ │ │ │ │ ├── mail/
│ │ │ │ │ │ │ │ ├── Dockerfile
│ │ │ │ │ │ │ │ └── result
│ │ │ │ │ │ │ ├── multiple-volumes/
│ │ │ │ │ │ │ │ ├── Dockerfile
│ │ │ │ │ │ │ │ └── result
│ │ │ │ │ │ │ ├── mumble/
│ │ │ │ │ │ │ │ ├── Dockerfile
│ │ │ │ │ │ │ │ └── result
│ │ │ │ │ │ │ ├── nginx/
│ │ │ │ │ │ │ │ ├── Dockerfile
│ │ │ │ │ │ │ │ └── result
│ │ │ │ │ │ │ ├── tf2/
│ │ │ │ │ │ │ │ ├── Dockerfile
│ │ │ │ │ │ │ │ └── result
│ │ │ │ │ │ │ ├── weechat/
│ │ │ │ │ │ │ │ ├── Dockerfile
│ │ │ │ │ │ │ │ └── result
│ │ │ │ │ │ │ └── znc/
│ │ │ │ │ │ │ ├── Dockerfile
│ │ │ │ │ │ │ └── result
│ │ │ │ │ │ ├── testfiles-negative/
│ │ │ │ │ │ │ ├── env_no_value/
│ │ │ │ │ │ │ │ └── Dockerfile
│ │ │ │ │ │ │ └── shykes-nested-json/
│ │ │ │ │ │ │ └── Dockerfile
│ │ │ │ │ │ └── utils.go
│ │ │ │ │ ├── shell_parser.go
│ │ │ │ │ ├── shell_parser_test.go
│ │ │ │ │ ├── support.go
│ │ │ │ │ ├── support_test.go
│ │ │ │ │ └── words
│ │ │ │ ├── cliconfig/
│ │ │ │ │ ├── config.go
│ │ │ │ │ └── config_test.go
│ │ │ │ ├── contrib/
│ │ │ │ │ ├── README
│ │ │ │ │ ├── REVIEWERS
│ │ │ │ │ ├── apparmor/
│ │ │ │ │ │ └── docker
│ │ │ │ │ ├── builder/
│ │ │ │ │ │ ├── deb/
│ │ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ │ ├── build.sh
│ │ │ │ │ │ │ ├── debian-jessie/
│ │ │ │ │ │ │ │ └── Dockerfile
│ │ │ │ │ │ │ ├── debian-stretch/
│ │ │ │ │ │ │ │ └── Dockerfile
│ │ │ │ │ │ │ ├── debian-wheezy/
│ │ │ │ │ │ │ │ └── Dockerfile
│ │ │ │ │ │ │ ├── generate.sh
│ │ │ │ │ │ │ ├── ubuntu-debootstrap-precise/
│ │ │ │ │ │ │ │ └── Dockerfile
│ │ │ │ │ │ │ ├── ubuntu-debootstrap-trusty/
│ │ │ │ │ │ │ │ └── Dockerfile
│ │ │ │ │ │ │ ├── ubuntu-debootstrap-utopic/
│ │ │ │ │ │ │ │ └── Dockerfile
│ │ │ │ │ │ │ ├── ubuntu-debootstrap-vivid/
│ │ │ │ │ │ │ │ └── Dockerfile
│ │ │ │ │ │ │ └── ubuntu-debootstrap-wily/
│ │ │ │ │ │ │ └── Dockerfile
│ │ │ │ │ │ └── rpm/
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── build.sh
│ │ │ │ │ │ ├── centos-7/
│ │ │ │ │ │ │ └── Dockerfile
│ │ │ │ │ │ ├── fedora-20/
│ │ │ │ │ │ │ └── Dockerfile
│ │ │ │ │ │ ├── fedora-21/
│ │ │ │ │ │ │ └── Dockerfile
│ │ │ │ │ │ ├── fedora-22/
│ │ │ │ │ │ │ └── Dockerfile
│ │ │ │ │ │ ├── generate.sh
│ │ │ │ │ │ └── oraclelinux-7/
│ │ │ │ │ │ └── Dockerfile
│ │ │ │ │ ├── check-config.sh
│ │ │ │ │ ├── completion/
│ │ │ │ │ │ ├── REVIEWERS
│ │ │ │ │ │ ├── bash/
│ │ │ │ │ │ │ └── docker
│ │ │ │ │ │ ├── fish/
│ │ │ │ │ │ │ └── docker.fish
│ │ │ │ │ │ └── zsh/
│ │ │ │ │ │ ├── REVIEWERS
│ │ │ │ │ │ └── _docker
│ │ │ │ │ ├── desktop-integration/
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── chromium/
│ │ │ │ │ │ │ └── Dockerfile
│ │ │ │ │ │ └── gparted/
│ │ │ │ │ │ └── Dockerfile
│ │ │ │ │ ├── docker-device-tool/
│ │ │ │ │ │ └── device_tool.go
│ │ │ │ │ ├── dockerize-disk.sh
│ │ │ │ │ ├── download-frozen-image.sh
│ │ │ │ │ ├── host-integration/
│ │ │ │ │ │ ├── Dockerfile.dev
│ │ │ │ │ │ ├── Dockerfile.min
│ │ │ │ │ │ ├── manager/
│ │ │ │ │ │ │ ├── systemd
│ │ │ │ │ │ │ └── upstart
│ │ │ │ │ │ ├── manager.go
│ │ │ │ │ │ └── manager.sh
│ │ │ │ │ ├── httpserver/
│ │ │ │ │ │ ├── Dockerfile
│ │ │ │ │ │ └── server.go
│ │ │ │ │ ├── init/
│ │ │ │ │ │ ├── openrc/
│ │ │ │ │ │ │ ├── docker.confd
│ │ │ │ │ │ │ └── docker.initd
│ │ │ │ │ │ ├── systemd/
│ │ │ │ │ │ │ ├── REVIEWERS
│ │ │ │ │ │ │ ├── docker.service
│ │ │ │ │ │ │ └── docker.socket
│ │ │ │ │ │ ├── sysvinit-debian/
│ │ │ │ │ │ │ ├── docker
│ │ │ │ │ │ │ └── docker.default
│ │ │ │ │ │ ├── sysvinit-redhat/
│ │ │ │ │ │ │ ├── docker
│ │ │ │ │ │ │ └── docker.sysconfig
│ │ │ │ │ │ └── upstart/
│ │ │ │ │ │ ├── REVIEWERS
│ │ │ │ │ │ └── docker.conf
│ │ │ │ │ ├── mkimage/
│ │ │ │ │ │ ├── .febootstrap-minimize
│ │ │ │ │ │ ├── busybox-static
│ │ │ │ │ │ ├── debootstrap
│ │ │ │ │ │ ├── mageia-urpmi
│ │ │ │ │ │ └── rinse
│ │ │ │ │ ├── mkimage-alpine.sh
│ │ │ │ │ ├── mkimage-arch-pacman.conf
│ │ │ │ │ ├── mkimage-arch.sh
│ │ │ │ │ ├── mkimage-busybox.sh
│ │ │ │ │ ├── mkimage-crux.sh
│ │ │ │ │ ├── mkimage-debootstrap.sh
│ │ │ │ │ ├── mkimage-rinse.sh
│ │ │ │ │ ├── mkimage-yum.sh
│ │ │ │ │ ├── mkimage.sh
│ │ │ │ │ ├── mkseccomp.pl
│ │ │ │ │ ├── mkseccomp.sample
│ │ │ │ │ ├── nuke-graph-directory.sh
│ │ │ │ │ ├── project-stats.sh
│ │ │ │ │ ├── report-issue.sh
│ │ │ │ │ ├── reprepro/
│ │ │ │ │ │ └── suites.sh
│ │ │ │ │ ├── syntax/
│ │ │ │ │ │ ├── kate/
│ │ │ │ │ │ │ └── Dockerfile.xml
│ │ │ │ │ │ ├── nano/
│ │ │ │ │ │ │ ├── Dockerfile.nanorc
│ │ │ │ │ │ │ └── README.md
│ │ │ │ │ │ ├── textmate/
│ │ │ │ │ │ │ ├── Docker.tmbundle/
│ │ │ │ │ │ │ │ ├── Preferences/
│ │ │ │ │ │ │ │ │ └── Dockerfile.tmPreferences
│ │ │ │ │ │ │ │ ├── Syntaxes/
│ │ │ │ │ │ │ │ │ └── Dockerfile.tmLanguage
│ │ │ │ │ │ │ │ └── info.plist
│ │ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ │ └── REVIEWERS
│ │ │ │ │ │ └── vim/
│ │ │ │ │ │ ├── LICENSE
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── doc/
│ │ │ │ │ │ │ └── dockerfile.txt
│ │ │ │ │ │ ├── ftdetect/
│ │ │ │ │ │ │ └── dockerfile.vim
│ │ │ │ │ │ └── syntax/
│ │ │ │ │ │ └── dockerfile.vim
│ │ │ │ │ ├── udev/
│ │ │ │ │ │ └── 80-docker.rules
│ │ │ │ │ └── vagrant-docker/
│ │ │ │ │ └── README.md
│ │ │ │ ├── daemon/
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── archive.go
│ │ │ │ │ ├── attach.go
│ │ │ │ │ ├── changes.go
│ │ │ │ │ ├── commit.go
│ │ │ │ │ ├── config.go
│ │ │ │ │ ├── config_experimental.go
│ │ │ │ │ ├── config_linux.go
│ │ │ │ │ ├── config_stub.go
│ │ │ │ │ ├── config_windows.go
│ │ │ │ │ ├── container.go
│ │ │ │ │ ├── container_unit_test.go
│ │ │ │ │ ├── container_unix.go
│ │ │ │ │ ├── container_windows.go
│ │ │ │ │ ├── create.go
│ │ │ │ │ ├── daemon.go
│ │ │ │ │ ├── daemon_aufs.go
│ │ │ │ │ ├── daemon_btrfs.go
│ │ │ │ │ ├── daemon_devicemapper.go
│ │ │ │ │ ├── daemon_no_aufs.go
│ │ │ │ │ ├── daemon_overlay.go
│ │ │ │ │ ├── daemon_test.go
│ │ │ │ │ ├── daemon_unit_test.go
│ │ │ │ │ ├── daemon_unix.go
│ │ │ │ │ ├── daemon_windows.go
│ │ │ │ │ ├── daemon_zfs.go
│ │ │ │ │ ├── debugtrap_unix.go
│ │ │ │ │ ├── debugtrap_unsupported.go
│ │ │ │ │ ├── debugtrap_windows.go
│ │ │ │ │ ├── delete.go
│ │ │ │ │ ├── events/
│ │ │ │ │ │ ├── events.go
│ │ │ │ │ │ └── events_test.go
│ │ │ │ │ ├── exec.go
│ │ │ │ │ ├── exec_linux.go
│ │ │ │ │ ├── exec_windows.go
│ │ │ │ │ ├── execdriver/
│ │ │ │ │ │ ├── driver.go
│ │ │ │ │ │ ├── driver_linux.go
│ │ │ │ │ │ ├── execdrivers/
│ │ │ │ │ │ │ ├── execdrivers_linux.go
│ │ │ │ │ │ │ └── execdrivers_windows.go
│ │ │ │ │ │ ├── lxc/
│ │ │ │ │ │ │ ├── driver.go
│ │ │ │ │ │ │ ├── info.go
│ │ │ │ │ │ │ ├── info_test.go
│ │ │ │ │ │ │ ├── init.go
│ │ │ │ │ │ │ ├── lxc_init_linux.go
│ │ │ │ │ │ │ ├── lxc_init_unsupported.go
│ │ │ │ │ │ │ ├── lxc_template.go
│ │ │ │ │ │ │ └── lxc_template_unit_test.go
│ │ │ │ │ │ ├── native/
│ │ │ │ │ │ │ ├── create.go
│ │ │ │ │ │ │ ├── driver.go
│ │ │ │ │ │ │ ├── driver_unsupported.go
│ │ │ │ │ │ │ ├── driver_unsupported_nocgo.go
│ │ │ │ │ │ │ ├── exec.go
│ │ │ │ │ │ │ ├── info.go
│ │ │ │ │ │ │ ├── init.go
│ │ │ │ │ │ │ └── template/
│ │ │ │ │ │ │ └── default_template.go
│ │ │ │ │ │ ├── pipes.go
│ │ │ │ │ │ ├── termconsole.go
│ │ │ │ │ │ ├── utils.go
│ │ │ │ │ │ └── windows/
│ │ │ │ │ │ ├── checkoptions.go
│ │ │ │ │ │ ├── clean.go
│ │ │ │ │ │ ├── exec.go
│ │ │ │ │ │ ├── getpids.go
│ │ │ │ │ │ ├── info.go
│ │ │ │ │ │ ├── namedpipes.go
│ │ │ │ │ │ ├── pauseunpause.go
│ │ │ │ │ │ ├── run.go
│ │ │ │ │ │ ├── stats.go
│ │ │ │ │ │ ├── stdconsole.go
│ │ │ │ │ │ ├── terminatekill.go
│ │ │ │ │ │ ├── ttyconsole.go
│ │ │ │ │ │ ├── unsupported.go
│ │ │ │ │ │ └── windows.go
│ │ │ │ │ ├── export.go
│ │ │ │ │ ├── graphdriver/
│ │ │ │ │ │ ├── aufs/
│ │ │ │ │ │ │ ├── aufs.go
│ │ │ │ │ │ │ ├── aufs_test.go
│ │ │ │ │ │ │ ├── dirs.go
│ │ │ │ │ │ │ ├── migrate.go
│ │ │ │ │ │ │ ├── mount.go
│ │ │ │ │ │ │ ├── mount_linux.go
│ │ │ │ │ │ │ └── mount_unsupported.go
│ │ │ │ │ │ ├── btrfs/
│ │ │ │ │ │ │ ├── btrfs.go
│ │ │ │ │ │ │ ├── btrfs_test.go
│ │ │ │ │ │ │ ├── dummy_unsupported.go
│ │ │ │ │ │ │ ├── version.go
│ │ │ │ │ │ │ ├── version_none.go
│ │ │ │ │ │ │ └── version_test.go
│ │ │ │ │ │ ├── devmapper/
│ │ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ │ ├── deviceset.go
│ │ │ │ │ │ │ ├── devmapper_doc.go
│ │ │ │ │ │ │ ├── devmapper_test.go
│ │ │ │ │ │ │ ├── driver.go
│ │ │ │ │ │ │ └── mount.go
│ │ │ │ │ │ ├── driver.go
│ │ │ │ │ │ ├── driver_linux.go
│ │ │ │ │ │ ├── driver_unsupported.go
│ │ │ │ │ │ ├── driver_windows.go
│ │ │ │ │ │ ├── fsdiff.go
│ │ │ │ │ │ ├── graphtest/
│ │ │ │ │ │ │ └── graphtest.go
│ │ │ │ │ │ ├── overlay/
│ │ │ │ │ │ │ ├── copy.go
│ │ │ │ │ │ │ ├── overlay.go
│ │ │ │ │ │ │ ├── overlay_test.go
│ │ │ │ │ │ │ └── overlay_unsupported.go
│ │ │ │ │ │ ├── vfs/
│ │ │ │ │ │ │ ├── driver.go
│ │ │ │ │ │ │ ├── driver_unsupported.go
│ │ │ │ │ │ │ └── vfs_test.go
│ │ │ │ │ │ ├── windows/
│ │ │ │ │ │ │ └── windows.go
│ │ │ │ │ │ └── zfs/
│ │ │ │ │ │ ├── MAINTAINERS
│ │ │ │ │ │ ├── zfs.go
│ │ │ │ │ │ ├── zfs_freebsd.go
│ │ │ │ │ │ ├── zfs_linux.go
│ │ │ │ │ │ ├── zfs_test.go
│ │ │ │ │ │ └── zfs_unsupported.go
│ │ │ │ │ ├── history.go
│ │ │ │ │ ├── image_delete.go
│ │ │ │ │ ├── info.go
│ │ │ │ │ ├── inspect.go
│ │ │ │ │ ├── kill.go
│ │ │ │ │ ├── list.go
│ │ │ │ │ ├── logdrivers_linux.go
│ │ │ │ │ ├── logdrivers_windows.go
│ │ │ │ │ ├── logger/
│ │ │ │ │ │ ├── copier.go
│ │ │ │ │ │ ├── copier_test.go
│ │ │ │ │ │ ├── factory.go
│ │ │ │ │ │ ├── fluentd/
│ │ │ │ │ │ │ └── fluentd.go
│ │ │ │ │ │ ├── gelf/
│ │ │ │ │ │ │ ├── gelf.go
│ │ │ │ │ │ │ └── gelf_unsupported.go
│ │ │ │ │ │ ├── journald/
│ │ │ │ │ │ │ ├── journald.go
│ │ │ │ │ │ │ └── journald_unsupported.go
│ │ │ │ │ │ ├── jsonfilelog/
│ │ │ │ │ │ │ ├── jsonfilelog.go
│ │ │ │ │ │ │ └── jsonfilelog_test.go
│ │ │ │ │ │ ├── logger.go
│ │ │ │ │ │ └── syslog/
│ │ │ │ │ │ ├── syslog.go
│ │ │ │ │ │ └── syslog_unsupported.go
│ │ │ │ │ ├── logs.go
│ │ │ │ │ ├── monitor.go
│ │ │ │ │ ├── network/
│ │ │ │ │ │ └── settings.go
│ │ │ │ │ ├── pause.go
│ │ │ │ │ ├── rename.go
│ │ │ │ │ ├── resize.go
│ │ │ │ │ ├── restart.go
│ │ │ │ │ ├── start.go
│ │ │ │ │ ├── state.go
│ │ │ │ │ ├── state_test.go
│ │ │ │ │ ├── stats.go
│ │ │ │ │ ├── stats_collector_unix.go
│ │ │ │ │ ├── stats_collector_windows.go
│ │ │ │ │ ├── stats_linux.go
│ │ │ │ │ ├── stats_windows.go
│ │ │ │ │ ├── stop.go
│ │ │ │ │ ├── top.go
│ │ │ │ │ ├── unpause.go
│ │ │ │ │ ├── utils_nounix.go
│ │ │ │ │ ├── utils_test.go
│ │ │ │ │ ├── utils_unix.go
│ │ │ │ │ ├── volumes.go
│ │ │ │ │ ├── volumes_linux.go
│ │ │ │ │ ├── volumes_linux_unit_test.go
│ │ │ │ │ ├── volumes_unit_test.go
│ │ │ │ │ ├── volumes_windows.go
│ │ │ │ │ └── wait.go
│ │ │ │ ├── docker/
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── client.go
│ │ │ │ │ ├── daemon.go
│ │ │ │ │ ├── daemon_unix.go
│ │ │ │ │ ├── daemon_windows.go
│ │ │ │ │ ├── docker.go
│ │ │ │ │ ├── docker_windows.go
│ │ │ │ │ ├── flags.go
│ │ │ │ │ └── log.go
│ │ │ │ ├── dockerinit/
│ │ │ │ │ └── dockerinit.go
│ │ │ │ ├── docs/
│ │ │ │ │ ├── .gitignore
│ │ │ │ │ ├── Dockerfile
│ │ │ │ │ ├── Makefile
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── article-img/
│ │ │ │ │ │ ├── ipv6_basic_host_config.gliffy
│ │ │ │ │ │ ├── ipv6_ndp_proxying.gliffy
│ │ │ │ │ │ ├── ipv6_routed_network_example.gliffy
│ │ │ │ │ │ ├── ipv6_slash64_subnet_config.gliffy
│ │ │ │ │ │ └── ipv6_switched_network_example.gliffy
│ │ │ │ │ ├── articles/
│ │ │ │ │ │ ├── ambassador_pattern_linking.md
│ │ │ │ │ │ ├── b2d_volume_resize.md
│ │ │ │ │ │ ├── baseimages.md
│ │ │ │ │ │ ├── basics.md
│ │ │ │ │ │ ├── certificates.md
│ │ │ │ │ │ ├── cfengine_process_management.md
│ │ │ │ │ │ ├── chef.md
│ │ │ │ │ │ ├── configuring.md
│ │ │ │ │ │ ├── dockerfile_best-practices.md
│ │ │ │ │ │ ├── dsc.md
│ │ │ │ │ │ ├── host_integration.md
│ │ │ │ │ │ ├── https/
│ │ │ │ │ │ │ ├── Dockerfile
│ │ │ │ │ │ │ ├── Makefile
│ │ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ │ ├── make_certs.sh
│ │ │ │ │ │ │ └── parsedocs.sh
│ │ │ │ │ │ ├── https.md
│ │ │ │ │ │ ├── networking.md
│ │ │ │ │ │ ├── puppet.md
│ │ │ │ │ │ ├── registry_mirror.md
│ │ │ │ │ │ ├── runmetrics.md
│ │ │ │ │ │ ├── security.md
│ │ │ │ │ │ ├── systemd.md
│ │ │ │ │ │ └── using_supervisord.md
│ │ │ │ │ ├── docker-hub/
│ │ │ │ │ │ ├── accounts.md
│ │ │ │ │ │ ├── builds.md
│ │ │ │ │ │ ├── home.md
│ │ │ │ │ │ ├── index.md
│ │ │ │ │ │ ├── official_repos.md
│ │ │ │ │ │ ├── repos.md
│ │ │ │ │ │ └── userguide.md
│ │ │ │ │ ├── examples/
│ │ │ │ │ │ ├── apt-cacher-ng.Dockerfile
│ │ │ │ │ │ ├── apt-cacher-ng.md
│ │ │ │ │ │ ├── couchdb_data_volumes.md
│ │ │ │ │ │ ├── mongodb/
│ │ │ │ │ │ │ └── Dockerfile
│ │ │ │ │ │ ├── mongodb.md
│ │ │ │ │ │ ├── nodejs_web_app.md
│ │ │ │ │ │ ├── postgresql_service.Dockerfile
│ │ │ │ │ │ ├── postgresql_service.md
│ │ │ │ │ │ ├── running_redis_service.md
│ │ │ │ │ │ ├── running_riak_service.Dockerfile
│ │ │ │ │ │ ├── running_riak_service.md
│ │ │ │ │ │ ├── running_ssh_service.Dockerfile
│ │ │ │ │ │ ├── running_ssh_service.md
│ │ │ │ │ │ └── supervisord.conf
│ │ │ │ │ ├── extend/
│ │ │ │ │ │ ├── index.md
│ │ │ │ │ │ ├── plugin_api.md
│ │ │ │ │ │ ├── plugins.md
│ │ │ │ │ │ └── plugins_volume.md
│ │ │ │ │ ├── installation/
│ │ │ │ │ │ ├── SUSE.md
│ │ │ │ │ │ ├── amazon.md
│ │ │ │ │ │ ├── archlinux.md
│ │ │ │ │ │ ├── azure.md
│ │ │ │ │ │ ├── binaries.md
│ │ │ │ │ │ ├── centos.md
│ │ │ │ │ │ ├── cruxlinux.md
│ │ │ │ │ │ ├── debian.md
│ │ │ │ │ │ ├── fedora.md
│ │ │ │ │ │ ├── frugalware.md
│ │ │ │ │ │ ├── gentoolinux.md
│ │ │ │ │ │ ├── google.md
│ │ │ │ │ │ ├── index.md
│ │ │ │ │ │ ├── joyent.md
│ │ │ │ │ │ ├── mac.md
│ │ │ │ │ │ ├── oracle.md
│ │ │ │ │ │ ├── rackspace.md
│ │ │ │ │ │ ├── rhel.md
│ │ │ │ │ │ ├── softlayer.md
│ │ │ │ │ │ ├── ubuntulinux.md
│ │ │ │ │ │ └── windows.md
│ │ │ │ │ ├── introduction/
│ │ │ │ │ │ └── understanding-docker.md
│ │ │ │ │ ├── misc/
│ │ │ │ │ │ ├── faq.md
│ │ │ │ │ │ ├── index.md
│ │ │ │ │ │ ├── release-notes.md
│ │ │ │ │ │ └── search.md
│ │ │ │ │ ├── project/
│ │ │ │ │ │ ├── advanced-contributing.md
│ │ │ │ │ │ ├── coding-style.md
│ │ │ │ │ │ ├── create-pr.md
│ │ │ │ │ │ ├── doc-style.md
│ │ │ │ │ │ ├── find-an-issue.md
│ │ │ │ │ │ ├── get-help.md
│ │ │ │ │ │ ├── images/
│ │ │ │ │ │ │ ├── existing_issue.snagproj
│ │ │ │ │ │ │ └── proposal.snagproj
│ │ │ │ │ │ ├── make-a-contribution.md
│ │ │ │ │ │ ├── review-pr.md
│ │ │ │ │ │ ├── set-up-dev-env.md
│ │ │ │ │ │ ├── set-up-git.md
│ │ │ │ │ │ ├── software-req-win.md
│ │ │ │ │ │ ├── software-required.md
│ │ │ │ │ │ ├── test-and-docs.md
│ │ │ │ │ │ ├── who-written-for.md
│ │ │ │ │ │ └── work-issue.md
│ │ │ │ │ ├── reference/
│ │ │ │ │ │ ├── api/
│ │ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ │ ├── docker-io_api.md
│ │ │ │ │ │ │ ├── docker_io_accounts_api.md
│ │ │ │ │ │ │ ├── docker_remote_api.md
│ │ │ │ │ │ │ ├── docker_remote_api_v1.0.md
│ │ │ │ │ │ │ ├── docker_remote_api_v1.1.md
│ │ │ │ │ │ │ ├── docker_remote_api_v1.10.md
│ │ │ │ │ │ │ ├── docker_remote_api_v1.11.md
│ │ │ │ │ │ │ ├── docker_remote_api_v1.12.md
│ │ │ │ │ │ │ ├── docker_remote_api_v1.13.md
│ │ │ │ │ │ │ ├── docker_remote_api_v1.14.md
│ │ │ │ │ │ │ ├── docker_remote_api_v1.15.md
│ │ │ │ │ │ │ ├── docker_remote_api_v1.16.md
│ │ │ │ │ │ │ ├── docker_remote_api_v1.17.md
│ │ │ │ │ │ │ ├── docker_remote_api_v1.18.md
│ │ │ │ │ │ │ ├── docker_remote_api_v1.19.md
│ │ │ │ │ │ │ ├── docker_remote_api_v1.2.md
│ │ │ │ │ │ │ ├── docker_remote_api_v1.20.md
│ │ │ │ │ │ │ ├── docker_remote_api_v1.3.md
│ │ │ │ │ │ │ ├── docker_remote_api_v1.4.md
│ │ │ │ │ │ │ ├── docker_remote_api_v1.5.md
│ │ │ │ │ │ │ ├── docker_remote_api_v1.6.md
│ │ │ │ │ │ │ ├── docker_remote_api_v1.7.md
│ │ │ │ │ │ │ ├── docker_remote_api_v1.8.md
│ │ │ │ │ │ │ ├── docker_remote_api_v1.9.md
│ │ │ │ │ │ │ ├── hub_registry_spec.md
│ │ │ │ │ │ │ ├── images/
│ │ │ │ │ │ │ │ └── event_state.gliffy
│ │ │ │ │ │ │ ├── registry_api.md
│ │ │ │ │ │ │ ├── registry_api_client_libraries.md
│ │ │ │ │ │ │ └── remote_api_client_libraries.md
│ │ │ │ │ │ ├── builder.md
│ │ │ │ │ │ ├── commandline/
│ │ │ │ │ │ │ ├── attach.md
│ │ │ │ │ │ │ ├── build.md
│ │ │ │ │ │ │ ├── cli.md
│ │ │ │ │ │ │ ├── commit.md
│ │ │ │ │ │ │ ├── cp.md
│ │ │ │ │ │ │ ├── create.md
│ │ │ │ │ │ │ ├── daemon.md
│ │ │ │ │ │ │ ├── diff.md
│ │ │ │ │ │ │ ├── events.md
│ │ │ │ │ │ │ ├── exec.md
│ │ │ │ │ │ │ ├── export.md
│ │ │ │ │ │ │ ├── history.md
│ │ │ │ │ │ │ ├── images.md
│ │ │ │ │ │ │ ├── import.md
│ │ │ │ │ │ │ ├── info.md
│ │ │ │ │ │ │ ├── inspect.md
│ │ │ │ │ │ │ ├── kill.md
│ │ │ │ │ │ │ ├── load.md
│ │ │ │ │ │ │ ├── login.md
│ │ │ │ │ │ │ ├── logout.md
│ │ │ │ │ │ │ ├── logs.md
│ │ │ │ │ │ │ ├── pause.md
│ │ │ │ │ │ │ ├── port.md
│ │ │ │ │ │ │ ├── ps.md
│ │ │ │ │ │ │ ├── pull.md
│ │ │ │ │ │ │ ├── push.md
│ │ │ │ │ │ │ ├── rename.md
│ │ │ │ │ │ │ ├── restart.md
│ │ │ │ │ │ │ ├── rm.md
│ │ │ │ │ │ │ ├── rmi.md
│ │ │ │ │ │ │ ├── run.md
│ │ │ │ │ │ │ ├── save.md
│ │ │ │ │ │ │ ├── search.md
│ │ │ │ │ │ │ ├── start.md
│ │ │ │ │ │ │ ├── stats.md
│ │ │ │ │ │ │ ├── stop.md
│ │ │ │ │ │ │ ├── tag.md
│ │ │ │ │ │ │ ├── top.md
│ │ │ │ │ │ │ ├── unpause.md
│ │ │ │ │ │ │ ├── version.md
│ │ │ │ │ │ │ └── wait.md
│ │ │ │ │ │ ├── glossary.md
│ │ │ │ │ │ ├── logging/
│ │ │ │ │ │ │ ├── fluentd.md
│ │ │ │ │ │ │ ├── index.md
│ │ │ │ │ │ │ └── journald.md
│ │ │ │ │ │ └── run.md
│ │ │ │ │ ├── static_files/
│ │ │ │ │ │ └── README.md
│ │ │ │ │ ├── terms/
│ │ │ │ │ │ ├── container.md
│ │ │ │ │ │ ├── filesystem.md
│ │ │ │ │ │ ├── image.md
│ │ │ │ │ │ ├── layer.md
│ │ │ │ │ │ ├── registry.md
│ │ │ │ │ │ └── repository.md
│ │ │ │ │ ├── touch-up.sh
│ │ │ │ │ └── userguide/
│ │ │ │ │ ├── dockerhub.md
│ │ │ │ │ ├── dockerimages.md
│ │ │ │ │ ├── dockerizing.md
│ │ │ │ │ ├── dockerlinks.md
│ │ │ │ │ ├── dockerrepos.md
│ │ │ │ │ ├── dockervolumes.md
│ │ │ │ │ ├── index.md
│ │ │ │ │ ├── labels-custom-metadata.md
│ │ │ │ │ ├── level1.md
│ │ │ │ │ ├── level2.md
│ │ │ │ │ └── usingdocker.md
│ │ │ │ ├── experimental/
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── compose_swarm_networking.md
│ │ │ │ │ ├── networking.md
│ │ │ │ │ ├── networking_api.md
│ │ │ │ │ └── plugins_network.md
│ │ │ │ ├── graph/
│ │ │ │ │ ├── export.go
│ │ │ │ │ ├── graph.go
│ │ │ │ │ ├── graph_test.go
│ │ │ │ │ ├── graph_unix.go
│ │ │ │ │ ├── graph_windows.go
│ │ │ │ │ ├── history.go
│ │ │ │ │ ├── import.go
│ │ │ │ │ ├── list.go
│ │ │ │ │ ├── load.go
│ │ │ │ │ ├── load_unsupported.go
│ │ │ │ │ ├── mutex.go
│ │ │ │ │ ├── pools_test.go
│ │ │ │ │ ├── pull.go
│ │ │ │ │ ├── pull_v1.go
│ │ │ │ │ ├── pull_v2.go
│ │ │ │ │ ├── push.go
│ │ │ │ │ ├── push_v1.go
│ │ │ │ │ ├── push_v2.go
│ │ │ │ │ ├── registry.go
│ │ │ │ │ ├── service.go
│ │ │ │ │ ├── tags/
│ │ │ │ │ │ ├── tags.go
│ │ │ │ │ │ └── tags_unit_test.go
│ │ │ │ │ ├── tags.go
│ │ │ │ │ └── tags_unit_test.go
│ │ │ │ ├── hack/
│ │ │ │ │ ├── .vendor-helpers.sh
│ │ │ │ │ ├── dind
│ │ │ │ │ ├── generate-authors.sh
│ │ │ │ │ ├── install.sh
│ │ │ │ │ ├── make/
│ │ │ │ │ │ ├── .build-deb/
│ │ │ │ │ │ │ ├── compat
│ │ │ │ │ │ │ ├── control
│ │ │ │ │ │ │ ├── docker-engine.bash-completion
│ │ │ │ │ │ │ ├── docker-engine.install
│ │ │ │ │ │ │ ├── docker-engine.manpages
│ │ │ │ │ │ │ ├── docker-engine.postinst
│ │ │ │ │ │ │ ├── docs
│ │ │ │ │ │ │ └── rules
│ │ │ │ │ │ ├── .build-rpm/
│ │ │ │ │ │ │ └── docker-engine.spec
│ │ │ │ │ │ ├── .dockerinit
│ │ │ │ │ │ ├── .dockerinit-gccgo
│ │ │ │ │ │ ├── .ensure-emptyfs
│ │ │ │ │ │ ├── .ensure-frozen-images
│ │ │ │ │ │ ├── .ensure-httpserver
│ │ │ │ │ │ ├── .go-autogen
│ │ │ │ │ │ ├── .go-compile-test-dir
│ │ │ │ │ │ ├── .integration-daemon-setup
│ │ │ │ │ │ ├── .integration-daemon-start
│ │ │ │ │ │ ├── .integration-daemon-stop
│ │ │ │ │ │ ├── .resources-windows/
│ │ │ │ │ │ │ └── docker.exe.manifest
│ │ │ │ │ │ ├── .validate
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── binary
│ │ │ │ │ │ ├── build-deb
│ │ │ │ │ │ ├── build-rpm
│ │ │ │ │ │ ├── cover
│ │ │ │ │ │ ├── cross
│ │ │ │ │ │ ├── dynbinary
│ │ │ │ │ │ ├── dyngccgo
│ │ │ │ │ │ ├── gccgo
│ │ │ │ │ │ ├── release-deb
│ │ │ │ │ │ ├── release-rpm
│ │ │ │ │ │ ├── sign-repos
│ │ │ │ │ │ ├── test-docker-py
│ │ │ │ │ │ ├── test-integration-cli
│ │ │ │ │ │ ├── test-unit
│ │ │ │ │ │ ├── tgz
│ │ │ │ │ │ ├── ubuntu
│ │ │ │ │ │ ├── validate-dco
│ │ │ │ │ │ ├── validate-gofmt
│ │ │ │ │ │ ├── validate-lint
│ │ │ │ │ │ ├── validate-pkg
│ │ │ │ │ │ ├── validate-test
│ │ │ │ │ │ ├── validate-toml
│ │ │ │ │ │ └── validate-vet
│ │ │ │ │ ├── make.sh
│ │ │ │ │ ├── release.sh
│ │ │ │ │ └── vendor.sh
│ │ │ │ ├── image/
│ │ │ │ │ ├── image.go
│ │ │ │ │ └── spec/
│ │ │ │ │ └── v1.md
│ │ │ │ ├── integration-cli/
│ │ │ │ │ ├── check_test.go
│ │ │ │ │ ├── docker_api_attach_test.go
│ │ │ │ │ ├── docker_api_containers_test.go
│ │ │ │ │ ├── docker_api_events_test.go
│ │ │ │ │ ├── docker_api_exec_resize_test.go
│ │ │ │ │ ├── docker_api_exec_test.go
│ │ │ │ │ ├── docker_api_images_test.go
│ │ │ │ │ ├── docker_api_info_test.go
│ │ │ │ │ ├── docker_api_inspect_test.go
│ │ │ │ │ ├── docker_api_logs_test.go
│ │ │ │ │ ├── docker_api_network_test.go
│ │ │ │ │ ├── docker_api_resize_test.go
│ │ │ │ │ ├── docker_api_service_test.go
│ │ │ │ │ ├── docker_api_stats_test.go
│ │ │ │ │ ├── docker_api_test.go
│ │ │ │ │ ├── docker_api_version_test.go
│ │ │ │ │ ├── docker_cli_attach_test.go
│ │ │ │ │ ├── docker_cli_attach_unix_test.go
│ │ │ │ │ ├── docker_cli_build_test.go
│ │ │ │ │ ├── docker_cli_build_unix_test.go
│ │ │ │ │ ├── docker_cli_by_digest_test.go
│ │ │ │ │ ├── docker_cli_commit_test.go
│ │ │ │ │ ├── docker_cli_config_test.go
│ │ │ │ │ ├── docker_cli_cp_from_container_test.go
│ │ │ │ │ ├── docker_cli_cp_test.go
│ │ │ │ │ ├── docker_cli_cp_to_container_test.go
│ │ │ │ │ ├── docker_cli_cp_utils.go
│ │ │ │ │ ├── docker_cli_create_test.go
│ │ │ │ │ ├── docker_cli_daemon_experimental_test.go
│ │ │ │ │ ├── docker_cli_daemon_test.go
│ │ │ │ │ ├── docker_cli_diff_test.go
│ │ │ │ │ ├── docker_cli_events_test.go
│ │ │ │ │ ├── docker_cli_events_unix_test.go
│ │ │ │ │ ├── docker_cli_exec_test.go
│ │ │ │ │ ├── docker_cli_exec_unix_test.go
│ │ │ │ │ ├── docker_cli_experimental_test.go
│ │ │ │ │ ├── docker_cli_export_import_test.go
│ │ │ │ │ ├── docker_cli_help_test.go
│ │ │ │ │ ├── docker_cli_history_test.go
│ │ │ │ │ ├── docker_cli_images_test.go
│ │ │ │ │ ├── docker_cli_import_test.go
│ │ │ │ │ ├── docker_cli_info_test.go
│ │ │ │ │ ├── docker_cli_inspect_experimental_test.go
│ │ │ │ │ ├── docker_cli_inspect_test.go
│ │ │ │ │ ├── docker_cli_kill_test.go
│ │ │ │ │ ├── docker_cli_links_test.go
│ │ │ │ │ ├── docker_cli_links_unix_test.go
│ │ │ │ │ ├── docker_cli_login_test.go
│ │ │ │ │ ├── docker_cli_logs_test.go
│ │ │ │ │ ├── docker_cli_nat_test.go
│ │ │ │ │ ├── docker_cli_network_test.go
│ │ │ │ │ ├── docker_cli_pause_test.go
│ │ │ │ │ ├── docker_cli_port_test.go
│ │ │ │ │ ├── docker_cli_port_unix_test.go
│ │ │ │ │ ├── docker_cli_proxy_test.go
│ │ │ │ │ ├── docker_cli_ps_test.go
│ │ │ │ │ ├── docker_cli_pull_test.go
│ │ │ │ │ ├── docker_cli_push_test.go
│ │ │ │ │ ├── docker_cli_rename_test.go
│ │ │ │ │ ├── docker_cli_restart_test.go
│ │ │ │ │ ├── docker_cli_rm_test.go
│ │ │ │ │ ├── docker_cli_rmi_test.go
│ │ │ │ │ ├── docker_cli_run_test.go
│ │ │ │ │ ├── docker_cli_run_unix_test.go
│ │ │ │ │ ├── docker_cli_save_load_test.go
│ │ │ │ │ ├── docker_cli_save_load_unix_test.go
│ │ │ │ │ ├── docker_cli_search_test.go
│ │ │ │ │ ├── docker_cli_service_test.go
│ │ │ │ │ ├── docker_cli_start_test.go
│ │ │ │ │ ├── docker_cli_start_volume_driver_unix_test.go
│ │ │ │ │ ├── docker_cli_stats_test.go
│ │ │ │ │ ├── docker_cli_tag_test.go
│ │ │ │ │ ├── docker_cli_top_test.go
│ │ │ │ │ ├── docker_cli_version_test.go
│ │ │ │ │ ├── docker_cli_wait_test.go
│ │ │ │ │ ├── docker_test_vars.go
│ │ │ │ │ ├── docker_test_vars_cli.go
│ │ │ │ │ ├── docker_test_vars_daemon.go
│ │ │ │ │ ├── docker_utils.go
│ │ │ │ │ ├── fixtures/
│ │ │ │ │ │ └── https/
│ │ │ │ │ │ ├── ca.pem
│ │ │ │ │ │ ├── client-cert.pem
│ │ │ │ │ │ ├── client-key.pem
│ │ │ │ │ │ ├── client-rogue-cert.pem
│ │ │ │ │ │ ├── client-rogue-key.pem
│ │ │ │ │ │ ├── server-cert.pem
│ │ │ │ │ │ ├── server-key.pem
│ │ │ │ │ │ ├── server-rogue-cert.pem
│ │ │ │ │ │ └── server-rogue-key.pem
│ │ │ │ │ ├── registry.go
│ │ │ │ │ ├── requirements.go
│ │ │ │ │ ├── requirements_unix.go
│ │ │ │ │ ├── test_vars_exec.go
│ │ │ │ │ ├── test_vars_noexec.go
│ │ │ │ │ ├── test_vars_unix.go
│ │ │ │ │ ├── test_vars_windows.go
│ │ │ │ │ └── utils.go
│ │ │ │ ├── links/
│ │ │ │ │ ├── links.go
│ │ │ │ │ └── links_test.go
│ │ │ │ ├── man/
│ │ │ │ │ ├── Dockerfile
│ │ │ │ │ ├── Dockerfile.5.md
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── docker-attach.1.md
│ │ │ │ │ ├── docker-build.1.md
│ │ │ │ │ ├── docker-commit.1.md
│ │ │ │ │ ├── docker-cp.1.md
│ │ │ │ │ ├── docker-create.1.md
│ │ │ │ │ ├── docker-diff.1.md
│ │ │ │ │ ├── docker-events.1.md
│ │ │ │ │ ├── docker-exec.1.md
│ │ │ │ │ ├── docker-export.1.md
│ │ │ │ │ ├── docker-history.1.md
│ │ │ │ │ ├── docker-images.1.md
│ │ │ │ │ ├── docker-import.1.md
│ │ │ │ │ ├── docker-info.1.md
│ │ │ │ │ ├── docker-inspect.1.md
│ │ │ │ │ ├── docker-kill.1.md
│ │ │ │ │ ├── docker-load.1.md
│ │ │ │ │ ├── docker-login.1.md
│ │ │ │ │ ├── docker-logout.1.md
│ │ │ │ │ ├── docker-logs.1.md
│ │ │ │ │ ├── docker-pause.1.md
│ │ │ │ │ ├── docker-port.1.md
│ │ │ │ │ ├── docker-ps.1.md
│ │ │ │ │ ├── docker-pull.1.md
│ │ │ │ │ ├── docker-push.1.md
│ │ │ │ │ ├── docker-rename.1.md
│ │ │ │ │ ├── docker-restart.1.md
│ │ │ │ │ ├── docker-rm.1.md
│ │ │ │ │ ├── docker-rmi.1.md
│ │ │ │ │ ├── docker-run.1.md
│ │ │ │ │ ├── docker-save.1.md
│ │ │ │ │ ├── docker-search.1.md
│ │ │ │ │ ├── docker-start.1.md
│ │ │ │ │ ├── docker-stats.1.md
│ │ │ │ │ ├── docker-stop.1.md
│ │ │ │ │ ├── docker-tag.1.md
│ │ │ │ │ ├── docker-top.1.md
│ │ │ │ │ ├── docker-unpause.1.md
│ │ │ │ │ ├── docker-version.1.md
│ │ │ │ │ ├── docker-wait.1.md
│ │ │ │ │ ├── docker.1.md
│ │ │ │ │ └── md2man-all.sh
│ │ │ │ ├── opts/
│ │ │ │ │ ├── envfile.go
│ │ │ │ │ ├── envfile_test.go
│ │ │ │ │ ├── ip.go
│ │ │ │ │ ├── ip_test.go
│ │ │ │ │ ├── opts.go
│ │ │ │ │ ├── opts_test.go
│ │ │ │ │ ├── ulimit.go
│ │ │ │ │ └── ulimit_test.go
│ │ │ │ ├── pkg/
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── archive/
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── archive.go
│ │ │ │ │ │ ├── archive_test.go
│ │ │ │ │ │ ├── archive_unix.go
│ │ │ │ │ │ ├── archive_unix_test.go
│ │ │ │ │ │ ├── archive_windows.go
│ │ │ │ │ │ ├── archive_windows_test.go
│ │ │ │ │ │ ├── changes.go
│ │ │ │ │ │ ├── changes_linux.go
│ │ │ │ │ │ ├── changes_other.go
│ │ │ │ │ │ ├── changes_posix_test.go
│ │ │ │ │ │ ├── changes_test.go
│ │ │ │ │ │ ├── changes_unix.go
│ │ │ │ │ │ ├── changes_windows.go
│ │ │ │ │ │ ├── copy.go
│ │ │ │ │ │ ├── copy_test.go
│ │ │ │ │ │ ├── diff.go
│ │ │ │ │ │ ├── diff_test.go
│ │ │ │ │ │ ├── example_changes.go
│ │ │ │ │ │ ├── time_linux.go
│ │ │ │ │ │ ├── time_unsupported.go
│ │ │ │ │ │ ├── utils_test.go
│ │ │ │ │ │ ├── wrap.go
│ │ │ │ │ │ └── wrap_test.go
│ │ │ │ │ ├── broadcastwriter/
│ │ │ │ │ │ ├── broadcastwriter.go
│ │ │ │ │ │ └── broadcastwriter_test.go
│ │ │ │ │ ├── chrootarchive/
│ │ │ │ │ │ ├── archive.go
│ │ │ │ │ │ ├── archive_test.go
│ │ │ │ │ │ ├── archive_unix.go
│ │ │ │ │ │ ├── archive_windows.go
│ │ │ │ │ │ ├── diff_unix.go
│ │ │ │ │ │ ├── diff_windows.go
│ │ │ │ │ │ ├── init_unix.go
│ │ │ │ │ │ └── init_windows.go
│ │ │ │ │ ├── devicemapper/
│ │ │ │ │ │ ├── attach_loopback.go
│ │ │ │ │ │ ├── devmapper.go
│ │ │ │ │ │ ├── devmapper_log.go
│ │ │ │ │ │ ├── devmapper_wrapper.go
│ │ │ │ │ │ ├── devmapper_wrapper_deferred_remove.go
│ │ │ │ │ │ ├── devmapper_wrapper_no_deferred_remove.go
│ │ │ │ │ │ ├── ioctl.go
│ │ │ │ │ │ └── log.go
│ │ │ │ │ ├── directory/
│ │ │ │ │ │ ├── directory_linux.go
│ │ │ │ │ │ ├── directory_test.go
│ │ │ │ │ │ └── directory_windows.go
│ │ │ │ │ ├── fileutils/
│ │ │ │ │ │ ├── fileutils.go
│ │ │ │ │ │ └── fileutils_test.go
│ │ │ │ │ ├── graphdb/
│ │ │ │ │ │ ├── conn_sqlite3.go
│ │ │ │ │ │ ├── conn_sqlite3_unix.go
│ │ │ │ │ │ ├── conn_sqlite3_windows.go
│ │ │ │ │ │ ├── conn_unsupported.go
│ │ │ │ │ │ ├── graphdb.go
│ │ │ │ │ │ ├── graphdb_test.go
│ │ │ │ │ │ ├── sort.go
│ │ │ │ │ │ ├── sort_test.go
│ │ │ │ │ │ └── utils.go
│ │ │ │ │ ├── homedir/
│ │ │ │ │ │ ├── homedir.go
│ │ │ │ │ │ └── homedir_test.go
│ │ │ │ │ ├── httputils/
│ │ │ │ │ │ ├── httputils.go
│ │ │ │ │ │ ├── mimetype.go
│ │ │ │ │ │ ├── resumablerequestreader.go
│ │ │ │ │ │ └── resumablerequestreader_test.go
│ │ │ │ │ ├── ioutils/
│ │ │ │ │ │ ├── fmt.go
│ │ │ │ │ │ ├── fmt_test.go
│ │ │ │ │ │ ├── readers.go
│ │ │ │ │ │ ├── readers_test.go
│ │ │ │ │ │ ├── scheduler.go
│ │ │ │ │ │ ├── scheduler_gccgo.go
│ │ │ │ │ │ ├── writeflusher.go
│ │ │ │ │ │ ├── writers.go
│ │ │ │ │ │ └── writers_test.go
│ │ │ │ │ ├── jsonlog/
│ │ │ │ │ │ ├── jsonlog.go
│ │ │ │ │ │ ├── jsonlog_marshalling.go
│ │ │ │ │ │ ├── jsonlog_marshalling_test.go
│ │ │ │ │ │ ├── jsonlog_test.go
│ │ │ │ │ │ ├── jsonlogbytes.go
│ │ │ │ │ │ └── jsonlogbytes_test.go
│ │ │ │ │ ├── jsonmessage/
│ │ │ │ │ │ ├── jsonmessage.go
│ │ │ │ │ │ └── jsonmessage_test.go
│ │ │ │ │ ├── listenbuffer/
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── buffer.go
│ │ │ │ │ │ └── listen_buffer_test.go
│ │ │ │ │ ├── mflag/
│ │ │ │ │ │ ├── LICENSE
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── example/
│ │ │ │ │ │ │ └── example.go
│ │ │ │ │ │ ├── flag.go
│ │ │ │ │ │ └── flag_test.go
│ │ │ │ │ ├── mount/
│ │ │ │ │ │ ├── flags.go
│ │ │ │ │ │ ├── flags_freebsd.go
│ │ │ │ │ │ ├── flags_linux.go
│ │ │ │ │ │ ├── flags_unsupported.go
│ │ │ │ │ │ ├── mount.go
│ │ │ │ │ │ ├── mount_test.go
│ │ │ │ │ │ ├── mounter_freebsd.go
│ │ │ │ │ │ ├── mounter_linux.go
│ │ │ │ │ │ ├── mounter_unsupported.go
│ │ │ │ │ │ ├── mountinfo.go
│ │ │ │ │ │ ├── mountinfo_freebsd.go
│ │ │ │ │ │ ├── mountinfo_linux.go
│ │ │ │ │ │ ├── mountinfo_linux_test.go
│ │ │ │ │ │ ├── mountinfo_unsupported.go
│ │ │ │ │ │ ├── sharedsubtree_linux.go
│ │ │ │ │ │ └── sharedsubtree_linux_test.go
│ │ │ │ │ ├── namesgenerator/
│ │ │ │ │ │ ├── names-generator.go
│ │ │ │ │ │ └── names-generator_test.go
│ │ │ │ │ ├── nat/
│ │ │ │ │ │ ├── nat.go
│ │ │ │ │ │ ├── nat_test.go
│ │ │ │ │ │ ├── sort.go
│ │ │ │ │ │ └── sort_test.go
│ │ │ │ │ ├── parsers/
│ │ │ │ │ │ ├── filters/
│ │ │ │ │ │ │ ├── parse.go
│ │ │ │ │ │ │ └── parse_test.go
│ │ │ │ │ │ ├── kernel/
│ │ │ │ │ │ │ ├── kernel.go
│ │ │ │ │ │ │ ├── kernel_test.go
│ │ │ │ │ │ │ ├── kernel_windows.go
│ │ │ │ │ │ │ ├── uname_linux.go
│ │ │ │ │ │ │ └── uname_unsupported.go
│ │ │ │ │ │ ├── operatingsystem/
│ │ │ │ │ │ │ ├── operatingsystem_linux.go
│ │ │ │ │ │ │ ├── operatingsystem_test.go
│ │ │ │ │ │ │ └── operatingsystem_windows.go
│ │ │ │ │ │ ├── parsers.go
│ │ │ │ │ │ └── parsers_test.go
│ │ │ │ │ ├── pidfile/
│ │ │ │ │ │ ├── pidfile.go
│ │ │ │ │ │ └── pidfile_test.go
│ │ │ │ │ ├── plugins/
│ │ │ │ │ │ ├── client.go
│ │ │ │ │ │ ├── client_test.go
│ │ │ │ │ │ ├── discovery.go
│ │ │ │ │ │ ├── discovery_test.go
│ │ │ │ │ │ ├── pluginrpc-gen/
│ │ │ │ │ │ │ ├── fixtures/
│ │ │ │ │ │ │ │ └── foo.go
│ │ │ │ │ │ │ ├── main.go
│ │ │ │ │ │ │ ├── parser.go
│ │ │ │ │ │ │ ├── parser_test.go
│ │ │ │ │ │ │ └── template.go
│ │ │ │ │ │ └── plugins.go
│ │ │ │ │ ├── pools/
│ │ │ │ │ │ ├── pools.go
│ │ │ │ │ │ └── pools_test.go
│ │ │ │ │ ├── progressreader/
│ │ │ │ │ │ ├── progressreader.go
│ │ │ │ │ │ └── progressreader_test.go
│ │ │ │ │ ├── promise/
│ │ │ │ │ │ └── promise.go
│ │ │ │ │ ├── proxy/
│ │ │ │ │ │ ├── network_proxy_test.go
│ │ │ │ │ │ ├── proxy.go
│ │ │ │ │ │ ├── stub_proxy.go
│ │ │ │ │ │ ├── tcp_proxy.go
│ │ │ │ │ │ └── udp_proxy.go
│ │ │ │ │ ├── pubsub/
│ │ │ │ │ │ ├── publisher.go
│ │ │ │ │ │ └── publisher_test.go
│ │ │ │ │ ├── random/
│ │ │ │ │ │ ├── random.go
│ │ │ │ │ │ └── random_test.go
│ │ │ │ │ ├── reexec/
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── command_linux.go
│ │ │ │ │ │ ├── command_unsupported.go
│ │ │ │ │ │ ├── command_windows.go
│ │ │ │ │ │ └── reexec.go
│ │ │ │ │ ├── signal/
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── signal.go
│ │ │ │ │ │ ├── signal_darwin.go
│ │ │ │ │ │ ├── signal_freebsd.go
│ │ │ │ │ │ ├── signal_linux.go
│ │ │ │ │ │ ├── signal_unix.go
│ │ │ │ │ │ ├── signal_unsupported.go
│ │ │ │ │ │ ├── signal_windows.go
│ │ │ │ │ │ └── trap.go
│ │ │ │ │ ├── sockets/
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── tcp_socket.go
│ │ │ │ │ │ └── unix_socket.go
│ │ │ │ │ ├── stdcopy/
│ │ │ │ │ │ ├── stdcopy.go
│ │ │ │ │ │ └── stdcopy_test.go
│ │ │ │ │ ├── streamformatter/
│ │ │ │ │ │ ├── streamformatter.go
│ │ │ │ │ │ └── streamformatter_test.go
│ │ │ │ │ ├── stringid/
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── stringid.go
│ │ │ │ │ │ └── stringid_test.go
│ │ │ │ │ ├── stringutils/
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── stringutils.go
│ │ │ │ │ │ └── stringutils_test.go
│ │ │ │ │ ├── symlink/
│ │ │ │ │ │ ├── LICENSE.APACHE
│ │ │ │ │ │ ├── LICENSE.BSD
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── fs.go
│ │ │ │ │ │ └── fs_test.go
│ │ │ │ │ ├── sysinfo/
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── sysinfo.go
│ │ │ │ │ │ ├── sysinfo_linux.go
│ │ │ │ │ │ ├── sysinfo_linux_test.go
│ │ │ │ │ │ └── sysinfo_windows.go
│ │ │ │ │ ├── system/
│ │ │ │ │ │ ├── errors.go
│ │ │ │ │ │ ├── events_windows.go
│ │ │ │ │ │ ├── filesys.go
│ │ │ │ │ │ ├── filesys_windows.go
│ │ │ │ │ │ ├── lstat.go
│ │ │ │ │ │ ├── lstat_test.go
│ │ │ │ │ │ ├── lstat_windows.go
│ │ │ │ │ │ ├── meminfo.go
│ │ │ │ │ │ ├── meminfo_linux.go
│ │ │ │ │ │ ├── meminfo_linux_test.go
│ │ │ │ │ │ ├── meminfo_unsupported.go
│ │ │ │ │ │ ├── meminfo_windows.go
│ │ │ │ │ │ ├── mknod.go
│ │ │ │ │ │ ├── mknod_windows.go
│ │ │ │ │ │ ├── stat.go
│ │ │ │ │ │ ├── stat_linux.go
│ │ │ │ │ │ ├── stat_test.go
│ │ │ │ │ │ ├── stat_unsupported.go
│ │ │ │ │ │ ├── stat_windows.go
│ │ │ │ │ │ ├── umask.go
│ │ │ │ │ │ ├── umask_windows.go
│ │ │ │ │ │ ├── utimes_darwin.go
│ │ │ │ │ │ ├── utimes_freebsd.go
│ │ │ │ │ │ ├── utimes_linux.go
│ │ │ │ │ │ ├── utimes_test.go
│ │ │ │ │ │ ├── utimes_unsupported.go
│ │ │ │ │ │ ├── xattrs_linux.go
│ │ │ │ │ │ └── xattrs_unsupported.go
│ │ │ │ │ ├── systemd/
│ │ │ │ │ │ ├── booted.go
│ │ │ │ │ │ ├── listendfd.go
│ │ │ │ │ │ └── sd_notify.go
│ │ │ │ │ ├── tailfile/
│ │ │ │ │ │ ├── tailfile.go
│ │ │ │ │ │ └── tailfile_test.go
│ │ │ │ │ ├── tarsum/
│ │ │ │ │ │ ├── builder_context.go
│ │ │ │ │ │ ├── builder_context_test.go
│ │ │ │ │ │ ├── fileinfosums.go
│ │ │ │ │ │ ├── fileinfosums_test.go
│ │ │ │ │ │ ├── tarsum.go
│ │ │ │ │ │ ├── tarsum_spec.md
│ │ │ │ │ │ ├── tarsum_test.go
│ │ │ │ │ │ ├── testdata/
│ │ │ │ │ │ │ ├── 46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/
│ │ │ │ │ │ │ │ └── json
│ │ │ │ │ │ │ ├── 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/
│ │ │ │ │ │ │ │ └── json
│ │ │ │ │ │ │ └── xattr/
│ │ │ │ │ │ │ └── json
│ │ │ │ │ │ ├── versioning.go
│ │ │ │ │ │ ├── versioning_test.go
│ │ │ │ │ │ └── writercloser.go
│ │ │ │ │ ├── term/
│ │ │ │ │ │ ├── tc_linux_cgo.go
│ │ │ │ │ │ ├── tc_other.go
│ │ │ │ │ │ ├── term.go
│ │ │ │ │ │ ├── term_windows.go
│ │ │ │ │ │ ├── termios_darwin.go
│ │ │ │ │ │ ├── termios_freebsd.go
│ │ │ │ │ │ ├── termios_linux.go
│ │ │ │ │ │ └── winconsole/
│ │ │ │ │ │ ├── console_windows.go
│ │ │ │ │ │ ├── console_windows_test.go
│ │ │ │ │ │ ├── term_emulator.go
│ │ │ │ │ │ └── term_emulator_test.go
│ │ │ │ │ ├── timeoutconn/
│ │ │ │ │ │ ├── timeoutconn.go
│ │ │ │ │ │ └── timeoutconn_test.go
│ │ │ │ │ ├── timeutils/
│ │ │ │ │ │ ├── json.go
│ │ │ │ │ │ ├── json_test.go
│ │ │ │ │ │ ├── utils.go
│ │ │ │ │ │ └── utils_test.go
│ │ │ │ │ ├── tlsconfig/
│ │ │ │ │ │ └── config.go
│ │ │ │ │ ├── truncindex/
│ │ │ │ │ │ ├── truncindex.go
│ │ │ │ │ │ └── truncindex_test.go
│ │ │ │ │ ├── ulimit/
│ │ │ │ │ │ ├── ulimit.go
│ │ │ │ │ │ └── ulimit_test.go
│ │ │ │ │ ├── units/
│ │ │ │ │ │ ├── duration.go
│ │ │ │ │ │ ├── duration_test.go
│ │ │ │ │ │ ├── size.go
│ │ │ │ │ │ └── size_test.go
│ │ │ │ │ ├── urlutil/
│ │ │ │ │ │ ├── urlutil.go
│ │ │ │ │ │ └── urlutil_test.go
│ │ │ │ │ ├── useragent/
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── useragent.go
│ │ │ │ │ │ └── useragent_test.go
│ │ │ │ │ └── version/
│ │ │ │ │ ├── version.go
│ │ │ │ │ └── version_test.go
│ │ │ │ ├── project/
│ │ │ │ │ ├── GOVERNANCE.md
│ │ │ │ │ ├── IRC-ADMINISTRATION.md
│ │ │ │ │ ├── ISSUE-TRIAGE.md
│ │ │ │ │ ├── PACKAGERS.md
│ │ │ │ │ ├── PRINCIPLES.md
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── RELEASE-CHECKLIST.md
│ │ │ │ │ ├── REVIEWING.md
│ │ │ │ │ └── TOOLS.md
│ │ │ │ ├── registry/
│ │ │ │ │ ├── auth.go
│ │ │ │ │ ├── auth_test.go
│ │ │ │ │ ├── authchallenge.go
│ │ │ │ │ ├── config.go
│ │ │ │ │ ├── config_test.go
│ │ │ │ │ ├── endpoint.go
│ │ │ │ │ ├── endpoint_test.go
│ │ │ │ │ ├── registry.go
│ │ │ │ │ ├── registry_mock_test.go
│ │ │ │ │ ├── registry_test.go
│ │ │ │ │ ├── service.go
│ │ │ │ │ ├── session.go
│ │ │ │ │ ├── token.go
│ │ │ │ │ └── types.go
│ │ │ │ ├── runconfig/
│ │ │ │ │ ├── compare.go
│ │ │ │ │ ├── compare_test.go
│ │ │ │ │ ├── config.go
│ │ │ │ │ ├── config_test.go
│ │ │ │ │ ├── exec.go
│ │ │ │ │ ├── exec_test.go
│ │ │ │ │ ├── fixtures/
│ │ │ │ │ │ ├── container_config_1_14.json
│ │ │ │ │ │ ├── container_config_1_17.json
│ │ │ │ │ │ ├── container_config_1_19.json
│ │ │ │ │ │ ├── container_hostconfig_1_14.json
│ │ │ │ │ │ ├── container_hostconfig_1_19.json
│ │ │ │ │ │ ├── valid.env
│ │ │ │ │ │ └── valid.label
│ │ │ │ │ ├── hostconfig.go
│ │ │ │ │ ├── hostconfig_test.go
│ │ │ │ │ ├── hostconfig_unix.go
│ │ │ │ │ ├── hostconfig_windows.go
│ │ │ │ │ ├── merge.go
│ │ │ │ │ ├── merge_test.go
│ │ │ │ │ ├── parse.go
│ │ │ │ │ ├── parse_experimental.go
│ │ │ │ │ ├── parse_stub.go
│ │ │ │ │ ├── parse_test.go
│ │ │ │ │ ├── parse_unix.go
│ │ │ │ │ └── parse_windows.go
│ │ │ │ ├── trust/
│ │ │ │ │ ├── service.go
│ │ │ │ │ └── trusts.go
│ │ │ │ ├── utils/
│ │ │ │ │ ├── experimental.go
│ │ │ │ │ ├── git.go
│ │ │ │ │ ├── git_test.go
│ │ │ │ │ ├── stubs.go
│ │ │ │ │ ├── utils.go
│ │ │ │ │ └── utils_test.go
│ │ │ │ └── volume/
│ │ │ │ ├── drivers/
│ │ │ │ │ ├── adapter.go
│ │ │ │ │ ├── api.go
│ │ │ │ │ ├── extpoint.go
│ │ │ │ │ ├── proxy.go
│ │ │ │ │ └── proxy_test.go
│ │ │ │ ├── local/
│ │ │ │ │ └── local.go
│ │ │ │ └── volume.go
│ │ │ ├── go-units/
│ │ │ │ ├── CONTRIBUTING.md
│ │ │ │ ├── LICENSE.code
│ │ │ │ ├── LICENSE.docs
│ │ │ │ ├── MAINTAINERS
│ │ │ │ ├── README.md
│ │ │ │ ├── circle.yml
│ │ │ │ ├── duration.go
│ │ │ │ ├── duration_test.go
│ │ │ │ ├── size.go
│ │ │ │ ├── size_test.go
│ │ │ │ ├── ulimit.go
│ │ │ │ └── ulimit_test.go
│ │ │ └── libcontainer/
│ │ │ ├── .gitignore
│ │ │ ├── CONTRIBUTING.md
│ │ │ ├── Dockerfile
│ │ │ ├── LICENSE
│ │ │ ├── MAINTAINERS
│ │ │ ├── MAINTAINERS_GUIDE.md
│ │ │ ├── Makefile
│ │ │ ├── NOTICE
│ │ │ ├── PRINCIPLES.md
│ │ │ ├── README.md
│ │ │ ├── ROADMAP.md
│ │ │ ├── SPEC.md
│ │ │ ├── apparmor/
│ │ │ │ ├── apparmor.go
│ │ │ │ ├── apparmor_disabled.go
│ │ │ │ ├── gen.go
│ │ │ │ └── setup.go
│ │ │ ├── capabilities_linux.go
│ │ │ ├── cgroups/
│ │ │ │ ├── cgroups.go
│ │ │ │ ├── cgroups_test.go
│ │ │ │ ├── cgroups_unsupported.go
│ │ │ │ ├── fs/
│ │ │ │ │ ├── apply_raw.go
│ │ │ │ │ ├── blkio.go
│ │ │ │ │ ├── blkio_test.go
│ │ │ │ │ ├── cpu.go
│ │ │ │ │ ├── cpu_test.go
│ │ │ │ │ ├── cpuacct.go
│ │ │ │ │ ├── cpuset.go
│ │ │ │ │ ├── cpuset_test.go
│ │ │ │ │ ├── devices.go
│ │ │ │ │ ├── devices_test.go
│ │ │ │ │ ├── freezer.go
│ │ │ │ │ ├── freezer_test.go
│ │ │ │ │ ├── fs_unsupported.go
│ │ │ │ │ ├── hugetlb.go
│ │ │ │ │ ├── hugetlb_test.go
│ │ │ │ │ ├── memory.go
│ │ │ │ │ ├── memory_test.go
│ │ │ │ │ ├── net_cls.go
│ │ │ │ │ ├── net_cls_test.go
│ │ │ │ │ ├── net_prio.go
│ │ │ │ │ ├── net_prio_test.go
│ │ │ │ │ ├── perf_event.go
│ │ │ │ │ ├── stats_util_test.go
│ │ │ │ │ ├── util_test.go
│ │ │ │ │ ├── utils.go
│ │ │ │ │ └── utils_test.go
│ │ │ │ ├── stats.go
│ │ │ │ ├── systemd/
│ │ │ │ │ ├── apply_nosystemd.go
│ │ │ │ │ └── apply_systemd.go
│ │ │ │ └── utils.go
│ │ │ ├── configs/
│ │ │ │ ├── cgroup.go
│ │ │ │ ├── config.go
│ │ │ │ ├── config_test.go
│ │ │ │ ├── config_unix.go
│ │ │ │ ├── device.go
│ │ │ │ ├── device_defaults.go
│ │ │ │ ├── hugepage_limit.go
│ │ │ │ ├── interface_priority_map.go
│ │ │ │ ├── mount.go
│ │ │ │ ├── namespaces.go
│ │ │ │ ├── namespaces_syscall.go
│ │ │ │ ├── namespaces_syscall_unsupported.go
│ │ │ │ ├── namespaces_unix.go
│ │ │ │ ├── namespaces_windows.go
│ │ │ │ ├── network.go
│ │ │ │ └── validate/
│ │ │ │ └── config.go
│ │ │ ├── console.go
│ │ │ ├── console_freebsd.go
│ │ │ ├── console_linux.go
│ │ │ ├── console_windows.go
│ │ │ ├── container.go
│ │ │ ├── container_linux.go
│ │ │ ├── container_linux_test.go
│ │ │ ├── container_nouserns_linux.go
│ │ │ ├── container_userns_linux.go
│ │ │ ├── criu_opts.go
│ │ │ ├── criurpc/
│ │ │ │ ├── Makefile
│ │ │ │ ├── criurpc.pb.go
│ │ │ │ └── criurpc.proto
│ │ │ ├── devices/
│ │ │ │ ├── devices_test.go
│ │ │ │ ├── devices_unix.go
│ │ │ │ ├── devices_windows.go
│ │ │ │ └── number.go
│ │ │ ├── docs/
│ │ │ │ └── man/
│ │ │ │ └── nsinit.1.md
│ │ │ ├── error.go
│ │ │ ├── error_test.go
│ │ │ ├── factory.go
│ │ │ ├── factory_linux.go
│ │ │ ├── factory_linux_test.go
│ │ │ ├── generic_error.go
│ │ │ ├── generic_error_test.go
│ │ │ ├── hack/
│ │ │ │ └── validate.sh
│ │ │ ├── init_linux.go
│ │ │ ├── integration/
│ │ │ │ ├── checkpoint_test.go
│ │ │ │ ├── doc.go
│ │ │ │ ├── exec_test.go
│ │ │ │ ├── execin_test.go
│ │ │ │ ├── init_test.go
│ │ │ │ ├── template_test.go
│ │ │ │ └── utils_test.go
│ │ │ ├── label/
│ │ │ │ ├── label.go
│ │ │ │ ├── label_selinux.go
│ │ │ │ └── label_selinux_test.go
│ │ │ ├── netlink/
│ │ │ │ ├── MAINTAINERS
│ │ │ │ ├── netlink.go
│ │ │ │ ├── netlink_linux.go
│ │ │ │ ├── netlink_linux_armppc64.go
│ │ │ │ ├── netlink_linux_notarm.go
│ │ │ │ ├── netlink_linux_test.go
│ │ │ │ └── netlink_unsupported.go
│ │ │ ├── network_linux.go
│ │ │ ├── notify_linux.go
│ │ │ ├── notify_linux_test.go
│ │ │ ├── nsenter/
│ │ │ │ ├── README.md
│ │ │ │ ├── nsenter.go
│ │ │ │ ├── nsenter_gccgo.go
│ │ │ │ ├── nsenter_test.go
│ │ │ │ ├── nsenter_unsupported.go
│ │ │ │ └── nsexec.c
│ │ │ ├── nsinit/
│ │ │ │ ├── Makefile
│ │ │ │ ├── README.md
│ │ │ │ ├── checkpoint.go
│ │ │ │ ├── config.go
│ │ │ │ ├── exec.go
│ │ │ │ ├── init.go
│ │ │ │ ├── main.go
│ │ │ │ ├── oom.go
│ │ │ │ ├── pause.go
│ │ │ │ ├── restore.go
│ │ │ │ ├── security.go
│ │ │ │ ├── state.go
│ │ │ │ ├── stats.go
│ │ │ │ ├── tty.go
│ │ │ │ └── utils.go
│ │ │ ├── process.go
│ │ │ ├── process_linux.go
│ │ │ ├── restored_process.go
│ │ │ ├── rootfs_linux.go
│ │ │ ├── rootfs_linux_test.go
│ │ │ ├── sample_configs/
│ │ │ │ ├── README.md
│ │ │ │ ├── apparmor.json
│ │ │ │ ├── attach_to_bridge.json
│ │ │ │ ├── host-pid.json
│ │ │ │ ├── minimal.json
│ │ │ │ ├── selinux.json
│ │ │ │ └── userns.json
│ │ │ ├── seccomp/
│ │ │ │ ├── bpf.go
│ │ │ │ ├── context.go
│ │ │ │ ├── filter.go
│ │ │ │ ├── jump_amd64.go
│ │ │ │ └── seccomp.go
│ │ │ ├── selinux/
│ │ │ │ ├── selinux.go
│ │ │ │ └── selinux_test.go
│ │ │ ├── setns_init_linux.go
│ │ │ ├── stacktrace/
│ │ │ │ ├── capture.go
│ │ │ │ ├── capture_test.go
│ │ │ │ ├── frame.go
│ │ │ │ ├── frame_test.go
│ │ │ │ └── stacktrace.go
│ │ │ ├── standard_init_linux.go
│ │ │ ├── stats.go
│ │ │ ├── stats_freebsd.go
│ │ │ ├── stats_linux.go
│ │ │ ├── stats_windows.go
│ │ │ ├── system/
│ │ │ │ ├── linux.go
│ │ │ │ ├── proc.go
│ │ │ │ ├── setns_linux.go
│ │ │ │ ├── syscall_linux_386.go
│ │ │ │ ├── syscall_linux_64.go
│ │ │ │ ├── syscall_linux_arm.go
│ │ │ │ ├── sysconfig.go
│ │ │ │ ├── sysconfig_notcgo.go
│ │ │ │ └── xattrs_linux.go
│ │ │ ├── update-vendor.sh
│ │ │ ├── user/
│ │ │ │ ├── MAINTAINERS
│ │ │ │ ├── lookup.go
│ │ │ │ ├── lookup_unix.go
│ │ │ │ ├── lookup_unsupported.go
│ │ │ │ ├── user.go
│ │ │ │ └── user_test.go
│ │ │ ├── utils/
│ │ │ │ ├── utils.go
│ │ │ │ └── utils_test.go
│ │ │ └── xattr/
│ │ │ ├── errors.go
│ │ │ ├── xattr_linux.go
│ │ │ ├── xattr_test.go
│ │ │ └── xattr_unsupported.go
│ │ ├── emicklei/
│ │ │ └── go-restful/
│ │ │ ├── .gitignore
│ │ │ ├── CHANGES.md
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ ├── Srcfile
│ │ │ ├── bench_curly_test.go
│ │ │ ├── bench_test.go
│ │ │ ├── bench_test.sh
│ │ │ ├── compress.go
│ │ │ ├── compress_test.go
│ │ │ ├── compressor_cache.go
│ │ │ ├── compressor_pools.go
│ │ │ ├── compressors.go
│ │ │ ├── constants.go
│ │ │ ├── container.go
│ │ │ ├── container_test.go
│ │ │ ├── cors_filter.go
│ │ │ ├── cors_filter_test.go
│ │ │ ├── coverage.sh
│ │ │ ├── curly.go
│ │ │ ├── curly_route.go
│ │ │ ├── curly_test.go
│ │ │ ├── doc.go
│ │ │ ├── doc_examples_test.go
│ │ │ ├── entity_accessors.go
│ │ │ ├── entity_accessors_test.go
│ │ │ ├── examples/
│ │ │ │ ├── .goconvey
│ │ │ │ ├── google_app_engine/
│ │ │ │ │ ├── .goconvey
│ │ │ │ │ ├── app.yaml
│ │ │ │ │ ├── datastore/
│ │ │ │ │ │ ├── .goconvey
│ │ │ │ │ │ ├── app.yaml
│ │ │ │ │ │ └── main.go
│ │ │ │ │ ├── restful-appstats-integration.go
│ │ │ │ │ └── restful-user-service.go
│ │ │ │ ├── home.html
│ │ │ │ ├── restful-CORS-filter.go
│ │ │ │ ├── restful-NCSA-logging.go
│ │ │ │ ├── restful-basic-authentication.go
│ │ │ │ ├── restful-cpuprofiler-service.go
│ │ │ │ ├── restful-curly-router.go
│ │ │ │ ├── restful-encoding-filter.go
│ │ │ │ ├── restful-filters.go
│ │ │ │ ├── restful-form-handling.go
│ │ │ │ ├── restful-hello-world.go
│ │ │ │ ├── restful-html-template.go
│ │ │ │ ├── restful-multi-containers.go
│ │ │ │ ├── restful-options-filter.go
│ │ │ │ ├── restful-path-tail.go
│ │ │ │ ├── restful-pre-post-filters.go
│ │ │ │ ├── restful-resource-functions.go
│ │ │ │ ├── restful-route_test.go
│ │ │ │ ├── restful-routefunction_test.go
│ │ │ │ ├── restful-serve-static.go
│ │ │ │ ├── restful-swagger.go
│ │ │ │ ├── restful-user-resource.go
│ │ │ │ └── restful-user-service.go
│ │ │ ├── filter.go
│ │ │ ├── filter_test.go
│ │ │ ├── install.sh
│ │ │ ├── jsr311.go
│ │ │ ├── jsr311_test.go
│ │ │ ├── log/
│ │ │ │ └── log.go
│ │ │ ├── logger.go
│ │ │ ├── options_filter.go
│ │ │ ├── options_filter_test.go
│ │ │ ├── parameter.go
│ │ │ ├── path_expression.go
│ │ │ ├── path_expression_test.go
│ │ │ ├── request.go
│ │ │ ├── request_test.go
│ │ │ ├── response.go
│ │ │ ├── response_test.go
│ │ │ ├── route.go
│ │ │ ├── route_builder.go
│ │ │ ├── route_builder_test.go
│ │ │ ├── route_test.go
│ │ │ ├── router.go
│ │ │ ├── service_error.go
│ │ │ ├── swagger/
│ │ │ │ ├── CHANGES.md
│ │ │ │ ├── README.md
│ │ │ │ ├── api_declaration_list.go
│ │ │ │ ├── config.go
│ │ │ │ ├── model_builder.go
│ │ │ │ ├── model_builder_test.go
│ │ │ │ ├── model_list.go
│ │ │ │ ├── model_list_test.go
│ │ │ │ ├── model_property_ext.go
│ │ │ │ ├── model_property_ext_test.go
│ │ │ │ ├── model_property_list.go
│ │ │ │ ├── model_property_list_test.go
│ │ │ │ ├── ordered_route_map.go
│ │ │ │ ├── ordered_route_map_test.go
│ │ │ │ ├── postbuild_model_test.go
│ │ │ │ ├── swagger.go
│ │ │ │ ├── swagger_builder.go
│ │ │ │ ├── swagger_test.go
│ │ │ │ ├── swagger_webservice.go
│ │ │ │ ├── test_package/
│ │ │ │ │ └── struct.go
│ │ │ │ └── utils_test.go
│ │ │ ├── tracer_test.go
│ │ │ ├── web_service.go
│ │ │ ├── web_service_container.go
│ │ │ └── web_service_test.go
│ │ ├── evanphx/
│ │ │ └── json-patch/
│ │ │ ├── .travis.yml
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ ├── merge.go
│ │ │ ├── merge_test.go
│ │ │ ├── patch.go
│ │ │ └── patch_test.go
│ │ ├── fatih/
│ │ │ └── color/
│ │ │ ├── .travis.yml
│ │ │ ├── LICENSE.md
│ │ │ ├── README.md
│ │ │ ├── color.go
│ │ │ ├── color_test.go
│ │ │ └── doc.go
│ │ ├── fsouza/
│ │ │ └── go-dockerclient/
│ │ │ ├── .gitignore
│ │ │ ├── .travis.yml
│ │ │ ├── AUTHORS
│ │ │ ├── DOCKER-LICENSE
│ │ │ ├── LICENSE
│ │ │ ├── Makefile
│ │ │ ├── README.markdown
│ │ │ ├── auth.go
│ │ │ ├── auth_test.go
│ │ │ ├── build_test.go
│ │ │ ├── change.go
│ │ │ ├── change_test.go
│ │ │ ├── client.go
│ │ │ ├── client_test.go
│ │ │ ├── container.go
│ │ │ ├── container_test.go
│ │ │ ├── env.go
│ │ │ ├── env_test.go
│ │ │ ├── event.go
│ │ │ ├── event_test.go
│ │ │ ├── example_test.go
│ │ │ ├── exec.go
│ │ │ ├── exec_test.go
│ │ │ ├── external/
│ │ │ │ ├── github.com/
│ │ │ │ │ ├── Sirupsen/
│ │ │ │ │ │ └── logrus/
│ │ │ │ │ │ ├── CHANGELOG.md
│ │ │ │ │ │ ├── LICENSE
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── doc.go
│ │ │ │ │ │ ├── entry.go
│ │ │ │ │ │ ├── entry_test.go
│ │ │ │ │ │ ├── exported.go
│ │ │ │ │ │ ├── formatter.go
│ │ │ │ │ │ ├── formatter_bench_test.go
│ │ │ │ │ │ ├── hook_test.go
│ │ │ │ │ │ ├── hooks.go
│ │ │ │ │ │ ├── json_formatter.go
│ │ │ │ │ │ ├── json_formatter_test.go
│ │ │ │ │ │ ├── logger.go
│ │ │ │ │ │ ├── logrus.go
│ │ │ │ │ │ ├── logrus_test.go
│ │ │ │ │ │ ├── terminal_bsd.go
│ │ │ │ │ │ ├── terminal_linux.go
│ │ │ │ │ │ ├── terminal_notwindows.go
│ │ │ │ │ │ ├── terminal_solaris.go
│ │ │ │ │ │ ├── terminal_windows.go
│ │ │ │ │ │ ├── text_formatter.go
│ │ │ │ │ │ ├── text_formatter_test.go
│ │ │ │ │ │ └── writer.go
│ │ │ │ │ ├── docker/
│ │ │ │ │ │ ├── docker/
│ │ │ │ │ │ │ ├── opts/
│ │ │ │ │ │ │ │ ├── envfile.go
│ │ │ │ │ │ │ │ ├── envfile_test.go
│ │ │ │ │ │ │ │ ├── hosts.go
│ │ │ │ │ │ │ │ ├── hosts_test.go
│ │ │ │ │ │ │ │ ├── hosts_unix.go
│ │ │ │ │ │ │ │ ├── hosts_windows.go
│ │ │ │ │ │ │ │ ├── ip.go
│ │ │ │ │ │ │ │ ├── ip_test.go
│ │ │ │ │ │ │ │ ├── opts.go
│ │ │ │ │ │ │ │ ├── opts_test.go
│ │ │ │ │ │ │ │ ├── opts_unix.go
│ │ │ │ │ │ │ │ └── opts_windows.go
│ │ │ │ │ │ │ └── pkg/
│ │ │ │ │ │ │ ├── archive/
│ │ │ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ │ │ ├── archive.go
│ │ │ │ │ │ │ │ ├── archive_test.go
│ │ │ │ │ │ │ │ ├── archive_unix.go
│ │ │ │ │ │ │ │ ├── archive_unix_test.go
│ │ │ │ │ │ │ │ ├── archive_windows.go
│ │ │ │ │ │ │ │ ├── archive_windows_test.go
│ │ │ │ │ │ │ │ ├── changes.go
│ │ │ │ │ │ │ │ ├── changes_linux.go
│ │ │ │ │ │ │ │ ├── changes_other.go
│ │ │ │ │ │ │ │ ├── changes_posix_test.go
│ │ │ │ │ │ │ │ ├── changes_test.go
│ │ │ │ │ │ │ │ ├── changes_unix.go
│ │ │ │ │ │ │ │ ├── changes_windows.go
│ │ │ │ │ │ │ │ ├── copy.go
│ │ │ │ │ │ │ │ ├── copy_test.go
│ │ │ │ │ │ │ │ ├── copy_unix.go
│ │ │ │ │ │ │ │ ├── copy_windows.go
│ │ │ │ │ │ │ │ ├── diff.go
│ │ │ │ │ │ │ │ ├── diff_test.go
│ │ │ │ │ │ │ │ ├── example_changes.go
│ │ │ │ │ │ │ │ ├── time_linux.go
│ │ │ │ │ │ │ │ ├── time_unsupported.go
│ │ │ │ │ │ │ │ ├── utils_test.go
│ │ │ │ │ │ │ │ ├── whiteouts.go
│ │ │ │ │ │ │ │ ├── wrap.go
│ │ │ │ │ │ │ │ └── wrap_test.go
│ │ │ │ │ │ │ ├── fileutils/
│ │ │ │ │ │ │ │ ├── fileutils.go
│ │ │ │ │ │ │ │ ├── fileutils_test.go
│ │ │ │ │ │ │ │ ├── fileutils_unix.go
│ │ │ │ │ │ │ │ └── fileutils_windows.go
│ │ │ │ │ │ │ ├── homedir/
│ │ │ │ │ │ │ │ ├── homedir.go
│ │ │ │ │ │ │ │ └── homedir_test.go
│ │ │ │ │ │ │ ├── idtools/
│ │ │ │ │ │ │ │ ├── idtools.go
│ │ │ │ │ │ │ │ ├── idtools_unix.go
│ │ │ │ │ │ │ │ ├── idtools_unix_test.go
│ │ │ │ │ │ │ │ ├── idtools_windows.go
│ │ │ │ │ │ │ │ ├── usergroupadd_linux.go
│ │ │ │ │ │ │ │ └── usergroupadd_unsupported.go
│ │ │ │ │ │ │ ├── ioutils/
│ │ │ │ │ │ │ │ ├── bytespipe.go
│ │ │ │ │ │ │ │ ├── bytespipe_test.go
│ │ │ │ │ │ │ │ ├── fmt.go
│ │ │ │ │ │ │ │ ├── fmt_test.go
│ │ │ │ │ │ │ │ ├── multireader.go
│ │ │ │ │ │ │ │ ├── multireader_test.go
│ │ │ │ │ │ │ │ ├── readers.go
│ │ │ │ │ │ │ │ ├── readers_test.go
│ │ │ │ │ │ │ │ ├── scheduler.go
│ │ │ │ │ │ │ │ ├── scheduler_gccgo.go
│ │ │ │ │ │ │ │ ├── temp_unix.go
│ │ │ │ │ │ │ │ ├── temp_windows.go
│ │ │ │ │ │ │ │ ├── writeflusher.go
│ │ │ │ │ │ │ │ ├── writers.go
│ │ │ │ │ │ │ │ └── writers_test.go
│ │ │ │ │ │ │ ├── longpath/
│ │ │ │ │ │ │ │ ├── longpath.go
│ │ │ │ │ │ │ │ └── longpath_test.go
│ │ │ │ │ │ │ ├── pools/
│ │ │ │ │ │ │ │ ├── pools.go
│ │ │ │ │ │ │ │ └── pools_test.go
│ │ │ │ │ │ │ ├── promise/
│ │ │ │ │ │ │ │ └── promise.go
│ │ │ │ │ │ │ ├── stdcopy/
│ │ │ │ │ │ │ │ ├── stdcopy.go
│ │ │ │ │ │ │ │ └── stdcopy_test.go
│ │ │ │ │ │ │ └── system/
│ │ │ │ │ │ │ ├── chtimes.go
│ │ │ │ │ │ │ ├── chtimes_test.go
│ │ │ │ │ │ │ ├── chtimes_unix_test.go
│ │ │ │ │ │ │ ├── chtimes_windows_test.go
│ │ │ │ │ │ │ ├── errors.go
│ │ │ │ │ │ │ ├── events_windows.go
│ │ │ │ │ │ │ ├── filesys.go
│ │ │ │ │ │ │ ├── filesys_windows.go
│ │ │ │ │ │ │ ├── lstat.go
│ │ │ │ │ │ │ ├── lstat_unix_test.go
│ │ │ │ │ │ │ ├── lstat_windows.go
│ │ │ │ │ │ │ ├── meminfo.go
│ │ │ │ │ │ │ ├── meminfo_linux.go
│ │ │ │ │ │ │ ├── meminfo_unix_test.go
│ │ │ │ │ │ │ ├── meminfo_unsupported.go
│ │ │ │ │ │ │ ├── meminfo_windows.go
│ │ │ │ │ │ │ ├── mknod.go
│ │ │ │ │ │ │ ├── mknod_windows.go
│ │ │ │ │ │ │ ├── path_unix.go
│ │ │ │ │ │ │ ├── path_windows.go
│ │ │ │ │ │ │ ├── stat.go
│ │ │ │ │ │ │ ├── stat_freebsd.go
│ │ │ │ │ │ │ ├── stat_linux.go
│ │ │ │ │ │ │ ├── stat_solaris.go
│ │ │ │ │ │ │ ├── stat_unix_test.go
│ │ │ │ │ │ │ ├── stat_unsupported.go
│ │ │ │ │ │ │ ├── stat_windows.go
│ │ │ │ │ │ │ ├── syscall_unix.go
│ │ │ │ │ │ │ ├── syscall_windows.go
│ │ │ │ │ │ │ ├── umask.go
│ │ │ │ │ │ │ ├── umask_windows.go
│ │ │ │ │ │ │ ├── utimes_darwin.go
│ │ │ │ │ │ │ ├── utimes_freebsd.go
│ │ │ │ │ │ │ ├── utimes_linux.go
│ │ │ │ │ │ │ ├── utimes_unix_test.go
│ │ │ │ │ │ │ ├── utimes_unsupported.go
│ │ │ │ │ │ │ ├── xattrs_linux.go
│ │ │ │ │ │ │ └── xattrs_unsupported.go
│ │ │ │ │ │ └── go-units/
│ │ │ │ │ │ ├── CONTRIBUTING.md
│ │ │ │ │ │ ├── LICENSE.code
│ │ │ │ │ │ ├── LICENSE.docs
│ │ │ │ │ │ ├── MAINTAINERS
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── circle.yml
│ │ │ │ │ │ ├── duration.go
│ │ │ │ │ │ ├── duration_test.go
│ │ │ │ │ │ ├── size.go
│ │ │ │ │ │ ├── size_test.go
│ │ │ │ │ │ ├── ulimit.go
│ │ │ │ │ │ └── ulimit_test.go
│ │ │ │ │ ├── gorilla/
│ │ │ │ │ │ ├── context/
│ │ │ │ │ │ │ ├── LICENSE
│ │ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ │ ├── context.go
│ │ │ │ │ │ │ ├── context_test.go
│ │ │ │ │ │ │ └── doc.go
│ │ │ │ │ │ └── mux/
│ │ │ │ │ │ ├── LICENSE
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ ├── bench_test.go
│ │ │ │ │ │ ├── doc.go
│ │ │ │ │ │ ├── mux.go
│ │ │ │ │ │ ├── mux_test.go
│ │ │ │ │ │ ├── old_test.go
│ │ │ │ │ │ ├── regexp.go
│ │ │ │ │ │ └── route.go
│ │ │ │ │ ├── hashicorp/
│ │ │ │ │ │ └── go-cleanhttp/
│ │ │ │ │ │ ├── LICENSE
│ │ │ │ │ │ ├── README.md
│ │ │ │ │ │ └── cleanhttp.go
│ │ │ │ │ └── opencontainers/
│ │ │ │ │ └── runc/
│ │ │ │ │ └── libcontainer/
│ │ │ │ │ └── user/
│ │ │ │ │ ├── MAINTAINERS
│ │ │ │ │ ├── lookup.go
│ │ │ │ │ ├── lookup_unix.go
│ │ │ │ │ ├── lookup_unsupported.go
│ │ │ │ │ ├── user.go
│ │ │ │ │ └── user_test.go
│ │ │ │ └── golang.org/
│ │ │ │ └── x/
│ │ │ │ ├── net/
│ │ │ │ │ └── context/
│ │ │ │ │ ├── context.go
│ │ │ │ │ ├── context_test.go
│ │ │ │ │ └── withtimeout_test.go
│ │ │ │ └── sys/
│ │ │ │ └── unix/
│ │ │ │ ├── asm.s
│ │ │ │ ├── asm_darwin_386.s
│ │ │ │ ├── asm_darwin_amd64.s
│ │ │ │ ├── asm_darwin_arm.s
│ │ │ │ ├── asm_darwin_arm64.s
│ │ │ │ ├── asm_dragonfly_386.s
│ │ │ │ ├── asm_dragonfly_amd64.s
│ │ │ │ ├── asm_freebsd_386.s
│ │ │ │ ├── asm_freebsd_amd64.s
│ │ │ │ ├── asm_freebsd_arm.s
│ │ │ │ ├── asm_linux_386.s
│ │ │ │ ├── asm_linux_amd64.s
│ │ │ │ ├── asm_linux_arm.s
│ │ │ │ ├── asm_linux_arm64.s
│ │ │ │ ├── asm_linux_ppc64x.s
│ │ │ │ ├── asm_netbsd_386.s
│ │ │ │ ├── asm_netbsd_amd64.s
│ │ │ │ ├── asm_netbsd_arm.s
│ │ │ │ ├── asm_openbsd_386.s
│ │ │ │ ├── asm_openbsd_amd64.s
│ │ │ │ ├── asm_solaris_amd64.s
│ │ │ │ ├── constants.go
│ │ │ │ ├── creds_test.go
│ │ │ │ ├── env_unix.go
│ │ │ │ ├── env_unset.go
│ │ │ │ ├── export_test.go
│ │ │ │ ├── flock.go
│ │ │ │ ├── flock_linux_32bit.go
│ │ │ │ ├── gccgo.go
│ │ │ │ ├── gccgo_c.c
│ │ │ │ ├── gccgo_linux_amd64.go
│ │ │ │ ├── mkall.sh
│ │ │ │ ├── mkerrors.sh
│ │ │ │ ├── mksyscall.pl
│ │ │ │ ├── mksyscall_solaris.pl
│ │ │ │ ├── mksysctl_openbsd.pl
│ │ │ │ ├── mksysnum_darwin.pl
│ │ │ │ ├── mksysnum_dragonfly.pl
│ │ │ │ ├── mksysnum_freebsd.pl
│ │ │ │ ├── mksysnum_linux.pl
│ │ │ │ ├── mksysnum_netbsd.pl
│ │ │ │ ├── mksysnum_openbsd.pl
│ │ │ │ ├── mmap_unix_test.go
│ │ │ │ ├── race.go
│ │ │ │ ├── race0.go
│ │ │ │ ├── sockcmsg_linux.go
│ │ │ │ ├── sockcmsg_unix.go
│ │ │ │ ├── str.go
│ │ │ │ ├── syscall.go
│ │ │ │ ├── syscall_bsd.go
│ │ │ │ ├── syscall_bsd_test.go
│ │ │ │ ├── syscall_darwin.go
│ │ │ │ ├── syscall_darwin_386.go
│ │ │ │ ├── syscall_darwin_amd64.go
│ │ │ │ ├── syscall_darwin_arm.go
│ │ │ │ ├── syscall_darwin_arm64.go
│ │ │ │ ├── syscall_dragonfly.go
│ │ │ │ ├── syscall_dragonfly_386.go
│ │ │ │ ├── syscall_dragonfly_amd64.go
│ │ │ │ ├── syscall_freebsd.go
│ │ │ │ ├── syscall_freebsd_386.go
│ │ │ │ ├── syscall_freebsd_amd64.go
│ │ │ │ ├── syscall_freebsd_arm.go
│ │ │ │ ├── syscall_freebsd_test.go
│ │ │ │ ├── syscall_linux.go
│ │ │ │ ├── syscall_linux_386.go
│ │ │ │ ├── syscall_linux_amd64.go
│ │ │ │ ├── syscall_linux_arm.go
│ │ │ │ ├── syscall_linux_arm64.go
│ │ │ │ ├── syscall_linux_ppc64x.go
│ │ │ │ ├── syscall_netbsd.go
│ │ │ │ ├── syscall_netbsd_386.go
│ │ │ │ ├── syscall_netbsd_amd64.go
│ │ │ │ ├── syscall_netbsd_arm.go
│ │ │ │ ├── syscall_no_getwd.go
│ │ │ │ ├── syscall_openbsd.go
│ │ │ │ ├── syscall_openbsd_386.go
│ │ │ │ ├── syscall_openbsd_amd64.go
│ │ │ │ ├── syscall_solaris.go
│ │ │ │ ├── syscall_solaris_amd64.go
│ │ │ │ ├── syscall_test.go
│ │ │ │ ├── syscall_unix.go
│ │ │ │ ├── syscall_unix_test.go
│ │ │ │ ├── types_darwin.go
│ │ │ │ ├── types_dragonfly.go
│ │ │ │ ├── types_freebsd.go
│ │ │ │ ├── types_linux.go
│ │ │ │ ├── types_netbsd.go
│ │ │ │ ├── types_openbsd.go
│ │ │ │ ├── types_solaris.go
│ │ │ │ ├── zerrors_darwin_386.go
│ │ │ │ ├── zerrors_darwin_amd64.go
│ │ │ │ ├── zerrors_darwin_arm.go
│ │ │ │ ├── zerrors_darwin_arm64.go
│ │ │ │ ├── zerrors_dragonfly_386.go
│ │ │ │ ├── zerrors_dragonfly_amd64.go
│ │ │ │ ├── zerrors_freebsd_386.go
│ │ │ │ ├── zerrors_freebsd_amd64.go
│ │ │ │ ├── zerrors_freebsd_arm.go
│ │ │ │ ├── zerrors_linux_386.go
│ │ │ │ ├── zerrors_linux_amd64.go
│ │ │ │ ├── zerrors_linux_arm.go
│ │ │ │ ├── zerrors_linux_arm64.go
│ │ │ │ ├── zerrors_linux_ppc64.go
│ │ │ │ ├── zerrors_linux_ppc64le.go
│ │ │ │ ├── zerrors_netbsd_386.go
│ │ │ │ ├── zerrors_netbsd_amd64.go
│ │ │ │ ├── zerrors_netbsd_arm.go
│ │ │ │ ├── zerrors_openbsd_386.go
│ │ │ │ ├── zerrors_openbsd_amd64.go
│ │ │ │ ├── zerrors_solaris_amd64.go
│ │ │ │ ├── zsyscall_darwin_386.go
│ │ │ │ ├── zsyscall_darwin_amd64.go
│ │ │ │ ├── zsyscall_darwin_arm.go
│ │ │ │ ├── zsyscall_darwin_arm64.go
│ │ │ │ ├── zsyscall_dragonfly_386.go
│ │ │ │ ├── zsyscall_dragonfly_amd64.go
│ │ │ │ ├── zsyscall_freebsd_386.go
│ │ │ │ ├── zsyscall_freebsd_amd64.go
│ │ │ │ ├── zsyscall_freebsd_arm.go
│ │ │ │ ├── zsyscall_linux_386.go
│ │ │ │ ├── zsyscall_linux_amd64.go
│ │ │ │ ├── zsyscall_linux_arm.go
│ │ │ │ ├── zsyscall_linux_arm64.go
│ │ │ │ ├── zsyscall_linux_ppc64.go
│ │ │ │ ├── zsyscall_linux_ppc64le.go
│ │ │ │ ├── zsyscall_netbsd_386.go
│ │ │ │ ├── zsyscall_netbsd_amd64.go
│ │ │ │ ├── zsyscall_netbsd_arm.go
│ │ │ │ ├── zsyscall_openbsd_386.go
│ │ │ │ ├── zsyscall_openbsd_amd64.go
│ │ │ │ ├── zsyscall_solaris_amd64.go
│ │ │ │ ├── zsysctl_openbsd.go
│ │ │ │ ├── zsysnum_darwin_386.go
│ │ │ │ ├── zsysnum_darwin_amd64.go
│ │ │ │ ├── zsysnum_darwin_arm.go
│ │ │ │ ├── zsysnum_darwin_arm64.go
│ │ │ │ ├── zsysnum_dragonfly_386.go
│ │ │ │ ├── zsysnum_dragonfly_amd64.go
│ │ │ │ ├── zsysnum_freebsd_386.go
│ │ │ │ ├── zsysnum_freebsd_amd64.go
│ │ │ │ ├── zsysnum_freebsd_arm.go
│ │ │ │ ├── zsysnum_linux_386.go
│ │ │ │ ├── zsysnum_linux_amd64.go
│ │ │ │ ├── zsysnum_linux_arm.go
│ │ │ │ ├── zsysnum_linux_arm64.go
│ │ │ │ ├── zsysnum_linux_ppc64.go
│ │ │ │ ├── zsysnum_linux_ppc64le.go
│ │ │ │ ├── zsysnum_netbsd_386.go
│ │ │ │ ├── zsysnum_netbsd_amd64.go
│ │ │ │ ├── zsysnum_netbsd_arm.go
│ │ │ │ ├── zsysnum_openbsd_386.go
│ │ │ │ ├── zsysnum_openbsd_amd64.go
│ │ │ │ ├── zsysnum_solaris_amd64.go
│ │ │ │ ├── ztypes_darwin_386.go
│ │ │ │ ├── ztypes_darwin_amd64.go
│ │ │ │ ├── ztypes_darwin_arm.go
│ │ │ │ ├── ztypes_darwin_arm64.go
│ │ │ │ ├── ztypes_dragonfly_386.go
│ │ │ │ ├── ztypes_dragonfly_amd64.go
│ │ │ │ ├── ztypes_freebsd_386.go
│ │ │ │ ├── ztypes_freebsd_amd64.go
│ │ │ │ ├── ztypes_freebsd_arm.go
│ │ │ │ ├── ztypes_linux_386.go
│ │ │ │ ├── ztypes_linux_amd64.go
│ │ │ │ ├── ztypes_linux_arm.go
│ │ │ │ ├── ztypes_linux_arm64.go
│ │ │ │ ├── ztypes_linux_ppc64.go
│ │ │ │ ├── ztypes_linux_ppc64le.go
│ │ │ │ ├── ztypes_netbsd_386.go
│ │ │ │ ├── ztypes_netbsd_amd64.go
│ │ │ │ ├── ztypes_netbsd_arm.go
│ │ │ │ ├── ztypes_openbsd_386.go
│ │ │ │ ├── ztypes_openbsd_amd64.go
│ │ │ │ └── ztypes_solaris_amd64.go
│ │ │ ├── image.go
│ │ │ ├── image_test.go
│ │ │ ├── integration_test.go
│ │ │ ├── misc.go
│ │ │ ├── misc_test.go
│ │ │ ├── network.go
│ │ │ ├── network_test.go
│ │ │ ├── signal.go
│ │ │ ├── tar.go
│ │ │ ├── testing/
│ │ │ │ ├── data/
│ │ │ │ │ ├── .dockerignore
│ │ │ │ │ ├── Dockerfile
│ │ │ │ │ ├── barfile
│ │ │ │ │ ├── ca.pem
│ │ │ │ │ ├── cert.pem
│ │ │ │ │ ├── foofile
│ │ │ │ │ ├── key.pem
│ │ │ │ │ ├── server.pem
│ │ │ │ │ └── serverkey.pem
│ │ │ │ ├── server.go
│ │ │ │ └── server_test.go
│ │ │ ├── tls.go
│ │ │ ├── volume.go
│ │ │ └── volume_test.go
│ │ ├── ghodss/
│ │ │ └── yaml/
│ │ │ ├── .gitignore
│ │ │ ├── .travis.yml
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ ├── fields.go
│ │ │ ├── yaml.go
│ │ │ └── yaml_test.go
│ │ ├── golang/
│ │ │ ├── glog/
│ │ │ │ ├── LICENSE
│ │ │ │ ├── README
│ │ │ │ ├── glog.go
│ │ │ │ ├── glog_file.go
│ │ │ │ └── glog_test.go
│ │ │ ├── groupcache/
│ │ │ │ ├── .gitignore
│ │ │ │ ├── LICENSE
│ │ │ │ ├── README.md
│ │ │ │ ├── byteview.go
│ │ │ │ ├── byteview_test.go
│ │ │ │ ├── consistenthash/
│ │ │ │ │ ├── consistenthash.go
│ │ │ │ │ └── consistenthash_test.go
│ │ │ │ ├── groupcache.go
│ │ │ │ ├── groupcache_test.go
│ │ │ │ ├── groupcachepb/
│ │ │ │ │ ├── groupcache.pb.go
│ │ │ │ │ └── groupcache.proto
│ │ │ │ ├── http.go
│ │ │ │ ├── http_test.go
│ │ │ │ ├── lru/
│ │ │ │ │ ├── lru.go
│ │ │ │ │ └── lru_test.go
│ │ │ │ ├── peers.go
│ │ │ │ ├── singleflight/
│ │ │ │ │ ├── singleflight.go
│ │ │ │ │ └── singleflight_test.go
│ │ │ │ ├── sinks.go
│ │ │ │ └── testpb/
│ │ │ │ ├── test.pb.go
│ │ │ │ └── test.proto
│ │ │ └── protobuf/
│ │ │ ├── .gitignore
│ │ │ ├── AUTHORS
│ │ │ ├── CONTRIBUTORS
│ │ │ ├── LICENSE
│ │ │ ├── Make.protobuf
│ │ │ ├── Makefile
│ │ │ ├── README.md
│ │ │ ├── jsonpb/
│ │ │ │ ├── jsonpb.go
│ │ │ │ ├── jsonpb_test.go
│ │ │ │ └── jsonpb_test_proto/
│ │ │ │ ├── Makefile
│ │ │ │ ├── more_test_objects.proto
│ │ │ │ └── test_objects.proto
│ │ │ ├── proto/
│ │ │ │ ├── Makefile
│ │ │ │ ├── all_test.go
│ │ │ │ ├── clone.go
│ │ │ │ ├── clone_test.go
│ │ │ │ ├── decode.go
│ │ │ │ ├── encode.go
│ │ │ │ ├── equal.go
│ │ │ │ ├── equal_test.go
│ │ │ │ ├── extensions.go
│ │ │ │ ├── extensions_test.go
│ │ │ │ ├── lib.go
│ │ │ │ ├── message_set.go
│ │ │ │ ├── message_set_test.go
│ │ │ │ ├── pointer_reflect.go
│ │ │ │ ├── pointer_unsafe.go
│ │ │ │ ├── properties.go
│ │ │ │ ├── proto3_proto/
│ │ │ │ │ └── proto3.proto
│ │ │ │ ├── proto3_test.go
│ │ │ │ ├── size2_test.go
│ │ │ │ ├── size_test.go
│ │ │ │ ├── testdata/
│ │ │ │ │ ├── Makefile
│ │ │ │ │ ├── golden_test.go
│ │ │ │ │ └── test.proto
│ │ │ │ ├── text.go
│ │ │ │ ├── text_parser.go
│ │ │ │ ├── text_parser_test.go
│ │ │ │ └── text_test.go
│ │ │ └── protoc-gen-go/
│ │ │ ├── Makefile
│ │ │ ├── descriptor/
│ │ │ │ └── Makefile
│ │ │ ├── doc.go
│ │ │ ├── generator/
│ │ │ │ ├── Makefile
│ │ │ │ ├── generator.go
│ │ │ │ └── name_test.go
│ │ │ ├── internal/
│ │ │ │ └── grpc/
│ │ │ │ └── grpc.go
│ │ │ ├── link_grpc.go
│ │ │ ├── main.go
│ │ │ ├── plugin/
│ │ │ │ ├── Makefile
│ │ │ │ └── plugin.pb.golden
│ │ │ └── testdata/
│ │ │ ├── Makefile
│ │ │ ├── extension_base.proto
│ │ │ ├── extension_extra.proto
│ │ │ ├── extension_test.go
│ │ │ ├── extension_user.proto
│ │ │ ├── grpc.proto
│ │ │ ├── imp.pb.go.golden
│ │ │ ├── imp.proto
│ │ │ ├── imp2.proto
│ │ │ ├── imp3.proto
│ │ │ ├── main_test.go
│ │ │ ├── multi/
│ │ │ │ ├── multi1.proto
│ │ │ │ ├── multi2.proto
│ │ │ │ └── multi3.proto
│ │ │ ├── my_test/
│ │ │ │ ├── test.pb.go.golden
│ │ │ │ └── test.proto
│ │ │ └── proto3.proto
│ │ ├── google/
│ │ │ └── gofuzz/
│ │ │ ├── .travis.yml
│ │ │ ├── CONTRIBUTING.md
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ ├── doc.go
│ │ │ ├── example_test.go
│ │ │ ├── fuzz.go
│ │ │ └── fuzz_test.go
│ │ ├── imdario/
│ │ │ └── mergo/
│ │ │ ├── .travis.yml
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ ├── doc.go
│ │ │ ├── map.go
│ │ │ ├── merge.go
│ │ │ ├── mergo.go
│ │ │ ├── mergo_test.go
│ │ │ └── testdata/
│ │ │ ├── license.yml
│ │ │ └── thing.yml
│ │ ├── inconshreveable/
│ │ │ └── mousetrap/
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ ├── trap_others.go
│ │ │ ├── trap_windows.go
│ │ │ └── trap_windows_1.4.go
│ │ ├── juju/
│ │ │ └── ratelimit/
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ ├── ratelimit.go
│ │ │ ├── ratelimit_test.go
│ │ │ └── reader.go
│ │ ├── masterzen/
│ │ │ ├── simplexml/
│ │ │ │ ├── LICENSE
│ │ │ │ ├── README.md
│ │ │ │ └── dom/
│ │ │ │ ├── document.go
│ │ │ │ ├── dom_test.go
│ │ │ │ ├── element.go
│ │ │ │ └── namespace.go
│ │ │ ├── winrm/
│ │ │ │ ├── .gitignore
│ │ │ │ ├── .travis.yml
│ │ │ │ ├── LICENSE
│ │ │ │ ├── Makefile
│ │ │ │ ├── README.md
│ │ │ │ ├── development/
│ │ │ │ │ ├── Vagrantfile
│ │ │ │ │ ├── sample_requests.txt
│ │ │ │ │ └── winrm-tests.sh
│ │ │ │ ├── scripts/
│ │ │ │ │ └── test.sh
│ │ │ │ ├── soap/
│ │ │ │ │ ├── header.go
│ │ │ │ │ ├── header_test.go
│ │ │ │ │ ├── message.go
│ │ │ │ │ ├── namespaces.go
│ │ │ │ │ └── namespaces_test.go
│ │ │ │ ├── winrm/
│ │ │ │ │ ├── client.go
│ │ │ │ │ ├── client_test.go
│ │ │ │ │ ├── command.go
│ │ │ │ │ ├── command_test.go
│ │ │ │ │ ├── endpoint.go
│ │ │ │ │ ├── endpoint_test.go
│ │ │ │ │ ├── fixture_test.go
│ │ │ │ │ ├── http.go
│ │ │ │ │ ├── http_test.go
│ │ │ │ │ ├── parameters.go
│ │ │ │ │ ├── parameters_test.go
│ │ │ │ │ ├── powershell.go
│ │ │ │ │ ├── powershell_test.go
│ │ │ │ │ ├── request.go
│ │ │ │ │ ├── request_test.go
│ │ │ │ │ ├── response.go
│ │ │ │ │ ├── response_test.go
│ │ │ │ │ ├── shell.go
│ │ │ │ │ └── shell_test.go
│ │ │ │ └── winrm.go
│ │ │ └── xmlpath/
│ │ │ ├── LICENSE
│ │ │ ├── all_test.go
│ │ │ ├── doc.go
│ │ │ ├── parser.go
│ │ │ └── path.go
│ │ ├── mattn/
│ │ │ ├── go-colorable/
│ │ │ │ ├── LICENSE
│ │ │ │ ├── README.md
│ │ │ │ ├── _example/
│ │ │ │ │ └── main.go
│ │ │ │ ├── colorable_others.go
│ │ │ │ └── colorable_windows.go
│ │ │ └── go-isatty/
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ ├── _example/
│ │ │ │ └── example.go
│ │ │ ├── doc.go
│ │ │ ├── isatty_appengine.go
│ │ │ ├── isatty_bsd.go
│ │ │ ├── isatty_linux.go
│ │ │ ├── isatty_solaris.go
│ │ │ └── isatty_windows.go
│ │ ├── matttproud/
│ │ │ └── golang_protobuf_extensions/
│ │ │ ├── .travis.yml
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ ├── ext/
│ │ │ │ └── moved.go
│ │ │ ├── pbtest/
│ │ │ │ ├── doc.go
│ │ │ │ ├── example_test.go
│ │ │ │ └── quick.go
│ │ │ └── pbutil/
│ │ │ ├── all_test.go
│ │ │ ├── decode.go
│ │ │ ├── doc.go
│ │ │ ├── encode.go
│ │ │ └── fixtures_test.go
│ │ ├── nu7hatch/
│ │ │ └── gouuid/
│ │ │ ├── .gitignore
│ │ │ ├── COPYING
│ │ │ ├── README.md
│ │ │ ├── example_test.go
│ │ │ ├── uuid.go
│ │ │ └── uuid_test.go
│ │ ├── pborman/
│ │ │ └── uuid/
│ │ │ ├── CONTRIBUTORS
│ │ │ ├── LICENSE
│ │ │ ├── dce.go
│ │ │ ├── doc.go
│ │ │ ├── hash.go
│ │ │ ├── json.go
│ │ │ ├── json_test.go
│ │ │ ├── node.go
│ │ │ ├── seq_test.go
│ │ │ ├── time.go
│ │ │ ├── util.go
│ │ │ ├── uuid.go
│ │ │ ├── uuid_test.go
│ │ │ ├── version1.go
│ │ │ └── version4.go
│ │ ├── prometheus/
│ │ │ ├── client_golang/
│ │ │ │ ├── .gitignore
│ │ │ │ ├── .travis.yml
│ │ │ │ ├── AUTHORS.md
│ │ │ │ ├── CHANGELOG.md
│ │ │ │ ├── CONTRIBUTING.md
│ │ │ │ ├── LICENSE
│ │ │ │ ├── NOTICE
│ │ │ │ ├── README.md
│ │ │ │ ├── VERSION
│ │ │ │ ├── api/
│ │ │ │ │ └── prometheus/
│ │ │ │ │ ├── api.go
│ │ │ │ │ └── api_test.go
│ │ │ │ ├── examples/
│ │ │ │ │ ├── random/
│ │ │ │ │ │ └── main.go
│ │ │ │ │ └── simple/
│ │ │ │ │ └── main.go
│ │ │ │ └── prometheus/
│ │ │ │ ├── .gitignore
│ │ │ │ ├── README.md
│ │ │ │ ├── benchmark_test.go
│ │ │ │ ├── collector.go
│ │ │ │ ├── counter.go
│ │ │ │ ├── counter_test.go
│ │ │ │ ├── desc.go
│ │ │ │ ├── doc.go
│ │ │ │ ├── example_clustermanager_test.go
│ │ │ │ ├── example_memstats_test.go
│ │ │ │ ├── example_selfcollector_test.go
│ │ │ │ ├── examples_test.go
│ │ │ │ ├── expvar.go
│ │ │ │ ├── expvar_test.go
│ │ │ │ ├── gauge.go
│ │ │ │ ├── gauge_test.go
│ │ │ │ ├── go_collector.go
│ │ │ │ ├── go_collector_test.go
│ │ │ │ ├── histogram.go
│ │ │ │ ├── histogram_test.go
│ │ │ │ ├── http.go
│ │ │ │ ├── http_test.go
│ │ │ │ ├── metric.go
│ │ │ │ ├── metric_test.go
│ │ │ │ ├── process_collector.go
│ │ │ │ ├── process_collector_test.go
│ │ │ │ ├── push.go
│ │ │ │ ├── registry.go
│ │ │ │ ├── registry_test.go
│ │ │ │ ├── summary.go
│ │ │ │ ├── summary_test.go
│ │ │ │ ├── untyped.go
│ │ │ │ ├── value.go
│ │ │ │ ├── vec.go
│ │ │ │ └── vec_test.go
│ │ │ ├── client_model/
│ │ │ │ ├── .gitignore
│ │ │ │ ├── AUTHORS.md
│ │ │ │ ├── CONTRIBUTING.md
│ │ │ │ ├── LICENSE
│ │ │ │ ├── Makefile
│ │ │ │ ├── NOTICE
│ │ │ │ ├── README.md
│ │ │ │ ├── cpp/
│ │ │ │ │ ├── metrics.pb.cc
│ │ │ │ │ └── metrics.pb.h
│ │ │ │ ├── go/
│ │ │ │ │ └── metrics.pb.go
│ │ │ │ ├── metrics.proto
│ │ │ │ ├── pom.xml
│ │ │ │ ├── python/
│ │ │ │ │ └── prometheus/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── client/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── model/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ └── metrics_pb2.py
│ │ │ │ ├── ruby/
│ │ │ │ │ ├── .gitignore
│ │ │ │ │ ├── Gemfile
│ │ │ │ │ ├── LICENSE
│ │ │ │ │ ├── Makefile
│ │ │ │ │ ├── README.md
│ │ │ │ │ ├── Rakefile
│ │ │ │ │ ├── lib/
│ │ │ │ │ │ └── prometheus/
│ │ │ │ │ │ └── client/
│ │ │ │ │ │ ├── model/
│ │ │ │ │ │ │ ├── metrics.pb.rb
│ │ │ │ │ │ │ └── version.rb
│ │ │ │ │ │ └── model.rb
│ │ │ │ │ └── prometheus-client-model.gemspec
│ │ │ │ ├── setup.py
│ │ │ │ └── src/
│ │ │ │ └── main/
│ │ │ │ └── java/
│ │ │ │ └── io/
│ │ │ │ └── prometheus/
│ │ │ │ └── client/
│ │ │ │ └── Metrics.java
│ │ │ ├── common/
│ │ │ │ ├── README.md
│ │ │ │ ├── expfmt/
│ │ │ │ │ ├── bench_test.go
│ │ │ │ │ ├── decode.go
│ │ │ │ │ ├── decode_test.go
│ │ │ │ │ ├── encode.go
│ │ │ │ │ ├── expfmt.go
│ │ │ │ │ ├── fuzz/
│ │ │ │ │ │ └── corpus/
│ │ │ │ │ │ ├── from_test_parse_0
│ │ │ │ │ │ ├── from_test_parse_1
│ │ │ │ │ │ ├── from_test_parse_2
│ │ │ │ │ │ ├── from_test_parse_3
│ │ │ │ │ │ ├── from_test_parse_4
│ │ │ │ │ │ ├── from_test_parse_error_0
│ │ │ │ │ │ ├── from_test_parse_error_1
│ │ │ │ │ │ ├── from_test_parse_error_10
│ │ │ │ │ │ ├── from_test_parse_error_11
│ │ │ │ │ │ ├── from_test_parse_error_12
│ │ │ │ │ │ ├── from_test_parse_error_13
│ │ │ │ │ │ ├── from_test_parse_error_14
│ │ │ │ │ │ ├── from_test_parse_error_15
│ │ │ │ │ │ ├── from_test_parse_error_16
│ │ │ │ │ │ ├── from_test_parse_error_17
│ │ │ │ │ │ ├── from_test_parse_error_18
│ │ │ │ │ │ ├── from_test_parse_error_19
│ │ │ │ │ │ ├── from_test_parse_error_2
│ │ │ │ │ │ ├── from_test_parse_error_3
│ │ │ │ │ │ ├── from_test_parse_error_4
│ │ │ │ │ │ ├── from_test_parse_error_5
│ │ │ │ │ │ ├── from_test_parse_error_6
│ │ │ │ │ │ ├── from_test_parse_error_7
│ │ │ │ │ │ ├── from_test_parse_error_8
│ │ │ │ │ │ ├── from_test_parse_error_9
│ │ │ │ │ │ └── minimal
│ │ │ │ │ ├── fuzz.go
│ │ │ │ │ ├── json_decode.go
│ │ │ │ │ ├── json_decode_test.go
│ │ │ │ │ ├── testdata/
│ │ │ │ │ │ ├── json2
│ │ │ │ │ │ ├── protobuf
│ │ │ │ │ │ └── text
│ │ │ │ │ ├── text_create.go
│ │ │ │ │ ├── text_create_test.go
│ │ │ │ │ ├── text_parse.go
│ │ │ │ │ └── text_parse_test.go
│ │ │ │ ├── model/
│ │ │ │ │ ├── fingerprinting.go
│ │ │ │ │ ├── labels.go
│ │ │ │ │ ├── labels_test.go
│ │ │ │ │ ├── labelset.go
│ │ │ │ │ ├── metric.go
│ │ │ │ │ ├── metric_test.go
│ │ │ │ │ ├── model.go
│ │ │ │ │ ├── signature.go
│ │ │ │ │ ├── signature_test.go
│ │ │ │ │ ├── time.go
│ │ │ │ │ ├── time_test.go
│ │ │ │ │ ├── value.go
│ │ │ │ │ └── value_test.go
│ │ │ │ └── route/
│ │ │ │ └── route.go
│ │ │ └── procfs/
│ │ │ ├── .travis.yml
│ │ │ ├── AUTHORS.md
│ │ │ ├── CONTRIBUTING.md
│ │ │ ├── LICENSE
│ │ │ ├── NOTICE
│ │ │ ├── README.md
│ │ │ ├── doc.go
│ │ │ ├── fixtures/
│ │ │ │ ├── 26231/
│ │ │ │ │ ├── cmdline
│ │ │ │ │ ├── fd/
│ │ │ │ │ │ ├── 0
│ │ │ │ │ │ ├── 1
│ │ │ │ │ │ ├── 2
│ │ │ │ │ │ ├── 3
│ │ │ │ │ │ └── 4
│ │ │ │ │ ├── limits
│ │ │ │ │ └── stat
│ │ │ │ ├── 584/
│ │ │ │ │ └── stat
│ │ │ │ └── stat
│ │ │ ├── fs.go
│ │ │ ├── fs_test.go
│ │ │ ├── proc.go
│ │ │ ├── proc_limits.go
│ │ │ ├── proc_limits_test.go
│ │ │ ├── proc_stat.go
│ │ │ ├── proc_stat_test.go
│ │ │ ├── proc_test.go
│ │ │ ├── stat.go
│ │ │ └── stat_test.go
│ │ ├── russross/
│ │ │ └── blackfriday/
│ │ │ ├── .gitignore
│ │ │ ├── .travis.yml
│ │ │ ├── LICENSE.txt
│ │ │ ├── README.md
│ │ │ ├── block.go
│ │ │ ├── block_test.go
│ │ │ ├── html.go
│ │ │ ├── inline.go
│ │ │ ├── inline_test.go
│ │ │ ├── latex.go
│ │ │ ├── markdown.go
│ │ │ ├── smartypants.go
│ │ │ ├── upskirtref/
│ │ │ │ ├── Amps and angle encoding.html
│ │ │ │ ├── Amps and angle encoding.text
│ │ │ │ ├── Auto links.html
│ │ │ │ ├── Auto links.text
│ │ │ │ ├── Backslash escapes.html
│ │ │ │ ├── Backslash escapes.text
│ │ │ │ ├── Blockquotes with code blocks.html
│ │ │ │ ├── Blockquotes with code blocks.text
│ │ │ │ ├── Code Blocks.html
│ │ │ │ ├── Code Blocks.text
│ │ │ │ ├── Code Spans.html
│ │ │ │ ├── Code Spans.text
│ │ │ │ ├── Hard-wrapped paragraphs with list-like lines no empty line before block.html
│ │ │ │ ├── Hard-wrapped paragraphs with list-like lines no empty line before block.text
│ │ │ │ ├── Hard-wrapped paragraphs with list-like lines.html
│ │ │ │ ├── Hard-wrapped paragraphs with list-like lines.text
│ │ │ │ ├── Horizontal rules.html
│ │ │ │ ├── Horizontal rules.text
│ │ │ │ ├── Inline HTML (Advanced).html
│ │ │ │ ├── Inline HTML (Advanced).text
│ │ │ │ ├── Inline HTML (Simple).html
│ │ │ │ ├── Inline HTML (Simple).text
│ │ │ │ ├── Inline HTML comments.html
│ │ │ │ ├── Inline HTML comments.text
│ │ │ │ ├── Links, inline style.html
│ │ │ │ ├── Links, inline style.text
│ │ │ │ ├── Links, reference style.html
│ │ │ │ ├── Links, reference style.text
│ │ │ │ ├── Links, shortcut references.html
│ │ │ │ ├── Links, shortcut references.text
│ │ │ │ ├── Literal quotes in titles.html
│ │ │ │ ├── Literal quotes in titles.text
│ │ │ │ ├── Markdown Documentation - Basics.html
│ │ │ │ ├── Markdown Documentation - Basics.text
│ │ │ │ ├── Markdown Documentation - Syntax.html
│ │ │ │ ├── Markdown Documentation - Syntax.text
│ │ │ │ ├── Nested blockquotes.html
│ │ │ │ ├── Nested blockquotes.text
│ │ │ │ ├── Ordered and unordered lists.html
│ │ │ │ ├── Ordered and unordered lists.text
│ │ │ │ ├── Strong and em together.html
│ │ │ │ ├── Strong and em together.text
│ │ │ │ ├── Tabs.html
│ │ │ │ ├── Tabs.text
│ │ │ │ ├── Tidyness.html
│ │ │ │ └── Tidyness.text
│ │ │ └── upskirtref_test.go
│ │ ├── shurcooL/
│ │ │ └── sanitized_anchor_name/
│ │ │ ├── .travis.yml
│ │ │ ├── README.md
│ │ │ ├── main.go
│ │ │ └── main_test.go
│ │ ├── spf13/
│ │ │ ├── cobra/
│ │ │ │ ├── .gitignore
│ │ │ │ ├── .mailmap
│ │ │ │ ├── .travis.yml
│ │ │ │ ├── LICENSE.txt
│ │ │ │ ├── README.md
│ │ │ │ ├── bash_completions.go
│ │ │ │ ├── bash_completions.md
│ │ │ │ ├── bash_completions_test.go
│ │ │ │ ├── cobra/
│ │ │ │ │ ├── cmd/
│ │ │ │ │ │ ├── add.go
│ │ │ │ │ │ ├── helpers.go
│ │ │ │ │ │ ├── helpers_test.go
│ │ │ │ │ │ ├── init.go
│ │ │ │ │ │ ├── licenses.go
│ │ │ │ │ │ └── root.go
│ │ │ │ │ └── main.go
│ │ │ │ ├── cobra.go
│ │ │ │ ├── cobra_test.go
│ │ │ │ ├── command.go
│ │ │ │ ├── command_test.go
│ │ │ │ ├── doc_util.go
│ │ │ │ ├── examples_test.go
│ │ │ │ ├── man_docs.go
│ │ │ │ ├── man_docs.md
│ │ │ │ ├── man_docs_test.go
│ │ │ │ ├── md_docs.go
│ │ │ │ ├── md_docs.md
│ │ │ │ └── md_docs_test.go
│ │ │ └── pflag/
│ │ │ ├── .travis.yml
│ │ │ ├── LICENSE
│ │ │ ├── README.md
│ │ │ ├── bool.go
│ │ │ ├── bool_test.go
│ │ │ ├── count.go
│ │ │ ├── count_test.go
│ │ │ ├── duration.go
│ │ │ ├── example_test.go
│ │ │ ├── export_test.go
│ │ │ ├── flag.go
│ │ │ ├── flag_test.go
│ │ │ ├── float32.go
│ │ │ ├── float64.go
│ │ │ ├── golangflag.go
│ │ │ ├── golangflag_test.go
│ │ │ ├── int.go
│ │ │ ├── int32.go
│ │ │ ├── int64.go
│ │ │ ├── int8.go
│ │ │ ├── int_slice.go
│ │ │ ├── int_slice_test.go
│ │ │ ├── ip.go
│ │ │ ├── ip_test.go
│ │ │ ├── ipmask.go
│ │ │ ├── ipnet.go
│ │ │ ├── ipnet_test.go
│ │ │ ├── string.go
│ │ │ ├── string_slice.go
│ │ │ ├── string_slice_test.go
│ │ │ ├── uint.go
│ │ │ ├── uint16.go
│ │ │ ├── uint32.go
│ │ │ ├── uint64.go
│ │ │ ├── uint8.go
│ │ │ └── verify/
│ │ │ ├── all.sh
│ │ │ ├── gofmt.sh
│ │ │ └── golint.sh
│ │ └── ugorji/
│ │ └── go/
│ │ ├── LICENSE
│ │ ├── README.md
│ │ ├── codec/
│ │ │ ├── 0doc.go
│ │ │ ├── README.md
│ │ │ ├── binc.go
│ │ │ ├── cbor.go
│ │ │ ├── cbor_test.go
│ │ │ ├── codec_test.go
│ │ │ ├── codecgen/
│ │ │ │ ├── README.md
│ │ │ │ ├── gen.go
│ │ │ │ └── z.go
│ │ │ ├── codecgen_test.go
│ │ │ ├── decode.go
│ │ │ ├── encode.go
│ │ │ ├── fast-path.generated.go
│ │ │ ├── fast-path.go.tmpl
│ │ │ ├── fast-path.not.go
│ │ │ ├── gen-dec-array.go.tmpl
│ │ │ ├── gen-dec-map.go.tmpl
│ │ │ ├── gen-helper.generated.go
│ │ │ ├── gen-helper.go.tmpl
│ │ │ ├── gen.generated.go
│ │ │ ├── gen.go
│ │ │ ├── helper.go
│ │ │ ├── helper_internal.go
│ │ │ ├── helper_not_unsafe.go
│ │ │ ├── helper_test.go
│ │ │ ├── helper_unsafe.go
│ │ │ ├── json.go
│ │ │ ├── msgpack.go
│ │ │ ├── noop.go
│ │ │ ├── prebuild.go
│ │ │ ├── prebuild.sh
│ │ │ ├── py_test.go
│ │ │ ├── rpc.go
│ │ │ ├── simple.go
│ │ │ ├── test-cbor-goldens.json
│ │ │ ├── test.py
│ │ │ ├── tests.sh
│ │ │ ├── time.go
│ │ │ └── values_test.go
│ │ └── msgpack.org.md
│ ├── golang.org/
│ │ └── x/
│ │ ├── crypto/
│ │ │ ├── .gitattributes
│ │ │ ├── .gitignore
│ │ │ ├── AUTHORS
│ │ │ ├── CONTRIBUTING.md
│ │ │ ├── CONTRIBUTORS
│ │ │ ├── LICENSE
│ │ │ ├── PATENTS
│ │ │ ├── README
│ │ │ ├── bcrypt/
│ │ │ │ ├── base64.go
│ │ │ │ ├── bcrypt.go
│ │ │ │ └── bcrypt_test.go
│ │ │ ├── blowfish/
│ │ │ │ ├── block.go
│ │ │ │ ├── blowfish_test.go
│ │ │ │ ├── cipher.go
│ │ │ │ └── const.go
│ │ │ ├── bn256/
│ │ │ │ ├── bn256.go
│ │ │ │ ├── bn256_test.go
│ │ │ │ ├── constants.go
│ │ │ │ ├── curve.go
│ │ │ │ ├── example_test.go
│ │ │ │ ├── gfp12.go
│ │ │ │ ├── gfp2.go
│ │ │ │ ├── gfp6.go
│ │ │ │ ├── optate.go
│ │ │ │ └── twist.go
│ │ │ ├── cast5/
│ │ │ │ ├── cast5.go
│ │ │ │ └── cast5_test.go
│ │ │ ├── codereview.cfg
│ │ │ ├── curve25519/
│ │ │ │ ├── const_amd64.s
│ │ │ │ ├── cswap_amd64.s
│ │ │ │ ├── curve25519.go
│ │ │ │ ├── curve25519_test.go
│ │ │ │ ├── doc.go
│ │ │ │ ├── freeze_amd64.s
│ │ │ │ ├── ladderstep_amd64.s
│ │ │ │ ├── mont25519_amd64.go
│ │ │ │ ├── mul_amd64.s
│ │ │ │ └── square_amd64.s
│ │ │ ├── hkdf/
│ │ │ │ ├── example_test.go
│ │ │ │ ├── hkdf.go
│ │ │ │ └── hkdf_test.go
│ │ │ ├── md4/
│ │ │ │ ├── md4.go
│ │ │ │ ├── md4_test.go
│ │ │ │ └── md4block.go
│ │ │ ├── nacl/
│ │ │ │ ├── box/
│ │ │ │ │ ├── box.go
│ │ │ │ │ └── box_test.go
│ │ │ │ └── secretbox/
│ │ │ │ ├── secretbox.go
│ │ │ │ └── secretbox_test.go
│ │ │ ├── ocsp/
│ │ │ │ ├── ocsp.go
│ │ │ │ └── ocsp_test.go
│ │ │ ├── openpgp/
│ │ │ │ ├── armor/
│ │ │ │ │ ├── armor.go
│ │ │ │ │ ├── armor_test.go
│ │ │ │ │ └── encode.go
│ │ │ │ ├── canonical_text.go
│ │ │ │ ├── canonical_text_test.go
│ │ │ │ ├── clearsign/
│ │ │ │ │ ├── clearsign.go
│ │ │ │ │ └── clearsign_test.go
│ │ │ │ ├── elgamal/
│ │ │ │ │ ├── elgamal.go
│ │ │ │ │ └── elgamal_test.go
│ │ │ │ ├── errors/
│ │ │ │ │ └── errors.go
│ │ │ │ ├── keys.go
│ │ │ │ ├── keys_test.go
│ │ │ │ ├── packet/
│ │ │ │ │ ├── compressed.go
│ │ │ │ │ ├── compressed_test.go
│ │ │ │ │ ├── config.go
│ │ │ │ │ ├── encrypted_key.go
│ │ │ │ │ ├── encrypted_key_test.go
│ │ │ │ │ ├── literal.go
│ │ │ │ │ ├── ocfb.go
│ │ │ │ │ ├── ocfb_test.go
│ │ │ │ │ ├── one_pass_signature.go
│ │ │ │ │ ├── opaque.go
│ │ │ │ │ ├── opaque_test.go
│ │ │ │ │ ├── packet.go
│ │ │ │ │ ├── packet_test.go
│ │ │ │ │ ├── private_key.go
│ │ │ │ │ ├── private_key_test.go
│ │ │ │ │ ├── public_key.go
│ │ │ │ │ ├── public_key_test.go
│ │ │ │ │ ├── public_key_v3.go
│ │ │ │ │ ├── public_key_v3_test.go
│ │ │ │ │ ├── reader.go
│ │ │ │ │ ├── signature.go
│ │ │ │ │ ├── signature_test.go
│ │ │ │ │ ├── signature_v3.go
│ │ │ │ │ ├── signature_v3_test.go
│ │ │ │ │ ├── symmetric_key_encrypted.go
│ │ │ │ │ ├── symmetric_key_encrypted_test.go
│ │ │ │ │ ├── symmetrically_encrypted.go
│ │ │ │ │ ├── symmetrically_encrypted_test.go
│ │ │ │ │ ├── userattribute.go
│ │ │ │ │ ├── userattribute_test.go
│ │ │ │ │ ├── userid.go
│ │ │ │ │ └── userid_test.go
│ │ │ │ ├── read.go
│ │ │ │ ├── read_test.go
│ │ │ │ ├── s2k/
│ │ │ │ │ ├── s2k.go
│ │ │ │ │ └── s2k_test.go
│ │ │ │ ├── write.go
│ │ │ │ └── write_test.go
│ │ │ ├── otr/
│ │ │ │ ├── libotr_test_helper.c
│ │ │ │ ├── otr.go
│ │ │ │ ├── otr_test.go
│ │ │ │ └── smp.go
│ │ │ ├── pbkdf2/
│ │ │ │ ├── pbkdf2.go
│ │ │ │ └── pbkdf2_test.go
│ │ │ ├── poly1305/
│ │ │ │ ├── const_amd64.s
│ │ │ │ ├── poly1305.go
│ │ │ │ ├── poly1305_amd64.s
│ │ │ │ ├── poly1305_test.go
│ │ │ │ ├── sum_amd64.go
│ │ │ │ └── sum_ref.go
│ │ │ ├── ripemd160/
│ │ │ │ ├── ripemd160.go
│ │ │ │ ├── ripemd160_test.go
│ │ │ │ └── ripemd160block.go
│ │ │ ├── salsa20/
│ │ │ │ ├── salsa/
│ │ │ │ │ ├── hsalsa20.go
│ │ │ │ │ ├── salsa2020_amd64.s
│ │ │ │ │ ├── salsa208.go
│ │ │ │ │ ├── salsa20_amd64.go
│ │ │ │ │ ├── salsa20_ref.go
│ │ │ │ │ └── salsa_test.go
│ │ │ │ ├── salsa20.go
│ │ │ │ └── salsa20_test.go
│ │ │ ├── scrypt/
│ │ │ │ ├── scrypt.go
│ │ │ │ └── scrypt_test.go
│ │ │ ├── sha3/
│ │ │ │ ├── doc.go
│ │ │ │ ├── hashes.go
│ │ │ │ ├── keccakf.go
│ │ │ │ ├── register.go
│ │ │ │ ├── sha3.go
│ │ │ │ ├── sha3_test.go
│ │ │ │ ├── shake.go
│ │ │ │ ├── testdata/
│ │ │ │ │ └── keccakKats.json.deflate
│ │ │ │ ├── xor.go
│ │ │ │ ├── xor_generic.go
│ │ │ │ └── xor_unaligned.go
│ │ │ ├── ssh/
│ │ │ │ ├── agent/
│ │ │ │ │ ├── client.go
│ │ │ │ │ ├── client_test.go
│ │ │ │ │ ├── forward.go
│ │ │ │ │ ├── keyring.go
│ │ │ │ │ ├── server.go
│ │ │ │ │ ├── server_test.go
│ │ │ │ │ └── testdata_test.go
│ │ │ │ ├── benchmark_test.go
│ │ │ │ ├── buffer.go
│ │ │ │ ├── buffer_test.go
│ │ │ │ ├── certs.go
│ │ │ │ ├── certs_test.go
│ │ │ │ ├── channel.go
│ │ │ │ ├── cipher.go
│ │ │ │ ├── cipher_test.go
│ │ │ │ ├── client.go
│ │ │ │ ├── client_auth.go
│ │ │ │ ├── client_auth_test.go
│ │ │ │ ├── client_test.go
│ │ │ │ ├── common.go
│ │ │ │ ├── connection.go
│ │ │ │ ├── doc.go
│ │ │ │ ├── example_test.go
│ │ │ │ ├── handshake.go
│ │ │ │ ├── handshake_test.go
│ │ │ │ ├── kex.go
│ │ │ │ ├── kex_test.go
│ │ │ │ ├── keys.go
│ │ │ │ ├── keys_test.go
│ │ │ │ ├── mac.go
│ │ │ │ ├── mempipe_test.go
│ │ │ │ ├── messages.go
│ │ │ │ ├── messages_test.go
│ │ │ │ ├── mux.go
│ │ │ │ ├── mux_test.go
│ │ │ │ ├── server.go
│ │ │ │ ├── session.go
│ │ │ │ ├── session_test.go
│ │ │ │ ├── tcpip.go
│ │ │ │ ├── tcpip_test.go
│ │ │ │ ├── terminal/
│ │ │ │ │ ├── terminal.go
│ │ │ │ │ ├── terminal_test.go
│ │ │ │ │ ├── util.go
│ │ │ │ │ ├── util_bsd.go
│ │ │ │ │ ├── util_linux.go
│ │ │ │ │ └── util_windows.go
│ │ │ │ ├── test/
│ │ │ │ │ ├── agent_unix_test.go
│ │ │ │ │ ├── cert_test.go
│ │ │ │ │ ├── doc.go
│ │ │ │ │ ├── forward_unix_test.go
│ │ │ │ │ ├── session_test.go
│ │ │ │ │ ├── tcpip_test.go
│ │ │ │ │ ├── test_unix_test.go
│ │ │ │ │ └── testdata_test.go
│ │ │ │ ├── testdata/
│ │ │ │ │ ├── doc.go
│ │ │ │ │ └── keys.go
│ │ │ │ ├── testdata_test.go
│ │ │ │ ├── transport.go
│ │ │ │ └── transport_test.go
│ │ │ ├── twofish/
│ │ │ │ ├── twofish.go
│ │ │ │ └── twofish_test.go
│ │ │ ├── xtea/
│ │ │ │ ├── block.go
│ │ │ │ ├── cipher.go
│ │ │ │ └── xtea_test.go
│ │ │ └── xts/
│ │ │ ├── xts.go
│ │ │ └── xts_test.go
│ │ ├── net/
│ │ │ ├── .gitattributes
│ │ │ ├── .gitignore
│ │ │ ├── AUTHORS
│ │ │ ├── CONTRIBUTING.md
│ │ │ ├── CONTRIBUTORS
│ │ │ ├── LICENSE
│ │ │ ├── PATENTS
│ │ │ ├── README
│ │ │ ├── codereview.cfg
│ │ │ ├── context/
│ │ │ │ ├── context.go
│ │ │ │ ├── context_test.go
│ │ │ │ ├── ctxhttp/
│ │ │ │ │ ├── cancelreq.go
│ │ │ │ │ ├── cancelreq_go14.go
│ │ │ │ │ ├── ctxhttp.go
│ │ │ │ │ └── ctxhttp_test.go
│ │ │ │ └── withtimeout_test.go
│ │ │ ├── dict/
│ │ │ │ └── dict.go
│ │ │ ├── html/
│ │ │ │ ├── atom/
│ │ │ │ │ ├── atom.go
│ │ │ │ │ ├── atom_test.go
│ │ │ │ │ ├── gen.go
│ │ │ │ │ ├── table.go
│ │ │ │ │ └── table_test.go
│ │ │ │ ├── charset/
│ │ │ │ │ ├── charset.go
│ │ │ │ │ ├── charset_test.go
│ │ │ │ │ ├── gen.go
│ │ │ │ │ ├── table.go
│ │ │ │ │ └── testdata/
│ │ │ │ │ ├── HTTP-charset.html
│ │ │ │ │ ├── HTTP-vs-UTF-8-BOM.html
│ │ │ │ │ ├── HTTP-vs-meta-charset.html
│ │ │ │ │ ├── HTTP-vs-meta-content.html
│ │ │ │ │ ├── No-encoding-declaration.html
│ │ │ │ │ ├── README
│ │ │ │ │ ├── UTF-16BE-BOM.html
│ │ │ │ │ ├── UTF-16LE-BOM.html
│ │ │ │ │ ├── UTF-8-BOM-vs-meta-charset.html
│ │ │ │ │ ├── UTF-8-BOM-vs-meta-content.html
│ │ │ │ │ ├── meta-charset-attribute.html
│ │ │ │ │ └── meta-content-attribute.html
│ │ │ │ ├── const.go
│ │ │ │ ├── doc.go
│ │ │ │ ├── doctype.go
│ │ │ │ ├── entity.go
│ │ │ │ ├── entity_test.go
│ │ │ │ ├── escape.go
│ │ │ │ ├── escape_test.go
│ │ │ │ ├── example_test.go
│ │ │ │ ├── foreign.go
│ │ │ │ ├── node.go
│ │ │ │ ├── node_test.go
│ │ │ │ ├── parse.go
│ │ │ │ ├── parse_test.go
│ │ │ │ ├── render.go
│ │ │ │ ├── render_test.go
│ │ │ │ ├── testdata/
│ │ │ │ │ ├── go1.html
│ │ │ │ │ └── webkit/
│ │ │ │ │ └── README
│ │ │ │ ├── token.go
│ │ │ │ └── token_test.go
│ │ │ ├── http2/
│ │ │ │ ├── .gitignore
│ │ │ │ ├── Dockerfile
│ │ │ │ ├── Makefile
│ │ │ │ ├── README
│ │ │ │ ├── buffer.go
│ │ │ │ ├── buffer_test.go
│ │ │ │ ├── errors.go
│ │ │ │ ├── errors_test.go
│ │ │ │ ├── flow.go
│ │ │ │ ├── flow_test.go
│ │ │ │ ├── frame.go
│ │ │ │ ├── frame_test.go
│ │ │ │ ├── gotrack.go
│ │ │ │ ├── gotrack_test.go
│ │ │ │ ├── h2i/
│ │ │ │ │ ├── README.md
│ │ │ │ │ └── h2i.go
│ │ │ │ ├── headermap.go
│ │ │ │ ├── hpack/
│ │ │ │ │ ├── encode.go
│ │ │ │ │ ├── encode_test.go
│ │ │ │ │ ├── hpack.go
│ │ │ │ │ ├── hpack_test.go
│ │ │ │ │ ├── huffman.go
│ │ │ │ │ └── tables.go
│ │ │ │ ├── http2.go
│ │ │ │ ├── http2_test.go
│ │ │ │ ├── pipe.go
│ │ │ │ ├── pipe_test.go
│ │ │ │ ├── priority_test.go
│ │ │ │ ├── server.go
│ │ │ │ ├── server_test.go
│ │ │ │ ├── testdata/
│ │ │ │ │ └── draft-ietf-httpbis-http2.xml
│ │ │ │ ├── transport.go
│ │ │ │ ├── transport_test.go
│ │ │ │ ├── write.go
│ │ │ │ ├── writesched.go
│ │ │ │ └── z_spec_test.go
│ │ │ ├── icmp/
│ │ │ │ ├── dstunreach.go
│ │ │ │ ├── echo.go
│ │ │ │ ├── endpoint.go
│ │ │ │ ├── example_test.go
│ │ │ │ ├── extension.go
│ │ │ │ ├── extension_test.go
│ │ │ │ ├── helper_posix.go
│ │ │ │ ├── interface.go
│ │ │ │ ├── ipv4.go
│ │ │ │ ├── ipv4_test.go
│ │ │ │ ├── ipv6.go
│ │ │ │ ├── listen_posix.go
│ │ │ │ ├── listen_stub.go
│ │ │ │ ├── message.go
│ │ │ │ ├── message_test.go
│ │ │ │ ├── messagebody.go
│ │ │ │ ├── mpls.go
│ │ │ │ ├── multipart.go
│ │ │ │ ├── multipart_test.go
│ │ │ │ ├── packettoobig.go
│ │ │ │ ├── paramprob.go
│ │ │ │ ├── ping_test.go
│ │ │ │ ├── sys_freebsd.go
│ │ │ │ └── timeexceeded.go
│ │ │ ├── idna/
│ │ │ │ ├── idna.go
│ │ │ │ ├── idna_test.go
│ │ │ │ ├── punycode.go
│ │ │ │ └── punycode_test.go
│ │ │ ├── internal/
│ │ │ │ ├── iana/
│ │ │ │ │ ├── const.go
│ │ │ │ │ └── gen.go
│ │ │ │ ├── nettest/
│ │ │ │ │ ├── error_posix.go
│ │ │ │ │ ├── error_stub.go
│ │ │ │ │ ├── interface.go
│ │ │ │ │ ├── rlimit.go
│ │ │ │ │ ├── rlimit_stub.go
│ │ │ │ │ ├── rlimit_unix.go
│ │ │ │ │ ├── rlimit_windows.go
│ │ │ │ │ ├── stack.go
│ │ │ │ │ ├── stack_stub.go
│ │ │ │ │ ├── stack_unix.go
│ │ │ │ │ └── stack_windows.go
│ │ │ │ └── timeseries/
│ │ │ │ ├── timeseries.go
│ │ │ │ └── timeseries_test.go
│ │ │ ├── ipv4/
│ │ │ │ ├── control.go
│ │ │ │ ├── control_bsd.go
│ │ │ │ ├── control_pktinfo.go
│ │ │ │ ├── control_stub.go
│ │ │ │ ├── control_unix.go
│ │ │ │ ├── control_windows.go
│ │ │ │ ├── defs_darwin.go
│ │ │ │ ├── defs_dragonfly.go
│ │ │ │ ├── defs_freebsd.go
│ │ │ │ ├── defs_linux.go
│ │ │ │ ├── defs_netbsd.go
│ │ │ │ ├── defs_openbsd.go
│ │ │ │ ├── defs_solaris.go
│ │ │ │ ├── dgramopt_posix.go
│ │ │ │ ├── dgramopt_stub.go
│ │ │ │ ├── doc.go
│ │ │ │ ├── endpoint.go
│ │ │ │ ├── example_test.go
│ │ │ │ ├── gen.go
│ │ │ │ ├── genericopt_posix.go
│ │ │ │ ├── genericopt_stub.go
│ │ │ │ ├── header.go
│ │ │ │ ├── header_test.go
│ │ │ │ ├── helper.go
│ │ │ │ ├── helper_stub.go
│ │ │ │ ├── helper_unix.go
│ │ │ │ ├── helper_windows.go
│ │ │ │ ├── iana.go
│ │ │ │ ├── icmp.go
│ │ │ │ ├── icmp_linux.go
│ │ │ │ ├── icmp_stub.go
│ │ │ │ ├── icmp_test.go
│ │ │ │ ├── mocktransponder_test.go
│ │ │ │ ├── multicast_test.go
│ │ │ │ ├── multicastlistener_test.go
│ │ │ │ ├── multicastsockopt_test.go
│ │ │ │ ├── packet.go
│ │ │ │ ├── payload.go
│ │ │ │ ├── payload_cmsg.go
│ │ │ │ ├── payload_nocmsg.go
│ │ │ │ ├── readwrite_test.go
│ │ │ │ ├── sockopt.go
│ │ │ │ ├── sockopt_asmreq.go
│ │ │ │ ├── sockopt_asmreq_stub.go
│ │ │ │ ├── sockopt_asmreq_unix.go
│ │ │ │ ├── sockopt_asmreq_windows.go
│ │ │ │ ├── sockopt_asmreqn_stub.go
│ │ │ │ ├── sockopt_asmreqn_unix.go
│ │ │ │ ├── sockopt_ssmreq_stub.go
│ │ │ │ ├── sockopt_ssmreq_unix.go
│ │ │ │ ├── sockopt_stub.go
│ │ │ │ ├── sockopt_unix.go
│ │ │ │ ├── sockopt_windows.go
│ │ │ │ ├── sys_bsd.go
│ │ │ │ ├── sys_darwin.go
│ │ │ │ ├── sys_freebsd.go
│ │ │ │ ├── sys_linux.go
│ │ │ │ ├── sys_openbsd.go
│ │ │ │ ├── sys_stub.go
│ │ │ │ ├── sys_windows.go
│ │ │ │ ├── syscall_linux_386.go
│ │ │ │ ├── syscall_unix.go
│ │ │ │ ├── thunk_linux_386.s
│ │ │ │ ├── unicast_test.go
│ │ │ │ ├── unicastsockopt_test.go
│ │ │ │ ├── zsys_darwin.go
│ │ │ │ ├── zsys_dragonfly.go
│ │ │ │ ├── zsys_freebsd_386.go
│ │ │ │ ├── zsys_freebsd_amd64.go
│ │ │ │ ├── zsys_freebsd_arm.go
│ │ │ │ ├── zsys_linux_386.go
│ │ │ │ ├── zsys_linux_amd64.go
│ │ │ │ ├── zsys_linux_arm.go
│ │ │ │ ├── zsys_linux_arm64.go
│ │ │ │ ├── zsys_linux_ppc64.go
│ │ │ │ ├── zsys_linux_ppc64le.go
│ │ │ │ ├── zsys_netbsd.go
│ │ │ │ ├── zsys_openbsd.go
│ │ │ │ └── zsys_solaris.go
│ │ │ ├── ipv6/
│ │ │ │ ├── control.go
│ │ │ │ ├── control_rfc2292_unix.go
│ │ │ │ ├── control_rfc3542_unix.go
│ │ │ │ ├── control_stub.go
│ │ │ │ ├── control_unix.go
│ │ │ │ ├── control_windows.go
│ │ │ │ ├── defs_darwin.go
│ │ │ │ ├── defs_dragonfly.go
│ │ │ │ ├── defs_freebsd.go
│ │ │ │ ├── defs_linux.go
│ │ │ │ ├── defs_netbsd.go
│ │ │ │ ├── defs_openbsd.go
│ │ │ │ ├── defs_solaris.go
│ │ │ │ ├── dgramopt_posix.go
│ │ │ │ ├── dgramopt_stub.go
│ │ │ │ ├── doc.go
│ │ │ │ ├── endpoint.go
│ │ │ │ ├── example_test.go
│ │ │ │ ├── gen.go
│ │ │ │ ├── genericopt_posix.go
│ │ │ │ ├── genericopt_stub.go
│ │ │ │ ├── header.go
│ │ │ │ ├── header_test.go
│ │ │ │ ├── helper.go
│ │ │ │ ├── helper_stub.go
│ │ │ │ ├── helper_unix.go
│ │ │ │ ├── helper_windows.go
│ │ │ │ ├── iana.go
│ │ │ │ ├── icmp.go
│ │ │ │ ├── icmp_bsd.go
│ │ │ │ ├── icmp_linux.go
│ │ │ │ ├── icmp_solaris.go
│ │ │ │ ├── icmp_stub.go
│ │ │ │ ├── icmp_test.go
│ │ │ │ ├── icmp_windows.go
│ │ │ │ ├── mocktransponder_test.go
│ │ │ │ ├── multicast_test.go
│ │ │ │ ├── multicastlistener_test.go
│ │ │ │ ├── multicastsockopt_test.go
│ │ │ │ ├── payload.go
│ │ │ │ ├── payload_cmsg.go
│ │ │ │ ├── payload_nocmsg.go
│ │ │ │ ├── readwrite_test.go
│ │ │ │ ├── sockopt.go
│ │ │ │ ├── sockopt_asmreq_unix.go
│ │ │ │ ├── sockopt_asmreq_windows.go
│ │ │ │ ├── sockopt_ssmreq_stub.go
│ │ │ │ ├── sockopt_ssmreq_unix.go
│ │ │ │ ├── sockopt_stub.go
│ │ │ │ ├── sockopt_test.go
│ │ │ │ ├── sockopt_unix.go
│ │ │ │ ├── sockopt_windows.go
│ │ │ │ ├── sys_bsd.go
│ │ │ │ ├── sys_darwin.go
│ │ │ │ ├── sys_freebsd.go
│ │ │ │ ├── sys_linux.go
│ │ │ │ ├── sys_stub.go
│ │ │ │ ├── sys_windows.go
│ │ │ │ ├── syscall_linux_386.go
│ │ │ │ ├── syscall_unix.go
│ │ │ │ ├── thunk_linux_386.s
│ │ │ │ ├── unicast_test.go
│ │ │ │ ├── unicastsockopt_test.go
│ │ │ │ ├── zsys_darwin.go
│ │ │ │ ├── zsys_dragonfly.go
│ │ │ │ ├── zsys_freebsd_386.go
│ │ │ │ ├── zsys_freebsd_amd64.go
│ │ │ │ ├── zsys_freebsd_arm.go
│ │ │ │ ├── zsys_linux_386.go
│ │ │ │ ├── zsys_linux_amd64.go
│ │ │ │ ├── zsys_linux_arm.go
│ │ │ │ ├── zsys_linux_arm64.go
│ │ │ │ ├── zsys_linux_ppc64.go
│ │ │ │ ├── zsys_linux_ppc64le.go
│ │ │ │ ├── zsys_netbsd.go
│ │ │ │ ├── zsys_openbsd.go
│ │ │ │ └── zsys_solaris.go
│ │ │ ├── netutil/
│ │ │ │ ├── listen.go
│ │ │ │ └── listen_test.go
│ │ │ ├── proxy/
│ │ │ │ ├── direct.go
│ │ │ │ ├── per_host.go
│ │ │ │ ├── per_host_test.go
│ │ │ │ ├── proxy.go
│ │ │ │ ├── proxy_test.go
│ │ │ │ └── socks5.go
│ │ │ ├── publicsuffix/
│ │ │ │ ├── gen.go
│ │ │ │ ├── list.go
│ │ │ │ ├── list_test.go
│ │ │ │ ├── table.go
│ │ │ │ └── table_test.go
│ │ │ ├── trace/
│ │ │ │ ├── events.go
│ │ │ │ ├── histogram.go
│ │ │ │ ├── histogram_test.go
│ │ │ │ ├── trace.go
│ │ │ │ └── trace_test.go
│ │ │ ├── webdav/
│ │ │ │ ├── file.go
│ │ │ │ ├── file_test.go
│ │ │ │ ├── if.go
│ │ │ │ ├── if_test.go
│ │ │ │ ├── internal/
│ │ │ │ │ └── xml/
│ │ │ │ │ ├── README
│ │ │ │ │ ├── atom_test.go
│ │ │ │ │ ├── example_test.go
│ │ │ │ │ ├── marshal.go
│ │ │ │ │ ├── marshal_test.go
│ │ │ │ │ ├── read.go
│ │ │ │ │ ├── read_test.go
│ │ │ │ │ ├── typeinfo.go
│ │ │ │ │ ├── xml.go
│ │ │ │ │ └── xml_test.go
│ │ │ │ ├── litmus_test_server.go
│ │ │ │ ├── lock.go
│ │ │ │ ├── lock_test.go
│ │ │ │ ├── prop.go
│ │ │ │ ├── prop_test.go
│ │ │ │ ├── webdav.go
│ │ │ │ ├── webdav_test.go
│ │ │ │ ├── xml.go
│ │ │ │ └── xml_test.go
│ │ │ └── websocket/
│ │ │ ├── client.go
│ │ │ ├── exampledial_test.go
│ │ │ ├── examplehandler_test.go
│ │ │ ├── hybi.go
│ │ │ ├── hybi_test.go
│ │ │ ├── server.go
│ │ │ ├── websocket.go
│ │ │ └── websocket_test.go
│ │ └── sys/
│ │ ├── .gitattributes
│ │ ├── .gitignore
│ │ ├── AUTHORS
│ │ ├── CONTRIBUTING.md
│ │ ├── CONTRIBUTORS
│ │ ├── LICENSE
│ │ ├── PATENTS
│ │ ├── README
│ │ ├── codereview.cfg
│ │ ├── plan9/
│ │ │ ├── asm.s
│ │ │ ├── asm_plan9_386.s
│ │ │ ├── asm_plan9_amd64.s
│ │ │ ├── const_plan9.go
│ │ │ ├── dir_plan9.go
│ │ │ ├── env_plan9.go
│ │ │ ├── env_unset.go
│ │ │ ├── errors_plan9.go
│ │ │ ├── mkall.sh
│ │ │ ├── mkerrors.sh
│ │ │ ├── mksyscall.pl
│ │ │ ├── mksysnum_plan9.sh
│ │ │ ├── pwd_go15_plan9.go
│ │ │ ├── pwd_plan9.go
│ │ │ ├── race.go
│ │ │ ├── race0.go
│ │ │ ├── str.go
│ │ │ ├── syscall.go
│ │ │ ├── syscall_plan9.go
│ │ │ ├── syscall_test.go
│ │ │ ├── zsyscall_plan9_386.go
│ │ │ ├── zsyscall_plan9_amd64.go
│ │ │ └── zsysnum_plan9.go
│ │ ├── unix/
│ │ │ ├── .gitignore
│ │ │ ├── asm.s
│ │ │ ├── asm_darwin_386.s
│ │ │ ├── asm_darwin_amd64.s
│ │ │ ├── asm_darwin_arm.s
│ │ │ ├── asm_darwin_arm64.s
│ │ │ ├── asm_dragonfly_386.s
│ │ │ ├── asm_dragonfly_amd64.s
│ │ │ ├── asm_freebsd_386.s
│ │ │ ├── asm_freebsd_amd64.s
│ │ │ ├── asm_freebsd_arm.s
│ │ │ ├── asm_linux_386.s
│ │ │ ├── asm_linux_amd64.s
│ │ │ ├── asm_linux_arm.s
│ │ │ ├── asm_linux_arm64.s
│ │ │ ├── asm_linux_ppc64x.s
│ │ │ ├── asm_netbsd_386.s
│ │ │ ├── asm_netbsd_amd64.s
│ │ │ ├── asm_netbsd_arm.s
│ │ │ ├── asm_openbsd_386.s
│ │ │ ├── asm_openbsd_amd64.s
│ │ │ ├── asm_solaris_amd64.s
│ │ │ ├── constants.go
│ │ │ ├── creds_test.go
│ │ │ ├── env_unix.go
│ │ │ ├── env_unset.go
│ │ │ ├── export_test.go
│ │ │ ├── flock.go
│ │ │ ├── flock_linux_32bit.go
│ │ │ ├── gccgo.go
│ │ │ ├── gccgo_c.c
│ │ │ ├── gccgo_linux_amd64.go
│ │ │ ├── mkall.sh
│ │ │ ├── mkerrors.sh
│ │ │ ├── mksyscall.pl
│ │ │ ├── mksyscall_solaris.pl
│ │ │ ├── mksysctl_openbsd.pl
│ │ │ ├── mksysnum_darwin.pl
│ │ │ ├── mksysnum_dragonfly.pl
│ │ │ ├── mksysnum_freebsd.pl
│ │ │ ├── mksysnum_linux.pl
│ │ │ ├── mksysnum_netbsd.pl
│ │ │ ├── mksysnum_openbsd.pl
│ │ │ ├── mmap_unix_test.go
│ │ │ ├── race.go
│ │ │ ├── race0.go
│ │ │ ├── sockcmsg_linux.go
│ │ │ ├── sockcmsg_unix.go
│ │ │ ├── str.go
│ │ │ ├── syscall.go
│ │ │ ├── syscall_bsd.go
│ │ │ ├── syscall_bsd_test.go
│ │ │ ├── syscall_darwin.go
│ │ │ ├── syscall_darwin_386.go
│ │ │ ├── syscall_darwin_amd64.go
│ │ │ ├── syscall_darwin_arm.go
│ │ │ ├── syscall_darwin_arm64.go
│ │ │ ├── syscall_dragonfly.go
│ │ │ ├── syscall_dragonfly_386.go
│ │ │ ├── syscall_dragonfly_amd64.go
│ │ │ ├── syscall_freebsd.go
│ │ │ ├── syscall_freebsd_386.go
│ │ │ ├── syscall_freebsd_amd64.go
│ │ │ ├── syscall_freebsd_arm.go
│ │ │ ├── syscall_freebsd_test.go
│ │ │ ├── syscall_linux.go
│ │ │ ├── syscall_linux_386.go
│ │ │ ├── syscall_linux_amd64.go
│ │ │ ├── syscall_linux_arm.go
│ │ │ ├── syscall_linux_arm64.go
│ │ │ ├── syscall_linux_ppc64x.go
│ │ │ ├── syscall_netbsd.go
│ │ │ ├── syscall_netbsd_386.go
│ │ │ ├── syscall_netbsd_amd64.go
│ │ │ ├── syscall_netbsd_arm.go
│ │ │ ├── syscall_no_getwd.go
│ │ │ ├── syscall_openbsd.go
│ │ │ ├── syscall_openbsd_386.go
│ │ │ ├── syscall_openbsd_amd64.go
│ │ │ ├── syscall_solaris.go
│ │ │ ├── syscall_solaris_amd64.go
│ │ │ ├── syscall_test.go
│ │ │ ├── syscall_unix.go
│ │ │ ├── syscall_unix_test.go
│ │ │ ├── types_darwin.go
│ │ │ ├── types_dragonfly.go
│ │ │ ├── types_freebsd.go
│ │ │ ├── types_linux.go
│ │ │ ├── types_netbsd.go
│ │ │ ├── types_openbsd.go
│ │ │ ├── types_solaris.go
│ │ │ ├── zerrors_darwin_386.go
│ │ │ ├── zerrors_darwin_amd64.go
│ │ │ ├── zerrors_darwin_arm.go
│ │ │ ├── zerrors_darwin_arm64.go
│ │ │ ├── zerrors_dragonfly_386.go
│ │ │ ├── zerrors_dragonfly_amd64.go
│ │ │ ├── zerrors_freebsd_386.go
│ │ │ ├── zerrors_freebsd_amd64.go
│ │ │ ├── zerrors_freebsd_arm.go
│ │ │ ├── zerrors_linux_386.go
│ │ │ ├── zerrors_linux_amd64.go
│ │ │ ├── zerrors_linux_arm.go
│ │ │ ├── zerrors_linux_arm64.go
│ │ │ ├── zerrors_linux_ppc64.go
│ │ │ ├── zerrors_linux_ppc64le.go
│ │ │ ├── zerrors_netbsd_386.go
│ │ │ ├── zerrors_netbsd_amd64.go
│ │ │ ├── zerrors_netbsd_arm.go
│ │ │ ├── zerrors_openbsd_386.go
│ │ │ ├── zerrors_openbsd_amd64.go
│ │ │ ├── zerrors_solaris_amd64.go
│ │ │ ├── zsyscall_darwin_386.go
│ │ │ ├── zsyscall_darwin_amd64.go
│ │ │ ├── zsyscall_darwin_arm.go
│ │ │ ├── zsyscall_darwin_arm64.go
│ │ │ ├── zsyscall_dragonfly_386.go
│ │ │ ├── zsyscall_dragonfly_amd64.go
│ │ │ ├── zsyscall_freebsd_386.go
│ │ │ ├── zsyscall_freebsd_amd64.go
│ │ │ ├── zsyscall_freebsd_arm.go
│ │ │ ├── zsyscall_linux_386.go
│ │ │ ├── zsyscall_linux_amd64.go
│ │ │ ├── zsyscall_linux_arm.go
│ │ │ ├── zsyscall_linux_arm64.go
│ │ │ ├── zsyscall_linux_ppc64.go
│ │ │ ├── zsyscall_linux_ppc64le.go
│ │ │ ├── zsyscall_netbsd_386.go
│ │ │ ├── zsyscall_netbsd_amd64.go
│ │ │ ├── zsyscall_netbsd_arm.go
│ │ │ ├── zsyscall_openbsd_386.go
│ │ │ ├── zsyscall_openbsd_amd64.go
│ │ │ ├── zsyscall_solaris_amd64.go
│ │ │ ├── zsysctl_openbsd.go
│ │ │ ├── zsysnum_darwin_386.go
│ │ │ ├── zsysnum_darwin_amd64.go
│ │ │ ├── zsysnum_darwin_arm.go
│ │ │ ├── zsysnum_darwin_arm64.go
│ │ │ ├── zsysnum_dragonfly_386.go
│ │ │ ├── zsysnum_dragonfly_amd64.go
│ │ │ ├── zsysnum_freebsd_386.go
│ │ │ ├── zsysnum_freebsd_amd64.go
│ │ │ ├── zsysnum_freebsd_arm.go
│ │ │ ├── zsysnum_linux_386.go
│ │ │ ├── zsysnum_linux_amd64.go
│ │ │ ├── zsysnum_linux_arm.go
│ │ │ ├── zsysnum_linux_arm64.go
│ │ │ ├── zsysnum_linux_ppc64.go
│ │ │ ├── zsysnum_linux_ppc64le.go
│ │ │ ├── zsysnum_netbsd_386.go
│ │ │ ├── zsysnum_netbsd_amd64.go
│ │ │ ├── zsysnum_netbsd_arm.go
│ │ │ ├── zsysnum_openbsd_386.go
│ │ │ ├── zsysnum_openbsd_amd64.go
│ │ │ ├── zsysnum_solaris_amd64.go
│ │ │ ├── ztypes_darwin_386.go
│ │ │ ├── ztypes_darwin_amd64.go
│ │ │ ├── ztypes_darwin_arm.go
│ │ │ ├── ztypes_darwin_arm64.go
│ │ │ ├── ztypes_dragonfly_386.go
│ │ │ ├── ztypes_dragonfly_amd64.go
│ │ │ ├── ztypes_freebsd_386.go
│ │ │ ├── ztypes_freebsd_amd64.go
│ │ │ ├── ztypes_freebsd_arm.go
│ │ │ ├── ztypes_linux_386.go
│ │ │ ├── ztypes_linux_amd64.go
│ │ │ ├── ztypes_linux_arm.go
│ │ │ ├── ztypes_linux_arm64.go
│ │ │ ├── ztypes_linux_ppc64.go
│ │ │ ├── ztypes_linux_ppc64le.go
│ │ │ ├── ztypes_netbsd_386.go
│ │ │ ├── ztypes_netbsd_amd64.go
│ │ │ ├── ztypes_netbsd_arm.go
│ │ │ ├── ztypes_openbsd_386.go
│ │ │ ├── ztypes_openbsd_amd64.go
│ │ │ └── ztypes_solaris_amd64.go
│ │ └── windows/
│ │ ├── asm.s
│ │ ├── asm_windows_386.s
│ │ ├── asm_windows_amd64.s
│ │ ├── dll_windows.go
│ │ ├── env_unset.go
│ │ ├── env_windows.go
│ │ ├── eventlog.go
│ │ ├── exec_windows.go
│ │ ├── race.go
│ │ ├── race0.go
│ │ ├── registry/
│ │ │ ├── export_test.go
│ │ │ ├── key.go
│ │ │ ├── registry_test.go
│ │ │ ├── syscall.go
│ │ │ ├── value.go
│ │ │ └── zsyscall_windows.go
│ │ ├── security_windows.go
│ │ ├── service.go
│ │ ├── str.go
│ │ ├── svc/
│ │ │ ├── debug/
│ │ │ │ ├── log.go
│ │ │ │ └── service.go
│ │ │ ├── event.go
│ │ │ ├── eventlog/
│ │ │ │ ├── install.go
│ │ │ │ ├── log.go
│ │ │ │ └── log_test.go
│ │ │ ├── example/
│ │ │ │ ├── beep.go
│ │ │ │ ├── install.go
│ │ │ │ ├── main.go
│ │ │ │ ├── manage.go
│ │ │ │ └── service.go
│ │ │ ├── go12.c
│ │ │ ├── go12.go
│ │ │ ├── go13.go
│ │ │ ├── mgr/
│ │ │ │ ├── config.go
│ │ │ │ ├── mgr.go
│ │ │ │ ├── mgr_test.go
│ │ │ │ └── service.go
│ │ │ ├── security.go
│ │ │ ├── service.go
│ │ │ ├── svc_test.go
│ │ │ ├── sys_386.s
│ │ │ └── sys_amd64.s
│ │ ├── syscall.go
│ │ ├── syscall_test.go
│ │ ├── syscall_windows.go
│ │ ├── syscall_windows_test.go
│ │ ├── zsyscall_windows.go
│ │ ├── ztypes_windows.go
│ │ ├── ztypes_windows_386.go
│ │ └── ztypes_windows_amd64.go
│ ├── gopkg.in/
│ │ └── yaml.v2/
│ │ ├── .travis.yml
│ │ ├── LICENSE
│ │ ├── LICENSE.libyaml
│ │ ├── README.md
│ │ ├── apic.go
│ │ ├── decode.go
│ │ ├── decode_test.go
│ │ ├── emitterc.go
│ │ ├── encode.go
│ │ ├── encode_test.go
│ │ ├── parserc.go
│ │ ├── readerc.go
│ │ ├── resolve.go
│ │ ├── scannerc.go
│ │ ├── sorter.go
│ │ ├── suite_test.go
│ │ ├── writerc.go
│ │ ├── yaml.go
│ │ ├── yamlh.go
│ │ └── yamlprivateh.go
│ └── speter.net/
│ └── go/
│ └── exp/
│ └── math/
│ └── dec/
│ └── inf/
│ ├── LICENSE
│ ├── benchmark_test.go
│ ├── dec.go
│ ├── dec_go1_2_test.go
│ ├── dec_internal_test.go
│ ├── dec_test.go
│ ├── example_test.go
│ ├── rounder.go
│ ├── rounder_example_test.go
│ └── rounder_test.go
├── version/
│ ├── VERSION
│ └── info.go
└── winrm/
└── winrm.go
================================================
FILE CONTENTS
================================================
================================================
FILE: .dockerignore
================================================
vendor
golib
================================================
FILE: .gitignore
================================================
*.swp
bin/
_dist/
.idea
golib
release
================================================
FILE: BUILDING.md
================================================
## Building
* install [go version 1.5.1 or later](https://golang.org/doc/install)
* install [glide](https://github.com/Masterminds/glide#install)
* type the following:
```
go get github.com/fabric8io/kansible
cd $GOPATH/src/github.com/fabric8io/kansible
make bootstrap
```
* then to build the binary
make build
* you can then run it via
./bin/kansible
### Running pods locally
You can run `kansible rc ...` easily on a local build when working on the code. However to try out changes to the pod for `kansible pod ...` you can run that locally outside of docker with a small trick.
You must set the `HOSTNAME` environment variable to a valid pod name you wish to use.
```bash
export HOSTNAME=fabric8-znuj5
```
e.g. the above uses the pod name for the current fabric8 console.
This lets you pretend to be different pods from the command line when trying it out locally. e.g. run the `kansible pod ...` command in 2 shells as different pods, provided the `HOSTNAME` values are different.
The pod name must be valid as the `kansible pod ...` command will update the pod to annotate which host it has chosen etc.
So to run the [above examples](#running-kansible) type the following:
for [fabric8-ansible-spring-boot](https://github.com/fabric8io/fabric8-ansible-spring-boot):
kansible pod -rc springboot-demo appservers /opt/springboot-camel-2.2.98-SNAPSHOT
for [fabric8-ansible-hawtapp](https://github.com/fabric8io/fabric8-ansible-hawtapp):
kansible pod -rc hawtapp-demo appservers /opt/cdi-camel-2.2.98-SNAPSHOT-app/bin/run.sh
### Working with Windows
If you're like me and have used a Mac for years, you might have forgotten how to work with Windows boxes ;). Here are some tips on how to test things out on the Windows VMs
First install the [winrm binary](http://github.com/masterzen/winrm/) which you can do if you have golang installed via:
go get github.com/masterzen/winrm
Then to connect to one of the Windows VMs from an example, such as the [fabric8-ansible-hawtapp](https://github.com/fabric8io/fabric8-ansible-hawtapp) you can use:
winrm -hostname 10.10.3.21 -username IEUser -password 'Passw0rd!' 'cmd'
Then you can test if a java process is running via
jps -l
If you wish to kill a java process from its PID you can type:
taskkill /PID 4308 /F
Enjoy!
## Releasing
Just run `make release`. This will cross-compile for all supported platforms, create tag & upload tarballs (zip file for Windows) to Github releases for download.
Updating the version is done via `make bump` to bump minor version & `make bump-patch` to bump patch version. This is necessary as tags are created from the version specified when releasing.
================================================
FILE: Dockerfile
================================================
# Runtime image for kansible; expects ./bin/kansible to have been built first (make build).
FROM centos:7
# Working directory for the Ansible playbook content the pod operates on.
WORKDIR /playbook
ENV PATH $PATH:/usr/local/kansible/
ADD ./bin/kansible /usr/local/kansible/
# Shell-form CMD so $KANSIBLE_HOSTS (the inventory host group) is expanded at container start.
CMD kansible pod $KANSIBLE_HOSTS
================================================
FILE: Dockerfile.scratch
================================================
# Minimal image containing nothing but the statically linked kansible binary.
FROM scratch
# Call 'make build-all' before building it
ADD bin/kansible-docker /kansible
# Variables are interpolated not by Docker but by kansible (they are transmitted literally)
CMD [ "/kansible", "pod", "$KANSIBLE_HOSTS"]
================================================
FILE: Jenkinsfile
================================================
#!/usr/bin/groovy
// Jenkins pipeline: builds, tests and lints kansible inside a fabric8/go-builder pod,
// then builds the Docker image and pushes it to docker.io/fabric8 with the 'latest' tag.
node{
stage 'canary release'
git 'https://github.com/fabric8io/kansible.git'
kubernetes.pod('buildpod').withImage('fabric8/go-builder').inside {
// Network-heavy bootstrap (go get of build tools) is retried to ride out transient failures.
retry(3){
sh 'make bootstrap'
}
retry(3){
sh "cd /go/src/workspace/${env.JOB_NAME} && make build test lint"
}
def imageName = 'kansible'
def tag = 'latest'
// Build the image from the checked-out workspace, tag it into the fabric8 repo, then push.
kubernetes.image().withName(imageName).build().fromPath(".")
kubernetes.image().withName(imageName).tag().inRepository('docker.io/fabric8/'+imageName).force().withTag(tag)
kubernetes.image().withName('docker.io/fabric8/'+imageName).push().withTag(tag).toRegistry()
}
}
================================================
FILE: LICENSE
================================================
Copyright 2016 Red Hat, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
FILE: Makefile
================================================
# Fail fast: every target below relies on a working Go workspace.
ifndef GOPATH
$(error No GOPATH set)
endif

NAME := kansible
VERSION := $(shell cat version/VERSION)
REVISION=$(shell git rev-parse --short HEAD 2> /dev/null || echo 'unknown')
BRANCH=$(shell git rev-parse --abbrev-ref HEAD 2> /dev/null || echo 'unknown')
HOST=$(shell hostname -f)
BUILD_DATE=$(shell date +%Y%m%d-%H:%M:%S)
GO_VERSION=$(shell go version | sed -e 's/^[^0-9.]*\([0-9.]*\).*/\1/')
ROOT_PACKAGE := $(shell go list .)

# Linker flags stamping build metadata into the version package.
BUILDFLAGS := -ldflags \
  " -X $(ROOT_PACKAGE)/version.Version='$(VERSION)'\
    -X $(ROOT_PACKAGE)/version.Revision='$(REVISION)'\
    -X $(ROOT_PACKAGE)/version.Branch='$(BRANCH)'\
    -X $(ROOT_PACKAGE)/version.BuildUser='${USER}@$(HOST)'\
    -X $(ROOT_PACKAGE)/version.BuildDate='$(BUILD_DATE)'\
    -X $(ROOT_PACKAGE)/version.GoVersion='$(GO_VERSION)'"

BIN_DIR := bin
DIST_DIR := _dist
GO := GO15VENDOREXPERIMENT=1 go
GO_PACKAGES := $(shell $(GO) list ./... | grep -v /vendor/)
SRCS := $(shell find . -path ./vendor -prune -o -name '*.go')
MAIN_GO := kansible.go
KANSIBLE_BIN := $(BIN_DIR)/kansible
LINTERS := --disable-all --enable=vet --enable=golint --enable=errcheck --enable=ineffassign --enable=interfacer --enable=goimports --enable=gofmt

# Build the kansible binary for the host platform into $(KANSIBLE_BIN).
build: $(MAIN_GO)
	$(GO) build -o $(KANSIBLE_BIN) $(BUILDFLAGS) $<

# Install the build / lint / release toolchain.
bootstrap:
	$(GO) get -u github.com/golang/lint/golint github.com/mitchellh/gox github.com/alecthomas/gometalinter github.com/fabric8io/gobump
	gometalinter --install --update

# Refresh the vendored dependencies via glide.
update-vendor:
	GO15VENDOREXPERIMENT=1 glide up --update-vendored

# Cross-compile for every supported OS/arch into $(DIST_DIR)/<os>-<arch>/.
build-all:
	gox -verbose \
	$(BUILDFLAGS) \
	-os="linux darwin freebsd netbsd openbsd solaris windows" \
	-arch="amd64 386" \
	-output="$(DIST_DIR)/{{.OS}}-{{.Arch}}/{{.Dir}}" .

clean:
	rm -rf $(DIST_DIR) $(BIN_DIR) release

install: build
	install -d $(DESTDIR)/usr/local/bin/
	install -m 755 $(KANSIBLE_BIN) $(DESTDIR)/usr/local/bin/kansible

prep-bintray-json:
# TRAVIS_TAG is set to the tag name if the build is a tag
ifdef TRAVIS_TAG
	@jq '.version.name |= "$(VERSION)"' _scripts/ci/bintray-template.json | \
	jq '.package.repo |= "kansible"' > _scripts/ci/bintray-ci.json
else
	@jq '.version.name |= "$(VERSION)"' _scripts/ci/bintray-template.json \
	> _scripts/ci/bintray-ci.json
endif

quicktest:
	$(GO) test -short $(GO_PACKAGES)

test:
	$(GO) test -v $(GO_PACKAGES)

lint:
	@echo "Linting does not currently fail the build but is likely to do so in future - fix stuff you see, when you see it please"
# Lint against a throwaway GOPATH containing the vendored deps.
# FIX: was 'rm -rf $${TMP}}' - the stray '}' removed the non-existent path "$TMP}",
# leaving the temporary directory behind on every run.
	@export TMP=$(shell mktemp -d) && cp -r vendor $${TMP}/src && GOPATH=$${TMP}:$${GOPATH} GO15VENDOREXPERIMENT=1 gometalinter --vendor --deadline=60s $(LINTERS) ./... || rm -rf $${TMP} || true

# Build the minimal scratch-based Docker image from a static linux/amd64 binary.
docker-scratch:
	gox -verbose $(BUILDFLAGS) -os="linux" -arch="amd64" \
	-output="bin/kansible-docker" .
	docker build -f Dockerfile.scratch -t "fabric8/kansible:scratch" .

# Package tarballs (zip for Windows) per platform and publish a GitHub release.
release: build-all
	rm -rf release && mkdir release
	for os in linux darwin freebsd netbsd openbsd solaris ; do \
	for arch in amd64 386 ; do \
	if [ -f "$(DIST_DIR)/$$os-$$arch/$(NAME)" ] ; then \
	tar --transform "s|^$(DIST_DIR)/$$os-$$arch/||" -czf release/$(NAME)-$(VERSION)-$$os-$$arch.tar.gz $(DIST_DIR)/$$os-$$arch/$(NAME) README.md LICENSE ; \
	fi ; \
	done ; \
	done
	for arch in amd64 386 ; do \
	zip -q --junk-paths release/$(NAME)-$(VERSION)-windows-$$arch.zip $(DIST_DIR)/windows-$$arch/$(NAME).exe README.md LICENSE ; \
	done ; \
	$(GO) get -u github.com/progrium/gh-release
	gh-release create fabric8io/$(NAME) $(VERSION) $(BRANCH) $(VERSION)

# Version bumps write back to version/VERSION (tags are created from it on release).
bump:
	gobump minor -f version/VERSION

bump-patch:
	gobump patch -f version/VERSION

.PHONY: release clean

.PHONY: bootstrap \
	build \
	build-all \
	clean \
	install \
	prep-bintray-json \
	quicktest \
	release \
	test \
	test-charts \
	lint \
	bump \
	bump-patch \
	update-vendor
================================================
FILE: README.md
================================================
# Kansible
Kansible lets you orchestrate operating system _processes_ on Windows or any Unix in the same way as you orchestrate your Docker _containers_ with [Kubernetes](http://kubernetes.io/) by using [Ansible](https://www.ansible.com/) to provision the software onto hosts and Kubernetes to orchestrate the processes and the containers in a single system.
Kansible uses:
* [Ansible](https://www.ansible.com/) to install, configure and provision your software onto machines using [playbooks](http://docs.ansible.com/ansible/playbooks.html)
* [Kubernetes](http://kubernetes.io/) to run and manage the processes and perform service discovery, scaling, load balancing together with centralised logging, metrics, alerts and management.
Kansible provides a single pane of glass, CLI and REST API to all your processes whether they are inside docker containers or running as vanilla processes on Windows, AIX, Solaris or HP-UX or old Linux distros that predate docker.
Kansible lets you migrate to a pure container based Docker world at whatever pace suits you, while using Kubernetes to orchestrate all your containers and operating system processes for your entire journey.
## Features
* All your processes appear as Pods inside Kubernetes namespaces so you can visualise, query and watch the status of your processes and containers in a canonical way
* Each kind of process has its own [Replication Controller](http://kubernetes.io/v1.1/docs/user-guide/replication-controller.html) to ensure processes keep running and so you can [manually or automatically scale](http://kubernetes.io/v1.1/docs/user-guide/replication-controller.html#scaling) the number of processes up or down; up to the limit in the number of hosts in your [Ansible inventory](http://docs.ansible.com/ansible/intro_inventory.html)
* Reuse Kubernetes [liveness checks](http://kubernetes.io/v1.1/docs/user-guide/liveness/README.html) so that Kubernetes can monitor the state of your process and restart if it goes bad
* Reuse Kubernetes [readiness checks](http://kubernetes.io/v1.1/docs/user-guide/production-pods.html#liveness-and-readiness-probes-aka-health-checks) so that Kubernetes can know when your process can be included into the [internal or external service load balancer](http://kubernetes.io/v1.1/docs/user-guide/services.html)
* You can view the logs of all your processes in the canonical kubernetes way via the CLI, REST API or web console
* Port forwarding works from the pods to the remote processes so that you can reuse [Kubernetes Services](http://kubernetes.io/v1.1/docs/user-guide/services.html) to load balance across your processes automatically
* [Centralised logging](http://fabric8.io/guide/logging.html) and [metrics and alerting](http://fabric8.io/guide/metrics.html) works equally across your containers and processes
* You can open a shell into the remote process machine via the CLI, REST API or web console; which is either a unix bash or a windows cmd shell as shown in the fabric8 console screenshot below:
### Ansible perspective on Kansible
If you already use Ansible; then one way to think about Kansible is that you continue to use Ansible however you have been doing; using reusable composable playbooks and so forth. The only change to your playbooks that Kansible introduces is that you don't run Unix or Windows services (e.g. like systemd / init.d). You install and configure the software via Ansible playbooks; setting up whatever directories, users and permissions you require. But you don't create services or run the software.
Then we use Kubernetes (and kansible pods) as the alternative to Unix and Windows services. The reason we do this is that Kubernetes is a better distributed version of systemd/init.d/Windows services as it also includes features like:
* service discovery and load balancing
* health monitoring
* centralised logging, metrics and alerts
* manual and automatic scaling up or down
* a consistent web console, CLI and REST API across processes running via kansible and Docker containers
### Kubernetes perspective on Kansible
If you already use Kubernetes then you could look at Kansible as a way of extending the reach of Kubernetes to manage both Docker containers on a host that supports Docker plus remote processes on operating systems that don't support Docker. That then makes Kubernetes the orchestrator of all your software; whether its Dockerized or not!
All your processes are belong to us! :)
Longer term it would be great for Docker to be ported to more operating systems; along with the kubelet. So ideally more operating systems could use native Docker and kubelet; in which case there's less need for kansible. But at the time of writing, that goal is looking some way off for older versions of Windows along with AIX, Solaris and HPUX.
What's really compelling about using Kubernetes to manage Docker containers and operating system processes via Kansible is that you can mix and match on a per microservice basis - use the right tool for the job right now - but all the while use a single orchestrator platform, Kubernetes, a single REST API, CLI tools and web console - with standard service discovery, load balancing and management functions.
Using Docker is more optimal; so we hope over time that you can use more Docker and less kansible; but it's going to take our industry a while to Dockerize all the things and move everything to Linux; or to have fully working Docker + Kubernetes on Windows + all flavours of Unix. Until then, kansible can help! At least we can now pretend everything's Dockerized and running on Linux from an orchestration and management perspective ;)
## How to use Kansible
You use kansible as follows:
* create or reuse an existing [Ansible playbook](http://docs.ansible.com/ansible/playbooks.html) to _install and provision_ the software you wish to run on a number of machines defined by the [Ansible inventory](http://docs.ansible.com/ansible/intro_inventory.html)
* if you reuse an existing playbook, make sure you disable running the unix / windows services; as you will run that command instead in the kansible pods.
* run the [Ansible playbook](http://docs.ansible.com/ansible/playbooks.html) either as part of a [CI / CD build pipeline](http://fabric8.io/guide/cdelivery.html) when there's a change to the git repo of the Playbook, or using a command line tool, cron or [Ansible Tower](https://www.ansible.com/tower)
* define a Replication Controller YAML file at `kubernetes/$HOSTS/rc.yml` for running the command for your process [like this example](https://github.com/fabric8io/fabric8-ansible-spring-boot/blob/master/kubernetes/appservers/rc.yml#L15-L16).
* the RC YAML file contains the command you need to run remotely to execute your process via [`$KANSIBLE_COMMAND`](https://github.com/fabric8io/fabric8-ansible-spring-boot/blob/master/kubernetes/appservers/rc.yml#L15-L16)
* you can think of the RC YAML file as like the systemd configuration file, describing the command to run to startup the application. Only its a single file for the entire cluster which is stored in Kubernetes. Plus it can include [readiness and liveness probes too](http://kubernetes.io/v1.1/docs/user-guide/production-pods.html#liveness-and-readiness-probes-aka-health-checks)
.
* You can use the `{{ foo_bar }}` Ansible variable expressions in the RC YAML to refer to variables from your [global Ansible variables file](https://github.com/fabric8io/fabric8-ansible-spring-boot/blob/master/group_vars/appservers)
* to take advantage of Kubernetes services, you can also define any number of Service YAML files at `kubernetes/$HOSTS/service.yml`
* they can be named anything you like so long as they are valid Kubernetes YAML or JSON and are in the same folder as the RC.yml
* see the [Kubernetes Services example](#trying-out-kubernetes-services) and its [kubernetes/appservers/service.yml](https://github.com/fabric8io/fabric8-ansible-hawtapp/blob/master/kubernetes/appservers/service.yml) file for how to do this.
* whenever the playbook git repo changes, run the **kansible rc** command inside a clone of the playbook git repository:
kansible rc myhosts
where `myhosts` is the name of the hosts you wish to use in the [Ansible inventory](http://docs.ansible.com/ansible/intro_inventory.html).
Then **kansible** will then create/update [Secrets](http://kubernetes.io/v1.1/docs/user-guide/secrets.html) for any SSH private keys in your [Ansible inventory](http://docs.ansible.com/ansible/intro_inventory.html) and create or update a [Replication Controller](http://kubernetes.io/v1.1/docs/user-guide/replication-controller.html) of kansible pods which will start and supervise your processes, capture the logs and redirect ports to enable liveness checks, centralised metrics and Kubernetes services.
So for each remote process on Windows, Linux, Solaris, AIX, HPUX kansible will create a kansible pod in Kubernetes which starts the command and tails the log to stdout/stderr. You can then use the [Replication Controller scaling](http://kubernetes.io/v1.1/docs/user-guide/replication-controller.html#scaling) to start/stop your remote processes!
### Working with kansible pods
* As processes start and stop, you'll see the processes appear or disappear inside kubernetes, the CLI, REST API or the console as a kansible pod.
* You can scale up and down the kansible Replication Controller via CLI, REST API or console.
* You can then view the logs of any process in the usual kubernetes way via the command line, REST API or web console.
* [Centralised logging](http://fabric8.io/guide/logging.html) then works great on all your processes (providing the command you run outputs logs to `stdout` / `stderr`)
### Exposing ports
Any ports defined in the Replication Controller YAML file will be automatically forwarded to the remote process. See [this example rc.yml file](https://github.com/fabric8io/fabric8-ansible-hawtapp/blob/master/kubernetes/appservers/rc.yml#L19-L21) to see how to expose ports.
This means you can take advantage of things like [centralised metrics and alerting](http://fabric8.io/guide/metrics.html), liveness checks, Kubernetes Services along with the built in service discovery and load balancing inside Kubernetes!
To see the use of Kubernetes Services and load balancing across remote processes with kansible check out the [fabric8-ansible-hawtapp demo](#fabric8-ansible-hawtapp).
### Opening a shell on the remote process
You can open a shell directly on the remote machine via the web console or by running
oc exec -it -p mypodname bash
Then you'll get a remote shell on the Windows or Unix box!
## Examples
Before you start with the kansible examples you'll need:
* [Download a release](https://github.com/fabric8io/kansible/releases) and add `kansible` to your `$PATH`
* Or [Build kansible](https://github.com/fabric8io/kansible/blob/master/BUILDING.md) then add the `$PWD/bin` folder to your `$PATH` so that you can type in `kansible` on the command line
* Download and install [VirtualBox](https://www.virtualbox.org/wiki/Downloads)
* Download and install [Vagrant](http://www.vagrantup.com/downloads.html)
These examples assume you have a working [Kubernetes](http://kubernetes.io/) or [OpenShift](https://www.openshift.org/) cluster running.
If you don't yet have a Kubernetes cluster to play with, try using the [Fabric8 Vagrant image that includes OpenShift Origin](http://fabric8.io/guide/getStarted/vagrant.html) as the Kubernetes cluster.
### [fabric8-ansible-spring-boot](https://github.com/fabric8io/fabric8-ansible-spring-boot)
To run this example type the following to setup the VMs and provision things with Ansible:
```bash
git clone https://github.com/fabric8io/fabric8-ansible-spring-boot.git
cd fabric8-ansible-spring-boot
vagrant up
ansible-playbook -i inventory provisioning/site.yml -vv
```
You now should have 2 sample VMs (app1 and app2) with a Spring Boot based Java application provisioned onto the machines in the `/opt` folder, but with nothing actually running yet.
Now to setup the kansible Replication Controller run the following, where `appservers` is the hosts from the [Ansible inventory](http://docs.ansible.com/ansible/intro_inventory.html) in [the inventory file](https://github.com/fabric8io/fabric8-ansible-spring-boot/blob/master/inventory#L1-L3)
```bash
kansible rc appservers
```
This should now create a Replication Controller called `springboot-demo` along with 2 pods for each host in the `appservers` inventory file.
You should be able to look at the logs of those 2 pods in the usual Kubernetes / OpenShift way; e.g. via the fabric8 or OpenShift console or via the CLI:
e.g.
```bash
oc get pods
oc logs -f springboot-demo-81ryw
```
where `springboot-demo-81ryw` is the name of the pod you wish to view the logs.
You can now scale down / up the number of pods using the web console or the command line:
```bash
oc scale rc --replicas=2 springboot-demo
```
#### Important files
The examples use the following files:
* [inventory](https://github.com/fabric8io/fabric8-ansible-spring-boot/blob/master/inventory) is the Ansible inventory file to define the [hosts](https://github.com/fabric8io/fabric8-ansible-spring-boot/blob/master/inventory#L1-L3) to run the processes
* [kubernetes/$HOSTS/rc.yml](https://github.com/fabric8io/fabric8-ansible-spring-boot/blob/master/kubernetes/appservers/rc.yml) is the Replication Controller configuration used to [define the command `$KANSIBLE_COMMAND`](https://github.com/fabric8io/fabric8-ansible-spring-boot/blob/master/kubernetes/appservers/rc.yml#L15-L16) which kansible uses to run the process remotely
### [fabric8-ansible-hawtapp](https://github.com/fabric8io/fabric8-ansible-hawtapp)
This demonstration is similar to the above but it also demonstrates:
* using both Windows and Linux boxes as the hosts
* using Kubernetes Services to load balance across the processes
To run this example type the following to setup the VMs and provision things with Ansible:
```bash
git clone https://github.com/fabric8io/fabric8-ansible-hawtapp.git
cd fabric8-ansible-hawtapp
vagrant up
ansible-playbook -i inventory provisioning/site.yml -vv
```
Now to setup the Replication Controller for the supervisors run the following, where `appservers` is the hosts from the inventory
```
kansible rc appservers
```
The pods should now start up for each host in the inventory.
### Using windows machines
This example uses 1 windows box and 1 linux box in the inventory. The example shows that kansible can support both operating systems just fine; it does require the playbooks to handle the differences though.
Also you typically will need to use different commands to run on Unix versus Windows which is configured in the [rc.yml file](https://github.com/fabric8io/fabric8-ansible-hawtapp/blob/master/kubernetes/appservers/rc.yml#L15-L18). For more details see the [documentation on the KANSIBLE_COMMAND_WINRM environment variable](#kansible_command_winrm)
To use windows you may need to first make sure you've installed **pywinrm**:
sudo pip install pywinrm
If you try to open shells via the fabric8 console or `oc exec -it -p podName bash` for both pods running, you'll see that one runs on a Linux box and one runs on a Windows machine like this [example screenshot](https://raw.githubusercontent.com/fabric8io/kansible/master/docs/images/kansible-demo.png)!
#### Trying out Kubernetes Services
This example also creates a [Kubernetes Service](http://kubernetes.io/v1.1/docs/user-guide/services.html) which load balances across the remote processes thanks to the [kubernetes/appservers/service.yml](https://github.com/fabric8io/fabric8-ansible-hawtapp/blob/master/kubernetes/appservers/service.yml) file which is then exposed via the [LoadBalancer type](https://github.com/fabric8io/fabric8-ansible-hawtapp/blob/master/kubernetes/appservers/service.yml#L16) (on OpenShift a Route is created for this).
If you are using the fabric8 console you'll see the `hawtapp-demo` service in the Services tab.
You can try out the service in your browser via: [http://hawtapp-demo-default.vagrant.f8/camel/hello?name=Kansible](http://hawtapp-demo-default.vagrant.f8/camel/hello?name=Kansible)
Or using the CLI:
curl http://hawtapp-demo-default.vagrant.f8/camel/hello?name=Kansible
Each request load balances over the available processes. You can scale the Replication Controller down to 1 pod or up to 2 and each request should still work.
## Configuration
To configure kansible you need to configure a [Replication Controller](http://kubernetes.io/v1.1/docs/user-guide/replication-controller.html) in a file called [kubernetes/$HOSTS/rc.yml](https://github.com/fabric8io/fabric8-ansible-spring-boot/blob/master/kubernetes/appservers/rc.yml).
Specify a name and optionally some labels for the replication controller inside the `metadata` object. There's no need to specify the `spec.selector` or `spec.template.containers[0].metadata.labels` values as those are inherited by default from the `metadata.labels`.
### Environment variables
You can specify the following environment variables in the `spec.template.spec.containers[0].env` array like the use of `KANSIBLE_COMMAND` below.
These values can use Ansible variable expressions too.
#### KANSIBLE_COMMAND
Then you must specify a command to run via the [`$KANSIBLE_COMMAND`](https://github.com/fabric8io/fabric8-ansible-spring-boot/blob/master/kubernetes/appservers/rc.yml#L15-L16) environment variable:
```yaml
apiVersion: "v1"
kind: "ReplicationController"
metadata:
name: "myapp"
labels:
project: "myapp"
version: "{{ app_version }}"
spec:
template:
spec:
containers:
- env:
- name: "KANSIBLE_COMMAND"
value: "/opt/foo-{{ app_version }}/bin/run.sh"
serviceAccountName: "fabric8"
```
#### KANSIBLE_COMMAND_WINRM
This environment variable lets you provide a Windows specific command. It works the same as the `KANSIBLE_COMMAND` environment variable above, but this value is only used for Ansible connections of the form `winrm`. i.e. to supply a windows only command to execute.
It's quite common to have a `foo.sh` script to run sh/bash scripts on unix and then a `foo.bat` or `foo.cmd` file for Windows.
#### KANSIBLE_EXPORT_ENV_VARS
Specify a space separated list of environment variable names which should be exported into the remote shell when running the remote command.
Note that typically your [sshd_config](http://linux.die.net/man/5/sshd_config) will disable the use of most environment variables being exported that don't start with `LC_*` so you may need to [configure your sshd](http://linux.die.net/man/5/sshd_config) in `/etc/ssh/sshd_config` to enable this.
#### KANSIBLE_BASH
This defines the path where the bash script will be generated for running a remote bash shell. This allows running the command `bash` inside the kansible pod to remotely execute either `/bin/bash` or `cmd.exe` for Windows machines on the remote machine when you try to open a shell inside the Web Console or via:
oc exec -p mypodname bash
#### KANSIBLE_PORT_FORWARD
Allows port forwarding to be disabled.
export KANSIBLE_PORT_FORWARD=false
This is mostly useful to allow the `bash` command within a pod to not also try to port forward as this will fail ;)
### SSH or WinRM
The best way to configure if you want to connect via SSH for unix machines or WinRM for windows machines is via the Ansible Inventory.
By default SSH is used on port 22 unless you specify `ansible_port` in the inventory or specify `--port` on the command line.
You can configure Windows machines using the `ansible_connection=winrm` property in the inventory:
```ini
[winboxes]
windows1 ansible_host=localhost ansible_port=5985 ansible_user=foo ansible_pass=somepasswd! ansible_connection=winrm
[unixes]
app1 ansible_host=10.10.3.20 ansible_user=vagrant ansible_ssh_private_key_file=.vagrant/machines/app1/virtualbox/private_key
app2 ansible_host=10.10.3.21 ansible_user=vagrant ansible_ssh_private_key_file=.vagrant/machines/app2/virtualbox/private_key
```
You can also enable WinRM via the `--winrm` command line flag:
```bash
export KANSIBLE_WINRM=true
kansible pod --winrm somehosts somecommand
```
or by setting the environment variable `KANSIBLE_WINRM` which is a little easier to configure on the RC YAML:
```bash
export KANSIBLE_WINRM=true
kansible pod somehosts somecommand
```
### Checking the runtime status of the supervisors
To see which pods own which hosts run the following command:
oc export rc hawtapp-demo | grep ansible.fabric8 | sort
Where `hawtapp-demo` is the name of the RC for the supervisors.
The output is of the format:
pod.kansible.fabric8.io/app1: supervisor-znuj5
pod.kansible.fabric8.io/app2: supervisor-1same
Where the output is of the form ` pod.kansible.fabric8.io/$HOSTNAME: $PODNAME`
================================================
FILE: add-headers.sh
================================================
#!/bin/bash
function join { local IFS="$1"; shift; echo "$*"; }
copyright-header \
--copyright-software Kansible \
--copyright-holder 'Red Hat' \
--copyright-year 2016 \
--copyright-software-description 'Directly orchestrate operating system processes via Kubernetes' \
-o ./ \
--license-file header.txt \
-c headers.yml \
--add-path $(join : `find . -path ./vendor -prune -o -name '*.go' -print`)
================================================
FILE: ansible/ansible.go
================================================
/*
* Copyright 2016 Red Hat
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ansible
import (
"bufio"
"bytes"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net"
"os"
"os/exec"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
"github.com/ghodss/yaml"
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"github.com/fabric8io/kansible/k8s"
"github.com/fabric8io/kansible/log"
)
const (
// AnsibleHostPodAnnotationPrefix is the annotation prefix used on the RC to associate a host name with a pod name
AnsibleHostPodAnnotationPrefix = "pod.kansible.fabric8.io/"
// HostInventoryAnnotation is the list of hosts from the inventory
HostInventoryAnnotation = "kansible.fabric8.io/host-inventory"
// HostNameAnnotation is used to annotate a pod with the host name its processing
HostNameAnnotation = "kansible.fabric8.io/host-name"
// HostAddressAnnotation is used to annotate a pod with the host address its processing
HostAddressAnnotation = "kansible.fabric8.io/host-address"
// IconAnnotation is the annotation used to denote the icon on an RC or Service
IconAnnotation = "fabric8.io/iconUrl"
// IconURL is the kansible icon URL
IconURL = "https://cdn.rawgit.com/fabric8io/kansible/master/docs/images/logo.png"
// WinRMShellAnnotationPrefix stores the shell ID for the WinRM host name on the RC
WinRMShellAnnotationPrefix = "winrm.shellid.kansible.fabric8.io/"
// EnvHosts is the environment variable on a pod for specifying the Ansible hosts in the inventory
EnvHosts = "KANSIBLE_HOSTS"
// EnvCommand is the environment variable on a pod for specifying the command to run on each host
EnvCommand = "KANSIBLE_COMMAND"
// EnvRC is the environment variable on a pod for the name of the ReplicationController
EnvRC = "KANSIBLE_RC"
// EnvNamespace is the environment variable on a pod for the namespace to use
EnvNamespace = "KANSIBLE_NAMESPACE"
// EnvExportEnvVars is the space separated list of environment variables exported to the remote process
EnvExportEnvVars = "KANSIBLE_EXPORT_ENV_VARS"
// EnvPortForward allows port forwarding to be disabled
EnvPortForward = "KANSIBLE_PORT_FORWARD"
// EnvBash is the environment variable on a pod for the name of the bash script to generate on startup for
// opening a remote shell
EnvBash = "KANSIBLE_BASH"
// EnvIsBashShell is used to indicate of the command running remotely on the machine is a bash shell in which case we
// don't want to delete any previous WinRM shell
EnvIsBashShell = "KANSIBLE_IS_BASH_SHELL"
// PlaybookVolumeMount is the volume mount point where the playbook is assumed to be in the supervisor pod
PlaybookVolumeMount = "/playbook"
// AnsibleVariableHost is the Ansible inventory host variable for the remote host
AnsibleVariableHost = "ansible_host"
// AnsibleVariableUser is the Ansible inventory host variable for the remote user
AnsibleVariableUser = "ansible_user"
// AnsibleVariablePort is the Ansible inventory host variable for the reote port
AnsibleVariablePort = "ansible_port"
// AnsibleVariablePrivateKey is the Ansible inventory host variable for the SSH private key file
AnsibleVariablePrivateKey = "ansible_ssh_private_key_file"
// AnsibleVariableConnection is the Ansible inventory host variable for the kind of connection; e.g. 'winrm' for windows
AnsibleVariableConnection = "ansible_connection"
// AnsibleVariablePassword is the Ansible inventory host variable for the password
AnsibleVariablePassword = "ansible_ssh_pass"
// ConnectionWinRM is the value AnsibleVariableConnection of for using Windows with WinRM
ConnectionWinRM = "winrm"
// AppRunCommand is the Ansible inventory host variable for the run command that is executed on the remote host
AppRunCommand = "app_run_command"
gitURLPrefix = "url = "
gitConfig = ".git/config"
)
// HostEntry represents a single host entry in an Ansible inventory
type HostEntry struct {
Name string
Host string
Port string
User string
PrivateKey string
Connection string
Password string
RunCommand string
}
// LoadHostEntries loads the Ansible inventory for a given hosts string value
func LoadHostEntries(inventoryFile string, hosts string) ([]*HostEntry, error) {
file, err := os.Open(inventoryFile)
if err != nil {
return nil, err
}
defer file.Close()
hostEntries := []*HostEntry{}
hostsLine := "[" + hosts + "]"
foundHeader := false
completed := false
hostNames := []string{}
scanner := bufio.NewScanner(file)
for scanner.Scan() {
text := strings.TrimSpace(scanner.Text())
if len(text) > 0 && !strings.HasPrefix(text, "#") {
isHost := strings.HasPrefix(text, "[") && strings.HasSuffix(text, "]")
if isHost {
hostNames = append(hostNames, text[1:len(text)-1])
}
if foundHeader {
if isHost {
completed = true
} else if !completed {
hostEntry := parseHostEntry(text)
if hostEntry != nil {
hostEntries = append(hostEntries, hostEntry)
}
}
} else if text == hostsLine {
foundHeader = true
}
}
}
if err := scanner.Err(); err != nil {
return nil, err
}
if !foundHeader {
sort.Strings(hostNames)
return nil, fmt.Errorf("Could not find hosts `%s` in Ansible inventory file %s. Possible values are: %s",
hosts, inventoryFile, strings.Join(hostNames, ", "))
}
return hostEntries, nil
}
// LoadHostEntriesFromText loads the host entries from the given text which is typically taken from
// an annotation on the ReplicationController
func LoadHostEntriesFromText(text string) ([]*HostEntry, error) {
hostEntries := []*HostEntry{}
lines := strings.Split(text, "\n")
for _, line := range lines {
text := strings.TrimSpace(line)
if len(text) > 0 && !strings.HasPrefix(text, "#") {
hostEntry := parseHostEntry(text)
if hostEntry != nil {
hostEntries = append(hostEntries, hostEntry)
}
}
}
return hostEntries, nil
}
// ChooseHostAndPrivateKey parses the given Ansible inventory file for the hosts
// and chooses a single host inside it, returning the host name and the private key
func ChooseHostAndPrivateKey(thisPodName string, hosts string, c *client.Client, ns string, rcName string) (*HostEntry, *api.ReplicationController, map[string]string, error) {
retryAttempts := 20
for i := 0; i < retryAttempts; i++ {
if i > 0 {
// lets sleep before retrying
time.Sleep(time.Duration(random(1000, 20000)) * time.Millisecond)
}
if c == nil {
return nil, nil, nil, fmt.Errorf("No Kubernetes Client specified!")
}
rc, err := c.ReplicationControllers(ns).Get(rcName)
if err != nil {
return nil, nil, nil, err
}
if rc == nil {
return nil, nil, nil, fmt.Errorf("No ReplicationController found for name %s", rcName)
}
pods, err := c.Pods(ns).List(api.ListOptions{})
if err != nil {
return nil, nil, nil, err
}
metadata := &rc.ObjectMeta
resourceVersion := metadata.ResourceVersion
if metadata.Annotations == nil {
metadata.Annotations = make(map[string]string)
}
annotations := metadata.Annotations
log.Info("Using ReplicationController with namespace %s name %s and version %s", ns, rcName, resourceVersion)
hostsText := annotations[HostInventoryAnnotation]
if len(hostsText) == 0 {
return nil, nil, nil, fmt.Errorf("Could not find annotation %s on ReplicationController %s", HostInventoryAnnotation, rcName)
}
hostEntries, err := LoadHostEntriesFromText(hostsText)
if err != nil {
return nil, nil, nil, err
}
log.Info("Found %d host entries", len(hostEntries))
// lets pick a random entry
if len(hostEntries) > 0 {
filteredHostEntries := hostEntries
for annKey, podName := range annotations {
if strings.HasPrefix(annKey, AnsibleHostPodAnnotationPrefix) {
hostName := annKey[len(AnsibleHostPodAnnotationPrefix):]
if k8s.PodIsRunning(pods, podName) {
if podName != thisPodName {
log.Info("Pod %s podName has already claimed host %s", podName, hostName)
filteredHostEntries = removeHostEntry(filteredHostEntries, hostName)
}
} else {
// lets remove this annotation as the pod is no longer valid
log.Info("Pod %s is no longer running so removing the annotation %s", podName, annKey)
delete(metadata.Annotations, annKey)
}
}
}
count := len(filteredHostEntries)
if count == 0 {
log.Info("There are no more hosts available to be supervised by this pod!")
return nil, nil, nil, fmt.Errorf("No more hosts available to be supervised!")
}
log.Info("After filtering out hosts owned by other pods we have %v host entries left", count)
pickedEntry := filteredHostEntries[random(0, count)]
hostName := pickedEntry.Name
if len(pickedEntry.Host) == 0 {
return nil, nil, nil, fmt.Errorf("Could not find host name for entry %s", pickedEntry.Name)
}
if len(pickedEntry.User) == 0 {
return nil, nil, nil, fmt.Errorf("Could not find User for entry %s", pickedEntry.Name)
}
// lets try pick this pod
annotations[AnsibleHostPodAnnotationPrefix+hostName] = thisPodName
rc, err = c.ReplicationControllers(ns).Update(rc)
if err != nil {
log.Info("Failed to update the RC, could be concurrent update failure: %s", err)
} else {
log.Info("Picked host " + pickedEntry.Host)
// lets update the Pod with the host name label
podClient := c.Pods(ns)
pod, err := podClient.Get(thisPodName)
if err != nil {
return pickedEntry, nil, nil, err
}
metadata := &pod.ObjectMeta
if metadata.Annotations == nil {
metadata.Annotations = make(map[string]string)
}
metadata.Annotations[HostNameAnnotation] = pickedEntry.Name
metadata.Annotations[HostAddressAnnotation] = pickedEntry.Host
//pod.Status = api.PodStatus{}
pod, err = podClient.UpdateStatus(pod)
if err != nil {
return pickedEntry, nil, nil, err
}
// lets export required environment variables
exportEnvVars := os.Getenv(EnvExportEnvVars)
envVars := make(map[string]string)
if len(exportEnvVars) > 0 {
names := strings.Split(exportEnvVars, " ")
for _, name := range names {
name = strings.TrimSpace(name)
if len(name) > 0 {
value := os.Getenv(name)
if len(value) > 0 {
envVars[name] = value
log.Debug("Exporting environment variable %s = %s", name, value)
}
}
}
}
err = forwardPorts(pod, pickedEntry)
return pickedEntry, rc, envVars, err
}
}
}
return nil, nil, nil, fmt.Errorf("Could not find any available hosts on the ReplicationController %s and hosts %s", rcName, hosts)
}
// forwardPorts forwards any ports that are defined in the PodSpec to the host
func forwardPorts(pod *api.Pod, hostEntry *HostEntry) error {
disableForwarding := os.Getenv(EnvPortForward)
if len(disableForwarding) > 0 {
if strings.ToLower(disableForwarding) == "false" {
return nil
}
}
podSpec := pod.Spec
host := hostEntry.Host
for _, container := range podSpec.Containers {
for _, port := range container.Ports {
name := port.Name
portNum := port.ContainerPort
if portNum > 0 {
address := "0.0.0.0:" + strconv.Itoa(portNum)
forwardAddress := host + ":" + strconv.Itoa(portNum)
err := forwardPortLoop(name, address, forwardAddress)
if err != nil {
return err
}
}
}
}
return nil
}
func forwardPortLoop(name string, address string, forwardAddress string) error {
log.Info("forwarding port %s %s => %s", name, address, forwardAddress)
listener, err := net.Listen("tcp", address)
if err != nil {
return err
}
log.Info("About to start the acceptor goroutine!")
go func() {
for {
conn, err := listener.Accept()
if err != nil {
log.Err("Failed to accept listener: %v", err)
}
log.Info("Accepted connection %v\n", conn)
go forwardPort(conn, forwardAddress)
}
}()
return nil
}
func forwardPort(conn net.Conn, address string) {
client, err := net.Dial("tcp", address)
if err != nil {
log.Err("Dial failed: %v", err)
}
log.Info("Connected to localhost %v\n", conn)
go func() {
defer client.Close()
defer conn.Close()
io.Copy(client, conn)
}()
go func() {
defer client.Close()
defer conn.Close()
io.Copy(conn, client)
}()
}
// UpdateKansibleRC reads the Ansible inventory and the RC YAML for the hosts and updates it in Kubernetes
// along with removing any remaining pods which are running against old hosts that have been removed from the inventory
func UpdateKansibleRC(hostEntries []*HostEntry, hosts string, f *cmdutil.Factory, c *client.Client, ns string, rcFile string, replicas int) (*api.ReplicationController, error) {
variables, err := LoadAnsibleVariables(hosts)
if err != nil {
return nil, err
}
data, err := LoadFileAndReplaceVariables(rcFile, variables)
if err != nil {
return nil, err
}
rcConfig, err := k8s.ReadReplicationController(data)
if err != nil {
return nil, err
}
rcName := rcConfig.ObjectMeta.Name
podSpec := k8s.GetOrCreatePodSpec(rcConfig)
// lets default labels and selectors if they are missing
rcLabels := rcConfig.ObjectMeta.Labels
if len(rcLabels) > 0 {
rcSpec := rcConfig.Spec
if len(rcSpec.Selector) == 0 {
rcSpec.Selector = rcLabels
}
template := rcSpec.Template
if template != nil {
if len(template.ObjectMeta.Labels) == 0 {
template.ObjectMeta.Labels = rcLabels
}
}
}
container := k8s.GetFirstContainerOrCreate(rcConfig)
if len(container.Image) == 0 {
container.Image = "fabric8/kansible"
}
if len(container.Name) == 0 {
container.Name = "kansible"
}
if len(container.ImagePullPolicy) == 0 {
container.ImagePullPolicy = "IfNotPresent"
}
preStopCommands := []string{"kansible", "kill"}
if len(podSpec.ServiceAccountName) == 0 {
podSpec.ServiceAccountName = rcName
}
serviceAccountName := podSpec.ServiceAccountName
k8s.EnsureContainerHasPreStopCommand(container, preStopCommands)
k8s.EnsureContainerHasEnvVar(container, EnvHosts, hosts)
k8s.EnsureContainerHasEnvVar(container, EnvRC, rcName)
k8s.EnsureContainerHasEnvVar(container, EnvBash, "/usr/local/bin/bash")
k8s.EnsureContainerHasEnvVarFromField(container, EnvNamespace, "metadata.namespace")
command := k8s.GetContainerEnvVar(container, EnvCommand)
if len(command) == 0 {
return nil, fmt.Errorf("No environemnt variable value defined for %s in ReplicationController YAML file %s", EnvCommand, rcFile)
}
if len(serviceAccountName) > 0 {
created, err := k8s.EnsureServiceAccountExists(c, ns, serviceAccountName)
if err != nil {
return nil, err
}
if created {
err = ensureSCCExists(ns, serviceAccountName)
if err != nil {
return nil, err
}
}
}
isUpdate := true
rc, err := c.ReplicationControllers(ns).Get(rcName)
if err != nil {
isUpdate = false
rc = &api.ReplicationController{
ObjectMeta: api.ObjectMeta{
Namespace: ns,
Name: rcName,
},
}
}
pods, err := c.Pods(ns).List(api.ListOptions{})
if err != nil {
return nil, err
}
// merge the RC configuration to allow configuration
originalReplicas := rc.Spec.Replicas
rc.Spec = rcConfig.Spec
metadata := &rc.ObjectMeta
resourceVersion := metadata.ResourceVersion
rcSpec := &rc.Spec
if replicas < 0 {
replicas = originalReplicas
}
rcSpec.Replicas = replicas
err = generatePrivateKeySecrets(c, ns, hostEntries, rc, podSpec, container)
if err != nil {
return rc, err
}
text := HostEntriesToString(hostEntries)
if metadata.Annotations == nil {
metadata.Annotations = make(map[string]string)
}
metadata.Annotations[HostInventoryAnnotation] = text
metadata.Annotations[IconAnnotation] = IconURL
log.Info("found RC with name %s and version %s and replicas %d", rcName, resourceVersion, rcSpec.Replicas)
deletePodsForOldHosts(c, ns, metadata.Annotations, pods, hostEntries)
replicationController := c.ReplicationControllers(ns)
if isUpdate {
_, err = replicationController.Update(rc)
} else {
_, err = replicationController.Create(rc)
}
if err != nil {
log.Info("Failed to update the RC, could be concurrent update failure: %s", err)
return nil, err
}
err = applyOtherKubernetesResources(f, c, ns, rcFile, variables)
return rc, err
}
func applyOtherKubernetesResources(f *cmdutil.Factory, c *client.Client, ns string, rcFile string, variables map[string]string) error {
dir := filepath.Dir(rcFile)
if len(dir) == 0 {
dir = "."
}
files, err := ioutil.ReadDir(dir)
if err != nil {
return err
}
for _, file := range files {
name := file.Name()
lower := strings.ToLower(name)
ext := filepath.Ext(lower)
if !file.IsDir() && lower != "rc.yml" {
resource := false
switch ext {
case ".json":
resource = true
case ".js":
resource = true
case ".yml":
resource = true
case ".yaml":
resource = true
}
if resource {
fullpath := filepath.Join(dir, name)
err = applyOtherKubernetesResource(f, c, ns, fullpath, variables)
if err != nil {
return err
}
}
}
}
return nil
}
func applyOtherKubernetesResource(f *cmdutil.Factory, c *client.Client, ns string, file string, variables map[string]string) error {
log.Info("applying kubernetes resource: %s", file)
data, err := LoadFileAndReplaceVariables(file, variables)
if err != nil {
return err
}
// TODO the following should work ideally but something's wrong with the loading of versioned schemas...
//return k8s.ApplyResource(f, c, ns, data, file)
// lets use the `oc` binary instead
isOc := true
binary, err := exec.LookPath("oc")
if err != nil {
isOc = false
var err2 error
binary, err2 = exec.LookPath("kubectl")
if err2 != nil {
return err
}
}
reader := bytes.NewReader(data)
err = runCommand(binary, []string{"apply", "-f", "-"}, reader)
if err != nil {
return err
}
if isOc {
// if we are a service lets try figure out the service name?
service := api.Service{}
if err := yaml.Unmarshal(data, &service); err != nil {
log.Info("Probably not a service! %s", err)
return nil
}
name := service.ObjectMeta.Name
serviceType := service.Spec.Type
if service.Kind == "Service" && len(name) > 0 && serviceType == "LoadBalancer" {
log.Info("Checking the service %s is exposed in OpenShift", name)
runCommand(binary, []string{"expose", "service", name}, os.Stdin)
return nil
}
}
return nil
}
func ensureSCCExists(ns string, serviceAccountName string) error {
binary, err := exec.LookPath("oc")
if err != nil {
// no openshift so ignore
return nil
}
text, err := getCommandOutputString(binary, []string{"export", "scc", serviceAccountName}, os.Stdin)
if err != nil {
log.Debug("Failed to get SecurityContextConstraints %s. %s", serviceAccountName, err)
}
if err != nil || len(text) == 0 {
text = `
apiVersion: v1
kind: SecurityContextConstraints
groups:
- system:cluster-admins
- system:nodes
metadata:
creationTimestamp: null
name: ` + serviceAccountName + `
runAsUser:
type: RunAsAny
seLinuxContext:
type: RunAsAny
supplementalGroups:
type: RunAsAny
users:
`
}
// lets ensure there's a users section
if !strings.Contains(text, "\nusers:") {
text = text + "\nusers:\n"
}
line := "system:serviceaccount:" + ns + ":" + serviceAccountName
if strings.Contains(text, line) {
log.Info("No need to modify SecurityContextConstraints as it already contains line for namespace %s and service account %s", ns, serviceAccountName)
return nil
}
text = text + "\n- " + line + "\n"
log.Debug("created SecurityContextConstraints YAML: %s", text)
log.Info("Applying changes for SecurityContextConstraints %s for namespace %s and ServiceAccount %s", serviceAccountName, ns, serviceAccountName)
reader := bytes.NewReader([]byte(text))
err = runCommand(binary, []string{"apply", "-f", "-"}, reader)
if err != nil {
log.Err("Failed to update OpenShift SecurityContextConstraints named %s. %s", serviceAccountName, err)
}
return err
}
func getCommandOutputString(binary string, args []string, reader io.Reader) (string, error) {
cmd := exec.Command(binary, args...)
cmd.Stdin = reader
var out bytes.Buffer
cmd.Stdout = &out
stderr, err := cmd.StderrPipe()
if err != nil {
return "", fmt.Errorf("Unable to setup stderr for command %s: %v", binary, err)
}
go io.Copy(os.Stderr, stderr)
err = cmd.Start()
if err != nil {
return "", err
}
err = cmd.Wait()
if err != nil {
return "", err
}
return out.String(), err
}
func runCommand(binary string, args []string, reader io.Reader) error {
cmd := exec.Command(binary, args...)
cmd.Stdin = reader
stdout, err := cmd.StdoutPipe()
if err != nil {
return fmt.Errorf("Unable to setup stdout for command %s: %v", binary, err)
}
go io.Copy(os.Stdout, stdout)
stderr, err := cmd.StderrPipe()
if err != nil {
return fmt.Errorf("Unable to setup stderr for command %s: %v", binary, err)
}
go io.Copy(os.Stderr, stderr)
err = cmd.Start()
if err != nil {
return err
}
err = cmd.Wait()
if err != nil {
return err
}
return err
}
// generatePrivateKeySecrets creates (or updates) a Kubernetes Secret holding
// each SSH private key referenced by the host entries, rewrites each
// HostEntry.PrivateKey to the path where the key will be mounted inside the
// pod, and wires the secret volume plus mount into the given pod spec and
// container.
//
// Returns an error if a key file cannot be read or the secret cannot be
// created/updated against the API server.
//
// NOTE(review): the `secrets` cache is keyed by the private key *path*, but
// the secret/volume names are derived from the *host* name — so when two
// hosts share the same key file, only the first host's entry is rewritten to
// the mounted path; later hosts keep the original local file path. Confirm
// whether shared key files are expected before changing this.
func generatePrivateKeySecrets(c *client.Client, ns string, hostEntries []*HostEntry, rc *api.ReplicationController, podSpec *api.PodSpec, container *api.Container) error {
	secrets := map[string]string{}
	rcName := rc.ObjectMeta.Name
	for _, hostEntry := range hostEntries {
		privateKey := hostEntry.PrivateKey
		if len(privateKey) != 0 {
			volumeMount := secrets[privateKey]
			if len(volumeMount) == 0 {
				// Load the key material from the local file system.
				buffer, err := ioutil.ReadFile(privateKey)
				if err != nil {
					return err
				}
				hostName := hostEntry.Name
				secretName := rcName + "-" + hostName
				keyName := "sshkey"
				secret := &api.Secret{
					ObjectMeta: api.ObjectMeta{
						Name:   secretName,
						Labels: rc.ObjectMeta.Labels,
					},
					Data: map[string][]byte{
						keyName: buffer,
					},
				}
				// lets create or update the secret
				secretClient := c.Secrets(ns)
				current, err := secretClient.Get(secretName)
				if err != nil || current == nil {
					_, err = secretClient.Create(secret)
				} else {
					_, err = secretClient.Update(secret)
				}
				if err != nil {
					return err
				}
				// Point the host entry at the path the key will have once the
				// secret volume is mounted into the pod.
				volumeMount = "/secrets/" + hostName
				secrets[privateKey] = volumeMount
				hostEntry.PrivateKey = volumeMount + "/" + keyName
				// lets add the volume mapping to the container
				secretVolumeName := "secret-" + hostName
				k8s.EnsurePodSpecHasSecretVolume(podSpec, secretVolumeName, secretName)
				k8s.EnsureContainerHasVolumeMount(container, secretVolumeName, volumeMount)
			}
		}
	}
	return nil
}
// findGitURL scans the local git config file line by line and returns the
// first remote URL found (the text after the gitURLPrefix marker), or an
// empty string when no such line exists.
func findGitURL() (string, error) {
	f, err := os.Open(gitConfig)
	if err != nil {
		return "", err
	}
	defer f.Close()
	s := bufio.NewScanner(f)
	for s.Scan() {
		if line := strings.TrimSpace(s.Text()); strings.HasPrefix(line, gitURLPrefix) {
			return strings.TrimPrefix(line, gitURLPrefix), nil
		}
	}
	// s.Err() is nil on a clean EOF, matching the original "", nil result.
	return "", s.Err()
}
// removeHostEntry returns the host entries with the entry of the given name
// removed. When no entry matches, the slice is returned unchanged and a
// warning is logged.
//
// Note: like the original, this mutates the backing array of hostEntries.
func removeHostEntry(hostEntries []*HostEntry, name string) []*HostEntry {
	for i, entry := range hostEntries {
		if entry.Name == name {
			// append copes with an empty tail, so the previous special case
			// for the last element was redundant.
			return append(hostEntries[:i], hostEntries[i+1:]...)
		}
	}
	log.Warn("Did not find a host entry with name %s", name)
	return hostEntries
}
// GetHostEntryByName finds the HostEntry for the given host name or returns nil
func GetHostEntryByName(hostEntries []*HostEntry, name string) *HostEntry {
	for i := range hostEntries {
		if hostEntries[i].Name == name {
			return hostEntries[i]
		}
	}
	return nil
}
// deletePodsForOldHosts deletes any running kansible pod whose annotated host
// is no longer present in the Ansible inventory.
//
// The annotations map host names (keys prefixed with
// AnsibleHostPodAnnotationPrefix) to the pod that owns them; a running pod
// whose host has disappeared from hostEntries is deleted so a replacement can
// claim a valid host.
func deletePodsForOldHosts(c *client.Client, ns string, annotations map[string]string, pods *api.PodList, hostEntries []*HostEntry) {
	for annKey, podName := range annotations {
		if !strings.HasPrefix(annKey, AnsibleHostPodAnnotationPrefix) {
			continue
		}
		hostName := strings.TrimPrefix(annKey, AnsibleHostPodAnnotationPrefix)
		if !k8s.PodIsRunning(pods, podName) {
			continue
		}
		if GetHostEntryByName(hostEntries, hostName) == nil {
			log.Info("Deleting pod %s as there is no longer an Ansible inventory host called %s", podName, hostName)
			// The delete error was previously discarded silently; surface it
			// as a warning since this function is best-effort cleanup.
			if err := c.Pods(ns).Delete(podName, nil); err != nil {
				log.Warn("Failed to delete pod %s: %s", podName, err)
			}
		}
	}
}
// seededRand is seeded once at startup. The previous implementation called
// rand.Seed(time.Now().Unix()) on every invocation, so calls within the same
// second always produced the same value.
var seededRand = rand.New(rand.NewSource(time.Now().UnixNano()))

// random returns a pseudo-random int in the half-open interval [min, max).
// Degenerate ranges (max <= min) return min instead of panicking in Intn.
func random(min, max int) int {
	if max <= min {
		return min
	}
	return seededRand.Intn(max-min) + min
}
// HostEntriesToString generates the Ansible inventory text for the host entries
func HostEntriesToString(hostEntries []*HostEntry) string {
	var out bytes.Buffer
	for _, entry := range hostEntries {
		entry.write(&out)
		out.WriteString("\n")
	}
	return out.String()
}
// write appends this host entry as a single Ansible inventory line to buffer:
// the host name followed by each non-empty variable as ` key=value`.
func (hostEntry HostEntry) write(buffer *bytes.Buffer) {
	buffer.WriteString(hostEntry.Name)
	// writeVar emits ` key=value` only when the value is non-empty; this
	// replaces seven copies of the same four WriteString calls.
	writeVar := func(key, value string) {
		if len(value) > 0 {
			buffer.WriteString(" ")
			buffer.WriteString(key)
			buffer.WriteString("=")
			buffer.WriteString(value)
		}
	}
	// The emission order matches the original implementation exactly.
	writeVar(AnsibleVariableHost, hostEntry.Host)
	writeVar(AnsibleVariablePrivateKey, hostEntry.PrivateKey)
	writeVar(AnsibleVariablePassword, hostEntry.Password)
	writeVar(AppRunCommand, hostEntry.RunCommand)
	writeVar(AnsibleVariablePort, hostEntry.Port)
	writeVar(AnsibleVariableUser, hostEntry.User)
	writeVar(AnsibleVariableConnection, hostEntry.Connection)
}
// parseHostEntry parses a single Ansible inventory line of the form
//
//	name key1=value1 key2=value2 ...
//
// into a HostEntry. Unknown keys are ignored. When no ansible_ssh_host
// variable is present the name itself is used as the host.
func parseHostEntry(text string) *HostEntry {
	entry := &HostEntry{}
	// strings.Fields copes with tabs and runs of spaces between tokens,
	// which the previous strings.Split(text, " ") turned into empty tokens.
	values := strings.Fields(text)
	if len(values) > 0 {
		entry.Name = values[0]
		// lets parse the key value expressions for the host name
		for _, exp := range values[1:] {
			// SplitN keeps any '=' characters inside the value intact
			// (e.g. a run command containing FOO=bar), which the previous
			// strings.Split dropped entirely.
			params := strings.SplitN(exp, "=", 2)
			if len(params) != 2 {
				continue
			}
			paramValue := params[1]
			switch params[0] {
			case AnsibleVariableHost:
				entry.Host = paramValue
			case AnsibleVariableUser:
				entry.User = paramValue
			case AnsibleVariablePort:
				entry.Port = paramValue
			case AnsibleVariablePrivateKey:
				entry.PrivateKey = paramValue
			case AnsibleVariableConnection:
				entry.Connection = paramValue
			case AnsibleVariablePassword:
				entry.Password = paramValue
			case AppRunCommand:
				entry.RunCommand = paramValue
			}
		}
		// if there's no host defined yet, lets assume that the name is the host name
		if len(entry.Host) == 0 {
			entry.Host = entry.Name
		}
	}
	return entry
}
================================================
FILE: ansible/variables.go
================================================
/*
* Copyright 2016 Red Hat
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ansible
import (
"io/ioutil"
"os"
"strings"
"github.com/ghodss/yaml"
)
const (
	// AnsibleGlobalVariablesFile is the directory prefix used to locate the
	// Ansible group variables file for a hosts group (group_vars/<hosts>)
	AnsibleGlobalVariablesFile = "group_vars/"
)
// LoadAnsibleVariables loads the global variables from the Ansible playbook
// so that we can search and replace them inside other files like the RC.yml
//
// An empty map is returned when no group_vars file exists for the hosts
// group. Values may reference other variables with `{{ name }}`; these are
// expanded repeatedly until no further substitutions occur, so the result no
// longer depends on map iteration order (a single pass over a Go map is
// nondeterministic for nested references like a -> b -> c).
func LoadAnsibleVariables(hosts string) (map[string]string, error) {
	path := AnsibleGlobalVariablesFile + hosts
	if _, err := os.Stat(path); os.IsNotExist(err) {
		return map[string]string{}, nil
	}
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, err
	}
	variables := map[string]string{}
	err = yaml.Unmarshal(data, &variables)
	if err != nil {
		return variables, err
	}
	// Expand nested references until a fixpoint is reached; bounding the
	// passes by the map size guarantees termination even on cyclic
	// definitions.
	for i := 0; i <= len(variables); i++ {
		changed := false
		for k, v := range variables {
			if expanded := ReplaceVariables(v, variables); expanded != v {
				variables[k] = expanded
				changed = true
			}
		}
		if !changed {
			break
		}
	}
	return variables, nil
}
// ReplaceVariables replaces variables in the given string using the Ansible variable syntax of
// `{{ name }}`
func ReplaceVariables(text string, variables map[string]string) string {
	result := text
	for name, value := range variables {
		result = strings.Replace(result, "{{ "+name+" }}", value, -1)
	}
	return result
}
// LoadFileAndReplaceVariables loads the given file and replaces all the Ansible variable expressions
// and then returns the data
//
// Returns an error only when the file cannot be read; substitution itself
// cannot fail.
func LoadFileAndReplaceVariables(filename string, variables map[string]string) ([]byte, error) {
	data, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil, err
	}
	// substitute every `{{ name }}` occurrence using the loaded variables
	// (the stale "TODO replace the variables!" note predated this call)
	text := ReplaceVariables(string(data), variables)
	return []byte(text), nil
}
================================================
FILE: circle.yml
================================================
machine:
environment:
IMPORT_PATH: "github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME"
dependencies:
pre:
- echo $PATH
- wget "https://github.com/Masterminds/glide/releases/download/0.8.2/glide-0.8.2-linux-amd64.tar.gz"
- mkdir /home/ubuntu/.go_workspace/bin || true
- tar -vxz -C /home/ubuntu/.go_workspace/bin --strip=1 -f glide-0.8.2-linux-amd64.tar.gz
- mkdir -p "/home/ubuntu/.go_workspace/src/$IMPORT_PATH" || true
- rsync -azC --delete ./ "/home/ubuntu/.go_workspace/src/$IMPORT_PATH/"
override:
- rm -rf vendor
- make bootstrap build:
pwd: "../.go_workspace/src/$IMPORT_PATH/"
test:
override:
- make test lint:
pwd: "../.go_workspace/src/$IMPORT_PATH/"
# Disabled for now to avoid conflicts with the Jenkins builds:
#deployment:
# hub:
# branch: master
# owner: fabric8io
# commands:
# - docker build -t fabric8/kansible .
# - docker login -e $DOCKER_EMAIL -u $DOCKER_USER -p $DOCKER_PASS
# - docker push fabric8/kansible
================================================
FILE: cmd/kill.go
================================================
/*
* Copyright 2016 Red Hat
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"os"
"github.com/spf13/cobra"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"github.com/fabric8io/kansible/ansible"
"github.com/fabric8io/kansible/k8s"
"github.com/fabric8io/kansible/log"
"github.com/fabric8io/kansible/winrm"
)
// init registers the kill command and its --rc flag on the root command.
// The $KANSIBLE_RC default is expanded at run time so the RC name can come
// from the environment.
func init() {
	killCmd.Flags().StringVar(&rcName, "rc", "$KANSIBLE_RC", "the name of the ReplicationController for the supervisors")
	RootCmd.AddCommand(killCmd)
}
// killCmd kills the pending windows shell of the current pod if its still running
var killCmd = &cobra.Command{
	Use:   "kill [command]",
	Short: "Kills any pending shells for this pod.",
	Long:  `This commmand will find the shell thats associated with a pod and kill it.`,
	Run: func(cmd *cobra.Command, args []string) {
		// Build a Kubernetes client from the shared client config flags.
		f := cmdutil.NewFactory(clientConfig)
		if f == nil {
			log.Die("Failed to create Kubernetes client factory!")
		}
		kubeclient, err := f.Client()
		if err != nil || kubeclient == nil {
			log.Die(MessageFailedToCreateKubernetesClient, err)
		}
		ns, _, _ := f.DefaultNamespace()
		if len(ns) == 0 {
			ns = "default"
		}
		// The pod we are running inside identifies which host's shell to kill.
		thisPodName, err := k8s.GetThisPodName()
		if err != nil {
			log.Die("Failed to get this pod name: %s", err)
		}
		pod, err := kubeclient.Pods(ns).Get(thisPodName)
		if err != nil {
			log.Die("Failed to get pod from API server: %s", err)
		}
		// The pod annotation records which Ansible inventory host this pod owns.
		annotations := pod.ObjectMeta.Annotations
		if annotations == nil {
			log.Die("No annotations available on pod %s", thisPodName)
		}
		hostName := annotations[ansible.HostNameAnnotation]
		if len(hostName) == 0 {
			log.Info("No annotation `%s` available on pod %s", ansible.HostNameAnnotation, thisPodName)
			return
		}
		// now lets load the connection details from the RC annotations
		rcName = os.ExpandEnv(rcName)
		if rcName == "" {
			log.Die("Replication controller name is required")
		}
		rc, err := kubeclient.ReplicationControllers(ns).Get(rcName)
		if err != nil {
			log.Die("Failed to get replication controller from API server: %s", err)
		}
		if rc == nil {
			log.Die("No ReplicationController found for name %s", rcName)
		}
		metadata := &rc.ObjectMeta
		if metadata.Annotations == nil {
			metadata.Annotations = make(map[string]string)
		}
		rcAnnotations := metadata.Annotations
		// The RC annotations carry the full host inventory plus the WinRM
		// shell id that was opened for this host.
		hostsText := rcAnnotations[ansible.HostInventoryAnnotation]
		if len(hostsText) == 0 {
			log.Die("Could not find annotation %s on ReplicationController %s", ansible.HostInventoryAnnotation, rcName)
		}
		shellID := rcAnnotations[ansible.WinRMShellAnnotationPrefix+hostName]
		if len(shellID) == 0 {
			// NOTE(review): the shell id is read from the RC annotations but
			// this message says "pod" — confirm the wording before changing.
			log.Info("No annotation `%s` available on pod %s", ansible.WinRMShellAnnotationPrefix, thisPodName)
			return
		}
		hostEntries, err := ansible.LoadHostEntriesFromText(hostsText)
		if err != nil {
			log.Die("Failed to load hosts: %s", err)
		}
		log.Info("Found %d host entries", len(hostEntries))
		hostEntry := ansible.GetHostEntryByName(hostEntries, hostName)
		if hostEntry == nil {
			log.Die("Could not find a HostEntry called `%s` from %d host entries", hostName, len(hostEntries))
		}
		// Close the remote WinRM shell using the host's connection details.
		err = winrm.CloseShell(hostEntry.User, hostEntry.Password, hostEntry.Host, hostEntry.Port, shellID)
		if err != nil {
			log.Die("Failed to close shell: %s", err)
		}
		log.Info("Shell %s has been closed", shellID)
	},
}
================================================
FILE: cmd/pod.go
================================================
/*
* Copyright 2016 Red Hat
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"io/ioutil"
"os"
"strconv"
"strings"
"github.com/spf13/cobra"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"github.com/fabric8io/kansible/ansible"
"github.com/fabric8io/kansible/k8s"
"github.com/fabric8io/kansible/log"
"github.com/fabric8io/kansible/ssh"
"github.com/fabric8io/kansible/winrm"
)
var (
	// flag values for the pod command; rcName and connection are shared with
	// other commands in this package
	rcName, passwordFlag, connection, bash string
)
// init registers the pod command and its flags on the root command.
// Defaults of the form $KANSIBLE_* are expanded with os.ExpandEnv at run
// time so values can be supplied via the environment.
func init() {
	podCmd.Flags().StringVar(&rcName, "rc", "$KANSIBLE_RC", "the name of the ReplicationController for the supervisors")
	podCmd.Flags().StringVar(&passwordFlag, "password", "$KANSIBLE_PASSWORD", "the password used for WinRM connections")
	podCmd.Flags().StringVar(&connection, "connection", "", "the Ansible connection type to use. Defaults to SSH unless 'winrm' is defined to use WinRM on Windows")
	podCmd.Flags().StringVar(&bash, "bash", "$KANSIBLE_BASH", "if specified a script is generated for running a bash like shell on the remote machine")
	RootCmd.AddCommand(podCmd)
}
// podCmd claims an available host from the Ansible inventory (recorded via
// annotations on the RC) and then runs a remote command on that host,
// binding stdin, stdout and stderr to the remote process.
var podCmd = &cobra.Command{
	Use:   "pod [command]",
	Short: "Runs the kansible pod which owns a host from the Ansible inventory then runs a remote command on the host",
	Long:  `This commmand will pick an available host from the Ansible inventory, then run a remote command on that host.`,
	Run: func(cmd *cobra.Command, args []string) {
		// Pod runs the kansible pod for a given group of hosts in an Ansible playbook
		// this grabs a specific host (using annotations on the RC) then runs a remote command
		// on that host binding stdin, stdout, stderr to the remote process
		if len(args) < 1 {
			log.Die("Expected arguments [command]")
		}
		hosts := os.ExpandEnv(args[0])
		command := ""
		if len(args) > 1 {
			command = os.ExpandEnv(strings.Join(args[1:], " "))
		}
		f := cmdutil.NewFactory(clientConfig)
		if f == nil {
			log.Die("Failed to create Kubernetes client factory!")
		}
		kubeclient, err := f.Client()
		if err != nil || kubeclient == nil {
			log.Die(MessageFailedToCreateKubernetesClient, err)
		}
		// Namespace resolution: env var first, then kubeconfig, then "default".
		ns := os.Getenv(ansible.EnvNamespace)
		if len(ns) == 0 {
			ns, _, _ = f.DefaultNamespace()
			if len(ns) == 0 {
				ns = "default"
			}
		}
		rcName = os.ExpandEnv(rcName)
		if rcName == "" {
			log.Die("RC name is required")
		}
		thisPodName, err := k8s.GetThisPodName()
		if err != nil {
			log.Die("Couldn't get pod name: %s", err)
		}
		// Claim a host for this pod and load its connection details.
		hostEntry, rc, envVars, err := ansible.ChooseHostAndPrivateKey(thisPodName, hosts, kubeclient, ns, rcName)
		if err != nil {
			log.Die("Couldn't find host: %s", err)
		}
		host := hostEntry.Host
		user := hostEntry.User
		port := hostEntry.Port
		if len(port) == 0 {
			port = strconv.Itoa(sshPort)
		}
		// BUGFIX: the local variable previously shadowed the --connection
		// flag, so the empty-value fallback expanded the empty local string —
		// a no-op — and the flag was never consulted. Use a distinct local
		// name so the (env-expanded) flag is used when the host entry has no
		// connection type of its own.
		conn := hostEntry.Connection
		if len(conn) == 0 {
			conn = os.ExpandEnv(connection)
		}
		runCommand := hostEntry.RunCommand
		if len(runCommand) != 0 {
			command = runCommand
		}
		// When no command was given, look it up from connection-specific and
		// then generic environment variables.
		commandEnvVars := []string{}
		if len(command) == 0 {
			if len(conn) > 0 {
				envVarName := ansible.EnvCommand + "_" + strings.ToUpper(conn)
				commandEnvVars = append(commandEnvVars, envVarName)
				command = os.Getenv(envVarName)
			}
		}
		commandEnvVars = append(commandEnvVars, ansible.EnvCommand)
		if len(command) == 0 {
			command = os.Getenv(ansible.EnvCommand)
		}
		if len(command) == 0 {
			plural := ""
			if len(commandEnvVars) > 1 {
				plural = "s"
			}
			log.Die("Could not find a command to execute from the environment variable%s: %s", plural, strings.Join(commandEnvVars, ", "))
		}
		// Optionally generate a local wrapper script for opening a remote shell.
		bash := os.ExpandEnv(bash)
		if len(bash) > 0 {
			err = generateBashScript(bash, conn)
			if err != nil {
				log.Err("Failed to generate bash script at %s due to: %v", bash, err)
				return
			}
		}
		// Run the command over WinRM (Windows) or SSH (everything else).
		if conn == ansible.ConnectionWinRM {
			password := hostEntry.Password
			if len(password) == 0 {
				password = os.ExpandEnv(passwordFlag)
				if password == "" {
					log.Die("Cannot connect without a password")
				}
			}
			err = winrm.RemoteWinRmCommand(user, password, host, port, command, kubeclient, rc, hostEntry.Name)
		} else {
			privatekey := hostEntry.PrivateKey
			err = ssh.RemoteSSHCommand(user, privatekey, host, port, command, envVars)
		}
		if err != nil {
			log.Err("Failed: %v", err)
		}
	},
}
// generateBashScript writes a small wrapper script to file that re-invokes
// kansible to open an interactive shell on the remote machine. WinRM
// connections get a `cmd` shell, everything else gets `bash`.
func generateBashScript(file string, connection string) error {
	remoteShell := "bash"
	if connection == ansible.ConnectionWinRM {
		remoteShell = "cmd"
	}
	script := `#!/bin/sh
echo "opening shell on remote machine..."
export ` + ansible.EnvIsBashShell + `=true
export ` + ansible.EnvPortForward + `=false
kansible pod appservers ` + remoteShell + "\n"
	return ioutil.WriteFile(file, []byte(script), 0555)
}
================================================
FILE: cmd/rc.go
================================================
/*
* Copyright 2016 Red Hat
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"os"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"github.com/fabric8io/kansible/ansible"
"github.com/fabric8io/kansible/log"
"github.com/spf13/cobra"
)
const (
	// MessageFailedToCreateKubernetesClient is the message to report if a Kubernetes client cannot be created
	MessageFailedToCreateKubernetesClient = "Failed to create Kubernetes client. Maybe you need to run `oc login`?. Error: %s"
)
var (
	// inventory is the location of the Ansible inventory file (--inventory flag)
	inventory string
	// replicas is the desired replica count for the RC (--replicas flag);
	// defaults to -1 — presumably meaning "derive from the inventory", but
	// confirm against ansible.UpdateKansibleRC
	replicas int
)
// init registers the rc command and its flags on the root command.
func init() {
	rcCmd.Flags().StringVar(&inventory, "inventory", "inventory", "the location of your Ansible inventory file")
	rcCmd.Flags().IntVar(&replicas, "replicas", -1, "specifies the number of replicas to create for the RC")
	RootCmd.AddCommand(rcCmd)
}
// rcCmd creates or updates the kansible ReplicationController for a group of
// hosts in an Ansible inventory. (The previous comment wrongly described it
// as the root command.)
var rcCmd = &cobra.Command{
	Use:   "rc ",
	Short: "Creates or updates the kansible ReplicationController for some hosts in an Ansible inventory",
	Long:  `This commmand will analyse the hosts in an Ansible inventory and creates or updates the ReplicationController for the kansible pods.`,
	Run: func(cmd *cobra.Command, args []string) {
		// The single positional argument names the host group in the inventory.
		if len(args) != 1 {
			log.Die("Expected argument for the name of the hosts in the ansible inventory file")
		}
		hosts := args[0]
		f := cmdutil.NewFactory(clientConfig)
		if f == nil {
			log.Die("Failed to create Kubernetes client factory!")
		}
		kubeclient, err := f.Client()
		if err != nil || kubeclient == nil {
			log.Die(MessageFailedToCreateKubernetesClient, err)
		}
		ns, _, _ := f.DefaultNamespace()
		if len(ns) == 0 {
			ns = "default"
		}
		// The inventory flag value may reference environment variables.
		inventory = os.ExpandEnv(inventory)
		if inventory == "" {
			log.Die("Value for inventory flag is empty")
		}
		hostEntries, err := ansible.LoadHostEntries(inventory, hosts)
		if err != nil {
			log.Die("Cannot load host entries: %s", err)
		}
		log.Info("Found %d host entries in the Ansible inventory for %s", len(hostEntries), hosts)
		// The RC template lives under kubernetes/<hosts>/rc.yml by convention.
		rcFile := "kubernetes/" + hosts + "/rc.yml"
		_, err = ansible.UpdateKansibleRC(hostEntries, hosts, f, kubeclient, ns, rcFile, replicas)
		if err != nil {
			log.Die("Failed to update Kansible RC: %s", err)
		}
	},
}
================================================
FILE: cmd/root.go
================================================
/*
* Copyright 2016 Red Hat
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"os"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
"github.com/fabric8io/kansible/log"
)
var (
	// RootCmd is the root command for the whole program.
	RootCmd = &cobra.Command{
		Use:   "kansible",
		Short: "Orchestrate processes in the same way as you orchestrate Docker containers with Kubernetes",
		Long: `Kansible
Kansible orchestrates processes in the same way as you orchestrate Docker containers with Kubernetes.
Once you have created an Ansible playbook to install and configure your software you can use Kansible to create
a Kubernetes Replication Controller to run, scale and manage the processes providing a universal view in Kubernetes
of all your containers and processes along with common scaling, high availability, service discovery and load balancing.
More help is here: https://github.com/fabric8io/kansible/blob/master/README.md
`,
	}

	// sshPort is the port used for remote SSH connections (--port flag, default 22).
	sshPort int
	// clientConfig resolves the kubeconfig/context used to reach Kubernetes.
	clientConfig clientcmd.ClientConfig
)
// init wires the persistent flags shared by every subcommand and builds the
// kubeconfig-backed client configuration from the same flag set.
func init() {
	RootCmd.PersistentFlags().IntVar(&sshPort, "port", 22, "the port for the remote SSH connection")
	RootCmd.PersistentFlags().BoolVar(&log.IsDebugging, "debug", false, "enable verbose debugging output")
	clientConfig = defaultClientConfig(RootCmd.PersistentFlags())
}
// defaultClientConfig wires the standard kubectl kubeconfig loading rules and
// override flags into the given flag set and returns a deferred client
// config that is resolved lazily (prompting via stdin when interaction is
// needed).
func defaultClientConfig(flags *pflag.FlagSet) clientcmd.ClientConfig {
	rules := clientcmd.NewDefaultClientConfigLoadingRules()
	flags.StringVar(&rules.ExplicitPath, "kubeconfig", "", "Path to the kubeconfig file to use for CLI requests.")
	overrides := &clientcmd.ConfigOverrides{}
	clientcmd.BindOverrideFlags(overrides, flags, clientcmd.RecommendedConfigOverrideFlags(""))
	return clientcmd.NewInteractiveDeferredLoadingClientConfig(rules, overrides, os.Stdin)
}
================================================
FILE: cmd/run.go
================================================
/*
* Copyright 2016 Red Hat
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"os"
"strconv"
"github.com/fabric8io/kansible/ansible"
"github.com/fabric8io/kansible/log"
"github.com/fabric8io/kansible/ssh"
"github.com/fabric8io/kansible/winrm"
"github.com/spf13/cobra"
)
// NOTE: leftover fragment from the pre-cobra `cli.StringFlag` definition of
// the --connection flag; the flag is now registered in init() below.
// cli.StringFlag{
//   Name: "connection",
//   Usage: "The Ansible connection type to use. Defaults to SSH unless 'winrm' is defined to use WinRM on Windows",
// },
var (
	// run command flag values; the ${KANSIBLE_*} defaults are expanded with
	// os.ExpandEnv when the command executes
	user, password, host, command, privatekey string
)
// init registers the run command and its connection flags on the root
// command. Defaults of the form ${KANSIBLE_*} are expanded at run time so
// values can come from the environment.
func init() {
	runCmd.Flags().StringVar(&user, "user", "${KANSIBLE_USER}", "the user to use on the remote connection")
	runCmd.Flags().StringVar(&privatekey, "privatekey", "${KANSIBLE_PRIVATEKEY}", "the private key used for SSH")
	runCmd.Flags().StringVar(&host, "host", "${KANSIBLE_HOST}", "the host for the remote connection")
	runCmd.Flags().StringVar(&command, "command", "${KANSIBLE_COMMAND}", "the remote command to invoke on the host")
	runCmd.Flags().StringVar(&password, "password", "", "the password if using WinRM to execute the command")
	runCmd.Flags().StringVar(&connection, "connection", "", "the Ansible connection type to use. Defaults to SSH unless 'winrm' is defined to use WinRM on Windows")
	RootCmd.AddCommand(runCmd)
}
// runCmd runs a remote command on a given host to test out SSH / WinRM
//
// NOTE(review): the Long help text contains typos ("commmand", "avaiable");
// the "commmand" spelling is shared by other commands in this package, so
// fix them together rather than here.
var runCmd = &cobra.Command{
	Use:   "run [command]",
	Short: "Runs a remote command on a given host to test out SSH / WinRM",
	Long:  `This commmand will begin running the supervisor on an avaiable host.`,
	Run: func(cmd *cobra.Command, args []string) {
		// All connection settings may come from environment variables via the
		// ${KANSIBLE_*} flag defaults; expand them before validating.
		command = os.ExpandEnv(command)
		if command == "" {
			log.Die("Command is required")
		}
		host = os.ExpandEnv(host)
		if host == "" {
			log.Die("Host is required")
		}
		user = os.ExpandEnv(user)
		if user == "" {
			log.Die("User is required")
		}
		if connection == ansible.ConnectionWinRM {
			// WinRM connections authenticate with a password.
			password = os.ExpandEnv(password)
			if password == "" {
				log.Die("Password is required")
			}
			err := winrm.RemoteWinRmCommand(user, password, host, strconv.Itoa(sshPort), command, nil, nil, "")
			if err != nil {
				log.Err("Failed: %v", err)
			}
		} else {
			// Default transport is SSH with private key authentication.
			privatekey = os.ExpandEnv(privatekey)
			if privatekey == "" {
				log.Die("Private key is required")
			}
			err := ssh.RemoteSSHCommand(user, privatekey, host, strconv.Itoa(sshPort), command, nil)
			if err != nil {
				log.Err("Failed: %v", err)
			}
		}
	},
}
================================================
FILE: cmd/version.go
================================================
/*
* Copyright 2016 Red Hat
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"bytes"
"fmt"
"html/template"
"os"
"strings"
"github.com/spf13/cobra"
"github.com/fabric8io/kansible/version"
)
// versionInfoTmpl renders the build metadata printed by the `version`
// command from the version.Map values.
// NOTE(review): this template is parsed with html/template (see imports),
// which HTML-escapes values — e.g. a build user of the form <user@host>
// would be mangled. text/template looks like the intended package; confirm
// and change the import alongside this comment.
var versionInfoTmpl = `
kansible, version {{.version}} (branch: {{.branch}}, revision: {{.revision}})
build user: {{.buildUser}}
build date: {{.buildDate}}
go version: {{.goVersion}}
`
// versionCmd prints the build version information rendered from version.Map.
var versionCmd = &cobra.Command{
	Use:   "version",
	Short: "Output version information and exit",
	Long:  `Output version information and exit.`,
	Run: func(cmd *cobra.Command, args []string) {
		// Render the template against the version map baked in at build time;
		// a parse/execute failure is a programmer error, hence panic.
		t := template.Must(template.New("version").Parse(versionInfoTmpl))
		var buf bytes.Buffer
		if err := t.ExecuteTemplate(&buf, "version", version.Map); err != nil {
			panic(err)
		}
		fmt.Fprintln(os.Stdout, strings.TrimSpace(buf.String()))
	},
}
// init registers the version command on the root command.
func init() {
	RootCmd.AddCommand(versionCmd)
}
================================================
FILE: glide.yaml
================================================
package: github.com/fabric8io/gosupervise # NOTE(review): repo is fabric8io/kansible — this import path looks stale; confirm before changing
import:
- package: github.com/Masterminds/vcs
- package: github.com/Masterminds/semver
version: ^1.0.0
- package: k8s.io/kubernetes
version: v1.2.1
subpackages:
- pkg/client
- pkg/cmdutil
- package: gopkg.in/yaml.v2
- package: github.com/nu7hatch/gouuid
- package: github.com/masterzen/xmlpath
version: 13f4951698adc0fa9c1dda3e275d489a24201161
- package: github.com/masterzen/simplexml
subpackages:
- dom
- package: github.com/masterzen/winrm
version: 71c963ed3718881facabea7cf61bc7b13911902e
- package: github.com/fatih/color
- package: github.com/cloudfoundry-incubator/candiedyaml
================================================
FILE: header.txt
================================================
Copyright <%=copyright_years.join(', ')%> <%=copyright_holders.join(', ')%>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================
FILE: headers.yml
================================================
ruby:
ext: ['.rb', '.rake']
after: ['^#!', '^#.*encoding:']
comment:
open: '#\n'
close: '#\n'
prefix: '# '
perl:
ext: ['.pl']
after: ['^#!', '^#.*encoding:']
comment:
open: '#\n'
close: '#\n'
prefix: '# '
# Support PEP 0263 comments:
# coding=
# -*- coding: -*-
# vim: set fileencoding= :
python:
ext: ['.py']
after: ['^#!', '^#.*coding:', '^#.*coding=', '^#.*fileencoding=']
comment:
open: '\n'
close: '\n'
prefix: '# '
html:
ext: ['.html', '.htm', '.xhtml']
comment:
open: '\n'
prefix: ' '
php:
ext: ['.php']
after: [ '^#!' ]
comment:
open: '\n'
prefix: ' * '
javascript:
ext: ['.js']
comment:
open: '/*\n'
close: ' */\n\n'
prefix: ' * '
css:
ext: ['.css']
comment:
open: '/*\n'
close: ' */\n\n'
prefix: ' * '
c:
ext: ['.c', '.h']
comment:
open: '/*'
close: ' */\n\n'
prefix: ' * '
cpp:
ext: ['.cpp', '.hpp', '.cc', '.hh']
comment:
open: '//\n'
close: '//\n\n'
prefix: '// '
java:
ext: ['.java']
comment:
open: '/*\n'
close: ' */\n\n'
prefix: ' * '
groovy:
ext: ['.groovy']
comment:
open: '/*\n'
close: ' */\n\n'
prefix: ' * '
haml:
ext: ['.haml', '.hamlc']
comment:
open: '-#\n'
close: '-#\n'
prefix: '-# '
coffee:
ext: ['.coffee']
comment:
open: '###\n'
close: '###\n'
prefix: ''
# M4 macro language, use #, not dnl
m4:
ext: ['.m4']
comment:
open: '#\n'
close: '#\n'
prefix: '# '
# Most shells, really
shell:
ext: ['.sh']
after: ['^#!']
comment:
open: '#\n'
close: '#\n'
prefix: '# '
# Use "-- " to make sure e.g. MySQL understands it
sql:
ext: ['.sql']
comment:
open: '-- \n'
close: '-- \n'
prefix: '-- '
# XML is *not* the same as HTML, and the comments need to go after a
# preprocessing directive, if present.
# FIXME: only supports single line directives
xml:
ext: ['.xml', '.xsd', '.mxml']
after: ['^<\?']
comment:
open: '\n'
prefix: ' '
yaml:
ext: ['.yml', '.yaml']
comment:
open: '#\n'
close: '#\n'
prefix: '# '
action_script:
ext: ['.as']
comment:
open: '//\n'
close: '//\n\n'
prefix: '// '
sass:
ext: ['.sass', '.scss']
comment:
open: '/*\n'
close: ' */\n\n'
prefix: ' * '
go:
ext: ['.go']
comment:
open: '/*\n'
close: ' */\n\n'
prefix: ' * '
================================================
FILE: k8s/k8s.go
================================================
/*
* Copyright 2016 Red Hat
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package k8s
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"k8s.io/kubernetes/pkg/api"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/kubectl"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/kubectl/resource"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/strategicpatch"
"github.com/ghodss/yaml"
"github.com/fabric8io/kansible/log"
)
// GetThisPodName returns the name of the pod this process is running in.
//
// It prefers the `HOSTNAME` environment variable (which Kubernetes sets inside
// containers) and falls back to the operating-system hostname when that is
// unset.
func GetThisPodName() (string, error) {
	name := os.Getenv("HOSTNAME")
	if name == "" {
		hostname, err := os.Hostname()
		if err != nil {
			return "", err
		}
		name = hostname
	}
	if name == "" {
		return "", fmt.Errorf("Could not find the pod name using $HOSTNAME!")
	}
	return name, nil
}
// ReadReplicationControllerFromFile reads the ReplicationController object
// from the given file name, delegating parsing to ReadReplicationController.
func ReadReplicationControllerFromFile(filename string) (*api.ReplicationController, error) {
	contents, err := ioutil.ReadFile(filename)
	if err != nil {
		return nil, err
	}
	return ReadReplicationController(contents)
}
// ReadReplicationController unmarshals a ReplicationController from the given
// YAML (or JSON) data.
func ReadReplicationController(data []byte) (*api.ReplicationController, error) {
	var rc api.ReplicationController
	if err := yaml.Unmarshal(data, &rc); err != nil {
		return nil, err
	}
	return &rc, nil
}
// PodIsRunning returns true if a pod with the given name is present in the
// given list of pods.
//
// NOTE(review): despite the name, this only checks membership in the list —
// it never inspects pod.Status.Phase. Presumably callers pass a list of
// running pods; confirm against the call sites.
func PodIsRunning(pods *api.PodList, podName string) bool {
	for i := range pods.Items {
		if pods.Items[i].ObjectMeta.Name == podName {
			return true
		}
	}
	return false
}
// GetFirstContainerOrCreate returns the first Container in the PodSpec for this ReplicationController
// lazily creating structures as required.
func GetFirstContainerOrCreate(rc *api.ReplicationController) *api.Container {
	podSpec := GetOrCreatePodSpec(rc)
	if len(podSpec.Containers) == 0 {
		// BUG FIX: the original assigned to podSpec.Containers[0] on an empty
		// slice, which panics with index-out-of-range; append instead.
		podSpec.Containers = append(podSpec.Containers, api.Container{})
	}
	return &podSpec.Containers[0]
}
// GetOrCreatePodSpec returns the PodSpec for this ReplicationController,
// lazily creating the pod template as required.
//
// rc.Spec and Template.Spec are value (non-pointer) fields, so taking their
// address can never yield nil — the original nil checks on them were dead
// code. The only structure that genuinely needs lazy creation is the
// *api.PodTemplateSpec pointer.
func GetOrCreatePodSpec(rc *api.ReplicationController) *api.PodSpec {
	if rc.Spec.Template == nil {
		rc.Spec.Template = &api.PodTemplateSpec{}
	}
	return &rc.Spec.Template.Spec
}
// GetContainerEnvVar returns the value of the named environment variable on
// the given container, or the empty string when the container is nil or has
// no variable of that name.
func GetContainerEnvVar(container *api.Container, name string) string {
	if container == nil {
		return ""
	}
	for _, env := range container.Env {
		if env.Name == name {
			return env.Value
		}
	}
	return ""
}
// EnsureContainerHasEnvVar if there is an existing EnvVar for the given name then lets update it
// with the given value otherwise lets add a new entry.
// Returns true if there was already an existing environment variable.
func EnsureContainerHasEnvVar(container *api.Container, name string, value string) bool {
	for i := range container.Env {
		if container.Env[i].Name == name {
			// BUG FIX: mutate the slice element in place. The original ranged
			// by value, so it updated a copy and the new value was discarded.
			container.Env[i].Value = value
			return true
		}
	}
	container.Env = append(container.Env, api.EnvVar{
		Name:  name,
		Value: value,
	})
	return false
}
// EnsureContainerHasEnvVarFromField if there is an existing EnvVar for the given name then lets update it
// with the given fieldPath otherwise lets add a new entry.
// Returns true if there was already an existing environment variable.
func EnsureContainerHasEnvVarFromField(container *api.Container, name string, fieldPath string) bool {
	from := &api.EnvVarSource{
		FieldRef: &api.ObjectFieldSelector{
			FieldPath: fieldPath,
		},
	}
	for i := range container.Env {
		if container.Env[i].Name == name {
			// BUG FIX: update through the index. The original ranged by value,
			// so the new ValueFrom was applied to a copy and never stored.
			container.Env[i].ValueFrom = from
			container.Env[i].Value = ""
			return true
		}
	}
	container.Env = append(container.Env, api.EnvVar{
		Name:      name,
		ValueFrom: from,
	})
	return false
}
// EnsureContainerHasPreStopCommand ensures that the given container has a
// `preStop` lifecycle hook which execs the given commands, lazily creating
// the Lifecycle and Handler structures when they are absent.
func EnsureContainerHasPreStopCommand(container *api.Container, commands []string) {
	if container.Lifecycle == nil {
		container.Lifecycle = &api.Lifecycle{}
	}
	if container.Lifecycle.PreStop == nil {
		container.Lifecycle.PreStop = &api.Handler{}
	}
	container.Lifecycle.PreStop.Exec = &api.ExecAction{Command: commands}
}
// EnsureContainerHasVolumeMount ensures that there is a volume mount of the given name with the given values.
// Returns true if there was already a volume mount (whose mount path is then
// updated in place).
func EnsureContainerHasVolumeMount(container *api.Container, name string, mountPath string) bool {
	for i := range container.VolumeMounts {
		if container.VolumeMounts[i].Name == name {
			// BUG FIX: assign via the index. The original ranged by value, so
			// the MountPath update landed on a copy and was silently lost.
			container.VolumeMounts[i].MountPath = mountPath
			return true
		}
	}
	container.VolumeMounts = append(container.VolumeMounts, api.VolumeMount{
		Name:      name,
		MountPath: mountPath,
	})
	return false
}
// EnsurePodSpecHasGitVolume ensures that there is a volume with the given name and git repo and revision.
// Returns true if a volume of that name already existed (its git source is
// then updated in place).
func EnsurePodSpecHasGitVolume(podSpec *api.PodSpec, name string, gitRepo string, gitRevision string) bool {
	gitSource := &api.GitRepoVolumeSource{
		Repository: gitRepo,
		Revision:   gitRevision,
	}
	for i := range podSpec.Volumes {
		if podSpec.Volumes[i].Name == name {
			// BUG FIX: assign via the index. The original ranged by value, so
			// the new GitRepo source was set on a copy and discarded.
			podSpec.Volumes[i].GitRepo = gitSource
			return true
		}
	}
	podSpec.Volumes = append(podSpec.Volumes, api.Volume{
		Name: name,
		VolumeSource: api.VolumeSource{
			GitRepo: gitSource,
		},
	})
	return false
}
// EnsurePodSpecHasSecretVolume ensures that there is a volume with the given name and secret.
// Returns true if a volume of that name already existed (its secret source is
// then updated in place).
func EnsurePodSpecHasSecretVolume(podSpec *api.PodSpec, name string, secretName string) bool {
	secretSource := &api.SecretVolumeSource{
		SecretName: secretName,
	}
	for i := range podSpec.Volumes {
		if podSpec.Volumes[i].Name == name {
			// BUG FIX: assign via the index. The original ranged by value, so
			// the new Secret source was set on a copy and discarded.
			podSpec.Volumes[i].Secret = secretSource
			return true
		}
	}
	podSpec.Volumes = append(podSpec.Volumes, api.Volume{
		Name: name,
		VolumeSource: api.VolumeSource{
			Secret: secretSource,
		},
	})
	return false
}
// EnsureServiceAccountExists ensures that there is a service account created
// for the given name in the given namespace, creating one when the lookup
// fails. The boolean result reports whether this call created the account.
func EnsureServiceAccountExists(c *client.Client, ns string, serviceAccountName string) (bool, error) {
	accounts := c.ServiceAccounts(ns)
	existing, err := accounts.Get(serviceAccountName)
	if err == nil && existing != nil {
		// Already present; nothing to do.
		return false, nil
	}
	// The lookup failed, so try to create the service account.
	log.Info("Creating ServiceAccount %s", serviceAccountName)
	_, err = accounts.Create(&api.ServiceAccount{
		ObjectMeta: api.ObjectMeta{
			Name: serviceAccountName,
		},
	})
	if err != nil {
		return false, err
	}
	return true, nil
}
// ApplyResource applies the given data as a kubernetes resource, in the style
// of `kubectl apply`: for every object decoded from the data stream it
// computes a three-way strategic merge patch (last-applied annotation vs the
// supplied input vs the current server state) and PATCHes the server with it.
//
// ns is the default namespace for objects that do not specify one; name
// labels the data source in error messages. Returns an error if validation,
// decoding, or any patch fails, or if the stream contained no objects.
func ApplyResource(f *cmdutil.Factory, c *client.Client, ns string, data []byte, name string) error {
	// Cache the downloaded swagger schema on disk so validation is fast on
	// repeated runs.
	schemaCacheDir := "/tmp/kubectl.schema"
	validate := true
	schema, err := f.Validator(validate, schemaCacheDir)
	if err != nil {
		log.Info("Failed to load kubernetes schema: %s", err)
		return err
	}
	// Build a visitor over the raw manifest bytes, resolving each object's
	// kind/namespace via the factory's mapper.
	mapper, typer := f.Object()
	r := resource.NewBuilder(mapper, typer, resource.ClientMapperFunc(f.ClientForMapping), f.Decoder(true)).
		Schema(schema).
		ContinueOnError().
		NamespaceParam(ns).DefaultNamespace().
		Stream(bytes.NewReader(data), name).
		Flatten().
		Do()
	err = r.Err()
	if err != nil {
		log.Info("Failed to load mapper!")
		return err
	}
	// Count successfully patched objects so an empty stream can be reported.
	count := 0
	err = r.Visit(func(info *resource.Info, err error) error {
		// In this method, info.Object contains the object retrieved from the server
		// and info.VersionedObject contains the object decoded from the input source.
		if err != nil {
			return err
		}
		// Get the modified configuration of the object. Embed the result
		// as an annotation in the modified configuration, so that it will appear
		// in the patch sent to the server.
		modified, err := kubectl.GetModifiedConfiguration(info, true, f.JSONEncoder())
		if err != nil {
			return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving modified configuration from:\n%v\nfor:", info), info.Source, err)
		}
		// Fetch the object's current state from the server into info.Object.
		if err := info.Get(); err != nil {
			return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving current configuration of:\n%v\nfrom server for:", info), info.Source, err)
		}
		// Serialize the current configuration of the object from the server.
		current, err := runtime.Encode(f.JSONEncoder(), info.Object)
		if err != nil {
			return cmdutil.AddSourceToErr(fmt.Sprintf("serializing current configuration from:\n%v\nfor:", info), info.Source, err)
		}
		// Retrieve the original configuration of the object from the annotation.
		original, err := kubectl.GetOriginalConfiguration(info)
		if err != nil {
			return cmdutil.AddSourceToErr(fmt.Sprintf("retrieving original configuration from:\n%v\nfor:", info), info.Source, err)
		}
		// Compute a three way strategic merge patch to send to server.
		patch, err := strategicpatch.CreateThreeWayMergePatch(original, modified, current, info.VersionedObject, false)
		if err != nil {
			format := "creating patch with:\noriginal:\n%s\nmodified:\n%s\ncurrent:\n%s\nfrom:\n%v\nfor:"
			return cmdutil.AddSourceToErr(fmt.Sprintf(format, original, modified, current, info), info.Source, err)
		}
		// Send the patch to the server for this object's resource mapping.
		helper := resource.NewHelper(info.Client, info.Mapping)
		_, err = helper.Patch(info.Namespace, info.Name, api.StrategicMergePatchType, patch)
		if err != nil {
			return cmdutil.AddSourceToErr(fmt.Sprintf("applying patch:\n%s\nto:\n%v\nfor:", patch, info), info.Source, err)
		}
		count++
		cmdutil.PrintSuccess(mapper, false, os.Stdout, info.Mapping.Resource, info.Name, "configured")
		return nil
	})
	if err != nil {
		return err
	}
	if count == 0 {
		return fmt.Errorf("no objects passed to apply")
	}
	return nil
}
================================================
FILE: kansible.go
================================================
/*
* Copyright 2016 Red Hat
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main
import (
"fmt"
"os"
"github.com/fabric8io/kansible/cmd"
)
// main is the kansible entry point: it executes the root command and exits
// with a non-zero status when the command fails.
func main() {
	err := cmd.RootCmd.Execute()
	if err != nil {
		fmt.Println(err)
		os.Exit(-1)
	}
}
================================================
FILE: log/log.go
================================================
/*
* Copyright 2016 Red Hat
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package log
import (
"fmt"
"os"
"github.com/fatih/color"
)
var (
// IsDebugging toggles whether or not to enable debug output and behavior.
IsDebugging = false
// ErrorState denotes if application is in an error state.
ErrorState = false
)
// Msg passes through the formatter, but otherwise prints exactly as-is.
//
// No prettification.
func Msg(format string, v ...interface{}) {
	line := appendNewLine(format)
	fmt.Printf(line, v...)
}
// Die reports the error and terminates the process with exit status 1.
// When debugging is enabled it panics instead, so a stack trace is produced
// before the process dies.
func Die(format string, v ...interface{}) {
	Err(format, v...)
	if IsDebugging {
		msg := fmt.Sprintf(format, v...)
		panic(msg)
	}
	os.Exit(1)
}
// CleanExit prints an informational message and then exits successfully
// (status 0).
func CleanExit(format string, v ...interface{}) {
	Info(format, v...)
	os.Exit(0)
}
// Err prints an error message with a red "[ERROR]" prefix and flags the
// package-level ErrorState. It does not cause an exit.
func Err(format string, v ...interface{}) {
	prefix := color.RedString("[ERROR] ")
	fmt.Print(prefix)
	fmt.Printf(appendNewLine(format), v...)
	ErrorState = true
}
// Info prints a green-tinted message prefixed with "---> ".
func Info(format string, v ...interface{}) {
	prefix := color.GreenString("---> ")
	fmt.Print(prefix)
	fmt.Printf(appendNewLine(format), v...)
}
// Debug prints a cyan-tinted message, but only when IsDebugging is true.
func Debug(format string, v ...interface{}) {
	if !IsDebugging {
		return
	}
	prefix := color.CyanString("[DEBUG] ")
	fmt.Print(prefix)
	fmt.Printf(appendNewLine(format), v...)
}
// Warn prints a yellow-tinted warning message prefixed with "[WARN] ".
func Warn(format string, v ...interface{}) {
	prefix := color.YellowString("[WARN] ")
	fmt.Print(prefix)
	fmt.Printf(appendNewLine(format), v...)
}
// appendNewLine returns the format string with a trailing newline appended,
// so every log line is terminated consistently.
func appendNewLine(format string) string {
	return fmt.Sprintf("%s\n", format)
}
================================================
FILE: ssh/ssh.go
================================================
/*
* Copyright 2016 Red Hat
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ssh
import (
"fmt"
"io"
"io/ioutil"
"net"
"os"
"os/signal"
"github.com/fabric8io/kansible/log"
"golang.org/x/crypto/ssh"
"syscall"
)
// RemoteSSHCommand invokes the given command on a host and port over SSH,
// wiring local stdin/stdout/stderr to the remote session, forwarding the
// given environment variables, and tearing the session down when the local
// process receives a termination signal.
func RemoteSSHCommand(user string, privateKey string, host string, port string, cmd string, envVars map[string]string) error {
	if len(privateKey) == 0 {
		return fmt.Errorf("Could not find PrivateKey for entry %s", host)
	}
	log.Info("Connecting to host over SSH on host %s and port %s with user %s with command `%s`", host, port, user, cmd)
	hostPort := net.JoinHostPort(host, port)
	sshConfig := &ssh.ClientConfig{
		User: user,
		Auth: []ssh.AuthMethod{
			PublicKeyFile(privateKey),
		},
	}
	// NOTE: the original checked sshConfig for nil here, but the address of a
	// composite literal can never be nil, so that check was dead code.
	connection, err := ssh.Dial("tcp", hostPort, sshConfig)
	if err != nil {
		return fmt.Errorf("Failed to dial: %s", err)
	}
	// BUG FIX: close the underlying TCP connection when done; it used to leak.
	defer connection.Close()
	session, err := connection.NewSession()
	if err != nil {
		return fmt.Errorf("Failed to create session: %s", err)
	}
	defer session.Close()
	modes := ssh.TerminalModes{
		// ssh.ECHO: 0, // disable echoing
		ssh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud
		ssh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud
	}
	if err := session.RequestPty("xterm", 80, 40, modes); err != nil {
		return fmt.Errorf("Request for pseudo terminal failed: %s", err)
	}
	stdin, err := session.StdinPipe()
	if err != nil {
		return fmt.Errorf("Unable to setup stdin for session: %v", err)
	}
	go io.Copy(stdin, os.Stdin)
	stdout, err := session.StdoutPipe()
	if err != nil {
		return fmt.Errorf("Unable to setup stdout for session: %v", err)
	}
	go io.Copy(os.Stdout, stdout)
	stderr, err := session.StderrPipe()
	if err != nil {
		return fmt.Errorf("Unable to setup stderr for session: %v", err)
	}
	go io.Copy(os.Stderr, stderr)
	for envName, envValue := range envVars {
		log.Info("Setting environment value %s = %s", envName, envValue)
		if err := session.Setenv(envName, envValue); err != nil {
			return fmt.Errorf("Could not set environment variable %s = %s over SSH. This could be disabled by the sshd configuration. See the `AcceptEnv` setting in your /etc/ssh/sshd_config more info: http://linux.die.net/man/5/sshd_config . Error: %s", envName, envValue, err)
		}
	}
	// Close the session when the local process is asked to stop so the remote
	// command does not outlive us.
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
	// NOTE(review): signaled is written by the goroutine below and read after
	// session.Run without synchronization — technically a data race; consider
	// sync/atomic if this ever misbehaves.
	signaled := false
	go func() {
		<-signals
		log.Info("Shutting down SSH session.")
		signaled = true
		session.Close()
	}()
	log.Info("Running command %s", cmd)
	err = session.Run(cmd)
	if !signaled && err != nil {
		// BUG FIX: use a %s verb for cmd; the original concatenated cmd into
		// the format string, corrupting output when cmd contained '%'.
		return fmt.Errorf("Failed to run command: %s: %v", cmd, err)
	}
	return nil
}
// PublicKeyFile creates the public-key auth method for the given private key
// file. It returns nil when the file cannot be read or the key cannot be
// parsed — note that failures are silent, so callers see an empty auth method
// rather than an error.
func PublicKeyFile(file string) ssh.AuthMethod {
	data, err := ioutil.ReadFile(file)
	if err != nil {
		return nil
	}
	signer, err := ssh.ParsePrivateKey(data)
	if err != nil {
		return nil
	}
	return ssh.PublicKeys(signer)
}
================================================
FILE: tools/create-intellij-idea-golib.sh
================================================
#!/bin/bash
# Build up a GOPATH directory for IntelliJ IDEA
# which doesn't support GO15VENDOREXPERIMENT yet
#
# BUG FIX: all variable expansions are now quoted so the script works (and
# `rm -rf` stays safe) when the checkout path contains spaces.

# Resolve the repository root (the parent of this script's directory).
pushd "$(dirname "$0")/.." > /dev/null
base=$(pwd)
popd > /dev/null

golib="${base}/golib"
rm -rf "${golib}"

# Link all from the vendor dirs pulled by glide:
vendor_src="${golib}/vendor/src"
mkdir -p "${vendor_src}"
for f in "${base}"/vendor/*
do
  # echo "Symlinking vendor source dir: ${f}"
  ln -s "${f}" "${vendor_src}/"
done

# Link self into the golib dir
self_src="${golib}/self/src/github.com/fabric8io"
mkdir -p "${self_src}"
ln -s "${base}" "${self_src}/kansible";

echo "Use the following dirs exclusively as go-libraries in IntelliJ IDEA:"
echo "(Preferences -> Languages & Frameworks -> Go -> Go Libraries, Add to Project Libraries, uncheck 'use system defined GOPATH')"
echo
echo "${golib}/vendor"
echo "${golib}/self"
================================================
FILE: vendor/bitbucket.org/ww/goautoneg/Makefile
================================================
# Legacy build using the Makefile infrastructure that shipped with early Go
# releases ($(GOROOT)/src/Make.inc and Make.pkg); modern Go uses `go build`.
include $(GOROOT)/src/Make.inc

TARG=bitbucket.org/ww/goautoneg
GOFILES=autoneg.go

include $(GOROOT)/src/Make.pkg

# Reformat all Go sources in place.
format:
	gofmt -w *.go

# Regenerate README.txt from the package's godoc output.
docs:
	gomake clean
	godoc ${TARG} > README.txt
================================================
FILE: vendor/bitbucket.org/ww/goautoneg/README.txt
================================================
PACKAGE
package goautoneg
import "bitbucket.org/ww/goautoneg"
HTTP Content-Type Autonegotiation.
The functions in this package implement the behaviour specified in
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
Copyright (c) 2011, Open Knowledge Foundation Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
Neither the name of the Open Knowledge Foundation Ltd. nor the
names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
FUNCTIONS
func Negotiate(header string, alternatives []string) (content_type string)
Negotiate the most appropriate content_type given the accept header
and a list of alternatives.
func ParseAccept(header string) (accept []Accept)
Parse an Accept Header string returning a sorted list
of clauses
TYPES
type Accept struct {
Type, SubType string
Q float32
Params map[string]string
}
Structure to represent a clause in an HTTP Accept Header
SUBDIRECTORIES
.hg
================================================
FILE: vendor/bitbucket.org/ww/goautoneg/autoneg.go
================================================
/*
HTTP Content-Type Autonegotiation.
The functions in this package implement the behaviour specified in
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
Copyright (c) 2011, Open Knowledge Foundation Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
Neither the name of the Open Knowledge Foundation Ltd. nor the
names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package goautoneg
import (
"sort"
"strconv"
"strings"
)
// Accept represents a single clause in an HTTP Accept header, e.g.
// "text/html;q=0.9;level=1" yields Type "text", SubType "html", Q 0.9 and
// Params {"level": "1"}.
type Accept struct {
	Type, SubType string
	Q             float64
	Params        map[string]string
}

// accept_slice adapts []Accept to sort.Interface so clauses can be ordered by
// descending quality factor, preferring concrete types over wildcards.
type accept_slice []Accept

func (accept accept_slice) Len() int {
	return len(accept)
}

func (accept accept_slice) Less(i, j int) bool {
	a, b := accept[i], accept[j]
	switch {
	case a.Q > b.Q:
		return true
	case a.Type != "*" && b.Type == "*":
		return true
	case a.SubType != "*" && b.SubType == "*":
		return true
	}
	return false
}

func (accept accept_slice) Swap(i, j int) {
	accept[i], accept[j] = accept[j], accept[i]
}

// parseClause parses one media-range clause such as "text/html;q=0.9;level=1".
// The boolean result is false when the media range is malformed and the
// clause should be skipped.
func parseClause(part string) (Accept, bool) {
	a := Accept{Q: 1.0, Params: make(map[string]string)}
	fields := strings.Split(part, ";")
	typeParts := strings.Split(fields[0], "/")
	a.Type = strings.Trim(typeParts[0], " ")
	switch {
	case len(typeParts) == 1 && a.Type == "*":
		a.SubType = "*"
	case len(typeParts) == 2:
		a.SubType = strings.Trim(typeParts[1], " ")
	default:
		return a, false
	}
	for _, param := range fields[1:] {
		kv := strings.SplitN(param, "=", 2)
		if len(kv) != 2 {
			continue
		}
		key := strings.Trim(kv[0], " ")
		if key == "q" {
			// Parse errors are ignored, matching the historical behaviour.
			a.Q, _ = strconv.ParseFloat(kv[1], 32)
		} else {
			a.Params[key] = strings.Trim(kv[1], " ")
		}
	}
	return a, true
}

// Parse an Accept Header string returning a sorted list
// of clauses
func ParseAccept(header string) (accept []Accept) {
	parts := strings.Split(header, ",")
	accept = make([]Accept, 0, len(parts))
	for _, part := range parts {
		clause, ok := parseClause(strings.Trim(part, " "))
		if ok {
			accept = append(accept, clause)
		}
	}
	sort.Sort(accept_slice(accept))
	return
}

// Negotiate the most appropriate content_type given the accept header
// and a list of alternatives.
//
// NOTE(review): each alternative is assumed to contain a "/"; an alternative
// without one would panic on the sub-type index, exactly as in the original.
func Negotiate(header string, alternatives []string) (content_type string) {
	asp := make([][]string, 0, len(alternatives))
	for _, ctype := range alternatives {
		asp = append(asp, strings.SplitN(ctype, "/", 2))
	}
	for _, clause := range ParseAccept(header) {
		for i, ctsp := range asp {
			switch {
			case clause.Type == ctsp[0] && clause.SubType == ctsp[1],
				clause.Type == ctsp[0] && clause.SubType == "*",
				clause.Type == "*" && clause.SubType == "*":
				return alternatives[i]
			}
		}
	}
	return
}
================================================
FILE: vendor/bitbucket.org/ww/goautoneg/autoneg_test.go
================================================
package goautoneg
import (
"testing"
)
var chrome = "application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5"

// TestParseAccept checks Negotiate against a real Chrome Accept header for a
// range of alternative content-type lists.
func TestParseAccept(t *testing.T) {
	cases := []struct {
		alternatives []string
		expected     string
	}{
		{[]string{"text/html", "image/png"}, "image/png"},
		{[]string{"text/html", "text/plain", "text/n3"}, "text/html"},
		{[]string{"text/n3", "text/plain"}, "text/plain"},
		{[]string{"text/n3", "application/rdf+xml"}, "text/n3"},
	}
	for _, c := range cases {
		content_type := Negotiate(chrome, c.alternatives)
		if content_type != c.expected {
			t.Errorf("got %s expected %s", content_type, c.expected)
		}
	}
}
================================================
FILE: vendor/github.com/Masterminds/semver/.travis.yml
================================================
language: go
go:
- 1.3
- 1.4
- 1.5
- tip
# Setting sudo access to false will let Travis CI use containers rather than
# VMs to run the tests. For more details see:
# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/
# - http://docs.travis-ci.com/user/workers/standard-infrastructure/
sudo: false
notifications:
irc: "irc.freenode.net#masterminds"
================================================
FILE: vendor/github.com/Masterminds/semver/CHANGELOG.md
================================================
# Release 1.x.x (xxxx-xx-xx)
- Issue #9: Speed up version comparison performance (thanks @sdboyer)
- Issue #8: Added benchmarks (thanks @sdboyer)
# Release 1.1.0 (2015-03-11)
- Issue #2: Implemented validation to provide reasons a version failed a
constraint.
# Release 1.0.1 (2015-12-31)
- Fixed #1: * constraint failing on valid versions.
# Release 1.0.0 (2015-10-20)
- Initial release
================================================
FILE: vendor/github.com/Masterminds/semver/LICENSE.txt
================================================
The Masterminds
Copyright (C) 2014-2015, Matt Butcher and Matt Farina
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
================================================
FILE: vendor/github.com/Masterminds/semver/README.md
================================================
# SemVer
The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to:
* Parse semantic versions
* Sort semantic versions
* Check if a semantic version fits within a set of constraints
* Optionally work with a `v` prefix
[](https://travis-ci.org/Masterminds/semver) [](https://ci.appveyor.com/project/mattfarina/semver/branch/master) [](https://godoc.org/github.com/Masterminds/semver) [](http://goreportcard.com/report/Masterminds/semver)
## Parsing Semantic Versions
To parse a semantic version use the `NewVersion` function. For example,
v, err := semver.NewVersion("1.2.3-beta.1+build345")
If there is an error the version wasn't parseable. The version object has methods
to get the parts of the version, compare it to other versions, convert the
version back into a string, and get the original string. For more details
please see the [documentation](https://godoc.org/github.com/Masterminds/semver).
## Sorting Semantic Versions
A set of versions can be sorted using the [`sort`](https://golang.org/pkg/sort/)
package from the standard library. For example,
raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
vs := make([]*semver.Version, len(raw))
for i, r := range raw {
v, err := semver.NewVersion(r)
if err != nil {
t.Errorf("Error parsing version: %s", err)
}
vs[i] = v
}
sort.Sort(semver.Collection(vs))
## Checking Version Constraints
Checking a version against version constraints is one of the most featureful
parts of the package.
c, err := semver.NewConstraint(">= 1.2.3")
if err != nil {
// Handle constraint not being parseable.
}
v, _ := semver.NewVersion("1.3")
if err != nil {
// Handle version not being parseable.
}
// Check if the version meets the constraints. The a variable will be true.
a := c.Check(v)
## Basic Comparisons
There are two elements to the comparisons. First, a comparison string is a list
of comma-separated AND comparisons. These lists are then joined by `||` into OR
comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a
comparison that's greater than or equal to 1.2 and less than 3.0.0, or is
greater than or equal to 4.2.3.
The basic comparisons are:
* `=`: equal (aliased to no operator)
* `!=`: not equal
* `>`: greater than
* `<`: less than
* `>=`: greater than or equal to
* `<=`: less than or equal to
## Hyphen Range Comparisons
There are multiple methods to handle ranges and the first is hyphens ranges.
These look like:
* `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5`
## Wildcards In Comparisons
The `x`, `X`, and `*` characters can be used as a wildcard character. This works
for all comparison operators. When used on the `=` operator it falls
back to the patch level comparison (see tilde below). For example,
* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
* `>= 1.2.x` is equivalent to `>= 1.2.0`
* `<= 2.x` is equivalent to `<= 3`
* `*` is equivalent to `>= 0.0.0`
## Tilde Range Comparisons (Patch)
The tilde (`~`) comparison operator is for patch level ranges when a minor
version is specified and major level changes when the minor number is missing.
For example,
* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
* `~1` is equivalent to `>= 1, < 2`
* `~2.3` is equivalent to `>= 2.3, < 2.4`
* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
* `~1.x` is equivalent to `>= 1, < 2`
## Caret Range Comparisons (Major)
The caret (`^`) comparison operator is for major level changes. This is useful
when comparisons of API versions as a major change is API breaking. For example,
* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
* `^2.3` is equivalent to `>= 2.3, < 3`
* `^2.x` is equivalent to `>= 2.0.0, < 3`
# Validation
In addition to testing a version against a constraint, a version can be validated
against a constraint. When validation fails a slice of errors containing why a
version didn't meet the constraint is returned. For example,
c, err := semver.NewConstraint("<= 1.2.3, >= 1.4")
if err != nil {
// Handle constraint not being parseable.
}
v, _ := semver.NewVersion("1.3")
if err != nil {
// Handle version not being parseable.
}
// Validate a version against a constraint.
a, msgs := c.Validate(v)
// a is false
for _, m := range msgs {
fmt.Println(m)
// Loops over the errors which would read
// "1.3 is greater than 1.2.3"
// "1.3 is less than 1.4"
}
# Contribute
If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues)
or [create a pull request](https://github.com/Masterminds/semver/pulls).
================================================
FILE: vendor/github.com/Masterminds/semver/appveyor.yml
================================================
version: build-{build}.{branch}
clone_folder: C:\gopath\src\github.com\Masterminds\semver
shallow_clone: true
environment:
GOPATH: C:\gopath
platform:
- x64
install:
- go version
- go env
build_script:
- go install -v ./...
test_script:
- go test -v
deploy: off
================================================
FILE: vendor/github.com/Masterminds/semver/benchmark_test.go
================================================
package semver_test
import (
"testing"
"github.com/Masterminds/semver"
)
/* Constraint creation benchmarks */
func benchNewConstraint(c string, b *testing.B) {
for i := 0; i < b.N; i++ {
semver.NewConstraint(c)
}
}
func BenchmarkNewConstraintUnary(b *testing.B) {
benchNewConstraint("=2.0", b)
}
func BenchmarkNewConstraintTilde(b *testing.B) {
benchNewConstraint("~2.0.0", b)
}
func BenchmarkNewConstraintCaret(b *testing.B) {
benchNewConstraint("^2.0.0", b)
}
func BenchmarkNewConstraintWildcard(b *testing.B) {
benchNewConstraint("1.x", b)
}
func BenchmarkNewConstraintRange(b *testing.B) {
benchNewConstraint(">=2.1.x, <3.1.0", b)
}
func BenchmarkNewConstraintUnion(b *testing.B) {
benchNewConstraint("~2.0.0 || =3.1.0", b)
}
/* Check benchmarks */
func benchCheckVersion(c, v string, b *testing.B) {
version, _ := semver.NewVersion(v)
constraint, _ := semver.NewConstraint(c)
for i := 0; i < b.N; i++ {
constraint.Check(version)
}
}
func BenchmarkCheckVersionUnary(b *testing.B) {
benchCheckVersion("=2.0", "2.0.0", b)
}
func BenchmarkCheckVersionTilde(b *testing.B) {
benchCheckVersion("~2.0.0", "2.0.5", b)
}
func BenchmarkCheckVersionCaret(b *testing.B) {
benchCheckVersion("^2.0.0", "2.1.0", b)
}
func BenchmarkCheckVersionWildcard(b *testing.B) {
benchCheckVersion("1.x", "1.4.0", b)
}
func BenchmarkCheckVersionRange(b *testing.B) {
benchCheckVersion(">=2.1.x, <3.1.0", "2.4.5", b)
}
func BenchmarkCheckVersionUnion(b *testing.B) {
benchCheckVersion("~2.0.0 || =3.1.0", "3.1.0", b)
}
func benchValidateVersion(c, v string, b *testing.B) {
version, _ := semver.NewVersion(v)
constraint, _ := semver.NewConstraint(c)
for i := 0; i < b.N; i++ {
constraint.Validate(version)
}
}
/* Validate benchmarks, including fails */
func BenchmarkValidateVersionUnary(b *testing.B) {
benchValidateVersion("=2.0", "2.0.0", b)
}
func BenchmarkValidateVersionUnaryFail(b *testing.B) {
benchValidateVersion("=2.0", "2.0.1", b)
}
func BenchmarkValidateVersionTilde(b *testing.B) {
benchValidateVersion("~2.0.0", "2.0.5", b)
}
func BenchmarkValidateVersionTildeFail(b *testing.B) {
benchValidateVersion("~2.0.0", "1.0.5", b)
}
func BenchmarkValidateVersionCaret(b *testing.B) {
benchValidateVersion("^2.0.0", "2.1.0", b)
}
func BenchmarkValidateVersionCaretFail(b *testing.B) {
benchValidateVersion("^2.0.0", "4.1.0", b)
}
func BenchmarkValidateVersionWildcard(b *testing.B) {
benchValidateVersion("1.x", "1.4.0", b)
}
func BenchmarkValidateVersionWildcardFail(b *testing.B) {
benchValidateVersion("1.x", "2.4.0", b)
}
func BenchmarkValidateVersionRange(b *testing.B) {
benchValidateVersion(">=2.1.x, <3.1.0", "2.4.5", b)
}
func BenchmarkValidateVersionRangeFail(b *testing.B) {
benchValidateVersion(">=2.1.x, <3.1.0", "1.4.5", b)
}
func BenchmarkValidateVersionUnion(b *testing.B) {
benchValidateVersion("~2.0.0 || =3.1.0", "3.1.0", b)
}
func BenchmarkValidateVersionUnionFail(b *testing.B) {
benchValidateVersion("~2.0.0 || =3.1.0", "3.1.1", b)
}
/* Version creation benchmarks */
func benchNewVersion(v string, b *testing.B) {
for i := 0; i < b.N; i++ {
semver.NewVersion(v)
}
}
func BenchmarkNewVersionSimple(b *testing.B) {
benchNewVersion("1.0.0", b)
}
func BenchmarkNewVersionPre(b *testing.B) {
benchNewVersion("1.0.0-alpha", b)
}
func BenchmarkNewVersionMeta(b *testing.B) {
benchNewVersion("1.0.0+metadata", b)
}
func BenchmarkNewVersionMetaDash(b *testing.B) {
benchNewVersion("1.0.0+metadata-dash", b)
}
================================================
FILE: vendor/github.com/Masterminds/semver/collection.go
================================================
package semver
// Collection is a collection of Version instances and implements the sort
// interface. See the sort package for more details.
// https://golang.org/pkg/sort/
type Collection []*Version

// Len returns the length of a collection. The number of Version instances
// on the slice.
func (c Collection) Len() int {
	return len(c)
}

// Less is needed for the sort interface to compare two Version objects on the
// slice. It checks if one is less than the other.
func (c Collection) Less(i, j int) bool {
	return c[i].LessThan(c[j])
}

// Swap is needed for the sort interface to replace the Version objects
// at two different positions in the slice.
func (c Collection) Swap(i, j int) {
	c[i], c[j] = c[j], c[i]
}
================================================
FILE: vendor/github.com/Masterminds/semver/collection_test.go
================================================
package semver
import (
"reflect"
"sort"
"testing"
)
func TestCollection(t *testing.T) {
raw := []string{
"1.2.3",
"1.0",
"1.3",
"2",
"0.4.2",
}
vs := make([]*Version, len(raw))
for i, r := range raw {
v, err := NewVersion(r)
if err != nil {
t.Errorf("Error parsing version: %s", err)
}
vs[i] = v
}
sort.Sort(Collection(vs))
e := []string{
"0.4.2",
"1.0.0",
"1.2.3",
"1.3.0",
"2.0.0",
}
a := make([]string, len(vs))
for i, v := range vs {
a[i] = v.String()
}
if !reflect.DeepEqual(a, e) {
t.Error("Sorting Collection failed")
}
}
================================================
FILE: vendor/github.com/Masterminds/semver/constraints.go
================================================
package semver
import (
"errors"
"fmt"
"regexp"
"strings"
)
// Constraints is one or more constraint that a semantic version can be
// checked against.
type Constraints struct {
	// constraints holds OR-ed groups of AND-ed constraints: a version
	// satisfies the set when it passes every constraint in at least one
	// inner slice.
	constraints [][]*constraint
}
// NewConstraint returns a Constraints instance that a Version instance can
// be checked against. If there is a parse error it will be returned.
func NewConstraint(c string) (*Constraints, error) {
	// Hyphen ranges (e.g. "1.2 - 1.4.5") are rewritten into plain ">=/<="
	// comparisons so the parsing below only has to understand operators.
	c = rewriteRange(c)

	groups := strings.Split(c, "||")
	all := make([][]*constraint, len(groups))
	for gi, group := range groups {
		parts := strings.Split(group, ",")
		ands := make([]*constraint, len(parts))
		for pi, part := range parts {
			parsed, err := parseConstraint(part)
			if err != nil {
				return nil, err
			}
			ands[pi] = parsed
		}
		all[gi] = ands
	}

	return &Constraints{constraints: all}, nil
}
// Check tests if a version satisfies the constraints.
func (cs Constraints) Check(v *Version) bool {
	// The constraints are stored as OR-ed groups of AND-ed checks; the
	// version passes as soon as one group accepts it entirely.
group:
	for _, andGroup := range cs.constraints {
		for _, c := range andGroup {
			if !c.check(v) {
				continue group
			}
		}
		return true
	}
	return false
}
// Validate checks if a version satisfies a constraint. If not a slice of
// reasons for the failure are returned in addition to a bool.
func (cs Constraints) Validate(v *Version) (bool, []error) {
	var failures []error
	// Walk the OR-ed groups in order; a fully passing group satisfies the
	// whole set and returns immediately with no errors. Otherwise every
	// failing constraint across all groups contributes a message.
	for _, andGroup := range cs.constraints {
		satisfied := true
		for _, c := range andGroup {
			if c.check(v) {
				continue
			}
			satisfied = false
			failures = append(failures, fmt.Errorf(c.msg, v, c.orig))
		}
		if satisfied {
			return true, []error{}
		}
	}
	return false, failures
}
// constraintOps maps operator tokens to their check functions, constraintMsg
// maps them to their Validate failure messages, and constraintRegex parses a
// single "<op> <version>" expression. All three are populated in init.
var constraintOps map[string]cfunc
var constraintMsg map[string]string
var constraintRegex *regexp.Regexp
func init() {
	// Operator table mapping each comparison operator (including the
	// aliases => and =<, and the bare/no-operator form) to its check
	// function.
	constraintOps = map[string]cfunc{
		"":   constraintTildeOrEqual,
		"=":  constraintTildeOrEqual,
		"!=": constraintNotEqual,
		">":  constraintGreaterThan,
		"<":  constraintLessThan,
		">=": constraintGreaterThanEqual,
		"=>": constraintGreaterThanEqual,
		"<=": constraintLessThanEqual,
		"=<": constraintLessThanEqual,
		"~":  constraintTilde,
		"~>": constraintTilde,
		"^":  constraintCaret,
	}

	// Printf-style failure messages used by Constraints.Validate; the first
	// verb receives the version being checked and the second the original
	// constraint text.
	constraintMsg = map[string]string{
		"":   "%s is not equal to %s",
		"=":  "%s is not equal to %s",
		"!=": "%s is equal to %s",
		">":  "%s is less than or equal to %s",
		"<":  "%s is greater than or equal to %s",
		">=": "%s is less than %s",
		"=>": "%s is less than %s",
		"<=": "%s is greater than %s",
		"=<": "%s is greater than %s",
		"~":  "%s does not have same major and minor version as %s",
		"~>": "%s does not have same major and minor version as %s",
		"^":  "%s does not have same major version as %s",
	}

	// Build the operator alternation for the parsing regex. Map iteration
	// order is random, which is harmless since the tokens only become an
	// alternation.
	ops := make([]string, 0, len(constraintOps))
	for k := range constraintOps {
		ops = append(ops, regexp.QuoteMeta(k))
	}

	constraintRegex = regexp.MustCompile(fmt.Sprintf(
		`^\s*(%s)\s*(%s)\s*$`,
		strings.Join(ops, "|"),
		cvRegex))

	// Matches hyphen ranges such as "1.2 - 3.4.5" anywhere in the input;
	// used by rewriteRange.
	constraintRangeRegex = regexp.MustCompile(fmt.Sprintf(
		`\s*(%s)\s*-\s*(%s)\s*`,
		cvRegex, cvRegex))
}
// An individual constraint
type constraint struct {
	// The callback function for the constraint. It performs the logic for
	// the constraint.
	function cfunc

	// Printf-style message reported by Validate when the check fails.
	msg string

	// The version used in the constraint check. For example, if a constraint
	// is '<= 2.0.0' then con is a Version instance representing 2.0.0.
	con *Version

	// The original parsed version (e.g., 4.x from != 4.x)
	orig string

	// When an x is used as part of the version (e.g., 1.x)
	minorDirty bool
	dirty      bool
}
// Check if a version meets the constraint
func (c *constraint) check(v *Version) bool {
	return c.function(v, c)
}

// cfunc is the signature shared by all constraint check functions.
type cfunc func(v *Version, c *constraint) bool
// parseConstraint parses a single expression such as ">= 1.2.x" into a
// constraint. The operator is looked up in constraintOps/constraintMsg and
// any wildcard segments (x, X, *) are normalized to 0, recording the
// dirty/minorDirty flags the comparison functions rely on.
func parseConstraint(c string) (*constraint, error) {
	m := constraintRegex.FindStringSubmatch(c)
	if m == nil {
		return nil, fmt.Errorf("improper constraint: %s", c)
	}

	// m[1] is the operator; m[2] the whole version expression; m[3]-m[5]
	// the major/minor/patch segments; m[6] the prerelease suffix (if any).
	ver := m[2]
	orig := ver
	minorDirty := false
	dirty := false
	if isX(m[3]) {
		// Wildcard major (e.g. "*"): normalize everything to zero.
		ver = "0.0.0"
		dirty = true
	} else if isX(strings.TrimPrefix(m[4], ".")) {
		// Wildcard minor (e.g. "1.x"): zero the minor and patch segments.
		minorDirty = true
		dirty = true
		ver = fmt.Sprintf("%s.0.0%s", m[3], m[6])
	} else if isX(strings.TrimPrefix(m[5], ".")) {
		// Wildcard patch (e.g. "1.2.x"): zero only the patch segment.
		dirty = true
		ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6])
	}

	con, err := NewVersion(ver)
	if err != nil {

		// The constraintRegex should catch any regex parsing errors. So,
		// we should never get here.
		return nil, errors.New("constraint Parser Error")
	}

	cs := &constraint{
		function:   constraintOps[m[1]],
		msg:        constraintMsg[m[1]],
		con:        con,
		orig:       orig,
		minorDirty: minorDirty,
		dirty:      dirty,
	}
	return cs, nil
}
// Constraint functions

// constraintNotEqual reports whether v differs from the constraint version.
// With wildcards (dirty) only the concrete leading segments take part in the
// comparison: "!=4.x" matches any version whose major is not 4.
//
// Fix: the original body had a dead `else if c.minorDirty { return false }`
// branch immediately followed by `return false` — both paths returned the
// same value. The logic below is flattened with an identical truth table.
func constraintNotEqual(v *Version, c *constraint) bool {
	if c.dirty {
		if c.con.Major() != v.Major() {
			return true
		}
		if c.minorDirty {
			// Minor is a wildcard, so matching the major alone means
			// the versions are considered equal.
			return false
		}
		return c.con.Minor() != v.Minor()
	}

	return !v.Equal(c.con)
}
// constraintGreaterThan reports whether v is strictly greater than the
// constraint version. Compare only yields -1, 0, or 1, so a positive result
// is exactly the "greater" case.
func constraintGreaterThan(v *Version, c *constraint) bool {
	return v.Compare(c.con) > 0
}
// constraintLessThan reports whether v is strictly less than the constraint
// version. With wildcards (dirty) only the concrete leading segments are
// compared, so "<1.x" accepts anything with major <= 1.
func constraintLessThan(v *Version, c *constraint) bool {
	if !c.dirty {
		return v.Compare(c.con) < 0
	}

	if v.Major() > c.con.Major() {
		return false
	}
	if !c.minorDirty && v.Minor() > c.con.Minor() {
		return false
	}
	return true
}
// constraintGreaterThanEqual reports whether v is greater than or equal to
// the constraint version.
func constraintGreaterThanEqual(v *Version, c *constraint) bool {
	return v.Compare(c.con) >= 0
}
// constraintLessThanEqual reports whether v is less than or equal to the
// constraint version. With wildcards (dirty) only the concrete leading
// segments are compared, so "<=2.x" accepts anything with major <= 2.
func constraintLessThanEqual(v *Version, c *constraint) bool {
	if !c.dirty {
		return v.Compare(c.con) <= 0
	}

	if v.Major() > c.con.Major() {
		return false
	}
	if !c.minorDirty && v.Minor() > c.con.Minor() {
		return false
	}
	return true
}
// ~*, ~>* --> >= 0.0.0 (any)
// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0
// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0
// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0
// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0
// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0
func constraintTilde(v *Version, c *constraint) bool {
	switch {
	case v.LessThan(c.con):
		return false
	case c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0:
		// ~0.0.0 is a special case accepting every version; it behaves
		// as >= 0.0.0.
		return true
	case v.Major() != c.con.Major():
		return false
	case v.Minor() != c.con.Minor() && !c.minorDirty:
		return false
	default:
		return true
	}
}
// When there is a .x (dirty) status it automatically opts in to ~. Otherwise
// it's a straight =
func constraintTildeOrEqual(v *Version, c *constraint) bool {
	if c.dirty {
		// NOTE(review): this mutates the shared constraint so a later
		// Validate failure reports the tilde message rather than the
		// equality message. It also makes the constraint unsafe for
		// concurrent use — confirm callers are single-threaded.
		c.msg = constraintMsg["~"]
		return constraintTilde(v, c)
	}
	return v.Equal(c.con)
}
// ^* --> (any)
// ^2, ^2.x, ^2.x.x --> >=2.0.0, <3.0.0
// ^2.0, ^2.0.x --> >=2.0.0, <3.0.0
// ^1.2, ^1.2.x --> >=1.2.0, <2.0.0
// ^1.2.3 --> >=1.2.3, <2.0.0
// ^1.2.0 --> >=1.2.0, <2.0.0
func constraintCaret(v *Version, c *constraint) bool {
	// The version must be at least the constraint version and share its
	// major number.
	return !v.LessThan(c.con) && v.Major() == c.con.Major()
}
// constraintRangeRegex matches hyphen ranges ("1.2 - 3.4"); it is compiled
// in init because it is assembled from cvRegex.
var constraintRangeRegex *regexp.Regexp

// cvRegex is the version portion of a constraint: an optional leading v, up
// to three dot-separated segments where each segment may be digits or a
// wildcard (x, X, *), plus optional prerelease and build-metadata suffixes.
// NOTE(review): the '|' inside the character classes is a literal pipe, not
// alternation, so the classes also accept a stray '|' character; NewVersion
// rejects such input afterwards — confirm before tightening the pattern.
const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` +
	`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
	`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
// isX reports whether the given version segment is one of the wildcard
// markers ("x", "X", or "*").
func isX(x string) bool {
	return x == "x" || x == "X" || x == "*"
}
// rewriteRange replaces every hyphen range such as "1.2 - 3.4" in the input
// with the equivalent pair of comparisons ">= 1.2, <= 3.4" so the constraint
// parser only has to deal with operators.
func rewriteRange(i string) string {
	matches := constraintRangeRegex.FindAllStringSubmatch(i, -1)
	if matches == nil {
		return i
	}

	out := i
	for _, m := range matches {
		// m[1] and m[11] are the lower and upper version captures.
		replacement := fmt.Sprintf(">= %s, <= %s", m[1], m[11])
		out = strings.Replace(out, m[0], replacement, 1)
	}
	return out
}
================================================
FILE: vendor/github.com/Masterminds/semver/constraints_test.go
================================================
package semver
import (
"reflect"
"testing"
)
func TestParseConstraint(t *testing.T) {
tests := []struct {
in string
f cfunc
v string
err bool
}{
{">= 1.2", constraintGreaterThanEqual, "1.2.0", false},
{"1.0", constraintTildeOrEqual, "1.0.0", false},
{"foo", nil, "", true},
{"<= 1.2", constraintLessThanEqual, "1.2.0", false},
{"=< 1.2", constraintLessThanEqual, "1.2.0", false},
{"=> 1.2", constraintGreaterThanEqual, "1.2.0", false},
{"v1.2", constraintTildeOrEqual, "1.2.0", false},
{"=1.5", constraintTildeOrEqual, "1.5.0", false},
{"> 1.3", constraintGreaterThan, "1.3.0", false},
{"< 1.4.1", constraintLessThan, "1.4.1", false},
}
for _, tc := range tests {
c, err := parseConstraint(tc.in)
if tc.err && err == nil {
t.Errorf("Expected error for %s didn't occur", tc.in)
} else if !tc.err && err != nil {
t.Errorf("Unexpected error for %s", tc.in)
}
// If an error was expected continue the loop and don't try the other
// tests as they will cause errors.
if tc.err {
continue
}
if tc.v != c.con.String() {
t.Errorf("Incorrect version found on %s", tc.in)
}
f1 := reflect.ValueOf(tc.f)
f2 := reflect.ValueOf(c.function)
if f1 != f2 {
t.Errorf("Wrong constraint found for %s", tc.in)
}
}
}
func TestConstraintCheck(t *testing.T) {
tests := []struct {
constraint string
version string
check bool
}{
{"= 2.0", "1.2.3", false},
{"= 2.0", "2.0.0", true},
{"4.1", "4.1.0", true},
{"!=4.1", "4.1.0", false},
{"!=4.1", "5.1.0", true},
{">1.1", "4.1.0", true},
{">1.1", "1.1.0", false},
{"<1.1", "0.1.0", true},
{"<1.1", "1.1.0", false},
{"<1.1", "1.1.1", false},
{">=1.1", "4.1.0", true},
{">=1.1", "1.1.0", true},
{">=1.1", "0.0.9", false},
{"<=1.1", "0.1.0", true},
{"<=1.1", "1.1.0", true},
{"<=1.1", "1.1.1", false},
}
for _, tc := range tests {
c, err := parseConstraint(tc.constraint)
if err != nil {
t.Errorf("err: %s", err)
continue
}
v, err := NewVersion(tc.version)
if err != nil {
t.Errorf("err: %s", err)
continue
}
a := c.check(v)
if a != tc.check {
t.Errorf("Constraint '%s' failing", tc.constraint)
}
}
}
// TestNewConstraint verifies that constraint strings are split into the
// expected number of OR groups and per-group constraint counts, and that
// unparseable input surfaces an error.
//
// Fix: the unexpected-error message read "unexpectederror" (missing space).
func TestNewConstraint(t *testing.T) {
	tests := []struct {
		input string
		ors   int
		count int
		err   bool
	}{
		{">= 1.1", 1, 1, false},
		{"2.0", 1, 1, false},
		{">= bar", 0, 0, true},
		{">= 1.2.3, < 2.0", 1, 2, false},
		{">= 1.2.3, < 2.0 || => 3.0, < 4", 2, 2, false},

		// The 3-4 should be broken into 2 by the range rewriting
		{"3-4 || => 3.0, < 4", 2, 2, false},
	}

	for _, tc := range tests {
		v, err := NewConstraint(tc.input)
		if tc.err && err == nil {
			t.Errorf("expected but did not get error for: %s", tc.input)
			continue
		} else if !tc.err && err != nil {
			t.Errorf("unexpected error for input %s: %s", tc.input, err)
			continue
		}
		if tc.err {
			continue
		}

		l := len(v.constraints)
		if tc.ors != l {
			t.Errorf("Expected %s to have %d ORs but got %d",
				tc.input, tc.ors, l)
		}

		l = len(v.constraints[0])
		if tc.count != l {
			t.Errorf("Expected %s to have %d constraints but got %d",
				tc.input, tc.count, l)
		}
	}
}
func TestConstraintsCheck(t *testing.T) {
tests := []struct {
constraint string
version string
check bool
}{
{"*", "1.2.3", true},
{"~0.0.0", "1.2.3", true},
{"= 2.0", "1.2.3", false},
{"= 2.0", "2.0.0", true},
{"4.1", "4.1.0", true},
{"4.1.x", "4.1.3", true},
{"1.x", "1.4", true},
{"!=4.1", "4.1.0", false},
{"!=4.1", "5.1.0", true},
{"!=4.x", "5.1.0", true},
{"!=4.x", "4.1.0", false},
{"!=4.1.x", "4.2.0", true},
{"!=4.2.x", "4.2.3", false},
{">1.1", "4.1.0", true},
{">1.1", "1.1.0", false},
{"<1.1", "0.1.0", true},
{"<1.1", "1.1.0", false},
{"<1.1", "1.1.1", false},
{"<1.x", "1.1.1", true},
{"<1.x", "2.1.1", false},
{"<1.1.x", "1.2.1", false},
{"<1.1.x", "1.1.500", true},
{"<1.2.x", "1.1.1", true},
{">=1.1", "4.1.0", true},
{">=1.1", "1.1.0", true},
{">=1.1", "0.0.9", false},
{"<=1.1", "0.1.0", true},
{"<=1.1", "1.1.0", true},
{"<=1.x", "1.1.0", true},
{"<=2.x", "3.1.0", false},
{"<=1.1", "1.1.1", false},
{"<=1.1.x", "1.2.500", false},
{">1.1, <2", "1.1.1", true},
{">1.1, <3", "4.3.2", false},
{">=1.1, <2, !=1.2.3", "1.2.3", false},
{">=1.1, <2, !=1.2.3 || > 3", "3.1.2", true},
{">=1.1, <2, !=1.2.3 || >= 3", "3.0.0", true},
{">=1.1, <2, !=1.2.3 || > 3", "3.0.0", false},
{">=1.1, <2, !=1.2.3 || > 3", "1.2.3", false},
{"1.1 - 2", "1.1.1", true},
{"1.1-3", "4.3.2", false},
{"^1.1", "1.1.1", true},
{"^1.1", "4.3.2", false},
{"^1.x", "1.1.1", true},
{"^2.x", "1.1.1", false},
{"^1.x", "2.1.1", false},
{"~*", "2.1.1", true},
{"~1.x", "2.1.1", false},
{"~1.x", "1.3.5", true},
{"~1.x", "1.4", true},
{"~1.1", "1.1.1", true},
{"~1.2.3", "1.2.5", true},
{"~1.2.3", "1.2.2", false},
{"~1.2.3", "1.3.2", false},
{"~1.1", "1.2.3", false},
{"~1.3", "2.4.5", false},
}
for _, tc := range tests {
c, err := NewConstraint(tc.constraint)
if err != nil {
t.Errorf("err: %s", err)
continue
}
v, err := NewVersion(tc.version)
if err != nil {
t.Errorf("err: %s", err)
continue
}
a := c.Check(v)
if a != tc.check {
t.Errorf("Constraint '%s' failing with '%s'", tc.constraint, tc.version)
}
}
}
func TestRewriteRange(t *testing.T) {
tests := []struct {
c string
nc string
}{
{"2-3", ">= 2, <= 3"},
{"2-3, 2-3", ">= 2, <= 3,>= 2, <= 3"},
{"2-3, 4.0.0-5.1", ">= 2, <= 3,>= 4.0.0, <= 5.1"},
}
for _, tc := range tests {
o := rewriteRange(tc.c)
if o != tc.nc {
t.Errorf("Range %s rewritten incorrectly as '%s'", tc.c, o)
}
}
}
func TestIsX(t *testing.T) {
tests := []struct {
t string
c bool
}{
{"A", false},
{"%", false},
{"X", true},
{"x", true},
{"*", true},
}
for _, tc := range tests {
a := isX(tc.t)
if a != tc.c {
t.Errorf("Function isX error on %s", tc.t)
}
}
}
func TestConstraintsValidate(t *testing.T) {
tests := []struct {
constraint string
version string
check bool
}{
{"*", "1.2.3", true},
{"~0.0.0", "1.2.3", true},
{"= 2.0", "1.2.3", false},
{"= 2.0", "2.0.0", true},
{"4.1", "4.1.0", true},
{"4.1.x", "4.1.3", true},
{"1.x", "1.4", true},
{"!=4.1", "4.1.0", false},
{"!=4.1", "5.1.0", true},
{"!=4.x", "5.1.0", true},
{"!=4.x", "4.1.0", false},
{"!=4.1.x", "4.2.0", true},
{"!=4.2.x", "4.2.3", false},
{">1.1", "4.1.0", true},
{">1.1", "1.1.0", false},
{"<1.1", "0.1.0", true},
{"<1.1", "1.1.0", false},
{"<1.1", "1.1.1", false},
{"<1.x", "1.1.1", true},
{"<1.x", "2.1.1", false},
{"<1.1.x", "1.2.1", false},
{"<1.1.x", "1.1.500", true},
{"<1.2.x", "1.1.1", true},
{">=1.1", "4.1.0", true},
{">=1.1", "1.1.0", true},
{">=1.1", "0.0.9", false},
{"<=1.1", "0.1.0", true},
{"<=1.1", "1.1.0", true},
{"<=1.x", "1.1.0", true},
{"<=2.x", "3.1.0", false},
{"<=1.1", "1.1.1", false},
{"<=1.1.x", "1.2.500", false},
{">1.1, <2", "1.1.1", true},
{">1.1, <3", "4.3.2", false},
{">=1.1, <2, !=1.2.3", "1.2.3", false},
{">=1.1, <2, !=1.2.3 || > 3", "3.1.2", true},
{">=1.1, <2, !=1.2.3 || >= 3", "3.0.0", true},
{">=1.1, <2, !=1.2.3 || > 3", "3.0.0", false},
{">=1.1, <2, !=1.2.3 || > 3", "1.2.3", false},
{"1.1 - 2", "1.1.1", true},
{"1.1-3", "4.3.2", false},
{"^1.1", "1.1.1", true},
{"^1.1", "4.3.2", false},
{"^1.x", "1.1.1", true},
{"^2.x", "1.1.1", false},
{"^1.x", "2.1.1", false},
{"~*", "2.1.1", true},
{"~1.x", "2.1.1", false},
{"~1.x", "1.3.5", true},
{"~1.x", "1.4", true},
{"~1.1", "1.1.1", true},
{"~1.2.3", "1.2.5", true},
{"~1.2.3", "1.2.2", false},
{"~1.2.3", "1.3.2", false},
{"~1.1", "1.2.3", false},
{"~1.3", "2.4.5", false},
}
for _, tc := range tests {
c, err := NewConstraint(tc.constraint)
if err != nil {
t.Errorf("err: %s", err)
continue
}
v, err := NewVersion(tc.version)
if err != nil {
t.Errorf("err: %s", err)
continue
}
a, msgs := c.Validate(v)
if a != tc.check {
t.Errorf("Constraint '%s' failing with '%s'", tc.constraint, tc.version)
} else if a == false && len(msgs) == 0 {
t.Errorf("%q failed with %q but no errors returned", tc.constraint, tc.version)
}
// if a == false {
// for _, m := range msgs {
// t.Errorf("%s", m)
// }
// }
}
v, err := NewVersion("1.2.3")
if err != nil {
t.Errorf("err: %s", err)
}
c, err := NewConstraint("!= 1.2.5, ^2, <= 1.1.x")
if err != nil {
t.Errorf("err: %s", err)
}
_, msgs := c.Validate(v)
if len(msgs) != 2 {
t.Error("Invalid number of validations found")
}
e := msgs[0].Error()
if e != "1.2.3 does not have same major version as 2" {
t.Error("Did not get expected message: 1.2.3 does not have same major version as 2")
}
e = msgs[1].Error()
if e != "1.2.3 is greater than 1.1.x" {
t.Error("Did not get expected message: 1.2.3 is greater than 1.1.x")
}
tests2 := []struct {
constraint, version, msg string
}{
{"= 2.0", "1.2.3", "1.2.3 is not equal to 2.0"},
{"!=4.1", "4.1.0", "4.1.0 is equal to 4.1"},
{"!=4.x", "4.1.0", "4.1.0 is equal to 4.x"},
{"!=4.2.x", "4.2.3", "4.2.3 is equal to 4.2.x"},
{">1.1", "1.1.0", "1.1.0 is less than or equal to 1.1"},
{"<1.1", "1.1.0", "1.1.0 is greater than or equal to 1.1"},
{"<1.1", "1.1.1", "1.1.1 is greater than or equal to 1.1"},
{"<1.x", "2.1.1", "2.1.1 is greater than or equal to 1.x"},
{"<1.1.x", "1.2.1", "1.2.1 is greater than or equal to 1.1.x"},
{">=1.1", "0.0.9", "0.0.9 is less than 1.1"},
{"<=2.x", "3.1.0", "3.1.0 is greater than 2.x"},
{"<=1.1", "1.1.1", "1.1.1 is greater than 1.1"},
{"<=1.1.x", "1.2.500", "1.2.500 is greater than 1.1.x"},
{">1.1, <3", "4.3.2", "4.3.2 is greater than or equal to 3"},
{">=1.1, <2, !=1.2.3", "1.2.3", "1.2.3 is equal to 1.2.3"},
{">=1.1, <2, !=1.2.3 || > 3", "3.0.0", "3.0.0 is greater than or equal to 2"},
{">=1.1, <2, !=1.2.3 || > 3", "1.2.3", "1.2.3 is equal to 1.2.3"},
{"1.1-3", "4.3.2", "4.3.2 is greater than 3"},
{"^1.1", "4.3.2", "4.3.2 does not have same major version as 1.1"},
{"^2.x", "1.1.1", "1.1.1 does not have same major version as 2.x"},
{"^1.x", "2.1.1", "2.1.1 does not have same major version as 1.x"},
{"~1.x", "2.1.1", "2.1.1 does not have same major and minor version as 1.x"},
{"~1.2.3", "1.2.2", "1.2.2 does not have same major and minor version as 1.2.3"},
{"~1.2.3", "1.3.2", "1.3.2 does not have same major and minor version as 1.2.3"},
{"~1.1", "1.2.3", "1.2.3 does not have same major and minor version as 1.1"},
{"~1.3", "2.4.5", "2.4.5 does not have same major and minor version as 1.3"},
}
for _, tc := range tests2 {
c, err := NewConstraint(tc.constraint)
if err != nil {
t.Errorf("err: %s", err)
continue
}
v, err := NewVersion(tc.version)
if err != nil {
t.Errorf("err: %s", err)
continue
}
_, msgs := c.Validate(v)
e := msgs[0].Error()
if e != tc.msg {
t.Errorf("Did not get expected message %q: %s", tc.msg, e)
}
}
}
================================================
FILE: vendor/github.com/Masterminds/semver/doc.go
================================================
/*
Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go.
Specifically it provides the ability to:
* Parse semantic versions
* Sort semantic versions
* Check if a semantic version fits within a set of constraints
* Optionally work with a `v` prefix
Parsing Semantic Versions
To parse a semantic version use the `NewVersion` function. For example,
v, err := semver.NewVersion("1.2.3-beta.1+build345")
If there is an error the version wasn't parseable. The version object has methods
to get the parts of the version, compare it to other versions, convert the
version back into a string, and get the original string. For more details
please see the documentation at https://godoc.org/github.com/Masterminds/semver.
Sorting Semantic Versions
A set of versions can be sorted using the `sort` package from the standard library.
For example,
raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",}
vs := make([]*semver.Version, len(raw))
for i, r := range raw {
v, err := semver.NewVersion(r)
if err != nil {
t.Errorf("Error parsing version: %s", err)
}
vs[i] = v
}
sort.Sort(semver.Collection(vs))
Checking Version Constraints
Checking a version against version constraints is one of the most featureful
parts of the package.
c, err := semver.NewConstraint(">= 1.2.3")
if err != nil {
// Handle constraint not being parseable.
}
    v, err := semver.NewVersion("1.3")
if err != nil {
// Handle version not being parseable.
}
// Check if the version meets the constraints. The a variable will be true.
a := c.Check(v)
Basic Comparisons
There are two elements to the comparisons. First, a comparison string is a list
of comma separated and comparisons. These are then separated by || separated or
comparisons. For example, `">= 1.2, < 3.0.0 || >= 4.2.3"` is looking for a
comparison that's greater than or equal to 1.2 and less than 3.0.0 or is
greater than or equal to 4.2.3.
The basic comparisons are:
* `=`: equal (aliased to no operator)
* `!=`: not equal
* `>`: greater than
* `<`: less than
* `>=`: greater than or equal to
* `<=`: less than or equal to
Hyphen Range Comparisons
There are multiple methods to handle ranges and the first is hyphens ranges.
These look like:
* `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5`
* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4, <= 4.5`
Wildcards In Comparisons
The `x`, `X`, and `*` characters can be used as a wildcard character. This works
for all comparison operators. When used on the `=` operator it falls
back to the patch level comparison (see tilde below). For example,
* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
* `>= 1.2.x` is equivalent to `>= 1.2.0`
* `<= 2.x` is equivalent to `<= 3`
* `*` is equivalent to `>= 0.0.0`
Tilde Range Comparisons (Patch)
The tilde (`~`) comparison operator is for patch level ranges when a minor
version is specified and major level changes when the minor number is missing.
For example,
* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0`
* `~1` is equivalent to `>= 1, < 2`
* `~2.3` is equivalent to `>= 2.3, < 2.4`
* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0`
* `~1.x` is equivalent to `>= 1, < 2`
Caret Range Comparisons (Major)
The caret (`^`) comparison operator is for major level changes. This is useful
when comparisons of API versions as a major change is API breaking. For example,
* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0`
* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0`
* `^2.3` is equivalent to `>= 2.3, < 3`
* `^2.x` is equivalent to `>= 2.0.0, < 3`
*/
package semver
================================================
FILE: vendor/github.com/Masterminds/semver/version.go
================================================
package semver
import (
"bytes"
"errors"
"fmt"
"regexp"
"strconv"
"strings"
)
// The compiled version of the regex created at init() is cached here so it
// only needs to be created once.
var versionRegex *regexp.Regexp
var (
// ErrInvalidSemVer is returned when a version is found to be invalid when
// being parsed.
ErrInvalidSemVer = errors.New("Invalid Semantic Version")
)
// SemVerRegex is the regular expression used to parse a semantic version.
const SemVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` +
`(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` +
`(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?`
// Version represents a single semantic version.
type Version struct {
major, minor, patch int64
pre string
metadata string
original string
}
func init() {
versionRegex = regexp.MustCompile("^" + SemVerRegex + "$")
}
// NewVersion parses a given version and returns an instance of Version or
// an error if unable to parse the version.
//
// Fix: the minor and patch branches carried redundant `else { ... = 0 }`
// assignments — the struct fields are already zero-valued — so the branches
// are flattened; behavior is unchanged.
func NewVersion(v string) (*Version, error) {
	m := versionRegex.FindStringSubmatch(v)
	if m == nil {
		return nil, ErrInvalidSemVer
	}

	// m[1]-m[3] are major/minor/patch, m[5] the prerelease, m[8] the
	// build metadata.
	sv := &Version{
		metadata: m[8],
		pre:      m[5],
		original: v,
	}

	var err error
	// The major segment is always present when the regex matched.
	sv.major, err = strconv.ParseInt(m[1], 10, 32)
	if err != nil {
		return nil, fmt.Errorf("Error parsing version segment: %s", err)
	}

	// Minor and patch are optional; the zero value already covers the
	// missing case.
	if m[2] != "" {
		sv.minor, err = strconv.ParseInt(strings.TrimPrefix(m[2], "."), 10, 32)
		if err != nil {
			return nil, fmt.Errorf("Error parsing version segment: %s", err)
		}
	}
	if m[3] != "" {
		sv.patch, err = strconv.ParseInt(strings.TrimPrefix(m[3], "."), 10, 32)
		if err != nil {
			return nil, fmt.Errorf("Error parsing version segment: %s", err)
		}
	}

	return sv, nil
}
// String converts a Version object to a string.
// Note, if the original version contained a leading v this version will not.
// See the Original() method to retrieve the original value. Semantic Versions
// don't contain a leading v per the spec. Instead it's optional on
// implementation.
func (v *Version) String() string {
	var b bytes.Buffer
	fmt.Fprintf(&b, "%d.%d.%d", v.major, v.minor, v.patch)
	if v.pre != "" {
		b.WriteString("-")
		b.WriteString(v.pre)
	}
	if v.metadata != "" {
		b.WriteString("+")
		b.WriteString(v.metadata)
	}
	return b.String()
}
// Original returns the original value passed in to be parsed.
func (v *Version) Original() string {
	return v.original
}

// Major returns the major version.
func (v *Version) Major() int64 {
	return v.major
}

// Minor returns the minor version.
func (v *Version) Minor() int64 {
	return v.minor
}

// Patch returns the patch version.
func (v *Version) Patch() int64 {
	return v.patch
}

// Prerelease returns the pre-release version.
func (v *Version) Prerelease() string {
	return v.pre
}

// Metadata returns the metadata on the version.
func (v *Version) Metadata() string {
	return v.metadata
}

// The three comparison helpers below all delegate to Compare, so they share
// its semantics: build metadata is ignored and a prerelease ranks below the
// bare version.

// LessThan tests if one version is less than another one.
func (v *Version) LessThan(o *Version) bool {
	return v.Compare(o) < 0
}

// GreaterThan tests if one version is greater than another one.
func (v *Version) GreaterThan(o *Version) bool {
	return v.Compare(o) > 0
}

// Equal tests if two versions are equal to each other.
// Note, versions can be equal with different metadata since metadata
// is not considered part of the comparable version.
func (v *Version) Equal(o *Version) bool {
	return v.Compare(o) == 0
}
// Compare compares this version to another one. It returns -1, 0, or 1 if
// the version smaller, equal, or larger than the other version.
//
// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is
// lower than the version without a prerelease.
func (v *Version) Compare(o *Version) int {

	// Compare the major, minor, and patch version for differences. If a
	// difference is found return the comparison.
	if d := compareSegment(v.Major(), o.Major()); d != 0 {
		return d
	}
	if d := compareSegment(v.Minor(), o.Minor()); d != 0 {
		return d
	}
	if d := compareSegment(v.Patch(), o.Patch()); d != 0 {
		return d
	}

	// At this point the major, minor, and patch versions are the same.
	ps := v.pre
	po := o.Prerelease()

	// A version without a prerelease outranks one with a prerelease when
	// X.Y.Z are equal (e.g. 1.0.0 > 1.0.0-beta).
	if ps == "" && po == "" {
		return 0
	}
	if ps == "" {
		return 1
	}
	if po == "" {
		return -1
	}

	return comparePrerelease(ps, po)
}
// compareSegment compares two numeric version segments, returning -1, 0, or
// 1 as v is less than, equal to, or greater than o.
func compareSegment(v, o int64) int {
	switch {
	case v < o:
		return -1
	case v > o:
		return 1
	default:
		return 0
	}
}
// comparePrerelease compares two prerelease strings part by part. Parts are
// dot-separated per the semver spec; when one side runs out of parts the
// missing part is treated as empty so trailing identifiers still take part
// in the comparison.
func comparePrerelease(v, o string) int {
	sparts := strings.Split(v, ".")
	oparts := strings.Split(o, ".")

	// Iterate up to the longer of the two lists to avoid out-of-bounds
	// access on the shorter one.
	n := len(sparts)
	if len(oparts) > n {
		n = len(oparts)
	}

	for i := 0; i < n; i++ {
		var sp, op string
		if i < len(sparts) {
			sp = sparts[i]
		}
		if i < len(oparts) {
			op = oparts[i]
		}
		if d := comparePrePart(sp, op); d != 0 {
			return d
		}
	}

	// Every part matched: the prereleases are of equal value even if the
	// full version strings differ elsewhere (e.g. in build metadata).
	return 0
}
// comparePrePart compares a single dot-separated prerelease identifier from
// each version, returning -1, 0, or 1 if s is lower than, equal to, or
// higher than o.
//
// Per SemVer spec item 11, identifiers consisting of only digits are
// compared numerically and a numeric identifier has lower precedence than
// an alphanumeric one. The previous implementation compared everything
// lexically, which ordered "10" before "9".
func comparePrePart(s, o string) int {
	// Fastpath if they are equal
	if s == o {
		return 0
	}
	// When s or o are empty we can use the other in an attempt to determine
	// the response. The empty-vs-numeric / empty-vs-alphanumeric ordering
	// below is preserved exactly from the original (the package tests pin it).
	if o == "" {
		if _, err := strconv.ParseInt(s, 10, 64); err == nil {
			return -1
		}
		return 1
	}
	if s == "" {
		if _, err := strconv.ParseInt(o, 10, 64); err == nil {
			return 1
		}
		return -1
	}
	si, serr := strconv.ParseInt(s, 10, 64)
	oi, oerr := strconv.ParseInt(o, 10, 64)
	switch {
	case serr == nil && oerr == nil:
		// Both identifiers are numeric: compare numerically (SemVer §11).
		if si != oi {
			if si > oi {
				return 1
			}
			return -1
		}
		// Numerically equal but textually different (e.g. "01" vs "1");
		// fall through to the lexical tie-break below.
	case serr == nil:
		// A numeric identifier has lower precedence than an alphanumeric one.
		return -1
	case oerr == nil:
		return 1
	}
	if s > o {
		return 1
	}
	return -1
}
================================================
FILE: vendor/github.com/Masterminds/semver/version_test.go
================================================
package semver
import (
"testing"
)
// TestNewVersion table-drives NewVersion across valid and invalid inputs.
func TestNewVersion(t *testing.T) {
	cases := []struct {
		version string
		err     bool
	}{
		{"1.2.3", false},
		{"v1.2.3", false},
		{"1.0", false},
		{"v1.0", false},
		{"1", false},
		{"v1", false},
		{"1.2.beta", true},
		{"v1.2.beta", true},
		{"foo", true},
		{"1.2-5", false},
		{"v1.2-5", false},
		{"1.2-beta.5", false},
		{"v1.2-beta.5", false},
		{"\n1.2", true},
		{"\nv1.2", true},
		{"1.2.0-x.Y.0+metadata", false},
		{"v1.2.0-x.Y.0+metadata", false},
		{"1.2.0-x.Y.0+metadata-width-hypen", false},
		{"v1.2.0-x.Y.0+metadata-width-hypen", false},
		{"1.2.3-rc1-with-hypen", false},
		{"v1.2.3-rc1-with-hypen", false},
		{"1.2.3.4", true},
		{"v1.2.3.4", true},
	}
	for _, c := range cases {
		_, err := NewVersion(c.version)
		if c.err {
			if err == nil {
				t.Fatalf("expected error for version: %s", c.version)
			}
			continue
		}
		if err != nil {
			t.Fatalf("error for version %s: %s", c.version, err)
		}
	}
}
// TestOriginal verifies Original() round-trips the exact input string,
// including any leading "v" and build metadata.
func TestOriginal(t *testing.T) {
	tests := []string{
		"1.2.3",
		"v1.2.3",
		"1.0",
		"v1.0",
		"1",
		"v1",
		"1.2-5",
		"v1.2-5",
		"1.2-beta.5",
		"v1.2-beta.5",
		"1.2.0-x.Y.0+metadata",
		"v1.2.0-x.Y.0+metadata",
		"1.2.0-x.Y.0+metadata-width-hypen",
		"v1.2.0-x.Y.0+metadata-width-hypen",
		"1.2.3-rc1-with-hypen",
		"v1.2.3-rc1-with-hypen",
	}
	for _, tc := range tests {
		v, err := NewVersion(tc)
		if err != nil {
			t.Errorf("Error parsing version %s", tc)
		}
		o := v.Original()
		if o != tc {
			// Fixed: report the value actually returned by Original() (o),
			// not the *Version, and correct the "originl" typo.
			t.Errorf("Error retrieving original. Expected '%s' but got '%s'", tc, o)
		}
	}
}
// TestParts checks every accessor on a fully-populated version.
func TestParts(t *testing.T) {
	v, err := NewVersion("1.2.3-beta.1+build.123")
	if err != nil {
		t.Error("Error parsing version 1.2.3-beta.1+build.123")
	}
	if got := v.Major(); got != 1 {
		t.Error("Major() returning wrong value")
	}
	if got := v.Minor(); got != 2 {
		t.Error("Minor() returning wrong value")
	}
	if got := v.Patch(); got != 3 {
		t.Error("Patch() returning wrong value")
	}
	if got := v.Prerelease(); got != "beta.1" {
		t.Error("Prerelease() returning wrong value")
	}
	if got := v.Metadata(); got != "build.123" {
		t.Error("Metadata() returning wrong value")
	}
}
// TestString verifies String() renders the normalized X.Y.Z form,
// padding missing minor/patch segments with zeros.
func TestString(t *testing.T) {
	tests := []struct {
		version  string
		expected string
	}{
		{"1.2.3", "1.2.3"},
		{"v1.2.3", "1.2.3"},
		{"1.0", "1.0.0"},
		{"v1.0", "1.0.0"},
		{"1", "1.0.0"},
		{"v1", "1.0.0"},
		{"1.2-5", "1.2.0-5"},
		{"v1.2-5", "1.2.0-5"},
		{"1.2-beta.5", "1.2.0-beta.5"},
		{"v1.2-beta.5", "1.2.0-beta.5"},
		{"1.2.0-x.Y.0+metadata", "1.2.0-x.Y.0+metadata"},
		{"v1.2.0-x.Y.0+metadata", "1.2.0-x.Y.0+metadata"},
		{"1.2.0-x.Y.0+metadata-width-hypen", "1.2.0-x.Y.0+metadata-width-hypen"},
		{"v1.2.0-x.Y.0+metadata-width-hypen", "1.2.0-x.Y.0+metadata-width-hypen"},
		{"1.2.3-rc1-with-hypen", "1.2.3-rc1-with-hypen"},
		{"v1.2.3-rc1-with-hypen", "1.2.3-rc1-with-hypen"},
	}
	for _, tc := range tests {
		v, err := NewVersion(tc.version)
		if err != nil {
			// Fixed: the original passed the whole struct tc to %s
			// (flagged by go vet); pass the version string instead.
			t.Errorf("Error parsing version %s", tc.version)
		}
		s := v.String()
		if s != tc.expected {
			t.Errorf("Error generating string. Expected '%s' but got '%s'", tc.expected, s)
		}
	}
}
// TestCompare exercises precedence ordering including prerelease parts
// and build metadata (which is ignored for comparison).
func TestCompare(t *testing.T) {
	cases := []struct {
		v1       string
		v2       string
		expected int
	}{
		{"1.2.3", "1.5.1", -1},
		{"2.2.3", "1.5.1", 1},
		{"2.2.3", "2.2.2", 1},
		{"3.2-beta", "3.2-beta", 0},
		{"1.3", "1.1.4", 1},
		{"4.2", "4.2-beta", 1},
		{"4.2-beta", "4.2", -1},
		{"4.2-alpha", "4.2-beta", -1},
		{"4.2-alpha", "4.2-alpha", 0},
		{"4.2-beta.2", "4.2-beta.1", 1},
		{"4.2-beta2", "4.2-beta1", 1},
		{"4.2-beta", "4.2-beta.2", -1},
		{"4.2-beta", "4.2-beta.foo", 1},
		{"4.2-beta.2", "4.2-beta", 1},
		{"4.2-beta.foo", "4.2-beta", -1},
		{"1.2+bar", "1.2+baz", 0},
	}
	for _, c := range cases {
		left, err := NewVersion(c.v1)
		if err != nil {
			t.Errorf("Error parsing version: %s", err)
		}
		right, err := NewVersion(c.v2)
		if err != nil {
			t.Errorf("Error parsing version: %s", err)
		}
		if got := left.Compare(right); got != c.expected {
			t.Errorf(
				"Comparison of '%s' and '%s' failed. Expected '%d', got '%d'",
				c.v1, c.v2, c.expected, got,
			)
		}
	}
}
// TestLessThan spot-checks the LessThan convenience wrapper.
func TestLessThan(t *testing.T) {
	cases := []struct {
		v1       string
		v2       string
		expected bool
	}{
		{"1.2.3", "1.5.1", true},
		{"2.2.3", "1.5.1", false},
		{"3.2-beta", "3.2-beta", false},
	}
	for _, c := range cases {
		left, err := NewVersion(c.v1)
		if err != nil {
			t.Errorf("Error parsing version: %s", err)
		}
		right, err := NewVersion(c.v2)
		if err != nil {
			t.Errorf("Error parsing version: %s", err)
		}
		if got := left.LessThan(right); got != c.expected {
			t.Errorf(
				"Comparison of '%s' and '%s' failed. Expected '%t', got '%t'",
				c.v1, c.v2, c.expected, got,
			)
		}
	}
}
// TestGreaterThan spot-checks the GreaterThan convenience wrapper.
func TestGreaterThan(t *testing.T) {
	cases := []struct {
		v1       string
		v2       string
		expected bool
	}{
		{"1.2.3", "1.5.1", false},
		{"2.2.3", "1.5.1", true},
		{"3.2-beta", "3.2-beta", false},
	}
	for _, c := range cases {
		left, err := NewVersion(c.v1)
		if err != nil {
			t.Errorf("Error parsing version: %s", err)
		}
		right, err := NewVersion(c.v2)
		if err != nil {
			t.Errorf("Error parsing version: %s", err)
		}
		if got := left.GreaterThan(right); got != c.expected {
			t.Errorf(
				"Comparison of '%s' and '%s' failed. Expected '%t', got '%t'",
				c.v1, c.v2, c.expected, got,
			)
		}
	}
}
// TestEqual spot-checks Equal, including the case where two versions
// differ only in build metadata and must still compare equal.
func TestEqual(t *testing.T) {
	cases := []struct {
		v1       string
		v2       string
		expected bool
	}{
		{"1.2.3", "1.5.1", false},
		{"2.2.3", "1.5.1", false},
		{"3.2-beta", "3.2-beta", true},
		{"3.2-beta+foo", "3.2-beta+bar", true},
	}
	for _, c := range cases {
		left, err := NewVersion(c.v1)
		if err != nil {
			t.Errorf("Error parsing version: %s", err)
		}
		right, err := NewVersion(c.v2)
		if err != nil {
			t.Errorf("Error parsing version: %s", err)
		}
		if got := left.Equal(right); got != c.expected {
			t.Errorf(
				"Comparison of '%s' and '%s' failed. Expected '%t', got '%t'",
				c.v1, c.v2, c.expected, got,
			)
		}
	}
}
================================================
FILE: vendor/github.com/Masterminds/vcs/.gitignore
================================================
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
================================================
FILE: vendor/github.com/Masterminds/vcs/.travis.yml
================================================
language: go
go:
- 1.3
- 1.4
- 1.5
- 1.6
- tip
# Setting sudo access to false will let Travis CI use containers rather than
# VMs to run the tests. For more details see:
# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/
# - http://docs.travis-ci.com/user/workers/standard-infrastructure/
sudo: false
notifications:
irc: "irc.freenode.net#masterminds"
================================================
FILE: vendor/github.com/Masterminds/vcs/CHANGELOG.md
================================================
# 1.5.1 (2016-03-23)
- Fixing bug parsing some Git commit dates.
# 1.5.0 (2016-03-22)
- Add Travis CI testing for Go 1.6.
- Issue #17: Add CommitInfo method allowing for a common way to get commit
metadata from all VCS.
- Autodetect types that have git@ or hg@ users.
- Autodetect git+ssh, bzr+ssh, git, and svn+ssh scheme urls.
- On Bitbucket for ssh style URLs retrieve the type from the URL. This allows
for private repo type detection.
- Issue #14: Autodetect ssh/scp style urls (thanks chonthu).
# 1.4.1 (2016-03-07)
- Fixes #16: some windows situations are unable to create parent directory.
# 1.4.0 (2016-02-15)
- Adding support for IBM JazzHub.
# 1.3.1 (2016-01-27)
- Issue #12: Failed to checkout Bzr repo when parent directory didn't
exist (thanks cyrilleverrier).
# 1.3.0 (2015-11-09)
- Issue #9: Added Date method to get the date/time of latest commit (thanks kamilchm).
# 1.2.0 (2015-10-29)
- Adding IsDirty method to detect a checkout with uncommitted changes.
# 1.1.4 (2015-10-28)
- Fixed #8: Git IsReference not detecting branches that have not been checked
out yet.
# 1.1.3 (2015-10-21)
- Fixing issue where there are multiple go-import statements for go redirects
# 1.1.2 (2015-10-20)
- Fixes #7: hg not checking out code when Get is called
# 1.1.1 (2015-10-20)
- Issue #6: Allow VCS commands to be run concurrently.
# 1.1.0 (2015-10-19)
- #5: Added output of failed command to returned errors.
# 1.0.0 (2015-10-06)
- Initial release.
================================================
FILE: vendor/github.com/Masterminds/vcs/LICENSE.txt
================================================
The Masterminds
Copyright (C) 2014-2015, Matt Butcher and Matt Farina
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
================================================
FILE: vendor/github.com/Masterminds/vcs/README.md
================================================
# VCS Repository Management for Go
Manage repos in varying version control systems with ease through a common
interface.
[![Build Status](https://travis-ci.org/Masterminds/vcs.svg)](https://travis-ci.org/Masterminds/vcs) [![GoDoc](https://godoc.org/github.com/Masterminds/vcs?status.svg)](https://godoc.org/github.com/Masterminds/vcs) [![Go Report Card](https://goreportcard.com/badge/Masterminds/vcs)](http://goreportcard.com/report/Masterminds/vcs)
## Quick Usage
Quick usage:
remote := "https://github.com/Masterminds/vcs"
local, _ := ioutil.TempDir("", "go-vcs")
repo, err := NewRepo(remote, local)
In this case `NewRepo` will detect the VCS is Git and return a `GitRepo`. All of
the repos implement the `Repo` interface with a common set of features between
them.
## Supported VCS
Git, SVN, Bazaar (Bzr), and Mercurial (Hg) are currently supported. They each
have their own type (e.g., `GitRepo`) that follow a simple naming pattern. Each
type implements the `Repo` interface and has a constructor (e.g., `NewGitRepo`).
The constructors have the same signature as `NewRepo`.
## Features
- Clone or checkout a repository depending on the version control system.
- Pull updates to a repository.
- Get the currently checked out commit id.
- Checkout a commit id, branch, or tag (depending on the availability in the VCS).
- Get a list of tags and branches in the VCS.
- Check if a string value is a valid reference within the VCS.
- More...
For more details see [the documentation](https://godoc.org/github.com/Masterminds/vcs).
## Motivation
The package `golang.org/x/tools/go/vcs` provides some valuable functionality
for working with packages in repositories in varying source control management
systems. That package, while useful and well tested, is designed with a specific
purpose in mind. Our uses went beyond the scope of that package. To implement
our scope we built a package that went beyond the functionality and scope
of `golang.org/x/tools/go/vcs`.
================================================
FILE: vendor/github.com/Masterminds/vcs/bzr.go
================================================
package vcs
import (
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"time"
)
var bzrDetectURL = regexp.MustCompile("parent branch: (?P.+)\n")
// NewBzrRepo creates a new instance of BzrRepo. The remote and local directories
// need to be passed in.
func NewBzrRepo(remote, local string) (*BzrRepo, error) {
	ltype, err := DetectVcsFromFS(local)
	// Found a VCS other than Bzr. Need to report an error.
	if err == nil && ltype != Bzr {
		return nil, ErrWrongVCS
	}
	r := &BzrRepo{}
	r.setRemote(remote)
	r.setLocalPath(local)
	r.Logger = Logger
	// With the other VCS we can check if the endpoint locally is different
	// from the one configured internally. But, with Bzr you can't. For example,
	// if you do `bzr branch https://launchpad.net/govcstestbzrrepo` and then
	// use `bzr info` to get the parent branch you'll find it set to
	// http://bazaar.launchpad.net/~mattfarina/govcstestbzrrepo/trunk/. Notice
	// the change from https to http and the path chance.
	// Here we set the remote to be the local one if none is passed in.
	if err == nil && r.CheckLocal() && remote == "" {
		c := exec.Command("bzr", "info")
		c.Dir = local
		c.Env = envForDir(c.Dir)
		out, err := c.CombinedOutput()
		if err != nil {
			return nil, err
		}
		m := bzrDetectURL.FindStringSubmatch(string(out))
		// If no remote was passed in but one is configured for the locally
		// checked out Bzr repo use that one.
		// Fixed: FindStringSubmatch returns nil when there is no match; the
		// original indexed m[1] unconditionally and would panic on
		// unexpected `bzr info` output.
		if m != nil && m[1] != "" {
			r.setRemote(m[1])
		}
	}
	return r, nil
}
// BzrRepo implements the Repo interface for the Bzr source control.
// It embeds base, which provides the remote/local path accessors and the
// run/runFromDir/referenceList helpers used by the methods below.
type BzrRepo struct {
	base
}
// Vcs retrieves the underlying VCS being implemented.
// For BzrRepo this is always the Bzr type constant.
func (s BzrRepo) Vcs() Type {
	return Bzr
}
// Get is used to perform an initial clone of a repository.
// Bzr cannot create missing parent directories itself, so the parent of
// the local path is created first when absent.
func (s *BzrRepo) Get() error {
	parent := filepath.Dir(filepath.FromSlash(s.LocalPath()))
	if _, serr := os.Stat(parent); os.IsNotExist(serr) {
		if merr := os.MkdirAll(parent, 0755); merr != nil {
			return merr
		}
	}
	_, err := s.run("bzr", "branch", s.Remote(), s.LocalPath())
	return err
}
// Update performs a Bzr pull and update to an existing checkout.
func (s *BzrRepo) Update() error {
	if _, err := s.runFromDir("bzr", "pull"); err != nil {
		return err
	}
	_, err := s.runFromDir("bzr", "update")
	return err
}
// UpdateVersion sets the version of a package currently checked out via Bzr.
func (s *BzrRepo) UpdateVersion(version string) error {
	if _, err := s.runFromDir("bzr", "update", "-r", version); err != nil {
		return err
	}
	return nil
}
// Version retrieves the current version as the trimmed output of
// `bzr revno --tree`.
func (s *BzrRepo) Version() (string, error) {
	out, err := s.runFromDir("bzr", "revno", "--tree")
	if err == nil {
		return strings.TrimSpace(string(out)), nil
	}
	return "", err
}
// Date retrieves the date on the latest commit.
func (s *BzrRepo) Date() (time.Time, error) {
	out, err := s.runFromDir("bzr", "version-info", "--custom", "--template={date}")
	if err != nil {
		return time.Time{}, err
	}
	parsed, perr := time.Parse(longForm, string(out))
	if perr != nil {
		return time.Time{}, perr
	}
	return parsed, nil
}
// CheckLocal verifies the local location is a Bzr repo by probing for a
// .bzr directory inside it.
func (s *BzrRepo) CheckLocal() bool {
	// filepath.Join builds an OS-correct path (the original concatenated
	// with "/"); the if/return-true/return-false is collapsed to the
	// idiomatic single expression.
	_, err := os.Stat(filepath.Join(s.LocalPath(), ".bzr"))
	return err == nil
}
// Branches returns a list of available branches on the repository.
// In Bazaar (Bzr) clones and branches are the same. A different branch will
// have a different URL location which we cannot detect from the repo. This
// is a little different from other VCS.
func (s *BzrRepo) Branches() ([]string, error) {
	// Always empty for Bzr; see the note above.
	return nil, nil
}
// Tags returns a list of available tags on the repository, parsed out of
// the first column of `bzr tags` output.
func (s *BzrRepo) Tags() ([]string, error) {
	out, err := s.runFromDir("bzr", "tags")
	if err != nil {
		return []string{}, err
	}
	return s.referenceList(string(out), `(?m-s)^(\S+)`), nil
}
// IsReference returns if a string is a reference. A reference can be a
// commit id or tag.
func (s *BzrRepo) IsReference(r string) bool {
	// `bzr revno -r` succeeds exactly when the reference resolves.
	_, err := s.runFromDir("bzr", "revno", "-r", r)
	return err == nil
}
// IsDirty returns if the checkout has been modified from the checked
// out reference. A failing `bzr diff` is treated as dirty as well.
func (s *BzrRepo) IsDirty() bool {
	out, err := s.runFromDir("bzr", "diff")
	if err != nil {
		return true
	}
	return len(out) > 0
}
// CommitInfo retrieves metadata about a commit.
// It runs `bzr log -r<id> --log-format=long` and scans the output line by
// line for the committer, timestamp, and message sections. Returns
// ErrRevisionUnavailable when the command fails or the revision is not found.
func (s *BzrRepo) CommitInfo(id string) (*CommitInfo, error) {
	r := "-r" + id
	out, err := s.runFromDir("bzr", "log", r, "--log-format=long")
	if err != nil {
		return nil, ErrRevisionUnavailable
	}
	ci := &CommitInfo{
		Commit: id,
	}
	lines := strings.Split(string(out), "\n")
	// Reference layout for bzr's long-format timestamp line.
	const format = "Mon 2006-01-02 15:04:05 -0700"
	// track/trackOn remember where the "message:" marker was seen so every
	// subsequent line is folded into ci.Message.
	var track int
	var trackOn bool
	// Note, bzr does not appear to use i18m.
	for i, l := range lines {
		if strings.HasPrefix(l, "committer:") {
			ci.Author = strings.TrimSpace(strings.TrimPrefix(l, "committer:"))
		} else if strings.HasPrefix(l, "timestamp:") {
			ts := strings.TrimSpace(strings.TrimPrefix(l, "timestamp:"))
			ci.Date, err = time.Parse(format, ts)
			if err != nil {
				return nil, err
			}
		} else if strings.TrimSpace(l) == "message:" {
			track = i
			trackOn = true
		} else if trackOn && i > track {
			// NOTE(review): message lines are concatenated without a
			// separator, so multi-line messages lose their line breaks —
			// confirm this is intended before changing it.
			ci.Message = ci.Message + l
		}
	}
	ci.Message = strings.TrimSpace(ci.Message)
	// Didn't find the revision
	if ci.Author == "" {
		return nil, ErrRevisionUnavailable
	}
	return ci, nil
}
================================================
FILE: vendor/github.com/Masterminds/vcs/bzr_test.go
================================================
package vcs
import (
"io/ioutil"
"time"
//"log"
"os"
"testing"
)
// Canary test to ensure BzrRepo implements the Repo interface.
// This line fails to compile if BzrRepo ever stops satisfying Repo.
var _ Repo = &BzrRepo{}
// To verify bzr is working we perform integration testing
// with a known bzr service. Due to the long time of repeatedly checking out
// repos these tests are structured to work together.
// NOTE(review): this is a network-dependent integration test against a live
// Launchpad repository; it exercises clone, update, version pinning, tags,
// branches, references, dirty state, and commit metadata in sequence.
func TestBzr(t *testing.T) {
	tempDir, err := ioutil.TempDir("", "go-vcs-bzr-tests")
	if err != nil {
		t.Error(err)
	}
	defer func() {
		err = os.RemoveAll(tempDir)
		if err != nil {
			t.Error(err)
		}
	}()
	repo, err := NewBzrRepo("https://launchpad.net/govcstestbzrrepo", tempDir+"/govcstestbzrrepo")
	if err != nil {
		t.Error(err)
	}
	if repo.Vcs() != Bzr {
		t.Error("Bzr is detecting the wrong type")
	}
	// Check the basic getters.
	if repo.Remote() != "https://launchpad.net/govcstestbzrrepo" {
		t.Error("Remote not set properly")
	}
	if repo.LocalPath() != tempDir+"/govcstestbzrrepo" {
		t.Error("Local disk location not set properly")
	}
	//Logger = log.New(os.Stdout, "", log.LstdFlags)
	// Do an initial clone.
	err = repo.Get()
	if err != nil {
		t.Errorf("Unable to clone Bzr repo. Err was %s", err)
	}
	// Verify Bzr repo is a Bzr repo
	if repo.CheckLocal() == false {
		t.Error("Problem checking out repo or Bzr CheckLocal is not working")
	}
	// Test internal lookup mechanism used outside of Bzr specific functionality.
	ltype, err := DetectVcsFromFS(tempDir + "/govcstestbzrrepo")
	if err != nil {
		t.Error("detectVcsFromFS unable to Bzr repo")
	}
	if ltype != Bzr {
		t.Errorf("detectVcsFromFS detected %s instead of Bzr type", ltype)
	}
	// Test NewRepo on existing checkout. This should simply provide a working
	// instance without error based on looking at the local directory.
	nrepo, nrerr := NewRepo("https://launchpad.net/govcstestbzrrepo", tempDir+"/govcstestbzrrepo")
	if nrerr != nil {
		t.Error(nrerr)
	}
	// Verify the right oject is returned. It will check the local repo type.
	if nrepo.CheckLocal() == false {
		t.Error("Wrong version returned from NewRepo")
	}
	err = repo.UpdateVersion("2")
	if err != nil {
		t.Errorf("Unable to update Bzr repo version. Err was %s", err)
	}
	// Use Version to verify we are on the right version.
	v, err := repo.Version()
	if v != "2" {
		t.Error("Error checking checked out Bzr version")
	}
	if err != nil {
		t.Error(err)
	}
	// Use Date to verify we are on the right commit.
	d, err := repo.Date()
	if d.Format(longForm) != "2015-07-31 09:50:42 -0400" {
		t.Error("Error checking checked out Bzr commit date")
	}
	if err != nil {
		t.Error(err)
	}
	// Perform an update.
	err = repo.Update()
	if err != nil {
		t.Error(err)
	}
	v, err = repo.Version()
	if v != "3" {
		t.Error("Error checking checked out Bzr version")
	}
	if err != nil {
		t.Error(err)
	}
	tags, err := repo.Tags()
	if err != nil {
		t.Error(err)
	}
	if tags[0] != "1.0.0" {
		t.Error("Bzr tags is not reporting the correct version")
	}
	branches, err := repo.Branches()
	if err != nil {
		t.Error(err)
	}
	if len(branches) != 0 {
		t.Error("Bzr is incorrectly returning branches")
	}
	if repo.IsReference("1.0.0") != true {
		t.Error("Bzr is reporting a reference is not one")
	}
	if repo.IsReference("foo") == true {
		t.Error("Bzr is reporting a non-existant reference is one")
	}
	if repo.IsDirty() == true {
		t.Error("Bzr incorrectly reporting dirty")
	}
	ci, err := repo.CommitInfo("3")
	if err != nil {
		t.Error(err)
	}
	if ci.Commit != "3" {
		t.Error("Bzr.CommitInfo wrong commit id")
	}
	// NOTE(review): this literal ends in a trailing space and looks like it
	// was truncated by an extraction step that stripped angle brackets
	// (upstream compares against an author that includes an email address
	// in <...>). Verify against the upstream test before relying on it.
	if ci.Author != "Matt Farina " {
		t.Error("Bzr.CommitInfo wrong author")
	}
	if ci.Message != "Updated Readme with pointer." {
		t.Error("Bzr.CommitInfo wrong message")
	}
	ti, err := time.Parse(time.RFC1123Z, "Fri, 31 Jul 2015 09:51:37 -0400")
	if err != nil {
		t.Error(err)
	}
	if !ti.Equal(ci.Date) {
		t.Error("Bzr.CommitInfo wrong date")
	}
	_, err = repo.CommitInfo("asdfasdfasdf")
	if err != ErrRevisionUnavailable {
		t.Error("Bzr didn't return expected ErrRevisionUnavailable")
	}
}
// TestBzrCheckLocal verifies repo.CheckLocal fails for non-Bzr directories.
// TestBzr already covers the positive case on a valid repo.
func TestBzrCheckLocal(t *testing.T) {
	tempDir, err := ioutil.TempDir("", "go-vcs-bzr-tests")
	if err != nil {
		t.Error(err)
	}
	defer func() {
		if rerr := os.RemoveAll(tempDir); rerr != nil {
			t.Error(rerr)
		}
	}()
	repo, _ := NewBzrRepo("", tempDir)
	if repo.CheckLocal() {
		t.Error("Bzr CheckLocal does not identify non-Bzr location")
	}
	// NewRepo with no local checkout should still provide a working
	// instance without error based on looking at the remote location.
	if _, nrerr := NewRepo("https://launchpad.net/govcstestbzrrepo", tempDir+"/govcstestbzrrepo"); nrerr != nil {
		t.Error(nrerr)
	}
}
================================================
FILE: vendor/github.com/Masterminds/vcs/git.go
================================================
package vcs
import (
"encoding/xml"
"os"
"os/exec"
"path/filepath"
"strings"
"time"
)
// NewGitRepo creates a new instance of GitRepo. The remote and local directories
// need to be passed in.
func NewGitRepo(remote, local string) (*GitRepo, error) {
	ltype, err := DetectVcsFromFS(local)
	// Found a VCS other than Git. Need to report an error.
	if err == nil && ltype != Git {
		return nil, ErrWrongVCS
	}
	r := &GitRepo{}
	r.setRemote(remote)
	r.setLocalPath(local)
	r.RemoteLocation = "origin"
	r.Logger = Logger
	// Make sure the local Git repo is configured the same as the remote when
	// A remote value was passed in.
	// Idiomatic boolean check (was `r.CheckLocal() == true`).
	if err == nil && r.CheckLocal() {
		c := exec.Command("git", "config", "--get", "remote.origin.url")
		c.Dir = local
		c.Env = envForDir(c.Dir)
		out, err := c.CombinedOutput()
		if err != nil {
			return nil, err
		}
		localRemote := strings.TrimSpace(string(out))
		if remote != "" && localRemote != remote {
			return nil, ErrWrongRemote
		}
		// If no remote was passed in but one is configured for the locally
		// checked out Git repo use that one.
		if remote == "" && localRemote != "" {
			r.setRemote(localRemote)
		}
	}
	return r, nil
}
// GitRepo implements the Repo interface for the Git source control.
type GitRepo struct {
	base
	// RemoteLocation is the name of the Git remote used for fetches and
	// reference listing; NewGitRepo sets it to "origin".
	RemoteLocation string
}
// Vcs retrieves the underlying VCS being implemented.
// For GitRepo this is always the Git type constant.
func (s GitRepo) Vcs() Type {
	return Git
}
// Get is used to perform an initial clone of a repository.
func (s *GitRepo) Get() error {
	_, err := s.run("git", "clone", s.Remote(), s.LocalPath())
	// There are some windows cases where Git cannot create the parent directory,
	// if it does not already exist, to the location it's trying to create the
	// repo. Catch that error and try to handle it.
	if err != nil && s.isUnableToCreateDir(err) {
		basePath := filepath.Dir(filepath.FromSlash(s.LocalPath()))
		if _, err := os.Stat(basePath); os.IsNotExist(err) {
			// err is deliberately shadowed inside this if: the retry's
			// result is returned from this branch, while the outer err
			// (the original clone failure) is returned otherwise.
			err = os.MkdirAll(basePath, 0755)
			if err != nil {
				return err
			}
			_, err = s.run("git", "clone", s.Remote(), s.LocalPath())
			return err
		}
	}
	return err
}
// Update performs an Git fetch and pull to an existing checkout.
func (s *GitRepo) Update() error {
	// Perform a fetch to make sure everything is up to date.
	_, err := s.runFromDir("git", "fetch", s.RemoteLocation)
	if err != nil {
		return err
	}
	// When in a detached head state, such as when an individual commit is checked
	// out do not attempt a pull. It will cause an error.
	detached, err := isDetachedHead(s.LocalPath())
	if err != nil {
		return err
	}
	// Idiomatic boolean check (was `detached == true`).
	if detached {
		return nil
	}
	_, err = s.runFromDir("git", "pull")
	return err
}
// UpdateVersion sets the version of a package currently checked out via Git.
func (s *GitRepo) UpdateVersion(version string) error {
	if _, err := s.runFromDir("git", "checkout", version); err != nil {
		return err
	}
	return nil
}
// Version retrieves the current version as the trimmed commit id of HEAD.
func (s *GitRepo) Version() (string, error) {
	out, err := s.runFromDir("git", "rev-parse", "HEAD")
	if err == nil {
		return strings.TrimSpace(string(out)), nil
	}
	return "", err
}
// Date retrieves the date on the latest commit.
func (s *GitRepo) Date() (time.Time, error) {
	out, err := s.runFromDir("git", "log", "-1", "--date=iso", "--pretty=format:%cd")
	if err != nil {
		return time.Time{}, err
	}
	parsed, perr := time.Parse(longForm, string(out))
	if perr != nil {
		return time.Time{}, perr
	}
	return parsed, nil
}
// Branches returns a list of available branches on the RemoteLocation,
// parsed out of `git show-ref` output.
func (s *GitRepo) Branches() ([]string, error) {
	out, err := s.runFromDir("git", "show-ref")
	if err != nil {
		return []string{}, err
	}
	return s.referenceList(string(out), `(?m-s)(?:`+s.RemoteLocation+`)/(\S+)$`), nil
}
// Tags returns a list of available tags on the RemoteLocation, parsed out
// of `git show-ref` output.
func (s *GitRepo) Tags() ([]string, error) {
	out, err := s.runFromDir("git", "show-ref")
	if err != nil {
		return []string{}, err
	}
	return s.referenceList(string(out), `(?m-s)(?:tags)/(\S+)$`), nil
}
// CheckLocal verifies the local location is a Git repo by probing for a
// .git directory inside it.
func (s *GitRepo) CheckLocal() bool {
	// filepath.Join builds an OS-correct path (the original concatenated
	// with "/"); the if/return-true/return-false is collapsed to the
	// idiomatic single expression.
	_, err := os.Stat(filepath.Join(s.LocalPath(), ".git"))
	return err == nil
}
// IsReference returns if a string is a reference. A reference can be a
// commit id, branch, or tag.
func (s *GitRepo) IsReference(r string) bool {
	if _, err := s.runFromDir("git", "rev-parse", "--verify", r); err == nil {
		return true
	}
	// Some refs will fail rev-parse. For example, a remote branch that has
	// not been checked out yet. This next step should pickup the other
	// possible references.
	_, err := s.runFromDir("git", "show-ref", r)
	return err == nil
}
// IsDirty returns if the checkout has been modified from the checked
// out reference. A failing `git diff` is treated as dirty as well.
func (s *GitRepo) IsDirty() bool {
	out, err := s.runFromDir("git", "diff")
	if err != nil {
		return true
	}
	return len(out) > 0
}
// CommitInfo retrieves metadata about a commit.
// Git is asked to emit the commit as a small XML document so the fields can
// be pulled out with encoding/xml.
func (s *GitRepo) CommitInfo(id string) (*CommitInfo, error) {
	// Fixed: this literal had its XML tags stripped by an extraction step
	// ("%H%an <%ae>%aD%s" can never be parsed by xml.Unmarshal below).
	// Restored to the tagged form matching the struct field tags; &lt;/&gt;
	// keep the author email's angle brackets XML-safe.
	fm := `--pretty=format:"<logentry><commit>%H</commit><author>%an &lt;%ae&gt;</author><date>%aD</date><message>%s</message></logentry>"`
	out, err := s.runFromDir("git", "log", id, fm, "-1")
	if err != nil {
		return nil, ErrRevisionUnavailable
	}
	cis := struct {
		Commit  string `xml:"commit"`
		Author  string `xml:"author"`
		Date    string `xml:"date"`
		Message string `xml:"message"`
	}{}
	err = xml.Unmarshal(out, &cis)
	if err != nil {
		return nil, err
	}
	// %aD is RFC 2822 style; parse with the matching reference layout.
	t, err := time.Parse("Mon, _2 Jan 2006 15:04:05 -0700", cis.Date)
	if err != nil {
		return nil, err
	}
	ci := &CommitInfo{
		Commit:  cis.Commit,
		Author:  cis.Author,
		Date:    t,
		Message: cis.Message,
	}
	return ci, nil
}
// isDetachedHead reports whether the checkout at dir is in a detached HEAD
// state, by inspecting `git status -uno` output.
func isDetachedHead(dir string) (bool, error) {
	cmd := exec.Command("git", "status", "-uno")
	cmd.Dir = dir
	cmd.Env = envForDir(cmd.Dir)
	out, err := cmd.CombinedOutput()
	if err != nil {
		return false, err
	}
	return strings.Contains(string(out), "HEAD detached at"), nil
}
// In a multi-langual manner check for the Git error that it couldn't create
// the directory. Git localizes its error output, so the "could not create
// work tree dir" message has to be matched per language; German and Korean
// split the phrase, so both halves are checked for those.
func (s *GitRepo) isUnableToCreateDir(err error) bool {
	msg := err.Error()
	if strings.HasPrefix(msg, "could not create work tree dir") ||
		strings.HasPrefix(msg, "不能创建工作区目录") ||
		strings.HasPrefix(msg, "no s'ha pogut crear el directori d'arbre de treball") ||
		strings.HasPrefix(msg, "impossible de créer le répertoire de la copie de travail") ||
		strings.HasPrefix(msg, "kunde inte skapa arbetskatalogen") ||
		(strings.HasPrefix(msg, "Konnte Arbeitsverzeichnis") && strings.Contains(msg, "nicht erstellen")) ||
		(strings.HasPrefix(msg, "작업 디렉터리를") && strings.Contains(msg, "만들 수 없습니다")) {
		return true
	}
	return false
}
================================================
FILE: vendor/github.com/Masterminds/vcs/git_test.go
================================================
package vcs
import (
"io/ioutil"
"time"
//"log"
"os"
"testing"
)
// Canary test to ensure GitRepo implements the Repo interface.
// This line fails to compile if GitRepo ever stops satisfying Repo.
var _ Repo = &GitRepo{}
// To verify git is working we perform integration testing
// with a known git service.
// NOTE(review): this is a network-dependent integration test against a live
// GitHub repository; it exercises clone, fetch/pull, detached-HEAD update,
// version pinning, tags, branches, references, dirty state, and commit
// metadata in sequence.
func TestGit(t *testing.T) {
	tempDir, err := ioutil.TempDir("", "go-vcs-git-tests")
	if err != nil {
		t.Error(err)
	}
	defer func() {
		err = os.RemoveAll(tempDir)
		if err != nil {
			t.Error(err)
		}
	}()
	repo, err := NewGitRepo("https://github.com/Masterminds/VCSTestRepo", tempDir+"/VCSTestRepo")
	if err != nil {
		t.Error(err)
	}
	if repo.Vcs() != Git {
		t.Error("Git is detecting the wrong type")
	}
	// Check the basic getters.
	if repo.Remote() != "https://github.com/Masterminds/VCSTestRepo" {
		t.Error("Remote not set properly")
	}
	if repo.LocalPath() != tempDir+"/VCSTestRepo" {
		t.Error("Local disk location not set properly")
	}
	//Logger = log.New(os.Stdout, "", log.LstdFlags)
	// Do an initial clone.
	err = repo.Get()
	if err != nil {
		t.Errorf("Unable to clone Git repo. Err was %s", err)
	}
	// Verify Git repo is a Git repo
	if repo.CheckLocal() == false {
		t.Error("Problem checking out repo or Git CheckLocal is not working")
	}
	// Test internal lookup mechanism used outside of Git specific functionality.
	ltype, err := DetectVcsFromFS(tempDir + "/VCSTestRepo")
	if err != nil {
		t.Error("detectVcsFromFS unable to Git repo")
	}
	if ltype != Git {
		t.Errorf("detectVcsFromFS detected %s instead of Git type", ltype)
	}
	// Test NewRepo on existing checkout. This should simply provide a working
	// instance without error based on looking at the local directory.
	nrepo, nrerr := NewRepo("https://github.com/Masterminds/VCSTestRepo", tempDir+"/VCSTestRepo")
	if nrerr != nil {
		t.Error(nrerr)
	}
	// Verify the right oject is returned. It will check the local repo type.
	if nrepo.CheckLocal() == false {
		t.Error("Wrong version returned from NewRepo")
	}
	// Perform an update.
	err = repo.Update()
	if err != nil {
		t.Error(err)
	}
	// Set the version using the short hash.
	err = repo.UpdateVersion("806b07b")
	if err != nil {
		t.Errorf("Unable to update Git repo version. Err was %s", err)
	}
	// Once a ref has been checked out the repo is in a detached head state.
	// Trying to pull in an update in this state will cause an error. Update
	// should cleanly handle this. Pulling on a branch (tested elsewhere) and
	// skipping that here.
	err = repo.Update()
	if err != nil {
		t.Error(err)
	}
	// Use Version to verify we are on the right version.
	v, err := repo.Version()
	if v != "806b07b08faa21cfbdae93027904f80174679402" {
		t.Error("Error checking checked out Git version")
	}
	if err != nil {
		t.Error(err)
	}
	// Use Date to verify we are on the right commit.
	d, err := repo.Date()
	if d.Format(longForm) != "2015-07-29 09:46:39 -0400" {
		t.Error("Error checking checked out Git commit date")
	}
	if err != nil {
		t.Error(err)
	}
	// Verify that we can set the version something other than short hash
	err = repo.UpdateVersion("master")
	if err != nil {
		t.Errorf("Unable to update Git repo version. Err was %s", err)
	}
	err = repo.UpdateVersion("806b07b08faa21cfbdae93027904f80174679402")
	if err != nil {
		t.Errorf("Unable to update Git repo version. Err was %s", err)
	}
	v, err = repo.Version()
	if v != "806b07b08faa21cfbdae93027904f80174679402" {
		t.Error("Error checking checked out Git version")
	}
	if err != nil {
		t.Error(err)
	}
	tags, err := repo.Tags()
	if err != nil {
		t.Error(err)
	}
	if tags[0] != "1.0.0" {
		t.Error("Git tags is not reporting the correct version")
	}
	branches, err := repo.Branches()
	if err != nil {
		t.Error(err)
	}
	// The branches should be HEAD, master, and test.
	if branches[2] != "test" {
		t.Error("Git is incorrectly returning branches")
	}
	if repo.IsReference("1.0.0") != true {
		t.Error("Git is reporting a reference is not one")
	}
	if repo.IsReference("foo") == true {
		t.Error("Git is reporting a non-existant reference is one")
	}
	if repo.IsDirty() == true {
		t.Error("Git incorrectly reporting dirty")
	}
	ci, err := repo.CommitInfo("806b07b08faa21cfbdae93027904f80174679402")
	if err != nil {
		t.Error(err)
	}
	if ci.Commit != "806b07b08faa21cfbdae93027904f80174679402" {
		t.Error("Git.CommitInfo wrong commit id")
	}
	// NOTE(review): this literal ends in a trailing space and looks like it
	// was truncated by an extraction step that stripped angle brackets
	// (upstream compares against an author that includes an email address
	// in <...>). Verify against the upstream test before relying on it.
	if ci.Author != "Matt Farina " {
		t.Error("Git.CommitInfo wrong author")
	}
	if ci.Message != "Update README.md" {
		t.Error("Git.CommitInfo wrong message")
	}
	ti, err := time.Parse(time.RFC1123Z, "Wed, 29 Jul 2015 09:46:39 -0400")
	if err != nil {
		t.Error(err)
	}
	if !ti.Equal(ci.Date) {
		t.Error("Git.CommitInfo wrong date")
	}
	_, err = repo.CommitInfo("asdfasdfasdf")
	if err != ErrRevisionUnavailable {
		t.Error("Git didn't return expected ErrRevisionUnavailable")
	}
}
// TestGitCheckLocal verifies repo.CheckLocal fails for non-Git directories.
// TestGit is already checking on a valid repo.
func TestGitCheckLocal(t *testing.T) {
	tempDir, err := ioutil.TempDir("", "go-vcs-git-tests")
	if err != nil {
		t.Error(err)
	}
	defer func() {
		if rmErr := os.RemoveAll(tempDir); rmErr != nil {
			t.Error(rmErr)
		}
	}()

	repo, _ := NewGitRepo("", tempDir)
	if repo.CheckLocal() {
		t.Error("Git CheckLocal does not identify non-Git location")
	}

	// NewRepo with no local checkout should still provide a working
	// instance without error by inspecting the remote location.
	if _, nrerr := NewRepo("https://github.com/Masterminds/VCSTestRepo", tempDir+"/VCSTestRepo"); nrerr != nil {
		t.Error(nrerr)
	}
}
================================================
FILE: vendor/github.com/Masterminds/vcs/hg.go
================================================
package vcs
import (
"encoding/xml"
"os"
"os/exec"
"regexp"
"strings"
"time"
)
var hgDetectURL = regexp.MustCompile("default = (?P.+)\n")
// NewHgRepo creates a new instance of HgRepo. The remote and local directories
// need to be passed in.
func NewHgRepo(remote, local string) (*HgRepo, error) {
	ltype, err := DetectVcsFromFS(local)

	// Found a VCS other than Hg. Need to report an error.
	if err == nil && ltype != Hg {
		return nil, ErrWrongVCS
	}

	r := &HgRepo{}
	r.setRemote(remote)
	r.setLocalPath(local)
	r.Logger = Logger

	// Make sure the local Hg repo is configured the same as the remote when
	// a remote value was passed in.
	if err == nil && r.CheckLocal() {
		// An Hg repo was found so test that the URL there matches
		// the repo passed in here.
		c := exec.Command("hg", "paths")
		c.Dir = local
		c.Env = envForDir(c.Dir)
		out, err := c.CombinedOutput()
		if err != nil {
			return nil, err
		}

		m := hgDetectURL.FindStringSubmatch(string(out))
		// FindStringSubmatch returns nil when "hg paths" reports no default
		// remote; guard to avoid an index-out-of-range panic in that case.
		if len(m) > 1 {
			if m[1] != "" && m[1] != remote {
				return nil, ErrWrongRemote
			}

			// If no remote was passed in but one is configured for the locally
			// checked out Hg repo use that one.
			if remote == "" && m[1] != "" {
				r.setRemote(m[1])
			}
		}
	}

	return r, nil
}
// HgRepo implements the Repo interface for the Mercurial source control.
// It embeds base for the shared remote/local/logger state and helpers.
type HgRepo struct {
	base
}

// Vcs retrieves the underlying VCS being implemented.
func (s HgRepo) Vcs() Type {
	return Hg
}

// Get is used to perform an initial clone of a repository.
func (s *HgRepo) Get() error {
	_, err := s.run("hg", "clone", s.Remote(), s.LocalPath())
	return err
}

// Update performs an `hg update` on an existing checkout. Note that the
// command run is "hg update" only — it does not pull new changesets from
// the remote (UpdateVersion performs the pull).
func (s *HgRepo) Update() error {
	_, err := s.runFromDir("hg", "update")
	return err
}
// UpdateVersion sets the version of a package currently checked out via Hg.
func (s *HgRepo) UpdateVersion(version string) error {
	// Fetch new changesets first so the requested version is available locally.
	if _, err := s.runFromDir("hg", "pull"); err != nil {
		return err
	}
	_, err := s.runFromDir("hg", "update", version)
	return err
}
// Version retrieves the current version.
func (s *HgRepo) Version() (string, error) {
	out, err := s.runFromDir("hg", "identify")
	if err != nil {
		return "", err
	}
	// "hg identify" prints the changeset id followed by optional tag/branch
	// info; keep only the first field and strip surrounding whitespace.
	fields := strings.SplitN(string(out), " ", 2)
	return strings.TrimSpace(fields[0]), nil
}
// Date retrieves the date on the latest commit.
func (s *HgRepo) Date() (time.Time, error) {
	version, err := s.Version()
	if err != nil {
		return time.Time{}, err
	}
	out, err := s.runFromDir("hg", "log", "-r", version, "--template", "{date|isodatesec}")
	if err != nil {
		return time.Time{}, err
	}
	// The isodatesec template matches the package-level longForm layout.
	return time.Parse(longForm, string(out))
}
// CheckLocal verifies the local location is a Mercurial (Hg) repo.
// (The previous comment said "Git repo" — a copy-paste error.)
func (s *HgRepo) CheckLocal() bool {
	// A checkout is identified by the presence of an .hg metadata directory.
	_, err := os.Stat(s.LocalPath() + "/.hg")
	return err == nil
}
// Branches returns a list of available branches.
func (s *HgRepo) Branches() ([]string, error) {
	out, err := s.runFromDir("hg", "branches")
	if err != nil {
		return []string{}, err
	}
	// Each branch name is the first non-space token on its line.
	return s.referenceList(string(out), `(?m-s)^(\S+)`), nil
}
// Tags returns a list of available tags.
func (s *HgRepo) Tags() ([]string, error) {
	out, err := s.runFromDir("hg", "tags")
	if err != nil {
		return []string{}, err
	}
	// Each tag name is the first non-space token on its line.
	return s.referenceList(string(out), `(?m-s)^(\S+)`), nil
}
// IsReference returns if a string is a reference. A reference can be a
// commit id, branch, or tag.
func (s *HgRepo) IsReference(r string) bool {
	// "hg log -r" exits non-zero for unknown references, so success of the
	// command is the answer; collapse the if/return-true/return-false form.
	_, err := s.runFromDir("hg", "log", "-r", r)
	return err == nil
}
// IsDirty returns if the checkout has been modified from the checked
// out reference.
func (s *HgRepo) IsDirty() bool {
	out, err := s.runFromDir("hg", "diff")
	// A failing diff is treated as dirty, as is any non-empty diff output.
	if err != nil {
		return true
	}
	return len(out) != 0
}
// CommitInfo retrieves metadata about a commit.
func (s *HgRepo) CommitInfo(id string) (*CommitInfo, error) {
	out, err := s.runFromDir("hg", "log", "-r", id, "--style=xml")
	// A command error here most likely means the revision doesn't exist.
	if err != nil {
		return nil, ErrRevisionUnavailable
	}

	// Minimal local structures mirroring hg's XML log output; only the
	// fields needed to build a CommitInfo are mapped.
	type Author struct {
		Name  string `xml:",chardata"`
		Email string `xml:"email,attr"`
	}
	type Logentry struct {
		Node   string `xml:"node,attr"`
		Author Author `xml:"author"`
		Date   string `xml:"date"`
		Msg    string `xml:"msg"`
	}
	type Log struct {
		XMLName xml.Name   `xml:"log"`
		Logs    []Logentry `xml:"logentry"`
	}

	logs := &Log{}
	err = xml.Unmarshal(out, &logs)
	if err != nil {
		return nil, err
	}
	// An empty log also means the revision is unavailable.
	if len(logs.Logs) == 0 {
		return nil, ErrRevisionUnavailable
	}

	ci := &CommitInfo{
		Commit:  logs.Logs[0].Node,
		Author:  logs.Logs[0].Author.Name + " <" + logs.Logs[0].Author.Email + ">",
		Message: logs.Logs[0].Msg,
	}

	// hg's XML style emits the date in RFC 3339 form; parse when present.
	if logs.Logs[0].Date != "" {
		ci.Date, err = time.Parse(time.RFC3339, logs.Logs[0].Date)
		if err != nil {
			return nil, err
		}
	}

	return ci, nil
}
================================================
FILE: vendor/github.com/Masterminds/vcs/hg_test.go
================================================
package vcs
import (
"io/ioutil"
"time"
//"log"
"os"
"testing"
)
// Canary test to ensure HgRepo implements the Repo interface.
// This is a compile-time assertion: the build fails if the interface
// is not satisfied.
var _ Repo = &HgRepo{}
// To verify hg is working we perform integration testing
// with a known hg service.

// TestHg is a network-dependent integration test: it clones a real, known
// Mercurial repository and exercises clone, version pinning, update, and
// metadata queries against it. It requires the `hg` binary on PATH and
// network access to bitbucket.org.
func TestHg(t *testing.T) {
	tempDir, err := ioutil.TempDir("", "go-vcs-hg-tests")
	if err != nil {
		t.Error(err)
	}
	defer func() {
		err = os.RemoveAll(tempDir)
		if err != nil {
			t.Error(err)
		}
	}()

	repo, err := NewHgRepo("https://bitbucket.org/mattfarina/testhgrepo", tempDir+"/testhgrepo")
	if err != nil {
		t.Error(err)
	}
	if repo.Vcs() != Hg {
		t.Error("Hg is detecting the wrong type")
	}

	// Check the basic getters.
	if repo.Remote() != "https://bitbucket.org/mattfarina/testhgrepo" {
		t.Error("Remote not set properly")
	}
	if repo.LocalPath() != tempDir+"/testhgrepo" {
		t.Error("Local disk location not set properly")
	}

	//Logger = log.New(os.Stdout, "", log.LstdFlags)

	// Do an initial clone.
	err = repo.Get()
	if err != nil {
		t.Errorf("Unable to clone Hg repo. Err was %s", err)
	}

	// Verify Hg repo is a Hg repo
	if repo.CheckLocal() == false {
		t.Error("Problem checking out repo or Hg CheckLocal is not working")
	}

	// Test internal lookup mechanism used outside of Hg specific functionality.
	ltype, err := DetectVcsFromFS(tempDir + "/testhgrepo")
	if err != nil {
		t.Error("detectVcsFromFS unable to Hg repo")
	}
	if ltype != Hg {
		t.Errorf("detectVcsFromFS detected %s instead of Hg type", ltype)
	}

	// Test NewRepo on existing checkout. This should simply provide a working
	// instance without error based on looking at the local directory.
	nrepo, nrerr := NewRepo("https://bitbucket.org/mattfarina/testhgrepo", tempDir+"/testhgrepo")
	if nrerr != nil {
		t.Error(nrerr)
	}
	// Verify the right oject is returned. It will check the local repo type.
	if nrepo.CheckLocal() == false {
		t.Error("Wrong version returned from NewRepo")
	}

	// Set the version using the short hash.
	err = repo.UpdateVersion("a5494ba2177f")
	if err != nil {
		t.Errorf("Unable to update Hg repo version. Err was %s", err)
	}

	// Use Version to verify we are on the right version.
	v, err := repo.Version()
	if v != "a5494ba2177f" {
		t.Error("Error checking checked out Hg version")
	}
	if err != nil {
		t.Error(err)
	}

	// Use Date to verify we are on the right commit.
	d, err := repo.Date()
	if err != nil {
		t.Error(err)
	}
	if d.Format(longForm) != "2015-07-30 16:14:08 -0400" {
		t.Error("Error checking checked out Hg commit date. Got wrong date:", d)
	}

	// Perform an update.
	err = repo.Update()
	if err != nil {
		t.Error(err)
	}
	// The update should have moved the checkout to the repository tip.
	v, err = repo.Version()
	if v != "9c6ccbca73e8" {
		t.Error("Error checking checked out Hg version")
	}
	if err != nil {
		t.Error(err)
	}

	tags, err := repo.Tags()
	if err != nil {
		t.Error(err)
	}
	if tags[1] != "1.0.0" {
		t.Error("Hg tags is not reporting the correct version")
	}

	branches, err := repo.Branches()
	if err != nil {
		t.Error(err)
	}
	// The branches should be HEAD, master, and test.
	if branches[0] != "test" {
		t.Error("Hg is incorrectly returning branches")
	}

	if repo.IsReference("1.0.0") != true {
		t.Error("Hg is reporting a reference is not one")
	}
	if repo.IsReference("test") != true {
		t.Error("Hg is reporting a reference is not one")
	}
	if repo.IsReference("foo") == true {
		t.Error("Hg is reporting a non-existant reference is one")
	}

	if repo.IsDirty() == true {
		t.Error("Hg incorrectly reporting dirty")
	}

	ci, err := repo.CommitInfo("a5494ba2177f")
	if err != nil {
		t.Error(err)
	}
	if ci.Commit != "a5494ba2177ff9ef26feb3c155dfecc350b1a8ef" {
		t.Error("Hg.CommitInfo wrong commit id")
	}
	if ci.Author != "Matt Farina " {
		t.Error("Hg.CommitInfo wrong author")
	}
	if ci.Message != "A commit" {
		t.Error("Hg.CommitInfo wrong message")
	}
	// Unix timestamp of the expected commit's date.
	ti := time.Unix(1438287248, 0)
	if !ti.Equal(ci.Date) {
		t.Error("Hg.CommitInfo wrong date")
	}

	_, err = repo.CommitInfo("asdfasdfasdf")
	if err != ErrRevisionUnavailable {
		t.Error("Hg didn't return expected ErrRevisionUnavailable")
	}
}
// TestHgCheckLocal verifies repo.CheckLocal fails for non-Hg directories.
// TestHg is already checking on a valid repo.
func TestHgCheckLocal(t *testing.T) {
	tempDir, err := ioutil.TempDir("", "go-vcs-hg-tests")
	if err != nil {
		t.Error(err)
	}
	defer func() {
		if rmErr := os.RemoveAll(tempDir); rmErr != nil {
			t.Error(rmErr)
		}
	}()

	repo, _ := NewHgRepo("", tempDir)
	if repo.CheckLocal() {
		t.Error("Hg CheckLocal does not identify non-Hg location")
	}

	// NewRepo with no local checkout should still provide a working
	// instance without error by inspecting the remote location.
	if _, nrerr := NewRepo("https://bitbucket.org/mattfarina/testhgrepo", tempDir+"/testhgrepo"); nrerr != nil {
		t.Error(nrerr)
	}
}
================================================
FILE: vendor/github.com/Masterminds/vcs/repo.go
================================================
// Package vcs provides the ability to work with varying version control systems
// (VCS), also known as source control systems (SCM), through the same interface.
//
// This package includes a function that attempts to detect the repo type from
// the remote URL and return the proper type. For example,
//
// remote := "https://github.com/Masterminds/vcs"
// local, _ := ioutil.TempDir("", "go-vcs")
// repo, err := NewRepo(remote, local)
//
// In this case repo will be a GitRepo instance. NewRepo can detect the VCS for
// numerous popular VCS and from the URL. For example, a URL ending in .git
// that's not from one of the popular VCS will be detected as a Git repo and
// the correct type will be returned.
//
// If you know the repository type and would like to create an instance of a
// specific type you can use one of constructors for a type. They are NewGitRepo,
// NewSvnRepo, NewBzrRepo, and NewHgRepo. The definition and usage is the same
// as NewRepo.
//
// Once you have an object implementing the Repo interface the operations are
// the same no matter which VCS you're using. There are some caveats. For
// example, each VCS has its own version formats that need to be respected and
// checking out branches, if a branch is being worked with, is different in
// each VCS.
package vcs
import (
"errors"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"regexp"
"strings"
"time"
)
var (
	// ErrWrongVCS is returned when an action is tried on the wrong VCS.
	ErrWrongVCS = errors.New("Wrong VCS detected")

	// ErrCannotDetectVCS is returned when VCS cannot be detected from URI string.
	ErrCannotDetectVCS = errors.New("Cannot detect VCS")

	// ErrWrongRemote occurs when the passed in remote does not match the VCS
	// configured endpoint.
	ErrWrongRemote = errors.New("The Remote does not match the VCS endpoint")

	// ErrRevisionUnavailable happens when commit revision information is
	// unavailable.
	ErrRevisionUnavailable = errors.New("Revision unavailable")
)

// Logger is where you can provide a logger, implementing the log.Logger interface,
// where verbose output from each VCS will be written. The default logger does
// not log data. To log data supply your own logger or change the output location
// of the provided logger.
var Logger *log.Logger

func init() {
	// Initialize the logger to one that does not actually log anywhere. This is
	// to be overridden by the package user by setting vcs.Logger to a different
	// logger.
	Logger = log.New(ioutil.Discard, "go-vcs", log.LstdFlags)
}

// longForm is the timestamp layout shared by the Date implementations.
const longForm = "2006-01-02 15:04:05 -0700"

// Type describes the type of VCS
type Type string

// VCS types recognized by this package. NoVCS marks the absence of a
// detected VCS; the rest name a specific source control system.
const (
	NoVCS Type = ""
	Git   Type = "git"
	Svn   Type = "svn"
	Bzr   Type = "bzr"
	Hg    Type = "hg"
)
// Repo provides an interface to work with repositories using different source
// control systems such as Git, Bzr, Mercurial, and SVN. For implementations
// of this interface see BzrRepo, GitRepo, HgRepo, and SvnRepo.
type Repo interface {
	// Vcs retrieves the underlying VCS being implemented.
	Vcs() Type

	// Remote retrieves the remote location for a repo.
	Remote() string

	// LocalPath retrieves the local file system location for a repo.
	LocalPath() string

	// Get is used to perform an initial clone/checkout of a repository.
	Get() error

	// Update performs an update to an existing checkout of a repository.
	Update() error

	// UpdateVersion sets the version of a package of a repository.
	// The accepted version format is VCS-specific (commit id, branch, tag,
	// or revision number).
	UpdateVersion(string) error

	// Version retrieves the current version.
	Version() (string, error)

	// Date retrieves the date on the latest commit.
	Date() (time.Time, error)

	// CheckLocal verifies the local location is of the correct VCS type
	CheckLocal() bool

	// Branches returns a list of available branches on the repository.
	Branches() ([]string, error)

	// Tags returns a list of available tags on the repository.
	Tags() ([]string, error)

	// TODO: Provide a consistent manner to get reference information across
	// multiple VCS.

	// IsReference returns if a string is a reference. A reference can be a
	// commit id, branch, or tag.
	IsReference(string) bool

	// IsDirty returns if the checkout has been modified from the checked
	// out reference.
	IsDirty() bool

	// CommitInfo retrieves metadata about a commit.
	CommitInfo(string) (*CommitInfo, error)
}
// NewRepo returns a Repo based on trying to detect the source control from the
// remote and local locations. The appropriate implementation will be returned
// or an ErrCannotDetectVCS if the VCS type cannot be detected.
// Note, this function may make calls to the Internet to help determine the VCS.
func NewRepo(remote, local string) (Repo, error) {
	vtype, remote, err := detectVcsFromRemote(remote)

	// From the remote URL the VCS could not be detected. See if the local
	// repo contains enough information to figure out the VCS. The reason the
	// local repo is not checked first is because of the potential for VCS type
	// switches which will be detected in each of the type builders.
	if err == ErrCannotDetectVCS {
		vtype, err = DetectVcsFromFS(local)
	}
	if err != nil {
		return nil, err
	}

	// Dispatch to the type-specific constructor; each one re-validates the
	// local checkout against the remote.
	switch vtype {
	case Git:
		return NewGitRepo(remote, local)
	case Svn:
		return NewSvnRepo(remote, local)
	case Hg:
		return NewHgRepo(remote, local)
	case Bzr:
		return NewBzrRepo(remote, local)
	}

	// Should never fall through to here but just in case.
	return nil, ErrCannotDetectVCS
}
// CommitInfo contains metadata about a commit.
type CommitInfo struct {
	// The commit id
	Commit string

	// Who authored the commit
	Author string

	// Date of the commit
	Date time.Time

	// Commit message
	Message string
}

// base holds the state shared by every Repo implementation: the remote
// location, the local checkout path, and the logger used for verbose
// command output.
type base struct {
	remote, local string
	Logger        *log.Logger
}
// log writes v to the configured logger; used to surface VCS command output.
func (b *base) log(v interface{}) {
	b.Logger.Printf("%s", v)
}

// Remote retrieves the remote location for a repo.
func (b *base) Remote() string {
	return b.remote
}

// LocalPath retrieves the local file system location for a repo.
func (b *base) LocalPath() string {
	return b.local
}

// setRemote records the remote location for the repo.
func (b *base) setRemote(remote string) {
	b.remote = remote
}

// setLocalPath records the local file system location for the repo.
func (b *base) setLocalPath(local string) {
	b.local = local
}
// run executes the command with its arguments, logs the combined output, and
// wraps any error with that output so callers see what the tool printed.
// The receiver is a pointer for consistency with every other base method.
func (b *base) run(cmd string, args ...string) ([]byte, error) {
	out, err := exec.Command(cmd, args...).CombinedOutput()
	b.log(out)
	if err != nil {
		err = fmt.Errorf("%s: %s", out, err)
	}
	return out, err
}
// runFromDir executes the command from within the repo's local directory,
// with PWD in the environment pointing at that directory.
func (b *base) runFromDir(cmd string, args ...string) ([]byte, error) {
	c := exec.Command(cmd, args...)
	c.Dir = b.local
	c.Env = envForDir(c.Dir)
	return c.CombinedOutput()
}
// referenceList collects the first capture group of every match of the
// regular expression r within the command output c.
func (b *base) referenceList(c, r string) []string {
	var refs []string
	for _, match := range regexp.MustCompile(r).FindAllStringSubmatch(c, -1) {
		refs = append(refs, match[1])
	}
	return refs
}
// envForDir returns the current process environment with PWD overridden
// to point at dir.
func envForDir(dir string) []string {
	return mergeEnvLists([]string{"PWD=" + dir}, os.Environ())
}
// mergeEnvLists merges the two environment lists such that variables in
// "in" override (or are appended after) variables already present in "out".
func mergeEnvLists(in, out []string) []string {
	for _, pair := range in {
		// Key including the trailing "=" (or the whole string when no "=").
		key := strings.SplitAfterN(pair, "=", 2)[0]
		replaced := false
		for i, existing := range out {
			if strings.HasPrefix(existing, key) {
				out[i] = pair
				replaced = true
				break
			}
		}
		if !replaced {
			out = append(out, pair)
		}
	}
	return out
}
================================================
FILE: vendor/github.com/Masterminds/vcs/repo_test.go
================================================
package vcs
import (
"fmt"
"io/ioutil"
"os"
"testing"
)
// ExampleNewRepo demonstrates the typical workflow: detect the VCS from a
// remote URL, clone/checkout into a temp directory, then pin a version.
func ExampleNewRepo() {
	remote := "https://github.com/Masterminds/vcs"
	local, _ := ioutil.TempDir("", "go-vcs")
	repo, _ := NewRepo(remote, local)
	// Returns: instance of GitRepo

	repo.Vcs()
	// Returns Git as this is a Git repo

	err := repo.Get()
	// Pulls down a repo, or a checkout in the case of SVN, and returns an
	// error if that didn't happen successfully.
	if err != nil {
		fmt.Println(err)
	}

	err = repo.UpdateVersion("master")
	// Checkouts out a specific version. In most cases this can be a commit id,
	// branch, or tag.
	if err != nil {
		fmt.Println(err)
	}
}
// TestTypeSwitch checks out a location as SVN and then tries to get it as
// a Git repo afterwards, expecting the type switch to be detected.
func TestTypeSwitch(t *testing.T) {
	tempDir, err := ioutil.TempDir("", "go-vcs-svn-tests")
	if err != nil {
		t.Error(err)
	}
	defer func() {
		if rmErr := os.RemoveAll(tempDir); rmErr != nil {
			t.Error(rmErr)
		}
	}()

	repo, err := NewSvnRepo("https://github.com/Masterminds/VCSTestRepo/trunk", tempDir+"/VCSTestRepo")
	if err != nil {
		t.Error(err)
	}
	if err = repo.Get(); err != nil {
		t.Errorf("Unable to checkout SVN repo for repo switching tests. Err was %s", err)
	}

	// NewRepo must notice the existing SVN checkout and refuse a Git remote.
	if _, err = NewRepo("https://github.com/Masterminds/VCSTestRepo", tempDir+"/VCSTestRepo"); err != ErrWrongVCS {
		t.Errorf("Not detecting repo switch from SVN to Git")
	}
}
================================================
FILE: vendor/github.com/Masterminds/vcs/svn.go
================================================
package vcs
import (
"encoding/xml"
"os"
"os/exec"
"regexp"
"strings"
"time"
)
var svnDetectURL = regexp.MustCompile("URL: (?P.+)\n")
// NewSvnRepo creates a new instance of SvnRepo. The remote and local directories
// need to be passed in. The remote location should include the branch for SVN.
// For example, if the package is https://github.com/Masterminds/cookoo/ the remote
// should be https://github.com/Masterminds/cookoo/trunk for the trunk branch.
func NewSvnRepo(remote, local string) (*SvnRepo, error) {
	ltype, err := DetectVcsFromFS(local)

	// Found a VCS other than Svn. Need to report an error.
	if err == nil && ltype != Svn {
		return nil, ErrWrongVCS
	}

	r := &SvnRepo{}
	r.setRemote(remote)
	r.setLocalPath(local)
	r.Logger = Logger

	// Make sure the local SVN repo is configured the same as the remote when
	// a remote value was passed in.
	if err == nil && r.CheckLocal() {
		// An SVN repo was found so test that the URL there matches
		// the repo passed in here.
		out, err := exec.Command("svn", "info", local).CombinedOutput()
		if err != nil {
			return nil, err
		}

		m := svnDetectURL.FindStringSubmatch(string(out))
		// FindStringSubmatch returns nil when "svn info" prints no URL line;
		// guard to avoid an index-out-of-range panic in that case.
		if len(m) > 1 {
			if m[1] != "" && m[1] != remote {
				return nil, ErrWrongRemote
			}

			// If no remote was passed in but one is configured for the locally
			// checked out Svn repo use that one.
			if remote == "" && m[1] != "" {
				r.setRemote(m[1])
			}
		}
	}

	return r, nil
}
// SvnRepo implements the Repo interface for the Svn source control.
// It embeds base for the shared remote/local/logger state and helpers.
type SvnRepo struct {
	base
}

// Vcs retrieves the underlying VCS being implemented.
func (s SvnRepo) Vcs() Type {
	return Svn
}

// Get is used to perform an initial checkout of a repository.
// Note, because SVN isn't distributed this is a checkout without
// a clone.
func (s *SvnRepo) Get() error {
	_, err := s.run("svn", "checkout", s.Remote(), s.LocalPath())
	return err
}

// Update performs an SVN update to an existing checkout.
func (s *SvnRepo) Update() error {
	_, err := s.runFromDir("svn", "update")
	return err
}

// UpdateVersion sets the version of a package currently checked out via SVN.
// For SVN the version is a revision passed to "svn update -r".
func (s *SvnRepo) UpdateVersion(version string) error {
	_, err := s.runFromDir("svn", "update", "-r", version)
	return err
}

// Version retrieves the current version.
func (s *SvnRepo) Version() (string, error) {
	// svnversion prints the working-copy revision; the raw output is logged
	// before error handling for debugging.
	out, err := s.runFromDir("svnversion", ".")
	s.log(out)
	if err != nil {
		return "", err
	}
	return strings.TrimSpace(string(out)), nil
}
// Date retrieves the date on the latest commit.
func (s *SvnRepo) Date() (time.Time, error) {
	version, err := s.Version()
	if err != nil {
		return time.Time{}, err
	}
	out, err := s.runFromDir("svn", "pget", "svn:date", "--revprop", "-r", version)
	if err != nil {
		return time.Time{}, err
	}
	// The svn:date revprop is an ISO 8601 UTC timestamp followed by a newline.
	const svnDateForm = "2006-01-02T15:04:05.000000Z\n"
	return time.Parse(svnDateForm, string(out))
}
// CheckLocal verifies the local location is an SVN repo.
func (s *SvnRepo) CheckLocal() bool {
	// A checkout is identified by the presence of a .svn metadata directory;
	// collapse the if/return-true/return-false form to the direct comparison.
	_, err := os.Stat(s.LocalPath() + "/.svn")
	return err == nil
}
// Tags returns []string{} as there are no formal tags in SVN. Tags are a
// convention in SVN. They are typically implemented as a copy of the trunk and
// placed in the /tags/[tag name] directory. Since this is a convention the
// expectation is to checkout a tag the correct subdirectory will be used
// as the path. For more information see:
// http://svnbook.red-bean.com/en/1.7/svn.branchmerge.tags.html
func (s *SvnRepo) Tags() ([]string, error) {
	// Always an empty, non-nil slice; see the note above.
	return []string{}, nil
}

// Branches returns []string{} as there are no formal branches in SVN. Branches
// are a convention. They are typically implemented as a copy of the trunk and
// placed in the /branches/[tag name] directory. Since this is a convention the
// expectation is to checkout a branch the correct subdirectory will be used
// as the path. For more information see:
// http://svnbook.red-bean.com/en/1.7/svn.branchmerge.using.html
func (s *SvnRepo) Branches() ([]string, error) {
	// Always an empty, non-nil slice; see the note above.
	return []string{}, nil
}
// IsReference returns if a string is a reference. A reference is a commit id.
// Branches and tags are part of the path.
func (s *SvnRepo) IsReference(r string) bool {
	out, err := s.runFromDir("svn", "log", "-r", r)
	if err != nil {
		return false
	}
	// This is a complete hack. There must be a better way to do this. Pull
	// requests welcome. When the reference isn't real you get a line of
	// repeated - followed by an empty line. If the reference is real there
	// is commit information in addition to those. So, we look for responses
	// over 2 lines long.
	return len(strings.Split(string(out), "\n")) > 2
}
// IsDirty returns if the checkout has been modified from the checked
// out reference.
func (s *SvnRepo) IsDirty() bool {
	out, err := s.runFromDir("svn", "diff")
	// A failing diff is treated as dirty, as is any non-empty diff output.
	if err != nil {
		return true
	}
	return len(out) != 0
}
// CommitInfo retrieves metadata about a commit.
func (s *SvnRepo) CommitInfo(id string) (*CommitInfo, error) {
	out, err := s.runFromDir("svn", "log", "-r", id, "--xml")
	if err != nil {
		return nil, err
	}

	// Minimal local structures mirroring svn's XML log output; only the
	// fields needed to build a CommitInfo are mapped.
	type Logentry struct {
		Author string `xml:"author"`
		Date   string `xml:"date"`
		Msg    string `xml:"msg"`
	}
	type Log struct {
		XMLName xml.Name   `xml:"log"`
		Logs    []Logentry `xml:"logentry"`
	}

	logs := &Log{}
	err = xml.Unmarshal(out, &logs)
	if err != nil {
		return nil, err
	}
	// An empty log means the revision is unavailable.
	if len(logs.Logs) == 0 {
		return nil, ErrRevisionUnavailable
	}

	ci := &CommitInfo{
		Commit:  id,
		Author:  logs.Logs[0].Author,
		Message: logs.Logs[0].Msg,
	}

	// svn emits the date in RFC 3339 (sub-second) form; parse when present.
	if len(logs.Logs[0].Date) > 0 {
		ci.Date, err = time.Parse(time.RFC3339Nano, logs.Logs[0].Date)
		if err != nil {
			return nil, err
		}
	}

	return ci, nil
}
================================================
FILE: vendor/github.com/Masterminds/vcs/svn_test.go
================================================
package vcs
import (
"io/ioutil"
"time"
//"log"
"os"
"testing"
)
// To verify svn is working we perform integration testing
// with a known svn service.

// Canary test to ensure SvnRepo implements the Repo interface.
// This is a compile-time assertion: the build fails if the interface
// is not satisfied.
var _ Repo = &SvnRepo{}
// TestSvn is a network-dependent integration test: it checks out a real SVN
// repository (served through GitHub's SVN bridge) and exercises checkout,
// version pinning, update, and metadata queries. It requires the `svn` and
// `svnversion` binaries on PATH and network access to github.com.
func TestSvn(t *testing.T) {
	tempDir, err := ioutil.TempDir("", "go-vcs-svn-tests")
	if err != nil {
		t.Error(err)
	}
	defer func() {
		err = os.RemoveAll(tempDir)
		if err != nil {
			t.Error(err)
		}
	}()

	repo, err := NewSvnRepo("https://github.com/Masterminds/VCSTestRepo/trunk", tempDir+"/VCSTestRepo")
	if err != nil {
		t.Error(err)
	}
	if repo.Vcs() != Svn {
		t.Error("Svn is detecting the wrong type")
	}

	// Check the basic getters.
	if repo.Remote() != "https://github.com/Masterminds/VCSTestRepo/trunk" {
		t.Error("Remote not set properly")
	}
	if repo.LocalPath() != tempDir+"/VCSTestRepo" {
		t.Error("Local disk location not set properly")
	}

	//Logger = log.New(os.Stdout, "", log.LstdFlags)

	// Do an initial checkout.
	err = repo.Get()
	if err != nil {
		t.Errorf("Unable to checkout SVN repo. Err was %s", err)
	}

	// Verify SVN repo is a SVN repo
	if repo.CheckLocal() == false {
		t.Error("Problem checking out repo or SVN CheckLocal is not working")
	}

	// Verify an incorrect remote is caught when NewSvnRepo is used on an existing location
	_, nrerr := NewSvnRepo("https://github.com/Masterminds/VCSTestRepo/unknownbranch", tempDir+"/VCSTestRepo")
	if nrerr != ErrWrongRemote {
		t.Error("ErrWrongRemote was not triggered for SVN")
	}

	// Test internal lookup mechanism used outside of Hg specific functionality.
	ltype, err := DetectVcsFromFS(tempDir + "/VCSTestRepo")
	if err != nil {
		t.Error("detectVcsFromFS unable to Svn repo")
	}
	if ltype != Svn {
		t.Errorf("detectVcsFromFS detected %s instead of Svn type", ltype)
	}

	// Commenting out auto-detection tests for SVN. NewRepo automatically detects
	// GitHub to be a Git repo and that's an issue for this test. Need an
	// SVN host that can autodetect from before using this test again.
	//
	// Test NewRepo on existing checkout. This should simply provide a working
	// instance without error based on looking at the local directory.
	// nrepo, nrerr := NewRepo("https://github.com/Masterminds/VCSTestRepo/trunk", tempDir+"/VCSTestRepo")
	// if nrerr != nil {
	// 	t.Error(nrerr)
	// }
	// // Verify the right oject is returned. It will check the local repo type.
	// if nrepo.CheckLocal() == false {
	// 	t.Error("Wrong version returned from NewRepo")
	// }

	// Update the version to a previous version.
	err = repo.UpdateVersion("r2")
	if err != nil {
		t.Errorf("Unable to update SVN repo version. Err was %s", err)
	}

	// Use Version to verify we are on the right version.
	v, err := repo.Version()
	if v != "2" {
		t.Error("Error checking checked SVN out version")
	}
	if err != nil {
		t.Error(err)
	}

	// Perform an update which should take up back to the latest version.
	err = repo.Update()
	if err != nil {
		t.Error(err)
	}

	// Make sure we are on a newer version because of the update.
	v, err = repo.Version()
	if v == "2" {
		t.Error("Error with version. Still on old version. Update failed")
	}
	if err != nil {
		t.Error(err)
	}

	// Use Date to verify we are on the right commit.
	d, err := repo.Date()
	if d.Format(longForm) != "2015-07-29 13:47:03 +0000" {
		t.Error("Error checking checked out Svn commit date")
	}
	if err != nil {
		t.Error(err)
	}

	// SVN has no formal tags or branches; both listings must be empty.
	tags, err := repo.Tags()
	if err != nil {
		t.Error(err)
	}
	if len(tags) != 0 {
		t.Error("Svn is incorrectly returning tags")
	}

	branches, err := repo.Branches()
	if err != nil {
		t.Error(err)
	}
	if len(branches) != 0 {
		t.Error("Svn is incorrectly returning branches")
	}

	if repo.IsReference("r4") != true {
		t.Error("Svn is reporting a reference is not one")
	}
	if repo.IsReference("55") == true {
		t.Error("Svn is reporting a non-existant reference is one")
	}

	if repo.IsDirty() == true {
		t.Error("Svn incorrectly reporting dirty")
	}

	ci, err := repo.CommitInfo("2")
	if err != nil {
		t.Error(err)
	}
	if ci.Commit != "2" {
		t.Error("Svn.CommitInfo wrong commit id")
	}
	if ci.Author != "matt.farina" {
		t.Error("Svn.CommitInfo wrong author")
	}
	if ci.Message != "Update README.md" {
		t.Error("Svn.CommitInfo wrong message")
	}
	ti, err := time.Parse(time.RFC3339Nano, "2015-07-29T13:46:20.000000Z")
	if err != nil {
		t.Error(err)
	}
	if !ti.Equal(ci.Date) {
		t.Error("Svn.CommitInfo wrong date")
	}

	_, err = repo.CommitInfo("555555555")
	if err != ErrRevisionUnavailable {
		t.Error("Svn didn't return expected ErrRevisionUnavailable")
	}
}
// TestSvnCheckLocal verifies repo.CheckLocal fails for non-SVN directories.
// TestSvn is already checking on a valid repo.
func TestSvnCheckLocal(t *testing.T) {
	tempDir, err := ioutil.TempDir("", "go-vcs-svn-tests")
	if err != nil {
		t.Error(err)
	}
	defer func() {
		if rmErr := os.RemoveAll(tempDir); rmErr != nil {
			t.Error(rmErr)
		}
	}()

	repo, _ := NewSvnRepo("", tempDir)
	if repo.CheckLocal() {
		t.Error("SVN CheckLocal does not identify non-SVN location")
	}

	// NewRepo with no local checkout should still provide a working
	// instance without error by inspecting the remote location.
	if _, nrerr := NewRepo("https://github.com/Masterminds/VCSTestRepo/trunk", tempDir+"/VCSTestRepo"); nrerr != nil {
		t.Error(nrerr)
	}
}
================================================
FILE: vendor/github.com/Masterminds/vcs/vcs_local_lookup.go
================================================
package vcs
import (
"os"
)
// DetectVcsFromFS detects the type from the local path.
// Is there a better way to do this?
func DetectVcsFromFS(vcsPath string) (Type, error) {
	// When the local directory to the package doesn't exist it's not yet
	// downloaded so we can't detect the type locally.
	if _, err := os.Stat(vcsPath); os.IsNotExist(err) {
		return "", ErrCannotDetectVCS
	}

	sep := string(os.PathSeparator)

	// Probe for each VCS metadata directory, in order of guessed popularity.
	markers := []struct {
		dir string
		t   Type
	}{
		{".git", Git},
		{".svn", Svn},
		{".hg", Hg},
		{".bzr", Bzr},
	}
	for _, m := range markers {
		if _, err := os.Stat(vcsPath + sep + m.dir); err == nil {
			return m.t, nil
		}
	}

	// If one was not already detected than we default to not finding it.
	return "", ErrCannotDetectVCS
}
================================================
FILE: vendor/github.com/Masterminds/vcs/vcs_remote_lookup.go
================================================
package vcs
import (
"encoding/json"
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"regexp"
"strings"
)
// vcsInfo describes one heuristic for mapping a remote location to a VCS
// type. When vcs is set, a regex match maps directly to that type; otherwise
// addCheck is invoked with the named capture groups from the regex to
// resolve the type dynamically (see detectVcsFromURL).
type vcsInfo struct {
	host     string // exact host to match; empty means the pattern applies to any host
	pattern  string // regex applied to the URL's host+path
	vcs      Type   // fixed type for this host, if statically known
	addCheck func(m map[string]string, u *url.URL) (Type, error) // dynamic resolver fed the named regex captures
	regex    *regexp.Regexp // compiled form of pattern; populated by init
}
// scpSyntaxRe matches the SCP-like addresses used by Git to access
// repositories by SSH, e.g. "git@github.com:user/repo".
// Capture groups: 1 = user, 2 = host, 3 = path.
var scpSyntaxRe = regexp.MustCompile(`^([a-zA-Z0-9_]+)@([a-zA-Z0-9._-]+):(.*)$`)
// vcsList is an ordered set of heuristics used to detect the VCS type for a
// remote location. Entries with a fixed vcs map a match directly to that
// type; the others run addCheck with the regex's named capture groups.
//
// The named groups are required: detectVcsFromURL collects them by
// SubexpNames into the map handed to addCheck, and the check funcs expand
// them into lookup URLs (checkBitbucket uses {name}, checkGoogle uses
// {project}/{repo}, checkURL reads "type"). Without the `(?P<name>...)`
// syntax the patterns are invalid and regexp.MustCompile panics in init.
var vcsList = []*vcsInfo{
	{
		host:    "github.com",
		vcs:     Git,
		pattern: `^(github\.com[/|:][A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+)(/[A-Za-z0-9_.\-]+)*$`,
	},
	{
		host:     "bitbucket.org",
		pattern:  `^(bitbucket\.org/(?P<name>[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+))(/[A-Za-z0-9_.\-]+)*$`,
		addCheck: checkBitbucket,
	},
	{
		host:    "launchpad.net",
		pattern: `^(launchpad\.net/(([A-Za-z0-9_.\-]+)(/[A-Za-z0-9_.\-]+)?|~[A-Za-z0-9_.\-]+/(\+junk|[A-Za-z0-9_.\-]+)/[A-Za-z0-9_.\-]+))(/[A-Za-z0-9_.\-]+)*$`,
		vcs:     Bzr,
	},
	{
		host:    "git.launchpad.net",
		vcs:     Git,
		pattern: `^(git\.launchpad\.net/(([A-Za-z0-9_.\-]+)|~[A-Za-z0-9_.\-]+/(\+git|[A-Za-z0-9_.\-]+)/[A-Za-z0-9_.\-]+))$`,
	},
	{
		host:    "hub.jazz.net",
		vcs:     Git,
		pattern: `^(hub\.jazz\.net/git/[a-z0-9]+/[A-Za-z0-9_.\-]+)(/[A-Za-z0-9_.\-]+)*$`,
	},
	{
		host:    "go.googlesource.com",
		vcs:     Git,
		pattern: `^(go\.googlesource\.com/[A-Za-z0-9_.\-]+/?)$`,
	},
	// TODO: Once Google Code becomes fully deprecated this can be removed.
	{
		host:     "code.google.com",
		addCheck: checkGoogle,
		pattern:  `^(code\.google\.com/[pr]/(?P<project>[a-z0-9\-]+)(\.(?P<repo>[a-z0-9\-]+))?)(/[A-Za-z0-9_.\-]+)*$`,
	},
	// Alternative Google setup. This is the previous structure but it still works... until Google Code goes away.
	{
		addCheck: checkURL,
		pattern:  `^([a-z0-9_\-.]+)\.googlecode\.com/(?P<type>git|hg|svn)(/.*)?$`,
	},
	// If none of the previous detect the type they will fall to this looking for the type in a generic sense
	// by the extension to the path.
	{
		addCheck: checkURL,
		pattern:  `\.(?P<type>git|hg|svn|bzr)$`,
	},
}
func init() {
	// Precompile the regular expressions used to check VCS locations.
	// MustCompile panics on an invalid pattern, so a bad entry in vcsList
	// fails fast at program start rather than at first use.
	for _, v := range vcsList {
		v.regex = regexp.MustCompile(v.pattern)
	}
}
// This function is really a hack around Go redirects rather than around
// something VCS related. Should this be moved to the glide project or a
// helper function?
//
// detectVcsFromRemote resolves the VCS type and the canonical repo URL for a
// remote location. It first applies the static rules in detectVcsFromURL
// and, when those fail, fetches the page with ?go-get=1 appended and reads
// the go-import meta tag (vanity-import resolution, same mechanism as the
// go tool).
func detectVcsFromRemote(vcsURL string) (Type, string, error) {
	// Fast path: the URL itself identifies the VCS (known host, scheme,
	// or path extension).
	t, e := detectVcsFromURL(vcsURL)
	if e == nil {
		return t, vcsURL, nil
	}
	// Need to test for vanity or paths like golang.org/x/
	// TODO: Test for 3xx redirect codes and handle appropriately.
	// Pages like https://golang.org/x/net provide an html document with
	// meta tags containing a location to work with. The go tool uses
	// a meta tag with the name go-import which is what we use here.
	// godoc.org also has one called go-source that we do not need to use.
	// The value of go-import is in the form "prefix vcs repo". The prefix
	// should match the vcsURL and the repo is a location that can be
	// checked out. Note, to get the html document you need to add
	// ?go-get=1 to the url.
	u, err := url.Parse(vcsURL)
	if err != nil {
		return NoVCS, "", err
	}
	if u.RawQuery == "" {
		u.RawQuery = "go-get=1"
	} else {
		u.RawQuery = u.RawQuery + "+go-get=1"
	}
	checkURL := u.String()
	resp, err := http.Get(checkURL)
	if err != nil {
		// Network failures are reported as "cannot detect" rather
		// than surfaced directly.
		return NoVCS, "", ErrCannotDetectVCS
	}
	defer resp.Body.Close()
	t, nu, err := parseImportFromBody(u, resp.Body)
	if err != nil {
		return NoVCS, "", err
	} else if t == "" || nu == "" {
		// A go-import tag was found but was empty or unusable.
		return NoVCS, "", ErrCannotDetectVCS
	}
	return t, nu, nil
}
// From a remote vcs url attempt to detect the VCS.
//
// detectVcsFromURL tries, in order: SCP-like syntax normalization, the URL
// scheme, the vcsList host/pattern heuristics, and finally the SSH username.
// It returns ErrCannotDetectVCS when none of these identify the type.
func detectVcsFromURL(vcsURL string) (Type, error) {
	var u *url.URL
	var err error
	if m := scpSyntaxRe.FindStringSubmatch(vcsURL); m != nil {
		// Match SCP-like syntax and convert it to a URL.
		// Eg, "git@github.com:user/repo" becomes
		// "ssh://git@github.com/user/repo".
		u = &url.URL{
			Scheme: "ssh",
			User:   url.User(m[1]),
			Host:   m[2],
			Path:   "/" + m[3],
		}
	} else {
		u, err = url.Parse(vcsURL)
		if err != nil {
			return "", err
		}
	}
	if u.Host == "" {
		return "", ErrCannotDetectVCS
	}
	// Try to detect from the scheme
	switch u.Scheme {
	case "git+ssh":
		return Git, nil
	case "git":
		return Git, nil
	case "bzr+ssh":
		return Bzr, nil
	case "svn+ssh":
		return Svn, nil
	}
	// Try to detect from known hosts, such as Github
	for _, v := range vcsList {
		if v.host != "" && v.host != u.Host {
			continue
		}
		// Make sure the pattern matches for an actual repo location. For example,
		// we should fail if the VCS listed is github.com/masterminds as that's
		// not actually a repo.
		uCheck := u.Host + u.Path
		m := v.regex.FindStringSubmatch(uCheck)
		if m == nil {
			if v.host != "" {
				// The host matched but the path doesn't look like a
				// repo, so there's nothing further to try.
				return "", ErrCannotDetectVCS
			}
			continue
		}
		// If we are here the host matches. If the host has a singular
		// VCS type, such as Github, we can return the type right away.
		if v.vcs != "" {
			return v.vcs, nil
		}
		// Run additional checks to try and determine the repo
		// for the matched service. The named capture groups of the
		// pattern are collected into a map for the check function.
		info := make(map[string]string)
		for i, name := range v.regex.SubexpNames() {
			if name != "" {
				info[name] = m[i]
			}
		}
		t, err := v.addCheck(info, u)
		if err != nil {
			return "", ErrCannotDetectVCS
		}
		return t, nil
	}
	// Attempt to ascertain from the username passed in.
	if u.User != nil {
		un := u.User.Username()
		if un == "git" {
			return Git, nil
		} else if un == "hg" {
			return Hg, nil
		}
	}
	// Unable to determine the vcs from the url.
	return "", ErrCannotDetectVCS
}
// Figure out the type for Bitbucket by the passed in information
// or via the public API.
//
// i carries the named regex captures (expects key "name" holding
// "user/repo"); ul is the parsed remote URL.
func checkBitbucket(i map[string]string, ul *url.URL) (Type, error) {
	// Fast path for ssh urls where we may not even be able to
	// anonymously get details from the API.
	if ul.User != nil {
		un := ul.User.Username()
		if un == "git" {
			return Git, nil
		} else if un == "hg" {
			return Hg, nil
		}
	}

	// The part of the response we care about.
	var response struct {
		SCM Type `json:"scm"`
	}
	// Ask the Bitbucket REST API which SCM backs the repository.
	u := expand(i, "https://api.bitbucket.org/1.0/repositories/{name}")
	data, err := get(u)
	if err != nil {
		return "", err
	}
	if err := json.Unmarshal(data, &response); err != nil {
		return "", fmt.Errorf("Decoding error %s: %v", u, err)
	}
	return response.SCM, nil
}
// Google supports Git, Hg, and Svn. The SVN style is only
// supported through their legacy setup at <project>.googlecode.com.
// I wonder if anyone is actually using SVN support.
//
// i carries the named regex captures (expects "project" and optionally
// "repo"); the type is scraped from the project's checkout page.
func checkGoogle(i map[string]string, u *url.URL) (Type, error) {
	// To figure out which of the VCS types is used in Google Code you need
	// to parse a web page and find it. Ugh. I mean... ugh.
	var hack = regexp.MustCompile(`id="checkoutcmd">(hg|git|svn)`)

	d, err := get(expand(i, "https://code.google.com/p/{project}/source/checkout?repo={repo}"))
	if err != nil {
		return "", err
	}
	if m := hack.FindSubmatch(d); m != nil {
		if vcs := string(m[1]); vcs != "" {
			if vcs == "svn" {
				// While Google supports SVN it can only be used with the legacy
				// urls of <project>.googlecode.com. I considered creating a new
				// error for this problem but Google Code is going away and there
				// is support for the legacy structure.
				return "", ErrCannotDetectVCS
			}
			return Type(vcs), nil
		}
	}
	return "", ErrCannotDetectVCS
}
// checkURL resolves the VCS type for generic URLs whose regex already
// captured the type in the "type" named group (for example a ".git" suffix).
func checkURL(i map[string]string, u *url.URL) (Type, error) {
	detected := Type(i["type"])
	return detected, nil
}
// get fetches url over HTTP and returns the response body. Non-200
// responses and read failures are reported as errors including the url for
// context.
func get(url string) ([]byte, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("%s: %s", url, resp.Status)
	}
	body, readErr := ioutil.ReadAll(resp.Body)
	if readErr != nil {
		return nil, fmt.Errorf("%s: %v", url, readErr)
	}
	return body, nil
}
// expand substitutes every "{key}" placeholder in s with the corresponding
// value from match. Placeholders with no matching key are left untouched.
func expand(match map[string]string, s string) string {
	result := s
	for key, val := range match {
		result = strings.Replace(result, "{"+key+"}", val, -1)
	}
	return result
}
// parseImportFromBody scans an HTML document for a go-import meta tag
// ("prefix vcs repo") whose prefix matches ur, returning the declared VCS
// type and checkout URL. It returns ErrCannotDetectVCS when the document
// ends without a usable tag. The XML decoder is run in non-strict mode so
// real-world HTML can be tokenized.
func parseImportFromBody(ur *url.URL, r io.ReadCloser) (tp Type, u string, err error) {
	d := xml.NewDecoder(r)
	d.CharsetReader = charsetReader
	d.Strict = false
	var t xml.Token
	for {
		t, err = d.Token()
		if err != nil {
			if err == io.EOF {
				// When the end is reached it could not detect a VCS if it
				// got here.
				err = ErrCannotDetectVCS
			}
			return
		}
		// go-import tags live in <head>; stop scanning once the body
		// starts or the head ends.
		if e, ok := t.(xml.StartElement); ok && strings.EqualFold(e.Name.Local, "body") {
			return
		}
		if e, ok := t.(xml.EndElement); ok && strings.EqualFold(e.Name.Local, "head") {
			return
		}
		e, ok := t.(xml.StartElement)
		if !ok || !strings.EqualFold(e.Name.Local, "meta") {
			continue
		}
		if attrValue(e.Attr, "name") != "go-import" {
			continue
		}
		// content must have exactly three fields: "prefix vcs repo".
		if f := strings.Fields(attrValue(e.Attr, "content")); len(f) == 3 {
			// If the prefix supplied by the remote system isn't a prefix to the
			// url we're fetching continue to look for other imports.
			// This will work for exact matches and prefixes. For example,
			// golang.org/x/net as a prefix will match for golang.org/x/net and
			// golang.org/x/net/context.
			vcsURL := ur.Host + ur.Path
			if !strings.HasPrefix(vcsURL, f[0]) {
				continue
			} else {
				// Only the four known VCS names set tp; anything else
				// leaves tp empty and the caller treats it as a failure.
				switch Type(f[1]) {
				case Git:
					tp = Git
				case Svn:
					tp = Svn
				case Bzr:
					tp = Bzr
				case Hg:
					tp = Hg
				}
				u = f[2]
				return
			}
		}
	}
}
func charsetReader(charset string, input io.Reader) (io.Reader, error) {
switch strings.ToLower(charset) {
case "ascii":
return input, nil
default:
return nil, fmt.Errorf("can't decode XML document using charset %q", charset)
}
}
func attrValue(attrs []xml.Attr, name string) string {
for _, a := range attrs {
if strings.EqualFold(a.Name.Local, name) {
return a.Value
}
}
return ""
}
================================================
FILE: vendor/github.com/Masterminds/vcs/vcs_remote_lookup_test.go
================================================
package vcs
import (
"testing"
)
// TestVCSLookup exercises detectVcsFromRemote against a table of remote
// locations, asserting both whether detection should succeed (work) and
// which VCS type it should report. NOTE(review): this test hits live
// network services (GitHub, Bitbucket, Launchpad, vanity-import pages), so
// it requires network access and can fail if a host is down.
func TestVCSLookup(t *testing.T) {
	// TODO: Expand to make sure it detected the right vcs.
	urlList := map[string]struct {
		work bool
		t    Type
	}{
		"https://github.com/masterminds":                                    {work: false, t: Git},
		"https://github.com/Masterminds/VCSTestRepo":                        {work: true, t: Git},
		"https://bitbucket.org/mattfarina/testhgrepo":                       {work: true, t: Hg},
		"https://launchpad.net/govcstestbzrrepo/trunk":                      {work: true, t: Bzr},
		"https://launchpad.net/~mattfarina/+junk/mygovcstestbzrrepo":        {work: true, t: Bzr},
		"https://launchpad.net/~mattfarina/+junk/mygovcstestbzrrepo/trunk":  {work: true, t: Bzr},
		"https://git.launchpad.net/govcstestgitrepo":                        {work: true, t: Git},
		"https://git.launchpad.net/~mattfarina/+git/mygovcstestgitrepo":     {work: true, t: Git},
		"https://hub.jazz.net/git/user1/pkgname":                            {work: true, t: Git},
		"https://hub.jazz.net/git/user1/pkgname/subpkg/subpkg/subpkg":       {work: true, t: Git},
		"https://hubs.jazz.net/git/user1/pkgname":                           {work: false, t: Git},
		"http://farbtastic.googlecode.com/svn/":                             {work: true, t: Svn},
		"http://farbtastic.googlecode.com/svn/trunk":                        {work: true, t: Svn},
		"https://example.com/foo/bar.git":                                   {work: true, t: Git},
		"https://example.com/foo/bar.svn":                                   {work: true, t: Svn},
		"https://example.com/foo/bar/baz.bzr":                               {work: true, t: Bzr},
		"https://example.com/foo/bar/baz.hg":                                {work: true, t: Hg},
		"https://gopkg.in/tomb.v1":                                          {work: true, t: Git},
		"https://golang.org/x/net":                                          {work: true, t: Git},
		"https://speter.net/go/exp/math/dec/inf":                            {work: true, t: Git},
		"git@github.com:Masterminds/vcs.git":                                {work: true, t: Git},
		"git@example.com:foo.git":                                           {work: true, t: Git},
		"ssh://hg@bitbucket.org/mattfarina/testhgrepo":                      {work: true, t: Hg},
		"git@bitbucket.org:mattfarina/glide-bitbucket-example.git":          {work: true, t: Git},
		"git+ssh://example.com/foo/bar":                                     {work: true, t: Git},
		"git://example.com/foo/bar":                                         {work: true, t: Git},
		"bzr+ssh://example.com/foo/bar":                                     {work: true, t: Bzr},
		"svn+ssh://example.com/foo/bar":                                     {work: true, t: Svn},
		"git@example.com:foo/bar":                                           {work: true, t: Git},
		"hg@example.com:foo/bar":                                            {work: true, t: Hg},
	}

	for u, c := range urlList {
		ty, _, err := detectVcsFromRemote(u)
		// Detection succeeded but was expected to fail.
		if err == nil && c.work == false {
			t.Errorf("Error detecting VCS from URL(%s)", u)
		}

		// Detection failed but was expected to succeed.
		if err == ErrCannotDetectVCS && c.work == true {
			t.Errorf("Error detecting VCS from URL(%s)", u)
		}

		// Any other unexpected error.
		if err != nil && c.work == true {
			t.Errorf("Error detecting VCS from URL(%s): %s", u, err)
		}

		// The detected type must match the expectation.
		if c.work == true && ty != c.t {
			t.Errorf("Incorrect VCS type returned(%s)", u)
		}
	}
}
================================================
FILE: vendor/github.com/beorn7/perks/.gitignore
================================================
*.test
*.prof
================================================
FILE: vendor/github.com/beorn7/perks/README.md
================================================
# Perks for Go (golang.org)
Perks contains the Go package quantile that computes approximate quantiles over
an unbounded data stream within low memory and CPU bounds.
For more information and examples, see:
http://godoc.org/github.com/bmizerany/perks
A very special thank you and shout out to Graham Cormode (Rutgers University),
Flip Korn (AT&T Labs–Research), S. Muthukrishnan (Rutgers University), and
Divesh Srivastava (AT&T Labs–Research) for their research and publication of
[Effective Computation of Biased Quantiles over Data Streams](http://www.cs.rutgers.edu/~muthu/bquant.pdf)
Thank you, also:
* Armon Dadgar (@armon)
* Andrew Gerrand (@nf)
* Brad Fitzpatrick (@bradfitz)
* Keith Rarick (@kr)
FAQ:
Q: Why not move the quantile package into the project root?
A: I want to add more packages to perks later.
Copyright (C) 2013 Blake Mizerany
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
================================================
FILE: vendor/github.com/beorn7/perks/histogram/bench_test.go
================================================
package histogram
import (
"math/rand"
"testing"
)
// BenchmarkInsert10Bins measures Insert throughput on a small (10-bin)
// histogram. Setup is excluded from timing via StopTimer/StartTimer.
func BenchmarkInsert10Bins(b *testing.B) {
	b.StopTimer()
	h := New(10)
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		f := rand.ExpFloat64()
		h.Insert(f)
	}
}

// BenchmarkInsert100Bins measures Insert throughput on a larger (100-bin)
// histogram, where compression kicks in later.
func BenchmarkInsert100Bins(b *testing.B) {
	b.StopTimer()
	h := New(100)
	b.StartTimer()
	for i := 0; i < b.N; i++ {
		f := rand.ExpFloat64()
		h.Insert(f)
	}
}
================================================
FILE: vendor/github.com/beorn7/perks/histogram/histogram.go
================================================
// Package histogram provides a Go implementation of BigML's histogram package
// for Clojure/Java. It is currently experimental.
package histogram
import (
"container/heap"
"math"
"sort"
)
// Bin is a histogram bucket summarizing Count observations whose values
// add up to Sum.
type Bin struct {
	Count int
	Sum   float64
}

// Update folds x into b, accumulating both the count and the sum.
func (b *Bin) Update(x *Bin) {
	b.Count += x.Count
	b.Sum += x.Sum
}

// Mean returns the average value of the observations in the bin.
// NOTE: when Count is 0 this divides by zero and yields NaN or ±Inf.
func (b *Bin) Mean() float64 {
	return b.Sum / float64(b.Count)
}

// Bins is a collection of bins ordered by mean. It implements
// sort.Interface and, via Push/Pop, heap.Interface.
type Bins []*Bin

func (bs Bins) Len() int           { return len(bs) }
func (bs Bins) Less(i, j int) bool { return bs[i].Mean() < bs[j].Mean() }
func (bs Bins) Swap(i, j int)      { bs[i], bs[j] = bs[j], bs[i] }

// Push appends x; part of heap.Interface.
func (bs *Bins) Push(x interface{}) {
	*bs = append(*bs, x.(*Bin))
}

// Pop removes and returns the last bin; part of heap.Interface.
func (bs *Bins) Pop() interface{} {
	return bs.remove(len(*bs) - 1)
}

// remove deletes and returns the bin at index n, or nil when n is out of
// range. The previous guard (len(*bs) < n) let n == len(*bs) slip through
// and panic with an index-out-of-range; the bound is now inclusive.
func (bs *Bins) remove(n int) *Bin {
	if n < 0 || n >= len(*bs) {
		return nil
	}
	x := (*bs)[n]
	*bs = append((*bs)[:n], (*bs)[n+1:]...)
	return x
}
// Histogram is a fixed-size streaming histogram: inserts are folded into at
// most maxBins bins, merging the closest pair when the limit is exceeded.
type Histogram struct {
	res *reservoir
}

// New returns a Histogram that keeps at most maxBins bins.
func New(maxBins int) *Histogram {
	return &Histogram{res: newReservoir(maxBins)}
}

// Insert adds the observation f as a single-count bin, then compresses the
// reservoir back down to the bin limit if necessary.
func (h *Histogram) Insert(f float64) {
	h.res.insert(&Bin{1, f})
	h.res.compress()
}

// Bins returns the current bins. The returned slice is the histogram's
// internal storage, not a copy.
func (h *Histogram) Bins() Bins {
	return h.res.bins
}
// reservoir holds the histogram's bins along with the total observation
// count and the configured bin limit.
type reservoir struct {
	n       int  // total count of observations inserted so far
	maxBins int  // maximum number of bins kept after compress
	bins    Bins // bins, maintained in ascending order of mean
}

func newReservoir(maxBins int) *reservoir {
	return &reservoir{maxBins: maxBins}
}

// insert merges bin into an existing bin with an equal mean, or adds it as a
// new bin while preserving mean order.
func (r *reservoir) insert(bin *Bin) {
	r.n += bin.Count
	// Find the first existing bin whose mean is >= the new bin's mean.
	i := sort.Search(len(r.bins), func(i int) bool {
		return r.bins[i].Mean() >= bin.Mean()
	})
	if i < 0 || i == r.bins.Len() {
		// No bin with an equal-or-greater mean exists; append.
		// (sort.Search never returns a negative index, so the i < 0
		// branch is defensive.)
		// TODO(blake): Maybe use an .insert(i, bin) instead of
		// performing the extra work of a heap.Push.
		heap.Push(&r.bins, bin)
		return
	}
	// Fold into the existing bin at position i. NOTE(review): this merges
	// even when the means differ; only an exact mean match would make the
	// merge lossless.
	r.bins[i].Update(bin)
}
// compress repeatedly merges the pair of adjacent bins with the smallest gap
// between means until the bin count is within maxBins.
func (r *reservoir) compress() {
	for r.bins.Len() > r.maxBins {
		minGapIndex := -1
		minGap := math.MaxFloat64
		// Scan adjacent pairs for the smallest mean gap.
		for i := 0; i < r.bins.Len()-1; i++ {
			gap := gapWeight(r.bins[i], r.bins[i+1])
			if minGap > gap {
				minGap = gap
				minGapIndex = i
			}
		}
		// Merge the right bin of the closest pair into the left one.
		// NOTE(review): with maxBins <= 0 and a single bin the scan
		// never runs and minGapIndex stays -1, which would panic here;
		// callers always construct histograms with maxBins >= 1.
		prev := r.bins[minGapIndex]
		next := r.bins.remove(minGapIndex + 1)
		prev.Update(next)
	}
}

// gapWeight is the distance between the means of two adjacent bins
// (bins are kept ordered, so this is non-negative).
func gapWeight(prev, next *Bin) float64 {
	return next.Mean() - prev.Mean()
}
================================================
FILE: vendor/github.com/beorn7/perks/histogram/histogram_test.go
================================================
package histogram
import (
"math/rand"
"testing"
)
// TestHistogram inserts a large stream of exponentially-distributed values
// and checks two invariants: the bin count never exceeds maxBins, and the
// total observation count across bins equals the number of inserts.
func TestHistogram(t *testing.T) {
	const numPoints = 1e6
	const maxBins = 3

	h := New(maxBins)
	for i := 0; i < numPoints; i++ {
		f := rand.ExpFloat64()
		h.Insert(f)
	}

	bins := h.Bins()
	if g := len(bins); g > maxBins {
		t.Fatalf("got %d bins, wanted <= %d", g, maxBins)
	}

	for _, b := range bins {
		t.Logf("%+v", b)
	}

	// No observations may be lost by compression.
	if g := count(h.Bins()); g != numPoints {
		t.Fatalf("binned %d points, wanted %d", g, numPoints)
	}
}
// count sums the observation counts across all bins.
func count(bins Bins) int {
	total := 0
	for _, bin := range bins {
		total += bin.Count
	}
	return total
}
================================================
FILE: vendor/github.com/beorn7/perks/quantile/bench_test.go
================================================
package quantile
import (
"testing"
)
// BenchmarkInsertTargeted measures Insert on a targeted-quantile stream.
// Targets / TargetsSmallEpsilon are presumably declared in a sibling test
// file — TODO confirm.
func BenchmarkInsertTargeted(b *testing.B) {
	b.ReportAllocs()

	s := NewTargeted(Targets)
	b.ResetTimer()
	for i := float64(0); i < float64(b.N); i++ {
		s.Insert(i)
	}
}

// BenchmarkInsertTargetedSmallEpsilon is the same workload with tighter
// error bounds, which forces the stream to retain more samples.
func BenchmarkInsertTargetedSmallEpsilon(b *testing.B) {
	s := NewTargeted(TargetsSmallEpsilon)
	b.ResetTimer()
	for i := float64(0); i < float64(b.N); i++ {
		s.Insert(i)
	}
}

// BenchmarkInsertBiased measures Insert on a low-biased stream.
func BenchmarkInsertBiased(b *testing.B) {
	s := NewLowBiased(0.01)
	b.ResetTimer()
	for i := float64(0); i < float64(b.N); i++ {
		s.Insert(i)
	}
}

// BenchmarkInsertBiasedSmallEpsilon uses a much smaller epsilon, increasing
// the retained-sample count per insert.
func BenchmarkInsertBiasedSmallEpsilon(b *testing.B) {
	s := NewLowBiased(0.0001)
	b.ResetTimer()
	for i := float64(0); i < float64(b.N); i++ {
		s.Insert(i)
	}
}

// BenchmarkQuery measures Query against a pre-filled targeted stream; the
// fill is excluded from timing by ResetTimer.
func BenchmarkQuery(b *testing.B) {
	s := NewTargeted(Targets)
	for i := float64(0); i < 1e6; i++ {
		s.Insert(i)
	}
	b.ResetTimer()
	n := float64(b.N)
	for i := float64(0); i < n; i++ {
		s.Query(i / n)
	}
}

// BenchmarkQuerySmallEpsilon is the Query benchmark with tighter error
// bounds (larger retained sample set).
func BenchmarkQuerySmallEpsilon(b *testing.B) {
	s := NewTargeted(TargetsSmallEpsilon)
	for i := float64(0); i < 1e6; i++ {
		s.Insert(i)
	}
	b.ResetTimer()
	n := float64(b.N)
	for i := float64(0); i < n; i++ {
		s.Query(i / n)
	}
}
================================================
FILE: vendor/github.com/beorn7/perks/quantile/example_test.go
================================================
// +build go1.1
package quantile_test
import (
"bufio"
"fmt"
"log"
"os"
"strconv"
"time"
"github.com/beorn7/perks/quantile"
)
// Example_simple streams floats from exampledata.txt into a targeted
// quantile estimator and prints three percentiles plus the sample count.
// The "// Output:" lines below are verified by "go test" — do not edit them.
func Example_simple() {
	ch := make(chan float64)
	go sendFloats(ch)

	// Compute the 50th, 90th, and 99th percentile.
	q := quantile.NewTargeted(map[float64]float64{
		0.50: 0.005,
		0.90: 0.001,
		0.99: 0.0001,
	})
	for v := range ch {
		q.Insert(v)
	}

	fmt.Println("perc50:", q.Query(0.50))
	fmt.Println("perc90:", q.Query(0.90))
	fmt.Println("perc99:", q.Query(0.99))
	fmt.Println("count:", q.Count())
	// Output:
	// perc50: 5
	// perc90: 16
	// perc99: 223
	// count: 2388
}
// Example_mergeMultipleStreams demonstrates combining sample sets from
// several independent streams with Merge before querying. (It has no
// "// Output:" comment, so go test does not execute it as a check.)
func Example_mergeMultipleStreams() {
	// Scenario:
	// We have multiple database shards. On each shard, there is a process
	// collecting query response times from the database logs and inserting
	// them into a Stream (created via NewTargeted(0.90)), much like the
	// Simple example. These processes expose a network interface for us to
	// ask them to serialize and send us the results of their
	// Stream.Samples so we may Merge and Query them.
	//
	// NOTES:
	// * These sample sets are small, allowing us to get them
	// across the network much faster than sending the entire list of data
	// points.
	//
	// * For this to work correctly, we must supply the same quantiles
	// a priori the process collecting the samples supplied to NewTargeted,
	// even if we do not plan to query them all here.
	ch := make(chan quantile.Samples)
	getDBQuerySamples(ch)
	q := quantile.NewTargeted(map[float64]float64{0.90: 0.001})
	for samples := range ch {
		q.Merge(samples)
	}
	fmt.Println("perc90:", q.Query(0.90))
}
// Example_window shows windowed percentiles: the stream is flushed and reset
// every minute so each window's quantiles are independent. The select loop
// never terminates; this is illustrative code only.
func Example_window() {
	// Scenario: We want the 90th, 95th, and 99th percentiles for each
	// minute.
	ch := make(chan float64)
	go sendStreamValues(ch)

	tick := time.NewTicker(1 * time.Minute)
	q := quantile.NewTargeted(map[float64]float64{
		0.90: 0.001,
		0.95: 0.0005,
		0.99: 0.0001,
	})
	for {
		select {
		case t := <-tick.C:
			// Window boundary: persist the samples, then start fresh.
			flushToDB(t, q.Samples())
			q.Reset()
		case v := <-ch:
			q.Insert(v)
		}
	}
}
// sendStreamValues is an intentionally empty stub for Example_window.
func sendStreamValues(ch chan float64) {
	// Use your imagination
}

// flushToDB is an intentionally empty stub for Example_window.
func flushToDB(t time.Time, samples quantile.Samples) {
	// Use your imagination
}

// This is a stub for the above example. In reality this would hit the remote
// servers via http or something like it.
func getDBQuerySamples(ch chan quantile.Samples) {}

// sendFloats reads one float per line from exampledata.txt and sends each
// value on ch, closing ch at EOF. Any read or parse error aborts via
// log.Fatal.
func sendFloats(ch chan<- float64) {
	f, err := os.Open("exampledata.txt")
	if err != nil {
		log.Fatal(err)
	}
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		b := sc.Bytes()
		v, err := strconv.ParseFloat(string(b), 64)
		if err != nil {
			log.Fatal(err)
		}
		ch <- v
	}
	// Check for a scanner error after the loop, per bufio convention.
	if sc.Err() != nil {
		log.Fatal(sc.Err())
	}
	close(ch)
}
================================================
FILE: vendor/github.com/beorn7/perks/quantile/exampledata.txt
================================================
8
5
26
12
5
235
13
6
28
30
3
3
3
3
5
2
33
7
2
4
7
12
14
5
8
3
10
4
5
3
6
6
209
20
3
10
14
3
4
6
8
5
11
7
3
2
3
3
212
5
222
4
10
10
5
6
3
8
3
10
254
220
2
3
5
24
5
4
222
7
3
3
223
8
15
12
14
14
3
2
2
3
13
3
11
4
4
6
5
7
13
5
3
5
2
5
3
5
2
7
15
17
14
3
6
6
3
17
5
4
7
6
4
4
8
6
8
3
9
3
6
3
4
5
3
3
660
4
6
10
3
6
3
2
5
13
2
4
4
10
4
8
4
3
7
9
9
3
10
37
3
13
4
12
3
6
10
8
5
21
2
3
8
3
2
3
3
4
12
2
4
8
8
4
3
2
20
1
6
32
2
11
6
18
3
8
11
3
212
3
4
2
6
7
12
11
3
2
16
10
6
4
6
3
2
7
3
2
2
2
2
5
6
4
3
10
3
4
6
5
3
4
4
5
6
4
3
4
4
5
7
5
5
3
2
7
2
4
12
4
5
6
2
4
4
8
4
15
13
7
16
5
3
23
5
5
7
3
2
9
8
7
5
8
11
4
10
76
4
47
4
3
2
7
4
2
3
37
10
4
2
20
5
4
4
10
10
4
3
7
23
240
7
13
5
5
3
3
2
5
4
2
8
7
19
2
23
8
7
2
5
3
8
3
8
13
5
5
5
2
3
23
4
9
8
4
3
3
5
220
2
3
4
6
14
3
53
6
2
5
18
6
3
219
6
5
2
5
3
6
5
15
4
3
17
3
2
4
7
2
3
3
4
4
3
2
664
6
3
23
5
5
16
5
8
2
4
2
24
12
3
2
3
5
8
3
5
4
3
14
3
5
8
2
3
7
9
4
2
3
6
8
4
3
4
6
5
3
3
6
3
19
4
4
6
3
6
3
5
22
5
4
4
3
8
11
4
9
7
6
13
4
4
4
6
17
9
3
3
3
4
3
221
5
11
3
4
2
12
6
3
5
7
5
7
4
9
7
14
37
19
217
16
3
5
2
2
7
19
7
6
7
4
24
5
11
4
7
7
9
13
3
4
3
6
28
4
4
5
5
2
5
6
4
4
6
10
5
4
3
2
3
3
6
5
5
4
3
2
3
7
4
6
18
16
8
16
4
5
8
6
9
13
1545
6
215
6
5
6
3
45
31
5
2
2
4
3
3
2
5
4
3
5
7
7
4
5
8
5
4
749
2
31
9
11
2
11
5
4
4
7
9
11
4
5
4
7
3
4
6
2
15
3
4
3
4
3
5
2
13
5
5
3
3
23
4
4
5
7
4
13
2
4
3
4
2
6
2
7
3
5
5
3
29
5
4
4
3
10
2
3
79
16
6
6
7
7
3
5
5
7
4
3
7
9
5
6
5
9
6
3
6
4
17
2
10
9
3
6
2
3
21
22
5
11
4
2
17
2
224
2
14
3
4
4
2
4
4
4
4
5
3
4
4
10
2
6
3
3
5
7
2
7
5
6
3
218
2
2
5
2
6
3
5
222
14
6
33
3
2
5
3
3
3
9
5
3
3
2
7
4
3
4
3
5
6
5
26
4
13
9
7
3
221
3
3
4
4
4
4
2
18
5
3
7
9
6
8
3
10
3
11
9
5
4
17
5
5
6
6
3
2
4
12
17
6
7
218
4
2
4
10
3
5
15
3
9
4
3
3
6
29
3
3
4
5
5
3
8
5
6
6
7
5
3
5
3
29
2
31
5
15
24
16
5
207
4
3
3
2
15
4
4
13
5
5
4
6
10
2
7
8
4
6
20
5
3
4
3
12
12
5
17
7
3
3
3
6
10
3
5
25
80
4
9
3
2
11
3
3
2
3
8
7
5
5
19
5
3
3
12
11
2
6
5
5
5
3
3
3
4
209
14
3
2
5
19
4
4
3
4
14
5
6
4
13
9
7
4
7
10
2
9
5
7
2
8
4
6
5
5
222
8
7
12
5
216
3
4
4
6
3
14
8
7
13
4
3
3
3
3
17
5
4
3
33
6
6
33
7
5
3
8
7
5
2
9
4
2
233
24
7
4
8
10
3
4
15
2
16
3
3
13
12
7
5
4
207
4
2
4
27
15
2
5
2
25
6
5
5
6
13
6
18
6
4
12
225
10
7
5
2
2
11
4
14
21
8
10
3
5
4
232
2
5
5
3
7
17
11
6
6
23
4
6
3
5
4
2
17
3
6
5
8
3
2
2
14
9
4
4
2
5
5
3
7
6
12
6
10
3
6
2
2
19
5
4
4
9
2
4
13
3
5
6
3
6
5
4
9
6
3
5
7
3
6
6
4
3
10
6
3
221
3
5
3
6
4
8
5
3
6
4
4
2
54
5
6
11
3
3
4
4
4
3
7
3
11
11
7
10
6
13
223
213
15
231
7
3
7
228
2
3
4
4
5
6
7
4
13
3
4
5
3
6
4
6
7
2
4
3
4
3
3
6
3
7
3
5
18
5
6
8
10
3
3
3
2
4
2
4
4
5
6
6
4
10
13
3
12
5
12
16
8
4
19
11
2
4
5
6
8
5
6
4
18
10
4
2
216
6
6
6
2
4
12
8
3
11
5
6
14
5
3
13
4
5
4
5
3
28
6
3
7
219
3
9
7
3
10
6
3
4
19
5
7
11
6
15
19
4
13
11
3
7
5
10
2
8
11
2
6
4
6
24
6
3
3
3
3
6
18
4
11
4
2
5
10
8
3
9
5
3
4
5
6
2
5
7
4
4
14
6
4
4
5
5
7
2
4
3
7
3
3
6
4
5
4
4
4
3
3
3
3
8
14
2
3
5
3
2
4
5
3
7
3
3
18
3
4
4
5
7
3
3
3
13
5
4
8
211
5
5
3
5
2
5
4
2
655
6
3
5
11
2
5
3
12
9
15
11
5
12
217
2
6
17
3
3
207
5
5
4
5
9
3
2
8
5
4
3
2
5
12
4
14
5
4
2
13
5
8
4
225
4
3
4
5
4
3
3
6
23
9
2
6
7
233
4
4
6
18
3
4
6
3
4
4
2
3
7
4
13
227
4
3
5
4
2
12
9
17
3
7
14
6
4
5
21
4
8
9
2
9
25
16
3
6
4
7
8
5
2
3
5
4
3
3
5
3
3
3
2
3
19
2
4
3
4
2
3
4
4
2
4
3
3
3
2
6
3
17
5
6
4
3
13
5
3
3
3
4
9
4
2
14
12
4
5
24
4
3
37
12
11
21
3
4
3
13
4
2
3
15
4
11
4
4
3
8
3
4
4
12
8
5
3
3
4
2
220
3
5
223
3
3
3
10
3
15
4
241
9
7
3
6
6
23
4
13
7
3
4
7
4
9
3
3
4
10
5
5
1
5
24
2
4
5
5
6
14
3
8
2
3
5
13
13
3
5
2
3
15
3
4
2
10
4
4
4
5
5
3
5
3
4
7
4
27
3
6
4
15
3
5
6
6
5
4
8
3
9
2
6
3
4
3
7
4
18
3
11
3
3
8
9
7
24
3
219
7
10
4
5
9
12
2
5
4
4
4
3
3
19
5
8
16
8
6
22
3
23
3
242
9
4
3
3
5
7
3
3
5
8
3
7
5
14
8
10
3
4
3
7
4
6
7
4
10
4
3
11
3
7
10
3
13
6
8
12
10
5
7
9
3
4
7
7
10
8
30
9
19
4
3
19
15
4
13
3
215
223
4
7
4
8
17
16
3
7
6
5
5
4
12
3
7
4
4
13
4
5
2
5
6
5
6
6
7
10
18
23
9
3
3
6
5
2
4
2
7
3
3
2
5
5
14
10
224
6
3
4
3
7
5
9
3
6
4
2
5
11
4
3
3
2
8
4
7
4
10
7
3
3
18
18
17
3
3
3
4
5
3
3
4
12
7
3
11
13
5
4
7
13
5
4
11
3
12
3
6
4
4
21
4
6
9
5
3
10
8
4
6
4
4
6
5
4
8
6
4
6
4
4
5
9
6
3
4
2
9
3
18
2
4
3
13
3
6
6
8
7
9
3
2
16
3
4
6
3
2
33
22
14
4
9
12
4
5
6
3
23
9
4
3
5
5
3
4
5
3
5
3
10
4
5
5
8
4
4
6
8
5
4
3
4
6
3
3
3
5
9
12
6
5
9
3
5
3
2
2
2
18
3
2
21
2
5
4
6
4
5
10
3
9
3
2
10
7
3
6
6
4
4
8
12
7
3
7
3
3
9
3
4
5
4
4
5
5
10
15
4
4
14
6
227
3
14
5
216
22
5
4
2
2
6
3
4
2
9
9
4
3
28
13
11
4
5
3
3
2
3
3
5
3
4
3
5
23
26
3
4
5
6
4
6
3
5
5
3
4
3
2
2
2
7
14
3
6
7
17
2
2
15
14
16
4
6
7
13
6
4
5
6
16
3
3
28
3
6
15
3
9
2
4
6
3
3
22
4
12
6
7
2
5
4
10
3
16
6
9
2
5
12
7
5
5
5
5
2
11
9
17
4
3
11
7
3
5
15
4
3
4
211
8
7
5
4
7
6
7
6
3
6
5
6
5
3
4
4
26
4
6
10
4
4
3
2
3
3
4
5
9
3
9
4
4
5
5
8
2
4
2
3
8
4
11
19
5
8
6
3
5
6
12
3
2
4
16
12
3
4
4
8
6
5
6
6
219
8
222
6
16
3
13
19
5
4
3
11
6
10
4
7
7
12
5
3
3
5
6
10
3
8
2
5
4
7
2
4
4
2
12
9
6
4
2
40
2
4
10
4
223
4
2
20
6
7
24
5
4
5
2
20
16
6
5
13
2
3
3
19
3
2
4
5
6
7
11
12
5
6
7
7
3
5
3
5
3
14
3
4
4
2
11
1
7
3
9
6
11
12
5
8
6
221
4
2
12
4
3
15
4
5
226
7
218
7
5
4
5
18
4
5
9
4
4
2
9
18
18
9
5
6
6
3
3
7
3
5
4
4
4
12
3
6
31
5
4
7
3
6
5
6
5
11
2
2
11
11
6
7
5
8
7
10
5
23
7
4
3
5
34
2
5
23
7
3
6
8
4
4
4
2
5
3
8
5
4
8
25
2
3
17
8
3
4
8
7
3
15
6
5
7
21
9
5
6
6
5
3
2
3
10
3
6
3
14
7
4
4
8
7
8
2
6
12
4
213
6
5
21
8
2
5
23
3
11
2
3
6
25
2
3
6
7
6
6
4
4
6
3
17
9
7
6
4
3
10
7
2
3
3
3
11
8
3
7
6
4
14
36
3
4
3
3
22
13
21
4
2
7
4
4
17
15
3
7
11
2
4
7
6
209
6
3
2
2
24
4
9
4
3
3
3
29
2
2
4
3
3
5
4
6
3
3
2
4
================================================
FILE: vendor/github.com/beorn7/perks/quantile/stream.go
================================================
// Package quantile computes approximate quantiles over an unbounded data
// stream within low memory and CPU bounds.
//
// A small amount of accuracy is traded to achieve the above properties.
//
// Multiple streams can be merged before calling Query to generate a single set
// of results. This is meaningful when the streams represent the same type of
// data. See Merge and Samples.
//
// For more detailed information about the algorithm used, see:
//
// Effective Computation of Biased Quantiles over Data Streams
//
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
package quantile
import (
"math"
"sort"
)
// Sample holds an observed value and meta information for compression. JSON
// tags have been added for convenience.
type Sample struct {
	Value float64 `json:",string"` // the observed value
	Width float64 `json:",string"` // number of observations this sample represents
	Delta float64 `json:",string"` // allowable rank error, per the CKMS paper
}

// Samples represents a slice of samples. It implements sort.Interface.
type Samples []Sample

func (a Samples) Len() int           { return len(a) }
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
func (a Samples) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }

// invariant computes the maximum permitted error at rank r for a stream —
// the ƒ function from the biased-quantiles paper.
type invariant func(s *stream, r float64) float64
// NewLowBiased returns an initialized Stream for low-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the lower ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewLowBiased(epsilon float64) *Stream {
	// Allowed error grows linearly with rank, so low ranks are kept tight.
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * r
	}
	return newStream(ƒ)
}
// NewHighBiased returns an initialized Stream for high-biased quantiles
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
// error guarantees can still be given even for the higher ranks of the data
// distribution.
//
// The provided epsilon is a relative error, i.e. the true quantile of a value
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
// properties.
func NewHighBiased(epsilon float64) *Stream {
	// Mirror of NewLowBiased: allowed error shrinks as rank approaches n,
	// keeping the high ranks tight.
	ƒ := func(s *stream, r float64) float64 {
		return 2 * epsilon * (s.n - r)
	}
	return newStream(ƒ)
}
// NewTargeted returns an initialized Stream concerned with a particular set of
// quantile values that are supplied a priori. Knowing these a priori reduces
// space and computation time. The targets map maps the desired quantiles to
// their absolute errors, i.e. the true quantile of a value returned by a query
// is guaranteed to be within (Quantile±Epsilon).
//
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
func NewTargeted(targets map[float64]float64) *Stream {
	// The invariant is the minimum over all targets of the per-target
	// allowed error at rank r (below-target and above-target ranks use
	// different formulas, per the CKMS paper).
	ƒ := func(s *stream, r float64) float64 {
		var m = math.MaxFloat64
		var f float64
		for quantile, epsilon := range targets {
			if quantile*s.n <= r {
				f = (2 * epsilon * r) / quantile
			} else {
				f = (2 * epsilon * (s.n - r)) / (1 - quantile)
			}
			if f < m {
				m = f
			}
		}
		return m
	}
	return newStream(ƒ)
}
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
// design. Take care when using across multiple goroutines.
type Stream struct {
	*stream      // the compressed summary; receives batches via merge
	b      Samples // buffer of unmerged samples, flushed when full
	sorted bool    // whether b is currently sorted
}

func newStream(ƒ invariant) *Stream {
	x := &stream{ƒ: ƒ}
	// The 500-element buffer batches inserts before each merge.
	return &Stream{x, make(Samples, 0, 500), true}
}

// Insert inserts v into the stream.
func (s *Stream) Insert(v float64) {
	s.insert(Sample{Value: v, Width: 1})
}

func (s *Stream) insert(sample Sample) {
	s.b = append(s.b, sample)
	s.sorted = false
	// Buffer full: merge it into the compressed summary.
	if len(s.b) == cap(s.b) {
		s.flush()
	}
}
// Query returns the computed qth percentiles value. If s was created with
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
// will return an unspecified result.
//
// An empty stream returns 0.
func (s *Stream) Query(q float64) float64 {
	if !s.flushed() {
		// Fast path when there hasn't been enough data for a flush;
		// this also yields better accuracy for small sets of data.
		l := len(s.b)
		if l == 0 {
			return 0
		}
		// Index the sorted buffer directly at rank floor(l*q),
		// clamped to a zero-based index.
		i := int(float64(l) * q)
		if i > 0 {
			i -= 1
		}
		s.maybeSort()
		return s.b[i].Value
	}
	// Merge any buffered samples first so the answer reflects all inserts.
	s.flush()
	return s.stream.query(q)
}
// Merge merges samples into the underlying streams samples. This is handy when
// merging multiple streams from separate threads, database shards, etc.
//
// ATTENTION: This method is broken and does not yield correct results. The
// underlying algorithm is not capable of merging streams correctly.
func (s *Stream) Merge(samples Samples) {
	// stream.merge requires ascending input, so sort first.
	sort.Sort(samples)
	s.stream.merge(samples)
}

// Reset reinitializes and clears the list reusing the samples buffer memory.
func (s *Stream) Reset() {
	s.stream.reset()
	s.b = s.b[:0]
}

// Samples returns stream samples held by s.
func (s *Stream) Samples() Samples {
	if !s.flushed() {
		// NOTE: this returns the internal buffer itself, not a copy.
		return s.b
	}
	s.flush()
	return s.stream.samples()
}

// Count returns the total number of samples observed in the stream
// since initialization.
func (s *Stream) Count() int {
	return len(s.b) + s.stream.count()
}

// flush sorts the buffer, merges it into the compressed summary, and
// empties the buffer while keeping its capacity.
func (s *Stream) flush() {
	s.maybeSort()
	s.stream.merge(s.b)
	s.b = s.b[:0]
}

// maybeSort sorts the buffer only when it is known to be out of order.
func (s *Stream) maybeSort() {
	if !s.sorted {
		s.sorted = true
		sort.Sort(s.b)
	}
}

// flushed reports whether any samples have reached the compressed summary.
func (s *Stream) flushed() bool {
	return len(s.stream.l) > 0
}
// stream is the compressed quantile summary. It keeps a bounded list of
// samples in ascending Value order, pruned according to the invariant ƒ.
type stream struct {
	// n is the total width (count) of all samples observed so far.
	n float64
	// l is the retained sample list, sorted by Value.
	l []Sample
	// ƒ computes the tolerated error at a given rank.
	ƒ invariant
}

// reset clears the summary, reusing the backing array of l.
func (s *stream) reset() {
	s.l = s.l[:0]
	s.n = 0
}

// insert adds a single value of width 1 to the summary.
func (s *stream) insert(v float64) {
	s.merge(Samples{{v, 1, 0}})
}
// merge folds samples (which must be sorted ascending by Value) into the
// summary via a single interleaved pass, then compresses the result.
func (s *stream) merge(samples Samples) {
	// TODO(beorn7): This tries to merge not only individual samples, but
	// whole summaries. The paper doesn't mention merging summaries at
	// all. Unittests show that the merging is inaccurate. Find out how to
	// do merges properly.
	var r float64
	i := 0
	for _, sample := range samples {
		// Advance through the existing list until we pass sample's
		// insertion point; r accumulates the rank (total width) seen.
		for ; i < len(s.l); i++ {
			c := s.l[i]
			if c.Value > sample.Value {
				// Insert at position i.
				s.l = append(s.l, Sample{})
				copy(s.l[i+1:], s.l[i:])
				s.l[i] = Sample{
					sample.Value,
					sample.Width,
					math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
					// TODO(beorn7): How to calculate delta correctly?
				}
				i++
				goto inserted
			}
			r += c.Width
		}
		// sample is larger than everything retained: append with delta 0.
		s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
		i++
	inserted:
		s.n += sample.Width
		r += sample.Width
	}
	s.compress()
}

// count returns the total observed sample width as an int.
func (s *stream) count() int {
	return int(s.n)
}

// query walks the summary and returns the value whose rank range covers the
// target rank for quantile q, per the Cormode et al. query procedure.
func (s *stream) query(q float64) float64 {
	t := math.Ceil(q * s.n)
	t += math.Ceil(s.ƒ(s, t) / 2)
	p := s.l[0]
	var r float64
	for _, c := range s.l[1:] {
		r += p.Width
		if r+c.Width+c.Delta > t {
			return p.Value
		}
		p = c
	}
	return p.Value
}
// compress merges adjacent samples whose combined width still satisfies the
// error invariant ƒ, shrinking the summary. It scans from the tail toward
// the head, tracking the rank r of the element under consideration.
func (s *stream) compress() {
	if len(s.l) < 2 {
		return
	}
	x := s.l[len(s.l)-1]
	xi := len(s.l) - 1
	r := s.n - 1 - x.Width
	for i := len(s.l) - 2; i >= 0; i-- {
		c := s.l[i]
		if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
			// Merge c into its successor x.
			x.Width += c.Width
			s.l[xi] = x
			// Remove element at i.
			copy(s.l[i:], s.l[i+1:])
			s.l = s.l[:len(s.l)-1]
			xi -= 1
		} else {
			x = c
			xi = i
		}
		r -= c.Width
	}
}

// samples returns a copy of the retained sample list.
func (s *stream) samples() Samples {
	samples := make(Samples, len(s.l))
	copy(samples, s.l)
	return samples
}
================================================
FILE: vendor/github.com/beorn7/perks/quantile/stream_test.go
================================================
package quantile
import (
"math"
"math/rand"
"sort"
"testing"
)
var (
	// Targets maps desired quantiles to the absolute error tolerated at each.
	Targets = map[float64]float64{
		0.01: 0.001,
		0.10: 0.01,
		0.50: 0.05,
		0.90: 0.01,
		0.99: 0.001,
	}
	// TargetsSmallEpsilon is Targets with tighter (10x smaller) errors.
	TargetsSmallEpsilon = map[float64]float64{
		0.01: 0.0001,
		0.10: 0.001,
		0.50: 0.005,
		0.90: 0.001,
		0.99: 0.0001,
	}
	// Quantile sets exercised by the biased-stream tests.
	LowQuantiles  = []float64{0.01, 0.1, 0.5}
	HighQuantiles = []float64{0.99, 0.9, 0.5}
)

// RelativeEpsilon is the relative error used for the biased-stream tests.
const RelativeEpsilon = 0.01
// verifyPercsWithAbsoluteEpsilon checks that each Targets quantile queried from
// s falls within the absolute-epsilon rank window of the sorted data a.
func verifyPercsWithAbsoluteEpsilon(t *testing.T, a []float64, s *Stream) {
	sort.Float64s(a)
	for quantile, epsilon := range Targets {
		n := float64(len(a))
		k := int(quantile * n)
		lower := int((quantile - epsilon) * n)
		if lower < 1 {
			lower = 1
		}
		upper := int(math.Ceil((quantile + epsilon) * n))
		if upper > len(a) {
			upper = len(a)
		}
		w, min, max := a[k-1], a[lower-1], a[upper-1]
		if g := s.Query(quantile); g < min || g > max {
			t.Errorf("q=%f: want %v [%f,%f], got %v", quantile, w, min, max, g)
		}
	}
}

// verifyLowPercsWithRelativeEpsilon checks low quantiles against a
// relative-error rank window (accuracy biased toward the low end).
func verifyLowPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) {
	sort.Float64s(a)
	for _, qu := range LowQuantiles {
		n := float64(len(a))
		k := int(qu * n)
		lowerRank := int((1 - RelativeEpsilon) * qu * n)
		upperRank := int(math.Ceil((1 + RelativeEpsilon) * qu * n))
		w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1]
		if g := s.Query(qu); g < min || g > max {
			t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g)
		}
	}
}

// verifyHighPercsWithRelativeEpsilon checks high quantiles against a
// relative-error rank window (accuracy biased toward the high end).
func verifyHighPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) {
	sort.Float64s(a)
	for _, qu := range HighQuantiles {
		n := float64(len(a))
		k := int(qu * n)
		lowerRank := int((1 - (1+RelativeEpsilon)*(1-qu)) * n)
		upperRank := int(math.Ceil((1 - (1-RelativeEpsilon)*(1-qu)) * n))
		w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1]
		if g := s.Query(qu); g < min || g > max {
			t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g)
		}
	}
}
// populateStream feeds ~1e5 normally distributed values (with 5% squared
// outliers) into s and returns the same values for reference checks.
func populateStream(s *Stream) []float64 {
	a := make([]float64, 0, 1e5+100)
	for i := 0; i < cap(a); i++ {
		v := rand.NormFloat64()
		// Add 5% asymmetric outliers.
		if i%20 == 0 {
			v = v*v + 1
		}
		s.Insert(v)
		a = append(a, v)
	}
	return a
}

// TestTargetedQuery verifies NewTargeted against absolute error bounds.
func TestTargetedQuery(t *testing.T) {
	rand.Seed(42)
	s := NewTargeted(Targets)
	a := populateStream(s)
	verifyPercsWithAbsoluteEpsilon(t, a, s)
}

// TestLowBiasedQuery verifies NewLowBiased against relative error bounds.
func TestLowBiasedQuery(t *testing.T) {
	rand.Seed(42)
	s := NewLowBiased(RelativeEpsilon)
	a := populateStream(s)
	verifyLowPercsWithRelativeEpsilon(t, a, s)
}

// TestHighBiasedQuery verifies NewHighBiased against relative error bounds.
func TestHighBiasedQuery(t *testing.T) {
	rand.Seed(42)
	s := NewHighBiased(RelativeEpsilon)
	a := populateStream(s)
	verifyHighPercsWithRelativeEpsilon(t, a, s)
}
// BrokenTestTargetedMerge is broken, see Merge doc comment.
// (Renamed without the Test prefix so `go test` does not run it.)
func BrokenTestTargetedMerge(t *testing.T) {
	rand.Seed(42)
	s1 := NewTargeted(Targets)
	s2 := NewTargeted(Targets)
	a := populateStream(s1)
	a = append(a, populateStream(s2)...)
	s1.Merge(s2.Samples())
	verifyPercsWithAbsoluteEpsilon(t, a, s1)
}

// BrokenTestLowBiasedMerge is broken, see Merge doc comment.
func BrokenTestLowBiasedMerge(t *testing.T) {
	rand.Seed(42)
	s1 := NewLowBiased(RelativeEpsilon)
	s2 := NewLowBiased(RelativeEpsilon)
	a := populateStream(s1)
	a = append(a, populateStream(s2)...)
	s1.Merge(s2.Samples())
	verifyLowPercsWithRelativeEpsilon(t, a, s2)
}

// BrokenTestHighBiasedMerge is broken, see Merge doc comment.
func BrokenTestHighBiasedMerge(t *testing.T) {
	rand.Seed(42)
	s1 := NewHighBiased(RelativeEpsilon)
	s2 := NewHighBiased(RelativeEpsilon)
	a := populateStream(s1)
	a = append(a, populateStream(s2)...)
	s1.Merge(s2.Samples())
	verifyHighPercsWithRelativeEpsilon(t, a, s2)
}

// TestUncompressed checks exact answers while all samples still fit in the
// insertion buffer (no compression has occurred yet).
func TestUncompressed(t *testing.T) {
	q := NewTargeted(Targets)
	for i := 100; i > 0; i-- {
		q.Insert(float64(i))
	}
	if g := q.Count(); g != 100 {
		t.Errorf("want count 100, got %d", g)
	}
	// Before compression, Query should have 100% accuracy.
	for quantile := range Targets {
		w := quantile * 100
		if g := q.Query(quantile); g != w {
			t.Errorf("want %f, got %f", w, g)
		}
	}
}

// TestUncompressedSamples checks Samples() length before any flush.
func TestUncompressedSamples(t *testing.T) {
	q := NewTargeted(map[float64]float64{0.99: 0.001})
	for i := 1; i <= 100; i++ {
		q.Insert(float64(i))
	}
	if g := q.Samples().Len(); g != 100 {
		t.Errorf("want count 100, got %d", g)
	}
}

// TestUncompressedOne checks a single-sample stream returns that sample.
func TestUncompressedOne(t *testing.T) {
	q := NewTargeted(map[float64]float64{0.99: 0.01})
	q.Insert(3.14)
	if g := q.Query(0.90); g != 3.14 {
		t.Error("want PI, got", g)
	}
}

// TestDefaults checks that querying an empty stream yields 0.
func TestDefaults(t *testing.T) {
	if g := NewTargeted(map[float64]float64{0.99: 0.001}).Query(0.99); g != 0 {
		t.Errorf("want 0, got %f", g)
	}
}
================================================
FILE: vendor/github.com/beorn7/perks/topk/topk.go
================================================
package topk
import (
"sort"
)
// http://www.cs.ucsb.edu/research/tech_reports/reports/2005-23.pdf
// Element is a tracked value together with its (approximate) occurrence count.
type Element struct {
	Value string
	Count int
}

// Samples is a sortable collection of Elements, ordered by ascending Count.
type Samples []*Element

// Len implements sort.Interface.
func (sm Samples) Len() int {
	return len(sm)
}

// Less implements sort.Interface, ordering by ascending Count.
func (sm Samples) Less(i, j int) bool {
	return sm[i].Count < sm[j].Count
}

// Swap implements sort.Interface.
func (sm Samples) Swap(i, j int) {
	sm[i], sm[j] = sm[j], sm[i]
}

// Stream estimates the top-k most frequent elements of a stream using the
// space-saving algorithm (see the paper referenced above). It is not
// thread-safe.
type Stream struct {
	k   int
	mon map[string]*Element
	// the minimum Element
	min *Element
}

// New returns a Stream estimating the top k most frequent elements.
func New(k int) *Stream {
	s := new(Stream)
	s.k = k
	s.mon = make(map[string]*Element)
	s.min = &Element{}
	// Track k+1 so that less frequent items contend for that spot,
	// resulting in k being more accurate.
	return s
}

// Insert observes one occurrence of x.
func (s *Stream) Insert(x string) {
	s.insert(&Element{x, 1})
}

// Merge folds the elements of sm (e.g. another Stream's Query result) into s.
func (s *Stream) Merge(sm Samples) {
	for _, e := range sm {
		s.insert(e)
	}
}

// insert records in, either bumping an already-monitored element, admitting a
// new one while capacity remains, or evicting the current minimum element and
// inheriting its count (the space-saving over-estimate).
func (s *Stream) insert(in *Element) {
	e := s.mon[in.Value]
	if e != nil {
		e.Count++
	} else {
		if len(s.mon) < s.k+1 {
			e = &Element{in.Value, in.Count}
			s.mon[in.Value] = e
		} else {
			// Evict the minimum element: reuse its slot and count.
			e = s.min
			delete(s.mon, e.Value)
			e.Value = in.Value
			e.Count += in.Count
			// BUG FIX: the evicted-and-recycled element must be
			// re-registered under its new value, otherwise it is
			// lost to Query and the monitored set shrinks forever.
			s.mon[in.Value] = e
		}
	}
	if e.Count < s.min.Count {
		s.min = e
	}
}

// Query returns the estimated top-k elements, most frequent first.
func (s *Stream) Query() Samples {
	var sm Samples
	for _, e := range s.mon {
		sm = append(sm, e)
	}
	sort.Sort(sort.Reverse(sm))
	if len(sm) < s.k {
		return sm
	}
	return sm[:s.k]
}
================================================
FILE: vendor/github.com/beorn7/perks/topk/topk_test.go
================================================
package topk
import (
"fmt"
"math/rand"
"sort"
"testing"
)
// TestTopK merges three independently populated streams and checks that the
// merged top-10 matches the exact top-10 computed from a reference count map.
func TestTopK(t *testing.T) {
	stream := New(10)
	ss := []*Stream{New(10), New(10), New(10)}
	m := make(map[string]int)
	for _, s := range ss {
		for i := 0; i < 1e6; i++ {
			v := fmt.Sprintf("%x", int8(rand.ExpFloat64()))
			s.Insert(v)
			m[v]++
		}
		stream.Merge(s.Query())
	}
	var sm Samples
	for x, s := range m {
		sm = append(sm, &Element{x, s})
	}
	sort.Sort(sort.Reverse(sm))
	g := stream.Query()
	if len(g) != 10 {
		t.Fatalf("got %d, want 10", len(g))
	}
	for i, e := range g {
		if sm[i].Value != e.Value {
			t.Errorf("at %d: want %q, got %q", i, sm[i].Value, e.Value)
		}
	}
}

// TestQuery checks the Query result length as distinct values are inserted
// into a k=2 stream (capped at k once enough values are seen).
func TestQuery(t *testing.T) {
	queryTests := []struct {
		value    string
		expected int
	}{
		{"a", 1},
		{"b", 2},
		{"c", 2},
	}
	stream := New(2)
	for _, tt := range queryTests {
		stream.Insert(tt.value)
		if n := len(stream.Query()); n != tt.expected {
			t.Errorf("want %d, got %d", tt.expected, n)
		}
	}
}
================================================
FILE: vendor/github.com/blang/semver/LICENSE
================================================
The MIT License
Copyright (c) 2014 Benedikt Lang
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
================================================
FILE: vendor/github.com/blang/semver/README.md
================================================
semver for golang [](https://drone.io/github.com/blang/semver/latest) [](https://godoc.org/github.com/blang/semver) [](https://coveralls.io/r/blang/semver?branch=master)
======
semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`.
Usage
-----
```bash
$ go get github.com/blang/semver
```
Note: Always vendor your dependencies or fix on a specific version tag.
```go
import "github.com/blang/semver"
v1, err := semver.Make("1.0.0-beta")
v2, err := semver.Make("2.0.0-beta")
v1.Compare(v2)
```
Also check the [GoDocs](http://godoc.org/github.com/blang/semver).
Why should I use this lib?
-----
- Fully spec compatible
- No reflection
- No regex
- Fully tested (Coverage >99%)
- Readable parsing/validation errors
- Fast (See [Benchmarks](#benchmarks))
- Only Stdlib
- Uses values instead of pointers
- Many features, see below
Features
-----
- Parsing and validation at all levels
- Comparator-like comparisons
- Compare Helper Methods
- InPlace manipulation
- Sortable (implements sort.Interface)
- database/sql compatible (sql.Scanner/Valuer)
- encoding/json compatible (json.Marshaler/Unmarshaler)
Example
-----
Have a look at full examples in [examples/main.go](examples/main.go)
```go
import "github.com/blang/semver"
v, err := semver.Make("0.0.1-alpha.preview+123.github")
fmt.Printf("Major: %d\n", v.Major)
fmt.Printf("Minor: %d\n", v.Minor)
fmt.Printf("Patch: %d\n", v.Patch)
fmt.Printf("Pre: %s\n", v.Pre)
fmt.Printf("Build: %s\n", v.Build)
// Prerelease versions array
if len(v.Pre) > 0 {
fmt.Println("Prerelease versions:")
for i, pre := range v.Pre {
fmt.Printf("%d: %q\n", i, pre)
}
}
// Build meta data array
if len(v.Build) > 0 {
fmt.Println("Build meta data:")
for i, build := range v.Build {
fmt.Printf("%d: %q\n", i, build)
}
}
v001, err := semver.Make("0.0.1")
// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE
v001.GT(v) == true
v.LT(v001) == true
v.GTE(v) == true
v.LTE(v) == true
// Or use v.Compare(v2) for comparisons (-1, 0, 1):
v001.Compare(v) == 1
v.Compare(v001) == -1
v.Compare(v) == 0
// Manipulate Version in place:
v.Pre[0], err = semver.NewPRVersion("beta")
if err != nil {
fmt.Printf("Error parsing pre release version: %q", err)
}
fmt.Println("\nValidate versions:")
v.Build[0] = "?"
err = v.Validate()
if err != nil {
fmt.Printf("Validation failed: %s\n", err)
}
```
Benchmarks
-----
BenchmarkParseSimple 5000000 328 ns/op 49 B/op 1 allocs/op
BenchmarkParseComplex 1000000 2105 ns/op 263 B/op 7 allocs/op
BenchmarkParseAverage 1000000 1301 ns/op 168 B/op 4 allocs/op
BenchmarkStringSimple 10000000 130 ns/op 5 B/op 1 allocs/op
BenchmarkStringLarger 5000000 280 ns/op 32 B/op 2 allocs/op
BenchmarkStringComplex 3000000 512 ns/op 80 B/op 3 allocs/op
BenchmarkStringAverage 5000000 387 ns/op 47 B/op 2 allocs/op
BenchmarkValidateSimple 500000000 7.92 ns/op 0 B/op 0 allocs/op
BenchmarkValidateComplex 2000000 923 ns/op 0 B/op 0 allocs/op
BenchmarkValidateAverage 5000000 452 ns/op 0 B/op 0 allocs/op
BenchmarkCompareSimple 100000000 11.2 ns/op 0 B/op 0 allocs/op
BenchmarkCompareComplex 50000000 40.9 ns/op 0 B/op 0 allocs/op
BenchmarkCompareAverage 50000000 43.8 ns/op 0 B/op 0 allocs/op
BenchmarkSort 5000000 436 ns/op 259 B/op 2 allocs/op
See benchmark cases at [semver_test.go](semver_test.go)
Motivation
-----
I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex, which I don't like.
Contribution
-----
Feel free to make a pull request. For bigger changes, create an issue first to discuss it.
License
-----
See [LICENSE](LICENSE) file.
================================================
FILE: vendor/github.com/blang/semver/examples/main.go
================================================
package main
import (
"fmt"
"github.com/blang/semver"
)
// main demonstrates the semver package end to end: parsing, field access,
// comparisons, in-place manipulation, and validation.
func main() {
	v, err := semver.Parse("0.0.1-alpha.preview.222+123.github")
	if err != nil {
		fmt.Printf("Error while parsing (not valid): %q", err)
	}
	fmt.Printf("Version to string: %q\n", v)
	fmt.Printf("Major: %d\n", v.Major)
	fmt.Printf("Minor: %d\n", v.Minor)
	fmt.Printf("Patch: %d\n", v.Patch)
	// Prerelease versions
	if len(v.Pre) > 0 {
		fmt.Println("Prerelease versions:")
		for i, pre := range v.Pre {
			fmt.Printf("%d: %q\n", i, pre)
		}
	}
	// Build meta data
	if len(v.Build) > 0 {
		fmt.Println("Build meta data:")
		for i, build := range v.Build {
			fmt.Printf("%d: %q\n", i, build)
		}
	}
	// Make == Parse (Value), New for Pointer
	// NOTE(review): err from Make is not checked here; fine for a demo
	// with a known-valid literal.
	v001, err := semver.Make("0.0.1")
	fmt.Println("\nUse Version.Compare for comparisons (-1, 0, 1):")
	fmt.Printf("%q is greater than %q: Compare == %d\n", v001, v, v001.Compare(v))
	fmt.Printf("%q is less than %q: Compare == %d\n", v, v001, v.Compare(v001))
	fmt.Printf("%q is equal to %q: Compare == %d\n", v, v, v.Compare(v))
	fmt.Println("\nUse comparison helpers returning booleans:")
	fmt.Printf("%q is greater than %q: %t\n", v001, v, v001.GT(v))
	fmt.Printf("%q is greater than equal %q: %t\n", v001, v, v001.GTE(v))
	fmt.Printf("%q is greater than equal %q: %t\n", v, v, v.GTE(v))
	fmt.Printf("%q is less than %q: %t\n", v, v001, v.LT(v001))
	fmt.Printf("%q is less than equal %q: %t\n", v, v001, v.LTE(v001))
	fmt.Printf("%q is less than equal %q: %t\n", v, v, v.LTE(v))
	fmt.Println("\nManipulate Version in place:")
	v.Pre[0], err = semver.NewPRVersion("beta")
	if err != nil {
		fmt.Printf("Error parsing pre release version: %q", err)
	}
	fmt.Printf("Version to string: %q\n", v)
	fmt.Println("\nCompare Prerelease versions:")
	pre1, _ := semver.NewPRVersion("123")
	pre2, _ := semver.NewPRVersion("alpha")
	pre3, _ := semver.NewPRVersion("124")
	fmt.Printf("%q is less than %q: Compare == %d\n", pre1, pre2, pre1.Compare(pre2))
	fmt.Printf("%q is greater than %q: Compare == %d\n", pre3, pre1, pre3.Compare(pre1))
	fmt.Printf("%q is equal to %q: Compare == %d\n", pre1, pre1, pre1.Compare(pre1))
	fmt.Println("\nValidate versions:")
	// Intentionally invalid build metadata to demonstrate Validate.
	v.Build[0] = "?"
	err = v.Validate()
	if err != nil {
		fmt.Printf("Validation failed: %s\n", err)
	}
	fmt.Println("Create valid build meta data:")
	b1, _ := semver.NewBuildVersion("build123")
	v.Build[0] = b1
	fmt.Printf("Version with new build version %q\n", v)
	_, err = semver.NewBuildVersion("build?123")
	if err != nil {
		fmt.Printf("Create build version failed: %s\n", err)
	}
}
================================================
FILE: vendor/github.com/blang/semver/json.go
================================================
package semver
import (
"encoding/json"
)
// MarshalJSON implements the encoding/json.Marshaler interface.
// The version is encoded as its canonical string form, e.g. "1.2.3-beta+001".
func (v Version) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.String())
}
// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
// data must be a JSON string containing a valid semver version.
func (v *Version) UnmarshalJSON(data []byte) error {
	var versionString string
	if err := json.Unmarshal(data, &versionString); err != nil {
		return err
	}
	// Assign unconditionally: on a parse failure the receiver is zeroed,
	// matching the original named-return behavior.
	parsed, err := Parse(versionString)
	*v = parsed
	return err
}
================================================
FILE: vendor/github.com/blang/semver/json_test.go
================================================
package semver
import (
"encoding/json"
"strconv"
"testing"
)
// TestJSONMarshal checks that a Version marshals to its quoted string form.
func TestJSONMarshal(t *testing.T) {
	versionString := "3.1.4-alpha.1.5.9+build.2.6.5"
	v, err := Parse(versionString)
	if err != nil {
		t.Fatal(err)
	}
	versionJSON, err := json.Marshal(v)
	if err != nil {
		t.Fatal(err)
	}
	quotedVersionString := strconv.Quote(versionString)
	if string(versionJSON) != quotedVersionString {
		t.Fatalf("JSON marshaled semantic version not equal: expected %q, got %q", quotedVersionString, string(versionJSON))
	}
}

// TestJSONUnmarshal checks round-tripping from a quoted JSON string, and that
// an invalid version string yields an unmarshal error.
func TestJSONUnmarshal(t *testing.T) {
	versionString := "3.1.4-alpha.1.5.9+build.2.6.5"
	quotedVersionString := strconv.Quote(versionString)
	var v Version
	if err := json.Unmarshal([]byte(quotedVersionString), &v); err != nil {
		t.Fatal(err)
	}
	if v.String() != versionString {
		t.Fatalf("JSON unmarshaled semantic version not equal: expected %q, got %q", versionString, v.String())
	}
	badVersionString := strconv.Quote("3.1.4.1.5.9.2.6.5-other-digits-of-pi")
	if err := json.Unmarshal([]byte(badVersionString), &v); err == nil {
		t.Fatal("expected JSON unmarshal error, got nil")
	}
}
================================================
FILE: vendor/github.com/blang/semver/semver.go
================================================
package semver
import (
"errors"
"fmt"
"strconv"
"strings"
)
// Character classes used to validate semver identifiers.
const (
	numbers  string = "0123456789"
	alphas          = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-"
	alphanum        = alphas + numbers
)

// SpecVersion is the latest fully supported spec version of semver
var SpecVersion = Version{
	Major: 2,
	Minor: 0,
	Patch: 0,
}

// Version represents a semver compatible version
type Version struct {
	Major uint64
	Minor uint64
	Patch uint64
	Pre   []PRVersion
	Build []string // No Precedence: build metadata is ignored when comparing
}
// String renders v in canonical semver form:
// Major.Minor.Patch[-pre1.pre2...][+build1.build2...].
func (v Version) String() string {
	var sb strings.Builder
	sb.WriteString(strconv.FormatUint(v.Major, 10))
	sb.WriteByte('.')
	sb.WriteString(strconv.FormatUint(v.Minor, 10))
	sb.WriteByte('.')
	sb.WriteString(strconv.FormatUint(v.Patch, 10))
	// Prerelease identifiers: '-' before the first, '.' between the rest.
	for i, pre := range v.Pre {
		if i == 0 {
			sb.WriteByte('-')
		} else {
			sb.WriteByte('.')
		}
		sb.WriteString(pre.String())
	}
	// Build metadata: '+' before the first, '.' between the rest.
	for i, build := range v.Build {
		if i == 0 {
			sb.WriteByte('+')
		} else {
			sb.WriteByte('.')
		}
		sb.WriteString(build)
	}
	return sb.String()
}
// Equals checks if v is equal to o.
func (v Version) Equals(o Version) bool {
	return v.Compare(o) == 0
}

// EQ checks if v is equal to o.
func (v Version) EQ(o Version) bool {
	return v.Compare(o) == 0
}

// NE checks if v is not equal to o.
func (v Version) NE(o Version) bool {
	return v.Compare(o) != 0
}

// GT checks if v is greater than o.
func (v Version) GT(o Version) bool {
	return v.Compare(o) > 0
}

// GTE checks if v is greater than or equal to o.
func (v Version) GTE(o Version) bool {
	return v.Compare(o) >= 0
}

// GE checks if v is greater than or equal to o.
func (v Version) GE(o Version) bool {
	return v.Compare(o) >= 0
}

// LT checks if v is less than o.
func (v Version) LT(o Version) bool {
	return v.Compare(o) < 0
}

// LTE checks if v is less than or equal to o.
func (v Version) LTE(o Version) bool {
	return v.Compare(o) <= 0
}

// LE checks if v is less than or equal to o.
func (v Version) LE(o Version) bool {
	return v.Compare(o) <= 0
}
// Compare compares Versions v to o:
// -1 == v is less than o
// 0 == v is equal to o
// 1 == v is greater than o
func (v Version) Compare(o Version) int {
	// Numeric components take precedence, most significant first.
	if v.Major != o.Major {
		if v.Major > o.Major {
			return 1
		}
		return -1
	}
	if v.Minor != o.Minor {
		if v.Minor > o.Minor {
			return 1
		}
		return -1
	}
	if v.Patch != o.Patch {
		if v.Patch > o.Patch {
			return 1
		}
		return -1
	}
	// Quick comparison if a version has no prerelease versions
	// (a release outranks any prerelease of the same numeric triple).
	if len(v.Pre) == 0 && len(o.Pre) == 0 {
		return 0
	} else if len(v.Pre) == 0 && len(o.Pre) > 0 {
		return 1
	} else if len(v.Pre) > 0 && len(o.Pre) == 0 {
		return -1
	}
	// Compare prerelease identifiers pairwise, left to right.
	i := 0
	for ; i < len(v.Pre) && i < len(o.Pre); i++ {
		if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 {
			continue
		} else if comp == 1 {
			return 1
		} else {
			return -1
		}
	}
	// If all pr versions are the equal but one has further prversion, this one greater
	if i == len(v.Pre) && i == len(o.Pre) {
		return 0
	} else if i == len(v.Pre) && i < len(o.Pre) {
		return -1
	} else {
		return 1
	}
}
// Validate validates v and returns error in case
// any prerelease identifier or build metadata element is empty or contains
// characters outside the allowed alphanumeric set.
func (v Version) Validate() error {
	// Major, Minor, Patch already validated using uint64
	for _, pre := range v.Pre {
		if !pre.IsNum { //Numeric prerelease versions already uint64
			if len(pre.VersionStr) == 0 {
				return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr)
			}
			if !containsOnly(pre.VersionStr, alphanum) {
				return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr)
			}
		}
	}
	for _, build := range v.Build {
		if len(build) == 0 {
			return fmt.Errorf("Build meta data can not be empty %q", build)
		}
		if !containsOnly(build, alphanum) {
			return fmt.Errorf("Invalid character(s) found in build meta data %q", build)
		}
	}
	return nil
}
// New is an alias for Parse and returns a pointer, parses version string and
// returns a validated Version or error. Note the pointer is returned even on
// error (pointing at the zero Version), matching the original behavior.
func New(s string) (*Version, error) {
	v, err := Parse(s)
	return &v, err
}

// Make is an alias for Parse, parses version string and returns a validated
// Version or error.
func Make(s string) (Version, error) {
	return Parse(s)
}
// Parse parses version string and returns a validated Version or error
func Parse(s string) (Version, error) {
	if len(s) == 0 {
		return Version{}, errors.New("Version string empty")
	}
	// Split into major.minor.(patch+pr+meta)
	parts := strings.SplitN(s, ".", 3)
	if len(parts) != 3 {
		return Version{}, errors.New("No Major.Minor.Patch elements found")
	}
	// Major
	if !containsOnly(parts[0], numbers) {
		return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0])
	}
	if hasLeadingZeroes(parts[0]) {
		return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0])
	}
	major, err := strconv.ParseUint(parts[0], 10, 64)
	if err != nil {
		return Version{}, err
	}
	// Minor
	if !containsOnly(parts[1], numbers) {
		return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1])
	}
	if hasLeadingZeroes(parts[1]) {
		return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1])
	}
	minor, err := strconv.ParseUint(parts[1], 10, 64)
	if err != nil {
		return Version{}, err
	}
	v := Version{}
	v.Major = major
	v.Minor = minor
	var build, prerelease []string
	patchStr := parts[2]
	// Strip build metadata first ('+...'), then prerelease ('-...'),
	// leaving only the numeric patch component. Order matters: '+'
	// terminates the prerelease section per the spec.
	if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 {
		build = strings.Split(patchStr[buildIndex+1:], ".")
		patchStr = patchStr[:buildIndex]
	}
	if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 {
		prerelease = strings.Split(patchStr[preIndex+1:], ".")
		patchStr = patchStr[:preIndex]
	}
	if !containsOnly(patchStr, numbers) {
		return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr)
	}
	if hasLeadingZeroes(patchStr) {
		return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr)
	}
	patch, err := strconv.ParseUint(patchStr, 10, 64)
	if err != nil {
		return Version{}, err
	}
	v.Patch = patch
	// Prerelease
	for _, prstr := range prerelease {
		parsedPR, err := NewPRVersion(prstr)
		if err != nil {
			return Version{}, err
		}
		v.Pre = append(v.Pre, parsedPR)
	}
	// Build meta data
	for _, str := range build {
		if len(str) == 0 {
			return Version{}, errors.New("Build meta data is empty")
		}
		if !containsOnly(str, alphanum) {
			return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str)
		}
		v.Build = append(v.Build, str)
	}
	return v, nil
}
// MustParse is like Parse but panics if the version cannot be parsed.
// Intended for package-level initialization with known-valid literals.
func MustParse(s string) Version {
	v, err := Parse(s)
	if err == nil {
		return v
	}
	panic(`semver: Parse(` + s + `): ` + err.Error())
}
// PRVersion represents a PreRelease Version
type PRVersion struct {
	// VersionStr holds the identifier when it is alphanumeric.
	VersionStr string
	// VersionNum holds the identifier when it is purely numeric.
	VersionNum uint64
	// IsNum selects which of the two fields above is in use.
	IsNum bool
}

// NewPRVersion creates a new valid prerelease version
func NewPRVersion(s string) (PRVersion, error) {
	if len(s) == 0 {
		return PRVersion{}, errors.New("Prerelease is empty")
	}
	v := PRVersion{}
	// Purely numeric identifiers are stored as uint64 (and compared
	// numerically); anything else must be alphanumeric.
	if containsOnly(s, numbers) {
		if hasLeadingZeroes(s) {
			return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s)
		}
		num, err := strconv.ParseUint(s, 10, 64)
		// Might never be hit, but just in case
		if err != nil {
			return PRVersion{}, err
		}
		v.VersionNum = num
		v.IsNum = true
	} else if containsOnly(s, alphanum) {
		v.VersionStr = s
		v.IsNum = false
	} else {
		return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s)
	}
	return v, nil
}
// IsNumeric checks if prerelease-version is numeric
func (v PRVersion) IsNumeric() bool {
	return v.IsNum
}

// Compare compares two PreRelease Versions v and o:
// -1 == v is less than o
// 0 == v is equal to o
// 1 == v is greater than o
//
// Numeric identifiers always rank below alphanumeric ones, per the spec.
func (v PRVersion) Compare(o PRVersion) int {
	switch {
	case v.IsNum && !o.IsNum:
		return -1
	case !v.IsNum && o.IsNum:
		return 1
	case v.IsNum: // both numeric
		switch {
		case v.VersionNum > o.VersionNum:
			return 1
		case v.VersionNum < o.VersionNum:
			return -1
		}
		return 0
	default: // both alphanumeric: lexical byte order
		switch {
		case v.VersionStr > o.VersionStr:
			return 1
		case v.VersionStr < o.VersionStr:
			return -1
		}
		return 0
	}
}

// String renders the prerelease identifier, numeric or alphanumeric.
func (v PRVersion) String() string {
	if !v.IsNum {
		return v.VersionStr
	}
	return strconv.FormatUint(v.VersionNum, 10)
}
// containsOnly reports whether every rune of s is a member of set.
// The empty string trivially satisfies any set.
func containsOnly(s string, set string) bool {
	for _, r := range s {
		if !strings.ContainsRune(set, r) {
			return false
		}
	}
	return true
}

// hasLeadingZeroes reports whether s is a multi-character string starting
// with '0' (e.g. "01"), which semver forbids for numeric identifiers.
// A lone "0" is valid.
func hasLeadingZeroes(s string) bool {
	return strings.HasPrefix(s, "0") && len(s) > 1
}
// NewBuildVersion creates a new valid build version
// (a non-empty string containing only alphanumerics and hyphens).
func NewBuildVersion(s string) (string, error) {
	if len(s) == 0 {
		return "", errors.New("Buildversion is empty")
	}
	if !containsOnly(s, alphanum) {
		return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s)
	}
	return s, nil
}
================================================
FILE: vendor/github.com/blang/semver/semver_test.go
================================================
package semver
import (
"testing"
)
// prstr builds an alphanumeric prerelease identifier for test fixtures.
func prstr(s string) PRVersion {
	return PRVersion{s, 0, false}
}

// prnum builds a numeric prerelease identifier for test fixtures.
func prnum(i uint64) PRVersion {
	return PRVersion{"", i, true}
}

// formatTest pairs a Version with its expected canonical string.
type formatTest struct {
	v      Version
	result string
}

var formatTests = []formatTest{
	{Version{1, 2, 3, nil, nil}, "1.2.3"},
	{Version{0, 0, 1, nil, nil}, "0.0.1"},
	{Version{0, 0, 1, []PRVersion{prstr("alpha"), prstr("preview")}, []string{"123", "456"}}, "0.0.1-alpha.preview+123.456"},
	{Version{1, 2, 3, []PRVersion{prstr("alpha"), prnum(1)}, []string{"123", "456"}}, "1.2.3-alpha.1+123.456"},
	{Version{1, 2, 3, []PRVersion{prstr("alpha"), prnum(1)}, nil}, "1.2.3-alpha.1"},
	{Version{1, 2, 3, nil, []string{"123", "456"}}, "1.2.3+123.456"},
	// Prereleases and build metadata hyphens
	{Version{1, 2, 3, []PRVersion{prstr("alpha"), prstr("b-eta")}, []string{"123", "b-uild"}}, "1.2.3-alpha.b-eta+123.b-uild"},
	{Version{1, 2, 3, nil, []string{"123", "b-uild"}}, "1.2.3+123.b-uild"},
	{Version{1, 2, 3, []PRVersion{prstr("alpha"), prstr("b-eta")}, nil}, "1.2.3-alpha.b-eta"},
}
// TestStringer checks String() output against the fixture table.
func TestStringer(t *testing.T) {
	for _, test := range formatTests {
		if res := test.v.String(); res != test.result {
			t.Errorf("Stringer, expected %q but got %q", test.result, res)
		}
	}
}

// TestParse round-trips the fixture strings through Parse and Validate.
func TestParse(t *testing.T) {
	for _, test := range formatTests {
		if v, err := Parse(test.result); err != nil {
			t.Errorf("Error parsing %q: %q", test.result, err)
		} else if comp := v.Compare(test.v); comp != 0 {
			t.Errorf("Parsing, expected %q but got %q, comp: %d ", test.v, v, comp)
		} else if err := v.Validate(); err != nil {
			t.Errorf("Error validating parsed version %q: %q", test.v, err)
		}
	}
}

// TestMustParse checks MustParse accepts a valid version without panicking.
func TestMustParse(t *testing.T) {
	_ = MustParse("32.2.1-alpha")
}

// TestMustParse_panic checks MustParse panics on an invalid version.
func TestMustParse_panic(t *testing.T) {
	defer func() {
		if recover() == nil {
			t.Errorf("Should have panicked")
		}
	}()
	_ = MustParse("invalid version")
}

// TestValidate checks every fixture version validates cleanly.
func TestValidate(t *testing.T) {
	for _, test := range formatTests {
		if err := test.v.Validate(); err != nil {
			t.Errorf("Error validating %q: %q", test.v, err)
		}
	}
}
type compareTest struct {
v1 Version
v2 Version
result int
}
// compareTests exercises Version.Compare across major/minor/patch
// differences, the precedence examples from the SemVer specification,
// and the rule that build metadata is ignored when comparing.
var compareTests = []compareTest{
	{Version{1, 0, 0, nil, nil}, Version{1, 0, 0, nil, nil}, 0},
	{Version{2, 0, 0, nil, nil}, Version{1, 0, 0, nil, nil}, 1},
	{Version{0, 1, 0, nil, nil}, Version{0, 1, 0, nil, nil}, 0},
	{Version{0, 2, 0, nil, nil}, Version{0, 1, 0, nil, nil}, 1},
	{Version{0, 0, 1, nil, nil}, Version{0, 0, 1, nil, nil}, 0},
	{Version{0, 0, 2, nil, nil}, Version{0, 0, 1, nil, nil}, 1},
	{Version{1, 2, 3, nil, nil}, Version{1, 2, 3, nil, nil}, 0},
	{Version{2, 2, 4, nil, nil}, Version{1, 2, 4, nil, nil}, 1},
	{Version{1, 3, 3, nil, nil}, Version{1, 2, 3, nil, nil}, 1},
	{Version{1, 2, 4, nil, nil}, Version{1, 2, 3, nil, nil}, 1},

	// Spec Examples #11
	{Version{1, 0, 0, nil, nil}, Version{2, 0, 0, nil, nil}, -1},
	{Version{2, 0, 0, nil, nil}, Version{2, 1, 0, nil, nil}, -1},
	{Version{2, 1, 0, nil, nil}, Version{2, 1, 1, nil, nil}, -1},

	// Spec Examples #9
	{Version{1, 0, 0, nil, nil}, Version{1, 0, 0, []PRVersion{prstr("alpha")}, nil}, 1},
	{Version{1, 0, 0, []PRVersion{prstr("alpha")}, nil}, Version{1, 0, 0, []PRVersion{prstr("alpha"), prnum(1)}, nil}, -1},
	{Version{1, 0, 0, []PRVersion{prstr("alpha"), prnum(1)}, nil}, Version{1, 0, 0, []PRVersion{prstr("alpha"), prstr("beta")}, nil}, -1},
	{Version{1, 0, 0, []PRVersion{prstr("alpha"), prstr("beta")}, nil}, Version{1, 0, 0, []PRVersion{prstr("beta")}, nil}, -1},
	{Version{1, 0, 0, []PRVersion{prstr("beta")}, nil}, Version{1, 0, 0, []PRVersion{prstr("beta"), prnum(2)}, nil}, -1},
	{Version{1, 0, 0, []PRVersion{prstr("beta"), prnum(2)}, nil}, Version{1, 0, 0, []PRVersion{prstr("beta"), prnum(11)}, nil}, -1},
	{Version{1, 0, 0, []PRVersion{prstr("beta"), prnum(11)}, nil}, Version{1, 0, 0, []PRVersion{prstr("rc"), prnum(1)}, nil}, -1},
	{Version{1, 0, 0, []PRVersion{prstr("rc"), prnum(1)}, nil}, Version{1, 0, 0, nil, nil}, -1},

	// Ignore Build metadata
	{Version{1, 0, 0, nil, []string{"1", "2", "3"}}, Version{1, 0, 0, nil, nil}, 0},
}
// TestCompare runs every fixture in compareTests in both directions:
// v1.Compare(v2) must yield the expected result and the reversed call
// its negation.
func TestCompare(t *testing.T) {
	for _, tc := range compareTests {
		got := tc.v1.Compare(tc.v2)
		if got != tc.result {
			t.Errorf("Comparing %q : %q, expected %d but got %d", tc.v1, tc.v2, tc.result, got)
		}

		// Antisymmetry: swapping the operands negates the result.
		rev := tc.v2.Compare(tc.v1)
		if rev != -tc.result {
			t.Errorf("Comparing %q : %q, expected %d but got %d", tc.v2, tc.v1, -tc.result, rev)
		}
	}
}
// wrongformatTest holds an invalid version string and, optionally, a
// hand-built Version value that must fail Validate for the same reason.
type wrongformatTest struct {
	v   *Version
	str string
}
// wrongformatTests lists strings that must be rejected by Parse:
// missing components, non-numeric components, out-of-range numbers,
// leading zeroes, and malformed pre-release/build-metadata parts.
var wrongformatTests = []wrongformatTest{
	{nil, ""},
	{nil, "."},
	{nil, "1."},
	{nil, ".1"},
	{nil, "a.b.c"},
	{nil, "1.a.b"},
	{nil, "1.1.a"},
	{nil, "1.a.1"},
	{nil, "a.1.1"},
	{nil, ".."},
	{nil, "1.."},
	{nil, "1.1."},
	{nil, "1..1"},
	{nil, "1.1.+123"},
	{nil, "1.1.-beta"},
	{nil, "-1.1.1"},
	{nil, "1.-1.1"},
	{nil, "1.1.-1"},

	// giant numbers (overflow uint64)
	{nil, "20000000000000000000.1.1"},
	{nil, "1.20000000000000000000.1"},
	{nil, "1.1.20000000000000000000"},
	{nil, "1.1.1-20000000000000000000"},

	// Leading zeroes
	{nil, "01.1.1"},
	{nil, "001.1.1"},
	{nil, "1.01.1"},
	{nil, "1.001.1"},
	{nil, "1.1.01"},
	{nil, "1.1.001"},
	{nil, "1.1.1-01"},
	{nil, "1.1.1-001"},
	{nil, "1.1.1-beta.01"},
	{nil, "1.1.1-beta.001"},

	// illegal characters in pre-release / build metadata
	{&Version{0, 0, 0, []PRVersion{prstr("!")}, nil}, "0.0.0-!"},
	{&Version{0, 0, 0, nil, []string{"!"}}, "0.0.0+!"},

	// empty prversion
	{&Version{0, 0, 0, []PRVersion{prstr(""), prstr("alpha")}, nil}, "0.0.0-.alpha"},

	// empty build meta data
	{&Version{0, 0, 0, []PRVersion{prstr("alpha")}, []string{""}}, "0.0.0-alpha+"},
	{&Version{0, 0, 0, []PRVersion{prstr("alpha")}, []string{"test", ""}}, "0.0.0-alpha+test."},
}
// TestWrongFormat checks that Parse rejects every malformed fixture
// and that the hand-built Version values (where present) fail Validate.
func TestWrongFormat(t *testing.T) {
	for _, tc := range wrongformatTests {
		res, err := Parse(tc.str)
		if err == nil {
			t.Errorf("Parsing wrong format version %q, expected error but got %q", tc.str, res)
		}

		if tc.v == nil {
			continue
		}
		if err := tc.v.Validate(); err == nil {
			t.Errorf("Validating wrong format version %q (%q), expected error", tc.v, tc.str)
		}
	}
}
// TestCompareHelper covers every comparison convenience wrapper around
// Compare (EQ/Equals/NE, GT/GTE/GE, LT/LTE/LE) using a pre-release
// version and the corresponding final release.
func TestCompareHelper(t *testing.T) {
	pre := Version{1, 0, 0, []PRVersion{prstr("alpha")}, nil}
	rel := Version{1, 0, 0, nil, nil}

	// Reflexive comparisons on the pre-release version.
	if !pre.EQ(pre) {
		t.Errorf("%q should be equal to %q", pre, pre)
	}
	if !pre.Equals(pre) {
		t.Errorf("%q should be equal to %q", pre, pre)
	}
	if !rel.NE(pre) {
		t.Errorf("%q should not be equal to %q", rel, pre)
	}
	if !pre.GTE(pre) {
		t.Errorf("%q should be greater than or equal to %q", pre, pre)
	}
	if !pre.LTE(pre) {
		t.Errorf("%q should be less than or equal to %q", pre, pre)
	}

	// A pre-release sorts strictly below its final release.
	if !pre.LT(rel) {
		t.Errorf("%q should be less than %q", pre, rel)
	}
	if !pre.LTE(rel) {
		t.Errorf("%q should be less than or equal %q", pre, rel)
	}
	if !pre.LE(rel) {
		t.Errorf("%q should be less than or equal %q", pre, rel)
	}
	if !rel.GT(pre) {
		t.Errorf("%q should be greater than %q", rel, pre)
	}
	if !rel.GTE(pre) {
		t.Errorf("%q should be greater than or equal %q", rel, pre)
	}
	if !rel.GE(pre) {
		t.Errorf("%q should be greater than or equal %q", rel, pre)
	}
}
// TestPreReleaseVersions checks NewPRVersion for both a numeric
// identifier ("123") and an alphanumeric one ("alpha").
func TestPreReleaseVersions(t *testing.T) {
	num, err := NewPRVersion("123")
	if !num.IsNumeric() {
		t.Errorf("Expected numeric prversion, got %q", num)
	}
	if num.VersionNum != 123 {
		t.Error("Wrong prversion number")
	}
	if err != nil {
		t.Errorf("Not expected error %q", err)
	}

	alpha, err := NewPRVersion("alpha")
	if alpha.IsNumeric() {
		t.Errorf("Expected non-numeric prversion, got %q", alpha)
	}
	if alpha.VersionStr != "alpha" {
		t.Error("Wrong prversion string")
	}
	if err != nil {
		t.Errorf("Not expected error %q", err)
	}
}
func TestBuildMetaDataVersions(t *testing.T) {
_, err := NewBuildVersion("123")
if err != nil {
t.Errorf("Unexpected error %q", err)
}
_, err = NewBuildVersion("build")
if err != nil {
t.Errorf("Unexpected error %q", err)
}
_, err = NewBuildVersion("test?")
if err == nil {
t.Error("Expected error, got none")
}
_, err = NewBuildVersion("")
if err == nil {
t.Error("Expected error, got none")
}
}
// TestNewHelper checks the New constructor: it must return a non-nil
// *Version that compares equal to the parsed components.
func TestNewHelper(t *testing.T) {
	v, err := New("1.2.3")
	if err != nil {
		t.Fatalf("Unexpected error %q", err)
	}

	// Unlike Make, New returns a pointer.
	if v == nil {
		t.Fatal("Version is nil")
	}

	want := Version{1, 2, 3, nil, nil}
	if v.Compare(want) != 0 {
		t.Fatal("Unexpected comparison problem")
	}
}
// TestMakeHelper checks the Make constructor, the value-returning
// counterpart of New.
func TestMakeHelper(t *testing.T) {
	v, err := Make("1.2.3")
	if err != nil {
		t.Fatalf("Unexpected error %q", err)
	}

	want := Version{1, 2, 3, nil, nil}
	if v.Compare(want) != 0 {
		t.Fatal("Unexpected comparison problem")
	}
}
// BenchmarkParseSimple measures Parse on a minimal "major.minor.patch"
// version string.
func BenchmarkParseSimple(b *testing.B) {
	const VERSION = "0.0.1"
	b.ReportAllocs()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		Parse(VERSION)
	}
}

// BenchmarkParseComplex measures Parse on a version carrying both
// pre-release identifiers and build metadata.
func BenchmarkParseComplex(b *testing.B) {
	const VERSION = "0.0.1-alpha.preview+123.456"
	b.ReportAllocs()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		Parse(VERSION)
	}
}

// BenchmarkParseAverage cycles through all formatTests fixtures so the
// timing averages over a mix of input shapes.
func BenchmarkParseAverage(b *testing.B) {
	l := len(formatTests)
	b.ReportAllocs()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		Parse(formatTests[n%l].result)
	}
}
// BenchmarkStringSimple measures Version.String on a minimal version.
// Parsing happens before the timer is reset, so only String is timed.
func BenchmarkStringSimple(b *testing.B) {
	const VERSION = "0.0.1"
	v, _ := Parse(VERSION)
	b.ReportAllocs()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		v.String()
	}
}

// BenchmarkStringLarger measures String on multi-digit components.
func BenchmarkStringLarger(b *testing.B) {
	const VERSION = "11.15.2012"
	v, _ := Parse(VERSION)
	b.ReportAllocs()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		v.String()
	}
}

// BenchmarkStringComplex measures String on a version with pre-release
// identifiers and build metadata.
func BenchmarkStringComplex(b *testing.B) {
	const VERSION = "0.0.1-alpha.preview+123.456"
	v, _ := Parse(VERSION)
	b.ReportAllocs()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		v.String()
	}
}

// BenchmarkStringAverage cycles through the pre-built formatTests
// versions to average String over a mix of shapes.
func BenchmarkStringAverage(b *testing.B) {
	l := len(formatTests)
	b.ReportAllocs()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		formatTests[n%l].v.String()
	}
}
// BenchmarkValidateSimple measures Version.Validate on a minimal
// version (parsed outside the timed region).
func BenchmarkValidateSimple(b *testing.B) {
	const VERSION = "0.0.1"
	v, _ := Parse(VERSION)
	b.ReportAllocs()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		v.Validate()
	}
}

// BenchmarkValidateComplex measures Validate on a version with
// pre-release identifiers and build metadata.
func BenchmarkValidateComplex(b *testing.B) {
	const VERSION = "0.0.1-alpha.preview+123.456"
	v, _ := Parse(VERSION)
	b.ReportAllocs()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		v.Validate()
	}
}

// BenchmarkValidateAverage cycles through the formatTests versions to
// average Validate over a mix of shapes.
func BenchmarkValidateAverage(b *testing.B) {
	l := len(formatTests)
	b.ReportAllocs()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		formatTests[n%l].v.Validate()
	}
}
// BenchmarkCompareSimple measures Version.Compare against itself for a
// minimal version (parsed outside the timed region).
func BenchmarkCompareSimple(b *testing.B) {
	const VERSION = "0.0.1"
	v, _ := Parse(VERSION)
	b.ReportAllocs()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		v.Compare(v)
	}
}

// BenchmarkCompareComplex measures Compare on a version with
// pre-release identifiers and build metadata.
func BenchmarkCompareComplex(b *testing.B) {
	const VERSION = "0.0.1-alpha.preview+123.456"
	v, _ := Parse(VERSION)
	b.ReportAllocs()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		v.Compare(v)
	}
}

// BenchmarkCompareAverage cycles through the compareTests pairs to
// average Compare over a mix of shapes.
func BenchmarkCompareAverage(b *testing.B) {
	l := len(compareTests)
	b.ReportAllocs()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		// Fixed: dropped the redundant double parentheses around the
		// argument ("Compare((…))"), which gofmt flags.
		compareTests[n%l].v1.Compare(compareTests[n%l].v2)
	}
}
================================================
FILE: vendor/github.com/blang/semver/sort.go
================================================
package semver
import (
"sort"
)
// Versions represents multiple versions.
// It implements sort.Interface so a slice of versions can be ordered
// by semantic-version precedence.
type Versions []Version

// Len returns length of version collection
func (s Versions) Len() int {
	return len(s)
}

// Swap swaps two versions inside the collection by its indices
func (s Versions) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

// Less checks if version at index i is less than version at index j
// (strict semantic-version precedence via Version.LT).
func (s Versions) Less(i, j int) bool {
	return s[i].LT(s[j])
}

// Sort sorts a slice of versions in place, ascending by precedence.
func Sort(versions []Version) {
	sort.Sort(Versions(versions))
}
================================================
FILE: vendor/github.com/blang/semver/sort_test.go
================================================
package semver
import (
"reflect"
"testing"
)
// TestSort checks that Sort orders a shuffled slice of versions
// ascending by semantic-version precedence.
func TestSort(t *testing.T) {
	v100, _ := Parse("1.0.0")
	v010, _ := Parse("0.1.0")
	v001, _ := Parse("0.0.1")

	got := []Version{v010, v100, v001}
	Sort(got)

	want := []Version{v001, v010, v100}
	if !reflect.DeepEqual(got, want) {
		t.Fatalf("Sort returned wrong order: %s", got)
	}
}
// BenchmarkSort measures Sort on a three-element slice. A fresh slice
// is built each iteration because Sort mutates its argument.
func BenchmarkSort(b *testing.B) {
	v100, _ := Parse("1.0.0")
	v010, _ := Parse("0.1.0")
	v001, _ := Parse("0.0.1")
	b.ReportAllocs()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		Sort([]Version{v010, v100, v001})
	}
}
================================================
FILE: vendor/github.com/blang/semver/sql.go
================================================
package semver
import (
"database/sql/driver"
"fmt"
)
// Scan implements the database/sql.Scanner interface.
//
// It accepts the stored value as either a string or a []byte, parses it
// as a semantic version and, on success, overwrites *v with the result.
// Unsupported source types and malformed version strings are reported
// as errors; *v is left unchanged in both cases.
func (v *Version) Scan(src interface{}) (err error) {
	var str string
	switch src := src.(type) {
	case string:
		str = src
	case []byte:
		str = string(src)
	default:
		return fmt.Errorf("Version.Scan: cannot convert %T to string.", src)
	}

	// Assign to the outer err instead of shadowing it: the original
	// `if t, err := Parse(str); err == nil { ... }` swallowed parse
	// failures, making Scan return nil for malformed input.
	t, err := Parse(str)
	if err != nil {
		return err
	}
	*v = t

	return nil
}
// Value implements the database/sql/driver.Valuer interface.
// The version is stored in its canonical string representation.
func (v Version) Value() (driver.Value, error) {
	return v.String(), nil
}
================================================
FILE: vendor/github.com/blang/semver/sql_test.go
================================================
package semver
import (
"testing"
)
// scanTest pairs a raw database value with whether Version.Scan should
// fail on it and, if not, the canonical string Value must return.
type scanTest struct {
	val         interface{}
	shouldError bool
	expected    string
}
// scanTests: string and []byte sources are accepted; every other type
// (int, float, bool) must be rejected by Scan.
var scanTests = []scanTest{
	{"1.2.3", false, "1.2.3"},
	{[]byte("1.2.3"), false, "1.2.3"},
	{7, true, ""},
	{7e4, true, ""},
	{true, true, ""},
}
// TestScanString drives Version.Scan with each scanTests fixture:
// string and []byte sources must round-trip through Value, while any
// other source type must produce an error.
func TestScanString(t *testing.T) {
	for _, tc := range scanTests {
		s := &Version{}
		err := s.Scan(tc.val)
		if tc.shouldError {
			if err == nil {
				t.Fatalf("Scan did not return an error on %v (%T)", tc.val, tc.val)
			}
		} else {
			if err != nil {
				// Fixed: the original passed tc.val where the error
				// belonged, so the actual failure reason was never
				// printed.
				t.Fatalf("Scan returned an unexpected error: %s on %v (%T)", err, tc.val, tc.val)
			}
			if val, _ := s.Value(); val != tc.expected {
				t.Errorf("Wrong Value returned, expected %q, got %q", tc.expected, val)
			}
		}
	}
}
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/.gitignore
================================================
*.coverprofile
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/.travis.yml
================================================
language: go
go:
- 1.4.1
install:
- go get -t -v ./...
- go install github.com/onsi/ginkgo/ginkgo
script:
- export PATH=$HOME/gopath/bin:$PATH
- ginkgo -r -failOnPending -randomizeAllSpecs -race
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/LICENSE
================================================
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/README.md
================================================
[](https://travis-ci.org/cloudfoundry-incubator/candiedyaml)
candiedyaml
===========
YAML for Go
A YAML 1.1 parser with support for YAML 1.2 features
Usage
-----
```go
package myApp
import (
"github.com/cloudfoundry-incubator/candiedyaml"
"fmt"
"os"
)
func main() {
file, err := os.Open("path/to/some/file.yml")
if err != nil {
println("File does not exist:", err.Error())
os.Exit(1)
}
defer file.Close()
document := new(interface{})
decoder := candiedyaml.NewDecoder(file)
err = decoder.Decode(document)
if err != nil {
println("Failed to decode document:", err.Error())
}
println("parsed yml into interface:", fmt.Sprintf("%#v", document))
fileToWrite, err := os.Create("path/to/some/new/file.yml")
if err != nil {
println("Failed to open file for writing:", err.Error())
os.Exit(1)
}
defer fileToWrite.Close()
encoder := candiedyaml.NewEncoder(fileToWrite)
err = encoder.Encode(document)
if err != nil {
println("Failed to encode document:", err.Error())
os.Exit(1)
}
return
}
```
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/api.go
================================================
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"io"
)
/*
 * Create a new parser object.
 */
func yaml_parser_initialize(parser *yaml_parser_t) bool {
	// Reset to a zero parser with the two input buffers pre-allocated
	// to their configured capacities (length 0, so they start empty).
	*parser = yaml_parser_t{
		raw_buffer: make([]byte, 0, INPUT_RAW_BUFFER_SIZE),
		buffer:     make([]byte, 0, INPUT_BUFFER_SIZE),
	}

	return true
}

/*
 * Destroy a parser object.
 */
func yaml_parser_delete(parser *yaml_parser_t) {
	// Overwriting with the zero value releases all references; the GC
	// does the rest.
	*parser = yaml_parser_t{}
}

/*
 * String read handler.
 *
 * Copies the next chunk of parser.input into buffer, advancing
 * parser.input_pos, and reports io.EOF once the input is exhausted.
 */
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (int, error) {
	if parser.input_pos == len(parser.input) {
		return 0, io.EOF
	}

	n := copy(buffer, parser.input[parser.input_pos:])
	parser.input_pos += n
	return n, nil
}

/*
 * File read handler.
 */
func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (int, error) {
	// Delegates directly to the configured io.Reader.
	return parser.input_reader.Read(buffer)
}

/*
 * Set a string input.
 *
 * Panics if an input source has already been configured.
 */
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
	if parser.read_handler != nil {
		panic("input already set")
	}

	parser.read_handler = yaml_string_read_handler
	parser.input = input
	parser.input_pos = 0
}

/*
 * Set a reader input
 *
 * Panics if an input source has already been configured.
 */
func yaml_parser_set_input_reader(parser *yaml_parser_t, reader io.Reader) {
	if parser.read_handler != nil {
		panic("input already set")
	}

	parser.read_handler = yaml_file_read_handler
	parser.input_reader = reader
}

/*
 * Set a generic input.
 *
 * Panics if an input source has already been configured.
 */
func yaml_parser_set_input(parser *yaml_parser_t, handler yaml_read_handler_t) {
	if parser.read_handler != nil {
		panic("input already set")
	}

	parser.read_handler = handler
}

/*
 * Set the source encoding.
 *
 * Panics if an encoding other than yaml_ANY_ENCODING was already set.
 */
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
	if parser.encoding != yaml_ANY_ENCODING {
		panic("encoding already set")
	}

	parser.encoding = encoding
}
/*
 * Create a new emitter object.
 */
func yaml_emitter_initialize(emitter *yaml_emitter_t) {
	// Note the asymmetry: the work buffer is allocated at full length,
	// while raw_buffer/states/events start empty with pre-set capacity.
	*emitter = yaml_emitter_t{
		buffer:     make([]byte, OUTPUT_BUFFER_SIZE),
		raw_buffer: make([]byte, 0, OUTPUT_RAW_BUFFER_SIZE),
		states:     make([]yaml_emitter_state_t, 0, INITIAL_STACK_SIZE),
		events:     make([]yaml_event_t, 0, INITIAL_QUEUE_SIZE),
	}
}

// yaml_emitter_delete resets the emitter to its zero value, dropping
// all buffers and state.
func yaml_emitter_delete(emitter *yaml_emitter_t) {
	*emitter = yaml_emitter_t{}
}

/*
 * String write handler.
 *
 * Appends the emitted bytes to the caller-supplied output slice.
 */
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
	*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
	return nil
}

/*
 * File write handler.
 */
func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
	// Delegates to the configured io.Writer; short-write detection is
	// left to the writer itself.
	_, err := emitter.output_writer.Write(buffer)
	return err
}

/*
 * Set a string output.
 *
 * Panics if an output was already configured.
 */
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, buffer *[]byte) {
	if emitter.write_handler != nil {
		panic("output already set")
	}

	emitter.write_handler = yaml_string_write_handler
	emitter.output_buffer = buffer
}

/*
 * Set a file output.
 *
 * Panics if an output was already configured.
 */
func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
	if emitter.write_handler != nil {
		panic("output already set")
	}

	emitter.write_handler = yaml_writer_write_handler
	emitter.output_writer = w
}

/*
 * Set a generic output handler.
 *
 * Panics if an output was already configured.
 */
func yaml_emitter_set_output(emitter *yaml_emitter_t, handler yaml_write_handler_t) {
	if emitter.write_handler != nil {
		panic("output already set")
	}

	emitter.write_handler = handler
}

/*
 * Set the output encoding.
 *
 * Panics if an encoding other than yaml_ANY_ENCODING was already set.
 */
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
	if emitter.encoding != yaml_ANY_ENCODING {
		panic("encoding already set")
	}

	emitter.encoding = encoding
}

/*
 * Set the canonical output style.
 */
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
	emitter.canonical = canonical
}

/*
 * Set the indentation increment.
 *
 * Values outside the 2..9 range fall back to the default of 2.
 */
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
	if indent < 2 || indent > 9 {
		indent = 2
	}
	emitter.best_indent = indent
}

/*
 * Set the preferred line width.
 *
 * Any negative value is normalized to -1, meaning unlimited width.
 */
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
	if width < 0 {
		width = -1
	}
	emitter.best_width = width
}

/*
 * Set if unescaped non-ASCII characters are allowed.
 */
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
	emitter.unicode = unicode
}

/*
 * Set the preferred line break character.
 */
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
	emitter.line_break = line_break
}
/*
* Destroy a token object.
*/
// yaml_DECLARE(void)
// yaml_token_delete(yaml_token_t *token)
// {
// assert(token); /* Non-NULL token object expected. */
//
// switch (token.type)
// {
// case yaml_TAG_DIRECTIVE_TOKEN:
// yaml_free(token.data.tag_directive.handle);
// yaml_free(token.data.tag_directive.prefix);
// break;
//
// case yaml_ALIAS_TOKEN:
// yaml_free(token.data.alias.value);
// break;
//
// case yaml_ANCHOR_TOKEN:
// yaml_free(token.data.anchor.value);
// break;
//
// case yaml_TAG_TOKEN:
// yaml_free(token.data.tag.handle);
// yaml_free(token.data.tag.suffix);
// break;
//
// case yaml_SCALAR_TOKEN:
// yaml_free(token.data.scalar.value);
// break;
//
// default:
// break;
// }
//
// memset(token, 0, sizeof(yaml_token_t));
// }
/*
* Check if a string is a valid UTF-8 sequence.
*
* Check 'reader.c' for more details on UTF-8 encoding.
*/
// static int
// yaml_check_utf8(yaml_char_t *start, size_t length)
// {
// yaml_char_t *end = start+length;
// yaml_char_t *pointer = start;
//
// while (pointer < end) {
// unsigned char octet;
// unsigned int width;
// unsigned int value;
// size_t k;
//
// octet = pointer[0];
// width = (octet & 0x80) == 0x00 ? 1 :
// (octet & 0xE0) == 0xC0 ? 2 :
// (octet & 0xF0) == 0xE0 ? 3 :
// (octet & 0xF8) == 0xF0 ? 4 : 0;
// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
// if (!width) return 0;
// if (pointer+width > end) return 0;
// for (k = 1; k < width; k ++) {
// octet = pointer[k];
// if ((octet & 0xC0) != 0x80) return 0;
// value = (value << 6) + (octet & 0x3F);
// }
// if (!((width == 1) ||
// (width == 2 && value >= 0x80) ||
// (width == 3 && value >= 0x800) ||
// (width == 4 && value >= 0x10000))) return 0;
//
// pointer += width;
// }
//
// return 1;
// }
/*
 * Create STREAM-START.
 */
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
	*event = yaml_event_t{
		event_type: yaml_STREAM_START_EVENT,
		encoding:   encoding,
	}
}

/*
 * Create STREAM-END.
 */
func yaml_stream_end_event_initialize(event *yaml_event_t) {
	*event = yaml_event_t{
		event_type: yaml_STREAM_END_EVENT,
	}
}

/*
 * Create DOCUMENT-START.
 *
 * version_directive may be nil and tag_directives may be empty; the
 * implicit flag records whether the "---" marker was/should be omitted.
 */
func yaml_document_start_event_initialize(event *yaml_event_t,
	version_directive *yaml_version_directive_t,
	tag_directives []yaml_tag_directive_t,
	implicit bool) {
	*event = yaml_event_t{
		event_type:        yaml_DOCUMENT_START_EVENT,
		version_directive: version_directive,
		tag_directives:    tag_directives,
		implicit:          implicit,
	}
}

/*
 * Create DOCUMENT-END.
 */
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
	*event = yaml_event_t{
		event_type: yaml_DOCUMENT_END_EVENT,
		implicit:   implicit,
	}
}

/*
 * Create ALIAS.
 */
func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) {
	*event = yaml_event_t{
		event_type: yaml_ALIAS_EVENT,
		anchor:     anchor,
	}
}

/*
 * Create SCALAR.
 *
 * plain_implicit/quoted_implicit record whether the tag may be omitted
 * in plain or quoted style respectively; the scalar style is widened
 * into the shared yaml_style_t field.
 */
func yaml_scalar_event_initialize(event *yaml_event_t,
	anchor []byte, tag []byte,
	value []byte,
	plain_implicit bool, quoted_implicit bool,
	style yaml_scalar_style_t) {
	*event = yaml_event_t{
		event_type:      yaml_SCALAR_EVENT,
		anchor:          anchor,
		tag:             tag,
		value:           value,
		implicit:        plain_implicit,
		quoted_implicit: quoted_implicit,
		style:           yaml_style_t(style),
	}
}

/*
 * Create SEQUENCE-START.
 */
func yaml_sequence_start_event_initialize(event *yaml_event_t,
	anchor []byte, tag []byte, implicit bool, style yaml_sequence_style_t) {
	*event = yaml_event_t{
		event_type: yaml_SEQUENCE_START_EVENT,
		anchor:     anchor,
		tag:        tag,
		implicit:   implicit,
		style:      yaml_style_t(style),
	}
}

/*
 * Create SEQUENCE-END.
 */
func yaml_sequence_end_event_initialize(event *yaml_event_t) {
	*event = yaml_event_t{
		event_type: yaml_SEQUENCE_END_EVENT,
	}
}

/*
 * Create MAPPING-START.
 */
func yaml_mapping_start_event_initialize(event *yaml_event_t,
	anchor []byte, tag []byte, implicit bool, style yaml_mapping_style_t) {
	*event = yaml_event_t{
		event_type: yaml_MAPPING_START_EVENT,
		anchor:     anchor,
		tag:        tag,
		implicit:   implicit,
		style:      yaml_style_t(style),
	}
}

/*
 * Create MAPPING-END.
 */
func yaml_mapping_end_event_initialize(event *yaml_event_t) {
	*event = yaml_event_t{
		event_type: yaml_MAPPING_END_EVENT,
	}
}

/*
 * Destroy an event object.
 */
func yaml_event_delete(event *yaml_event_t) {
	// Resetting to the zero value drops any anchor/tag/value slices.
	*event = yaml_event_t{}
}
// /*
// * Create a document object.
// */
//
// func yaml_document_initialize(document *yaml_document_t,
// version_directive *yaml_version_directive_t,
// tag_directives []yaml_tag_directive_t,
// start_implicit, end_implicit bool) bool {
//
//
// {
// struct {
// YAML_error_type_t error;
// } context;
// struct {
// yaml_node_t *start;
// yaml_node_t *end;
// yaml_node_t *top;
// } nodes = { NULL, NULL, NULL };
// yaml_version_directive_t *version_directive_copy = NULL;
// struct {
// yaml_tag_directive_t *start;
// yaml_tag_directive_t *end;
// yaml_tag_directive_t *top;
// } tag_directives_copy = { NULL, NULL, NULL };
// yaml_tag_directive_t value = { NULL, NULL };
// YAML_mark_t mark = { 0, 0, 0 };
//
// assert(document); /* Non-NULL document object is expected. */
// assert((tag_directives_start && tag_directives_end) ||
// (tag_directives_start == tag_directives_end));
// /* Valid tag directives are expected. */
//
// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error;
//
// if (version_directive) {
// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t));
// if (!version_directive_copy) goto error;
// version_directive_copy.major = version_directive.major;
// version_directive_copy.minor = version_directive.minor;
// }
//
// if (tag_directives_start != tag_directives_end) {
// yaml_tag_directive_t *tag_directive;
// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
// goto error;
// for (tag_directive = tag_directives_start;
// tag_directive != tag_directives_end; tag_directive ++) {
// assert(tag_directive.handle);
// assert(tag_directive.prefix);
// if (!yaml_check_utf8(tag_directive.handle,
// strlen((char *)tag_directive.handle)))
// goto error;
// if (!yaml_check_utf8(tag_directive.prefix,
// strlen((char *)tag_directive.prefix)))
// goto error;
// value.handle = yaml_strdup(tag_directive.handle);
// value.prefix = yaml_strdup(tag_directive.prefix);
// if (!value.handle || !value.prefix) goto error;
// if (!PUSH(&context, tag_directives_copy, value))
// goto error;
// value.handle = NULL;
// value.prefix = NULL;
// }
// }
//
// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
// tag_directives_copy.start, tag_directives_copy.top,
// start_implicit, end_implicit, mark, mark);
//
// return 1;
//
// error:
// STACK_DEL(&context, nodes);
// yaml_free(version_directive_copy);
// while (!STACK_EMPTY(&context, tag_directives_copy)) {
// yaml_tag_directive_t value = POP(&context, tag_directives_copy);
// yaml_free(value.handle);
// yaml_free(value.prefix);
// }
// STACK_DEL(&context, tag_directives_copy);
// yaml_free(value.handle);
// yaml_free(value.prefix);
//
// return 0;
// }
//
// /*
// * Destroy a document object.
// */
//
// yaml_DECLARE(void)
// yaml_document_delete(document *yaml_document_t)
// {
// struct {
// YAML_error_type_t error;
// } context;
// yaml_tag_directive_t *tag_directive;
//
// context.error = yaml_NO_ERROR; /* Eliminate a compiler warning. */
//
// assert(document); /* Non-NULL document object is expected. */
//
// while (!STACK_EMPTY(&context, document.nodes)) {
// yaml_node_t node = POP(&context, document.nodes);
// yaml_free(node.tag);
// switch (node.type) {
// case yaml_SCALAR_NODE:
// yaml_free(node.data.scalar.value);
// break;
// case yaml_SEQUENCE_NODE:
// STACK_DEL(&context, node.data.sequence.items);
// break;
// case yaml_MAPPING_NODE:
// STACK_DEL(&context, node.data.mapping.pairs);
// break;
// default:
// assert(0); /* Should not happen. */
// }
// }
// STACK_DEL(&context, document.nodes);
//
// yaml_free(document.version_directive);
// for (tag_directive = document.tag_directives.start;
// tag_directive != document.tag_directives.end;
// tag_directive++) {
// yaml_free(tag_directive.handle);
// yaml_free(tag_directive.prefix);
// }
// yaml_free(document.tag_directives.start);
//
// memset(document, 0, sizeof(yaml_document_t));
// }
//
// /**
// * Get a document node.
// */
//
// yaml_DECLARE(yaml_node_t *)
// yaml_document_get_node(document *yaml_document_t, int index)
// {
// assert(document); /* Non-NULL document object is expected. */
//
// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
// return document.nodes.start + index - 1;
// }
// return NULL;
// }
//
// /**
// * Get the root object.
// */
//
// yaml_DECLARE(yaml_node_t *)
// yaml_document_get_root_node(document *yaml_document_t)
// {
// assert(document); /* Non-NULL document object is expected. */
//
// if (document.nodes.top != document.nodes.start) {
// return document.nodes.start;
// }
// return NULL;
// }
//
// /*
// * Add a scalar node to a document.
// */
//
// yaml_DECLARE(int)
// yaml_document_add_scalar(document *yaml_document_t,
// yaml_char_t *tag, yaml_char_t *value, int length,
// yaml_scalar_style_t style)
// {
// struct {
// YAML_error_type_t error;
// } context;
// YAML_mark_t mark = { 0, 0, 0 };
// yaml_char_t *tag_copy = NULL;
// yaml_char_t *value_copy = NULL;
// yaml_node_t node;
//
// assert(document); /* Non-NULL document object is expected. */
// assert(value); /* Non-NULL value is expected. */
//
// if (!tag) {
// tag = (yaml_char_t *)yaml_DEFAULT_SCALAR_TAG;
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error;
// tag_copy = yaml_strdup(tag);
// if (!tag_copy) goto error;
//
// if (length < 0) {
// length = strlen((char *)value);
// }
//
// if (!yaml_check_utf8(value, length)) goto error;
// value_copy = yaml_malloc(length+1);
// if (!value_copy) goto error;
// memcpy(value_copy, value, length);
// value_copy[length] = '\0';
//
// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark);
// if (!PUSH(&context, document.nodes, node)) goto error;
//
// return document.nodes.top - document.nodes.start;
//
// error:
// yaml_free(tag_copy);
// yaml_free(value_copy);
//
// return 0;
// }
//
// /*
// * Add a sequence node to a document.
// */
//
// yaml_DECLARE(int)
// yaml_document_add_sequence(document *yaml_document_t,
// yaml_char_t *tag, yaml_sequence_style_t style)
// {
// struct {
// YAML_error_type_t error;
// } context;
// YAML_mark_t mark = { 0, 0, 0 };
// yaml_char_t *tag_copy = NULL;
// struct {
// yaml_node_item_t *start;
// yaml_node_item_t *end;
// yaml_node_item_t *top;
// } items = { NULL, NULL, NULL };
// yaml_node_t node;
//
// assert(document); /* Non-NULL document object is expected. */
//
// if (!tag) {
// tag = (yaml_char_t *)yaml_DEFAULT_SEQUENCE_TAG;
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error;
// tag_copy = yaml_strdup(tag);
// if (!tag_copy) goto error;
//
// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error;
//
// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
// style, mark, mark);
// if (!PUSH(&context, document.nodes, node)) goto error;
//
// return document.nodes.top - document.nodes.start;
//
// error:
// STACK_DEL(&context, items);
// yaml_free(tag_copy);
//
// return 0;
// }
//
// /*
// * Add a mapping node to a document.
// */
//
// yaml_DECLARE(int)
// yaml_document_add_mapping(document *yaml_document_t,
// yaml_char_t *tag, yaml_mapping_style_t style)
// {
// struct {
// YAML_error_type_t error;
// } context;
// YAML_mark_t mark = { 0, 0, 0 };
// yaml_char_t *tag_copy = NULL;
// struct {
// yaml_node_pair_t *start;
// yaml_node_pair_t *end;
// yaml_node_pair_t *top;
// } pairs = { NULL, NULL, NULL };
// yaml_node_t node;
//
// assert(document); /* Non-NULL document object is expected. */
//
// if (!tag) {
// tag = (yaml_char_t *)yaml_DEFAULT_MAPPING_TAG;
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error;
// tag_copy = yaml_strdup(tag);
// if (!tag_copy) goto error;
//
// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error;
//
// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
// style, mark, mark);
// if (!PUSH(&context, document.nodes, node)) goto error;
//
// return document.nodes.top - document.nodes.start;
//
// error:
// STACK_DEL(&context, pairs);
// yaml_free(tag_copy);
//
// return 0;
// }
//
// /*
// * Append an item to a sequence node.
// */
//
// yaml_DECLARE(int)
// yaml_document_append_sequence_item(document *yaml_document_t,
// int sequence, int item)
// {
// struct {
// YAML_error_type_t error;
// } context;
//
// assert(document); /* Non-NULL document is required. */
// assert(sequence > 0
// && document.nodes.start + sequence <= document.nodes.top);
// /* Valid sequence id is required. */
// assert(document.nodes.start[sequence-1].type == yaml_SEQUENCE_NODE);
// /* A sequence node is required. */
// assert(item > 0 && document.nodes.start + item <= document.nodes.top);
// /* Valid item id is required. */
//
// if (!PUSH(&context,
// document.nodes.start[sequence-1].data.sequence.items, item))
// return 0;
//
// return 1;
// }
//
// /*
// * Append a pair of a key and a value to a mapping node.
// */
//
// yaml_DECLARE(int)
// yaml_document_append_mapping_pair(document *yaml_document_t,
// int mapping, int key, int value)
// {
// struct {
// YAML_error_type_t error;
// } context;
//
// yaml_node_pair_t pair;
//
// assert(document); /* Non-NULL document is required. */
// assert(mapping > 0
// && document.nodes.start + mapping <= document.nodes.top);
// /* Valid mapping id is required. */
// assert(document.nodes.start[mapping-1].type == yaml_MAPPING_NODE);
// /* A mapping node is required. */
// assert(key > 0 && document.nodes.start + key <= document.nodes.top);
// /* Valid key id is required. */
// assert(value > 0 && document.nodes.start + value <= document.nodes.top);
// /* Valid value id is required. */
//
// pair.key = key;
// pair.value = value;
//
// if (!PUSH(&context,
// document.nodes.start[mapping-1].data.mapping.pairs, pair))
// return 0;
//
// return 1;
// }
//
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/candiedyaml_suite_test.go
================================================
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"testing"
)
// TestCandiedyaml is the single "go test" entry point for this package: it
// installs Gomega's Fail as Ginkgo's failure handler and then runs every
// registered spec in the Candiedyaml suite.
func TestCandiedyaml(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "Candiedyaml Suite")
}
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/decode.go
================================================
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"bytes"
"errors"
"fmt"
"io"
"reflect"
"runtime"
"strconv"
"strings"
)
// Unmarshaler is implemented by types that want to decode themselves from
// YAML. UnmarshalYAML receives the resolved tag of the node and the decoded
// value (a pointer to a scratch variable filled in by the decoder; see how
// sequence/mapping/scalar invoke it with pv.Interface()).
type Unmarshaler interface {
	UnmarshalYAML(tag string, value interface{}) error
}
// A Number preserves the literal text of a number so the caller can decide
// how to convert it (enabled via Decoder.UseNumber).
type Number string

// String returns the literal text of the number.
func (n Number) String() string {
	return string(n)
}

// Float64 converts the literal text to a float64.
func (n Number) Float64() (float64, error) {
	f, err := strconv.ParseFloat(string(n), 64)
	return f, err
}

// Int64 converts the literal text to a base-10 int64.
func (n Number) Int64() (int64, error) {
	i, err := strconv.ParseInt(string(n), 10, 64)
	return i, err
}
// Decoder decodes YAML documents read from an input stream into Go values.
type Decoder struct {
	parser yaml_parser_t // low-level event parser state
	event  yaml_event_t  // current look-ahead event

	// replay_events, when non-nil, is a queue of previously recorded events
	// that nextEvent serves instead of pulling from the parser (used to
	// expand aliases).
	replay_events []yaml_event_t

	// useNumber requests that numeric scalars be decoded as Number.
	useNumber bool

	// anchors maps a completed anchor name to the events that produced it.
	anchors map[string][]yaml_event_t
	// tracking_anchors holds the event logs of anchors still being recorded,
	// innermost last; maintained by begin_anchor/end_anchor and nextEvent.
	tracking_anchors [][]yaml_event_t
}
// ParserError reports a low-level parse failure, carrying the parser's
// context and problem descriptions together with their input positions.
type ParserError struct {
	ErrorType   YAML_error_type_t
	Context     string
	ContextMark YAML_mark_t
	Problem     string
	ProblemMark YAML_mark_t
}

// Error formats the failure with 1-based line and column numbers (the marks
// are 0-based internally).
func (e *ParserError) Error() string {
	return fmt.Sprintf("yaml: [%s] %s at line %d, column %d", e.Context, e.Problem, e.ProblemMark.line+1, e.ProblemMark.column+1)
}
// UnexpectedEventError reports an event that is not valid at the current
// point of the decode, with its position in the input.
type UnexpectedEventError struct {
	Value     string
	EventType yaml_event_type_t
	At        YAML_mark_t
}

// Error formats the unexpected event with 1-based line and column numbers.
func (e *UnexpectedEventError) Error() string {
	// Fixed typo in the message: was "Unexpect event".
	return fmt.Sprintf("yaml: Unexpected event [%d]: '%s' at line %d, column %d", e.EventType, e.Value, e.At.line+1, e.At.column+1)
}
// recovery converts a panic raised during decoding into an error assigned
// through err. Runtime errors (genuine bugs) are re-panicked rather than
// swallowed; error and string panics become the stored error; any other
// panic value is wrapped in a generic message.
func recovery(err *error) {
	r := recover()
	if r == nil {
		return
	}
	if _, isRuntime := r.(runtime.Error); isRuntime {
		panic(r)
	}
	switch p := r.(type) {
	case error:
		*err = p
	case string:
		*err = errors.New(p)
	default:
		*err = errors.New("Unknown panic: " + reflect.ValueOf(p).String())
	}
}
// Unmarshal decodes the YAML document in data into the value pointed to by v.
func Unmarshal(data []byte, v interface{}) error {
	return NewDecoder(bytes.NewBuffer(data)).Decode(v)
}
// NewDecoder returns a Decoder that reads YAML documents from r.
func NewDecoder(r io.Reader) *Decoder {
	dec := &Decoder{
		anchors:          make(map[string][]yaml_event_t),
		tracking_anchors: make([][]yaml_event_t, 1),
	}
	yaml_parser_initialize(&dec.parser)
	yaml_parser_set_input_reader(&dec.parser, r)
	return dec
}
// Decode reads the next YAML document from the stream and stores it in the
// value pointed to by v. Panics raised while decoding are translated into
// the returned error by the deferred recovery.
func (d *Decoder) Decode(v interface{}) (err error) {
	defer recovery(&err)

	target := reflect.ValueOf(v)
	if target.Kind() != reflect.Ptr || target.IsNil() {
		return fmt.Errorf("Expected a pointer or nil but was a %s at %s", target.String(), d.event.start_mark)
	}

	// On the first call, consume the stream-start event before the document.
	if d.event.event_type == yaml_NO_EVENT {
		d.nextEvent()
		if d.event.event_type != yaml_STREAM_START_EVENT {
			return errors.New("Invalid stream")
		}
		d.nextEvent()
	}

	d.document(target)
	return nil
}
// UseNumber makes the Decoder decode numeric scalars as Number (preserving
// the literal text) instead of as int64/float64; the flag is forwarded to
// resolve/resolveInterface.
func (d *Decoder) UseNumber() { d.useNumber = true }
// error aborts the current decode by panicking; Decode's deferred recovery
// converts the panic back into a returned error.
func (d *Decoder) error(err error) {
	panic(err)
}
// nextEvent advances d.event: either the next queued replay event (alias
// expansion) or a freshly parsed one. Parser failures and reads past the
// stream end abort the decode via d.error. Any event consumed while an
// anchor is being recorded is appended to the innermost recording.
func (d *Decoder) nextEvent() {
	if d.event.event_type == yaml_STREAM_END_EVENT {
		d.error(errors.New("The stream is closed"))
	}

	if d.replay_events != nil {
		d.event = d.replay_events[0]
		d.replay_events = d.replay_events[1:]
		if len(d.replay_events) == 0 {
			d.replay_events = nil
		}
	} else if !yaml_parser_parse(&d.parser, &d.event) {
		yaml_event_delete(&d.event)
		d.error(&ParserError{
			ErrorType:   d.parser.error,
			Context:     d.parser.context,
			ContextMark: d.parser.context_mark,
			Problem:     d.parser.problem,
			ProblemMark: d.parser.problem_mark,
		})
	}

	// skip aliases when tracking an anchor
	if n := len(d.tracking_anchors); n > 0 && d.event.event_type != yaml_ALIAS_EVENT {
		d.tracking_anchors[n-1] = append(d.tracking_anchors[n-1], d.event)
	}
}
// document decodes one document: it consumes DOCUMENT-START, parses the root
// node into rv, then consumes DOCUMENT-END, failing on anything unexpected.
func (d *Decoder) document(rv reflect.Value) {
	if d.event.event_type != yaml_DOCUMENT_START_EVENT {
		d.error(fmt.Errorf("Expected document start at %s", d.event.start_mark))
	}
	d.nextEvent()

	d.parse(rv)

	if d.event.event_type != yaml_DOCUMENT_END_EVENT {
		d.error(fmt.Errorf("Expected document end at %s", d.event.start_mark))
	}
	d.nextEvent()
}
// parse decodes the current event (and everything it contains) into rv,
// dispatching on the event type. An invalid rv means there is nowhere to
// store the result, so the events are consumed and discarded.
func (d *Decoder) parse(rv reflect.Value) {
	if !rv.IsValid() {
		// skip ahead since we cannot store
		d.valueInterface()
		return
	}

	anchor := string(d.event.anchor)
	// anchored brackets a decode step with anchor begin/end tracking.
	anchored := func(decode func(reflect.Value)) {
		d.begin_anchor(anchor)
		decode(rv)
		d.end_anchor(anchor)
	}

	switch d.event.event_type {
	case yaml_SEQUENCE_START_EVENT:
		anchored(d.sequence)
	case yaml_MAPPING_START_EVENT:
		anchored(d.mapping)
	case yaml_SCALAR_EVENT:
		anchored(d.scalar)
	case yaml_ALIAS_EVENT:
		d.alias(rv)
	case yaml_DOCUMENT_END_EVENT:
		// Nothing to decode; the caller handles the document end.
	default:
		d.error(&UnexpectedEventError{
			Value:     string(d.event.value),
			EventType: d.event.event_type,
			At:        d.event.start_mark,
		})
	}
}
// begin_anchor starts recording events for a named anchor so aliases can
// replay them later; a blank anchor records nothing. The current event is
// the first entry of the new recording.
func (d *Decoder) begin_anchor(anchor string) {
	if anchor == "" {
		return
	}
	d.tracking_anchors = append(d.tracking_anchors, []yaml_event_t{d.event})
}
// end_anchor finishes recording a named anchor: it pops the innermost event
// log, strips the anchor name and the one-event overshoot, merges the log
// into any still-open outer recordings (nested anchors), and stores it for
// later alias replay. A blank anchor is a no-op.
func (d *Decoder) end_anchor(anchor string) {
	if anchor == "" {
		return
	}

	n := len(d.tracking_anchors)
	recorded := d.tracking_anchors[n-1]
	d.tracking_anchors = d.tracking_anchors[:n-1]

	// remove the anchor, replaying events shouldn't have anchors
	recorded[0].anchor = nil
	// we went one too many, remove the extra event
	recorded = recorded[:len(recorded)-1]

	// if nested, append to all the other anchors
	for i := range d.tracking_anchors {
		d.tracking_anchors[i] = append(d.tracking_anchors[i], recorded...)
	}
	d.anchors[anchor] = recorded
}
// indirect walks down v, allocating pointers as needed, until it reaches a
// non-pointer value. If a type on the way implements Unmarshaler, that
// implementation is returned together with a scratch *interface{} for the
// decoder to fill; otherwise the final value is returned. decodingNull
// stops one pointer early so a null can be stored into the pointer itself.
// (This mirrors the pointer-walking helper in encoding/json's decoder.)
func (d *Decoder) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, reflect.Value) {
	// If v is a named type and is addressable,
	// start with its address, so that if the type has pointer methods,
	// we find them.
	if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
		v = v.Addr()
	}
	for {
		// Load value from interface, but only if the result will be
		// usefully addressable.
		if v.Kind() == reflect.Interface && !v.IsNil() {
			e := v.Elem()
			if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
				v = e
				continue
			}
		}
		if v.Kind() != reflect.Ptr {
			break
		}
		// When decoding a null, stop at a settable non-double pointer so the
		// caller can assign nil to it directly.
		if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
			break
		}
		if v.IsNil() {
			v.Set(reflect.New(v.Type().Elem()))
		}
		if v.Type().NumMethod() > 0 {
			if u, ok := v.Interface().(Unmarshaler); ok {
				// Hand back a scratch value; the caller decodes into it and
				// then invokes u.UnmarshalYAML with the result.
				var temp interface{}
				return u, reflect.ValueOf(&temp)
			}
		}
		v = v.Elem()
	}
	return nil, v
}
// sequence decodes the current YAML sequence into v, which must resolve to
// an array, a slice, or an empty interface. If the destination implements
// Unmarshaler, the sequence is decoded into a scratch value that is handed
// to UnmarshalYAML when this function returns.
func (d *Decoder) sequence(v reflect.Value) {
	if d.event.event_type != yaml_SEQUENCE_START_EVENT {
		d.error(fmt.Errorf("Expected sequence start at %s", d.event.start_mark))
	}

	u, pv := d.indirect(v, false)
	if u != nil {
		// Decode into the scratch value first; the deferred call delivers it
		// to the custom unmarshaler afterwards.
		defer func() {
			if err := u.UnmarshalYAML(yaml_SEQ_TAG, pv.Interface()); err != nil {
				d.error(err)
			}
		}()
		_, pv = d.indirect(pv, false)
	}
	v = pv

	// Check type of target.
	switch v.Kind() {
	case reflect.Interface:
		if v.NumMethod() == 0 {
			// Decoding into nil interface? Switch to non-reflect code.
			v.Set(reflect.ValueOf(d.sequenceInterface()))
			return
		}
		// Otherwise it's invalid.
		fallthrough
	default:
		d.error(fmt.Errorf("Expected an array, slice or interface{} but was a %s at %s", v, d.event.start_mark))
	case reflect.Array:
	case reflect.Slice:
		break
	}

	d.nextEvent()
	i := 0
done:
	for {
		switch d.event.event_type {
		case yaml_SEQUENCE_END_EVENT, yaml_DOCUMENT_END_EVENT:
			break done
		}

		// Get element of array, growing if necessary.
		if v.Kind() == reflect.Slice {
			// Grow slice if necessary (1.5x growth, minimum capacity 4).
			if i >= v.Cap() {
				newcap := v.Cap() + v.Cap()/2
				if newcap < 4 {
					newcap = 4
				}
				newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
				reflect.Copy(newv, v)
				v.Set(newv)
			}
			if i >= v.Len() {
				v.SetLen(i + 1)
			}
		}

		if i < v.Len() {
			// Decode into element.
			d.parse(v.Index(i))
		} else {
			// Ran out of fixed array: skip.
			d.parse(reflect.Value{})
		}
		i++
	}

	// Fewer elements than the destination holds: zero the tail of an array,
	// or truncate a slice.
	if i < v.Len() {
		if v.Kind() == reflect.Array {
			// Array. Zero the rest.
			z := reflect.Zero(v.Type().Elem())
			for ; i < v.Len(); i++ {
				v.Index(i).Set(z)
			}
		} else {
			v.SetLen(i)
		}
	}
	// An empty sequence yields an empty (non-nil) slice.
	if i == 0 && v.Kind() == reflect.Slice {
		v.Set(reflect.MakeSlice(v.Type(), 0, 0))
	}

	// A document end here means the stream stopped mid-sequence; leave the
	// event for the caller instead of consuming past it.
	if d.event.event_type != yaml_DOCUMENT_END_EVENT {
		d.nextEvent()
	}
}
// mapping decodes the current YAML mapping into v, which must resolve to a
// struct, a map, or an empty interface. If the destination implements
// Unmarshaler, the mapping is decoded into a scratch value that is handed
// to UnmarshalYAML when this function returns.
func (d *Decoder) mapping(v reflect.Value) {
	u, pv := d.indirect(v, false)
	if u != nil {
		// Decode into the scratch value first; the deferred call delivers it
		// to the custom unmarshaler afterwards.
		defer func() {
			if err := u.UnmarshalYAML(yaml_MAP_TAG, pv.Interface()); err != nil {
				d.error(err)
			}
		}()
		_, pv = d.indirect(pv, false)
	}
	v = pv

	// Decoding into nil interface? Switch to non-reflect code.
	if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
		v.Set(reflect.ValueOf(d.mappingInterface()))
		return
	}

	// Check type of target: struct or map[X]Y
	switch v.Kind() {
	case reflect.Struct:
		d.mappingStruct(v)
		return
	case reflect.Map:
	default:
		d.error(fmt.Errorf("Expected a struct or map but was a %s at %s ", v, d.event.start_mark))
	}

	mapt := v.Type()
	if v.IsNil() {
		v.Set(reflect.MakeMap(mapt))
	}

	d.nextEvent()

	keyt := mapt.Key()
	mapElemt := mapt.Elem()

	// mapElem is reused across iterations to avoid reallocating per entry.
	var mapElem reflect.Value
done:
	for {
		switch d.event.event_type {
		case yaml_MAPPING_END_EVENT:
			break done
		case yaml_DOCUMENT_END_EVENT:
			// Stream ended mid-mapping; leave the event for the caller.
			return
		}

		// Decode the key into a fresh value of the map's key type.
		key := reflect.New(keyt)
		d.parse(key.Elem())

		if !mapElem.IsValid() {
			mapElem = reflect.New(mapElemt).Elem()
		} else {
			mapElem.Set(reflect.Zero(mapElemt))
		}

		d.parse(mapElem)

		v.SetMapIndex(key.Elem(), mapElem)
	}
	d.nextEvent()
}
// mappingStruct decodes the current YAML mapping into the fields of struct
// v. Keys are matched against the cached field metadata (yaml tags / field
// names): an exact match wins, otherwise the first case-insensitive match
// is used; keys with no matching field have their values skipped.
func (d *Decoder) mappingStruct(v reflect.Value) {
	structt := v.Type()
	fields := cachedTypeFields(structt)

	d.nextEvent()
done:
	for {
		switch d.event.event_type {
		case yaml_MAPPING_END_EVENT:
			break done
		case yaml_DOCUMENT_END_EVENT:
			// Stream ended mid-mapping; leave the event for the caller.
			return
		}

		key := ""
		d.parse(reflect.ValueOf(&key))

		// Figure out field corresponding to key.
		var subv reflect.Value
		var f *field
		for i := range fields {
			ff := &fields[i]
			if ff.name == key {
				f = ff
				break
			}
			// Remember the first case-insensitive match as a fallback.
			if f == nil && strings.EqualFold(ff.name, key) {
				f = ff
			}
		}
		if f != nil {
			subv = v
			// Walk the field's index path, allocating nil embedded struct
			// pointers along the way.
			for _, i := range f.index {
				if subv.Kind() == reflect.Ptr {
					if subv.IsNil() {
						subv.Set(reflect.New(subv.Type().Elem()))
					}
					subv = subv.Elem()
				}
				subv = subv.Field(i)
			}
		}
		// subv stays invalid for unknown keys, which makes parse skip the value.
		d.parse(subv)
	}
	d.nextEvent()
}
// scalar decodes the current scalar event into v via resolve. If the
// destination implements Unmarshaler, the resolved value and tag are handed
// to UnmarshalYAML instead when this function returns.
func (d *Decoder) scalar(v reflect.Value) {
	val := string(d.event.value)
	// Null-like scalars make indirect stop at a settable pointer so nil can
	// be stored into it.
	wantptr := null_values[val]

	u, pv := d.indirect(v, wantptr)

	var tag string
	if u != nil {
		// NOTE: tag is assigned by resolve below, before this deferred call
		// runs — order matters here.
		defer func() {
			if err := u.UnmarshalYAML(tag, pv.Interface()); err != nil {
				d.error(err)
			}
		}()

		_, pv = d.indirect(pv, wantptr)
	}
	v = pv

	var err error
	tag, err = resolve(d.event, v, d.useNumber)
	if err != nil {
		d.error(err)
	}

	d.nextEvent()
}
// alias expands an alias event by queueing the recorded events of the named
// anchor for replay and decoding them into rv. An unknown anchor aborts the
// decode.
func (d *Decoder) alias(rv reflect.Value) {
	events, found := d.anchors[string(d.event.anchor)]
	if !found {
		d.error(fmt.Errorf("missing anchor: '%s' at %s", d.event.anchor, d.event.start_mark))
	}

	d.replay_events = events
	d.nextEvent()
	d.parse(rv)
}
// valueInterface decodes the current event into the most natural Go type:
// []interface{} for sequences, map[interface{}]interface{} for mappings, or
// a resolved scalar value. Aliases are replayed into the result directly.
func (d *Decoder) valueInterface() interface{} {
	anchor := string(d.event.anchor)

	var v interface{}
	switch d.event.event_type {
	case yaml_SEQUENCE_START_EVENT:
		d.begin_anchor(anchor)
		v = d.sequenceInterface()
	case yaml_MAPPING_START_EVENT:
		d.begin_anchor(anchor)
		v = d.mappingInterface()
	case yaml_SCALAR_EVENT:
		d.begin_anchor(anchor)
		v = d.scalarInterface()
	case yaml_ALIAS_EVENT:
		d.alias(reflect.ValueOf(&v))
		return v
	case yaml_DOCUMENT_END_EVENT:
		d.error(&UnexpectedEventError{
			Value:     string(d.event.value),
			EventType: d.event.event_type,
			At:        d.event.start_mark,
		})
	}

	d.end_anchor(anchor)
	return v
}
// scalarInterface resolves the current scalar event into a native Go value
// and advances past it.
func (d *Decoder) scalarInterface() interface{} {
	_, val := resolveInterface(d.event, d.useNumber)
	d.nextEvent()
	return val
}
// sequenceInterface is like sequence but returns []interface{} without
// reflecting on a destination. It stops at the sequence end, or at a
// document end (truncated stream), in which case that event is left for the
// caller.
func (d *Decoder) sequenceInterface() []interface{} {
	items := make([]interface{}, 0)
	d.nextEvent()
	for d.event.event_type != yaml_SEQUENCE_END_EVENT &&
		d.event.event_type != yaml_DOCUMENT_END_EVENT {
		items = append(items, d.valueInterface())
	}
	if d.event.event_type != yaml_DOCUMENT_END_EVENT {
		d.nextEvent()
	}
	return items
}
// mappingInterface is like mapping but returns map[interface{}]interface{}
// without reflecting on a destination. It stops at the mapping end, or at a
// document end (truncated stream), in which case that event is left for the
// caller.
func (d *Decoder) mappingInterface() map[interface{}]interface{} {
	m := make(map[interface{}]interface{})
	d.nextEvent()
	for d.event.event_type != yaml_MAPPING_END_EVENT &&
		d.event.event_type != yaml_DOCUMENT_END_EVENT {
		k := d.valueInterface()
		// Read value.
		m[k] = d.valueInterface()
	}
	if d.event.event_type != yaml_DOCUMENT_END_EVENT {
		d.nextEvent()
	}
	return m
}
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/decode_test.go
================================================
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"math"
"os"
"strconv"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Decode", func() {
It("Decodes a file", func() {
f, _ := os.Open("fixtures/specification/example2_1.yaml")
d := NewDecoder(f)
var v interface{}
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
})
Context("strings", func() {
It("Decodes an empty string", func() {
d := NewDecoder(strings.NewReader(`""
`))
var v string
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(""))
})
It("Decodes an empty string to an interface", func() {
d := NewDecoder(strings.NewReader(`""
`))
var v interface{}
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(""))
})
It("Decodes a map containing empty strings to an interface", func() {
d := NewDecoder(strings.NewReader(`"" : ""
`))
var v interface{}
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(map[interface{}]interface{}{"": ""}))
})
It("Decodes strings starting with a colon", func() {
d := NewDecoder(strings.NewReader(`:colon
`))
var v interface{}
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(":colon"))
})
})
Context("Sequence", func() {
It("Decodes to interface{}s", func() {
f, _ := os.Open("fixtures/specification/example2_1.yaml")
d := NewDecoder(f)
var v interface{}
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect((v).([]interface{})).To(Equal([]interface{}{"Mark McGwire", "Sammy Sosa", "Ken Griffey"}))
})
It("Decodes to []string", func() {
f, _ := os.Open("fixtures/specification/example2_1.yaml")
d := NewDecoder(f)
v := make([]string, 0, 3)
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal([]string{"Mark McGwire", "Sammy Sosa", "Ken Griffey"}))
})
It("Decodes a sequence of maps", func() {
f, _ := os.Open("fixtures/specification/example2_12.yaml")
d := NewDecoder(f)
v := make([]map[string]interface{}, 1)
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal([]map[string]interface{}{
{"item": "Super Hoop", "quantity": int64(1)},
{"item": "Basketball", "quantity": int64(4)},
{"item": "Big Shoes", "quantity": int64(1)},
}))
})
Describe("As structs", func() {
It("Simple struct", func() {
f, _ := os.Open("fixtures/specification/example2_4.yaml")
d := NewDecoder(f)
type batter struct {
Name string
HR int64
AVG float64
}
v := make([]batter, 0, 1)
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal([]batter{
batter{Name: "Mark McGwire", HR: 65, AVG: 0.278},
batter{Name: "Sammy Sosa", HR: 63, AVG: 0.288},
}))
})
It("Tagged struct", func() {
f, _ := os.Open("fixtures/specification/example2_4.yaml")
d := NewDecoder(f)
type batter struct {
N string `yaml:"name"`
H int64 `yaml:"hr"`
A float64 `yaml:"avg"`
}
v := make([]batter, 0, 1)
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal([]batter{
batter{N: "Mark McGwire", H: 65, A: 0.278},
batter{N: "Sammy Sosa", H: 63, A: 0.288},
}))
})
It("handles null values", func() {
type S struct {
Default interface{}
}
d := NewDecoder(strings.NewReader(`
---
default:
`))
var s S
err := d.Decode(&s)
Expect(err).NotTo(HaveOccurred())
Expect(s).To(Equal(S{Default: nil}))
})
It("ignores missing tags", func() {
f, _ := os.Open("fixtures/specification/example2_4.yaml")
d := NewDecoder(f)
type batter struct {
N string `yaml:"name"`
HR int64
A float64
}
v := make([]batter, 0, 1)
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal([]batter{
batter{N: "Mark McGwire", HR: 65},
batter{N: "Sammy Sosa", HR: 63},
}))
})
})
It("Decodes a sequence of sequences", func() {
f, _ := os.Open("fixtures/specification/example2_5.yaml")
d := NewDecoder(f)
v := make([][]interface{}, 1)
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal([][]interface{}{
{"name", "hr", "avg"},
{"Mark McGwire", int64(65), float64(0.278)},
{"Sammy Sosa", int64(63), float64(0.288)},
}))
})
})
Context("Maps", func() {
It("Decodes to interface{}s", func() {
f, _ := os.Open("fixtures/specification/example2_2.yaml")
d := NewDecoder(f)
var v interface{}
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect((v).(map[interface{}]interface{})).To(Equal(map[interface{}]interface{}{
"hr": int64(65),
"avg": float64(0.278),
"rbi": int64(147),
}))
})
It("Decodes to a struct", func() {
f, _ := os.Open("fixtures/specification/example2_2.yaml")
d := NewDecoder(f)
type batter struct {
HR int64
AVG float64
RBI int64
}
v := batter{}
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(batter{HR: 65, AVG: 0.278, RBI: 147}))
})
It("Decodes to a map of string arrays", func() {
f, _ := os.Open("fixtures/specification/example2_9.yaml")
d := NewDecoder(f)
v := make(map[string][]string)
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(map[string][]string{"hr": []string{"Mark McGwire", "Sammy Sosa"}, "rbi": []string{"Sammy Sosa", "Ken Griffey"}}))
})
})
Context("Sequence of Maps", func() {
It("Decodes to interface{}s", func() {
f, _ := os.Open("fixtures/specification/example2_4.yaml")
d := NewDecoder(f)
var v interface{}
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect((v).([]interface{})).To(Equal([]interface{}{
map[interface{}]interface{}{"name": "Mark McGwire", "hr": int64(65), "avg": float64(0.278)},
map[interface{}]interface{}{"name": "Sammy Sosa", "hr": int64(63), "avg": float64(0.288)},
}))
})
})
It("Decodes ascii art", func() {
f, _ := os.Open("fixtures/specification/example2_13.yaml")
d := NewDecoder(f)
v := ""
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(`\//||\/||
// || ||__
`))
})
It("Decodes folded strings", func() {
f, _ := os.Open("fixtures/specification/example2_15.yaml")
d := NewDecoder(f)
v := ""
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal("Sammy Sosa completed another fine season with great stats.\n\n 63 Home Runs\n 0.288 Batting Average\n\nWhat a year!\n"))
})
It("Decodes literal and folded strings with indents", func() {
f, _ := os.Open("fixtures/specification/example2_16.yaml")
d := NewDecoder(f)
v := make(map[string]string)
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(map[string]string{
"name": "Mark McGwire",
"accomplishment": `Mark set a major league home run record in 1998.
`,
"stats": `65 Home Runs
0.278 Batting Average
`,
}))
})
It("Decodes single quoted", func() {
f, _ := os.Open("fixtures/specification/example2_17_quoted.yaml")
d := NewDecoder(f)
v := make(map[string]string)
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(map[string]string{
"quoted": ` # not a 'comment'.`,
}))
})
Context("ints", func() {
It("Decodes into an interface{}", func() {
f, _ := os.Open("fixtures/specification/example2_19.yaml")
d := NewDecoder(f)
v := make(map[string]interface{})
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(map[string]interface{}{
"canonical": int64(12345),
"decimal": int64(12345),
"octal": int64(12),
"hexadecimal": int64(12),
}))
})
It("Decodes into int64", func() {
f, _ := os.Open("fixtures/specification/example2_19.yaml")
d := NewDecoder(f)
v := make(map[string]int64)
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(map[string]int64{
"canonical": int64(12345),
"decimal": int64(12345),
"octal": int64(12),
"hexadecimal": int64(12),
}))
})
Context("boundary values", func() {
intoInt64 := func(val int64) {
It("Decodes into an int64 value", func() {
var v int64
d := NewDecoder(strings.NewReader(strconv.FormatInt(val, 10)))
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(val))
})
}
intoInt := func(val int) {
It("Decodes into an int value", func() {
var v int
d := NewDecoder(strings.NewReader(strconv.FormatInt(int64(val), 10)))
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(val))
})
}
intoInterface := func(val int64) {
It("Decodes into an interface{}", func() {
var v interface{}
d := NewDecoder(strings.NewReader(strconv.FormatInt(val, 10)))
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(val))
})
}
intoInt64(math.MaxInt64)
intoInterface(math.MaxInt64)
intoInt64(math.MinInt64)
intoInterface(math.MinInt64)
intoInt(math.MaxInt32)
intoInt(math.MinInt32)
})
})
It("Decodes a variety of floats", func() {
f, _ := os.Open("fixtures/specification/example2_20.yaml")
d := NewDecoder(f)
v := make(map[string]float64)
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(math.IsNaN(v["not a number"])).To(BeTrue())
delete(v, "not a number")
Expect(v).To(Equal(map[string]float64{
"canonical": float64(1230.15),
"exponential": float64(1230.15),
"fixed": float64(1230.15),
"negative infinity": math.Inf(-1),
}))
})
It("Decodes booleans, nil and strings", func() {
f, _ := os.Open("fixtures/specification/example2_21.yaml")
d := NewDecoder(f)
v := make(map[string]interface{})
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(map[string]interface{}{
"": interface{}(nil),
"true": true,
"false": false,
"string": "12345",
}))
})
It("Decodes a null ptr", func() {
d := NewDecoder(strings.NewReader(`null
`))
var v *bool
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(BeNil())
})
It("Decodes dates/time", func() {
f, _ := os.Open("fixtures/specification/example2_22.yaml")
d := NewDecoder(f)
v := make(map[string]time.Time)
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(map[string]time.Time{
"canonical": time.Date(2001, time.December, 15, 2, 59, 43, int(1*time.Millisecond), time.UTC),
"iso8601": time.Date(2001, time.December, 14, 21, 59, 43, int(10*time.Millisecond), time.FixedZone("", -5*3600)),
"spaced": time.Date(2001, time.December, 14, 21, 59, 43, int(10*time.Millisecond), time.FixedZone("", -5*3600)),
"date": time.Date(2002, time.December, 14, 0, 0, 0, 0, time.UTC),
}))
})
Context("Tags", func() {
It("Respects tags", func() {
f, _ := os.Open("fixtures/specification/example2_23_non_date.yaml")
d := NewDecoder(f)
v := make(map[string]string)
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(map[string]string{
"not-date": "2002-04-28",
}))
})
It("handles non-specific tags", func() {
d := NewDecoder(strings.NewReader(`
---
not_parsed: ! 123
`))
v := make(map[string]int)
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(map[string]int{"not_parsed": 123}))
})
It("handles non-specific tags", func() {
d := NewDecoder(strings.NewReader(`
---
? a complex key
: ! "123"
`))
v := make(map[string]string)
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(map[string]string{"a complex key": "123"}))
})
})
Context("Decodes binary/base64", func() {
It("to []byte", func() {
f, _ := os.Open("fixtures/specification/example2_23_picture.yaml")
d := NewDecoder(f)
v := make(map[string][]byte)
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(map[string][]byte{
"picture": []byte{0x47, 0x49, 0x46, 0x38, 0x39, 0x61, 0x0c, 0x00,
0x0c, 0x00, 0x84, 0x00, 0x00, 0xff, 0xff, 0xf7, 0xf5, 0xf5, 0xee,
0xe9, 0xe9, 0xe5, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0xe7, 0xe7,
0xe7, 0x5e, 0x5e, 0x5e, 0xf3, 0xf3, 0xed, 0x8e, 0x8e, 0x8e, 0xe0,
0xe0, 0xe0, 0x9f, 0x9f, 0x9f, 0x93, 0x93, 0x93, 0xa7, 0xa7, 0xa7,
0x9e, 0x9e, 0x9e, 0x69, 0x5e, 0x10, 0x27, 0x20, 0x82, 0x0a, 0x01,
0x00, 0x3b},
}))
})
It("to string", func() {
d := NewDecoder(strings.NewReader("!binary YWJjZGVmZw=="))
var v string
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal("abcdefg"))
})
It("to string via alternate form", func() {
d := NewDecoder(strings.NewReader("!!binary YWJjZGVmZw=="))
var v string
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal("abcdefg"))
})
It("to interface", func() {
d := NewDecoder(strings.NewReader("!binary YWJjZGVmZw=="))
var v interface{}
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal([]byte("abcdefg")))
})
})
Context("Aliases", func() {
Context("to known types", func() {
It("aliases scalars", func() {
f, _ := os.Open("fixtures/specification/example2_10.yaml")
d := NewDecoder(f)
v := make(map[string][]string)
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(map[string][]string{
"hr": {"Mark McGwire", "Sammy Sosa"},
"rbi": {"Sammy Sosa", "Ken Griffey"},
}))
})
It("aliases sequences", func() {
d := NewDecoder(strings.NewReader(`
---
hr: &ss
- MG
- SS
rbi: *ss
`))
v := make(map[string][]string)
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(map[string][]string{
"hr": {"MG", "SS"},
"rbi": {"MG", "SS"},
}))
})
It("aliases maps", func() {
d := NewDecoder(strings.NewReader(`
---
hr: &ss
MG : SS
rbi: *ss
`))
v := make(map[string]map[string]string)
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(map[string]map[string]string{
"hr": {"MG": "SS"},
"rbi": {"MG": "SS"},
}))
})
})
It("aliases to different types", func() {
type S struct {
A map[string]int
C map[string]string
}
d := NewDecoder(strings.NewReader(`
---
a: &map
b : 1
c: *map
`))
var s S
err := d.Decode(&s)
Expect(err).NotTo(HaveOccurred())
Expect(s).To(Equal(S{
A: map[string]int{"b": 1},
C: map[string]string{"b": "1"},
}))
})
It("fails if an anchor is undefined", func() {
d := NewDecoder(strings.NewReader(`
---
a: *missing
`))
m := make(map[string]string)
err := d.Decode(&m)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(MatchRegexp("missing anchor.*line.*column.*"))
})
Context("to Interface", func() {
It("aliases scalars", func() {
f, _ := os.Open("fixtures/specification/example2_10.yaml")
d := NewDecoder(f)
v := make(map[string]interface{})
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(map[string]interface{}{
"hr": []interface{}{"Mark McGwire", "Sammy Sosa"},
"rbi": []interface{}{"Sammy Sosa", "Ken Griffey"},
}))
})
It("aliases sequences", func() {
d := NewDecoder(strings.NewReader(`
---
hr: &ss
- MG
- SS
rbi: *ss
`))
v := make(map[string]interface{})
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(map[string]interface{}{
"hr": []interface{}{"MG", "SS"},
"rbi": []interface{}{"MG", "SS"},
}))
})
It("aliases maps", func() {
d := NewDecoder(strings.NewReader(`
---
hr: &ss
MG : SS
rbi: *ss
`))
v := make(map[string]interface{})
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(map[string]interface{}{
"hr": map[interface{}]interface{}{"MG": "SS"},
"rbi": map[interface{}]interface{}{"MG": "SS"},
}))
})
It("supports duplicate aliases", func() {
d := NewDecoder(strings.NewReader(`
---
a: &a
b: 1
x: *a
y: *a
`))
v := make(map[string]interface{})
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(map[string]interface{}{
"a": map[interface{}]interface{}{"b": int64(1)},
"x": map[interface{}]interface{}{"b": int64(1)},
"y": map[interface{}]interface{}{"b": int64(1)},
}))
})
It("supports overriden anchors", func() {
d := NewDecoder(strings.NewReader(`
---
First occurrence: &anchor Foo
Second occurrence: *anchor
Override anchor: &anchor Bar
Reuse anchor: *anchor
`))
v := make(map[string]interface{})
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(map[string]interface{}{
"First occurrence": "Foo",
"Second occurrence": "Foo",
"Override anchor": "Bar",
"Reuse anchor": "Bar",
}))
})
It("fails if an anchor is undefined", func() {
d := NewDecoder(strings.NewReader(`
---
a: *missing
`))
var i interface{}
err := d.Decode(&i)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(MatchRegexp("missing anchor.*line.*column.*"))
})
})
It("supports composing aliases", func() {
d := NewDecoder(strings.NewReader(`
---
a: &a b
x: &b
d: *a
z: *b
`))
v := make(map[string]interface{})
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(map[string]interface{}{
"a": "b",
"x": map[interface{}]interface{}{"d": "b"},
"z": map[interface{}]interface{}{"d": "b"},
}))
})
It("redefinition while composing aliases", func() {
d := NewDecoder(strings.NewReader(`
---
a: &a b
x: &c
d : &a 1
y: *a
`))
v := make(map[string]interface{})
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(Equal(map[string]interface{}{
"a": "b",
"x": map[interface{}]interface{}{"d": int64(1)},
"y": int64(1),
}))
})
It("can parse nested anchors", func() {
d := NewDecoder(strings.NewReader(`
---
a:
aa: &x
aaa: 1
ab:
aba: &y
abaa:
abaaa: *x
b:
- ba:
baa: *y
`))
v := make(map[string]interface{})
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
})
})
Context("When decoding fails", func() {
It("returns an error", func() {
f, _ := os.Open("fixtures/specification/example_empty.yaml")
d := NewDecoder(f)
var v interface{}
err := d.Decode(&v)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal("Expected document start at line 0, column 0"))
})
})
Context("Unmarshaler support", func() {
Context("Receiver is a value", func() {
It("the Marshaler interface is not used", func() {
d := NewDecoder(strings.NewReader("abc\n"))
v := hasMarshaler{}
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v.Value).To(BeNil())
})
})
Context("Receiver is a pointer", func() {
It("uses the Marshaler interface when a pointer", func() {
d := NewDecoder(strings.NewReader("abc\n"))
v := hasPtrMarshaler{}
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
})
It("marshals a scalar", func() {
d := NewDecoder(strings.NewReader("abc\n"))
v := hasPtrMarshaler{}
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v.Tag).To(Equal(yaml_STR_TAG))
Expect(v.Value).To(Equal("abc"))
})
It("marshals a sequence", func() {
d := NewDecoder(strings.NewReader("[abc, def]\n"))
v := hasPtrMarshaler{}
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v.Tag).To(Equal(yaml_SEQ_TAG))
Expect(v.Value).To(Equal([]interface{}{"abc", "def"}))
})
It("marshals a map", func() {
d := NewDecoder(strings.NewReader("{ a: bc}\n"))
v := hasPtrMarshaler{}
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v.Tag).To(Equal(yaml_MAP_TAG))
Expect(v.Value).To(Equal(map[interface{}]interface{}{"a": "bc"}))
})
})
})
Context("Marshals into a Number", func() {
It("when the number is an int", func() {
d := NewDecoder(strings.NewReader("123\n"))
d.UseNumber()
var v Number
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v.String()).To(Equal("123"))
})
It("when the number is an float", func() {
d := NewDecoder(strings.NewReader("1.23\n"))
d.UseNumber()
var v Number
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v.String()).To(Equal("1.23"))
})
It("it fails when its a non-Number", func() {
d := NewDecoder(strings.NewReader("on\n"))
d.UseNumber()
var v Number
err := d.Decode(&v)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(MatchRegexp("Not a number: 'on' at line 0, column 0"))
})
It("returns a Number", func() {
d := NewDecoder(strings.NewReader("123\n"))
d.UseNumber()
var v interface{}
err := d.Decode(&v)
Expect(err).NotTo(HaveOccurred())
Expect(v).To(BeAssignableToTypeOf(Number("")))
n := v.(Number)
Expect(n.String()).To(Equal("123"))
})
})
Context("When there are special characters", func() {
It("returns an error", func() {
d := NewDecoder(strings.NewReader(`
---
applications:
- name: m
services:
- !@#
`))
var v interface{}
err := d.Decode(&v)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(MatchRegexp("yaml.*did not find.*line.*column.*"))
})
})
})
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/emitter.go
================================================
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"bytes"
)
// default_tag_directives are the two handles every YAML document has
// implicitly: "!" for local tags and "!!" for the standard
// "tag:yaml.org,2002:" namespace. They are appended (duplicates allowed)
// after any document-supplied %TAG directives.
var default_tag_directives = []yaml_tag_directive_t{
	{[]byte("!"), []byte("!")},
	{[]byte("!!"), []byte("tag:yaml.org,2002:")},
}
/*
 * Flush the output buffer when it is close to full.
 *
 * A margin of five bytes is kept so one subsequent write (a multi-byte
 * UTF-8 character or a CRLF pair) cannot overrun the buffer.
 */
func flush(emitter *yaml_emitter_t) bool {
	if emitter.buffer_pos+5 < len(emitter.buffer) {
		// Plenty of room left; nothing to do.
		return true
	}
	return yaml_emitter_flush(emitter)
}
/*
 * Append a single byte to the output buffer, flushing first if needed,
 * and advance the column counter by one.
 */
func put(emitter *yaml_emitter_t, value byte) bool {
	if !flush(emitter) {
		return false
	}
	pos := emitter.buffer_pos
	emitter.buffer[pos] = value
	emitter.buffer_pos = pos + 1
	emitter.column++
	return true
}
/*
 * Put a line break to the output buffer.
 *
 * Writes the configured break sequence (CR, LF, or CRLF), resets the
 * column counter, and advances the line counter. Returns false on an
 * unknown/unset line-break setting or if flushing fails.
 */
func put_break(emitter *yaml_emitter_t) bool {
	if !flush(emitter) {
		return false
	}
	switch emitter.line_break {
	case yaml_CR_BREAK:
		emitter.buffer[emitter.buffer_pos] = '\r'
		emitter.buffer_pos++
	case yaml_LN_BREAK:
		emitter.buffer[emitter.buffer_pos] = '\n'
		emitter.buffer_pos++
	case yaml_CRLN_BREAK:
		// BUG FIX: '\n' must be stored in the byte AFTER '\r'. The old
		// code wrote both bytes to the same index and then advanced by
		// two, emitting "\n" followed by one byte of stale buffer data.
		emitter.buffer[emitter.buffer_pos] = '\r'
		emitter.buffer[emitter.buffer_pos+1] = '\n'
		emitter.buffer_pos += 2
	default:
		return false
	}
	emitter.column = 0
	emitter.line++
	return true
}
/*
 * Copy a character from a string into buffer.
 *
 * Copies one character from src at *src_pos into the emitter's buffer
 * and counts it as a single output column. Both positions are advanced
 * by copy_bytes (defined elsewhere in this package; presumably by the
 * character's UTF-8 byte width — confirm against its definition).
 */
func write(emitter *yaml_emitter_t, src []byte, src_pos *int) bool {
	if !flush(emitter) {
		return false
	}
	copy_bytes(emitter.buffer, &emitter.buffer_pos, src, src_pos)
	emitter.column++
	return true
}
/*
 * Copy a line break character from a string into buffer.
 *
 * A '\n' in src is translated to the emitter's configured break style
 * via put_break (which also updates line/column); any other break
 * character is copied verbatim by write, and the line/column counters
 * are adjusted here instead.
 */
func write_break(emitter *yaml_emitter_t, src []byte, src_pos *int) bool {
	if src[*src_pos] == '\n' {
		if !put_break(emitter) {
			return false
		}
		// put_break wrote the configured break; just consume the '\n'.
		// (In Go, *src_pos++ increments the pointed-to int.)
		*src_pos++
	} else {
		if !write(emitter, src, src_pos) {
			return false
		}
		emitter.column = 0
		emitter.line++
	}
	return true
}
/*
 * Record an emitter error with the given description.
 *
 * Always returns false so callers can propagate failure directly:
 *   return yaml_emitter_set_emitter_error(emitter, "...")
 */
func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
	emitter.problem = problem
	emitter.error = yaml_EMITTER_ERROR
	return false
}
/*
 * Emit an event.
 *
 * The event is appended to the queue; queued events are then consumed in
 * order until the look-ahead heuristic (yaml_emitter_need_more_events)
 * decides more input is needed before committing to an output form.
 */
func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	emitter.events = append(emitter.events, *event)
	for !yaml_emitter_need_more_events(emitter) {
		// Deliberate shadow: this is the OLDEST queued event, not the
		// one just appended above.
		event := &emitter.events[emitter.events_head]
		if !yaml_emitter_analyze_event(emitter, event) {
			return false
		}
		if !yaml_emitter_state_machine(emitter, event) {
			return false
		}
		// Release the event's resources and pop it off the queue head.
		yaml_event_delete(event)
		emitter.events_head++
	}
	return true
}
/*
 * Check if we need to accumulate more events before emitting.
 *
 * Some events require look-ahead before the emitter can pick an output
 * form (e.g. whether a collection is empty). Extra events to accumulate:
 *   DOCUMENT-START: 1, SEQUENCE-START: 2, MAPPING-START: 3.
 */
func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
	head := emitter.events_head
	total := len(emitter.events)
	if head == total {
		// Queue is empty: definitely need input.
		return true
	}

	var accumulate int
	switch emitter.events[head].event_type {
	case yaml_DOCUMENT_START_EVENT:
		accumulate = 1
	case yaml_SEQUENCE_START_EVENT:
		accumulate = 2
	case yaml_MAPPING_START_EVENT:
		accumulate = 3
	default:
		// Head event needs no look-ahead.
		return false
	}

	if total-head > accumulate {
		// Enough events already buffered.
		return false
	}

	// Few events buffered: only wait for more if the head event's node
	// is still open, i.e. the nesting level never drops back to zero.
	level := 0
	for i := head; i < total; i++ {
		switch emitter.events[i].event_type {
		case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
			level++
		case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
			level--
		}
		if level == 0 {
			return false
		}
	}
	return true
}
/*
 * Append a directive to the directives stack.
 *
 * Fails with an emitter error on a duplicate handle unless
 * allow_duplicates is set (used when appending the built-in defaults,
 * which a document may legitimately redeclare).
 */
func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t,
	value *yaml_tag_directive_t, allow_duplicates bool) bool {
	for i := range emitter.tag_directives {
		if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
			if allow_duplicates {
				return true
			}
			// Fixed typo in the error message ("duplicat" -> "duplicate").
			return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
		}
	}
	tag_copy := yaml_tag_directive_t{
		handle: value.handle,
		prefix: value.prefix,
	}
	emitter.tag_directives = append(emitter.tag_directives, tag_copy)
	return true
}
/*
 * Increase the indentation level.
 *
 * The current indent is pushed onto the stack so the caller's matching
 * pop can restore it. flow selects the flow-style starting indent for
 * the first node; indentless suppresses the increase (used for compact
 * block sequences nested under mapping keys).
 */
func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow bool, indentless bool) bool {
	emitter.indents = append(emitter.indents, emitter.indent)
	switch {
	case emitter.indent < 0:
		// No node emitted yet: pick the starting indent.
		if flow {
			emitter.indent = emitter.best_indent
		} else {
			emitter.indent = 0
		}
	case !indentless:
		emitter.indent += emitter.best_indent
	}
	return true
}
/*
 * State dispatcher.
 *
 * Routes the current event to the handler for the emitter's current
 * state. The trailing true/false argument on paired calls marks whether
 * this is the FIRST item/key/document (affects separators and markers).
 */
func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	switch emitter.state {
	case yaml_EMIT_STREAM_START_STATE:
		return yaml_emitter_emit_stream_start(emitter, event)
	case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
		return yaml_emitter_emit_document_start(emitter, event, true)
	case yaml_EMIT_DOCUMENT_START_STATE:
		return yaml_emitter_emit_document_start(emitter, event, false)
	case yaml_EMIT_DOCUMENT_CONTENT_STATE:
		return yaml_emitter_emit_document_content(emitter, event)
	case yaml_EMIT_DOCUMENT_END_STATE:
		return yaml_emitter_emit_document_end(emitter, event)
	case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
		return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
	case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
		return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
	case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
		return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
	case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
		return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
	case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
		return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
	case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
		return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
	case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
		return yaml_emitter_emit_block_sequence_item(emitter, event, true)
	case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
		return yaml_emitter_emit_block_sequence_item(emitter, event, false)
	case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
		return yaml_emitter_emit_block_mapping_key(emitter, event, true)
	case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
		return yaml_emitter_emit_block_mapping_key(emitter, event, false)
	case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
		return yaml_emitter_emit_block_mapping_value(emitter, event, true)
	case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
		return yaml_emitter_emit_block_mapping_value(emitter, event, false)
	case yaml_EMIT_END_STATE:
		return yaml_emitter_set_emitter_error(emitter,
			"expected nothing after STREAM-END")
	}
	// Unreachable for any valid emitter state: a missing case is a
	// programmer error, not a recoverable emitter error.
	panic("invalid state")
}
/*
 * Expect STREAM-START.
 *
 * Validates the event, resolves "any"/out-of-range emitter settings to
 * defaults, resets output-position tracking, and writes a BOM for
 * non-UTF-8 encodings.
 */
func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	if event.event_type != yaml_STREAM_START_EVENT {
		return yaml_emitter_set_emitter_error(emitter,
			"expected STREAM-START")
	}
	// Encoding: take the event's choice, falling back to UTF-8.
	if emitter.encoding == yaml_ANY_ENCODING {
		emitter.encoding = event.encoding
		if emitter.encoding == yaml_ANY_ENCODING {
			emitter.encoding = yaml_UTF8_ENCODING
		}
	}
	// Clamp indent to the supported 2..9 range; default 2.
	if emitter.best_indent < 2 || emitter.best_indent > 9 {
		emitter.best_indent = 2
	}
	// Preferred line width: too-small values default to 80,
	// negative means effectively unlimited.
	if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
		emitter.best_width = 80
	}
	if emitter.best_width < 0 {
		emitter.best_width = 1<<31 - 1
	}
	if emitter.line_break == yaml_ANY_BREAK {
		emitter.line_break = yaml_LN_BREAK
	}
	// Reset position state; indent == -1 means "no node emitted yet".
	emitter.indent = -1
	emitter.line = 0
	emitter.column = 0
	emitter.whitespace = true
	emitter.indention = true
	// Non-UTF-8 streams begin with a byte order mark.
	if emitter.encoding != yaml_UTF8_ENCODING {
		if !yaml_emitter_write_bom(emitter) {
			return false
		}
	}
	emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
	return true
}
/*
 * Expect DOCUMENT-START or STREAM-END.
 *
 * first is true only for the first document in the stream; it allows the
 * "---" marker to be omitted when the document is implicit and carries
 * no directives. On STREAM-END the buffered output is flushed and the
 * emitter moves to its terminal state.
 */
func yaml_emitter_emit_document_start(emitter *yaml_emitter_t,
	event *yaml_event_t, first bool) bool {
	if event.event_type == yaml_DOCUMENT_START_EVENT {
		if event.version_directive != nil {
			if !yaml_emitter_analyze_version_directive(emitter,
				*event.version_directive) {
				return false
			}
		}
		// Validate and record the document's own %TAG directives
		// (duplicates among them are an error).
		for i := range event.tag_directives {
			tag_directive := &event.tag_directives[i]
			if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
				return false
			}
			if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
				return false
			}
		}
		// The built-in "!" and "!!" handles are always available;
		// duplicates with document directives are tolerated here.
		for i := range default_tag_directives {
			if !yaml_emitter_append_tag_directive(emitter, &default_tag_directives[i], true) {
				return false
			}
		}
		implicit := event.implicit
		if !first || emitter.canonical {
			implicit = false
		}
		// If the previous document was left open-ended and this one has
		// directives, terminate it explicitly with "..." first.
		if (event.version_directive != nil || len(event.tag_directives) > 0) &&
			emitter.open_ended {
			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
				return false
			}
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}
		// A %YAML directive forces an explicit document start.
		if event.version_directive != nil {
			implicit = false
			if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
				return false
			}
			if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
				return false
			}
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}
		// %TAG directives likewise force an explicit document start.
		if len(event.tag_directives) > 0 {
			implicit = false
			for i := range event.tag_directives {
				tag_directive := &event.tag_directives[i]
				if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
					return false
				}
				if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
					return false
				}
				if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
					return false
				}
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
			}
		}
		if yaml_emitter_check_empty_document(emitter) {
			implicit = false
		}
		// Non-implicit documents get the "---" marker.
		if !implicit {
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
			if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
				return false
			}
			if emitter.canonical {
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
			}
		}
		emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
		return true
	} else if event.event_type == yaml_STREAM_END_EVENT {
		// Close an open-ended final document before ending the stream.
		if emitter.open_ended {
			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
				return false
			}
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}
		if !yaml_emitter_flush(emitter) {
			return false
		}
		emitter.state = yaml_EMIT_END_STATE
		return true
	}
	return yaml_emitter_set_emitter_error(emitter,
		"expected DOCUMENT-START or STREAM-END")
}
/*
 * Expect the root node.
 *
 * Pushes the DOCUMENT-END state so it resumes once the (possibly deeply
 * nested) root node has been fully emitted, then emits the node with the
 * root-context flag set.
 */
func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
	return yaml_emitter_emit_node(emitter, event, true, false, false, false)
}
/*
 * Expect DOCUMENT-END.
 *
 * Writes an explicit "..." marker for non-implicit ends, flushes the
 * buffered output, clears the per-document %TAG directives, and loops
 * back to DOCUMENT-START for any following document.
 */
func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	if event.event_type != yaml_DOCUMENT_END_EVENT {
		return yaml_emitter_set_emitter_error(emitter,
			"expected DOCUMENT-END")
	}
	if !yaml_emitter_write_indent(emitter) {
		return false
	}
	if !event.implicit {
		if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
			return false
		}
		if !yaml_emitter_write_indent(emitter) {
			return false
		}
	}
	if !yaml_emitter_flush(emitter) {
		return false
	}
	emitter.state = yaml_EMIT_DOCUMENT_START_STATE
	// Tag directives are scoped to a single document.
	emitter.tag_directives = emitter.tag_directives[:0]
	return true
}
/*
 * Expect a flow item node.
 *
 * On the first call writes "[" and enters a new flow level; on
 * SEQUENCE-END pops indent/state and writes "]"; otherwise separates
 * items with "," and emits the next node in sequence context.
 */
func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
	if first {
		if !yaml_emitter_write_indicator(emitter, []byte("["), true, true, false) {
			return false
		}
		if !yaml_emitter_increase_indent(emitter, true, false) {
			return false
		}
		emitter.flow_level++
	}
	if event.event_type == yaml_SEQUENCE_END_EVENT {
		emitter.flow_level--
		// Pop the indent pushed when the sequence was opened.
		emitter.indent = emitter.indents[len(emitter.indents)-1]
		emitter.indents = emitter.indents[:len(emitter.indents)-1]
		// Canonical output keeps a trailing "," before "]" (unless the
		// sequence was empty).
		if emitter.canonical && !first {
			if !yaml_emitter_write_indicator(emitter, []byte(","), false, false, false) {
				return false
			}
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}
		if !yaml_emitter_write_indicator(emitter, []byte("]"), false, false, false) {
			return false
		}
		// Resume the state saved before the sequence started.
		emitter.state = emitter.states[len(emitter.states)-1]
		emitter.states = emitter.states[:len(emitter.states)-1]
		return true
	}
	if !first {
		if !yaml_emitter_write_indicator(emitter, []byte(","), false, false, false) {
			return false
		}
	}
	// Wrap long lines (always, in canonical mode).
	if emitter.canonical || emitter.column > emitter.best_width {
		if !yaml_emitter_write_indent(emitter) {
			return false
		}
	}
	emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
}
/*
 * Expect a flow key node.
 *
 * Mirrors the flow-sequence handler: "{" on the first call, "}" on
 * MAPPING-END, "," separators between entries. Short single-line keys
 * are emitted as simple keys; everything else uses the explicit "?"
 * form.
 */
func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t,
	event *yaml_event_t, first bool) bool {
	if first {
		if !yaml_emitter_write_indicator(emitter, []byte("{"), true, true, false) {
			return false
		}
		if !yaml_emitter_increase_indent(emitter, true, false) {
			return false
		}
		emitter.flow_level++
	}
	if event.event_type == yaml_MAPPING_END_EVENT {
		emitter.flow_level--
		// Pop the indent pushed when the mapping was opened.
		emitter.indent = emitter.indents[len(emitter.indents)-1]
		emitter.indents = emitter.indents[:len(emitter.indents)-1]
		// Canonical output keeps a trailing "," before "}" (unless the
		// mapping was empty).
		if emitter.canonical && !first {
			if !yaml_emitter_write_indicator(emitter, []byte(","), false, false, false) {
				return false
			}
			if !yaml_emitter_write_indent(emitter) {
				return false
			}
		}
		if !yaml_emitter_write_indicator(emitter, []byte("}"), false, false, false) {
			return false
		}
		// Resume the state saved before the mapping started.
		emitter.state = emitter.states[len(emitter.states)-1]
		emitter.states = emitter.states[:len(emitter.states)-1]
		return true
	}
	if !first {
		if !yaml_emitter_write_indicator(emitter, []byte(","), false, false, false) {
			return false
		}
	}
	// Wrap long lines (always, in canonical mode).
	if emitter.canonical || emitter.column > emitter.best_width {
		if !yaml_emitter_write_indent(emitter) {
			return false
		}
	}
	if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
	} else {
		if !yaml_emitter_write_indicator(emitter, []byte("?"), true, false, false) {
			return false
		}
		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
		return yaml_emitter_emit_node(emitter, event, false, false, true, false)
	}
}
/*
 * Expect a flow value node.
 *
 * For a simple key the ":" directly follows the key; for the explicit
 * "?" form the ":" may be wrapped onto a new line and requires leading
 * whitespace.
 */
func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t,
	event *yaml_event_t, simple bool) bool {
	if simple {
		if !yaml_emitter_write_indicator(emitter, []byte(":"), false, false, false) {
			return false
		}
	} else {
		needIndent := emitter.canonical || emitter.column > emitter.best_width
		if needIndent && !yaml_emitter_write_indent(emitter) {
			return false
		}
		if !yaml_emitter_write_indicator(emitter, []byte(":"), true, false, false) {
			return false
		}
	}
	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
}
/*
 * Expect a block item node.
 *
 * The first item raises the indent; the increase is suppressed
 * ("indentless") when the sequence sits directly under a mapping key on
 * the same line, producing YAML's compact block-sequence layout.
 */
func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t,
	event *yaml_event_t, first bool) bool {
	if first {
		if !yaml_emitter_increase_indent(emitter, false,
			(emitter.mapping_context && !emitter.indention)) {
			return false
		}
	}
	if event.event_type == yaml_SEQUENCE_END_EVENT {
		// Pop indent and saved state; block collections need no closing
		// indicator.
		emitter.indent = emitter.indents[len(emitter.indents)-1]
		emitter.indents = emitter.indents[:len(emitter.indents)-1]
		emitter.state = emitter.states[len(emitter.states)-1]
		emitter.states = emitter.states[:len(emitter.states)-1]
		return true
	}
	if !yaml_emitter_write_indent(emitter) {
		return false
	}
	if !yaml_emitter_write_indicator(emitter, []byte("-"), true, false, true) {
		return false
	}
	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
}
/*
 * Expect a block key node.
 *
 * Raises the indent on the first key, pops indent/state on MAPPING-END,
 * and otherwise writes the key — as a simple key when it is short and
 * single-line, or in the explicit "?" form otherwise.
 */
func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t,
	event *yaml_event_t, first bool) bool {
	if first {
		if !yaml_emitter_increase_indent(emitter, false, false) {
			return false
		}
	}
	if event.event_type == yaml_MAPPING_END_EVENT {
		// Pop indent and saved state; block collections need no closing
		// indicator.
		emitter.indent = emitter.indents[len(emitter.indents)-1]
		emitter.indents = emitter.indents[:len(emitter.indents)-1]
		emitter.state = emitter.states[len(emitter.states)-1]
		emitter.states = emitter.states[:len(emitter.states)-1]
		return true
	}
	if !yaml_emitter_write_indent(emitter) {
		return false
	}
	if yaml_emitter_check_simple_key(emitter) {
		emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
	} else {
		if !yaml_emitter_write_indicator(emitter, []byte("?"), true, false, true) {
			return false
		}
		emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
		return yaml_emitter_emit_node(emitter, event, false, false, true, false)
	}
}
/*
 * Expect a block value node.
 *
 * A simple key's ":" follows immediately on the same line; for an
 * explicit "?" key the value's ":" starts on its own indented line.
 */
func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t,
	event *yaml_event_t, simple bool) bool {
	if !simple {
		if !yaml_emitter_write_indent(emitter) {
			return false
		}
		if !yaml_emitter_write_indicator(emitter, []byte(":"), true, false, true) {
			return false
		}
	} else if !yaml_emitter_write_indicator(emitter, []byte(":"), false, false, false) {
		return false
	}
	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
}
/*
 * Expect a node.
 *
 * Records the node's context flags on the emitter, then dispatches on
 * the event type. Removed the unreachable "return false" that followed
 * the exhaustive switch (every case, including default, returns).
 */
func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
	root bool, sequence bool, mapping bool, simple_key bool) bool {
	emitter.root_context = root
	emitter.sequence_context = sequence
	emitter.mapping_context = mapping
	emitter.simple_key_context = simple_key
	switch event.event_type {
	case yaml_ALIAS_EVENT:
		return yaml_emitter_emit_alias(emitter, event)
	case yaml_SCALAR_EVENT:
		return yaml_emitter_emit_scalar(emitter, event)
	case yaml_SEQUENCE_START_EVENT:
		return yaml_emitter_emit_sequence_start(emitter, event)
	case yaml_MAPPING_START_EVENT:
		return yaml_emitter_emit_mapping_start(emitter, event)
	default:
		return yaml_emitter_set_emitter_error(emitter,
			"expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
	}
}
/*
 * Expect ALIAS.
 *
 * An alias is a leaf: write the "*name" reference, then pop and resume
 * the saved state. The event parameter is unused but kept for a uniform
 * handler signature.
 */
func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	if !yaml_emitter_process_anchor(emitter) {
		return false
	}
	last := len(emitter.states) - 1
	emitter.state = emitter.states[last]
	emitter.states = emitter.states[:last]
	return true
}
/*
 * Expect SCALAR.
 *
 * Order matters: style selection must run before tag processing (it may
 * force a "!" tag handle). The indent is raised only around the scalar
 * body and restored immediately afterwards, together with the saved
 * state.
 */
func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	if !yaml_emitter_select_scalar_style(emitter, event) {
		return false
	}
	if !yaml_emitter_process_anchor(emitter) {
		return false
	}
	if !yaml_emitter_process_tag(emitter) {
		return false
	}
	if !yaml_emitter_increase_indent(emitter, true, false) {
		return false
	}
	if !yaml_emitter_process_scalar(emitter) {
		return false
	}
	// Pop the indent pushed above and resume the saved state.
	emitter.indent = emitter.indents[len(emitter.indents)-1]
	emitter.indents = emitter.indents[:len(emitter.indents)-1]
	emitter.state = emitter.states[len(emitter.states)-1]
	emitter.states = emitter.states[:len(emitter.states)-1]
	return true
}
/*
 * Expect SEQUENCE-START.
 *
 * Writes anchor and tag, then selects flow or block output for the
 * items that follow.
 */
func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	if !yaml_emitter_process_anchor(emitter) {
		return false
	}
	if !yaml_emitter_process_tag(emitter) {
		return false
	}
	// Flow style applies inside an existing flow collection, in
	// canonical mode, on explicit request, or for an empty sequence,
	// which block form cannot express.
	useFlow := emitter.flow_level > 0 ||
		emitter.canonical ||
		event.style == yaml_style_t(yaml_FLOW_SEQUENCE_STYLE) ||
		yaml_emitter_check_empty_sequence(emitter)
	if useFlow {
		emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
	} else {
		emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
	}
	return true
}
/*
 * Expect MAPPING-START.
 *
 * Writes anchor and tag, then selects flow or block output for the
 * key/value pairs that follow.
 */
func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	if !yaml_emitter_process_anchor(emitter) {
		return false
	}
	if !yaml_emitter_process_tag(emitter) {
		return false
	}
	// Flow style applies inside an existing flow collection, in
	// canonical mode, on explicit request, or for an empty mapping,
	// which block form cannot express.
	useFlow := emitter.flow_level > 0 ||
		emitter.canonical ||
		event.style == yaml_style_t(yaml_FLOW_MAPPING_STYLE) ||
		yaml_emitter_check_empty_mapping(emitter)
	if useFlow {
		emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
	} else {
		emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
	}
	return true
}
/*
 * Check if the document content is an empty scalar.
 *
 * Deliberate stub: always reports false, so documents are never treated
 * as empty and the "---" logic in emit_document_start is unaffected by
 * this check.
 */
func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
	return false
}
/*
 * Check if the next events represent an empty sequence.
 *
 * True when a SEQUENCE-START at the queue head is immediately followed
 * by a SEQUENCE-END.
 */
func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
	head := emitter.events_head
	if len(emitter.events) < head+2 {
		return false
	}
	return emitter.events[head].event_type == yaml_SEQUENCE_START_EVENT &&
		emitter.events[head+1].event_type == yaml_SEQUENCE_END_EVENT
}
/*
 * Check if the next events represent an empty mapping.
 *
 * True when a MAPPING-START at the queue head is immediately followed
 * by a MAPPING-END.
 */
func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
	head := emitter.events_head
	if len(emitter.events) < head+2 {
		return false
	}
	return emitter.events[head].event_type == yaml_MAPPING_START_EVENT &&
		emitter.events[head+1].event_type == yaml_MAPPING_END_EVENT
}
/*
 * Check if the next node can be expressed as a simple key.
 *
 * A simple key must be a single-line scalar, an alias, or an empty
 * collection, and its rendered length (anchor + tag + value) must not
 * exceed 128 characters.
 */
func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
	const maxSimpleKeyLength = 128
	length := 0
	switch emitter.events[emitter.events_head].event_type {
	case yaml_ALIAS_EVENT:
		length = len(emitter.anchor_data.anchor)
	case yaml_SCALAR_EVENT:
		if emitter.scalar_data.multiline {
			// Multiline scalars can never be simple keys.
			return false
		}
		length = len(emitter.anchor_data.anchor) +
			len(emitter.tag_data.handle) +
			len(emitter.tag_data.suffix) +
			len(emitter.scalar_data.value)
	case yaml_SEQUENCE_START_EVENT:
		if !yaml_emitter_check_empty_sequence(emitter) {
			return false
		}
		length = len(emitter.anchor_data.anchor) +
			len(emitter.tag_data.handle) +
			len(emitter.tag_data.suffix)
	case yaml_MAPPING_START_EVENT:
		if !yaml_emitter_check_empty_mapping(emitter) {
			return false
		}
		length = len(emitter.anchor_data.anchor) +
			len(emitter.tag_data.handle) +
			len(emitter.tag_data.suffix)
	default:
		return false
	}
	return length <= maxSimpleKeyLength
}
/*
 * Determine an acceptable scalar style.
 *
 * Starts from the event's requested style and refines it through a
 * cascade of constraints; the ORDER of these checks matters, as later
 * ones override earlier choices. May also force a "!" tag handle so an
 * untagged value still round-trips once quoted.
 */
func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
	if no_tag && !event.implicit && !event.quoted_implicit {
		return yaml_emitter_set_emitter_error(emitter,
			"neither tag nor implicit flags are specified")
	}
	style := yaml_scalar_style_t(event.style)
	if style == yaml_ANY_SCALAR_STYLE {
		style = yaml_PLAIN_SCALAR_STYLE
	}
	// Canonical output always double-quotes scalars.
	if emitter.canonical {
		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
	}
	// A simple key must fit on one line; quote multiline content.
	if emitter.simple_key_context && emitter.scalar_data.multiline {
		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
	}
	if style == yaml_PLAIN_SCALAR_STYLE {
		// Plain style only where the scalar analysis allows it for the
		// current (flow vs. block) context.
		if (emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed) ||
			(emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed) {
			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
		}
		// An empty plain scalar is ambiguous in flow or simple-key
		// position; quote it.
		if len(emitter.scalar_data.value) == 0 &&
			(emitter.flow_level > 0 || emitter.simple_key_context) {
			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
		}
		// An untagged, non-plain-implicit value must be quoted.
		if no_tag && !event.implicit {
			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
		}
	}
	if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
		if !emitter.scalar_data.single_quoted_allowed {
			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
		}
	}
	// Literal/folded block styles cannot appear in flow context or as
	// simple keys.
	if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
		if !emitter.scalar_data.block_allowed ||
			emitter.flow_level > 0 || emitter.simple_key_context {
			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
		}
	}
	// When quoting changes how the value would be resolved, force an
	// explicit "!" handle to preserve its (non-quoted) implicit tag.
	if no_tag && !event.quoted_implicit &&
		style != yaml_PLAIN_SCALAR_STYLE {
		emitter.tag_data.handle = []byte("!")
	}
	emitter.scalar_data.style = style
	return true
}
/*
 * Write an anchor ("&name") or alias ("*name") if the node has one.
 */
func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
	if emitter.anchor_data.anchor == nil {
		// Unanchored node: nothing to write.
		return true
	}
	indicator := "&"
	if emitter.anchor_data.alias {
		indicator = "*"
	}
	if !yaml_emitter_write_indicator(emitter, []byte(indicator), true, false, false) {
		return false
	}
	return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
}
/*
* Write a tag.
*/
// yaml_emitter_process_tag writes the analyzed tag, either as a shorthand
// (handle + escaped suffix) or verbatim as !<suffix> when no handle is set.
// A no-op when the event carries no tag data.
func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
    handle := emitter.tag_data.handle
    suffix := emitter.tag_data.suffix
    if len(handle) == 0 && len(suffix) == 0 {
        return true
    }
    if len(handle) == 0 {
        // Verbatim form: !<suffix>
        return yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) &&
            yaml_emitter_write_tag_content(emitter, suffix, false) &&
            yaml_emitter_write_indicator(emitter, []byte(">"), false, false, false)
    }
    // Shorthand form: handle followed by an optional suffix.
    if !yaml_emitter_write_tag_handle(emitter, handle) {
        return false
    }
    if len(suffix) == 0 {
        return true
    }
    return yaml_emitter_write_tag_content(emitter, suffix, false)
}
/*
* Write a scalar.
*/
// yaml_emitter_process_scalar writes the analyzed scalar value using the
// style chosen by yaml_emitter_select_scalar_style. Line folding is allowed
// for plain/quoted styles only when the scalar is not a simple key.
//
// Fix: the trailing `return false` after the switch was unreachable (every
// case returns and the default panics); it has been removed.
func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
    switch emitter.scalar_data.style {
    case yaml_PLAIN_SCALAR_STYLE:
        return yaml_emitter_write_plain_scalar(emitter,
            emitter.scalar_data.value,
            !emitter.simple_key_context)
    case yaml_SINGLE_QUOTED_SCALAR_STYLE:
        return yaml_emitter_write_single_quoted_scalar(emitter,
            emitter.scalar_data.value,
            !emitter.simple_key_context)
    case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
        return yaml_emitter_write_double_quoted_scalar(emitter,
            emitter.scalar_data.value,
            !emitter.simple_key_context)
    case yaml_LITERAL_SCALAR_STYLE:
        return yaml_emitter_write_literal_scalar(emitter,
            emitter.scalar_data.value)
    case yaml_FOLDED_SCALAR_STYLE:
        return yaml_emitter_write_folded_scalar(emitter,
            emitter.scalar_data.value)
    default:
        // Reaching here means select_scalar_style produced an unknown
        // style — a programming error, not a user error.
        panic("unknown scalar")
    }
}
/*
* Check if a %YAML directive is valid.
*/
// yaml_emitter_analyze_version_directive accepts only %YAML 1.1; anything
// else is reported as an emitter error.
func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t,
    version_directive yaml_version_directive_t) bool {
    if version_directive.major == 1 && version_directive.minor == 1 {
        return true
    }
    return yaml_emitter_set_emitter_error(emitter,
        "incompatible %YAML directive")
}
/*
* Check if a %TAG directive is valid.
*/
// yaml_emitter_analyze_tag_directive validates a %TAG directive: the handle
// must be a non-empty word delimited by '!' containing only alphanumerical
// characters, and the prefix must be non-empty.
//
// BUG FIX: the validation loop's post statement was `width(handle[i])` —
// it evaluated the rune width but never advanced i, so any handle longer
// than "!!" (i.e. with interior characters) spun forever. It now advances
// by the rune width, matching the identical loop in
// yaml_emitter_analyze_anchor.
func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t,
    tag_directive *yaml_tag_directive_t) bool {
    handle := tag_directive.handle
    prefix := tag_directive.prefix
    if len(handle) == 0 {
        return yaml_emitter_set_emitter_error(emitter,
            "tag handle must not be empty")
    }
    if handle[0] != '!' {
        return yaml_emitter_set_emitter_error(emitter,
            "tag handle must start with '!'")
    }
    if handle[len(handle)-1] != '!' {
        return yaml_emitter_set_emitter_error(emitter,
            "tag handle must end with '!'")
    }
    // Interior characters (between the delimiting '!'s) must be alphanumeric.
    for i := 1; i < len(handle)-1; i += width(handle[i]) {
        if !is_alpha(handle[i]) {
            return yaml_emitter_set_emitter_error(emitter,
                "tag handle must contain alphanumerical characters only")
        }
    }
    if len(prefix) == 0 {
        return yaml_emitter_set_emitter_error(emitter,
            "tag prefix must not be empty")
    }
    return true
}
/*
* Check if an anchor is valid.
*/
// yaml_emitter_analyze_anchor validates an anchor or alias name (non-empty,
// alphanumerical only) and records it in emitter.anchor_data.
func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t,
    anchor []byte, alias bool) bool {
    if len(anchor) == 0 {
        msg := "anchor value must not be empty"
        if alias {
            msg = "alias value must not be empty"
        }
        return yaml_emitter_set_emitter_error(emitter, msg)
    }
    for pos := 0; pos < len(anchor); pos += width(anchor[pos]) {
        if is_alpha(anchor[pos]) {
            continue
        }
        msg := "anchor value must contain alphanumerical characters only"
        if alias {
            msg = "alias value must contain alphanumerical characters only"
        }
        return yaml_emitter_set_emitter_error(emitter, msg)
    }
    emitter.anchor_data.anchor = anchor
    emitter.anchor_data.alias = alias
    return true
}
/*
* Check if a tag is valid.
*/
// yaml_emitter_analyze_tag records the tag in emitter.tag_data, splitting it
// into handle+suffix via the first matching %TAG directive, or storing the
// whole tag as a verbatim suffix when no directive prefix matches.
func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
    if len(tag) == 0 {
        return yaml_emitter_set_emitter_error(emitter,
            "tag value must not be empty")
    }
    for idx := range emitter.tag_directives {
        directive := &emitter.tag_directives[idx]
        prefix := directive.prefix
        if !bytes.HasPrefix(tag, prefix) {
            continue
        }
        // Shorthand form: directive handle plus the remainder of the tag.
        emitter.tag_data.handle = directive.handle
        emitter.tag_data.suffix = tag[len(prefix):]
        return true
    }
    // No directive matched: the tag will be written verbatim.
    emitter.tag_data.suffix = tag
    return true
}
/*
* Check if a scalar is valid.
*/
// yaml_emitter_analyze_scalar scans value once and records in
// emitter.scalar_data which styles (flow-plain, block-plain, single-quoted,
// block) can legally represent it; the actual choice happens later in
// yaml_emitter_select_scalar_style. Always returns true.
func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
    // Properties accumulated over the scan.
    block_indicators := false   // contains characters special in block context
    flow_indicators := false    // contains characters special in flow context
    line_breaks := false        // contains at least one line break
    special_characters := false // non-printable, or non-ASCII when !emitter.unicode
    leading_space := false
    leading_break := false
    trailing_space := false
    trailing_break := false
    break_space := false // a break immediately followed by a space
    space_break := false // a space immediately followed by a break
    preceeded_by_whitespace := false // (sic) spelling kept from the original port
    followed_by_whitespace := false
    previous_space := false
    previous_break := false
    emitter.scalar_data.value = value
    // Empty scalar: plain only in block context; quoting always allowed;
    // block styles never.
    if len(value) == 0 {
        emitter.scalar_data.multiline = false
        emitter.scalar_data.flow_plain_allowed = false
        emitter.scalar_data.block_plain_allowed = true
        emitter.scalar_data.single_quoted_allowed = true
        emitter.scalar_data.block_allowed = false
        return true
    }
    // "---"/"..." prefixes would be confused with document markers.
    if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') ||
        (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
        block_indicators = true
        flow_indicators = true
    }
    // The start of the scalar counts as following whitespace.
    preceeded_by_whitespace = true
    for i, w := 0, 0; i < len(value); i += w {
        w = width(value[i])
        followed_by_whitespace = i+w >= len(value) || is_blankz_at(value, i+w)
        if i == 0 {
            // Characters that cannot start a plain scalar.
            switch value[i] {
            case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
                flow_indicators = true
                block_indicators = true
            case '?', ':':
                flow_indicators = true
                if followed_by_whitespace {
                    block_indicators = true
                }
            case '-':
                if followed_by_whitespace {
                    flow_indicators = true
                    block_indicators = true
                }
            }
        } else {
            // Characters restricted inside a plain scalar.
            switch value[i] {
            case ',', '?', '[', ']', '{', '}':
                flow_indicators = true
            case ':':
                flow_indicators = true
                if followed_by_whitespace {
                    block_indicators = true
                }
            case '#':
                if preceeded_by_whitespace {
                    flow_indicators = true
                    block_indicators = true
                }
            }
        }
        if !is_printable_at(value, i) || (!is_ascii(value[i]) && !emitter.unicode) {
            special_characters = true
        }
        if is_break_at(value, i) {
            line_breaks = true
        }
        // Track space/break adjacency for the break_space/space_break flags
        // and leading/trailing whitespace.
        if is_space(value[i]) {
            if i == 0 {
                leading_space = true
            }
            if i+w == len(value) {
                trailing_space = true
            }
            if previous_break {
                break_space = true
            }
            previous_space = true
            previous_break = false
        } else if is_break_at(value, i) {
            if i == 0 {
                leading_break = true
            }
            if i+width(value[i]) == len(value) {
                trailing_break = true
            }
            if previous_space {
                space_break = true
            }
            previous_space = false
            previous_break = true
        } else {
            previous_space = false
            previous_break = false
        }
        preceeded_by_whitespace = is_blankz_at(value, i)
    }
    // Translate the accumulated properties into the allowed-style flags.
    emitter.scalar_data.multiline = line_breaks
    emitter.scalar_data.flow_plain_allowed = true
    emitter.scalar_data.block_plain_allowed = true
    emitter.scalar_data.single_quoted_allowed = true
    emitter.scalar_data.block_allowed = true
    if leading_space || leading_break || trailing_space || trailing_break {
        emitter.scalar_data.flow_plain_allowed = false
        emitter.scalar_data.block_plain_allowed = false
    }
    if trailing_space {
        emitter.scalar_data.block_allowed = false
    }
    if break_space {
        emitter.scalar_data.flow_plain_allowed = false
        emitter.scalar_data.block_plain_allowed = false
        emitter.scalar_data.single_quoted_allowed = false
    }
    if space_break || special_characters {
        emitter.scalar_data.flow_plain_allowed = false
        emitter.scalar_data.block_plain_allowed = false
        emitter.scalar_data.single_quoted_allowed = false
        emitter.scalar_data.block_allowed = false
    }
    if line_breaks {
        emitter.scalar_data.flow_plain_allowed = false
        emitter.scalar_data.block_plain_allowed = false
    }
    if flow_indicators {
        emitter.scalar_data.flow_plain_allowed = false
    }
    if block_indicators {
        emitter.scalar_data.block_plain_allowed = false
    }
    return true
}
/*
* Check if the event data is valid.
*/
// yaml_emitter_analyze_event resets the per-event analysis state and then
// validates/records the event's anchor, tag, and (for scalars) value.
// Sequence-start and mapping-start share identical handling.
func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
    emitter.anchor_data.anchor = nil
    emitter.tag_data.handle = nil
    emitter.tag_data.suffix = nil
    emitter.scalar_data.value = nil

    switch event.event_type {
    case yaml_ALIAS_EVENT:
        return yaml_emitter_analyze_anchor(emitter, event.anchor, true)

    case yaml_SCALAR_EVENT:
        if len(event.anchor) > 0 &&
            !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
            return false
        }
        // The tag is only needed when canonical, or when the scalar is not
        // implicit in either form.
        tagNeeded := emitter.canonical || (!event.implicit && !event.quoted_implicit)
        if len(event.tag) > 0 && tagNeeded {
            if !yaml_emitter_analyze_tag(emitter, event.tag) {
                return false
            }
        }
        return yaml_emitter_analyze_scalar(emitter, event.value)

    case yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
        if len(event.anchor) > 0 &&
            !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
            return false
        }
        if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
            if !yaml_emitter_analyze_tag(emitter, event.tag) {
                return false
            }
        }
    }
    return true
}
/*
* Write the BOM character.
*/
// yaml_emitter_write_bom flushes pending output and appends the UTF-8 byte
// order mark (EF BB BF) directly to the output buffer.
func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
    if !flush(emitter) {
        return false
    }
    for _, octet := range []byte{'\xEF', '\xBB', '\xBF'} {
        emitter.buffer[emitter.buffer_pos] = octet
        emitter.buffer_pos++
    }
    return true
}
// yaml_emitter_write_indent moves the cursor to the start of the current
// indentation level, emitting a line break first unless the cursor is
// already at (or before) the indent column on a fresh, indented line.
func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
    indent := emitter.indent
    if indent < 0 {
        indent = 0
    }
    needsBreak := !emitter.indention ||
        emitter.column > indent ||
        (emitter.column == indent && !emitter.whitespace)
    if needsBreak && !put_break(emitter) {
        return false
    }
    // Pad with spaces out to the indent column.
    for emitter.column < indent {
        if !put(emitter, ' ') {
            return false
        }
    }
    emitter.whitespace = true
    emitter.indention = true
    return true
}
// yaml_emitter_write_indicator writes an indicator token (e.g. "-", "?",
// "!<"), optionally preceded by a separating space, and updates the
// whitespace/indentation state accordingly.
func yaml_emitter_write_indicator(emitter *yaml_emitter_t,
    indicator []byte, need_whitespace bool,
    is_whitespace bool, is_indention bool) bool {
    if need_whitespace && !emitter.whitespace {
        if !put(emitter, ' ') {
            return false
        }
    }
    for pos := 0; pos < len(indicator); {
        if !write(emitter, indicator, &pos) {
            return false
        }
    }
    emitter.whitespace = is_whitespace
    emitter.indention = emitter.indention && is_indention
    emitter.open_ended = false
    return true
}
// yaml_emitter_write_anchor writes the anchor name verbatim and clears the
// whitespace/indentation flags.
func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
    for idx := 0; idx < len(value); {
        if !write(emitter, value, &idx) {
            return false
        }
    }
    emitter.whitespace = false
    emitter.indention = false
    return true
}
// yaml_emitter_write_tag_handle writes a tag handle (e.g. "!!"), inserting
// a separating space first when the previous output was not whitespace.
func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
    if !emitter.whitespace && !put(emitter, ' ') {
        return false
    }
    for idx := 0; idx < len(value); {
        if !write(emitter, value, &idx) {
            return false
        }
    }
    emitter.whitespace = false
    emitter.indention = false
    return true
}
// yaml_emitter_write_tag_content writes a tag suffix, percent-escaping every
// byte that is neither alphanumeric nor one of the URI characters allowed
// verbatim in tags.
func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte,
    need_whitespace bool) bool {
    if need_whitespace && !emitter.whitespace {
        if !put(emitter, ' ') {
            return false
        }
    }
    for i := 0; i < len(value); {
        write_it := false
        switch value[i] {
        // URI characters that may appear unescaped in a tag.
        case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_',
            '.', '!', '~', '*', '\'', '(', ')', '[', ']':
            write_it = true
        default:
            write_it = is_alpha(value[i])
        }
        if write_it {
            if !write(emitter, value, &i) {
                return false
            }
        } else {
            // Percent-escape each byte of this (possibly multi-byte) rune;
            // i is advanced manually inside the inner loop.
            w := width(value[i])
            for j := 0; j < w; j++ {
                val := value[i]
                i++
                if !put(emitter, '%') {
                    return false
                }
                // High nibble as an uppercase hex digit.
                c := val >> 4
                if c < 10 {
                    c += '0'
                } else {
                    c += 'A' - 10
                }
                if !put(emitter, c) {
                    return false
                }
                // Low nibble.
                c = val & 0x0f
                if c < 10 {
                    c += '0'
                } else {
                    c += 'A' - 10
                }
                if !put(emitter, c) {
                    return false
                }
            }
        }
    }
    emitter.whitespace = false
    emitter.indention = false
    return true
}
// yaml_emitter_write_plain_scalar emits value without quoting. When
// allow_breaks is set (the scalar is not a simple key), a space may be
// replaced by a line break once the line exceeds the preferred width.
//
// BUG FIX: the fold condition peeked at value[i+1] without checking bounds;
// a value ending in a space could index one past the end of the slice. The
// single-quoted writer already guards this — the same guard is added here.
func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte,
    allow_breaks bool) bool {
    spaces := false
    breaks := false
    if !emitter.whitespace {
        if !put(emitter, ' ') {
            return false
        }
    }
    for i := 0; i < len(value); {
        if is_space(value[i]) {
            // Fold at a single interior space once the line is too long.
            if allow_breaks && !spaces &&
                emitter.column > emitter.best_width &&
                i+1 < len(value) &&
                !is_space(value[i+1]) {
                if !yaml_emitter_write_indent(emitter) {
                    return false
                }
                i += width(value[i])
            } else {
                if !write(emitter, value, &i) {
                    return false
                }
            }
            spaces = true
        } else if is_break_at(value, i) {
            // The first '\n' of a run gets an extra emitted break.
            if !breaks && value[i] == '\n' {
                if !put_break(emitter) {
                    return false
                }
            }
            if !write_break(emitter, value, &i) {
                return false
            }
            emitter.indention = true
            breaks = true
        } else {
            if breaks {
                if !yaml_emitter_write_indent(emitter) {
                    return false
                }
            }
            if !write(emitter, value, &i) {
                return false
            }
            emitter.indention = false
            spaces = false
            breaks = false
        }
    }
    emitter.whitespace = false
    emitter.indention = false
    if emitter.root_context {
        emitter.open_ended = true
    }
    return true
}
// yaml_emitter_write_single_quoted_scalar emits value surrounded by single
// quotes, doubling any embedded '\''. When allow_breaks is set, long lines
// may be folded at an interior space.
func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte,
    allow_breaks bool) bool {
    spaces := false
    breaks := false
    if !yaml_emitter_write_indicator(emitter, []byte("'"), true, false, false) {
        return false
    }
    for i := 0; i < len(value); {
        if is_space(value[i]) {
            // Fold at a single interior space once the line is too long;
            // the bounds guards keep the value[i+1] peek in range.
            if allow_breaks && !spaces &&
                emitter.column > emitter.best_width &&
                i > 0 && i < len(value)-1 &&
                !is_space(value[i+1]) {
                if !yaml_emitter_write_indent(emitter) {
                    return false
                }
                i += width(value[i])
            } else {
                if !write(emitter, value, &i) {
                    return false
                }
            }
            spaces = true
        } else if is_break_at(value, i) {
            // The first '\n' of a run gets an extra emitted break.
            if !breaks && value[i] == '\n' {
                if !put_break(emitter) {
                    return false
                }
            }
            if !write_break(emitter, value, &i) {
                return false
            }
            emitter.indention = true
            breaks = true
        } else {
            if breaks {
                if !yaml_emitter_write_indent(emitter) {
                    return false
                }
            }
            // Single quotes are escaped by doubling them.
            if value[i] == '\'' {
                if !put(emitter, '\'') {
                    return false
                }
            }
            if !write(emitter, value, &i) {
                return false
            }
            emitter.indention = false
            spaces = false
            breaks = false
        }
    }
    if !yaml_emitter_write_indicator(emitter, []byte("'"), false, false, false) {
        return false
    }
    emitter.whitespace = false
    emitter.indention = false
    return true
}
// yaml_emitter_write_double_quoted_scalar emits value in double quotes,
// escaping non-printable characters, line breaks, BOMs, '"' and '\'.
// When allow_breaks is set, long lines may be folded at a space.
//
// BUG FIX: in the generic \x/\u/\U escape path, the hex-digit conversion
// was `c := digit + '0'; if c > 9 { ... }`. Since c >= '0' (48), c > 9 was
// always true, so every digit — including 0-9 — went through the
// `digit + 'A' - 10` branch, producing garbage hex digits ('7', '8', ':',
// ...). The comparison now tests the digit itself, matching the correct
// nibble conversion in yaml_emitter_write_tag_content.
func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte,
    allow_breaks bool) bool {
    spaces := false
    if !yaml_emitter_write_indicator(emitter, []byte("\""), true, false, false) {
        return false
    }
    for i := 0; i < len(value); {
        if !is_printable_at(value, i) || (!emitter.unicode && !is_ascii(value[i])) ||
            is_bom_at(value, i) || is_break_at(value, i) ||
            value[i] == '"' || value[i] == '\\' {
            // Decode the UTF-8 rune starting at i so it can be escaped.
            octet := value[i]
            var w int
            var v rune
            switch {
            case octet&0x80 == 0x00:
                w, v = 1, rune(octet&0x7F)
            case octet&0xE0 == 0xC0:
                w, v = 2, rune(octet&0x1F)
            case octet&0xF0 == 0xE0:
                w, v = 3, rune(octet&0x0F)
            case octet&0xF8 == 0xF0:
                w, v = 4, rune(octet&0x07)
            }
            for k := 1; k < w; k++ {
                octet = value[i+k]
                v = (v << 6) + (rune(octet) & 0x3F)
            }
            i += w
            if !put(emitter, '\\') {
                return false
            }
            // Prefer YAML's short named escapes where they exist.
            switch v {
            case 0x00:
                if !put(emitter, '0') {
                    return false
                }
            case 0x07:
                if !put(emitter, 'a') {
                    return false
                }
            case 0x08:
                if !put(emitter, 'b') {
                    return false
                }
            case 0x09:
                if !put(emitter, 't') {
                    return false
                }
            case 0x0A:
                if !put(emitter, 'n') {
                    return false
                }
            case 0x0B:
                if !put(emitter, 'v') {
                    return false
                }
            case 0x0C:
                if !put(emitter, 'f') {
                    return false
                }
            case 0x0D:
                if !put(emitter, 'r') {
                    return false
                }
            case 0x1B:
                if !put(emitter, 'e') {
                    return false
                }
            case 0x22:
                if !put(emitter, '"') {
                    return false
                }
            case 0x5C:
                if !put(emitter, '\\') {
                    return false
                }
            case 0x85:
                if !put(emitter, 'N') {
                    return false
                }
            case 0xA0:
                if !put(emitter, '_') {
                    return false
                }
            case 0x2028:
                if !put(emitter, 'L') {
                    return false
                }
            case 0x2029:
                if !put(emitter, 'P') {
                    return false
                }
            default:
                // Generic numeric escape: \xXX, \uXXXX or \UXXXXXXXX,
                // reusing w as the number of hex digits to emit.
                if v <= 0xFF {
                    if !put(emitter, 'x') {
                        return false
                    }
                    w = 2
                } else if v <= 0xFFFF {
                    if !put(emitter, 'u') {
                        return false
                    }
                    w = 4
                } else {
                    if !put(emitter, 'U') {
                        return false
                    }
                    w = 8
                }
                // Emit w uppercase hex digits, most significant first.
                for k := (w - 1) * 4; k >= 0; k -= 4 {
                    digit := byte((v >> uint(k)) & 0x0F)
                    var c byte
                    if digit < 10 {
                        c = digit + '0'
                    } else {
                        c = digit + 'A' - 10
                    }
                    if !put(emitter, c) {
                        return false
                    }
                }
            }
            spaces = false
        } else if is_space(value[i]) {
            // Possibly fold the line at this interior space.
            if allow_breaks && !spaces &&
                emitter.column > emitter.best_width &&
                i > 0 && i < len(value)-1 {
                if !yaml_emitter_write_indent(emitter) {
                    return false
                }
                // A following space must be escaped so the fold does not
                // swallow it.
                if is_space(value[i+1]) {
                    if !put(emitter, '\\') {
                        return false
                    }
                }
                i += width(value[i])
            } else {
                if !write(emitter, value, &i) {
                    return false
                }
            }
            spaces = true
        } else {
            if !write(emitter, value, &i) {
                return false
            }
            spaces = false
        }
    }
    if !yaml_emitter_write_indicator(emitter, []byte("\""), false, false, false) {
        return false
    }
    emitter.whitespace = false
    emitter.indention = false
    return true
}
// yaml_emitter_write_block_scalar_hints writes the optional indentation and
// chomping indicators that follow a '|' or '>' introducer. chomp_hint stays
// 0 (no indicator) for the default "clip" behavior.
//
// BUG FIX: when checking whether the scalar ends in MORE than one break, the
// code must first step back one byte and then skip UTF-8 continuation bytes
// (libyaml uses a do/while). The previous version only ran the
// continuation-byte loop, which never executes because value[i] points at a
// break *start* byte — so the subsequent is_break_at re-tested the same
// position and any scalar ending in a single newline was wrongly given the
// '+' (keep) indicator and marked open_ended.
func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
    // An explicit indent hint is required when the content starts with
    // whitespace, since the parser could not infer the indent otherwise.
    if is_space(value[0]) || is_break_at(value, 0) {
        indent_hint := []byte{'0' + byte(emitter.best_indent)}
        if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
            return false
        }
    }
    emitter.open_ended = false
    var chomp_hint [1]byte
    if len(value) == 0 {
        chomp_hint[0] = '-'
    } else {
        // Step back over UTF-8 continuation bytes to the start of the
        // final rune.
        i := len(value) - 1
        for value[i]&0xC0 == 0x80 {
            i--
        }
        if !is_break_at(value, i) {
            // No trailing newline: strip.
            chomp_hint[0] = '-'
        } else if i == 0 {
            // The whole value is a single break: keep.
            chomp_hint[0] = '+'
            emitter.open_ended = true
        } else {
            // Move to the rune before the final break: decrement first,
            // then skip continuation bytes.
            i--
            for value[i]&0xC0 == 0x80 {
                i--
            }
            if is_break_at(value, i) {
                // Multiple trailing breaks: keep.
                chomp_hint[0] = '+'
                emitter.open_ended = true
            }
        }
    }
    if chomp_hint[0] != 0 {
        if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
            return false
        }
    }
    return true
}
// yaml_emitter_write_literal_scalar emits value using the literal block
// style ('|'): the header with hints, a break, then each content line
// indented at the current level with its breaks written verbatim.
func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
    if !yaml_emitter_write_indicator(emitter, []byte("|"), true, false, false) {
        return false
    }
    if !yaml_emitter_write_block_scalar_hints(emitter, value) {
        return false
    }
    if !put_break(emitter) {
        return false
    }
    emitter.indention = true
    emitter.whitespace = true
    atLineStart := true
    for pos := 0; pos < len(value); {
        if is_break_at(value, pos) {
            if !write_break(emitter, value, &pos) {
                return false
            }
            emitter.indention = true
            atLineStart = true
            continue
        }
        // First content byte after a break: write the indentation.
        if atLineStart {
            if !yaml_emitter_write_indent(emitter) {
                return false
            }
        }
        if !write(emitter, value, &pos) {
            return false
        }
        emitter.indention = false
        atLineStart = false
    }
    return true
}
// yaml_emitter_write_folded_scalar emits value using the folded block style
// ('>'). An extra break is emitted before a '\n' that separates non-blank
// lines, since folding would otherwise collapse it into a space.
//
// BUG FIX: the long-line fold condition peeked at value[i+1] without a
// bounds check; a value ending in a space could index one past the end of
// the slice. A guard is added (same fix as the plain-scalar writer).
//
// NOTE(review): if value ends in a run of breaks, the lookahead loop below
// advances k to len(value) before calling is_blankz_at(value, k) — confirm
// that is_break_at/is_blankz_at tolerate an index at end-of-slice.
func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
    breaks := true
    leading_spaces := true
    if !yaml_emitter_write_indicator(emitter, []byte(">"), true, false, false) {
        return false
    }
    if !yaml_emitter_write_block_scalar_hints(emitter, value) {
        return false
    }
    if !put_break(emitter) {
        return false
    }
    emitter.indention = true
    emitter.whitespace = true
    for i := 0; i < len(value); {
        if is_break_at(value, i) {
            if !breaks && !leading_spaces && value[i] == '\n' {
                // Look past the run of breaks; if non-blank content
                // follows, emit an extra break so folding preserves it.
                k := i
                for is_break_at(value, k) {
                    k += width(value[k])
                }
                if !is_blankz_at(value, k) {
                    if !put_break(emitter) {
                        return false
                    }
                }
            }
            if !write_break(emitter, value, &i) {
                return false
            }
            emitter.indention = true
            breaks = true
        } else {
            if breaks {
                if !yaml_emitter_write_indent(emitter) {
                    return false
                }
                leading_spaces = is_blank(value[i])
            }
            // Fold at an interior space once the line is too long.
            if !breaks && is_space(value[i]) && i+1 < len(value) &&
                !is_space(value[i+1]) &&
                emitter.column > emitter.best_width {
                if !yaml_emitter_write_indent(emitter) {
                    return false
                }
                i += width(value[i])
            } else {
                if !write(emitter, value, &i) {
                    return false
                }
            }
            emitter.indention = false
            breaks = false
        }
    }
    return true
}
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/encode.go
================================================
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"bytes"
"encoding/base64"
"io"
"math"
"reflect"
"regexp"
"sort"
"strconv"
"time"
)
var (
    // Reflected types that the encoder special-cases.
    timeTimeType  = reflect.TypeOf(time.Time{})
    marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
    numberType    = reflect.TypeOf(Number(""))
    // nonPrintable matches any character outside the YAML printable set;
    // strings containing one are emitted as !!binary (see emitString).
    nonPrintable = regexp.MustCompile("[^\t\n\r\u0020-\u007E\u0085\u00A0-\uD7FF\uE000-\uFFFD]")
    // multiline matches any YAML line-break character (LF, NEL, LS, PS).
    multiline = regexp.MustCompile("\n|\u0085|\u2028|\u2029")
    // shortTags maps the full tag URIs to their "!!" shorthand form used
    // by emitScalar.
    shortTags = map[string]string{
        yaml_NULL_TAG:      "!!null",
        yaml_BOOL_TAG:      "!!bool",
        yaml_STR_TAG:       "!!str",
        yaml_INT_TAG:       "!!int",
        yaml_FLOAT_TAG:     "!!float",
        yaml_TIMESTAMP_TAG: "!!timestamp",
        yaml_SEQ_TAG:       "!!seq",
        yaml_MAP_TAG:       "!!map",
        yaml_BINARY_TAG:    "!!binary",
    }
)
// Marshaler lets a type customize its YAML representation. MarshalYAML
// returns the tag to apply and the value to encode in the type's place;
// a non-nil err aborts encoding (it is raised as a panic and recovered
// by Encode).
type Marshaler interface {
    MarshalYAML() (tag string, value interface{}, err error)
}
// An Encoder writes YAML documents to an output stream.
type Encoder struct {
    w       io.Writer
    emitter yaml_emitter_t // low-level emitter state
    event   yaml_event_t   // scratch event, reused for every emission
    flow    bool           // request flow style for the next mapping/sequence
    err     error
}
// Marshal renders v as a YAML document and returns the encoded bytes
// together with any encoding error.
func Marshal(v interface{}) ([]byte, error) {
    var out bytes.Buffer
    err := NewEncoder(&out).Encode(v)
    return out.Bytes(), err
}
// NewEncoder returns a new encoder that writes to w. The stream-start and
// document-start events are emitted immediately; a failure at this stage
// panics via emit rather than being returned as an error.
func NewEncoder(w io.Writer) *Encoder {
    e := &Encoder{w: w}
    yaml_emitter_initialize(&e.emitter)
    yaml_emitter_set_output_writer(&e.emitter, e.w)
    yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
    e.emit()
    yaml_document_start_event_initialize(&e.event, nil, nil, true)
    e.emit()
    return e
}
// Encode writes the YAML representation of v to the stream as a complete
// document. Panics raised during marshalling (including emitter failures
// and Marshaler errors) are presumably converted into the returned err by
// the deferred recovery helper.
func (e *Encoder) Encode(v interface{}) (err error) {
    defer recovery(&err)
    if e.err != nil {
        return e.err
    }
    e.marshal("", reflect.ValueOf(v), true)
    yaml_document_end_event_initialize(&e.event, true)
    e.emit()
    // NOTE(review): open_ended is cleared between documents — looks like it
    // suppresses an explicit "..." marker; confirm against the emitter.
    e.emitter.open_ended = false
    yaml_stream_end_event_initialize(&e.event)
    e.emit()
    return nil
}
// emit feeds the pending event to the emitter; failure is reported by
// panicking, which Encode converts back into an error.
func (e *Encoder) emit() {
    ok := yaml_emitter_emit(&e.emitter, &e.event)
    if !ok {
        panic("bad emit")
    }
}
// marshal dispatches v to the appropriate emit* method based on its
// reflected kind. tag, when non-empty, overrides the implicit tag.
// allowAddr permits falling back to a Marshaler implemented on the pointer
// type *T when v itself (type T) does not implement it.
func (e *Encoder) marshal(tag string, v reflect.Value, allowAddr bool) {
    vt := v.Type()
    if vt.Implements(marshalerType) {
        e.emitMarshaler(tag, v)
        return
    }
    // If only *T implements Marshaler, try the addressed form.
    if vt.Kind() != reflect.Ptr && allowAddr {
        if reflect.PtrTo(vt).Implements(marshalerType) {
            e.emitAddrMarshaler(tag, v)
            return
        }
    }
    switch v.Kind() {
    case reflect.Interface:
        if v.IsNil() {
            e.emitNil()
        } else {
            e.marshal(tag, v.Elem(), allowAddr)
        }
    case reflect.Map:
        e.emitMap(tag, v)
    case reflect.Ptr:
        if v.IsNil() {
            e.emitNil()
        } else {
            e.marshal(tag, v.Elem(), true)
        }
    case reflect.Struct:
        e.emitStruct(tag, v)
    case reflect.Slice:
        e.emitSlice(tag, v)
    case reflect.String:
        e.emitString(tag, v)
    case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
        e.emitInt(tag, v)
    case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
        e.emitUint(tag, v)
    case reflect.Float32, reflect.Float64:
        e.emitFloat(tag, v)
    case reflect.Bool:
        e.emitBool(tag, v)
    default:
        // Arrays, chans, funcs, complex numbers, etc. are unsupported.
        panic("Can't marshal type yet: " + v.Type().String())
    }
}
// emitMap encodes a map as a YAML mapping with its keys sorted (via the
// stringValues ordering) for deterministic output.
func (e *Encoder) emitMap(tag string, v reflect.Value) {
    e.mapping(tag, func() {
        var keys stringValues = v.MapKeys()
        sort.Sort(keys)
        for i := range keys {
            e.marshal("", keys[i], true)
            e.marshal("", v.MapIndex(keys[i]), true)
        }
    })
}
// emitStruct encodes a struct as a YAML mapping of its cached fields,
// skipping invalid fields and omitempty fields holding their empty value.
// time.Time values get their own textual representation instead.
func (e *Encoder) emitStruct(tag string, v reflect.Value) {
    if v.Type() == timeTimeType {
        e.emitTime(tag, v)
        return
    }
    fields := cachedTypeFields(v.Type())
    e.mapping(tag, func() {
        for _, field := range fields {
            value := fieldByIndex(v, field.index)
            skip := !value.IsValid() || (field.omitEmpty && isEmptyValue(value))
            if skip {
                continue
            }
            e.marshal("", reflect.ValueOf(field.name), true)
            e.flow = field.flow
            e.marshal("", value, true)
        }
    })
}
// emitTime encodes a time.Time using its MarshalText representation as a
// plain scalar. The MarshalText error is ignored, matching the original
// behavior.
func (e *Encoder) emitTime(tag string, v reflect.Value) {
    stamp := v.Interface().(time.Time)
    text, _ := stamp.MarshalText()
    e.emitScalar(string(text), "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func isEmptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
}
return false
}
// mapping emits a mapping-start event, runs f to emit the key/value pairs,
// then emits mapping-end. A pending flow request (e.flow) is consumed to
// select the flow style for this mapping only.
func (e *Encoder) mapping(tag string, f func()) {
    style := yaml_BLOCK_MAPPING_STYLE
    if e.flow {
        e.flow = false
        style = yaml_FLOW_MAPPING_STYLE
    }
    yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), tag == "", style)
    e.emit()
    f()
    yaml_mapping_end_event_initialize(&e.event)
    e.emit()
}
// emitSlice encodes a slice as a YAML sequence, consuming any pending flow
// request. []byte is special-cased as base64-encoded !!binary.
func (e *Encoder) emitSlice(tag string, v reflect.Value) {
    if v.Type() == byteSliceType {
        e.emitBase64(tag, v)
        return
    }
    style := yaml_BLOCK_SEQUENCE_STYLE
    if e.flow {
        e.flow = false
        style = yaml_FLOW_SEQUENCE_STYLE
    }
    yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), tag == "", style)
    e.emit()
    for i, n := 0, v.Len(); i < n; i++ {
        e.marshal("", v.Index(i), true)
    }
    yaml_sequence_end_event_initialize(&e.event)
    e.emit()
}
// emitBase64 writes the value as a base64-encoded, double-quoted !!binary
// scalar. It accepts a []byte, or — when routed here by emitString for a
// string containing non-printable characters — a string value.
//
// BUG FIX: reflect.Value.IsNil and Value.Bytes panic on string kinds, so
// the emitString path previously panicked (surfacing as an Encode error)
// instead of producing base64 output. String values are now converted
// directly.
func (e *Encoder) emitBase64(tag string, v reflect.Value) {
    var raw []byte
    if v.Kind() == reflect.String {
        raw = []byte(v.String())
    } else {
        if v.IsNil() {
            e.emitNil()
            return
        }
        raw = v.Bytes()
    }
    dst := make([]byte, base64.StdEncoding.EncodedLen(len(raw)))
    base64.StdEncoding.Encode(dst, raw)
    e.emitScalar(string(dst), "", yaml_BINARY_TAG, yaml_DOUBLE_QUOTED_SCALAR_STYLE)
}
// emitString encodes a string scalar. Style selection:
//   - strings containing non-printable characters are handed to emitBase64
//   - Number values are emitted plain
//   - untagged strings that the resolver would parse as a non-string type
//     (e.g. "true") are double-quoted so they round-trip as strings
//   - multiline strings use the literal block style
//   - everything else is emitted plain
func (e *Encoder) emitString(tag string, v reflect.Value) {
    var style yaml_scalar_style_t
    s := v.String()
    if nonPrintable.MatchString(s) {
        // NOTE(review): verify emitBase64 handles string values —
        // reflect.Value.IsNil/Bytes panic on string kinds.
        e.emitBase64(tag, v)
        return
    }
    if v.Type() == numberType {
        style = yaml_PLAIN_SCALAR_STYLE
    } else {
        // Probe the resolver: what tag would this text parse back as?
        event := yaml_event_t{
            implicit: true,
            value:    []byte(s),
        }
        rtag, _ := resolveInterface(event, false)
        if tag == "" && rtag != yaml_STR_TAG {
            style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
        } else if multiline.MatchString(s) {
            style = yaml_LITERAL_SCALAR_STYLE
        } else {
            style = yaml_PLAIN_SCALAR_STYLE
        }
    }
    e.emitScalar(s, "", tag, style)
}
// emitBool encodes a bool as a plain "true"/"false" scalar.
func (e *Encoder) emitBool(tag string, v reflect.Value) {
    e.emitScalar(strconv.FormatBool(v.Bool()), "", tag, yaml_PLAIN_SCALAR_STYLE)
}
// emitInt encodes a signed integer as a plain base-10 scalar.
func (e *Encoder) emitInt(tag string, v reflect.Value) {
    e.emitScalar(strconv.FormatInt(v.Int(), 10), "", tag, yaml_PLAIN_SCALAR_STYLE)
}
// emitUint encodes an unsigned integer as a plain base-10 scalar.
func (e *Encoder) emitUint(tag string, v reflect.Value) {
    e.emitScalar(strconv.FormatUint(v.Uint(), 10), "", tag, yaml_PLAIN_SCALAR_STYLE)
}
// emitFloat encodes a float as a plain scalar, using YAML's spellings for
// NaN (.nan) and the infinities (+.inf / -.inf), and the shortest 'g'
// representation at the value's own bit width otherwise.
func (e *Encoder) emitFloat(tag string, v reflect.Value) {
    f := v.Float()
    var repr string
    if math.IsNaN(f) {
        repr = ".nan"
    } else if math.IsInf(f, 1) {
        repr = "+.inf"
    } else if math.IsInf(f, -1) {
        repr = "-.inf"
    } else {
        repr = strconv.FormatFloat(f, 'g', -1, v.Type().Bits())
    }
    e.emitScalar(repr, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
// emitNil writes an untagged plain "null" scalar.
func (e *Encoder) emitNil() {
    e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
}
// emitScalar fires a scalar event. An empty tag makes the scalar implicit;
// an explicit tag forces plain style and is shortened to its "!!" form
// when a shorthand exists.
func (e *Encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
    implicit := tag == ""
    if !implicit {
        style = yaml_PLAIN_SCALAR_STYLE
    }
    resolved := tag
    if short, ok := shortTags[tag]; ok && short != "" {
        resolved = short
    }
    yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(resolved), []byte(value), implicit, implicit, style)
    e.emit()
}
// emitMarshaler encodes a value whose type implements Marshaler, emitting
// null for nil pointers/interfaces and re-marshalling whatever value the
// Marshaler returns under its returned tag. A Marshaler error panics and is
// recovered by Encode.
func (e *Encoder) emitMarshaler(tag string, v reflect.Value) {
    if v.Kind() == reflect.Ptr && v.IsNil() {
        e.emitNil()
        return
    }
    m := v.Interface().(Marshaler)
    if m == nil {
        e.emitNil()
        return
    }
    mtag, val, err := m.MarshalYAML()
    if err != nil {
        panic(err)
    }
    if val == nil {
        e.emitNil()
        return
    }
    e.marshal(mtag, reflect.ValueOf(val), false)
}
// emitAddrMarshaler encodes v (type T) via the Marshaler implemented on *T.
// It is reached from marshal only when T itself does NOT implement
// Marshaler but *T does, so the interface must be asserted on the address.
//
// BUG FIX: the Marshaler assertion was performed on v instead of va. Since
// this path is only taken when T does not implement Marshaler, asserting on
// v always panicked; the assertion now uses the addressed value.
func (e *Encoder) emitAddrMarshaler(tag string, v reflect.Value) {
    if !v.CanAddr() {
        // Not addressable: fall back to ordinary marshalling without
        // retrying the pointer path.
        e.marshal(tag, v, false)
        return
    }
    va := v.Addr()
    if va.IsNil() {
        e.emitNil()
        return
    }
    m := va.Interface().(Marshaler)
    t, val, err := m.MarshalYAML()
    if err != nil {
        panic(err)
    }
    if val == nil {
        e.emitNil()
        return
    }
    e.marshal(t, reflect.ValueOf(val), false)
}
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/encode_test.go
================================================
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"bytes"
"errors"
"math"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Encode", func() {
var buf *bytes.Buffer
var enc *Encoder
BeforeEach(func() {
buf = &bytes.Buffer{}
enc = NewEncoder(buf)
})
Context("Scalars", func() {
It("handles strings", func() {
err := enc.Encode("abc")
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`abc
`))
})
It("handles really short strings", func() {
err := enc.Encode(".")
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`.
`))
})
It("encodes strings with multilines", func() {
err := enc.Encode("a\nc")
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`|-
a
c
`))
})
It("handles strings that match known scalars", func() {
err := enc.Encode("true")
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`"true"
`))
})
It("handles strings that contain colons followed by whitespace", func() {
err := enc.Encode("contains: colon")
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`'contains: colon'
`))
})
Context("handles ints", func() {
It("handles ints", func() {
err := enc.Encode(13)
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal("13\n"))
})
It("handles uints", func() {
err := enc.Encode(uint64(1))
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal("1\n"))
})
})
Context("handles floats", func() {
It("handles float32", func() {
err := enc.Encode(float32(1.234))
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal("1.234\n"))
})
It("handles float64", func() {
err := enc.Encode(float64(1.2e23))
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal("1.2e+23\n"))
})
It("handles NaN", func() {
err := enc.Encode(math.NaN())
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(".nan\n"))
})
It("handles infinity", func() {
err := enc.Encode(math.Inf(-1))
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal("-.inf\n"))
})
})
It("handles bools", func() {
err := enc.Encode(true)
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal("true\n"))
})
It("handles time.Time", func() {
t := time.Now()
err := enc.Encode(t)
Expect(err).NotTo(HaveOccurred())
bytes, _ := t.MarshalText()
Expect(buf.String()).To(Equal(string(bytes) + "\n"))
})
Context("Null", func() {
It("fails on nil", func() {
err := enc.Encode(nil)
Expect(err).To(HaveOccurred())
})
})
It("handles []byte", func() {
err := enc.Encode([]byte{'a', 'b', 'c'})
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal("!!binary YWJj\n"))
})
Context("Ptrs", func() {
It("handles ptr of a type", func() {
p := new(int)
*p = 10
err := enc.Encode(p)
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal("10\n"))
})
It("handles nil ptr", func() {
var p *int
err := enc.Encode(p)
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal("null\n"))
})
})
Context("Structs", func() {
It("handles simple structs", func() {
type batter struct {
Name string
HR int64
AVG float64
}
batters := []batter{
batter{Name: "Mark McGwire", HR: 65, AVG: 0.278},
batter{Name: "Sammy Sosa", HR: 63, AVG: 0.288},
}
err := enc.Encode(batters)
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`- Name: Mark McGwire
HR: 65
AVG: 0.278
- Name: Sammy Sosa
HR: 63
AVG: 0.288
`))
})
It("handles tagged structs", func() {
type batter struct {
Name string `yaml:"name"`
HR int64
AVG float64 `yaml:"avg"`
}
batters := []batter{
batter{Name: "Mark McGwire", HR: 65, AVG: 0.278},
batter{Name: "Sammy Sosa", HR: 63, AVG: 0.288},
}
err := enc.Encode(batters)
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`- name: Mark McGwire
HR: 65
avg: 0.278
- name: Sammy Sosa
HR: 63
avg: 0.288
`))
})
It("handles nested structs", func() {
type nestedConfig struct {
AString string `yaml:"str"`
Integer int `yaml:"int"`
}
type config struct {
TopString string
Nested nestedConfig
}
cfg := config{
TopString: "def",
Nested: nestedConfig{
AString: "abc",
Integer: 123,
},
}
err := enc.Encode(cfg)
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`TopString: def
Nested:
str: abc
int: 123
`))
})
It("handles inline structs", func() {
type NestedConfig struct {
AString string `yaml:"str"`
Integer int `yaml:"int"`
}
type config struct {
TopString string
NestedConfig
}
cfg := config{
TopString: "def",
NestedConfig: NestedConfig{
AString: "abc",
Integer: 123,
},
}
err := enc.Encode(cfg)
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`TopString: def
str: abc
int: 123
`))
})
It("handles inline structs with conflicts", func() {
type NestedConfig struct {
AString string `yaml:"str"`
Integer int `yaml:"int"`
}
type config struct {
AString string `yaml:"str"`
NestedConfig
}
cfg := config{
AString: "def",
NestedConfig: NestedConfig{
AString: "abc",
Integer: 123,
},
}
err := enc.Encode(cfg)
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`str: def
int: 123
`))
})
})
})
Context("Sequence", func() {
It("handles slices", func() {
val := []string{"a", "b", "c"}
err := enc.Encode(val)
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`- a
- b
- c
`))
})
})
Context("Maps", func() {
It("Encodes simple maps", func() {
err := enc.Encode(&map[string]string{
"name": "Mark McGwire",
"hr": "65",
"avg": "0.278",
})
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`avg: "0.278"
hr: "65"
name: Mark McGwire
`))
})
It("sorts by key when strings otherwise by kind", func() {
err := enc.Encode(&map[interface{}]string{
1.2: "float",
8: "integer",
"name": "Mark McGwire",
"hr": "65",
"avg": "0.278",
})
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`8: integer
1.2: float
avg: "0.278"
hr: "65"
name: Mark McGwire
`))
})
It("encodes mix types", func() {
err := enc.Encode(&map[string]interface{}{
"name": "Mark McGwire",
"hr": 65,
"avg": 0.278,
})
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`avg: 0.278
hr: 65
name: Mark McGwire
`))
})
})
Context("Sequence of Maps", func() {
It("encodes", func() {
err := enc.Encode([]map[string]interface{}{
{"name": "Mark McGwire",
"hr": 65,
"avg": 0.278,
},
{"name": "Sammy Sosa",
"hr": 63,
"avg": 0.288,
},
})
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`- avg: 0.278
hr: 65
name: Mark McGwire
- avg: 0.288
hr: 63
name: Sammy Sosa
`))
})
})
Context("Maps of Sequence", func() {
It("encodes", func() {
err := enc.Encode(map[string][]interface{}{
"name": []interface{}{"Mark McGwire", "Sammy Sosa"},
"hr": []interface{}{65, 63},
"avg": []interface{}{0.278, 0.288},
})
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`avg:
- 0.278
- 0.288
hr:
- 65
- 63
name:
- Mark McGwire
- Sammy Sosa
`))
})
})
Context("Flow", func() {
It("flows structs", func() {
type i struct {
A string
}
type o struct {
I i `yaml:"i,flow"`
}
err := enc.Encode(o{
I: i{A: "abc"},
})
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`i: {A: abc}
`))
})
It("flows sequences", func() {
type i struct {
A string
}
type o struct {
I []i `yaml:"i,flow"`
}
err := enc.Encode(o{
I: []i{{A: "abc"}},
})
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`i: [{A: abc}]
`))
})
})
Context("Omit empty", func() {
It("omits nil ptrs", func() {
type i struct {
A *string `yaml:"a,omitempty"`
}
type o struct {
I []i `yaml:"i,flow"`
}
err := enc.Encode(o{
I: []i{{A: nil}},
})
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`i: [{}]
`))
})
})
Context("Skip field", func() {
It("does not include the field", func() {
type a struct {
B string `yaml:"-"`
C string
}
err := enc.Encode(a{B: "b", C: "c"})
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`C: c
`))
})
})
Context("Marshaler support", func() {
Context("Receiver is a value", func() {
It("uses the Marshaler interface when a value", func() {
err := enc.Encode(hasMarshaler{Value: 123})
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal("123\n"))
})
It("uses the Marshaler interface when a pointer", func() {
err := enc.Encode(&hasMarshaler{Value: "abc"})
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`abc
`))
})
Context("when it fails", func() {
It("returns an error", func() {
err := enc.Encode(&hasMarshaler{Value: "abc", Error: errors.New("fail")})
Expect(err).To(MatchError("fail"))
})
})
})
Context("Receiver is a pointer", func() {
It("uses the Marshaler interface when a pointer", func() {
err := enc.Encode(&hasPtrMarshaler{Value: map[string]string{"a": "b"}})
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`a: b
`))
})
It("skips the Marshaler when its a value", func() {
err := enc.Encode(hasPtrMarshaler{Value: map[string]string{"a": "b"}})
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`Tag: ""
Value:
a: b
Error: null
`))
})
Context("the receiver is nil", func() {
var ptr *hasPtrMarshaler
Context("when it fails", func() {
It("returns an error", func() {
err := enc.Encode(&hasPtrMarshaler{Value: "abc", Error: errors.New("fail")})
Expect(err).To(MatchError("fail"))
})
})
It("returns a null", func() {
err := enc.Encode(ptr)
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`null
`))
})
It("returns a null value for ptr types", func() {
err := enc.Encode(map[string]*hasPtrMarshaler{"a": ptr})
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`a: null
`))
})
It("panics when used as a nil interface", func() {
Expect(func() { enc.Encode(map[string]Marshaler{"a": ptr}) }).To(Panic())
})
})
Context("the receiver has a nil value", func() {
ptr := &hasPtrMarshaler{Value: nil}
It("returns null", func() {
err := enc.Encode(ptr)
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`null
`))
})
Context("in a map", func() {
It("returns a null value for ptr types", func() {
err := enc.Encode(map[string]*hasPtrMarshaler{"a": ptr})
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`a: null
`))
})
It("returns a null value for interface types", func() {
err := enc.Encode(map[string]Marshaler{"a": ptr})
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`a: null
`))
})
})
Context("in a slice", func() {
It("returns a null value for ptr types", func() {
err := enc.Encode([]*hasPtrMarshaler{ptr})
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`- null
`))
})
It("returns a null value for interface types", func() {
err := enc.Encode([]Marshaler{ptr})
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal(`- null
`))
})
})
})
})
})
Context("Number type", func() {
It("encodes as a number", func() {
n := Number("12345")
err := enc.Encode(n)
Expect(err).NotTo(HaveOccurred())
Expect(buf.String()).To(Equal("12345\n"))
})
})
})
// hasMarshaler is an encoder test double whose Marshaler/Unmarshaler methods
// are declared on the VALUE receiver, so both hasMarshaler values and
// *hasPtrMarshaler-style pointers to it satisfy the interfaces.
type hasMarshaler struct {
Value interface{} // payload handed back by MarshalYAML
Error error // canned error handed back by MarshalYAML
}
// MarshalYAML reports an empty tag, the stored Value, and the stored Error.
func (m hasMarshaler) MarshalYAML() (string, interface{}, error) {
return "", m.Value, m.Error
}
// UnmarshalYAML assigns value to a COPY of the receiver; with a value
// receiver the caller never observes the write.
// NOTE(review): looks intentional for these encoder tests — confirm no
// decoder test relies on the stored value after unmarshalling.
func (m hasMarshaler) UnmarshalYAML(tag string, value interface{}) error {
m.Value = value
return nil
}
// hasPtrMarshaler is an encoder test double whose Marshaler/Unmarshaler
// methods are declared on the POINTER receiver, so only *hasPtrMarshaler
// satisfies the interfaces; a plain value is encoded field-by-field.
type hasPtrMarshaler struct {
Tag string // tag most recently recorded by UnmarshalYAML
Value interface{} // payload returned by MarshalYAML / stored by UnmarshalYAML
Error error // canned error returned by MarshalYAML
}
// MarshalYAML reports an empty tag, the stored Value, and the stored Error.
func (m *hasPtrMarshaler) MarshalYAML() (string, interface{}, error) {
return "", m.Value, m.Error
}
// UnmarshalYAML records the tag and value on the receiver.
func (m *hasPtrMarshaler) UnmarshalYAML(tag string, value interface{}) error {
m.Tag = tag
m.Value = value
return nil
}
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_1.yaml
================================================
- Mark McGwire
- Sammy Sosa
- Ken Griffey
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_10.yaml
================================================
---
hr:
- Mark McGwire
# Following node labeled SS
- &SS Sammy Sosa
rbi:
- *SS # Subsequent occurrence
- Ken Griffey
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_11.yaml
================================================
? - Detroit Tigers
- Chicago cubs
:
- 2001-07-23
? [ New York Yankees,
Atlanta Braves ]
: [ 2001-07-02, 2001-08-12,
2001-08-14 ]
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_12.yaml
================================================
---
# products purchased
- item : Super Hoop
quantity: 1
- item : Basketball
quantity: 4
- item : Big Shoes
quantity: 1
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_13.yaml
================================================
# ASCII Art
--- |
\//||\/||
// || ||__
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_14.yaml
================================================
---
Mark McGwire's
year was crippled
by a knee injury.
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_15.yaml
================================================
>
Sammy Sosa completed another
fine season with great stats.
63 Home Runs
0.288 Batting Average
What a year!
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_15_dumped.yaml
================================================
>
Sammy Sosa completed another fine season with great stats.
63 Home Runs
0.288 Batting Average
What a year!
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_16.yaml
================================================
name: Mark McGwire
accomplishment: >
Mark set a major league
home run record in 1998.
stats: |
65 Home Runs
0.278 Batting Average
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_17.yaml
================================================
unicode: "Sosa did fine.\u263A"
control: "\b1998\t1999\t2000\n"
hexesc: "\x0D\x0A is \r\n"
single: '"Howdy!" he cried.'
quoted: ' # not a ''comment''.'
tie-fighter: '|\-*-/|'
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_17_control.yaml
================================================
control: "\b1998\t1999\t2000\n"
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_17_hexesc.yaml
================================================
hexesc: "\x0D\x0A is \r\n"
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_17_quoted.yaml
================================================
quoted: ' # not a ''comment''.'
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_17_single.yaml
================================================
single: '"Howdy!" he cried.'
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_17_tie_fighter.yaml
================================================
tie-fighter: '|\-*-/|'
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_17_unicode.yaml
================================================
unicode: "Sosa did fine.\u263A"
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_18.yaml
================================================
plain:
This unquoted scalar
spans many lines.
quoted: "So does this
quoted scalar.\n"
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_19.yaml
================================================
canonical: 12345
decimal: +12_345
octal: 014
hexadecimal: 0xC
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_2.yaml
================================================
hr: 65 # Home runs
avg: 0.278 # Batting average
rbi: 147 # Runs Batted In
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_20.yaml
================================================
canonical: 1.23015e+3
exponential: 12.3015e+02
fixed: 1_230.15
negative infinity: -.inf
not a number: .NaN
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_21.yaml
================================================
null: ~
true: yes
false: no
string: '12345'
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_22.yaml
================================================
canonical: 2001-12-15T02:59:43.1Z
iso8601: 2001-12-14t21:59:43.10-05:00
spaced: 2001-12-14 21:59:43.10 -5
date: 2002-12-14
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_23.yaml
================================================
---
not-date: !!str 2002-04-28
picture: !!binary "\
R0lGODlhDAAMAIQAAP//9/X\
17unp5WZmZgAAAOfn515eXv\
Pz7Y6OjuDg4J+fn5OTk6enp\
56enmleECcgggoBADs="
application specific tag: !something |
The semantics of the tag
above may be different for
different documents.
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_23_application.yaml
================================================
---
application specific tag: !something |
The semantics of the tag
above may be different for
different documents.
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_23_non_date.yaml
================================================
---
not-date: !!str 2002-04-28
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_23_picture.yaml
================================================
---
picture: !!binary "\
R0lGODlhDAAMAIQAAP//9/X\
17unp5WZmZgAAAOfn515eXv\
Pz7Y6OjuDg4J+fn5OTk6enp\
56enmleECcgggoBADs="
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_24.yaml
================================================
%TAG ! tag:clarkevans.com,2002:
--- !shape
# Use the ! handle for presenting
# tag:clarkevans.com,2002:circle
- !circle
center: &ORIGIN {x: 73, y: 129}
radius: 7
- !line
start: *ORIGIN
finish: { x: 89, y: 102 }
- !label
start: *ORIGIN
color: 0xFFEEBB
text: Pretty vector drawing.
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_24_dumped.yaml
================================================
!shape
- !circle
center: &id001 {x: 73, y: 129}
radius: 7
- !line
finish: {x: 89, y: 102}
start: *id001
- !label
color: 0xFFEEBB
start: *id001
text: Pretty vector drawing.
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_25.yaml
================================================
# sets are represented as a
# mapping where each key is
# associated with the empty string
--- !!set
? Mark McGwire
? Sammy Sosa
? Ken Griff
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_26.yaml
================================================
# ordered maps are represented as
# a sequence of mappings, with
# each mapping having one key
--- !!omap
- Mark McGwire: 65
- Sammy Sosa: 63
- Ken Griffy: 58
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_27.yaml
================================================
--- !
invoice: 34843
date : 2001-01-23
billTo: &id001
given : Chris
family : Dumars
address:
lines: |
458 Walkman Dr.
Suite #292
city : Royal Oak
state : MI
postal : 48046
shipTo: *id001
product:
- sku : BL394D
quantity : 4
description : Basketball
price : 450.00
- sku : BL4438H
quantity : 1
description : Super Hoop
price : 2392.00
tax : 251.42
total: 4443.52
comments:
Late afternoon is best.
Backup contact is Nancy
Billsmer @ 338-4338.
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_27_dumped.yaml
================================================
!!org.yaml.snakeyaml.Invoice
billTo: &id001
address:
city: Royal Oak
lines: |
458 Walkman Dr.
Suite #292
postal: '48046'
state: MI
family: Dumars
given: Chris
comments: Late afternoon is best. Backup contact is Nancy Billsmer @ 338-4338.
date: '2001-01-23'
invoice: 34843
product:
- {description: Basketball, price: 450.0, quantity: 4, sku: BL394D}
- {description: Super Hoop, price: 2392.0, quantity: 1, sku: BL4438H}
shipTo: *id001
tax: 251.42
total: 4443.52
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_28.yaml
================================================
---
Time: 2001-11-23 15:01:42 -5
User: ed
Warning:
This is an error message
for the log file
---
Time: 2001-11-23 15:02:31 -5
User: ed
Warning:
A slightly different error
message.
---
Date: 2001-11-23 15:03:17 -5
User: ed
Fatal:
Unknown variable "bar"
Stack:
- file: TopClass.py
line: 23
code: |
x = MoreObject("345\n")
- file: MoreClass.py
line: 58
code: |-
foo = bar
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_3.yaml
================================================
american:
- Boston Red Sox
- Detroit Tigers
- New York Yankees
national:
- New York Mets
- Chicago Cubs
- Atlanta Braves
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_4.yaml
================================================
-
name: Mark McGwire
hr: 65
avg: 0.278
-
name: Sammy Sosa
hr: 63
avg: 0.288
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_5.yaml
================================================
- [name , hr, avg ]
- [Mark McGwire, 65, 0.278]
- [Sammy Sosa , 63, 0.288]
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_6.yaml
================================================
Mark McGwire: {hr: 65, avg: 0.278}
Sammy Sosa: {
hr: 63,
avg: 0.288
}
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_7.yaml
================================================
# Ranking of 1998 home runs
---
- Mark McGwire
- Sammy Sosa
- Ken Griffey
# Team ranking
---
- Chicago Cubs
- St Louis Cardinals
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_8.yaml
================================================
---
time: 20:03:20
player: Sammy Sosa
action: strike (miss)
...
---
time: 20:03:47
player: Sammy Sosa
action: grand slam
...
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example2_9.yaml
================================================
---
hr: # 1998 hr ranking
- Mark McGwire
- Sammy Sosa
rbi:
# 1998 rbi ranking
- Sammy Sosa
- Ken Griffey
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/example_empty.yaml
================================================
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/types/map.yaml
================================================
# Unordered set of key: value pairs.
Block style: !!map
Clark : Evans
Brian : Ingerson
Oren : Ben-Kiki
Flow style: !!map { Clark: Evans, Brian: Ingerson, Oren: Ben-Kiki }
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/types/map_mixed_tags.yaml
================================================
# Unordered set of key: value pairs.
Block style: !
Clark : Evans
Brian : Ingerson
Oren : Ben-Kiki
Flow style: { Clark: Evans, Brian: Ingerson, Oren: Ben-Kiki }
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/types/merge.yaml
================================================
---
- &CENTER { x: 1, y: 2 }
- &LEFT { x: 0, y: 2 }
- &BIG { r: 10 }
- &SMALL { r: 1 }
# All the following maps are equal:
- # Explicit keys
x: 1
y: 2
r: 10
label: center/big
- # Merge one map
<< : *CENTER
r: 10
label: center/big
- # Merge multiple maps
<< : [ *CENTER, *BIG ]
label: center/big
- # Override
<< : [ *BIG, *LEFT, *SMALL ]
x: 1
label: center/big
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/types/omap.yaml
================================================
# Explicitly typed ordered map (dictionary).
Bestiary: !!omap
- aardvark: African pig-like ant eater. Ugly.
- anteater: South-American ant eater. Two species.
- anaconda: South-American constrictor snake. Scaly.
# Etc.
# Flow style
Numbers: !!omap [ one: 1, two: 2, three : 3 ]
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/types/pairs.yaml
================================================
# Explicitly typed pairs.
Block tasks: !!pairs
- meeting: with team.
- meeting: with boss.
- break: lunch.
- meeting: with client.
Flow tasks: !!pairs [ meeting: with team, meeting: with boss ]
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/types/seq.yaml
================================================
# Ordered sequence of nodes
Block style: !!seq
- Mercury # Rotates - no light/dark sides.
- Venus # Deadliest. Aptly named.
- Earth # Mostly dirt.
- Mars # Seems empty.
- Jupiter # The king.
- Saturn # Pretty.
- Uranus # Where the sun hardly shines.
- Neptune # Boring. No rings.
- Pluto # You call this a planet?
Flow style: !!seq [ Mercury, Venus, Earth, Mars, # Rocks
Jupiter, Saturn, Uranus, Neptune, # Gas
Pluto ] # Overrated
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/types/set.yaml
================================================
# Explicitly typed set.
baseball players: !!set
? Mark McGwire
? Sammy Sosa
? Ken Griffey
# Flow style
baseball teams: !!set { Boston Red Sox, Detroit Tigers, New York Yankees }
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/types/v.yaml
================================================
--- # New schema
link with:
- = : library1.dll
version: 1.2
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/fixtures/specification/types/value.yaml
================================================
--- # Old schema
link with:
- library1.dll
- library2.dll
--- # New schema
link with:
- = : library1.dll
version: 1.2
- = : library2.dll
version: 2.3
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/libyaml-LICENSE
================================================
Copyright (c) 2006 Kirill Simonov
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/parser.go
================================================
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"bytes"
)
/*
* The parser implements the following grammar:
*
* stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
* implicit_document ::= block_node DOCUMENT-END*
* explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
* block_node_or_indentless_sequence ::=
* ALIAS
* | properties (block_content | indentless_block_sequence)?
* | block_content
* | indentless_block_sequence
* block_node ::= ALIAS
* | properties block_content?
* | block_content
* flow_node ::= ALIAS
* | properties flow_content?
* | flow_content
* properties ::= TAG ANCHOR? | ANCHOR TAG?
* block_content ::= block_collection | flow_collection | SCALAR
* flow_content ::= flow_collection | SCALAR
* block_collection ::= block_sequence | block_mapping
* flow_collection ::= flow_sequence | flow_mapping
* block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
* indentless_sequence ::= (BLOCK-ENTRY block_node?)+
* block_mapping ::= BLOCK-MAPPING_START
* ((KEY block_node_or_indentless_sequence?)?
* (VALUE block_node_or_indentless_sequence?)?)*
* BLOCK-END
* flow_sequence ::= FLOW-SEQUENCE-START
* (flow_sequence_entry FLOW-ENTRY)*
* flow_sequence_entry?
* FLOW-SEQUENCE-END
* flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
* flow_mapping ::= FLOW-MAPPING-START
* (flow_mapping_entry FLOW-ENTRY)*
* flow_mapping_entry?
* FLOW-MAPPING-END
* flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
*/
/*
* Peek the next token in the token queue.
*/
// peek_token returns the next token in the queue without consuming it,
// fetching more tokens from the scanner when the buffer is empty.
// It returns nil if no further token can be produced.
func peek_token(parser *yaml_parser_t) *yaml_token_t {
	if !parser.token_available && !yaml_parser_fetch_more_tokens(parser) {
		return nil
	}
	return &parser.tokens[parser.tokens_head]
}
/*
* Remove the next token from the queue (must be called after peek_token).
*/
// skip_token consumes the token previously returned by peek_token, advancing
// the queue head and noting whether the consumed token ended the stream.
func skip_token(parser *yaml_parser_t) {
	// Inspect the token being consumed before the head index moves past it.
	head := parser.tokens_head
	parser.stream_end_produced = parser.tokens[head].token_type == yaml_STREAM_END_TOKEN
	parser.tokens_head = head + 1
	parser.tokens_parsed++
	parser.token_available = false
}
/*
* Get the next event.
*/
// yaml_parser_parse produces the next event of the stream into event.
// After the stream has ended, an error has been recorded, or the parser has
// reached its terminal state, it reports success while leaving event zeroed.
func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
	// Always start from a clean event.
	*event = yaml_event_t{}

	finished := parser.stream_end_produced ||
		parser.error != yaml_NO_ERROR ||
		parser.state == yaml_PARSE_END_STATE
	if finished {
		return true
	}

	// Advance the state machine to generate the next event.
	return yaml_parser_state_machine(parser, event)
}
/*
* Set parser error.
*/
// yaml_parser_set_parser_error records a parse failure (message plus source
// position) on the parser and returns false so callers can propagate it.
func yaml_parser_set_parser_error(parser *yaml_parser_t,
	problem string, problem_mark YAML_mark_t) bool {
	parser.problem_mark = problem_mark
	parser.problem = problem
	parser.error = yaml_PARSER_ERROR
	return false
}
// yaml_parser_set_parser_error_context records a parse failure together with
// the surrounding context in which it occurred, and returns false so callers
// can propagate it.
func yaml_parser_set_parser_error_context(parser *yaml_parser_t,
	context string, context_mark YAML_mark_t,
	problem string, problem_mark YAML_mark_t) bool {
	parser.problem_mark = problem_mark
	parser.problem = problem
	parser.context_mark = context_mark
	parser.context = context
	parser.error = yaml_PARSER_ERROR
	return false
}
/*
* State dispatcher.
*/
// yaml_parser_state_machine dispatches to the production handler for the
// parser's current state. The extra bool arguments distinguish the
// "first"/"implicit" variant of a production from the general one.
func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
switch parser.state {
case yaml_PARSE_STREAM_START_STATE:
return yaml_parser_parse_stream_start(parser, event)
case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
return yaml_parser_parse_document_start(parser, event, true)
case yaml_PARSE_DOCUMENT_START_STATE:
return yaml_parser_parse_document_start(parser, event, false)
case yaml_PARSE_DOCUMENT_CONTENT_STATE:
return yaml_parser_parse_document_content(parser, event)
case yaml_PARSE_DOCUMENT_END_STATE:
return yaml_parser_parse_document_end(parser, event)
case yaml_PARSE_BLOCK_NODE_STATE:
return yaml_parser_parse_node(parser, event, true, false)
case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
return yaml_parser_parse_node(parser, event, true, true)
case yaml_PARSE_FLOW_NODE_STATE:
return yaml_parser_parse_node(parser, event, false, false)
case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
return yaml_parser_parse_block_sequence_entry(parser, event, true)
case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
return yaml_parser_parse_block_sequence_entry(parser, event, false)
case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
return yaml_parser_parse_indentless_sequence_entry(parser, event)
case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
return yaml_parser_parse_block_mapping_key(parser, event, true)
case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
return yaml_parser_parse_block_mapping_key(parser, event, false)
case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
return yaml_parser_parse_block_mapping_value(parser, event)
case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
return yaml_parser_parse_flow_sequence_entry(parser, event, true)
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
return yaml_parser_parse_flow_sequence_entry(parser, event, false)
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
return yaml_parser_parse_flow_mapping_key(parser, event, true)
case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
return yaml_parser_parse_flow_mapping_key(parser, event, false)
case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
return yaml_parser_parse_flow_mapping_value(parser, event, false)
case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
return yaml_parser_parse_flow_mapping_value(parser, event, true)
}
// Any state not listed above is a programmer error, not a parse error.
panic("invalid parser state")
}
/*
* Parse the production:
* stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
* ************
*/
// yaml_parser_parse_stream_start handles the first production:
//
//	stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
//
// It consumes the mandatory STREAM-START token, emits the corresponding
// STREAM-START event, and advances to the implicit-document-start state.
// Returns false when no token is available or the stream is malformed.
func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
	token := peek_token(parser)
	if token == nil {
		return false
	}

	// The scanner must always deliver STREAM-START first; anything else is
	// a fatal parse error. (Message restored: the original string was
	// truncated to "did not find expected ".)
	if token.token_type != yaml_STREAM_START_TOKEN {
		return yaml_parser_set_parser_error(parser,
			"did not find expected <stream-start>", token.start_mark)
	}

	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
	*event = yaml_event_t{
		event_type: yaml_STREAM_START_EVENT,
		start_mark: token.start_mark,
		end_mark:   token.end_mark,
		encoding:   token.encoding,
	}
	skip_token(parser)
	return true
}
/*
* Parse the productions:
* implicit_document ::= block_node DOCUMENT-END*
* *
* explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
* *************************
*/
// yaml_parser_parse_document_start emits a DOCUMENT-START event (implicit or
// explicit) or, at the end of input, the STREAM-END event.
//
// Productions:
//   implicit_document ::= block_node DOCUMENT-END*
//   explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t,
	implicit bool) bool {
	token := peek_token(parser)
	if token == nil {
		return false
	}
	/* Parse extra document end indicators. */
	if !implicit {
		for token.token_type == yaml_DOCUMENT_END_TOKEN {
			skip_token(parser)
			token = peek_token(parser)
			if token == nil {
				return false
			}
		}
	}
	/* Parse an implicit document: content begins with no directives and no
	 * explicit "---" marker. */
	if implicit && token.token_type != yaml_VERSION_DIRECTIVE_TOKEN &&
		token.token_type != yaml_TAG_DIRECTIVE_TOKEN &&
		token.token_type != yaml_DOCUMENT_START_TOKEN &&
		token.token_type != yaml_STREAM_END_TOKEN {
		// Still run directive processing so the default tag handles ("!",
		// "!!") are installed for this document.
		if !yaml_parser_process_directives(parser, nil, nil) {
			return false
		}
		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
		parser.state = yaml_PARSE_BLOCK_NODE_STATE
		*event = yaml_event_t{
			event_type: yaml_DOCUMENT_START_EVENT,
			implicit:   true,
			start_mark: token.start_mark,
			end_mark:   token.end_mark,
		}
	} else if token.token_type != yaml_STREAM_END_TOKEN {
		/* Parse an explicit document. */
		var version_directive *yaml_version_directive_t
		var tag_directives []yaml_tag_directive_t
		start_mark := token.start_mark
		if !yaml_parser_process_directives(parser, &version_directive,
			&tag_directives) {
			return false
		}
		token = peek_token(parser)
		if token == nil {
			return false
		}
		if token.token_type != yaml_DOCUMENT_START_TOKEN {
			// NOTE(review): restored the "<document start>" text that had
			// been truncated from the original message.
			yaml_parser_set_parser_error(parser,
				"did not find expected <document start>", token.start_mark)
			return false
		}
		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
		end_mark := token.end_mark
		*event = yaml_event_t{
			event_type:        yaml_DOCUMENT_START_EVENT,
			start_mark:        start_mark,
			end_mark:          end_mark,
			version_directive: version_directive,
			tag_directives:    tag_directives,
			implicit:          false,
		}
		skip_token(parser)
	} else {
		/* Parse the stream end. */
		parser.state = yaml_PARSE_END_STATE
		*event = yaml_event_t{
			event_type: yaml_STREAM_END_EVENT,
			start_mark: token.start_mark,
			end_mark:   token.end_mark,
		}
		skip_token(parser)
	}
	return true
}
/*
* Parse the productions:
* explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
* ***********
*/
// yaml_parser_parse_document_content produces the root node of a document.
// When the document body is absent (the next token already belongs to the
// next document or to the stream end), an empty scalar is emitted instead.
func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
	token := peek_token(parser)
	if token == nil {
		return false
	}
	switch token.token_type {
	case yaml_VERSION_DIRECTIVE_TOKEN,
		yaml_TAG_DIRECTIVE_TOKEN,
		yaml_DOCUMENT_START_TOKEN,
		yaml_DOCUMENT_END_TOKEN,
		yaml_STREAM_END_TOKEN:
		// No content: pop the saved state and stand in an empty scalar.
		last := len(parser.states) - 1
		parser.state = parser.states[last]
		parser.states = parser.states[:last]
		return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
	}
	return yaml_parser_parse_node(parser, event, true, false)
}
/*
* Parse the productions:
* implicit_document ::= block_node DOCUMENT-END*
* *************
* explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
* *************
*/
// yaml_parser_parse_document_end emits a DOCUMENT-END event. The event is
// explicit only when a "..." (DOCUMENT-END) token is actually present; any
// %TAG directives registered for the finished document are discarded.
func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
	token := peek_token(parser)
	if token == nil {
		return false
	}
	startMark := token.start_mark
	endMark := token.start_mark
	explicit := false
	if token.token_type == yaml_DOCUMENT_END_TOKEN {
		explicit = true
		endMark = token.end_mark
		skip_token(parser)
	}
	// Tag directives are scoped to a single document.
	parser.tag_directives = parser.tag_directives[:0]
	parser.state = yaml_PARSE_DOCUMENT_START_STATE
	*event = yaml_event_t{
		event_type: yaml_DOCUMENT_END_EVENT,
		start_mark: startMark,
		end_mark:   endMark,
		implicit:   !explicit,
	}
	return true
}
/*
* Parse the productions:
* block_node_or_indentless_sequence ::=
* ALIAS
* *****
* | properties (block_content | indentless_block_sequence)?
* ********** *
* | block_content | indentless_block_sequence
* *
* block_node ::= ALIAS
* *****
* | properties block_content?
* ********** *
* | block_content
* *
* flow_node ::= ALIAS
* *****
* | properties flow_content?
* ********** *
* | flow_content
* *
* properties ::= TAG ANCHOR? | ANCHOR TAG?
* *************************
* block_content ::= block_collection | flow_collection | SCALAR
* ******
* flow_content ::= flow_collection | SCALAR
* ******
*/
// yaml_parser_parse_node parses a complete node: either an alias, or optional
// properties (anchor and/or tag, in either order) followed by the node
// content (scalar, flow/block sequence, or flow/block mapping).
//
// block enables block-style collection starts; indentless_sequence allows a
// "- ..." sequence at the current indentation level (used for values inside
// block mappings). On success the appropriate event is stored in *event.
func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t,
	block bool, indentless_sequence bool) bool {
	token := peek_token(parser)
	if token == nil {
		return false
	}
	if token.token_type == yaml_ALIAS_TOKEN {
		// An alias is a complete node by itself: pop the saved state and
		// emit the ALIAS event directly.
		parser.state = parser.states[len(parser.states)-1]
		parser.states = parser.states[:len(parser.states)-1]
		*event = yaml_event_t{
			event_type: yaml_ALIAS_EVENT,
			start_mark: token.start_mark,
			end_mark:   token.end_mark,
			anchor:     token.value,
		}
		skip_token(parser)
		return true
	} else {
		start_mark, end_mark := token.start_mark, token.start_mark
		var tag_handle []byte
		var tag_suffix, anchor []byte
		var tag_mark YAML_mark_t
		// Collect node properties. The grammar permits ANCHOR TAG? as well
		// as TAG ANCHOR?, so both orders are handled symmetrically.
		if token.token_type == yaml_ANCHOR_TOKEN {
			anchor = token.value
			start_mark = token.start_mark
			end_mark = token.end_mark
			skip_token(parser)
			token = peek_token(parser)
			if token == nil {
				return false
			}
			if token.token_type == yaml_TAG_TOKEN {
				tag_handle = token.value
				tag_suffix = token.suffix
				tag_mark = token.start_mark
				end_mark = token.end_mark
				skip_token(parser)
				token = peek_token(parser)
				if token == nil {
					return false
				}
			}
		} else if token.token_type == yaml_TAG_TOKEN {
			tag_handle = token.value
			tag_suffix = token.suffix
			start_mark, tag_mark = token.start_mark, token.start_mark
			end_mark = token.end_mark
			skip_token(parser)
			token = peek_token(parser)
			if token == nil {
				return false
			}
			if token.token_type == yaml_ANCHOR_TOKEN {
				anchor = token.value
				end_mark = token.end_mark
				skip_token(parser)
				token = peek_token(parser)
				if token == nil {
					return false
				}
			}
		}
		// Resolve the tag handle against the %TAG directives in effect.
		var tag []byte
		if tag_handle != nil {
			if len(tag_handle) == 0 {
				// Empty handle: a verbatim tag, the suffix is the full tag.
				tag = tag_suffix
				tag_handle = nil
				tag_suffix = nil
			} else {
				for i := range parser.tag_directives {
					tag_directive := &parser.tag_directives[i]
					if bytes.Equal(tag_directive.handle, tag_handle) {
						// Expand handle to its prefix; copy so the directive's
						// prefix slice is never appended to in place.
						tag = append([]byte(nil), tag_directive.prefix...)
						tag = append(tag, tag_suffix...)
						tag_handle = nil
						tag_suffix = nil
						break
					}
				}
				if len(tag) == 0 {
					yaml_parser_set_parser_error_context(parser,
						"while parsing a node", start_mark,
						"found undefined tag handle", tag_mark)
					return false
				}
			}
		}
		// A node with no explicit tag is implicitly typed by the resolver.
		implicit := len(tag) == 0
		if indentless_sequence && token.token_type == yaml_BLOCK_ENTRY_TOKEN {
			// "- " entries at the current indentation level: an indentless
			// block sequence (only legal as a block-mapping value).
			end_mark = token.end_mark
			parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
			*event = yaml_event_t{
				event_type: yaml_SEQUENCE_START_EVENT,
				start_mark: start_mark,
				end_mark:   end_mark,
				anchor:     anchor,
				tag:        tag,
				implicit:   implicit,
				style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
			}
			return true
		} else {
			if token.token_type == yaml_SCALAR_TOKEN {
				// Plain scalars with no tag resolve implicitly; quoted
				// scalars with no tag are "quoted implicit" (always !!str).
				plain_implicit := false
				quoted_implicit := false
				end_mark = token.end_mark
				if (token.style == yaml_PLAIN_SCALAR_STYLE && len(tag) == 0) ||
					(len(tag) == 1 && tag[0] == '!') {
					plain_implicit = true
				} else if len(tag) == 0 {
					quoted_implicit = true
				}
				parser.state = parser.states[len(parser.states)-1]
				parser.states = parser.states[:len(parser.states)-1]
				*event = yaml_event_t{
					event_type:      yaml_SCALAR_EVENT,
					start_mark:      start_mark,
					end_mark:        end_mark,
					anchor:          anchor,
					tag:             tag,
					value:           token.value,
					implicit:        plain_implicit,
					quoted_implicit: quoted_implicit,
					style:           yaml_style_t(token.style),
				}
				skip_token(parser)
				return true
			} else if token.token_type == yaml_FLOW_SEQUENCE_START_TOKEN {
				end_mark = token.end_mark
				parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
				*event = yaml_event_t{
					event_type: yaml_SEQUENCE_START_EVENT,
					start_mark: start_mark,
					end_mark:   end_mark,
					anchor:     anchor,
					tag:        tag,
					implicit:   implicit,
					style:      yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
				}
				return true
			} else if token.token_type == yaml_FLOW_MAPPING_START_TOKEN {
				end_mark = token.end_mark
				parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
				*event = yaml_event_t{
					event_type: yaml_MAPPING_START_EVENT,
					start_mark: start_mark,
					end_mark:   end_mark,
					anchor:     anchor,
					tag:        tag,
					implicit:   implicit,
					style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
				}
				return true
			} else if block && token.token_type == yaml_BLOCK_SEQUENCE_START_TOKEN {
				end_mark = token.end_mark
				parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
				*event = yaml_event_t{
					event_type: yaml_SEQUENCE_START_EVENT,
					start_mark: start_mark,
					end_mark:   end_mark,
					anchor:     anchor,
					tag:        tag,
					implicit:   implicit,
					style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
				}
				return true
			} else if block && token.token_type == yaml_BLOCK_MAPPING_START_TOKEN {
				end_mark = token.end_mark
				parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
				*event = yaml_event_t{
					event_type: yaml_MAPPING_START_EVENT,
					start_mark: start_mark,
					end_mark:   end_mark,
					anchor:     anchor,
					tag:        tag,
					implicit:   implicit,
					style:      yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
				}
				return true
			} else if len(anchor) > 0 || len(tag) > 0 {
				// Properties with no content: a node like "!!str &a" stands
				// for an empty scalar carrying those properties.
				parser.state = parser.states[len(parser.states)-1]
				parser.states = parser.states[:len(parser.states)-1]
				*event = yaml_event_t{
					event_type:      yaml_SCALAR_EVENT,
					start_mark:      start_mark,
					end_mark:        end_mark,
					anchor:          anchor,
					tag:             tag,
					implicit:        implicit,
					quoted_implicit: false,
					style:           yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
				}
				return true
			} else {
				msg := "while parsing a block node"
				if !block {
					msg = "while parsing a flow node"
				}
				yaml_parser_set_parser_error_context(parser, msg, start_mark,
					"did not find expected node content", token.start_mark)
				return false
			}
		}
	}
	// Unreachable: every branch above returns.
	return false
}
/*
* Parse the productions:
* block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
* ******************** *********** * *********
*/
// yaml_parser_parse_block_sequence_entry parses one entry of a block
// sequence. On the first call it records the sequence's start mark and
// consumes the BLOCK-SEQUENCE-START token.
//
// Production:
//   block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t,
	event *yaml_event_t, first bool) bool {
	if first {
		token := peek_token(parser)
		// BUG FIX: peek_token can return nil (scanner error); the original
		// dereferenced token.start_mark without checking, unlike every other
		// peek_token call site in this file.
		if token == nil {
			return false
		}
		parser.marks = append(parser.marks, token.start_mark)
		skip_token(parser)
	}
	token := peek_token(parser)
	if token == nil {
		return false
	}
	if token.token_type == yaml_BLOCK_ENTRY_TOKEN {
		mark := token.end_mark
		skip_token(parser)
		token = peek_token(parser)
		if token == nil {
			return false
		}
		if token.token_type != yaml_BLOCK_ENTRY_TOKEN &&
			token.token_type != yaml_BLOCK_END_TOKEN {
			parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
			return yaml_parser_parse_node(parser, event, true, false)
		} else {
			// "- " with no node: the entry is an empty scalar.
			parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
			return yaml_parser_process_empty_scalar(parser, event, mark)
		}
	} else if token.token_type == yaml_BLOCK_END_TOKEN {
		parser.state = parser.states[len(parser.states)-1]
		parser.states = parser.states[:len(parser.states)-1]
		parser.marks = parser.marks[:len(parser.marks)-1]
		*event = yaml_event_t{
			event_type: yaml_SEQUENCE_END_EVENT,
			start_mark: token.start_mark,
			end_mark:   token.end_mark,
		}
		skip_token(parser)
		return true
	} else {
		mark := parser.marks[len(parser.marks)-1]
		parser.marks = parser.marks[:len(parser.marks)-1]
		return yaml_parser_set_parser_error_context(parser,
			"while parsing a block collection", mark,
			"did not find expected '-' indicator", token.start_mark)
	}
}
/*
* Parse the productions:
* indentless_sequence ::= (BLOCK-ENTRY block_node?)+
* *********** *
*/
// yaml_parser_parse_indentless_sequence_entry parses one entry of an
// indentless sequence (a "- " list at the enclosing mapping's indentation).
//
// Production:
//   indentless_sequence ::= (BLOCK-ENTRY block_node?)+
func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t,
	event *yaml_event_t) bool {
	token := peek_token(parser)
	if token == nil {
		return false
	}
	if token.token_type != yaml_BLOCK_ENTRY_TOKEN {
		// No more "-" entries: the sequence ends here. There is no explicit
		// end token, so both marks point at the token that follows.
		last := len(parser.states) - 1
		parser.state = parser.states[last]
		parser.states = parser.states[:last]
		*event = yaml_event_t{
			event_type: yaml_SEQUENCE_END_EVENT,
			start_mark: token.start_mark,
			end_mark:   token.start_mark,
		}
		return true
	}
	entryMark := token.end_mark
	skip_token(parser)
	token = peek_token(parser)
	if token == nil {
		return false
	}
	switch token.token_type {
	case yaml_BLOCK_ENTRY_TOKEN, yaml_KEY_TOKEN, yaml_VALUE_TOKEN, yaml_BLOCK_END_TOKEN:
		// "-" immediately followed by another structural token: empty entry.
		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
		return yaml_parser_process_empty_scalar(parser, event, entryMark)
	}
	parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
	return yaml_parser_parse_node(parser, event, true, false)
}
/*
* Parse the productions:
* block_mapping ::= BLOCK-MAPPING_START
* *******************
* ((KEY block_node_or_indentless_sequence?)?
* *** *
* (VALUE block_node_or_indentless_sequence?)?)*
*
* BLOCK-END
* *********
*/
// yaml_parser_parse_block_mapping_key parses the key side of a block mapping
// entry, or the mapping's end. On the first call it records the mapping's
// start mark and consumes the BLOCK-MAPPING-START token.
//
// Production:
//   block_mapping ::= BLOCK-MAPPING-START
//                     ((KEY block_node_or_indentless_sequence?)?
//                      (VALUE block_node_or_indentless_sequence?)?)*
//                     BLOCK-END
func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t,
	event *yaml_event_t, first bool) bool {
	if first {
		token := peek_token(parser)
		// BUG FIX: guard against a nil token from peek_token before reading
		// token.start_mark (scanner errors surface as nil here).
		if token == nil {
			return false
		}
		parser.marks = append(parser.marks, token.start_mark)
		skip_token(parser)
	}
	token := peek_token(parser)
	if token == nil {
		return false
	}
	if token.token_type == yaml_KEY_TOKEN {
		mark := token.end_mark
		skip_token(parser)
		token = peek_token(parser)
		if token == nil {
			return false
		}
		if token.token_type != yaml_KEY_TOKEN &&
			token.token_type != yaml_VALUE_TOKEN &&
			token.token_type != yaml_BLOCK_END_TOKEN {
			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
			return yaml_parser_parse_node(parser, event, true, true)
		} else {
			// "?" with no key node: the key is an empty scalar.
			parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
			return yaml_parser_process_empty_scalar(parser, event, mark)
		}
	} else if token.token_type == yaml_BLOCK_END_TOKEN {
		parser.state = parser.states[len(parser.states)-1]
		parser.states = parser.states[:len(parser.states)-1]
		parser.marks = parser.marks[:len(parser.marks)-1]
		*event = yaml_event_t{
			event_type: yaml_MAPPING_END_EVENT,
			start_mark: token.start_mark,
			end_mark:   token.end_mark,
		}
		skip_token(parser)
		return true
	} else {
		mark := parser.marks[len(parser.marks)-1]
		parser.marks = parser.marks[:len(parser.marks)-1]
		return yaml_parser_set_parser_error_context(parser,
			"while parsing a block mapping", mark,
			"did not find expected key", token.start_mark)
	}
}
/*
* Parse the productions:
* block_mapping ::= BLOCK-MAPPING_START
*
* ((KEY block_node_or_indentless_sequence?)?
*
* (VALUE block_node_or_indentless_sequence?)?)*
* ***** *
* BLOCK-END
*
*/
// yaml_parser_parse_block_mapping_value parses the value side of a block
// mapping entry; a missing value is represented by an empty scalar.
func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t,
	event *yaml_event_t) bool {
	token := peek_token(parser)
	if token == nil {
		return false
	}
	if token.token_type != yaml_VALUE_TOKEN {
		// No ":" indicator at all: the key has an empty value.
		parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
		return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
	}
	valueMark := token.end_mark
	skip_token(parser)
	token = peek_token(parser)
	if token == nil {
		return false
	}
	switch token.token_type {
	case yaml_KEY_TOKEN, yaml_VALUE_TOKEN, yaml_BLOCK_END_TOKEN:
		// ":" immediately followed by structure: the value is empty.
		parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
		return yaml_parser_process_empty_scalar(parser, event, valueMark)
	}
	parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
	return yaml_parser_parse_node(parser, event, true, true)
}
/*
* Parse the productions:
* flow_sequence ::= FLOW-SEQUENCE-START
* *******************
* (flow_sequence_entry FLOW-ENTRY)*
* * **********
* flow_sequence_entry?
* *
* FLOW-SEQUENCE-END
* *****************
* flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
* *
*/
// yaml_parser_parse_flow_sequence_entry parses one entry of a flow sequence
// ("[a, b, c]"), including single-pair mappings ("[k: v]"). On the first call
// it records the start mark and consumes the FLOW-SEQUENCE-START token.
//
// Production:
//   flow_sequence ::= FLOW-SEQUENCE-START
//                     (flow_sequence_entry FLOW-ENTRY)* flow_sequence_entry?
//                     FLOW-SEQUENCE-END
func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t,
	event *yaml_event_t, first bool) bool {
	if first {
		token := peek_token(parser)
		// BUG FIX: peek_token may return nil; check before dereferencing.
		if token == nil {
			return false
		}
		parser.marks = append(parser.marks, token.start_mark)
		skip_token(parser)
	}
	token := peek_token(parser)
	if token == nil {
		return false
	}
	if token.token_type != yaml_FLOW_SEQUENCE_END_TOKEN {
		if !first {
			// Entries after the first must be separated by ",".
			if token.token_type == yaml_FLOW_ENTRY_TOKEN {
				skip_token(parser)
				token = peek_token(parser)
				if token == nil {
					return false
				}
			} else {
				mark := parser.marks[len(parser.marks)-1]
				parser.marks = parser.marks[:len(parser.marks)-1]
				return yaml_parser_set_parser_error_context(parser,
					"while parsing a flow sequence", mark,
					"did not find expected ',' or ']'", token.start_mark)
			}
		}
		if token.token_type == yaml_KEY_TOKEN {
			// "?" inside a flow sequence: a single-pair mapping entry.
			parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
			*event = yaml_event_t{
				event_type: yaml_MAPPING_START_EVENT,
				start_mark: token.start_mark,
				end_mark:   token.end_mark,
				implicit:   true,
				style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
			}
			skip_token(parser)
			return true
		} else if token.token_type != yaml_FLOW_SEQUENCE_END_TOKEN {
			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
			return yaml_parser_parse_node(parser, event, false, false)
		}
	}
	// "]": close the sequence.
	parser.state = parser.states[len(parser.states)-1]
	parser.states = parser.states[:len(parser.states)-1]
	parser.marks = parser.marks[:len(parser.marks)-1]
	*event = yaml_event_t{
		event_type: yaml_SEQUENCE_END_EVENT,
		start_mark: token.start_mark,
		end_mark:   token.end_mark,
	}
	skip_token(parser)
	return true
}
/*
* Parse the productions:
* flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
* *** *
*/
// yaml_parser_parse_flow_sequence_entry_mapping_key parses the key of a
// single-pair mapping inside a flow sequence; an absent key becomes an
// empty scalar.
func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t,
	event *yaml_event_t) bool {
	token := peek_token(parser)
	if token == nil {
		return false
	}
	switch token.token_type {
	case yaml_VALUE_TOKEN, yaml_FLOW_ENTRY_TOKEN, yaml_FLOW_SEQUENCE_END_TOKEN:
		// The key node is missing: emit an empty scalar in its place.
		keyMark := token.end_mark
		skip_token(parser)
		parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
		return yaml_parser_process_empty_scalar(parser, event, keyMark)
	}
	parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
	return yaml_parser_parse_node(parser, event, false, false)
}
/*
* Parse the productions:
* flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
* ***** *
*/
// yaml_parser_parse_flow_sequence_entry_mapping_value parses the value of a
// single-pair mapping inside a flow sequence; a missing value becomes an
// empty scalar.
func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t,
	event *yaml_event_t) bool {
	token := peek_token(parser)
	if token == nil {
		return false
	}
	if token.token_type == yaml_VALUE_TOKEN {
		skip_token(parser)
		if token = peek_token(parser); token == nil {
			return false
		}
		// Only a real node follows ":" when the next token is not "," or "]".
		if token.token_type != yaml_FLOW_ENTRY_TOKEN &&
			token.token_type != yaml_FLOW_SEQUENCE_END_TOKEN {
			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
			return yaml_parser_parse_node(parser, event, false, false)
		}
	}
	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
}
/*
* Parse the productions:
* flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
* *
*/
// yaml_parser_parse_flow_sequence_entry_mapping_end closes the single-pair
// mapping begun inside a flow sequence. The close is zero-width: both marks
// point at the (unconsumed) token that follows.
func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t,
	event *yaml_event_t) bool {
	token := peek_token(parser)
	if token == nil {
		return false
	}
	mark := token.start_mark
	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
	*event = yaml_event_t{
		event_type: yaml_MAPPING_END_EVENT,
		start_mark: mark,
		end_mark:   mark,
	}
	return true
}
/*
* Parse the productions:
* flow_mapping ::= FLOW-MAPPING-START
* ******************
* (flow_mapping_entry FLOW-ENTRY)*
* * **********
* flow_mapping_entry?
* ******************
* FLOW-MAPPING-END
* ****************
* flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
* * *** *
*/
// yaml_parser_parse_flow_mapping_key parses the key side of a flow mapping
// entry ("{a: b, c: d}"), or the mapping's end. On the first call it records
// the start mark and consumes the FLOW-MAPPING-START token.
//
// Production:
//   flow_mapping ::= FLOW-MAPPING-START
//                    (flow_mapping_entry FLOW-ENTRY)* flow_mapping_entry?
//                    FLOW-MAPPING-END
func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t,
	event *yaml_event_t, first bool) bool {
	if first {
		token := peek_token(parser)
		// BUG FIX: peek_token may return nil; check before dereferencing.
		if token == nil {
			return false
		}
		parser.marks = append(parser.marks, token.start_mark)
		skip_token(parser)
	}
	token := peek_token(parser)
	if token == nil {
		return false
	}
	if token.token_type != yaml_FLOW_MAPPING_END_TOKEN {
		if !first {
			// Entries after the first must be separated by ",".
			if token.token_type == yaml_FLOW_ENTRY_TOKEN {
				skip_token(parser)
				token = peek_token(parser)
				if token == nil {
					return false
				}
			} else {
				mark := parser.marks[len(parser.marks)-1]
				parser.marks = parser.marks[:len(parser.marks)-1]
				return yaml_parser_set_parser_error_context(parser,
					"while parsing a flow mapping", mark,
					"did not find expected ',' or '}'", token.start_mark)
			}
		}
		if token.token_type == yaml_KEY_TOKEN {
			skip_token(parser)
			token = peek_token(parser)
			if token == nil {
				return false
			}
			if token.token_type != yaml_VALUE_TOKEN &&
				token.token_type != yaml_FLOW_ENTRY_TOKEN &&
				token.token_type != yaml_FLOW_MAPPING_END_TOKEN {
				parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
				return yaml_parser_parse_node(parser, event, false, false)
			} else {
				// "?" with no key node: the key is an empty scalar.
				parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
				return yaml_parser_process_empty_scalar(parser, event,
					token.start_mark)
			}
		} else if token.token_type != yaml_FLOW_MAPPING_END_TOKEN {
			// Bare node used as a key: its value will be forced empty.
			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
			return yaml_parser_parse_node(parser, event, false, false)
		}
	}
	// "}": close the mapping.
	parser.state = parser.states[len(parser.states)-1]
	parser.states = parser.states[:len(parser.states)-1]
	parser.marks = parser.marks[:len(parser.marks)-1]
	*event = yaml_event_t{
		event_type: yaml_MAPPING_END_EVENT,
		start_mark: token.start_mark,
		end_mark:   token.end_mark,
	}
	skip_token(parser)
	return true
}
/*
* Parse the productions:
* flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
* * ***** *
*/
// yaml_parser_parse_flow_mapping_value parses the value side of a flow
// mapping entry. When empty is set the value is forced to an empty scalar
// (used after a bare key with no ":" indicator).
func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t,
	event *yaml_event_t, empty bool) bool {
	token := peek_token(parser)
	if token == nil {
		return false
	}
	if empty {
		parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
		return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
	}
	if token.token_type != yaml_VALUE_TOKEN {
		// No ":" indicator: the value is an empty scalar.
		parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
		return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
	}
	skip_token(parser)
	if token = peek_token(parser); token == nil {
		return false
	}
	switch token.token_type {
	case yaml_FLOW_ENTRY_TOKEN, yaml_FLOW_MAPPING_END_TOKEN:
		// ":" immediately followed by "," or "}": empty value.
		parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
		return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
	}
	parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
	return yaml_parser_parse_node(parser, event, false, false)
}
/*
* Generate an empty scalar event.
*/
// yaml_parser_process_empty_scalar stores a zero-length plain SCALAR event at
// the given mark. It is used wherever the grammar allows a node to be
// omitted (empty keys, empty values, empty entries). Always returns true.
func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t,
	mark YAML_mark_t) bool {
	empty := yaml_event_t{
		event_type: yaml_SCALAR_EVENT,
		start_mark: mark,
		end_mark:   mark,
		value:      nil,
		implicit:   true,
		style:      yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
	}
	*event = empty
	return true
}
/*
* Parse directives.
*/
// yaml_parser_process_directives consumes the %YAML and %TAG directives at
// the start of a document, validating the version and registering the tag
// handles (plus the defaults) on the parser. The parsed directives are
// written through the optional out-parameters when non-nil.
func yaml_parser_process_directives(parser *yaml_parser_t,
	version_directive_ref **yaml_version_directive_t,
	tag_directives_ref *[]yaml_tag_directive_t) bool {
	token := peek_token(parser)
	if token == nil {
		return false
	}
	var version_directive *yaml_version_directive_t
	var tag_directives []yaml_tag_directive_t
	for token.token_type == yaml_VERSION_DIRECTIVE_TOKEN ||
		token.token_type == yaml_TAG_DIRECTIVE_TOKEN {
		if token.token_type == yaml_VERSION_DIRECTIVE_TOKEN {
			if version_directive != nil {
				yaml_parser_set_parser_error(parser,
					"found duplicate %YAML directive", token.start_mark)
				return false
			}
			// Only YAML 1.1 documents are accepted by this parser.
			if token.major != 1 ||
				token.minor != 1 {
				yaml_parser_set_parser_error(parser,
					"found incompatible YAML document", token.start_mark)
				return false
			}
			version_directive = &yaml_version_directive_t{
				major: token.major,
				minor: token.minor,
			}
		} else if token.token_type == yaml_TAG_DIRECTIVE_TOKEN {
			value := yaml_tag_directive_t{
				handle: token.value,
				prefix: token.prefix,
			}
			if !yaml_parser_append_tag_directive(parser, value, false,
				token.start_mark) {
				return false
			}
			tag_directives = append(tag_directives, value)
		}
		skip_token(parser)
		// BUG FIX: the original used `token := peek_token(parser)`, which
		// declared a new variable shadowing the loop's token; the for
		// condition then kept re-testing the stale token and the loop never
		// observed the advanced token stream. Assign instead of redeclare.
		token = peek_token(parser)
		if token == nil {
			return false
		}
	}
	// Install the default "!" and "!!" handles (duplicates are allowed so an
	// explicit user override wins).
	for i := range default_tag_directives {
		if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
			return false
		}
	}
	if version_directive_ref != nil {
		*version_directive_ref = version_directive
	}
	if tag_directives_ref != nil {
		*tag_directives_ref = tag_directives
	}
	return true
}
/*
* Append a tag directive to the directives stack.
*/
// yaml_parser_append_tag_directive registers a %TAG directive on the parser.
// A duplicate handle is an error unless allow_duplicates is set (used for the
// built-in defaults, so an explicit user directive takes precedence).
func yaml_parser_append_tag_directive(parser *yaml_parser_t,
	value yaml_tag_directive_t, allow_duplicates bool, mark YAML_mark_t) bool {
	for i := range parser.tag_directives {
		existing := &parser.tag_directives[i]
		if !bytes.Equal(value.handle, existing.handle) {
			continue
		}
		if allow_duplicates {
			return true
		}
		return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
	}
	parser.tag_directives = append(parser.tag_directives, value)
	return true
}
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/parser_test.go
================================================
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"io/ioutil"
"os"
"path/filepath"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// parses registers a ginkgo spec that streams the named fixture file through
// the low-level event parser and expects it to reach STREAM-END cleanly.
var parses = func(filename string) {
	It("parses "+filename, func() {
		file, err := os.Open(filename)
		Expect(err).To(BeNil())
		parser := yaml_parser_t{}
		yaml_parser_initialize(&parser)
		yaml_parser_set_input_reader(&parser, file)
		failed := false
		event := yaml_event_t{}
		// Pull events until STREAM-END; a parse failure logs the parser's
		// problem/context/position and aborts the loop.
		for {
			if !yaml_parser_parse(&parser, &event) {
				failed = true
				println("---", parser.error, parser.problem, parser.context, "line", parser.problem_mark.line, "col", parser.problem_mark.column)
				break
			}
			if event.event_type == yaml_STREAM_END_EVENT {
				break
			}
		}
		file.Close()
		// msg := "SUCCESS"
		// if failed {
		// msg = "FAILED"
		// if parser.error != yaml_NO_ERROR {
		// m := parser.problem_mark
		// fmt.Printf("ERROR: (%s) %s @ line: %d col: %d\n",
		// parser.context, parser.problem, m.line, m.column)
		// }
		// }
		Expect(failed).To(BeFalse())
	})
}
// parseYamls registers one "parses" spec for every regular file directly
// inside dirname (subdirectories are not recursed into). An unreadable
// directory panics, aborting the ginkgo suite construction.
var parseYamls = func(dirname string) {
	entries, err := ioutil.ReadDir(dirname)
	if err != nil {
		panic(err.Error())
	}
	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}
		parses(filepath.Join(dirname, entry.Name()))
	}
}
// Register one spec per fixture file in the YAML-specification directories.
// NOTE(review): the fixture paths are relative to the package directory at
// test run time — confirm the fixtures ship with the vendored package.
var _ = Describe("Parser", func() {
	parseYamls("fixtures/specification")
	parseYamls("fixtures/specification/types")
})
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/reader.go
================================================
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"io"
)
/*
* Set the reader error and return 0.
*/
// yaml_parser_set_reader_error records a reader-level error on the parser
// and returns false so callers can propagate the failure directly
// (`return yaml_parser_set_reader_error(...)`). offset is the byte offset in
// the input stream; value is the offending byte/rune, or -1 when not
// applicable.
func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string,
	offset int, value int) bool {
	parser.error = yaml_READER_ERROR
	parser.problem = problem
	parser.problem_offset = offset
	parser.problem_value = value
	return false
}
/*
* Byte order marks.
*/
const (
	BOM_UTF8    = "\xef\xbb\xbf" // UTF-8 byte order mark
	BOM_UTF16LE = "\xff\xfe"     // UTF-16 little-endian byte order mark
	BOM_UTF16BE = "\xfe\xff"     // UTF-16 big-endian byte order mark
)
/*
* Determine the input stream encoding by checking the BOM symbol. If no BOM is
* found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure.
*/
// yaml_parser_determine_encoding sniffs the input encoding from a leading
// byte order mark, consuming the BOM when found. Without a BOM the stream is
// assumed to be UTF-8. Returns false only if refilling the raw buffer fails.
func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
	// Accumulate at least three raw bytes (or reach EOF) so the longest BOM
	// can be examined.
	for !parser.eof &&
		len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
		if !yaml_parser_update_raw_buffer(parser) {
			return false
		}
	}
	buf := parser.raw_buffer
	pos := parser.raw_buffer_pos
	avail := len(buf) - pos
	// hasBOM reports whether the unread input starts with the given mark.
	hasBOM := func(bom string) bool {
		if avail < len(bom) {
			return false
		}
		for i := 0; i < len(bom); i++ {
			if buf[pos+i] != bom[i] {
				return false
			}
		}
		return true
	}
	switch {
	case hasBOM(BOM_UTF16LE):
		parser.encoding = yaml_UTF16LE_ENCODING
		parser.raw_buffer_pos += 2
		parser.offset += 2
	case hasBOM(BOM_UTF16BE):
		parser.encoding = yaml_UTF16BE_ENCODING
		parser.raw_buffer_pos += 2
		parser.offset += 2
	case hasBOM(BOM_UTF8):
		parser.encoding = yaml_UTF8_ENCODING
		parser.raw_buffer_pos += 3
		parser.offset += 3
	default:
		// No BOM: default to UTF-8 without consuming anything.
		parser.encoding = yaml_UTF8_ENCODING
	}
	return true
}
/*
* Update the raw buffer.
*/
// yaml_parser_update_raw_buffer refills the raw (undecoded) input buffer from
// the read handler. Unconsumed bytes are compacted to the front of the buffer
// first; the buffer's capacity is reused across calls. Returns false only on
// a read error (EOF just sets parser.eof).
func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
	size_read := 0
	/* Return if the raw buffer is full. */
	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
		return true
	}
	/* Return on EOF. */
	if parser.eof {
		return true
	}
	/* Move the remaining bytes in the raw buffer to the beginning. */
	// Note the copy is over aliasing slices; copy() handles the overlap.
	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
	}
	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
	parser.raw_buffer_pos = 0
	/* Call the read handler to fill the buffer. */
	// The handler writes into the unused capacity beyond the current length.
	size_read, err := parser.read_handler(parser,
		parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
	if err == io.EOF {
		parser.eof = true
	} else if err != nil {
		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(),
			parser.offset, -1)
	}
	return true
}
/*
* Ensure that the buffer contains at least `length` characters.
* Return 1 on success, 0 on failure.
*
* The length is supposed to be significantly less that the buffer size.
*/
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
/* Read handler must be set. */
if parser.read_handler == nil {
panic("read handler must be set")
}
/* If the EOF flag is set and the raw buffer is empty, do nothing. */
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
return true
}
/* Return if the buffer contains enough characters. */
if parser.unread >= length {
return true
}
/* Determine the input encoding if it is not known yet. */
if parser.encoding == yaml_ANY_ENCODING {
if !yaml_parser_determine_encoding(parser) {
return false
}
}
/* Move the unread characters to the beginning of the buffer. */
buffer_end := len(parser.buffer)
if 0 < parser.buffer_pos &&
parser.buffer_pos < buffer_end {
copy(parser.buffer, parser.buffer[parser.buffer_pos:])
buffer_end -= parser.buffer_pos
parser.buffer_pos = 0
} else if parser.buffer_pos == buffer_end {
buffer_end = 0
parser.buffer_pos = 0
}
parser.buffer = parser.buffer[:cap(parser.buffer)]
/* Fill the buffer until it has enough characters. */
first := true
for parser.unread < length {
/* Fill the raw buffer if necessary. */
if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
if !yaml_parser_update_raw_buffer(parser) {
parser.buffer = parser.buffer[:buffer_end]
return false
}
}
first = false
/* Decode the raw buffer. */
for parser.raw_buffer_pos != len(parser.raw_buffer) {
var value rune
var w int
raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
incomplete := false
/* Decode the next character. */
switch parser.encoding {
case yaml_UTF8_ENCODING:
/*
* Decode a UTF-8 character. Check RFC 3629
* (http://www.ietf.org/rfc/rfc3629.txt) for more details.
*
* The following table (taken from the RFC) is used for
* decoding.
*
* Char. number range | UTF-8 octet sequence
* (hexadecimal) | (binary)
* --------------------+------------------------------------
* 0000 0000-0000 007F | 0xxxxxxx
* 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
* 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
* 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
*
* Additionally, the characters in the range 0xD800-0xDFFF
* are prohibited as they are reserved for use with UTF-16
* surrogate pairs.
*/
/* Determine the length of the UTF-8 sequence. */
octet := parser.raw_buffer[parser.raw_buffer_pos]
w = width(octet)
/* Check if the leading octet is valid. */
if w == 0 {
return yaml_parser_set_reader_error(parser,
"invalid leading UTF-8 octet",
parser.offset, int(octet))
}
/* Check if the raw buffer contains an incomplete character. */
if w > raw_unread {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-8 octet sequence",
parser.offset, -1)
}
incomplete = true
break
}
/* Decode the leading octet. */
switch {
case octet&0x80 == 0x00:
value = rune(octet & 0x7F)
case octet&0xE0 == 0xC0:
value = rune(octet & 0x1F)
case octet&0xF0 == 0xE0:
value = rune(octet & 0x0F)
case octet&0xF8 == 0xF0:
value = rune(octet & 0x07)
default:
value = 0
}
/* Check and decode the trailing octets. */
for k := 1; k < w; k++ {
octet = parser.raw_buffer[parser.raw_buffer_pos+k]
/* Check if the octet is valid. */
if (octet & 0xC0) != 0x80 {
return yaml_parser_set_reader_error(parser,
"invalid trailing UTF-8 octet",
parser.offset+k, int(octet))
}
/* Decode the octet. */
value = (value << 6) + rune(octet&0x3F)
}
/* Check the length of the sequence against the value. */
switch {
case w == 1:
case w == 2 && value >= 0x80:
case w == 3 && value >= 0x800:
case w == 4 && value >= 0x10000:
default:
return yaml_parser_set_reader_error(parser,
"invalid length of a UTF-8 sequence",
parser.offset, -1)
}
/* Check the range of the value. */
if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
return yaml_parser_set_reader_error(parser,
"invalid Unicode character",
parser.offset, int(value))
}
case yaml_UTF16LE_ENCODING,
yaml_UTF16BE_ENCODING:
var low, high int
if parser.encoding == yaml_UTF16LE_ENCODING {
low, high = 0, 1
} else {
high, low = 1, 0
}
/*
* The UTF-16 encoding is not as simple as one might
* naively think. Check RFC 2781
* (http://www.ietf.org/rfc/rfc2781.txt).
*
* Normally, two subsequent bytes describe a Unicode
* character. However a special technique (called a
* surrogate pair) is used for specifying character
* values larger than 0xFFFF.
*
* A surrogate pair consists of two pseudo-characters:
* high surrogate area (0xD800-0xDBFF)
* low surrogate area (0xDC00-0xDFFF)
*
* The following formulas are used for decoding
* and encoding characters using surrogate pairs:
*
* U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
* U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
* W1 = 110110yyyyyyyyyy
* W2 = 110111xxxxxxxxxx
*
* where U is the character value, W1 is the high surrogate
* area, W2 is the low surrogate area.
*/
/* Check for incomplete UTF-16 character. */
if raw_unread < 2 {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-16 character",
parser.offset, -1)
}
incomplete = true
break
}
/* Get the character. */
value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
/* Check for unexpected low surrogate area. */
if (value & 0xFC00) == 0xDC00 {
return yaml_parser_set_reader_error(parser,
"unexpected low surrogate area",
parser.offset, int(value))
}
/* Check for a high surrogate area. */
if (value & 0xFC00) == 0xD800 {
w = 4
/* Check for incomplete surrogate pair. */
if raw_unread < 4 {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-16 surrogate pair",
parser.offset, -1)
}
incomplete = true
break
}
/* Get the next character. */
value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
/* Check for a low surrogate area. */
if (value2 & 0xFC00) != 0xDC00 {
return yaml_parser_set_reader_error(parser,
"expected low surrogate area",
parser.offset+2, int(value2))
}
/* Generate the value of the surrogate pair. */
value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
} else {
w = 2
}
break
default:
panic("Impossible") /* Impossible. */
}
/* Check if the raw buffer contains enough bytes to form a character. */
if incomplete {
break
}
/*
* Check if the character is in the allowed range:
* #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
* | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
* | [#x10000-#x10FFFF] (32 bit)
*/
if !(value == 0x09 || value == 0x0A || value == 0x0D ||
(value >= 0x20 && value <= 0x7E) ||
(value == 0x85) || (value >= 0xA0 && value <= 0xD7FF) ||
(value >= 0xE000 && value <= 0xFFFD) ||
(value >= 0x10000 && value <= 0x10FFFF)) {
return yaml_parser_set_reader_error(parser,
"control characters are not allowed",
parser.offset, int(value))
}
/* Move the raw pointers. */
parser.raw_buffer_pos += w
parser.offset += w
/* Finally put the character into the buffer. */
/* 0000 0000-0000 007F . 0xxxxxxx */
if value <= 0x7F {
parser.buffer[buffer_end] = byte(value)
} else if value <= 0x7FF {
/* 0000 0080-0000 07FF . 110xxxxx 10xxxxxx */
parser.buffer[buffer_end] = byte(0xC0 + (value >> 6))
parser.buffer[buffer_end+1] = byte(0x80 + (value & 0x3F))
} else if value <= 0xFFFF {
/* 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx */
parser.buffer[buffer_end] = byte(0xE0 + (value >> 12))
parser.buffer[buffer_end+1] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_end+2] = byte(0x80 + (value & 0x3F))
} else {
/* 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */
parser.buffer[buffer_end] = byte(0xF0 + (value >> 18))
parser.buffer[buffer_end+1] = byte(0x80 + ((value >> 12) & 0x3F))
parser.buffer[buffer_end+2] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_end+3] = byte(0x80 + (value & 0x3F))
}
buffer_end += w
parser.unread++
}
/* On EOF, put NUL into the buffer and return. */
if parser.eof {
parser.buffer[buffer_end] = 0
buffer_end++
parser.buffer = parser.buffer[:buffer_end]
parser.unread++
return true
}
}
parser.buffer = parser.buffer[:buffer_end]
return true
}
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/reader_test.go
================================================
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
// "fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
/*
* Test cases are stolen from
* http://www.cl.cam.ac.uk/~mgk25/ucs/examples/UTF-8-test.txt
*/
// test_case describes one reader test: a human-readable title, a test
// string holding one or more samples separated by '|' and terminated by
// '!', and the expected parse outcome shared by every sample.
type test_case struct {
	title  string
	test   string
	result bool
}
// Reader tests exercise yaml_parser_update_buffer against malformed and
// well-formed UTF-8/UTF-16 input.  The UTF-8 cases are taken from
// http://www.cl.cam.ac.uk/~mgk25/ucs/examples/UTF-8-test.txt
var _ = Describe("Reader", func() {
	LONG := 100000

	Context("UTF8 Sequences", func() {
		utf8_sequences := []test_case{
			/* {"title", "test 1|test 2|...|test N!", (0 or 1)}, */
			{"a simple test", "'test' is '\xd0\xbf\xd1\x80\xd0\xbe\xd0\xb2\xd0\xb5\xd1\x80\xd0\xba\xd0\xb0' in Russian!", true},
			{"an empty line", "!", true},
			{"u-0 is a control character", "\x00!", false},
			{"u-80 is a control character", "\xc2\x80!", false},
			{"u-800 is valid", "\xe0\xa0\x80!", true},
			{"u-10000 is valid", "\xf0\x90\x80\x80!", true},
			{"5 bytes sequences are not allowed", "\xf8\x88\x80\x80\x80!", false},
			{"6 bytes sequences are not allowed", "\xfc\x84\x80\x80\x80\x80!", false},
			{"u-7f is a control character", "\x7f!", false},
			{"u-7FF is valid", "\xdf\xbf!", true},
			{"u-FFFF is a control character", "\xef\xbf\xbf!", false},
			{"u-1FFFFF is too large", "\xf7\xbf\xbf\xbf!", false},
			{"u-3FFFFFF is 5 bytes", "\xfb\xbf\xbf\xbf\xbf!", false},
			{"u-7FFFFFFF is 6 bytes", "\xfd\xbf\xbf\xbf\xbf\xbf!", false},
			{"u-D7FF", "\xed\x9f\xbf!", true},
			{"u-E000", "\xee\x80\x80!", true},
			{"u-FFFD", "\xef\xbf\xbd!", true},
			{"u-10FFFF", "\xf4\x8f\xbf\xbf!", true},
			{"u-110000", "\xf4\x90\x80\x80!", false},
			{"first continuation byte", "\x80!", false},
			{"last continuation byte", "\xbf!", false},
			{"2 continuation bytes", "\x80\xbf!", false},
			{"3 continuation bytes", "\x80\xbf\x80!", false},
			{"4 continuation bytes", "\x80\xbf\x80\xbf!", false},
			{"5 continuation bytes", "\x80\xbf\x80\xbf\x80!", false},
			{"6 continuation bytes", "\x80\xbf\x80\xbf\x80\xbf!", false},
			{"7 continuation bytes", "\x80\xbf\x80\xbf\x80\xbf\x80!", false},
			{"sequence of all 64 possible continuation bytes",
				"\x80|\x81|\x82|\x83|\x84|\x85|\x86|\x87|\x88|\x89|\x8a|\x8b|\x8c|\x8d|\x8e|\x8f|" +
					"\x90|\x91|\x92|\x93|\x94|\x95|\x96|\x97|\x98|\x99|\x9a|\x9b|\x9c|\x9d|\x9e|\x9f|" +
					"\xa0|\xa1|\xa2|\xa3|\xa4|\xa5|\xa6|\xa7|\xa8|\xa9|\xaa|\xab|\xac|\xad|\xae|\xaf|" +
					"\xb0|\xb1|\xb2|\xb3|\xb4|\xb5|\xb6|\xb7|\xb8|\xb9|\xba|\xbb|\xbc|\xbd|\xbe|\xbf!", false},
			{"32 first bytes of 2-byte sequences {0xc0-0xdf}",
				"\xc0 |\xc1 |\xc2 |\xc3 |\xc4 |\xc5 |\xc6 |\xc7 |\xc8 |\xc9 |\xca |\xcb |\xcc |\xcd |\xce |\xcf |" +
					"\xd0 |\xd1 |\xd2 |\xd3 |\xd4 |\xd5 |\xd6 |\xd7 |\xd8 |\xd9 |\xda |\xdb |\xdc |\xdd |\xde |\xdf !", false},
			{"16 first bytes of 3-byte sequences {0xe0-0xef}",
				"\xe0 |\xe1 |\xe2 |\xe3 |\xe4 |\xe5 |\xe6 |\xe7 |\xe8 |\xe9 |\xea |\xeb |\xec |\xed |\xee |\xef !", false},
			{"8 first bytes of 4-byte sequences {0xf0-0xf7}", "\xf0 |\xf1 |\xf2 |\xf3 |\xf4 |\xf5 |\xf6 |\xf7 !", false},
			{"4 first bytes of 5-byte sequences {0xf8-0xfb}", "\xf8 |\xf9 |\xfa |\xfb !", false},
			{"2 first bytes of 6-byte sequences {0xfc-0xfd}", "\xfc |\xfd !", false},
			{"sequences with last byte missing {u-0}",
				"\xc0|\xe0\x80|\xf0\x80\x80|\xf8\x80\x80\x80|\xfc\x80\x80\x80\x80!", false},
			{"sequences with last byte missing {u-...FF}",
				"\xdf|\xef\xbf|\xf7\xbf\xbf|\xfb\xbf\xbf\xbf|\xfd\xbf\xbf\xbf\xbf!", false},
			{"impossible bytes", "\xfe|\xff|\xfe\xfe\xff\xff!", false},
			{"overlong sequences {u-2f}",
				"\xc0\xaf|\xe0\x80\xaf|\xf0\x80\x80\xaf|\xf8\x80\x80\x80\xaf|\xfc\x80\x80\x80\x80\xaf!", false},
			{"maximum overlong sequences",
				"\xc1\xbf|\xe0\x9f\xbf|\xf0\x8f\xbf\xbf|\xf8\x87\xbf\xbf\xbf|\xfc\x83\xbf\xbf\xbf\xbf!", false},
			{"overlong representation of the NUL character",
				"\xc0\x80|\xe0\x80\x80|\xf0\x80\x80\x80|\xf8\x80\x80\x80\x80|\xfc\x80\x80\x80\x80\x80!", false},
			{"single UTF-16 surrogates",
				"\xed\xa0\x80|\xed\xad\xbf|\xed\xae\x80|\xed\xaf\xbf|\xed\xb0\x80|\xed\xbe\x80|\xed\xbf\xbf!", false},
			{"paired UTF-16 surrogates",
				"\xed\xa0\x80\xed\xb0\x80|\xed\xa0\x80\xed\xbf\xbf|\xed\xad\xbf\xed\xb0\x80|" +
					"\xed\xad\xbf\xed\xbf\xbf|\xed\xae\x80\xed\xb0\x80|\xed\xae\x80\xed\xbf\xbf|" +
					"\xed\xaf\xbf\xed\xb0\x80|\xed\xaf\xbf\xed\xbf\xbf!", false},
			{"other illegal code positions", "\xef\xbf\xbe|\xef\xbf\xbf!", false},
		}

		// check_sequence runs one It per test case, feeding each
		// '|'-separated sample of tc.test to a fresh parser and expecting
		// the outcome declared by tc.result.  Samples end at '!'.
		check_sequence := func(tc test_case) {
			It(tc.title, func() {
				start := 0
				end := start
				bytes := []byte(tc.test)

				for {
					// Advance end to the next sample delimiter.
					for bytes[end] != '|' && bytes[end] != '!' {
						end++
					}

					parser := yaml_parser_t{}
					yaml_parser_initialize(&parser)
					yaml_parser_set_input_string(&parser, bytes)
					result := yaml_parser_update_buffer(&parser, end-start)
					Expect(result).To(Equal(tc.result))

					// Release the parser for every sample, including the
					// final one.  (Previously the loop broke on '!' before
					// reaching the delete, so the last parser was never
					// cleaned up.)
					yaml_parser_delete(&parser)

					if bytes[end] == '!' {
						break
					}
					end++
					start = end
				}
			})
		}

		for _, test := range utf8_sequences {
			check_sequence(test)
		}
	})

	Context("BOMs", func() {
		boms := []test_case{
			/* {"title", "test!", length}, */
			{"no bom (utf-8)", "Hi is \xd0\x9f\xd1\x80\xd0\xb8\xd0\xb2\xd0\xb5\xd1\x82!", true},
			{"bom (utf-8)", "\xef\xbb\xbfHi is \xd0\x9f\xd1\x80\xd0\xb8\xd0\xb2\xd0\xb5\xd1\x82!", true},
			{"bom (utf-16-le)", "\xff\xfeH\x00i\x00 \x00i\x00s\x00 \x00\x1f\x04@\x04" + "8\x04" + "2\x04" + "5\x04" + "B\x04!", true},
			{"bom (utf-16-be)", "\xfe\xff\x00H\x00i\x00 \x00i\x00s\x00 \x04\x1f\x04@\x04" + "8\x04" + "2\x04" + "5\x04" + "B!", true},
		}

		// check_bom verifies that a byte-order mark (or its absence) is
		// detected and the remainder of the input parses as expected.
		check_bom := func(tc test_case) {
			It(tc.title, func() {
				start := 0
				end := start
				bytes := []byte(tc.test)

				// The sample ends at the '!' sentinel.
				for bytes[end] != '!' {
					end++
				}

				parser := yaml_parser_t{}
				yaml_parser_initialize(&parser)
				yaml_parser_set_input_string(&parser, bytes[:end-start])
				result := yaml_parser_update_buffer(&parser, end-start)
				Expect(result).To(Equal(tc.result))
				yaml_parser_delete(&parser)
			})
		}

		for _, test := range boms {
			check_bom(test)
		}
	})

	Context("Long UTF8", func() {
		It("parses properly", func() {
			// Build a UTF-8 BOM followed by LONG alternating two-byte
			// characters (U+0410 / U+042F).
			buffer := make([]byte, 0, 3+LONG*2)
			buffer = append(buffer, '\xef', '\xbb', '\xbf')
			for j := 0; j < LONG; j++ {
				if j%2 == 1 {
					buffer = append(buffer, '\xd0', '\x90')
				} else {
					buffer = append(buffer, '\xd0', '\xaf')
				}
			}

			parser := yaml_parser_t{}
			yaml_parser_initialize(&parser)
			yaml_parser_set_input_string(&parser, buffer)

			// Consume one character at a time, refilling the buffer on
			// demand, and verify each decoded UTF-8 pair.
			for k := 0; k < LONG; k++ {
				if parser.unread == 0 {
					updated := yaml_parser_update_buffer(&parser, 1)
					Expect(updated).To(BeTrue())
				}
				Expect(parser.unread).NotTo(Equal(0))

				var ch0, ch1 byte
				if k%2 == 1 {
					ch0 = '\xd0'
					ch1 = '\x90'
				} else {
					ch0 = '\xd0'
					ch1 = '\xaf'
				}
				Expect(parser.buffer[parser.buffer_pos]).To(Equal(ch0))
				Expect(parser.buffer[parser.buffer_pos+1]).To(Equal(ch1))

				parser.buffer_pos += 2
				parser.unread -= 1
			}

			// A final update at EOF must still succeed.
			updated := yaml_parser_update_buffer(&parser, 1)
			Expect(updated).To(BeTrue())
			yaml_parser_delete(&parser)
		})
	})

	Context("Long UTF16", func() {
		It("parses properly", func() {
			// Build a UTF-16LE BOM followed by LONG alternating characters
			// (U+0410 / U+042F); the parser transcodes them to UTF-8.
			buffer := make([]byte, 0, 2+LONG*2)
			buffer = append(buffer, '\xff', '\xfe')
			for j := 0; j < LONG; j++ {
				if j%2 == 1 {
					buffer = append(buffer, '\x10', '\x04')
				} else {
					buffer = append(buffer, '/', '\x04')
				}
			}

			parser := yaml_parser_t{}
			yaml_parser_initialize(&parser)
			yaml_parser_set_input_string(&parser, buffer)

			// Each UTF-16 input character should appear in the parser
			// buffer as its two-byte UTF-8 encoding.
			for k := 0; k < LONG; k++ {
				if parser.unread == 0 {
					updated := yaml_parser_update_buffer(&parser, 1)
					Expect(updated).To(BeTrue())
				}
				Expect(parser.unread).NotTo(Equal(0))

				var ch0, ch1 byte
				if k%2 == 1 {
					ch0 = '\xd0'
					ch1 = '\x90'
				} else {
					ch0 = '\xd0'
					ch1 = '\xaf'
				}
				Expect(parser.buffer[parser.buffer_pos]).To(Equal(ch0))
				Expect(parser.buffer[parser.buffer_pos+1]).To(Equal(ch1))

				parser.buffer_pos += 2
				parser.unread -= 1
			}

			updated := yaml_parser_update_buffer(&parser, 1)
			Expect(updated).To(BeTrue())
			yaml_parser_delete(&parser)
		})
	})
})
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/resolver.go
================================================
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"bytes"
"encoding/base64"
"fmt"
"math"
"reflect"
"regexp"
"strconv"
"strings"
"time"
)
// Package-level lookup tables used by the scalar resolvers.
var (
	// byteSliceType identifies []byte targets for base64 decoding.
	byteSliceType = reflect.TypeOf([]byte(nil))

	// binary_tags lists the tags that mark a scalar as base64 binary.
	binary_tags = [][]byte{[]byte("!binary"), []byte(yaml_BINARY_TAG)}

	// bool_values and null_values are populated in init().
	bool_values map[string]bool
	null_values map[string]bool

	// First-byte dispatch sets used by resolveInterface.
	signs = []byte{'-', '+'}
	nulls = []byte{'~', 'n', 'N'}
	bools = []byte{'t', 'T', 'f', 'F', 'y', 'Y', 'n', 'N', 'o', 'O'}

	// Timestamp patterns, compiled in init().
	timestamp_regexp *regexp.Regexp
	ymd_regexp       *regexp.Regexp
)
// init populates the boolean/null lookup tables and compiles the
// timestamp patterns used by resolve_time.
func init() {
	bool_values = map[string]bool{
		"y":     true,
		"yes":   true,
		"on":    true,
		"true":  true,
		"n":     false,
		"no":    false,
		"off":   false,
		"false": false,
	}

	null_values = map[string]bool{
		"~":    true,
		"null": true,
		"Null": true,
		"NULL": true,
	}

	timestamp_regexp = regexp.MustCompile("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)(?:(?:[Tt]|[ \t]+)([0-9][0-9]?):([0-9][0-9]):([0-9][0-9])(?:\\.([0-9]*))?(?:[ \t]*(?:Z|([-+][0-9][0-9]?)(?::([0-9][0-9])?)?))?)?$")
	ymd_regexp = regexp.MustCompile("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)$")
}
// resolve decodes the scalar carried by event into the reflect value v,
// dispatching on v's kind.  It returns the YAML tag that matched (e.g.
// yaml_INT_TAG) and an error when the scalar cannot be represented in v.
// Null literals ("~", "null", ...) zero v regardless of its kind.
func resolve(event yaml_event_t, v reflect.Value, useNumber bool) (string, error) {
	val := string(event.value)

	// Any null literal resets v to its zero value (nil for pointers).
	if null_values[val] {
		v.Set(reflect.Zero(v.Type()))
		return yaml_NULL_TAG, nil
	}

	switch v.Kind() {
	case reflect.String:
		// A json.Number-style target gets numeric resolution first.
		if useNumber && v.Type() == numberType {
			tag, i := resolveInterface(event, useNumber)
			if n, ok := i.(Number); ok {
				v.Set(reflect.ValueOf(n))
				return tag, nil
			}
			return "", fmt.Errorf("Not a number: '%s' at %s", event.value, event.start_mark)
		}

		return resolve_string(val, v, event)
	case reflect.Bool:
		return resolve_bool(val, v, event)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return resolve_int(val, v, useNumber, event)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		return resolve_uint(val, v, useNumber, event)
	case reflect.Float32, reflect.Float64:
		return resolve_float(val, v, useNumber, event)
	case reflect.Interface:
		// NOTE(review): the tag chosen by resolveInterface is discarded
		// here and yaml_STR_TAG is returned below regardless — confirm
		// whether callers rely on the returned tag in this path.
		_, i := resolveInterface(event, useNumber)
		if i != nil {
			v.Set(reflect.ValueOf(i))
		} else {
			v.Set(reflect.Zero(v.Type()))
		}
	case reflect.Struct:
		// The only struct target handled is time.Time.
		return resolve_time(val, v, event)
	case reflect.Slice:
		// Only []byte slices are resolvable (base64-encoded binary).
		if v.Type() != byteSliceType {
			return "", fmt.Errorf("Cannot resolve %s into %s at %s", val, v.String(), event.start_mark)
		}
		b, err := decode_binary(event.value, event)
		if err != nil {
			return "", err
		}
		v.Set(reflect.ValueOf(b))
	default:
		return "", fmt.Errorf("Unknown resolution for '%s' using %s at %s", val, v.String(), event.start_mark)
	}

	return yaml_STR_TAG, nil
}
// hasBinaryTag reports whether the event's tag marks its scalar as
// base64-encoded binary data.
func hasBinaryTag(event yaml_event_t) bool {
	tag := event.tag
	for i := range binary_tags {
		if bytes.Equal(tag, binary_tags[i]) {
			return true
		}
	}
	return false
}
// decode_binary base64-decodes value and returns the decoded bytes.  The
// event is used only to report the source position on error.
func decode_binary(value []byte, event yaml_event_t) ([]byte, error) {
	b := make([]byte, base64.StdEncoding.DecodedLen(len(value)))
	n, err := base64.StdEncoding.Decode(b, value)
	if err != nil {
		// Report the offending input text, not the partially-decoded
		// output buffer b (which the original mistakenly printed).
		return nil, fmt.Errorf("Invalid base64 text: '%s' at %s", string(value), event.start_mark)
	}
	return b[:n], nil
}
// resolve_string stores val into the string value v.  When the event
// carries a binary tag, the scalar is base64-decoded first and the
// decoded bytes are stored as a string.
func resolve_string(val string, v reflect.Value, event yaml_event_t) (string, error) {
	if len(event.tag) > 0 && hasBinaryTag(event) {
		decoded, err := decode_binary(event.value, event)
		if err != nil {
			return "", err
		}
		val = string(decoded)
	}
	v.SetString(val)
	return yaml_STR_TAG, nil
}
// resolve_bool stores the YAML boolean literal val ("yes"/"no",
// "on"/"off", "true"/"false", case-insensitive) into v, or returns an
// error when val is not a recognized boolean.
func resolve_bool(val string, v reflect.Value, event yaml_event_t) (string, error) {
	b, ok := bool_values[strings.ToLower(val)]
	if !ok {
		return "", fmt.Errorf("Invalid boolean: '%s' at %s", val, event.start_mark)
	}
	v.SetBool(b)
	return yaml_BOOL_TAG, nil
}
// resolve_int parses val as a signed integer and stores it into v (or,
// for a Number target, stores its decimal string form).  Supported forms
// include optional '+'/'-' sign, '_' digit separators, "0o" octal, and
// whatever bases strconv accepts with base 0 (e.g. "0x" hex).  Errors
// always report the original, unmodified input.
func resolve_int(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) {
	original := val
	val = strings.Replace(val, "_", "", -1)

	isNumberValue := v.Type() == numberType

	// Guard against empty (or underscore-only) input, which previously
	// panicked on the val[0] access below.
	if len(val) == 0 {
		return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark)
	}

	sign := int64(1)
	if val[0] == '-' {
		sign = -1
		val = val[1:]
	} else if val[0] == '+' {
		val = val[1:]
	}

	base := 0
	if val == "0" {
		if isNumberValue {
			v.SetString("0")
		} else {
			v.Set(reflect.Zero(v.Type()))
		}
		return yaml_INT_TAG, nil
	}

	// YAML 1.2 spells octal as "0o"; strconv with base 0 does not.
	if strings.HasPrefix(val, "0o") {
		base = 8
		val = val[2:]
	}

	value, err := strconv.ParseUint(val, base, 64)
	if err != nil {
		return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark)
	}

	// Fold the unsigned magnitude and sign into an int64, allowing the
	// special case -9223372036854775808 (MinInt64).
	var val64 int64
	if value <= math.MaxInt64 {
		val64 = int64(value)
		if sign == -1 {
			val64 = -val64
		}
	} else if sign == -1 && value == uint64(math.MaxInt64)+1 {
		val64 = math.MinInt64
	} else {
		return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark)
	}

	if isNumberValue {
		v.SetString(strconv.FormatInt(val64, 10))
	} else {
		if v.OverflowInt(val64) {
			return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark)
		}
		v.SetInt(val64)
	}

	return yaml_INT_TAG, nil
}
// resolve_uint parses val as an unsigned integer and stores it into v
// (or, for a Number target, stores its decimal string form).  A leading
// '-' is rejected, '+' and '_' separators are accepted, and "0o" octal
// plus strconv base-0 forms are supported.  Errors report the original,
// unmodified input, matching resolve_int.
func resolve_uint(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) {
	original := val
	val = strings.Replace(val, "_", "", -1)

	isNumberValue := v.Type() == numberType

	// Guard against empty (or underscore-only) input, which previously
	// panicked on the val[0] access below.
	if len(val) == 0 {
		return "", fmt.Errorf("Invalid unsigned integer: '%s' at %s", original, event.start_mark)
	}

	if val[0] == '-' {
		return "", fmt.Errorf("Unsigned int with negative value: '%s' at %s", original, event.start_mark)
	}

	if val[0] == '+' {
		val = val[1:]
	}

	base := 0
	if val == "0" {
		if isNumberValue {
			v.SetString("0")
		} else {
			v.Set(reflect.Zero(v.Type()))
		}
		return yaml_INT_TAG, nil
	}

	// YAML 1.2 spells octal as "0o"; strconv with base 0 does not.
	if strings.HasPrefix(val, "0o") {
		base = 8
		val = val[2:]
	}

	value, err := strconv.ParseUint(val, base, 64)
	if err != nil {
		// Report the original input, not the stripped val (consistent
		// with resolve_int's error reporting).
		return "", fmt.Errorf("Invalid unsigned integer: '%s' at %s", original, event.start_mark)
	}

	if isNumberValue {
		v.SetString(strconv.FormatUint(value, 10))
	} else {
		if v.OverflowUint(value) {
			return "", fmt.Errorf("Invalid unsigned integer: '%s' at %s", original, event.start_mark)
		}
		v.SetUint(value)
	}

	return yaml_INT_TAG, nil
}
// resolve_float parses val as a float and stores it into v (or, for a
// Number target, stores its formatted string form).  It accepts an
// optional sign, '_' separators, and the YAML literals ".inf"/".nan"
// (case-insensitive).  Parsing uses the bit width of the target type so
// float32 overflow is caught by strconv.
func resolve_float(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) {
	val = strings.Replace(val, "_", "", -1)
	var value float64

	isNumberValue := v.Type() == numberType
	typeBits := 64
	if !isNumberValue {
		typeBits = v.Type().Bits()
	}

	// Guard against empty (or underscore-only) input, which previously
	// panicked on the val[0] access below.
	if len(val) == 0 {
		return "", fmt.Errorf("Invalid float: '%s' at %s", val, event.start_mark)
	}

	sign := 1
	if val[0] == '-' {
		sign = -1
		val = val[1:]
	} else if val[0] == '+' {
		val = val[1:]
	}

	valLower := strings.ToLower(val)
	if valLower == ".inf" {
		value = math.Inf(sign)
	} else if valLower == ".nan" {
		value = math.NaN()
	} else {
		var err error
		value, err = strconv.ParseFloat(val, typeBits)
		if err != nil {
			return "", fmt.Errorf("Invalid float: '%s' at %s", val, event.start_mark)
		}
		// Apply the stripped sign only after a successful parse.
		value *= float64(sign)
	}

	if isNumberValue {
		v.SetString(strconv.FormatFloat(value, 'g', -1, typeBits))
	} else {
		if v.OverflowFloat(value) {
			return "", fmt.Errorf("Invalid float: '%s' at %s", val, event.start_mark)
		}
		v.SetFloat(value)
	}

	return yaml_FLOAT_TAG, nil
}
// resolve_time parses val as a YAML timestamp and stores the resulting
// time.Time into v.  Two shapes are accepted: a bare "YYYY-MM-DD" date
// (interpreted as midnight UTC) and the full timestamp form with time of
// day, optional fractional seconds, and an optional UTC offset.
func resolve_time(val string, v reflect.Value, event yaml_event_t) (string, error) {
	var parsedTime time.Time
	matches := ymd_regexp.FindStringSubmatch(val)
	if len(matches) > 0 {
		// Date-only form: midnight UTC on the given day.
		year, _ := strconv.Atoi(matches[1])
		month, _ := strconv.Atoi(matches[2])
		day, _ := strconv.Atoi(matches[3])
		parsedTime = time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC)
	} else {
		matches = timestamp_regexp.FindStringSubmatch(val)
		if len(matches) == 0 {
			return "", fmt.Errorf("Invalid timestamp: '%s' at %s", val, event.start_mark)
		}

		// Atoi errors are ignored: the regexp guarantees each capture
		// is a digit string (though very long captures could overflow).
		year, _ := strconv.Atoi(matches[1])
		month, _ := strconv.Atoi(matches[2])
		day, _ := strconv.Atoi(matches[3])
		hour, _ := strconv.Atoi(matches[4])
		min, _ := strconv.Atoi(matches[5])
		sec, _ := strconv.Atoi(matches[6])

		nsec := 0
		if matches[7] != "" {
			// NOTE(review): the fraction capture ([0-9]*) is treated as a
			// millisecond count, so ".5" becomes 5ms rather than 500ms —
			// this looks suspect; confirm against the YAML timestamp spec
			// before changing, as existing tests may depend on it.
			millis, _ := strconv.Atoi(matches[7])
			nsec = int(time.Duration(millis) * time.Millisecond)
		}

		// Optional "+HH(:MM)" / "-HH(:MM)" zone offset; default is UTC.
		loc := time.UTC
		if matches[8] != "" {
			sign := matches[8][0]
			hr, _ := strconv.Atoi(matches[8][1:])
			min := 0
			if matches[9] != "" {
				min, _ = strconv.Atoi(matches[9])
			}

			zoneOffset := (hr*60 + min) * 60
			if sign == '-' {
				zoneOffset = -zoneOffset
			}

			loc = time.FixedZone("", zoneOffset)
		}

		parsedTime = time.Date(year, time.Month(month), day, hour, min, sec, nsec, loc)
	}

	v.Set(reflect.ValueOf(parsedTime))
	// Note: no tag is returned for timestamps.
	return "", nil
}
// resolveInterface guesses the natural Go type for an untyped scalar and
// returns the matching YAML tag plus the decoded value.  Dispatch is on
// the scalar's first byte: signed/digit prefixes try int, then float,
// then timestamp; null-ish prefixes try null then bool; '.' tries float;
// other boolean prefixes try bool.  Anything that fails every attempt
// falls through to a plain string (after a binary-tag check).
func resolveInterface(event yaml_event_t, useNumber bool) (string, interface{}) {
	val := string(event.value)
	// Explicitly quoted/tagged-absent non-implicit scalars stay strings.
	if len(event.tag) == 0 && !event.implicit {
		return "", val
	}

	if len(val) == 0 {
		return yaml_NULL_TAG, nil
	}

	var result interface{}
	sign := false
	c := val[0]
	switch {
	case bytes.IndexByte(signs, c) != -1:
		// '+'/'-' prefix: remember the sign (skips the timestamp attempt
		// below) and fall into the numeric case.
		sign = true
		fallthrough
	case c >= '0' && c <= '9':
		// Try integer first; with useNumber the target is a Number so
		// the textual form is preserved.
		i := int64(0)
		result = &i
		if useNumber {
			var n Number
			result = &n
		}

		v := reflect.ValueOf(result).Elem()
		if _, err := resolve_int(val, v, useNumber, event); err == nil {
			return yaml_INT_TAG, v.Interface()
		}

		// Not an int: try float next.
		f := float64(0)
		result = &f
		if useNumber {
			var n Number
			result = &n
		}

		v = reflect.ValueOf(result).Elem()
		if _, err := resolve_float(val, v, useNumber, event); err == nil {
			return yaml_FLOAT_TAG, v.Interface()
		}

		// Unsigned digit-leading scalars may still be timestamps
		// (e.g. "2001-12-14").
		if !sign {
			t := time.Time{}
			if _, err := resolve_time(val, reflect.ValueOf(&t).Elem(), event); err == nil {
				return "", t
			}
		}
	case bytes.IndexByte(nulls, c) != -1:
		// '~'/'n'/'N' prefix: null literal first, then booleans like
		// "no"/"NO".
		if null_values[val] {
			return yaml_NULL_TAG, nil
		}

		b := false
		if _, err := resolve_bool(val, reflect.ValueOf(&b).Elem(), event); err == nil {
			return yaml_BOOL_TAG, b
		}
	case c == '.':
		// Leading '.' covers ".inf"/".nan" and bare-fraction floats.
		f := float64(0)
		result = &f
		if useNumber {
			var n Number
			result = &n
		}

		v := reflect.ValueOf(result).Elem()
		if _, err := resolve_float(val, v, useNumber, event); err == nil {
			return yaml_FLOAT_TAG, v.Interface()
		}
	case bytes.IndexByte(bools, c) != -1:
		b := false
		if _, err := resolve_bool(val, reflect.ValueOf(&b).Elem(), event); err == nil {
			return yaml_BOOL_TAG, b
		}
	}

	// A binary tag forces base64 decoding of whatever remains.
	if hasBinaryTag(event) {
		bytes, err := decode_binary(event.value, event)
		if err == nil {
			return yaml_BINARY_TAG, bytes
		}
	}

	// Fallback: treat the scalar as a plain string.
	return yaml_STR_TAG, val
}
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/resolver_test.go
================================================
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"math"
"reflect"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("Resolver", func() {
var event yaml_event_t
var nulls = []string{"~", "null", "Null", "NULL"}
BeforeEach(func() {
event = yaml_event_t{}
})
Context("Resolve", func() {
Context("Implicit events", func() {
checkNulls := func(f func()) {
for _, null := range nulls {
event = yaml_event_t{implicit: true}
event.value = []byte(null)
f()
}
}
BeforeEach(func() {
event.implicit = true
})
Context("String", func() {
It("resolves a string", func() {
aString := ""
v := reflect.ValueOf(&aString)
event.value = []byte("abc")
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_STR_TAG))
Expect(aString).To(Equal("abc"))
})
It("resolves the empty string", func() {
aString := "abc"
v := reflect.ValueOf(&aString)
event.value = []byte("")
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_STR_TAG))
Expect(aString).To(Equal(""))
})
It("resolves null", func() {
checkNulls(func() {
aString := "abc"
v := reflect.ValueOf(&aString)
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_NULL_TAG))
Expect(aString).To(Equal(""))
})
})
It("resolves null pointers", func() {
checkNulls(func() {
aString := "abc"
pString := &aString
v := reflect.ValueOf(&pString)
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_NULL_TAG))
Expect(pString).To(BeNil())
})
})
})
Context("Booleans", func() {
match_bool := func(val string, expected bool) {
b := !expected
v := reflect.ValueOf(&b)
event.value = []byte(val)
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_BOOL_TAG))
Expect(b).To(Equal(expected))
}
It("resolves on", func() {
match_bool("on", true)
match_bool("ON", true)
})
It("resolves off", func() {
match_bool("off", false)
match_bool("OFF", false)
})
It("resolves true", func() {
match_bool("true", true)
match_bool("TRUE", true)
})
It("resolves false", func() {
match_bool("false", false)
match_bool("FALSE", false)
})
It("resolves yes", func() {
match_bool("yes", true)
match_bool("YES", true)
})
It("resolves no", func() {
match_bool("no", false)
match_bool("NO", false)
})
It("reports an error otherwise", func() {
b := true
v := reflect.ValueOf(&b)
event.value = []byte("fail")
_, err := resolve(event, v.Elem(), false)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal("Invalid boolean: 'fail' at line 0, column 0"))
})
It("resolves null", func() {
checkNulls(func() {
b := true
v := reflect.ValueOf(&b)
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_NULL_TAG))
Expect(b).To(BeFalse())
})
})
It("resolves null pointers", func() {
checkNulls(func() {
b := true
pb := &b
v := reflect.ValueOf(&pb)
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_NULL_TAG))
Expect(pb).To(BeNil())
})
})
})
Context("Ints", func() {
It("simple ints", func() {
i := 0
v := reflect.ValueOf(&i)
event.value = []byte("1234")
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_INT_TAG))
Expect(i).To(Equal(1234))
})
It("positive ints", func() {
i := int16(0)
v := reflect.ValueOf(&i)
event.value = []byte("+678")
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_INT_TAG))
Expect(i).To(Equal(int16(678)))
})
It("negative ints", func() {
i := int32(0)
v := reflect.ValueOf(&i)
event.value = []byte("-2345")
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_INT_TAG))
Expect(i).To(Equal(int32(-2345)))
})
It("base 8", func() {
i := 0
v := reflect.ValueOf(&i)
event.value = []byte("0o12")
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_INT_TAG))
Expect(i).To(Equal(10))
})
It("base 16", func() {
i := 0
v := reflect.ValueOf(&i)
event.value = []byte("0xff")
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_INT_TAG))
Expect(i).To(Equal(255))
})
It("fails on overflow", func() {
i := int8(0)
v := reflect.ValueOf(&i)
event.value = []byte("2345")
_, err := resolve(event, v.Elem(), false)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal("Invalid integer: '2345' at line 0, column 0"))
})
It("fails on invalid int", func() {
i := 0
v := reflect.ValueOf(&i)
event.value = []byte("234f")
_, err := resolve(event, v.Elem(), false)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal("Invalid integer: '234f' at line 0, column 0"))
})
It("resolves null", func() {
checkNulls(func() {
i := 1
v := reflect.ValueOf(&i)
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_NULL_TAG))
Expect(i).To(Equal(0))
})
})
It("resolves null pointers", func() {
checkNulls(func() {
i := 1
pi := &i
v := reflect.ValueOf(&pi)
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_NULL_TAG))
Expect(pi).To(BeNil())
})
})
It("returns a Number", func() {
var i Number
v := reflect.ValueOf(&i)
tag, err := resolve_int("12345", v.Elem(), true, event)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_INT_TAG))
Expect(i).To(Equal(Number("12345")))
Expect(i.Int64()).To(Equal(int64(12345)))
event.value = []byte("1234")
tag, err = resolve(event, v.Elem(), true)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_INT_TAG))
Expect(i).To(Equal(Number("1234")))
})
})
Context("UInts", func() {
It("resolves simple uints", func() {
i := uint(0)
v := reflect.ValueOf(&i)
event.value = []byte("1234")
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_INT_TAG))
Expect(i).To(Equal(uint(1234)))
})
It("resolves positive uints", func() {
i := uint16(0)
v := reflect.ValueOf(&i)
event.value = []byte("+678")
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_INT_TAG))
Expect(i).To(Equal(uint16(678)))
})
It("base 8", func() {
i := uint(0)
v := reflect.ValueOf(&i)
event.value = []byte("0o12")
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_INT_TAG))
Expect(i).To(Equal(uint(10)))
})
It("base 16", func() {
i := uint(0)
v := reflect.ValueOf(&i)
event.value = []byte("0xff")
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_INT_TAG))
Expect(i).To(Equal(uint(255)))
})
It("fails with negative ints", func() {
i := uint(0)
v := reflect.ValueOf(&i)
event.value = []byte("-2345")
_, err := resolve(event, v.Elem(), false)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal("Unsigned int with negative value: '-2345' at line 0, column 0"))
})
It("fails on overflow", func() {
i := uint8(0)
v := reflect.ValueOf(&i)
event.value = []byte("2345")
_, err := resolve(event, v.Elem(), false)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal("Invalid unsigned integer: '2345' at line 0, column 0"))
})
It("resolves null", func() {
checkNulls(func() {
i := uint(1)
v := reflect.ValueOf(&i)
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_NULL_TAG))
Expect(i).To(Equal(uint(0)))
})
})
It("resolves null pointers", func() {
checkNulls(func() {
i := uint(1)
pi := &i
v := reflect.ValueOf(&pi)
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_NULL_TAG))
Expect(pi).To(BeNil())
})
})
It("returns a Number", func() {
var i Number
v := reflect.ValueOf(&i)
tag, err := resolve_uint("12345", v.Elem(), true, event)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_INT_TAG))
Expect(i).To(Equal(Number("12345")))
event.value = []byte("1234")
tag, err = resolve(event, v.Elem(), true)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_INT_TAG))
Expect(i).To(Equal(Number("1234")))
})
})
Context("Floats", func() {
It("float32", func() {
f := float32(0)
v := reflect.ValueOf(&f)
event.value = []byte("2345.01")
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_FLOAT_TAG))
Expect(f).To(Equal(float32(2345.01)))
})
It("float64", func() {
f := float64(0)
v := reflect.ValueOf(&f)
event.value = []byte("-456456.01")
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_FLOAT_TAG))
Expect(f).To(Equal(float64(-456456.01)))
})
It("+inf", func() {
f := float64(0)
v := reflect.ValueOf(&f)
event.value = []byte("+.inf")
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_FLOAT_TAG))
Expect(f).To(Equal(math.Inf(1)))
})
It("-inf", func() {
f := float32(0)
v := reflect.ValueOf(&f)
event.value = []byte("-.inf")
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_FLOAT_TAG))
Expect(f).To(Equal(float32(math.Inf(-1))))
})
It("nan", func() {
f := float64(0)
v := reflect.ValueOf(&f)
event.value = []byte(".NaN")
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_FLOAT_TAG))
Expect(math.IsNaN(f)).To(BeTrue())
})
It("fails on overflow", func() {
i := float32(0)
v := reflect.ValueOf(&i)
event.value = []byte("123e10000")
_, err := resolve(event, v.Elem(), false)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal("Invalid float: '123e10000' at line 0, column 0"))
})
It("fails on invalid float", func() {
i := float32(0)
v := reflect.ValueOf(&i)
event.value = []byte("123e1a")
_, err := resolve(event, v.Elem(), false)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal("Invalid float: '123e1a' at line 0, column 0"))
})
It("resolves null", func() {
checkNulls(func() {
f := float64(1)
v := reflect.ValueOf(&f)
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_NULL_TAG))
Expect(f).To(Equal(0.0))
})
})
It("resolves null pointers", func() {
checkNulls(func() {
f := float64(1)
pf := &f
v := reflect.ValueOf(&pf)
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_NULL_TAG))
Expect(pf).To(BeNil())
})
})
It("returns a Number", func() {
var i Number
v := reflect.ValueOf(&i)
tag, err := resolve_float("12.345", v.Elem(), true, event)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_FLOAT_TAG))
Expect(i).To(Equal(Number("12.345")))
Expect(i.Float64()).To(Equal(12.345))
event.value = []byte("1.234")
tag, err = resolve(event, v.Elem(), true)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_FLOAT_TAG))
Expect(i).To(Equal(Number("1.234")))
})
})
Context("Timestamps", func() {
parse_date := func(val string, date time.Time) {
d := time.Now()
v := reflect.ValueOf(&d)
event.value = []byte(val)
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(""))
Expect(d).To(Equal(date))
}
It("date", func() {
parse_date("2002-12-14", time.Date(2002, time.December, 14, 0, 0, 0, 0, time.UTC))
})
It("canonical", func() {
parse_date("2001-12-15T02:59:43.1Z", time.Date(2001, time.December, 15, 2, 59, 43, int(1*time.Millisecond), time.UTC))
})
It("iso8601", func() {
parse_date("2001-12-14t21:59:43.10-05:00", time.Date(2001, time.December, 14, 21, 59, 43, int(10*time.Millisecond), time.FixedZone("", -5*3600)))
})
It("space separated", func() {
parse_date("2001-12-14 21:59:43.10 -5", time.Date(2001, time.December, 14, 21, 59, 43, int(10*time.Millisecond), time.FixedZone("", -5*3600)))
})
It("no time zone", func() {
parse_date("2001-12-15 2:59:43.10", time.Date(2001, time.December, 15, 2, 59, 43, int(10*time.Millisecond), time.UTC))
})
It("resolves null", func() {
checkNulls(func() {
d := time.Now()
v := reflect.ValueOf(&d)
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_NULL_TAG))
Expect(d).To(Equal(time.Time{}))
})
})
It("resolves null pointers", func() {
checkNulls(func() {
d := time.Now()
pd := &d
v := reflect.ValueOf(&pd)
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_NULL_TAG))
Expect(pd).To(BeNil())
})
})
})
Context("Binary tag", func() {
It("string", func() {
checkNulls(func() {
event.value = []byte("YWJjZGVmZw==")
event.tag = []byte("!binary")
aString := ""
v := reflect.ValueOf(&aString)
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_STR_TAG))
Expect(aString).To(Equal("abcdefg"))
})
})
It("[]byte", func() {
checkNulls(func() {
event.value = []byte("YWJjZGVmZw==")
event.tag = []byte("!binary")
bytes := []byte(nil)
v := reflect.ValueOf(&bytes)
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_STR_TAG))
Expect(bytes).To(Equal([]byte("abcdefg")))
})
})
It("returns a []byte when provided no hints", func() {
checkNulls(func() {
event.value = []byte("YWJjZGVmZw==")
event.tag = []byte("!binary")
var intf interface{}
v := reflect.ValueOf(&intf)
tag, err := resolve(event, v.Elem(), false)
Expect(err).NotTo(HaveOccurred())
Expect(tag).To(Equal(yaml_STR_TAG))
Expect(intf).To(Equal([]byte("abcdefg")))
})
})
})
It("fails to resolve a pointer", func() {
aString := ""
pString := &aString
v := reflect.ValueOf(&pString)
event.value = []byte("abc")
_, err := resolve(event, v.Elem(), false)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal("Unknown resolution for 'abc' using <*string Value> at line 0, column 0"))
})
})
Context("Not an implicit event && no tag", func() {
It("bool returns a string", func() {
event.value = []byte("on")
tag, result := resolveInterface(event, false)
Expect(result).To(Equal("on"))
Expect(tag).To(Equal(""))
})
It("number returns a string", func() {
event.value = []byte("1234")
tag, result := resolveInterface(event, false)
Expect(result).To(Equal("1234"))
Expect(tag).To(Equal(""))
})
It("returns the empty string", func() {
event.value = []byte("")
// event.implicit = true
tag, result := resolveInterface(event, false)
Expect(result).To(Equal(""))
Expect(tag).To(Equal(""))
})
})
})
})
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/run_parser.go
================================================
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"fmt"
"os"
)
/*
 * Run_parser scans each file named in args with the low-level token
 * scanner and reports, per file, whether scanning succeeded and how
 * many tokens were produced before STREAM-END. It panics if a file
 * cannot be opened.
 */
func Run_parser(cmd string, args []string) {
	for idx, name := range args {
		fmt.Printf("[%d] Scanning '%s'", idx, name)

		f, openErr := os.Open(name)
		if openErr != nil {
			panic(fmt.Sprintf("Invalid file '%s': %s", name, openErr.Error()))
		}

		var p yaml_parser_t
		yaml_parser_initialize(&p)
		yaml_parser_set_input_reader(&p, f)

		var (
			tok      yaml_token_t
			nTokens  int
			scanFail bool
		)
		for {
			if !yaml_parser_scan(&p, &tok) {
				scanFail = true
				break
			}
			if tok.token_type == yaml_STREAM_END_TOKEN {
				break
			}
			nTokens++
		}
		f.Close()

		status := "SUCCESS"
		if scanFail {
			status = "FAILED"
			if p.error != yaml_NO_ERROR {
				mark := p.problem_mark
				fmt.Printf("ERROR: (%s) %s @ line: %d col: %d\n",
					p.context, p.problem, mark.line, mark.column)
			}
		}
		fmt.Printf("%s (%d tokens)\n", status, nTokens)
	}
}
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/scanner.go
================================================
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"bytes"
)
/*
* Introduction
* ************
*
* The following notes assume that you are familiar with the YAML specification
* (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in
* some cases we are less restrictive than it requires.
*
* The process of transforming a YAML stream into a sequence of events is
* divided on two steps: Scanning and Parsing.
*
* The Scanner transforms the input stream into a sequence of tokens, while the
* parser transform the sequence of tokens produced by the Scanner into a
* sequence of parsing events.
*
* The Scanner is rather clever and complicated. The Parser, on the contrary,
* is a straightforward implementation of a recursive-descendant parser (or,
* LL(1) parser, as it is usually called).
*
* Actually there are two issues of Scanning that might be called "clever", the
* rest is quite straightforward. The issues are "block collection start" and
* "simple keys". Both issues are explained below in details.
*
* Here the Scanning step is explained and implemented. We start with the list
* of all the tokens produced by the Scanner together with short descriptions.
*
* Now, tokens:
*
* STREAM-START(encoding) # The stream start.
* STREAM-END # The stream end.
* VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
* TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
* DOCUMENT-START # '---'
* DOCUMENT-END # '...'
* BLOCK-SEQUENCE-START # Indentation increase denoting a block
* BLOCK-MAPPING-START # sequence or a block mapping.
* BLOCK-END # Indentation decrease.
* FLOW-SEQUENCE-START # '['
* FLOW-SEQUENCE-END # ']'
* FLOW-MAPPING-START # '{'
* FLOW-MAPPING-END # '}'
* BLOCK-ENTRY # '-'
* FLOW-ENTRY # ','
* KEY # '?' or nothing (simple keys).
* VALUE # ':'
* ALIAS(anchor) # '*anchor'
* ANCHOR(anchor) # '&anchor'
* TAG(handle,suffix) # '!handle!suffix'
* SCALAR(value,style) # A scalar.
*
* The following two tokens are "virtual" tokens denoting the beginning and the
* end of the stream:
*
* STREAM-START(encoding)
* STREAM-END
*
* We pass the information about the input stream encoding with the
* STREAM-START token.
*
* The next two tokens are responsible for tags:
*
* VERSION-DIRECTIVE(major,minor)
* TAG-DIRECTIVE(handle,prefix)
*
* Example:
*
* %YAML 1.1
* %TAG ! !foo
* %TAG !yaml! tag:yaml.org,2002:
* ---
*
* The corresponding sequence of tokens:
*
* STREAM-START(utf-8)
* VERSION-DIRECTIVE(1,1)
* TAG-DIRECTIVE("!","!foo")
* TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
* DOCUMENT-START
* STREAM-END
*
* Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
* line.
*
* The document start and end indicators are represented by:
*
* DOCUMENT-START
* DOCUMENT-END
*
* Note that if a YAML stream contains an implicit document (without '---'
* and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
* produced.
*
* In the following examples, we present whole documents together with the
* produced tokens.
*
* 1. An implicit document:
*
* 'a scalar'
*
* Tokens:
*
* STREAM-START(utf-8)
* SCALAR("a scalar",single-quoted)
* STREAM-END
*
* 2. An explicit document:
*
* ---
* 'a scalar'
* ...
*
* Tokens:
*
* STREAM-START(utf-8)
* DOCUMENT-START
* SCALAR("a scalar",single-quoted)
* DOCUMENT-END
* STREAM-END
*
* 3. Several documents in a stream:
*
* 'a scalar'
* ---
* 'another scalar'
* ---
* 'yet another scalar'
*
* Tokens:
*
* STREAM-START(utf-8)
* SCALAR("a scalar",single-quoted)
* DOCUMENT-START
* SCALAR("another scalar",single-quoted)
* DOCUMENT-START
* SCALAR("yet another scalar",single-quoted)
* STREAM-END
*
* We have already introduced the SCALAR token above. The following tokens are
* used to describe aliases, anchors, tag, and scalars:
*
* ALIAS(anchor)
* ANCHOR(anchor)
* TAG(handle,suffix)
* SCALAR(value,style)
*
* The following series of examples illustrate the usage of these tokens:
*
* 1. A recursive sequence:
*
* &A [ *A ]
*
* Tokens:
*
* STREAM-START(utf-8)
* ANCHOR("A")
* FLOW-SEQUENCE-START
* ALIAS("A")
* FLOW-SEQUENCE-END
* STREAM-END
*
* 2. A tagged scalar:
*
* !!float "3.14" # A good approximation.
*
* Tokens:
*
* STREAM-START(utf-8)
* TAG("!!","float")
* SCALAR("3.14",double-quoted)
* STREAM-END
*
* 3. Various scalar styles:
*
* --- # Implicit empty plain scalars do not produce tokens.
* --- a plain scalar
* --- 'a single-quoted scalar'
* --- "a double-quoted scalar"
* --- |-
* a literal scalar
* --- >-
* a folded
* scalar
*
* Tokens:
*
* STREAM-START(utf-8)
* DOCUMENT-START
* DOCUMENT-START
* SCALAR("a plain scalar",plain)
* DOCUMENT-START
* SCALAR("a single-quoted scalar",single-quoted)
* DOCUMENT-START
* SCALAR("a double-quoted scalar",double-quoted)
* DOCUMENT-START
* SCALAR("a literal scalar",literal)
* DOCUMENT-START
* SCALAR("a folded scalar",folded)
* STREAM-END
*
* Now it's time to review collection-related tokens. We will start with
* flow collections:
*
* FLOW-SEQUENCE-START
* FLOW-SEQUENCE-END
* FLOW-MAPPING-START
* FLOW-MAPPING-END
* FLOW-ENTRY
* KEY
* VALUE
*
* The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
* FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
* correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the
* indicators '?' and ':', which are used for denoting mapping keys and values,
* are represented by the KEY and VALUE tokens.
*
* The following examples show flow collections:
*
* 1. A flow sequence:
*
* [item 1, item 2, item 3]
*
* Tokens:
*
* STREAM-START(utf-8)
* FLOW-SEQUENCE-START
* SCALAR("item 1",plain)
* FLOW-ENTRY
* SCALAR("item 2",plain)
* FLOW-ENTRY
* SCALAR("item 3",plain)
* FLOW-SEQUENCE-END
* STREAM-END
*
* 2. A flow mapping:
*
* {
* a simple key: a value, # Note that the KEY token is produced.
* ? a complex key: another value,
* }
*
* Tokens:
*
* STREAM-START(utf-8)
* FLOW-MAPPING-START
* KEY
* SCALAR("a simple key",plain)
* VALUE
* SCALAR("a value",plain)
* FLOW-ENTRY
* KEY
* SCALAR("a complex key",plain)
* VALUE
* SCALAR("another value",plain)
* FLOW-ENTRY
* FLOW-MAPPING-END
* STREAM-END
*
* A simple key is a key which is not denoted by the '?' indicator. Note that
* the Scanner still produce the KEY token whenever it encounters a simple key.
*
* For scanning block collections, the following tokens are used (note that we
* repeat KEY and VALUE here):
*
* BLOCK-SEQUENCE-START
* BLOCK-MAPPING-START
* BLOCK-END
* BLOCK-ENTRY
* KEY
* VALUE
*
* The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
* increase that precedes a block collection (cf. the INDENT token in Python).
* The token BLOCK-END denote indentation decrease that ends a block collection
* (cf. the DEDENT token in Python). However YAML has some syntax peculiarities
* that makes detections of these tokens more complex.
*
* The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
* '-', '?', and ':' correspondingly.
*
* The following examples show how the tokens BLOCK-SEQUENCE-START,
* BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
*
* 1. Block sequences:
*
* - item 1
* - item 2
* -
* - item 3.1
* - item 3.2
* -
* key 1: value 1
* key 2: value 2
*
* Tokens:
*
* STREAM-START(utf-8)
* BLOCK-SEQUENCE-START
* BLOCK-ENTRY
* SCALAR("item 1",plain)
* BLOCK-ENTRY
* SCALAR("item 2",plain)
* BLOCK-ENTRY
* BLOCK-SEQUENCE-START
* BLOCK-ENTRY
* SCALAR("item 3.1",plain)
* BLOCK-ENTRY
* SCALAR("item 3.2",plain)
* BLOCK-END
* BLOCK-ENTRY
* BLOCK-MAPPING-START
* KEY
* SCALAR("key 1",plain)
* VALUE
* SCALAR("value 1",plain)
* KEY
* SCALAR("key 2",plain)
* VALUE
* SCALAR("value 2",plain)
* BLOCK-END
* BLOCK-END
* STREAM-END
*
* 2. Block mappings:
*
* a simple key: a value # The KEY token is produced here.
* ? a complex key
* : another value
* a mapping:
* key 1: value 1
* key 2: value 2
* a sequence:
* - item 1
* - item 2
*
* Tokens:
*
* STREAM-START(utf-8)
* BLOCK-MAPPING-START
* KEY
* SCALAR("a simple key",plain)
* VALUE
* SCALAR("a value",plain)
* KEY
* SCALAR("a complex key",plain)
* VALUE
* SCALAR("another value",plain)
* KEY
* SCALAR("a mapping",plain)
* BLOCK-MAPPING-START
* KEY
* SCALAR("key 1",plain)
* VALUE
* SCALAR("value 1",plain)
* KEY
* SCALAR("key 2",plain)
* VALUE
* SCALAR("value 2",plain)
* BLOCK-END
* KEY
* SCALAR("a sequence",plain)
* VALUE
* BLOCK-SEQUENCE-START
* BLOCK-ENTRY
* SCALAR("item 1",plain)
* BLOCK-ENTRY
* SCALAR("item 2",plain)
* BLOCK-END
* BLOCK-END
* STREAM-END
*
* YAML does not always require to start a new block collection from a new
* line. If the current line contains only '-', '?', and ':' indicators, a new
* block collection may start at the current line. The following examples
* illustrate this case:
*
* 1. Collections in a sequence:
*
* - - item 1
* - item 2
* - key 1: value 1
* key 2: value 2
* - ? complex key
* : complex value
*
* Tokens:
*
* STREAM-START(utf-8)
* BLOCK-SEQUENCE-START
* BLOCK-ENTRY
* BLOCK-SEQUENCE-START
* BLOCK-ENTRY
* SCALAR("item 1",plain)
* BLOCK-ENTRY
* SCALAR("item 2",plain)
* BLOCK-END
* BLOCK-ENTRY
* BLOCK-MAPPING-START
* KEY
* SCALAR("key 1",plain)
* VALUE
* SCALAR("value 1",plain)
* KEY
* SCALAR("key 2",plain)
* VALUE
* SCALAR("value 2",plain)
* BLOCK-END
* BLOCK-ENTRY
* BLOCK-MAPPING-START
* KEY
* SCALAR("complex key")
* VALUE
* SCALAR("complex value")
* BLOCK-END
* BLOCK-END
* STREAM-END
*
* 2. Collections in a mapping:
*
* ? a sequence
* : - item 1
* - item 2
* ? a mapping
* : key 1: value 1
* key 2: value 2
*
* Tokens:
*
* STREAM-START(utf-8)
* BLOCK-MAPPING-START
* KEY
* SCALAR("a sequence",plain)
* VALUE
* BLOCK-SEQUENCE-START
* BLOCK-ENTRY
* SCALAR("item 1",plain)
* BLOCK-ENTRY
* SCALAR("item 2",plain)
* BLOCK-END
* KEY
* SCALAR("a mapping",plain)
* VALUE
* BLOCK-MAPPING-START
* KEY
* SCALAR("key 1",plain)
* VALUE
* SCALAR("value 1",plain)
* KEY
* SCALAR("key 2",plain)
* VALUE
* SCALAR("value 2",plain)
* BLOCK-END
* BLOCK-END
* STREAM-END
*
* YAML also permits non-indented sequences if they are included into a block
* mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
*
* key:
* - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
* - item 2
*
* Tokens:
*
* STREAM-START(utf-8)
* BLOCK-MAPPING-START
* KEY
* SCALAR("key",plain)
* VALUE
* BLOCK-ENTRY
* SCALAR("item 1",plain)
* BLOCK-ENTRY
* SCALAR("item 2",plain)
* BLOCK-END
*/
/*
* Ensure that the buffer contains the required number of characters.
* Return 1 on success, 0 on failure (reader error or memory error).
*/
/*
 * cache ensures the parser buffer holds at least length unread
 * characters, refilling from the input reader when necessary.
 * Returns true on success, false on a reader or memory error.
 */
func cache(parser *yaml_parser_t, length int) bool {
	if parser.unread < length {
		return yaml_parser_update_buffer(parser, length)
	}
	return true
}
/*
* Advance the buffer pointer.
*/
/*
 * skip advances past one (possibly multi-byte) character, updating the
 * byte position, character index, column, and unread count in step.
 */
func skip(parser *yaml_parser_t) {
	w := width(parser.buffer[parser.buffer_pos])
	parser.buffer_pos += w
	parser.unread--
	parser.mark.index++
	parser.mark.column++
}
/*
 * skip_line advances past one line break at the current position,
 * resetting the column and bumping the line counter. A CRLF pair is
 * consumed as two characters forming a single break; any other break
 * is a single (possibly multi-byte) character. Non-breaks are ignored.
 */
func skip_line(parser *yaml_parser_t) {
	switch {
	case is_crlf_at(parser.buffer, parser.buffer_pos):
		// CR LF: two characters, one line break.
		parser.buffer_pos += 2
		parser.unread -= 2
		parser.mark.index += 2
		parser.mark.line++
		parser.mark.column = 0
	case is_break_at(parser.buffer, parser.buffer_pos):
		// Single break character (LF, CR, NEL, LS, or PS).
		parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
		parser.unread--
		parser.mark.index++
		parser.mark.line++
		parser.mark.column = 0
	}
}
/*
* Copy a character to a string buffer and advance pointers.
*/
/*
 * read copies the character at the current position into s (allocating
 * a small buffer for an empty s) and advances all parser pointers.
 * Panics on an invalid byte sequence (width 0).
 */
func read(parser *yaml_parser_t, s []byte) []byte {
	w := width(parser.buffer[parser.buffer_pos])
	if w == 0 {
		panic("invalid character sequence")
	}
	if len(s) == 0 {
		s = make([]byte, 0, 32)
	}
	if w == 1 && len(s) < cap(s) {
		// Fast path: single byte with spare capacity — extend in place.
		s = append(s, parser.buffer[parser.buffer_pos])
		parser.buffer_pos++
	} else {
		s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
		parser.buffer_pos += w
	}
	parser.mark.index++
	parser.mark.column++
	parser.unread--
	return s
}
/*
* Copy a line break character to a string buffer and advance pointers.
*/
/*
 * read_line consumes one line break at the current position and appends
 * its normalized form to s: CR, LF, CRLF, and NEL all become a single
 * '\n', while LS/PS (U+2028/U+2029) are copied through verbatim.
 * If no line break is present, s is returned unchanged.
 */
func read_line(parser *yaml_parser_t, s []byte) []byte {
	buf := parser.buffer
	pos := parser.buffer_pos

	switch {
	case buf[pos] == '\r' && buf[pos+1] == '\n':
		/* CR LF . LF — two characters, one break. */
		s = append(s, '\n')
		parser.buffer_pos += 2
		parser.mark.index++
		parser.unread--
	case buf[pos] == '\r' || buf[pos] == '\n':
		/* CR|LF . LF */
		s = append(s, '\n')
		parser.buffer_pos++
	case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
		/* NEL . LF (2-byte UTF-8, a single character). */
		s = append(s, '\n')
		parser.buffer_pos += 2
	case buf[pos] == '\xE2' && buf[pos+1] == '\x80' &&
		(buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
		/* LS|PS . LS|PS — copied verbatim (3-byte UTF-8). */
		s = append(s, buf[pos:pos+3]...)
		parser.buffer_pos += 3
	default:
		/* Not a line break: nothing to consume. */
		return s
	}

	parser.mark.index++
	parser.mark.column = 0
	parser.mark.line++
	parser.unread--
	return s
}
/*
* Get the next token.
*/
/*
 * yaml_parser_scan returns the next token from the token queue,
 * fetching more from the input when the queue is exhausted. After
 * STREAM-END (or a prior error) it yields zeroed tokens and true.
 */
func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
	*token = yaml_token_t{}

	// Nothing more to produce after STREAM-END or a scanner error.
	if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
		return true
	}

	// Make sure at least one token is queued.
	if !parser.token_available && !yaml_parser_fetch_more_tokens(parser) {
		return false
	}

	// Pop the head of the queue.
	*token = parser.tokens[parser.tokens_head]
	parser.tokens_head++
	parser.tokens_parsed++
	parser.token_available = false

	if token.token_type == yaml_STREAM_END_TOKEN {
		parser.stream_end_produced = true
	}
	return true
}
/*
* Set the scanner error and return 0.
*/
/*
 * yaml_parser_set_scanner_error records a scanner error on the parser,
 * capturing the surrounding context (with its mark) and the problem at
 * the current position. Always returns false so callers can write
 * `return yaml_parser_set_scanner_error(...)` directly.
 */
func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string,
	context_mark YAML_mark_t, problem string) bool {
	parser.problem = problem
	parser.problem_mark = parser.mark
	parser.context = context
	parser.context_mark = context_mark
	parser.error = yaml_SCANNER_ERROR
	return false
}
/*
 * yaml_parser_set_scanner_tag_error reports a scanner error raised while
 * scanning a tag (directive == false) or a %TAG directive
 * (directive == true). Always returns false.
 *
 * Fixes two defects: the two context strings were swapped (per libyaml,
 * directive == true must report the "%TAG directive" context), and the
 * problem argument was ignored in favor of a hard-coded message, so
 * callers could never surface their actual error text.
 */
func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark YAML_mark_t, problem string) bool {
	context := "while parsing a tag"
	if directive {
		context = "while parsing a %TAG directive"
	}
	return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
}
/*
* Ensure that the tokens queue contains at least one token which can be
* returned to the Parser.
*/
/*
 * yaml_parser_fetch_more_tokens ensures the token queue contains at
 * least one token that can be handed to the parser. Tokens keep being
 * fetched while the queue is empty, or while a still-possible simple
 * key could occupy the queue head (its ':' may not have been seen yet,
 * so the head token cannot be released).
 *
 * Returns false on a scanner error.
 *
 * Fix: removed the dead, empty `if len(parser.simple_keys) > 0 { }`
 * statement (leftover debugging scaffolding with no effect).
 */
func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
	/* While we need more tokens to fetch, do it. */
	for {
		need_more_tokens := false

		if parser.tokens_head == len(parser.tokens) {
			/* Queue is empty. */
			need_more_tokens = true
		} else {
			/* Drop stale simple keys first; a required stale key is an error. */
			if !yaml_parser_stale_simple_keys(parser) {
				return false
			}
			/* Check if any potential simple key may occupy the head position. */
			for i := range parser.simple_keys {
				simple_key := &parser.simple_keys[i]
				if simple_key.possible &&
					simple_key.token_number == parser.tokens_parsed {
					need_more_tokens = true
					break
				}
			}
		}

		/* We are finished. */
		if !need_more_tokens {
			break
		}

		/* Fetch the next token. */
		if !yaml_parser_fetch_next_token(parser) {
			return false
		}
	}

	parser.token_available = true
	return true
}
/*
* The dispatcher for token fetchers.
*/
/*
 * yaml_parser_fetch_next_token is the dispatcher for token fetchers: it
 * positions the scanner on the next token, decides from the lookahead
 * character(s) which kind of token starts here, and delegates to the
 * matching fetch function. The order of the checks is significant —
 * e.g. '---' must be tested before the plain '-' block-entry indicator.
 * Returns false on a scanner error.
 */
func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
	/* Ensure that the buffer is initialized. */
	if !cache(parser, 1) {
		return false
	}

	/* Check if we just started scanning. Fetch STREAM-START then. */
	if !parser.stream_start_produced {
		return yaml_parser_fetch_stream_start(parser)
	}

	/* Eat whitespaces and comments until we reach the next token. */
	if !yaml_parser_scan_to_next_token(parser) {
		return false
	}

	/* Remove obsolete potential simple keys. */
	if !yaml_parser_stale_simple_keys(parser) {
		return false
	}

	/* Check the indentation level against the current column. */
	if !yaml_parser_unroll_indent(parser, parser.mark.column) {
		return false
	}

	/*
	 * Ensure that the buffer contains at least 4 characters. 4 is the length
	 * of the longest indicators ('--- ' and '... ').
	 */
	if !cache(parser, 4) {
		return false
	}

	/* Is it the end of the stream? */
	buf := parser.buffer
	pos := parser.buffer_pos
	if is_z(buf[pos]) {
		return yaml_parser_fetch_stream_end(parser)
	}

	/* Is it a directive? (Only recognized at column 0.) */
	if parser.mark.column == 0 && buf[pos] == '%' {
		return yaml_parser_fetch_directive(parser)
	}

	/* Is it the document start indicator? ('---' at column 0, then blank/EOF.) */
	if parser.mark.column == 0 &&
		buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' &&
		is_blankz_at(buf, pos+3) {
		return yaml_parser_fetch_document_indicator(parser,
			yaml_DOCUMENT_START_TOKEN)
	}

	/* Is it the document end indicator? ('...' at column 0, then blank/EOF.) */
	if parser.mark.column == 0 &&
		buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' &&
		is_blankz_at(buf, pos+3) {
		return yaml_parser_fetch_document_indicator(parser,
			yaml_DOCUMENT_END_TOKEN)
	}

	/* Is it the flow sequence start indicator? */
	if buf[pos] == '[' {
		return yaml_parser_fetch_flow_collection_start(parser,
			yaml_FLOW_SEQUENCE_START_TOKEN)
	}

	/* Is it the flow mapping start indicator? */
	if buf[pos] == '{' {
		return yaml_parser_fetch_flow_collection_start(parser,
			yaml_FLOW_MAPPING_START_TOKEN)
	}

	/* Is it the flow sequence end indicator? */
	if buf[pos] == ']' {
		return yaml_parser_fetch_flow_collection_end(parser,
			yaml_FLOW_SEQUENCE_END_TOKEN)
	}

	/* Is it the flow mapping end indicator? */
	if buf[pos] == '}' {
		return yaml_parser_fetch_flow_collection_end(parser,
			yaml_FLOW_MAPPING_END_TOKEN)
	}

	/* Is it the flow entry indicator? */
	if buf[pos] == ',' {
		return yaml_parser_fetch_flow_entry(parser)
	}

	/* Is it the block entry indicator? ('-' followed by blank/EOF.) */
	if buf[pos] == '-' && is_blankz_at(buf, pos+1) {
		return yaml_parser_fetch_block_entry(parser)
	}

	/* Is it the key indicator? ('?' needs a trailing blank only in block context.) */
	if buf[pos] == '?' &&
		(parser.flow_level > 0 || is_blankz_at(buf, pos+1)) {
		return yaml_parser_fetch_key(parser)
	}

	/* Is it the value indicator? (':' needs a trailing blank only in block context.) */
	if buf[pos] == ':' &&
		(parser.flow_level > 0 || is_blankz_at(buf, pos+1)) {
		return yaml_parser_fetch_value(parser)
	}

	/* Is it an alias? */
	if buf[pos] == '*' {
		return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
	}

	/* Is it an anchor? */
	if buf[pos] == '&' {
		return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
	}

	/* Is it a tag? */
	if buf[pos] == '!' {
		return yaml_parser_fetch_tag(parser)
	}

	/* Is it a literal scalar? (Block context only.) */
	if buf[pos] == '|' && parser.flow_level == 0 {
		return yaml_parser_fetch_block_scalar(parser, true)
	}

	/* Is it a folded scalar? (Block context only.) */
	if buf[pos] == '>' && parser.flow_level == 0 {
		return yaml_parser_fetch_block_scalar(parser, false)
	}

	/* Is it a single-quoted scalar? */
	if buf[pos] == '\'' {
		return yaml_parser_fetch_flow_scalar(parser, true)
	}

	/* Is it a double-quoted scalar? */
	if buf[pos] == '"' {
		return yaml_parser_fetch_flow_scalar(parser, false)
	}

	/*
	 * Is it a plain scalar?
	 *
	 * A plain scalar may start with any non-blank characters except
	 *
	 *      '-', '?', ':', ',', '[', ']', '{', '}',
	 *      '#', '&', '*', '!', '|', '>', '\'', '\"',
	 *      '%', '@', '`'.
	 *
	 * In the block context (and, for the '-' indicator, in the flow context
	 * too), it may also start with the characters
	 *
	 *      '-', '?', ':'
	 *
	 * if it is followed by a non-space character.
	 *
	 * The last rule is more restrictive than the specification requires.
	 */
	b := buf[pos]
	if !(is_blankz_at(buf, pos) || b == '-' ||
		b == '?' || b == ':' ||
		b == ',' || b == '[' ||
		b == ']' || b == '{' ||
		b == '}' || b == '#' ||
		b == '&' || b == '*' ||
		b == '!' || b == '|' ||
		b == '>' || b == '\'' ||
		b == '"' || b == '%' ||
		b == '@' || b == '`') ||
		(b == '-' && !is_blank(buf[pos+1])) ||
		(parser.flow_level == 0 &&
			(buf[pos] == '?' || buf[pos] == ':') &&
			!is_blank(buf[pos+1])) {
		return yaml_parser_fetch_plain_scalar(parser)
	}

	/*
	 * If we don't determine the token type so far, it is an error.
	 */
	return yaml_parser_set_scanner_error(parser,
		"while scanning for the next token", parser.mark,
		"found character that cannot start any token")
}
/*
* Check the list of potential simple keys and remove the positions that
* cannot contain simple keys anymore.
*/
/*
 * yaml_parser_stale_simple_keys walks the potential simple keys (one
 * per flow level) and disables any that can no longer be valid: per the
 * spec, a simple key is limited to a single line and to fewer than 1024
 * characters. Disabling a *required* key is a scanner error.
 */
func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
	for i := range parser.simple_keys {
		key := &parser.simple_keys[i]
		if !key.possible {
			continue
		}
		// Stale when the key started on an earlier line, or more than
		// 1024 characters back on this one.
		stale := key.mark.line < parser.mark.line ||
			key.mark.index+1024 < parser.mark.index
		if !stale {
			continue
		}
		if key.required {
			return yaml_parser_set_scanner_error(parser,
				"while scanning a simple key", key.mark,
				"could not find expected ':'")
		}
		key.possible = false
	}
	return true
}
/*
* Check if a simple key may start at the current position and add it if
* needed.
*/
/*
 * yaml_parser_save_simple_key records that a simple key may start at
 * the current position, replacing any previous candidate at the
 * current flow level. In the block context a key is *required* when
 * the current column coincides with the indentation level.
 */
func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
	// In the block context, a key at the indentation column is mandatory.
	required := parser.flow_level == 0 &&
		parser.indent == parser.mark.column

	// A required key is always the first token on its line, where simple
	// keys are allowed — anything else indicates a scanner bug.
	if required && !parser.simple_key_allowed {
		panic("impossible")
	}

	if !parser.simple_key_allowed {
		return true
	}

	key := yaml_simple_key_t{
		possible: true,
		required: required,
		// Absolute number of the token this key would precede.
		token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
	}
	key.mark = parser.mark

	// Drop any earlier candidate at this level before storing the new one.
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}
	parser.simple_keys[len(parser.simple_keys)-1] = key
	return true
}
/*
* Remove a potential simple key at the current flow level.
*/
/*
 * yaml_parser_remove_simple_key withdraws the potential simple key at
 * the current flow level. Withdrawing a key that is still required is
 * an error — its ':' was never found.
 */
func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
	key := &parser.simple_keys[len(parser.simple_keys)-1]
	if key.possible && key.required {
		return yaml_parser_set_scanner_error(parser,
			"while scanning a simple key", key.mark,
			"could not find expected ':'")
	}
	key.possible = false
	return true
}
/*
* Increase the flow level and resize the simple key list if needed.
*/
/*
 * yaml_parser_increase_flow_level enters a new flow collection,
 * pushing a fresh (empty) simple-key slot for the new level.
 */
func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
	parser.flow_level++
	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
	return true
}
/*
* Decrease the flow level.
*/
/*
 * yaml_parser_decrease_flow_level leaves the current flow collection,
 * popping its simple-key slot. A no-op at the top (block) level.
 */
func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
	if parser.flow_level == 0 {
		return true
	}
	parser.flow_level--
	parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
	return true
}
/*
 * Push the current indentation level to the stack and set the new level
 * if the current column is greater than the indentation level. In this
 * case, append or insert the specified token into the token queue.
 */
/*
 * yaml_parser_roll_indent pushes the current indentation level onto the
 * stack and adopts column as the new level, queueing the given
 * block-start token (appended when number == -1, otherwise inserted at
 * that absolute token number). A no-op in the flow context or when the
 * column does not increase the indentation.
 */
func yaml_parser_roll_indent(parser *yaml_parser_t, column int,
	number int, token_type yaml_token_type_t, mark YAML_mark_t) bool {
	/* Indentation is irrelevant inside flow collections. */
	if parser.flow_level > 0 {
		return true
	}

	/* Only roll when the column actually increases the indentation. */
	if parser.indent != -1 && parser.indent >= column {
		return true
	}

	/* Push the old level and adopt the new one. */
	parser.indents = append(parser.indents, parser.indent)
	parser.indent = column

	token := yaml_token_t{
		token_type: token_type,
		start_mark: mark,
		end_mark:   mark,
	}
	/* number == -1 means append; otherwise convert to a queue-relative slot. */
	if number > -1 {
		number -= parser.tokens_parsed
	}
	insert_token(parser, number, &token)
	return true
}
/*
 * Pop indentation levels from the indents stack until the current level
 * becomes less or equal to the column. For each indentation level popped,
 * append a BLOCK-END token to the queue.
 */
func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
	/* In the flow context, do nothing. */
	if parser.flow_level > 0 {
		return true
	}
	/*
	 * column is unsigned and parser->indent is signed, so if
	 * parser->indent is less than zero the conditional in the while
	 * loop below is incorrect. Guard against that.
	 * (Comment inherited from the C original; in this Go port both
	 * values are ints, but the guard is still a harmless fast path.)
	 */
	if parser.indent < 0 {
		return true
	}
	/* Loop through the indentation levels in the stack. */
	for parser.indent > column {
		/* Create a BLOCK-END token and append it to the queue. */
		token := yaml_token_t{
			token_type: yaml_BLOCK_END_TOKEN,
			start_mark: parser.mark,
			end_mark:   parser.mark,
		}
		insert_token(parser, -1, &token)
		/* Pop the indentation level. */
		parser.indent = parser.indents[len(parser.indents)-1]
		parser.indents = parser.indents[:len(parser.indents)-1]
	}
	return true
}
/*
 * Pop indentation levels from the indents stack until the current
 * level resets to -1 (the "no indentation yet" sentinel). For each
 * indentation level popped, append a BLOCK-END token to the queue.
 * Used when a directive or document marker terminates all open blocks.
 */
func yaml_parser_reset_indent(parser *yaml_parser_t) bool {
	/* In the flow context, do nothing. */
	if parser.flow_level > 0 {
		return true
	}
	/* Loop through the indentation levels in the stack. */
	for parser.indent > -1 {
		/* Create a BLOCK-END token and append it to the queue. */
		token := yaml_token_t{
			token_type: yaml_BLOCK_END_TOKEN,
			start_mark: parser.mark,
			end_mark:   parser.mark,
		}
		insert_token(parser, -1, &token)
		/* Pop the indentation level. */
		parser.indent = parser.indents[len(parser.indents)-1]
		parser.indents = parser.indents[:len(parser.indents)-1]
	}
	return true
}
/*
 * Initialize the scanner state and emit the STREAM-START token.
 */
func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
	// No indentation has been seen yet.
	parser.indent = -1
	// Seed the simple key stack for the top level.
	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
	// A simple key may appear right at the beginning of the stream,
	// and from here on the scanner counts as started.
	parser.simple_key_allowed = true
	parser.stream_start_produced = true
	// Queue the STREAM-START token.
	tok := yaml_token_t{
		token_type: yaml_STREAM_START_TOKEN,
		start_mark: parser.mark,
		end_mark:   parser.mark,
		encoding:   parser.encoding,
	}
	insert_token(parser, -1, &tok)
	return true
}
/*
 * Produce the STREAM-END token and shut down the scanner.
 */
func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
	/* Force new line so STREAM-END always sits at column 0. */
	if parser.mark.column != 0 {
		parser.mark.column = 0
		parser.mark.line++
	}
	/* Close all open block levels (emits BLOCK-END tokens). */
	if !yaml_parser_reset_indent(parser) {
		return false
	}
	/* Reset simple keys; errors if a required key is unresolved. */
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}
	parser.simple_key_allowed = false
	/* Create the STREAM-END token and append it to the queue. */
	token := yaml_token_t{
		token_type: yaml_STREAM_END_TOKEN,
		start_mark: parser.mark,
		end_mark:   parser.mark,
	}
	insert_token(parser, -1, &token)
	return true
}
/*
 * Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
 * A directive terminates any open block context before it is scanned.
 */
func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
	/* Reset the indentation level (emits BLOCK-END tokens as needed). */
	if !yaml_parser_reset_indent(parser) {
		return false
	}
	/* Reset simple keys; errors if a required key is unresolved. */
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}
	parser.simple_key_allowed = false
	/* Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. */
	var token yaml_token_t
	if !yaml_parser_scan_directive(parser, &token) {
		return false
	}
	/* Append the token to the queue. */
	insert_token(parser, -1, &token)
	return true
}
/*
 * Produce the DOCUMENT-START or DOCUMENT-END token for a '---' or '...'
 * marker (token_type selects which).
 */
func yaml_parser_fetch_document_indicator(parser *yaml_parser_t,
	token_type yaml_token_type_t) bool {
	// A document marker closes every open block level...
	if !yaml_parser_reset_indent(parser) {
		return false
	}
	// ...and invalidates any pending simple key.
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}
	parser.simple_key_allowed = false
	// Consume the three indicator characters.
	begin := parser.mark
	for i := 0; i < 3; i++ {
		skip(parser)
	}
	// Queue the DOCUMENT-START or DOCUMENT-END token.
	tok := yaml_token_t{
		token_type: token_type,
		start_mark: begin,
		end_mark:   parser.mark,
	}
	insert_token(parser, -1, &tok)
	return true
}
/*
 * Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token for a
 * '[' or '{' indicator (token_type selects which).
 */
func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t,
	token_type yaml_token_type_t) bool {
	// '[' and '{' may themselves begin a simple key.
	if !yaml_parser_save_simple_key(parser) {
		return false
	}
	// Descend one flow level.
	if !yaml_parser_increase_flow_level(parser) {
		return false
	}
	// A simple key may follow the opening indicator.
	parser.simple_key_allowed = true
	// Consume the indicator character.
	begin := parser.mark
	skip(parser)
	// Queue the collection-start token.
	tok := yaml_token_t{
		token_type: token_type,
		start_mark: begin,
		end_mark:   parser.mark,
	}
	insert_token(parser, -1, &tok)
	return true
}
/*
 * Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token for a
 * ']' or '}' indicator (token_type selects which).
 */
func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t,
	token_type yaml_token_type_t) bool {
	// Any pending simple key on this flow level is now void.
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}
	// Ascend one flow level.
	if !yaml_parser_decrease_flow_level(parser) {
		return false
	}
	// Nothing after ']' or '}' can start a simple key.
	parser.simple_key_allowed = false
	// Consume the indicator character.
	begin := parser.mark
	skip(parser)
	// Queue the collection-end token.
	tok := yaml_token_t{
		token_type: token_type,
		start_mark: begin,
		end_mark:   parser.mark,
	}
	insert_token(parser, -1, &tok)
	return true
}
/*
 * Produce the FLOW-ENTRY token for a ',' separator.
 */
func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
	// The separator voids any pending simple key on this flow level.
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}
	// A simple key may start right after the ','.
	parser.simple_key_allowed = true
	// Consume the ',' character.
	begin := parser.mark
	skip(parser)
	// Queue the FLOW-ENTRY token.
	tok := yaml_token_t{
		token_type: yaml_FLOW_ENTRY_TOKEN,
		start_mark: begin,
		end_mark:   parser.mark,
	}
	insert_token(parser, -1, &tok)
	return true
}
/*
 * Produce the BLOCK-ENTRY token for a '-' indicator, opening a
 * BLOCK-SEQUENCE-START first when this is the sequence's first entry.
 */
func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
	/* Check if the scanner is in the block context. */
	if parser.flow_level == 0 {
		/* Check if we are allowed to start a new entry. */
		if !parser.simple_key_allowed {
			return yaml_parser_set_scanner_error(parser, "", parser.mark,
				"block sequence entries are not allowed in this context")
		}
		/* Add the BLOCK-SEQUENCE-START token if needed (number -1 = append). */
		if !yaml_parser_roll_indent(parser, parser.mark.column, -1,
			yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
			return false
		}
	} else {
		/*
		 * It is an error for the '-' indicator to occur in the flow context,
		 * but we let the Parser detect and report about it because the Parser
		 * is able to point to the context.
		 */
	}
	/* Reset any potential simple keys on the current flow level. */
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}
	/* Simple keys are allowed after '-'. */
	parser.simple_key_allowed = true
	/* Consume the '-' indicator. */
	start_mark := parser.mark
	skip(parser)
	end_mark := parser.mark
	/* Create the BLOCK-ENTRY token and append it to the queue. */
	token := yaml_token_t{
		token_type: yaml_BLOCK_ENTRY_TOKEN,
		start_mark: start_mark,
		end_mark:   end_mark,
	}
	insert_token(parser, -1, &token)
	return true
}
/*
 * Produce the KEY token for an explicit '?' indicator.
 */
func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
	/* In the block context, additional checks are required. */
	if parser.flow_level == 0 {
		/* Check if we are allowed to start a new key (not necessarily simple). */
		if !parser.simple_key_allowed {
			return yaml_parser_set_scanner_error(parser, "", parser.mark,
				"mapping keys are not allowed in this context")
		}
		/* Add the BLOCK-MAPPING-START token if needed (number -1 = append). */
		if !yaml_parser_roll_indent(parser, parser.mark.column, -1,
			yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
			return false
		}
	}
	/* Reset any potential simple keys on the current flow level. */
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}
	/* Simple keys are allowed after '?' in the block context only. */
	parser.simple_key_allowed = (parser.flow_level == 0)
	/* Consume the '?' indicator. */
	start_mark := parser.mark
	skip(parser)
	end_mark := parser.mark
	/* Create the KEY token and append it to the queue. */
	token := yaml_token_t{
		token_type: yaml_KEY_TOKEN,
		start_mark: start_mark,
		end_mark:   end_mark,
	}
	insert_token(parser, -1, &token)
	return true
}
/*
 * Produce the VALUE token for a ':' indicator.  When a simple key is
 * pending, a KEY token is retroactively inserted at the position the key
 * started, possibly preceded by a BLOCK-MAPPING-START.
 */
func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
	/* Have we found a simple key? */
	if simple_key.possible {
		/* Create the KEY token and insert it into the queue. */
		token := yaml_token_t{
			token_type: yaml_KEY_TOKEN,
			start_mark: simple_key.mark,
			end_mark:   simple_key.mark,
		}
		/* token_number is absolute; translate to an offset into the live queue. */
		insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
		/* In the block context, we may need to add the BLOCK-MAPPING-START token. */
		if !yaml_parser_roll_indent(parser, simple_key.mark.column,
			simple_key.token_number,
			yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
			return false
		}
		/* Remove the simple key. */
		simple_key.possible = false
		/* A simple key cannot follow another simple key. */
		parser.simple_key_allowed = false
	} else {
		/* The ':' indicator follows a complex key. */
		/* In the block context, extra checks are required. */
		if parser.flow_level == 0 {
			/* Check if we are allowed to start a complex value. */
			if !parser.simple_key_allowed {
				return yaml_parser_set_scanner_error(parser, "", parser.mark,
					"mapping values are not allowed in this context")
			}
			/* Add the BLOCK-MAPPING-START token if needed. */
			if !yaml_parser_roll_indent(parser, parser.mark.column, -1,
				yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
				return false
			}
		}
		/* Simple keys after ':' are allowed in the block context only. */
		parser.simple_key_allowed = (parser.flow_level == 0)
	}
	/* Consume the ':' indicator. */
	start_mark := parser.mark
	skip(parser)
	end_mark := parser.mark
	/* Create the VALUE token and append it to the queue. */
	token := yaml_token_t{
		token_type: yaml_VALUE_TOKEN,
		start_mark: start_mark,
		end_mark:   end_mark,
	}
	insert_token(parser, -1, &token)
	return true
}
/*
 * Produce an ALIAS or ANCHOR token (token_type selects which).
 */
func yaml_parser_fetch_anchor(parser *yaml_parser_t, token_type yaml_token_type_t) bool {
	// An anchor or alias could itself be a simple key.
	if !yaml_parser_save_simple_key(parser) {
		return false
	}
	// Nothing immediately after it can start another simple key.
	parser.simple_key_allowed = false
	// Scan the anchor/alias name and queue the resulting token.
	var tok yaml_token_t
	if !yaml_parser_scan_anchor(parser, &tok, token_type) {
		return false
	}
	insert_token(parser, -1, &tok)
	return true
}
/*
 * Produce the TAG token.
 */
func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
	// A tag could itself be a simple key.
	if !yaml_parser_save_simple_key(parser) {
		return false
	}
	// Nothing immediately after a tag can start another simple key.
	parser.simple_key_allowed = false
	// Scan the tag and queue the resulting token.
	var tok yaml_token_t
	if !yaml_parser_scan_tag(parser, &tok) {
		return false
	}
	insert_token(parser, -1, &tok)
	return true
}
/*
 * Produce a SCALAR(...,literal) or SCALAR(...,folded) token for a
 * '|' or '>' block scalar (literal selects which).
 */
func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
	// A block scalar can never be a simple key itself.
	if !yaml_parser_remove_simple_key(parser) {
		return false
	}
	// But a simple key may follow it on a later line.
	parser.simple_key_allowed = true
	// Scan the scalar body and queue the resulting token.
	var tok yaml_token_t
	if !yaml_parser_scan_block_scalar(parser, &tok, literal) {
		return false
	}
	insert_token(parser, -1, &tok)
	return true
}
/*
 * Produce a SCALAR(...,single-quoted) or SCALAR(...,double-quoted) token
 * (single selects which).
 */
func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
	// A quoted scalar could itself be a simple key.
	if !yaml_parser_save_simple_key(parser) {
		return false
	}
	// Nothing immediately after it can start another simple key.
	parser.simple_key_allowed = false
	// Scan the quoted scalar and queue the resulting token.
	var tok yaml_token_t
	if !yaml_parser_scan_flow_scalar(parser, &tok, single) {
		return false
	}
	insert_token(parser, -1, &tok)
	return true
}
/*
 * Produce a SCALAR(...,plain) token.
 */
func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
	// A plain scalar could itself be a simple key.
	if !yaml_parser_save_simple_key(parser) {
		return false
	}
	// Nothing immediately after it can start another simple key.
	parser.simple_key_allowed = false
	// Scan the plain scalar and queue the resulting token.
	var tok yaml_token_t
	if !yaml_parser_scan_plain_scalar(parser, &tok) {
		return false
	}
	insert_token(parser, -1, &tok)
	return true
}
/*
 * Eat whitespaces and comments until the next token is found.
 * cache(parser, n) guarantees at least n lookahead bytes in the buffer
 * before each inspection; skip/skip_line advance past them.
 */
func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
	/* Until the next token is not found. */
	for {
		/* Allow the BOM mark to start a line. */
		if !cache(parser, 1) {
			return false
		}
		if parser.mark.column == 0 && is_bom_at(parser.buffer, parser.buffer_pos) {
			skip(parser)
		}
		/*
		 * Eat whitespaces.
		 *
		 * Tabs are allowed:
		 *
		 * - in the flow context;
		 * - in the block context, but not at the beginning of the line or
		 * after '-', '?', or ':' (complex value).
		 */
		if !cache(parser, 1) {
			return false
		}
		for parser.buffer[parser.buffer_pos] == ' ' ||
			((parser.flow_level > 0 || !parser.simple_key_allowed) &&
				parser.buffer[parser.buffer_pos] == '\t') {
			skip(parser)
			if !cache(parser, 1) {
				return false
			}
		}
		/* Eat a comment until a line break. */
		if parser.buffer[parser.buffer_pos] == '#' {
			for !is_breakz_at(parser.buffer, parser.buffer_pos) {
				skip(parser)
				if !cache(parser, 1) {
					return false
				}
			}
		}
		/* If it is a line break, eat it (needs 2 bytes for CRLF). */
		if is_break_at(parser.buffer, parser.buffer_pos) {
			if !cache(parser, 2) {
				return false
			}
			skip_line(parser)
			/* In the block context, a new line may start a simple key. */
			if parser.flow_level == 0 {
				parser.simple_key_allowed = true
			}
		} else {
			/* We have found a token. */
			break
		}
	}
	return true
}
/*
 * Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
 *
 * Scope:
 * %YAML 1.1 # a comment \n
 * ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 * %TAG !yaml! tag:yaml.org,2002: \n
 * ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *
 * On success *token is filled with a VERSION-DIRECTIVE or TAG-DIRECTIVE
 * token; any other directive name is a scanner error.
 */
func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
	/* Eat '%'. */
	start_mark := parser.mark
	skip(parser)
	/* Scan the directive name. */
	var name []byte
	if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
		return false
	}
	/* Is it a YAML directive? */
	var major, minor int
	if bytes.Equal(name, []byte("YAML")) {
		/* Scan the VERSION directive value. */
		if !yaml_parser_scan_version_directive_value(parser, start_mark,
			&major, &minor) {
			return false
		}
		end_mark := parser.mark
		/* Create a VERSION-DIRECTIVE token. */
		*token = yaml_token_t{
			token_type: yaml_VERSION_DIRECTIVE_TOKEN,
			start_mark: start_mark,
			end_mark:   end_mark,
			major:      major,
			minor:      minor,
		}
	} else if bytes.Equal(name, []byte("TAG")) {
		/* Is it a TAG directive? */
		/* Scan the TAG directive value. */
		var handle, prefix []byte
		if !yaml_parser_scan_tag_directive_value(parser, start_mark,
			&handle, &prefix) {
			return false
		}
		end_mark := parser.mark
		/* Create a TAG-DIRECTIVE token. */
		*token = yaml_token_t{
			token_type: yaml_TAG_DIRECTIVE_TOKEN,
			start_mark: start_mark,
			end_mark:   end_mark,
			value:      handle,
			prefix:     prefix,
		}
	} else {
		/* Unknown directive. (Message typo "uknown" fixed.) */
		yaml_parser_set_scanner_error(parser, "while scanning a directive",
			start_mark, "found unknown directive name")
		return false
	}
	/* Eat the rest of the line including any comments. */
	if !cache(parser, 1) {
		return false
	}
	for is_blank(parser.buffer[parser.buffer_pos]) {
		skip(parser)
		if !cache(parser, 1) {
			return false
		}
	}
	if parser.buffer[parser.buffer_pos] == '#' {
		for !is_breakz_at(parser.buffer, parser.buffer_pos) {
			skip(parser)
			if !cache(parser, 1) {
				return false
			}
		}
	}
	/* Check if we are at the end of the line. */
	if !is_breakz_at(parser.buffer, parser.buffer_pos) {
		yaml_parser_set_scanner_error(parser, "while scanning a directive",
			start_mark, "did not find expected comment or line break")
		return false
	}
	/* Eat a line break. */
	if is_break_at(parser.buffer, parser.buffer_pos) {
		if !cache(parser, 2) {
			return false
		}
		skip_line(parser)
	}
	return true
}
/*
 * Scan the directive name into *name.
 *
 * Scope:
 * %YAML 1.1 # a comment \n
 * ^^^^
 * %TAG !yaml! tag:yaml.org,2002: \n
 * ^^^
 */
func yaml_parser_scan_directive_name(parser *yaml_parser_t,
	start_mark YAML_mark_t, name *[]byte) bool {
	/* Consume the directive name (alphanumeric run). */
	if !cache(parser, 1) {
		return false
	}
	var s []byte
	for is_alpha(parser.buffer[parser.buffer_pos]) {
		s = read(parser, s)
		if !cache(parser, 1) {
			return false
		}
	}
	/* Check if the name is empty. */
	if len(s) == 0 {
		yaml_parser_set_scanner_error(parser, "while scanning a directive",
			start_mark, "could not find expected directive name")
		return false
	}
	/* Check for a blank character after the name. */
	if !is_blankz_at(parser.buffer, parser.buffer_pos) {
		yaml_parser_set_scanner_error(parser, "while scanning a directive",
			start_mark, "found unexpected non-alphabetical character")
		return false
	}
	*name = s
	return true
}
/*
 * Scan the value of VERSION-DIRECTIVE into *major and *minor.
 *
 * Scope:
 * %YAML 1.1 # a comment \n
 * ^^^^^^
 */
func yaml_parser_scan_version_directive_value(parser *yaml_parser_t,
	start_mark YAML_mark_t, major *int, minor *int) bool {
	/* Eat whitespaces between the name and the number. */
	if !cache(parser, 1) {
		return false
	}
	for is_blank(parser.buffer[parser.buffer_pos]) {
		skip(parser)
		if !cache(parser, 1) {
			return false
		}
	}
	/* Consume the major version number. */
	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
		return false
	}
	/* Eat the '.' separating major from minor. */
	if parser.buffer[parser.buffer_pos] != '.' {
		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
			start_mark, "did not find expected digit or '.' character")
	}
	skip(parser)
	/* Consume the minor version number. */
	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
		return false
	}
	return true
}
/* MAX_NUMBER_LENGTH bounds the number of digits accepted in a %YAML version number. */
const MAX_NUMBER_LENGTH = 9
/*
 * Scan one version number component of VERSION-DIRECTIVE into *number.
 *
 * Scope:
 * %YAML 1.1 # a comment \n
 * ^
 * %YAML 1.1 # a comment \n
 * ^
 */
func yaml_parser_scan_version_directive_number(parser *yaml_parser_t,
	start_mark YAML_mark_t, number *int) bool {
	/* Repeat while the next character is a digit. */
	if !cache(parser, 1) {
		return false
	}
	value := 0
	length := 0
	for is_digit(parser.buffer[parser.buffer_pos]) {
		/* Check if the number is too long (bounded by MAX_NUMBER_LENGTH). */
		length++
		if length > MAX_NUMBER_LENGTH {
			return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
				start_mark, "found extremely long version number")
		}
		value = value*10 + as_digit(parser.buffer[parser.buffer_pos])
		skip(parser)
		if !cache(parser, 1) {
			return false
		}
	}
	/* Check if at least one digit was present. */
	if length == 0 {
		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
			start_mark, "did not find expected version number")
	}
	*number = value
	return true
}
/*
 * Scan the value of a TAG-DIRECTIVE token into *handle and *prefix.
 *
 * Scope:
 * %TAG !yaml! tag:yaml.org,2002: \n
 * ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 */
func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t,
	start_mark YAML_mark_t, handle, prefix *[]byte) bool {
	/* Eat whitespaces before the handle. */
	if !cache(parser, 1) {
		return false
	}
	for is_blank(parser.buffer[parser.buffer_pos]) {
		skip(parser)
		if !cache(parser, 1) {
			return false
		}
	}
	/* Scan a handle (directive mode: strict '!...!' form required). */
	var handle_value []byte
	if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
		return false
	}
	/* Expect a whitespace separating handle and prefix. */
	if !cache(parser, 1) {
		return false
	}
	if !is_blank(parser.buffer[parser.buffer_pos]) {
		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
			start_mark, "did not find expected whitespace")
		return false
	}
	/* Eat whitespaces. */
	for is_blank(parser.buffer[parser.buffer_pos]) {
		skip(parser)
		if !cache(parser, 1) {
			return false
		}
	}
	/* Scan a prefix (a tag URI). */
	var prefix_value []byte
	if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
		return false
	}
	/* Expect a whitespace or line break after the prefix. */
	if !cache(parser, 1) {
		return false
	}
	if !is_blankz_at(parser.buffer, parser.buffer_pos) {
		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
			start_mark, "did not find expected whitespace or line break")
		return false
	}
	/* Only publish the results once everything validated. */
	*handle = handle_value
	*prefix = prefix_value
	return true
}
/*
 * Scan an ALIAS or ANCHOR token (token_type selects which).  The leading
 * indicator character ('*' or '&') is consumed here.
 */
func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t,
	token_type yaml_token_type_t) bool {
	/* Eat the indicator character. */
	start_mark := parser.mark
	skip(parser)
	/* Consume the value (alphanumeric run). */
	if !cache(parser, 1) {
		return false
	}
	var s []byte
	for is_alpha(parser.buffer[parser.buffer_pos]) {
		s = read(parser, s)
		if !cache(parser, 1) {
			return false
		}
	}
	end_mark := parser.mark
	/*
	 * Check if length of the anchor is greater than 0 and it is followed by
	 * a whitespace character or one of the indicators:
	 *
	 * '?', ':', ',', ']', '}', '%', '@', '`'.
	 */
	b := parser.buffer[parser.buffer_pos]
	if len(s) == 0 || !(is_blankz_at(parser.buffer, parser.buffer_pos) || b == '?' ||
		b == ':' || b == ',' ||
		b == ']' || b == '}' ||
		b == '%' || b == '@' ||
		b == '`') {
		/* Report the right context for the token kind being scanned. */
		context := "while scanning an anchor"
		if token_type != yaml_ANCHOR_TOKEN {
			context = "while scanning an alias"
		}
		yaml_parser_set_scanner_error(parser, context, start_mark,
			"did not find expected alphabetic or numeric character")
		return false
	}
	/* Create a token. */
	*token = yaml_token_t{
		token_type: token_type,
		start_mark: start_mark,
		end_mark:   end_mark,
		value:      s,
	}
	return true
}
/*
 * Scan a TAG token.  Handles the three surface forms: verbatim '!<uri>',
 * the bare non-specific '!', and '!suffix' / '!handle!suffix'.
 */
func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
	start_mark := parser.mark
	/* Check if the tag is in the canonical (verbatim) form: need 2 bytes. */
	if !cache(parser, 2) {
		return false
	}
	var handle []byte
	var suffix []byte
	if parser.buffer[parser.buffer_pos+1] == '<' {
		/* Set the handle to '' */
		/* Eat '!<' */
		skip(parser)
		skip(parser)
		/* Consume the tag value. */
		if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
			return false
		}
		/* Check for '>' and eat it. */
		if parser.buffer[parser.buffer_pos] != '>' {
			yaml_parser_set_scanner_error(parser, "while scanning a tag",
				start_mark, "did not find the expected '>'")
			return false
		}
		skip(parser)
	} else if is_blank(parser.buffer[parser.buffer_pos+1]) {
		// NON-SPECIFIED: a lone '!' followed by a blank; both handle and
		// suffix stay empty.
		skip(parser)
	} else {
		/* The tag has either the '!suffix' or the '!handle!suffix' form. */
		/* First, try to scan a handle. */
		if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
			return false
		}
		/* Check if it is, indeed, handle (starts and ends with '!'). */
		if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
			/* Scan the suffix now. */
			if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
				return false
			}
		} else {
			/* It wasn't a handle after all. Scan the rest of the tag. */
			if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
				return false
			}
			/* Set the handle to '!'. */
			handle = []byte{'!'}
			/*
			 * A special case: the '!' tag. Set the handle to '' and the
			 * suffix to '!'.
			 */
			if len(suffix) == 0 {
				handle, suffix = suffix, handle
			}
		}
	}
	/* Check the character which ends the tag. */
	if !cache(parser, 1) {
		return false
	}
	if !is_blankz_at(parser.buffer, parser.buffer_pos) {
		yaml_parser_set_scanner_error(parser, "while scanning a tag",
			start_mark, "did not find expected whitespace or line break")
		return false
	}
	end_mark := parser.mark
	/* Create a token. */
	*token = yaml_token_t{
		token_type: yaml_TAG_TOKEN,
		start_mark: start_mark,
		end_mark:   end_mark,
		value:      handle,
		suffix:     suffix,
	}
	return true
}
/*
 * Scan a tag handle into *handle.  directive is true when called from a
 * %TAG directive, in which case the handle must be a full '!...!' (or a
 * lone '!'); in a tag token a partial handle may just be the start of a
 * URI and is accepted as-is.
 */
func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool,
	start_mark YAML_mark_t, handle *[]byte) bool {
	/* Check the initial '!' character. */
	if !cache(parser, 1) {
		return false
	}
	if parser.buffer[parser.buffer_pos] != '!' {
		yaml_parser_set_scanner_tag_error(parser, directive,
			start_mark, "did not find expected '!'")
		return false
	}
	/* Copy the '!' character. */
	var s []byte
	s = read(parser, s)
	/* Copy all subsequent alphabetical and numerical characters. */
	if !cache(parser, 1) {
		return false
	}
	for is_alpha(parser.buffer[parser.buffer_pos]) {
		s = read(parser, s)
		if !cache(parser, 1) {
			return false
		}
	}
	/* Check if the trailing character is '!' and copy it. */
	if parser.buffer[parser.buffer_pos] == '!' {
		s = read(parser, s)
	} else {
		/*
		 * It's either the '!' tag or not really a tag handle. If it's a %TAG
		 * directive, it's an error. If it's a tag token, it must be a part of
		 * URI.
		 */
		if directive && !(s[0] == '!' && len(s) == 1) {
			yaml_parser_set_scanner_tag_error(parser, directive,
				start_mark, "did not find expected '!'")
			return false
		}
	}
	*handle = s
	return true
}
/*
 * Scan a tag URI into *uri.  head, when non-empty, is a previously scanned
 * fragment (minus its leading '!') that the URI continues.
 */
func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool,
	head []byte, start_mark YAML_mark_t, uri *[]byte) bool {
	var s []byte
	/*
	 * Copy the head if needed.
	 *
	 * Note that we don't copy the leading '!' character.
	 */
	if len(head) > 1 {
		s = append(s, head[1:]...)
	}
	/* Scan the tag. */
	if !cache(parser, 1) {
		return false
	}
	/*
	 * The set of characters that may appear in URI is as follows:
	 *
	 * '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
	 * '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
	 * '%'.
	 */
	b := parser.buffer[parser.buffer_pos]
	for is_alpha(b) || b == ';' ||
		b == '/' || b == '?' ||
		b == ':' || b == '@' ||
		b == '&' || b == '=' ||
		b == '+' || b == '$' ||
		b == ',' || b == '.' ||
		b == '!' || b == '~' ||
		b == '*' || b == '\'' ||
		b == '(' || b == ')' ||
		b == '[' || b == ']' ||
		b == '%' {
		/* Check if it is a URI-escape sequence ('%xx'). */
		if b == '%' {
			if !yaml_parser_scan_uri_escapes(parser,
				directive, start_mark, &s) {
				return false
			}
		} else {
			s = read(parser, s)
		}
		if !cache(parser, 1) {
			return false
		}
		b = parser.buffer[parser.buffer_pos]
	}
	/* Check if the tag is non-empty. */
	if len(s) == 0 {
		yaml_parser_set_scanner_tag_error(parser, directive,
			start_mark, "did not find expected tag URI")
		return false
	}
	*uri = s
	return true
}
/*
 * Decode a URI-escape sequence ('%xx' octets) corresponding to a single
 * UTF-8 character and append the raw octets to *s.  The sentinel w == 10
 * marks the first iteration, where the leading octet determines how many
 * continuation octets follow.
 */
func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool,
	start_mark YAML_mark_t, s *[]byte) bool {
	/* Decode the required number of characters. */
	w := 10
	for w > 0 {
		/* Check for a URI-escaped octet: need '%' plus two hex digits. */
		if !cache(parser, 3) {
			return false
		}
		if !(parser.buffer[parser.buffer_pos] == '%' &&
			is_hex(parser.buffer[parser.buffer_pos+1]) &&
			is_hex(parser.buffer[parser.buffer_pos+2])) {
			return yaml_parser_set_scanner_tag_error(parser, directive,
				start_mark, "did not find URI escaped octet")
		}
		/* Get the octet. */
		octet := byte((as_hex(parser.buffer[parser.buffer_pos+1]) << 4) +
			as_hex(parser.buffer[parser.buffer_pos+2]))
		/* If it is the leading octet, determine the length of the UTF-8 sequence. */
		if w == 10 {
			w = width(octet)
			if w == 0 {
				return yaml_parser_set_scanner_tag_error(parser, directive,
					start_mark, "found an incorrect leading UTF-8 octet")
			}
		} else {
			/* Check if the trailing octet is correct (10xxxxxx). */
			if (octet & 0xC0) != 0x80 {
				return yaml_parser_set_scanner_tag_error(parser, directive,
					start_mark, "found an incorrect trailing UTF-8 octet")
			}
		}
		/* Copy the octet and move past the three consumed characters. */
		*s = append(*s, octet)
		skip(parser)
		skip(parser)
		skip(parser)
		w--
	}
	return true
}
/*
 * Scan a block scalar ('|' literal or '>' folded; literal selects which).
 *
 * Parses the optional chomping ('+'/'-') and explicit indentation (1-9)
 * indicators in either order, skips the header line, then consumes the
 * scalar body line by line, folding line breaks for '>' scalars and
 * applying the chomping rule to trailing breaks.
 */
func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t,
	literal bool) bool {
	/* Eat the indicator '|' or '>'. */
	start_mark := parser.mark
	skip(parser)
	/* Scan the additional block scalar indicators. */
	if !cache(parser, 1) {
		return false
	}
	/* chomping: -1 strip, 0 clip (default), +1 keep. */
	chomping := 0
	/* increment: explicit indentation indicator, 0 when auto-detected. */
	increment := 0
	if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
		/* Set the chomping method and eat the indicator. */
		if parser.buffer[parser.buffer_pos] == '+' {
			chomping = +1
		} else {
			chomping = -1
		}
		skip(parser)
		/* Check for an indentation indicator. */
		if !cache(parser, 1) {
			return false
		}
		if is_digit(parser.buffer[parser.buffer_pos]) {
			/* Check that the indentation is greater than 0. */
			if parser.buffer[parser.buffer_pos] == '0' {
				yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
					start_mark, "found an indentation indicator equal to 0")
				return false
			}
			/* Get the indentation level and eat the indicator. */
			increment = as_digit(parser.buffer[parser.buffer_pos])
			skip(parser)
		}
	} else if is_digit(parser.buffer[parser.buffer_pos]) {
		/* Do the same as above, but in the opposite order. */
		if parser.buffer[parser.buffer_pos] == '0' {
			yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
				start_mark, "found an indentation indicator equal to 0")
			return false
		}
		increment = as_digit(parser.buffer[parser.buffer_pos])
		skip(parser)
		if !cache(parser, 1) {
			return false
		}
		if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
			if parser.buffer[parser.buffer_pos] == '+' {
				chomping = +1
			} else {
				chomping = -1
			}
			skip(parser)
		}
	}
	/* Eat whitespaces and comments to the end of the header line. */
	if !cache(parser, 1) {
		return false
	}
	for is_blank(parser.buffer[parser.buffer_pos]) {
		skip(parser)
		if !cache(parser, 1) {
			return false
		}
	}
	if parser.buffer[parser.buffer_pos] == '#' {
		for !is_breakz_at(parser.buffer, parser.buffer_pos) {
			skip(parser)
			if !cache(parser, 1) {
				return false
			}
		}
	}
	/* Check if we are at the end of the line. */
	if !is_breakz_at(parser.buffer, parser.buffer_pos) {
		yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
			start_mark, "did not find expected comment or line break")
		return false
	}
	/* Eat a line break. */
	if is_break_at(parser.buffer, parser.buffer_pos) {
		if !cache(parser, 2) {
			return false
		}
		skip_line(parser)
	}
	end_mark := parser.mark
	/* Set the indentation level if it was specified; 0 means auto-detect. */
	indent := 0
	if increment > 0 {
		if parser.indent >= 0 {
			indent = parser.indent + increment
		} else {
			indent = increment
		}
	}
	/* Scan the leading line breaks and determine the indentation level if needed. */
	var trailing_breaks []byte
	if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks,
		start_mark, &end_mark) {
		return false
	}
	/* Scan the block scalar content. */
	if !cache(parser, 1) {
		return false
	}
	var s []byte
	var leading_break []byte
	leading_blank := false
	trailing_blank := false
	for parser.mark.column == indent && !is_z(parser.buffer[parser.buffer_pos]) {
		/*
		 * We are at the beginning of a non-empty line.
		 */
		/* Is it a trailing whitespace? */
		trailing_blank = is_blank(parser.buffer[parser.buffer_pos])
		/* Check if we need to fold the leading line break ('>' scalars only,
		 * and never across more-indented / blank lines). */
		if !literal && len(leading_break) > 0 && leading_break[0] == '\n' &&
			!leading_blank && !trailing_blank {
			/* Do we need to join the lines by space? */
			if len(trailing_breaks) == 0 {
				s = append(s, ' ')
			}
			leading_break = leading_break[:0]
		} else {
			s = append(s, leading_break...)
			leading_break = leading_break[:0]
		}
		/* Append the remaining line breaks. */
		s = append(s, trailing_breaks...)
		trailing_breaks = trailing_breaks[:0]
		/* Is it a leading whitespace? */
		leading_blank = is_blank(parser.buffer[parser.buffer_pos])
		/* Consume the current line. */
		for !is_breakz_at(parser.buffer, parser.buffer_pos) {
			s = read(parser, s)
			if !cache(parser, 1) {
				return false
			}
		}
		/* Consume the line break. */
		if !cache(parser, 2) {
			return false
		}
		leading_break = read_line(parser, leading_break)
		/* Eat the following indentation spaces and line breaks. */
		if !yaml_parser_scan_block_scalar_breaks(parser,
			&indent, &trailing_breaks, start_mark, &end_mark) {
			return false
		}
	}
	/* Chomp the tail: clip/keep retain the final break, keep retains all. */
	if chomping != -1 {
		s = append(s, leading_break...)
	}
	if chomping == 1 {
		s = append(s, trailing_breaks...)
	}
	/* Create a token. */
	*token = yaml_token_t{
		token_type: yaml_SCALAR_TOKEN,
		start_mark: start_mark,
		end_mark:   end_mark,
		value:      s,
		style:      yaml_LITERAL_SCALAR_STYLE,
	}
	if !literal {
		token.style = yaml_FOLDED_SCALAR_STYLE
	}
	return true
}
/*
 * Scan indentation spaces and line breaks for a block scalar. Determine the
 * indentation level if needed (*indent == 0 means auto-detect from the most
 * indented non-empty line seen). Consumed breaks accumulate in *breaks and
 * *end_mark tracks the position just past the last consumed break.
 */
func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t,
	indent *int, breaks *[]byte,
	start_mark YAML_mark_t, end_mark *YAML_mark_t) bool {
	*end_mark = parser.mark
	/* Eat the indentation spaces and line breaks. */
	max_indent := 0
	for {
		/* Eat the indentation spaces (up to *indent when it is fixed). */
		if !cache(parser, 1) {
			return false
		}
		for (*indent == 0 || parser.mark.column < *indent) &&
			is_space(parser.buffer[parser.buffer_pos]) {
			skip(parser)
			if !cache(parser, 1) {
				return false
			}
		}
		if parser.mark.column > max_indent {
			max_indent = parser.mark.column
		}
		/* Check for a tab character messing the indentation. */
		if (*indent == 0 || parser.mark.column < *indent) &&
			is_tab(parser.buffer[parser.buffer_pos]) {
			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
				start_mark, "found a tab character where an indentation space is expected")
		}
		/* Have we found a non-empty line? */
		if !is_break_at(parser.buffer, parser.buffer_pos) {
			break
		}
		/* Consume the line break (needs 2 bytes for CRLF). */
		if !cache(parser, 2) {
			return false
		}
		*breaks = read_line(parser, *breaks)
		*end_mark = parser.mark
	}
	/* Determine the indentation level if needed: at least one more than the
	 * enclosing block, and never less than 1. */
	if *indent == 0 {
		*indent = max_indent
		if *indent < parser.indent+1 {
			*indent = parser.indent + 1
		}
		if *indent < 1 {
			*indent = 1
		}
	}
	return true
}
/*
 * Scan a quoted scalar. `single` selects single-quoted style (where '' is
 * the only escape) versus double-quoted style (with backslash escapes).
 * The opening quote has been seen but not consumed; on success the token
 * holds the unescaped content with line folding applied.
 */
func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t,
	single bool) bool {
	/* Eat the left quote. */
	start_mark := parser.mark
	skip(parser)
	/* Consume the content of the quoted scalar. */
	var s []byte               /* accumulated scalar value */
	var leading_break []byte   /* first line break of a blank run */
	var trailing_breaks []byte /* subsequent line breaks of the run */
	var whitespaces []byte     /* pending spaces/tabs (discarded when folding) */
	for {
		/* Check that there are no document indicators at the beginning of the line. */
		if !cache(parser, 4) {
			return false
		}
		if parser.mark.column == 0 &&
			((parser.buffer[parser.buffer_pos] == '-' &&
				parser.buffer[parser.buffer_pos+1] == '-' &&
				parser.buffer[parser.buffer_pos+2] == '-') ||
				(parser.buffer[parser.buffer_pos] == '.' &&
					parser.buffer[parser.buffer_pos+1] == '.' &&
					parser.buffer[parser.buffer_pos+2] == '.')) &&
			is_blankz_at(parser.buffer, parser.buffer_pos+3) {
			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
				start_mark, "found unexpected document indicator")
			return false
		}
		/* Check for EOF. */
		if is_z(parser.buffer[parser.buffer_pos]) {
			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
				start_mark, "found unexpected end of stream")
			return false
		}
		/* Consume non-blank characters. */
		if !cache(parser, 2) {
			return false
		}
		leading_blanks := false
		for !is_blankz_at(parser.buffer, parser.buffer_pos) {
			/* Check for an escaped single quote. */
			if single && parser.buffer[parser.buffer_pos] == '\'' &&
				parser.buffer[parser.buffer_pos+1] == '\'' {
				// It is an escaped single quote ('' unescapes to ').
				s = append(s, '\'')
				skip(parser)
				skip(parser)
			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
				/* Check for the right quote. */
				break
			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
				/* Check for the right quote. */
				break
			} else if !single && parser.buffer[parser.buffer_pos] == '\\' &&
				is_break_at(parser.buffer, parser.buffer_pos+1) {
				/* Check for an escaped line break: skip both, emit nothing. */
				if !cache(parser, 3) {
					return false
				}
				skip(parser)
				skip_line(parser)
				leading_blanks = true
				break
			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
				/* Check for an escape sequence. */
				code_length := 0 /* number of hex digits for \x, \u, \U */
				/* Check the escape character. */
				switch parser.buffer[parser.buffer_pos+1] {
				case '0':
					s = append(s, 0)
				case 'a':
					s = append(s, '\x07')
				case 'b':
					s = append(s, '\x08')
				case 't', '\t':
					s = append(s, '\x09')
				case 'n':
					s = append(s, '\x0A')
				case 'v':
					s = append(s, '\x0B')
				case 'f':
					s = append(s, '\x0C')
				case 'r':
					s = append(s, '\x0D')
				case 'e':
					s = append(s, '\x1B')
				case ' ':
					s = append(s, '\x20')
				case '"':
					s = append(s, '"')
				case '/':
					s = append(s, '/')
				case '\\':
					s = append(s, '\\')
				case 'N': /* NEL (#x85) */
					s = append(s, '\xC2')
					s = append(s, '\x85')
				case '_': /* #xA0 */
					s = append(s, '\xC2')
					s = append(s, '\xA0')
				case 'L': /* LS (#x2028) */
					s = append(s, '\xE2')
					s = append(s, '\x80')
					s = append(s, '\xA8')
				case 'P': /* PS (#x2029) */
					s = append(s, '\xE2')
					s = append(s, '\x80')
					s = append(s, '\xA9')
				case 'x':
					code_length = 2
				case 'u':
					code_length = 4
				case 'U':
					code_length = 8
				default:
					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
						start_mark, "found unknown escape character")
					return false
				}
				/* Skip the backslash and the escape character. */
				skip(parser)
				skip(parser)
				/* Consume an arbitrary escape code. */
				if code_length > 0 {
					value := 0
					/* Scan the character value. */
					if !cache(parser, code_length) {
						return false
					}
					for k := 0; k < code_length; k++ {
						if !is_hex(parser.buffer[parser.buffer_pos+k]) {
							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
								start_mark, "did not find expected hexdecimal number")
							return false
						}
						value = (value << 4) + as_hex(parser.buffer[parser.buffer_pos+k])
					}
					/* Check the value and write the character. Surrogate
					 * halves and values above #x10FFFF are not valid
					 * Unicode scalar values. */
					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
							start_mark, "found invalid Unicode character escape code")
						return false
					}
					/* Emit the code point as 1-4 bytes of UTF-8. */
					if value <= 0x7F {
						s = append(s, byte(value))
					} else if value <= 0x7FF {
						s = append(s, byte(0xC0+(value>>6)))
						s = append(s, byte(0x80+(value&0x3F)))
					} else if value <= 0xFFFF {
						s = append(s, byte(0xE0+(value>>12)))
						s = append(s, byte(0x80+((value>>6)&0x3F)))
						s = append(s, byte(0x80+(value&0x3F)))
					} else {
						s = append(s, byte(0xF0+(value>>18)))
						s = append(s, byte(0x80+((value>>12)&0x3F)))
						s = append(s, byte(0x80+((value>>6)&0x3F)))
						s = append(s, byte(0x80+(value&0x3F)))
					}
					/* Advance the pointer past the hex digits. */
					for k := 0; k < code_length; k++ {
						skip(parser)
					}
				}
			} else {
				/* It is a non-escaped non-blank character. */
				s = read(parser, s)
			}
			if !cache(parser, 2) {
				return false
			}
		}
		/* Check if we are at the end of the scalar. */
		b := parser.buffer[parser.buffer_pos]
		if single {
			if b == '\'' {
				break
			}
		} else if b == '"' {
			break
		}
		/* Consume blank characters. */
		if !cache(parser, 1) {
			return false
		}
		for is_blank(parser.buffer[parser.buffer_pos]) || is_break_at(parser.buffer, parser.buffer_pos) {
			if is_blank(parser.buffer[parser.buffer_pos]) {
				/* Consume a space or a tab character; once a line break has
				 * been seen, further blanks are indentation and are skipped. */
				if !leading_blanks {
					whitespaces = read(parser, whitespaces)
				} else {
					skip(parser)
				}
			} else {
				if !cache(parser, 2) {
					return false
				}
				/* Check if it is a first line break. */
				if !leading_blanks {
					whitespaces = whitespaces[:0]
					leading_break = read_line(parser, leading_break)
					leading_blanks = true
				} else {
					trailing_breaks = read_line(parser, trailing_breaks)
				}
			}
			if !cache(parser, 1) {
				return false
			}
		}
		/* Join the whitespaces or fold line breaks. */
		if leading_blanks {
			/* Do we need to fold line breaks? A single '\n' folds to one
			 * space; additional breaks are kept verbatim. */
			if len(leading_break) > 0 && leading_break[0] == '\n' {
				if len(trailing_breaks) == 0 {
					s = append(s, ' ')
				} else {
					s = append(s, trailing_breaks...)
					trailing_breaks = trailing_breaks[:0]
				}
				leading_break = leading_break[:0]
			} else {
				s = append(s, leading_break...)
				s = append(s, trailing_breaks...)
				leading_break = leading_break[:0]
				trailing_breaks = trailing_breaks[:0]
			}
		} else {
			s = append(s, whitespaces...)
			whitespaces = whitespaces[:0]
		}
	}
	/* Eat the right quote. */
	skip(parser)
	end_mark := parser.mark
	/* Create a token. */
	*token = yaml_token_t{
		token_type: yaml_SCALAR_TOKEN,
		start_mark: start_mark,
		end_mark:   end_mark,
		value:      s,
		style:      yaml_SINGLE_QUOTED_SCALAR_STYLE,
	}
	if !single {
		token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
	}
	return true
}
/*
 * Scan a plain (unquoted) scalar. The scalar ends at a document
 * indicator, a comment, an indicator character (context dependent), or a
 * drop below the required indentation in block context. Internal line
 * breaks are folded the same way as for quoted scalars.
 */
func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
	var s []byte               /* accumulated scalar value */
	var leading_break []byte   /* first line break of a blank run */
	var trailing_breaks []byte /* subsequent line breaks of the run */
	var whitespaces []byte     /* pending spaces/tabs */
	leading_blanks := false
	indent := parser.indent + 1 /* minimum content indentation in block context */
	start_mark := parser.mark
	end_mark := parser.mark
	/* Consume the content of the plain scalar. */
	for {
		/* Check for a document indicator ('---' or '...' at column 0). */
		if !cache(parser, 4) {
			return false
		}
		if parser.mark.column == 0 &&
			((parser.buffer[parser.buffer_pos] == '-' &&
				parser.buffer[parser.buffer_pos+1] == '-' &&
				parser.buffer[parser.buffer_pos+2] == '-') ||
				(parser.buffer[parser.buffer_pos] == '.' &&
					parser.buffer[parser.buffer_pos+1] == '.' &&
					parser.buffer[parser.buffer_pos+2] == '.')) &&
			is_blankz_at(parser.buffer, parser.buffer_pos+3) {
			break
		}
		/* Check for a comment. */
		if parser.buffer[parser.buffer_pos] == '#' {
			break
		}
		/* Consume non-blank characters. */
		for !is_blankz_at(parser.buffer, parser.buffer_pos) {
			/* Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". */
			if parser.flow_level > 0 &&
				parser.buffer[parser.buffer_pos] == ':' &&
				!is_blankz_at(parser.buffer, parser.buffer_pos+1) {
				yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
					start_mark, "found unexpected ':'")
				return false
			}
			/* Check for indicators that may end a plain scalar. */
			b := parser.buffer[parser.buffer_pos]
			if (b == ':' && is_blankz_at(parser.buffer, parser.buffer_pos+1)) ||
				(parser.flow_level > 0 &&
					(b == ',' || b == ':' ||
						b == '?' || b == '[' ||
						b == ']' || b == '{' ||
						b == '}')) {
				break
			}
			/* Check if we need to join whitespaces and breaks. */
			if leading_blanks || len(whitespaces) > 0 {
				if leading_blanks {
					/* Do we need to fold line breaks? leading_break is
					 * non-empty whenever leading_blanks is set. */
					if leading_break[0] == '\n' {
						if len(trailing_breaks) == 0 {
							s = append(s, ' ')
						} else {
							s = append(s, trailing_breaks...)
							trailing_breaks = trailing_breaks[:0]
						}
						leading_break = leading_break[:0]
					} else {
						s = append(s, leading_break...)
						s = append(s, trailing_breaks...)
						leading_break = leading_break[:0]
						trailing_breaks = trailing_breaks[:0]
					}
					leading_blanks = false
				} else {
					s = append(s, whitespaces...)
					whitespaces = whitespaces[:0]
				}
			}
			/* Copy the character. */
			s = read(parser, s)
			end_mark = parser.mark
			if !cache(parser, 2) {
				return false
			}
		}
		/* Is it the end? */
		if !(is_blank(parser.buffer[parser.buffer_pos]) ||
			is_break_at(parser.buffer, parser.buffer_pos)) {
			break
		}
		/* Consume blank characters. */
		if !cache(parser, 1) {
			return false
		}
		for is_blank(parser.buffer[parser.buffer_pos]) ||
			is_break_at(parser.buffer, parser.buffer_pos) {
			if is_blank(parser.buffer[parser.buffer_pos]) {
				/* Check for tab character that abuse indentation. */
				if leading_blanks && parser.mark.column < indent &&
					is_tab(parser.buffer[parser.buffer_pos]) {
					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
						start_mark, "found a tab character that violate indentation")
					return false
				}
				/* Consume a space or a tab character. */
				if !leading_blanks {
					whitespaces = read(parser, whitespaces)
				} else {
					skip(parser)
				}
			} else {
				if !cache(parser, 2) {
					return false
				}
				/* Check if it is a first line break. */
				if !leading_blanks {
					whitespaces = whitespaces[:0]
					leading_break = read_line(parser, leading_break)
					leading_blanks = true
				} else {
					trailing_breaks = read_line(parser, trailing_breaks)
				}
			}
			if !cache(parser, 1) {
				return false
			}
		}
		/* Check indentation level: in block context the scalar ends when a
		 * line is indented less than required. */
		if parser.flow_level == 0 && parser.mark.column < indent {
			break
		}
	}
	/* Create a token. */
	*token = yaml_token_t{
		token_type: yaml_SCALAR_TOKEN,
		start_mark: start_mark,
		end_mark:   end_mark,
		value:      s,
		style:      yaml_PLAIN_SCALAR_STYLE,
	}
	/* Note that we change the 'simple_key_allowed' flag. */
	if leading_blanks {
		parser.simple_key_allowed = true
	}
	return true
}
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/scanner_test.go
================================================
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"io/ioutil"
"os"
"path/filepath"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// scan registers a Ginkgo test case that tokenizes the given YAML file
// from start to STREAM-END and expects the scanner to never report failure.
var scan = func(filename string) {
	It("scan "+filename, func() {
		file, err := os.Open(filename)
		Expect(err).To(BeNil())
		parser := yaml_parser_t{}
		yaml_parser_initialize(&parser)
		yaml_parser_set_input_reader(&parser, file)
		failed := false
		token := yaml_token_t{}
		// Pull tokens until the stream ends or the scanner errors out.
		for {
			if !yaml_parser_scan(&parser, &token) {
				failed = true
				break
			}
			if token.token_type == yaml_STREAM_END_TOKEN {
				break
			}
		}
		file.Close()
		// Debug output, kept for local troubleshooting:
		// msg := "SUCCESS"
		// if failed {
		// 	msg = "FAILED"
		// 	if parser.error != yaml_NO_ERROR {
		// 		m := parser.problem_mark
		// 		fmt.Printf("ERROR: (%s) %s @ line: %d col: %d\n",
		// 			parser.context, parser.problem, m.line, m.column)
		// 	}
		// }
		Expect(failed).To(BeFalse())
	})
}
// scanYamls registers one scan test per regular file directly inside
// dirname; subdirectories are not recursed into.
var scanYamls = func(dirname string) {
	fileInfos, err := ioutil.ReadDir(dirname)
	if err != nil {
		// Runs at suite-construction time, so panic is the only way out.
		panic(err.Error())
	}
	for _, fileInfo := range fileInfos {
		if !fileInfo.IsDir() {
			scan(filepath.Join(dirname, fileInfo.Name()))
		}
	}
}
// Scanner suite: tokenize every YAML-specification fixture file.
var _ = Describe("Scanner", func() {
	scanYamls("fixtures/specification")
	scanYamls("fixtures/specification/types")
})
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/tags.go
================================================
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"reflect"
"sort"
"strings"
"sync"
"unicode"
)
// A field represents a single field found in a struct.
type field struct {
	name      string       // effective yaml key: tag name, or the Go field name
	tag       bool         // true if the name came from a "yaml" struct tag
	index     []int        // reflect index path from the root struct
	typ       reflect.Type // field type (unnamed pointer types are followed)
	omitEmpty bool         // tag carried the "omitempty" option
	flow      bool         // tag carried the "flow" option
}
// byName orders fields for the dominance-resolution pass: primarily by
// name, then by shallower embedding depth, then tagged before untagged,
// and finally by index sequence.
type byName []field

func (x byName) Len() int { return len(x) }

func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byName) Less(i, j int) bool {
	a, b := x[i], x[j]
	switch {
	case a.name != b.name:
		return a.name < b.name
	case len(a.index) != len(b.index):
		return len(a.index) < len(b.index)
	case a.tag != b.tag:
		return a.tag
	}
	return byIndex(x).Less(i, j)
}
// byIndex orders fields lexicographically by their index sequence.
type byIndex []field

func (x byIndex) Len() int { return len(x) }

func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byIndex) Less(i, j int) bool {
	a, b := x[i].index, x[j].index
	for k, ak := range a {
		if k >= len(b) {
			// a is longer and b is a prefix of it: b sorts first.
			return false
		}
		if ak != b[k] {
			return ak < b[k]
		}
	}
	// a is a (possibly equal) prefix of b.
	return len(a) < len(b)
}
// typeFields returns a list of fields that the yaml encoder/decoder should
// recognize for the given type. The algorithm is breadth-first search over
// the set of structs to include - the top struct and then any reachable
// anonymous structs. (Ported from encoding/json, using the "yaml" tag.)
func typeFields(t reflect.Type) []field {
	// Anonymous fields to explore at the current level and the next.
	current := []field{}
	next := []field{{typ: t}}
	// Count of queued names for current level and the next.
	count := map[reflect.Type]int{}
	nextCount := map[reflect.Type]int{}
	// Types already visited at an earlier level.
	visited := map[reflect.Type]bool{}
	// Fields found.
	var fields []field
	for len(next) > 0 {
		current, next = next, current[:0]
		count, nextCount = nextCount, map[reflect.Type]int{}
		for _, f := range current {
			if visited[f.typ] {
				continue
			}
			visited[f.typ] = true
			// Scan f.typ for fields to include.
			for i := 0; i < f.typ.NumField(); i++ {
				sf := f.typ.Field(i)
				if sf.PkgPath != "" { // unexported
					continue
				}
				tag := sf.Tag.Get("yaml")
				if tag == "-" {
					// `yaml:"-"` means: never serialize this field.
					continue
				}
				name, opts := parseTag(tag)
				if !isValidTag(name) {
					name = ""
				}
				// Index path of this field relative to the root struct.
				index := make([]int, len(f.index)+1)
				copy(index, f.index)
				index[len(f.index)] = i
				ft := sf.Type
				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
					// Follow pointer.
					ft = ft.Elem()
				}
				// Record found field and index sequence.
				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
					tagged := name != ""
					if name == "" {
						name = sf.Name
					}
					fields = append(fields, field{name, tagged, index, ft,
						opts.Contains("omitempty"), opts.Contains("flow")})
					if count[f.typ] > 1 {
						// If there were multiple instances, add a second,
						// so that the annihilation code will see a duplicate.
						// It only cares about the distinction between 1 or 2,
						// so don't bother generating any more copies.
						fields = append(fields, fields[len(fields)-1])
					}
					continue
				}
				// Record new anonymous struct to explore in next round.
				nextCount[ft]++
				if nextCount[ft] == 1 {
					next = append(next, field{name: ft.Name(), index: index, typ: ft})
				}
			}
		}
	}
	sort.Sort(byName(fields))
	// Delete all fields that are hidden by the Go rules for embedded fields,
	// except that fields with yaml tags are promoted.
	// The fields are sorted in primary order of name, secondary order
	// of field index length. Loop over names; for each name, delete
	// hidden fields by choosing the one dominant field that survives.
	out := fields[:0]
	for advance, i := 0, 0; i < len(fields); i += advance {
		// One iteration per name.
		// Find the sequence of fields with the name of this first field.
		fi := fields[i]
		name := fi.name
		for advance = 1; i+advance < len(fields); advance++ {
			fj := fields[i+advance]
			if fj.name != name {
				break
			}
		}
		if advance == 1 { // Only one field with this name
			out = append(out, fi)
			continue
		}
		dominant, ok := dominantField(fields[i : i+advance])
		if ok {
			out = append(out, dominant)
		}
	}
	fields = out
	sort.Sort(byIndex(fields))
	return fields
}
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// yaml tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
	// The fields are sorted in increasing index-length order. The winner
	// must therefore be one with the shortest index length. Drop all
	// longer entries, which is easy: just truncate the slice.
	length := len(fields[0].index)
	tagged := -1 // Index of first tagged field.
	for i, f := range fields {
		if len(f.index) > length {
			// Deeper than the shallowest level: this and everything after
			// it is shadowed. Truncating inside the range is safe because
			// we break immediately.
			fields = fields[:i]
			break
		}
		if f.tag {
			if tagged >= 0 {
				// Multiple tagged fields at the same level: conflict.
				// Return no field.
				return field{}, false
			}
			tagged = i
		}
	}
	if tagged >= 0 {
		// A single tagged field wins regardless of untagged siblings.
		return fields[tagged], true
	}
	// All remaining fields have the same length. If there's more than one,
	// we have a conflict (two fields named "X" at the same level) and we
	// return no field.
	if len(fields) > 1 {
		return field{}, false
	}
	return fields[0], true
}
// fieldCache memoizes typeFields results per struct type.
var fieldCache struct {
	sync.RWMutex
	m map[reflect.Type][]field
}

// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
	fieldCache.RLock()
	f := fieldCache.m[t]
	fieldCache.RUnlock()
	if f != nil {
		return f
	}
	// Compute fields without lock.
	// Might duplicate effort but won't hold other computations back.
	f = typeFields(t)
	if f == nil {
		// Store a non-nil empty slice so the fast path above hits next time.
		f = []field{}
	}
	fieldCache.Lock()
	if fieldCache.m == nil {
		fieldCache.m = map[reflect.Type][]field{}
	}
	fieldCache.m[t] = f
	fieldCache.Unlock()
	return f
}
// tagOptions is the string following a comma in a struct field's "yaml"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string
// isValidTag reports whether s is usable as a yaml key name: non-empty and
// made up of letters, digits, and a limited set of punctuation characters.
// Backslash and quote characters are reserved and therefore rejected.
func isValidTag(s string) bool {
	if len(s) == 0 {
		return false
	}
	const allowedPunct = "!#$%&()*+-./:<=>?@[]^_{|}~ "
	for _, r := range s {
		if unicode.IsLetter(r) || unicode.IsDigit(r) {
			continue
		}
		if !strings.ContainsRune(allowedPunct, r) {
			return false
		}
	}
	return true
}
func fieldByIndex(v reflect.Value, index []int) reflect.Value {
for _, i := range index {
if v.Kind() == reflect.Ptr {
if v.IsNil() {
return reflect.Value{}
}
v = v.Elem()
}
v = v.Field(i)
}
return v
}
func typeByIndex(t reflect.Type, index []int) reflect.Type {
for _, i := range index {
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
t = t.Field(i).Type
}
return t
}
// stringValues is a slice of reflect.Value holding *reflect.StringValue.
// It implements sort.Interface: string contents are compared when both
// sides unwrap to strings; otherwise values order by their kind.
type stringValues []reflect.Value

func (sv stringValues) Len() int { return len(sv) }

func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }

func (sv stringValues) Less(i, j int) bool {
	left, leftKind := getElem(sv[i])
	right, rightKind := getElem(sv[j])
	if leftKind != reflect.String || rightKind != reflect.String {
		return leftKind < rightKind
	}
	return left.String() < right.String()
}
func getElem(v reflect.Value) (reflect.Value, reflect.Kind) {
k := v.Kind()
for k == reflect.Interface || k == reflect.Ptr && !v.IsNil() {
v = v.Elem()
k = v.Kind()
}
return v, k
}
// parseTag splits a struct field's yaml tag into its name and the
// comma-separated options that follow it.
func parseTag(tag string) (string, tagOptions) {
	comma := strings.Index(tag, ",")
	if comma < 0 {
		return tag, tagOptions("")
	}
	return tag[:comma], tagOptions(tag[comma+1:])
}
// Contains reports whether the comma-separated option list o includes
// optionName as one of its complete entries (substrings do not match).
func (o tagOptions) Contains(optionName string) bool {
	remaining := string(o)
	for len(remaining) > 0 {
		var rest string
		if comma := strings.Index(remaining, ","); comma >= 0 {
			remaining, rest = remaining[:comma], remaining[comma+1:]
		}
		if remaining == optionName {
			return true
		}
		remaining = rest
	}
	return false
}
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/writer.go
================================================
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
/*
 * Record a writer error on the emitter and return false so the failure
 * propagates up the emitter call chain.
 */
func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
	emitter.error = yaml_WRITER_ERROR
	emitter.problem = problem
	return false
}
/*
 * Flush the output buffer through the write handler. UTF-8 output is
 * written directly; UTF-16 (LE/BE) output is recoded into raw_buffer
 * first. Returns false (with a writer error recorded) on write failure.
 */
func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
	if emitter.write_handler == nil {
		panic("Write handler must be set")
	}
	if emitter.encoding == yaml_ANY_ENCODING {
		panic("Encoding must be set")
	}
	/* Check if the buffer is empty. */
	if emitter.buffer_pos == 0 {
		return true
	}
	/* If the output encoding is UTF-8, we don't need to recode the buffer. */
	if emitter.encoding == yaml_UTF8_ENCODING {
		if err := emitter.write_handler(emitter,
			emitter.buffer[:emitter.buffer_pos]); err != nil {
			return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
		}
		emitter.buffer_pos = 0
		return true
	}
	/* Recode the buffer into the raw buffer as UTF-16. low/high select the
	 * byte order of each 16-bit code unit. */
	var low, high int
	if emitter.encoding == yaml_UTF16LE_ENCODING {
		low, high = 0, 1
	} else {
		high, low = 1, 0
	}
	pos := 0
	for pos < emitter.buffer_pos {
		/*
		 * Read the next UTF-8 character; the buffer is assumed to contain
		 * a valid UTF-8 sequence (see the reader code for details).
		 */
		octet := emitter.buffer[pos]
		var w int
		var value rune
		switch {
		case octet&0x80 == 0x00:
			w, value = 1, rune(octet&0x7F)
		case octet&0xE0 == 0xC0:
			w, value = 2, rune(octet&0x1F)
		case octet&0xF0 == 0xE0:
			w, value = 3, rune(octet&0x0F)
		case octet&0xF8 == 0xF0:
			w, value = 4, rune(octet&0x07)
		}
		for k := 1; k < w; k++ {
			octet = emitter.buffer[pos+k]
			value = (value << 6) + (rune(octet) & 0x3F)
		}
		pos += w
		/* Write the character. */
		if value < 0x10000 {
			var b [2]byte
			b[high] = byte(value >> 8)
			b[low] = byte(value & 0xFF)
			emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
		} else {
			/*
			 * Write the character using a surrogate pair:
			 *   high surrogate = 0xD800 + (value >> 10)
			 *   low surrogate  = 0xDC00 + (value & 0x3FF)
			 */
			var b [4]byte
			value -= 0x10000
			b[high] = byte(0xD8 + (value >> 18))
			b[low] = byte((value >> 10) & 0xFF)
			/* BUG FIX: only bits 8-9 of value belong in the low surrogate's
			 * high byte. The previous mask of 0xFF leaked bits 10-15 (which
			 * are already encoded in the high surrogate) into the sum and
			 * corrupted every code point >= U+10400. */
			b[high+2] = byte(0xDC + ((value >> 8) & 0x03))
			b[low+2] = byte(value & 0xFF)
			emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
		}
	}
	/* Write the raw buffer. */
	if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
	}
	emitter.buffer_pos = 0
	emitter.raw_buffer = emitter.raw_buffer[:0]
	return true
}
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_definesh.go
================================================
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
/* The version of the original libyaml implementation this port tracks. */
const (
	yaml_VERSION_MAJOR = 0
	yaml_VERSION_MINOR = 1
	yaml_VERSION_PATCH = 6
	/* Must agree with the MAJOR/MINOR/PATCH values above. */
	yaml_VERSION_STRING = "0.1.6"
)
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_privateh.go
================================================
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
const (
	/*
	 * The size of the input raw (undecoded) buffer.
	 */
	INPUT_RAW_BUFFER_SIZE = 1024
	/*
	 * The size of the input buffer.
	 *
	 * It should be possible to decode the whole raw buffer.
	 */
	INPUT_BUFFER_SIZE = (INPUT_RAW_BUFFER_SIZE * 3)
	/*
	 * The size of the output buffer.
	 */
	OUTPUT_BUFFER_SIZE = 512
	/*
	 * The size of the output raw buffer.
	 *
	 * It should be possible to encode the whole output buffer.
	 */
	OUTPUT_RAW_BUFFER_SIZE = (OUTPUT_BUFFER_SIZE*2 + 2)
	/*
	 * The initial capacity of parser/emitter stacks and queues.
	 */
	INITIAL_STACK_SIZE = 16
	INITIAL_QUEUE_SIZE = 16
)
// width returns the byte length of the UTF-8 sequence whose leading byte
// is b, or 0 if b is not a valid leading byte (e.g. a continuation byte).
func width(b byte) int {
	switch {
	case b&0x80 == 0:
		return 1
	case b&0xE0 == 0xC0:
		return 2
	case b&0xF0 == 0xE0:
		return 3
	case b&0xF8 == 0xF0:
		return 4
	}
	return 0
}
// copy_bytes copies one complete UTF-8 character from src[*src_pos] into
// dest[*dest_pos] and advances both positions by its width. Panics when
// src does not start with a valid UTF-8 leading byte.
func copy_bytes(dest []byte, dest_pos *int, src []byte, src_pos *int) {
	w := width(src[*src_pos])
	switch w {
	case 1, 2, 3, 4:
		// Copy highest offset first, mirroring the original fallthrough
		// order, so overlapping dest/src regions behave identically.
		for k := w - 1; k >= 0; k-- {
			dest[*dest_pos+k] = src[*src_pos+k]
		}
	default:
		panic("invalid width")
	}
	*dest_pos += w
	*src_pos += w
}
// is_alpha reports whether b is an alphabetical character, a digit,
// '_', or '-'.
func is_alpha(b byte) bool {
	switch {
	case '0' <= b && b <= '9':
		return true
	case 'A' <= b && b <= 'Z':
		return true
	case 'a' <= b && b <= 'z':
		return true
	}
	return b == '_' || b == '-'
}
// is_digit reports whether b is a decimal digit.
func is_digit(b byte) bool {
	return '0' <= b && b <= '9'
}
// as_digit returns the numeric value of the decimal digit b.
// (No validation: callers check is_digit first.)
func as_digit(b byte) int {
	return int(b) - int('0')
}
// is_hex reports whether b is a hexadecimal digit (0-9, A-F, a-f).
func is_hex(b byte) bool {
	switch {
	case '0' <= b && b <= '9':
		return true
	case 'A' <= b && b <= 'F':
		return true
	case 'a' <= b && b <= 'f':
		return true
	}
	return false
}
// as_hex returns the numeric value of the hexadecimal digit b.
// (No validation: callers check is_hex first.)
func as_hex(b byte) int {
	switch {
	case 'A' <= b && b <= 'F':
		return int(b) - 'A' + 10
	case 'a' <= b && b <= 'f':
		return int(b) - 'a' + 10
	}
	return int(b) - '0'
}
// #define AS_HEX_AT(string,offset) \
// (((string).pointer[offset] >= (yaml_char_t) 'A' && \
// (string).pointer[offset] <= (yaml_char_t) 'F') ? \
// ((string).pointer[offset] - (yaml_char_t) 'A' + 10) : \
// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
// (string).pointer[offset] <= (yaml_char_t) 'f') ? \
// ((string).pointer[offset] - (yaml_char_t) 'a' + 10) : \
// ((string).pointer[offset] - (yaml_char_t) '0'))
// is_blankz_at reports whether the character at b[i] is a blank
// (space/tab), a line break, or NUL.
func is_blankz_at(b []byte, i int) bool {
	if is_blank(b[i]) {
		return true
	}
	return is_breakz_at(b, i)
}
// is_break_at reports whether a YAML line break starts at b[i]:
// CR (#xD), LF (#xA), NEL (#x85 = C2 85), LS (#x2028 = E2 80 A8),
// or PS (#x2029 = E2 80 A9). Bytes past b[i] are only examined when
// b[i] is the corresponding UTF-8 lead byte.
func is_break_at(b []byte, i int) bool {
	switch b[i] {
	case '\r', '\n': /* CR, LF */
		return true
	case 0xC2: /* NEL */
		return b[i+1] == 0x85
	case 0xE2: /* LS or PS */
		return b[i+1] == 0x80 && (b[i+2] == 0xA8 || b[i+2] == 0xA9)
	}
	return false
}
// is_breakz_at reports whether b[i] starts a line break or is NUL.
func is_breakz_at(b []byte, i int) bool {
	if is_break_at(b, i) {
		return true
	}
	return is_z(b[i])
}
// is_crlf_at reports whether a CR LF pair starts at b[i]. b[i+1] is only
// examined when b[i] is CR.
func is_crlf_at(b []byte, i int) bool {
	if b[i] != '\r' {
		return false
	}
	return b[i+1] == '\n'
}
// is_z reports whether b is the NUL character (end-of-stream sentinel).
func is_z(b byte) bool {
	return b == 0
}
// is_space reports whether b is the space character.
func is_space(b byte) bool {
	return b == 0x20
}
// is_tab reports whether b is the horizontal tab character.
func is_tab(b byte) bool {
	return b == 0x09
}
// is_blank reports whether b is blank, i.e. a space or a tab.
func is_blank(b byte) bool {
	return b == ' ' || b == '\t'
}
// is_ascii reports whether b is a 7-bit ASCII byte.
func is_ascii(b byte) bool {
	return b < 0x80
}
// is_printable_at reports whether the UTF-8 character starting at b[i]
// can be emitted unescaped: LF, printable ASCII, or a non-control,
// non-surrogate code point that is not the BOM (#xFEFF) and not one of
// the non-characters #xFFFE/#xFFFF. Bytes past b[i] are only examined
// for the multi-byte lead bytes that require them.
func is_printable_at(b []byte, i int) bool {
	switch {
	case b[i] == 0x0A: /* LF */
		return true
	case b[i] >= 0x20 && b[i] <= 0x7E: /* printable ASCII */
		return true
	case b[i] == 0xC2: /* #xA0 <= . <= #xFF range of 2-byte sequences */
		return b[i+1] >= 0xA0
	case b[i] > 0xC2 && b[i] < 0xED: /* remaining 2/3-byte lead bytes */
		return true
	case b[i] == 0xED: /* below the surrogate block */
		return b[i+1] < 0xA0
	case b[i] == 0xEE:
		return true
	case b[i] == 0xEF: /* exclude #xFEFF, #xFFFE, #xFFFF */
		if b[i+1] == 0xBB && b[i+2] == 0xBF {
			return false
		}
		if b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF) {
			return false
		}
		return true
	}
	return false
}
// insert_token appends token to the parser's token queue (pos < 0) or
// inserts it at offset pos relative to tokens_head. When the backing
// array is full but consumed tokens sit before tokens_head, they are
// discarded first so that append can reuse the existing capacity.
func insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
	// Collapse the slice: drop the already-consumed prefix so the
	// append below does not need to grow the backing array.
	if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
		if parser.tokens_head != len(parser.tokens) {
			// Move the live tokens down to the start of the slice.
			copy(parser.tokens, parser.tokens[parser.tokens_head:])
		}
		// Readjust the length to cover only the live tokens.
		parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
		parser.tokens_head = 0
	}
	// Grow by one; for pos < 0 this append IS the insertion.
	parser.tokens = append(parser.tokens, *token)
	if pos < 0 {
		return
	}
	// Shift the tail right by one and drop the token into its slot.
	copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
	parser.tokens[parser.tokens_head+pos] = *token
}
// is_bom_at reports whether the UTF-8 byte order mark (EF BB BF) starts
// at b[i]. b[i+1] and b[i+2] are only examined when b[i] is 0xEF.
func is_bom_at(b []byte, i int) bool {
	if b[i] != 0xEF {
		return false
	}
	return b[i+1] == 0xBB && b[i+2] == 0xBF
}
//
// #ifdef HAVE_CONFIG_H
// #include
// #endif
//
// #include "./yaml.h"
//
// #include
// #include
//
// /*
// * Memory management.
// */
//
// yaml_DECLARE(void *)
// yaml_malloc(size_t size);
//
// yaml_DECLARE(void *)
// yaml_realloc(void *ptr, size_t size);
//
// yaml_DECLARE(void)
// yaml_free(void *ptr);
//
// yaml_DECLARE(yaml_char_t *)
// yaml_strdup(const yaml_char_t *);
//
// /*
// * Reader: Ensure that the buffer contains at least `length` characters.
// */
//
// yaml_DECLARE(int)
// yaml_parser_update_buffer(yaml_parser_t *parser, size_t length);
//
// /*
// * Scanner: Ensure that the token stack contains at least one token ready.
// */
//
// yaml_DECLARE(int)
// yaml_parser_fetch_more_tokens(yaml_parser_t *parser);
//
// /*
// * The size of the input raw buffer.
// */
//
// #define INPUT_RAW_BUFFER_SIZE 16384
//
// /*
// * The size of the input buffer.
// *
// * It should be possible to decode the whole raw buffer.
// */
//
// #define INPUT_BUFFER_SIZE (INPUT_RAW_BUFFER_SIZE*3)
//
// /*
// * The size of the output buffer.
// */
//
// #define OUTPUT_BUFFER_SIZE 16384
//
// /*
// * The size of the output raw buffer.
// *
// * It should be possible to encode the whole output buffer.
// */
//
// #define OUTPUT_RAW_BUFFER_SIZE (OUTPUT_BUFFER_SIZE*2+2)
//
// /*
// * The size of other stacks and queues.
// */
//
// #define INITIAL_STACK_SIZE 16
// #define INITIAL_QUEUE_SIZE 16
// #define INITIAL_STRING_SIZE 16
//
// /*
// * Buffer management.
// */
//
// #define BUFFER_INIT(context,buffer,size) \
// (((buffer).start = yaml_malloc(size)) ? \
// ((buffer).last = (buffer).pointer = (buffer).start, \
// (buffer).end = (buffer).start+(size), \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// #define BUFFER_DEL(context,buffer) \
// (yaml_free((buffer).start), \
// (buffer).start = (buffer).pointer = (buffer).end = 0)
//
// /*
// * String management.
// */
//
// typedef struct {
// yaml_char_t *start;
// yaml_char_t *end;
// yaml_char_t *pointer;
// } yaml_string_t;
//
// yaml_DECLARE(int)
// yaml_string_extend(yaml_char_t **start,
// yaml_char_t **pointer, yaml_char_t **end);
//
// yaml_DECLARE(int)
// yaml_string_join(
// yaml_char_t **a_start, yaml_char_t **a_pointer, yaml_char_t **a_end,
// yaml_char_t **b_start, yaml_char_t **b_pointer, yaml_char_t **b_end);
//
// #define NULL_STRING { NULL, NULL, NULL }
//
// #define STRING(string,length) { (string), (string)+(length), (string) }
//
// #define STRING_ASSIGN(value,string,length) \
// ((value).start = (string), \
// (value).end = (string)+(length), \
// (value).pointer = (string))
//
// #define STRING_INIT(context,string,size) \
// (((string).start = yaml_malloc(size)) ? \
// ((string).pointer = (string).start, \
// (string).end = (string).start+(size), \
// memset((string).start, 0, (size)), \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// #define STRING_DEL(context,string) \
// (yaml_free((string).start), \
// (string).start = (string).pointer = (string).end = 0)
//
// #define STRING_EXTEND(context,string) \
// (((string).pointer+5 < (string).end) \
// || yaml_string_extend(&(string).start, \
// &(string).pointer, &(string).end))
//
// #define CLEAR(context,string) \
// ((string).pointer = (string).start, \
// memset((string).start, 0, (string).end-(string).start))
//
// #define JOIN(context,string_a,string_b) \
// ((yaml_string_join(&(string_a).start, &(string_a).pointer, \
// &(string_a).end, &(string_b).start, \
// &(string_b).pointer, &(string_b).end)) ? \
// ((string_b).pointer = (string_b).start, \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// /*
// * String check operations.
// */
//
// /*
// * Check the octet at the specified position.
// */
//
// #define CHECK_AT(string,octet,offset) \
// ((string).pointer[offset] == (yaml_char_t)(octet))
//
// /*
// * Check the current octet in the buffer.
// */
//
// #define CHECK(string,octet) CHECK_AT((string),(octet),0)
//
// /*
// * Check if the character at the specified position is an alphabetical
// * character, a digit, '_', or '-'.
// */
//
// #define IS_ALPHA_AT(string,offset) \
// (((string).pointer[offset] >= (yaml_char_t) '0' && \
// (string).pointer[offset] <= (yaml_char_t) '9') || \
// ((string).pointer[offset] >= (yaml_char_t) 'A' && \
// (string).pointer[offset] <= (yaml_char_t) 'Z') || \
// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
// (string).pointer[offset] <= (yaml_char_t) 'z') || \
// (string).pointer[offset] == '_' || \
// (string).pointer[offset] == '-')
//
// #define IS_ALPHA(string) IS_ALPHA_AT((string),0)
//
// /*
// * Check if the character at the specified position is a digit.
// */
//
// #define IS_DIGIT_AT(string,offset) \
// (((string).pointer[offset] >= (yaml_char_t) '0' && \
// (string).pointer[offset] <= (yaml_char_t) '9'))
//
// #define IS_DIGIT(string) IS_DIGIT_AT((string),0)
//
// /*
// * Get the value of a digit.
// */
//
// #define AS_DIGIT_AT(string,offset) \
// ((string).pointer[offset] - (yaml_char_t) '0')
//
// #define AS_DIGIT(string) AS_DIGIT_AT((string),0)
//
// /*
// * Check if the character at the specified position is a hex-digit.
// */
//
// #define IS_HEX_AT(string,offset) \
// (((string).pointer[offset] >= (yaml_char_t) '0' && \
// (string).pointer[offset] <= (yaml_char_t) '9') || \
// ((string).pointer[offset] >= (yaml_char_t) 'A' && \
// (string).pointer[offset] <= (yaml_char_t) 'F') || \
// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
// (string).pointer[offset] <= (yaml_char_t) 'f'))
//
// #define IS_HEX(string) IS_HEX_AT((string),0)
//
// /*
// * Get the value of a hex-digit.
// */
//
// #define AS_HEX_AT(string,offset) \
// (((string).pointer[offset] >= (yaml_char_t) 'A' && \
// (string).pointer[offset] <= (yaml_char_t) 'F') ? \
// ((string).pointer[offset] - (yaml_char_t) 'A' + 10) : \
// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
// (string).pointer[offset] <= (yaml_char_t) 'f') ? \
// ((string).pointer[offset] - (yaml_char_t) 'a' + 10) : \
// ((string).pointer[offset] - (yaml_char_t) '0'))
//
// #define AS_HEX(string) AS_HEX_AT((string),0)
//
// /*
// * Check if the character is ASCII.
// */
//
// #define IS_ASCII_AT(string,offset) \
// ((string).pointer[offset] <= (yaml_char_t) '\x7F')
//
// #define IS_ASCII(string) IS_ASCII_AT((string),0)
//
// /*
// * Check if the character can be printed unescaped.
// */
//
// #define IS_PRINTABLE_AT(string,offset) \
// (((string).pointer[offset] == 0x0A) /* . == #x0A */ \
// || ((string).pointer[offset] >= 0x20 /* #x20 <= . <= #x7E */ \
// && (string).pointer[offset] <= 0x7E) \
// || ((string).pointer[offset] == 0xC2 /* #0xA0 <= . <= #xD7FF */ \
// && (string).pointer[offset+1] >= 0xA0) \
// || ((string).pointer[offset] > 0xC2 \
// && (string).pointer[offset] < 0xED) \
// || ((string).pointer[offset] == 0xED \
// && (string).pointer[offset+1] < 0xA0) \
// || ((string).pointer[offset] == 0xEE) \
// || ((string).pointer[offset] == 0xEF /* #xE000 <= . <= #xFFFD */ \
// && !((string).pointer[offset+1] == 0xBB /* && . != #xFEFF */ \
// && (string).pointer[offset+2] == 0xBF) \
// && !((string).pointer[offset+1] == 0xBF \
// && ((string).pointer[offset+2] == 0xBE \
// || (string).pointer[offset+2] == 0xBF))))
//
// #define IS_PRINTABLE(string) IS_PRINTABLE_AT((string),0)
//
// /*
// * Check if the character at the specified position is NUL.
// */
//
// #define IS_Z_AT(string,offset) CHECK_AT((string),'\0',(offset))
//
// #define IS_Z(string) IS_Z_AT((string),0)
//
// /*
// * Check if the character at the specified position is BOM.
// */
//
// #define IS_BOM_AT(string,offset) \
// (CHECK_AT((string),'\xEF',(offset)) \
// && CHECK_AT((string),'\xBB',(offset)+1) \
// && CHECK_AT((string),'\xBF',(offset)+2)) /* BOM (#xFEFF) */
//
// #define IS_BOM(string) IS_BOM_AT(string,0)
//
// /*
// * Check if the character at the specified position is space.
// */
//
// #define IS_SPACE_AT(string,offset) CHECK_AT((string),' ',(offset))
//
// #define IS_SPACE(string) IS_SPACE_AT((string),0)
//
// /*
// * Check if the character at the specified position is tab.
// */
//
// #define IS_TAB_AT(string,offset) CHECK_AT((string),'\t',(offset))
//
// #define IS_TAB(string) IS_TAB_AT((string),0)
//
// /*
// * Check if the character at the specified position is blank (space or tab).
// */
//
// #define IS_BLANK_AT(string,offset) \
// (IS_SPACE_AT((string),(offset)) || IS_TAB_AT((string),(offset)))
//
// #define IS_BLANK(string) IS_BLANK_AT((string),0)
//
// /*
// * Check if the character at the specified position is a line break.
// */
//
// #define IS_BREAK_AT(string,offset) \
// (CHECK_AT((string),'\r',(offset)) /* CR (#xD)*/ \
// || CHECK_AT((string),'\n',(offset)) /* LF (#xA) */ \
// || (CHECK_AT((string),'\xC2',(offset)) \
// && CHECK_AT((string),'\x85',(offset)+1)) /* NEL (#x85) */ \
// || (CHECK_AT((string),'\xE2',(offset)) \
// && CHECK_AT((string),'\x80',(offset)+1) \
// && CHECK_AT((string),'\xA8',(offset)+2)) /* LS (#x2028) */ \
// || (CHECK_AT((string),'\xE2',(offset)) \
// && CHECK_AT((string),'\x80',(offset)+1) \
// && CHECK_AT((string),'\xA9',(offset)+2))) /* PS (#x2029) */
//
// #define IS_BREAK(string) IS_BREAK_AT((string),0)
//
// #define IS_CRLF_AT(string,offset) \
// (CHECK_AT((string),'\r',(offset)) && CHECK_AT((string),'\n',(offset)+1))
//
// #define IS_CRLF(string) IS_CRLF_AT((string),0)
//
// /*
// * Check if the character is a line break or NUL.
// */
//
// #define IS_BREAKZ_AT(string,offset) \
// (IS_BREAK_AT((string),(offset)) || IS_Z_AT((string),(offset)))
//
// #define IS_BREAKZ(string) IS_BREAKZ_AT((string),0)
//
// /*
// * Check if the character is a line break, space, or NUL.
// */
//
// #define IS_SPACEZ_AT(string,offset) \
// (IS_SPACE_AT((string),(offset)) || IS_BREAKZ_AT((string),(offset)))
//
// #define IS_SPACEZ(string) IS_SPACEZ_AT((string),0)
//
// /*
// * Check if the character is a line break, space, tab, or NUL.
// */
//
// #define IS_BLANKZ_AT(string,offset) \
// (IS_BLANK_AT((string),(offset)) || IS_BREAKZ_AT((string),(offset)))
//
// #define IS_BLANKZ(string) IS_BLANKZ_AT((string),0)
//
// /*
// * Determine the width of the character.
// */
//
// #define WIDTH_AT(string,offset) \
// (((string).pointer[offset] & 0x80) == 0x00 ? 1 : \
// ((string).pointer[offset] & 0xE0) == 0xC0 ? 2 : \
// ((string).pointer[offset] & 0xF0) == 0xE0 ? 3 : \
// ((string).pointer[offset] & 0xF8) == 0xF0 ? 4 : 0)
//
// #define WIDTH(string) WIDTH_AT((string),0)
//
// /*
// * Move the string pointer to the next character.
// */
//
// #define MOVE(string) ((string).pointer += WIDTH((string)))
//
// /*
// * Copy a character and move the pointers of both strings.
// */
//
// #define COPY(string_a,string_b) \
// ((*(string_b).pointer & 0x80) == 0x00 ? \
// (*((string_a).pointer++) = *((string_b).pointer++)) : \
// (*(string_b).pointer & 0xE0) == 0xC0 ? \
// (*((string_a).pointer++) = *((string_b).pointer++), \
// *((string_a).pointer++) = *((string_b).pointer++)) : \
// (*(string_b).pointer & 0xF0) == 0xE0 ? \
// (*((string_a).pointer++) = *((string_b).pointer++), \
// *((string_a).pointer++) = *((string_b).pointer++), \
// *((string_a).pointer++) = *((string_b).pointer++)) : \
// (*(string_b).pointer & 0xF8) == 0xF0 ? \
// (*((string_a).pointer++) = *((string_b).pointer++), \
// *((string_a).pointer++) = *((string_b).pointer++), \
// *((string_a).pointer++) = *((string_b).pointer++), \
// *((string_a).pointer++) = *((string_b).pointer++)) : 0)
//
// /*
// * Stack and queue management.
// */
//
// yaml_DECLARE(int)
// yaml_stack_extend(void **start, void **top, void **end);
//
// yaml_DECLARE(int)
// yaml_queue_extend(void **start, void **head, void **tail, void **end);
//
// #define STACK_INIT(context,stack,size) \
// (((stack).start = yaml_malloc((size)*sizeof(*(stack).start))) ? \
// ((stack).top = (stack).start, \
// (stack).end = (stack).start+(size), \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// #define STACK_DEL(context,stack) \
// (yaml_free((stack).start), \
// (stack).start = (stack).top = (stack).end = 0)
//
// #define STACK_EMPTY(context,stack) \
// ((stack).start == (stack).top)
//
// #define PUSH(context,stack,value) \
// (((stack).top != (stack).end \
// || yaml_stack_extend((void **)&(stack).start, \
// (void **)&(stack).top, (void **)&(stack).end)) ? \
// (*((stack).top++) = value, \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// #define POP(context,stack) \
// (*(--(stack).top))
//
// #define QUEUE_INIT(context,queue,size) \
// (((queue).start = yaml_malloc((size)*sizeof(*(queue).start))) ? \
// ((queue).head = (queue).tail = (queue).start, \
// (queue).end = (queue).start+(size), \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// #define QUEUE_DEL(context,queue) \
// (yaml_free((queue).start), \
// (queue).start = (queue).head = (queue).tail = (queue).end = 0)
//
// #define QUEUE_EMPTY(context,queue) \
// ((queue).head == (queue).tail)
//
// #define ENQUEUE(context,queue,value) \
// (((queue).tail != (queue).end \
// || yaml_queue_extend((void **)&(queue).start, (void **)&(queue).head, \
// (void **)&(queue).tail, (void **)&(queue).end)) ? \
// (*((queue).tail++) = value, \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// #define DEQUEUE(context,queue) \
// (*((queue).head++))
//
// #define QUEUE_INSERT(context,queue,index,value) \
// (((queue).tail != (queue).end \
// || yaml_queue_extend((void **)&(queue).start, (void **)&(queue).head, \
// (void **)&(queue).tail, (void **)&(queue).end)) ? \
// (memmove((queue).head+(index)+1,(queue).head+(index), \
// ((queue).tail-(queue).head-(index))*sizeof(*(queue).start)), \
// *((queue).head+(index)) = value, \
// (queue).tail++, \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// /*
// * Token initializers.
// */
//
// #define TOKEN_INIT(token,token_type,token_start_mark,token_end_mark) \
// (memset(&(token), 0, sizeof(yaml_token_t)), \
// (token).type = (token_type), \
// (token).start_mark = (token_start_mark), \
// (token).end_mark = (token_end_mark))
//
// #define STREAM_START_TOKEN_INIT(token,token_encoding,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_STREAM_START_TOKEN,(start_mark),(end_mark)), \
// (token).data.stream_start.encoding = (token_encoding))
//
// #define STREAM_END_TOKEN_INIT(token,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_STREAM_END_TOKEN,(start_mark),(end_mark)))
//
// #define ALIAS_TOKEN_INIT(token,token_value,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_ALIAS_TOKEN,(start_mark),(end_mark)), \
// (token).data.alias.value = (token_value))
//
// #define ANCHOR_TOKEN_INIT(token,token_value,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_ANCHOR_TOKEN,(start_mark),(end_mark)), \
// (token).data.anchor.value = (token_value))
//
// #define TAG_TOKEN_INIT(token,token_handle,token_suffix,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_TAG_TOKEN,(start_mark),(end_mark)), \
// (token).data.tag.handle = (token_handle), \
// (token).data.tag.suffix = (token_suffix))
//
// #define SCALAR_TOKEN_INIT(token,token_value,token_length,token_style,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_SCALAR_TOKEN,(start_mark),(end_mark)), \
// (token).data.scalar.value = (token_value), \
// (token).data.scalar.length = (token_length), \
// (token).data.scalar.style = (token_style))
//
// #define VERSION_DIRECTIVE_TOKEN_INIT(token,token_major,token_minor,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_VERSION_DIRECTIVE_TOKEN,(start_mark),(end_mark)), \
// (token).data.version_directive.major = (token_major), \
// (token).data.version_directive.minor = (token_minor))
//
// #define TAG_DIRECTIVE_TOKEN_INIT(token,token_handle,token_prefix,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_TAG_DIRECTIVE_TOKEN,(start_mark),(end_mark)), \
// (token).data.tag_directive.handle = (token_handle), \
// (token).data.tag_directive.prefix = (token_prefix))
//
// /*
// * Event initializers.
// */
//
// #define EVENT_INIT(event,event_type,event_start_mark,event_end_mark) \
// (memset(&(event), 0, sizeof(yaml_event_t)), \
// (event).type = (event_type), \
// (event).start_mark = (event_start_mark), \
// (event).end_mark = (event_end_mark))
//
// #define STREAM_START_EVENT_INIT(event,event_encoding,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_STREAM_START_EVENT,(start_mark),(end_mark)), \
// (event).data.stream_start.encoding = (event_encoding))
//
// #define STREAM_END_EVENT_INIT(event,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_STREAM_END_EVENT,(start_mark),(end_mark)))
//
// #define DOCUMENT_START_EVENT_INIT(event,event_version_directive, \
// event_tag_directives_start,event_tag_directives_end,event_implicit,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_DOCUMENT_START_EVENT,(start_mark),(end_mark)), \
// (event).data.document_start.version_directive = (event_version_directive), \
// (event).data.document_start.tag_directives.start = (event_tag_directives_start), \
// (event).data.document_start.tag_directives.end = (event_tag_directives_end), \
// (event).data.document_start.implicit = (event_implicit))
//
// #define DOCUMENT_END_EVENT_INIT(event,event_implicit,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_DOCUMENT_END_EVENT,(start_mark),(end_mark)), \
// (event).data.document_end.implicit = (event_implicit))
//
// #define ALIAS_EVENT_INIT(event,event_anchor,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_ALIAS_EVENT,(start_mark),(end_mark)), \
// (event).data.alias.anchor = (event_anchor))
//
// #define SCALAR_EVENT_INIT(event,event_anchor,event_tag,event_value,event_length, \
// event_plain_implicit, event_quoted_implicit,event_style,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_SCALAR_EVENT,(start_mark),(end_mark)), \
// (event).data.scalar.anchor = (event_anchor), \
// (event).data.scalar.tag = (event_tag), \
// (event).data.scalar.value = (event_value), \
// (event).data.scalar.length = (event_length), \
// (event).data.scalar.plain_implicit = (event_plain_implicit), \
// (event).data.scalar.quoted_implicit = (event_quoted_implicit), \
// (event).data.scalar.style = (event_style))
//
// #define SEQUENCE_START_EVENT_INIT(event,event_anchor,event_tag, \
// event_implicit,event_style,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_SEQUENCE_START_EVENT,(start_mark),(end_mark)), \
// (event).data.sequence_start.anchor = (event_anchor), \
// (event).data.sequence_start.tag = (event_tag), \
// (event).data.sequence_start.implicit = (event_implicit), \
// (event).data.sequence_start.style = (event_style))
//
// #define SEQUENCE_END_EVENT_INIT(event,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_SEQUENCE_END_EVENT,(start_mark),(end_mark)))
//
// #define MAPPING_START_EVENT_INIT(event,event_anchor,event_tag, \
// event_implicit,event_style,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_MAPPING_START_EVENT,(start_mark),(end_mark)), \
// (event).data.mapping_start.anchor = (event_anchor), \
// (event).data.mapping_start.tag = (event_tag), \
// (event).data.mapping_start.implicit = (event_implicit), \
// (event).data.mapping_start.style = (event_style))
//
// #define MAPPING_END_EVENT_INIT(event,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_MAPPING_END_EVENT,(start_mark),(end_mark)))
//
// /*
// * Document initializer.
// */
//
// #define DOCUMENT_INIT(document,document_nodes_start,document_nodes_end, \
// document_version_directive,document_tag_directives_start, \
// document_tag_directives_end,document_start_implicit, \
// document_end_implicit,document_start_mark,document_end_mark) \
// (memset(&(document), 0, sizeof(yaml_document_t)), \
// (document).nodes.start = (document_nodes_start), \
// (document).nodes.end = (document_nodes_end), \
// (document).nodes.top = (document_nodes_start), \
// (document).version_directive = (document_version_directive), \
// (document).tag_directives.start = (document_tag_directives_start), \
// (document).tag_directives.end = (document_tag_directives_end), \
// (document).start_implicit = (document_start_implicit), \
// (document).end_implicit = (document_end_implicit), \
// (document).start_mark = (document_start_mark), \
// (document).end_mark = (document_end_mark))
//
// /*
// * Node initializers.
// */
//
// #define NODE_INIT(node,node_type,node_tag,node_start_mark,node_end_mark) \
// (memset(&(node), 0, sizeof(yaml_node_t)), \
// (node).type = (node_type), \
// (node).tag = (node_tag), \
// (node).start_mark = (node_start_mark), \
// (node).end_mark = (node_end_mark))
//
// #define SCALAR_NODE_INIT(node,node_tag,node_value,node_length, \
// node_style,start_mark,end_mark) \
// (NODE_INIT((node),yaml_SCALAR_NODE,(node_tag),(start_mark),(end_mark)), \
// (node).data.scalar.value = (node_value), \
// (node).data.scalar.length = (node_length), \
// (node).data.scalar.style = (node_style))
//
// #define SEQUENCE_NODE_INIT(node,node_tag,node_items_start,node_items_end, \
// node_style,start_mark,end_mark) \
// (NODE_INIT((node),yaml_SEQUENCE_NODE,(node_tag),(start_mark),(end_mark)), \
// (node).data.sequence.items.start = (node_items_start), \
// (node).data.sequence.items.end = (node_items_end), \
// (node).data.sequence.items.top = (node_items_start), \
// (node).data.sequence.style = (node_style))
//
// #define MAPPING_NODE_INIT(node,node_tag,node_pairs_start,node_pairs_end, \
// node_style,start_mark,end_mark) \
// (NODE_INIT((node),yaml_MAPPING_NODE,(node_tag),(start_mark),(end_mark)), \
// (node).data.mapping.pairs.start = (node_pairs_start), \
// (node).data.mapping.pairs.end = (node_pairs_end), \
// (node).data.mapping.pairs.top = (node_pairs_start), \
// (node).data.mapping.style = (node_style))
//
================================================
FILE: vendor/github.com/cloudfoundry-incubator/candiedyaml/yamlh.go
================================================
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"fmt"
"io"
)
/** The version directive data (a "%YAML <major>.<minor>" directive). */
type yaml_version_directive_t struct {
	major int // The major version number
	minor int // The minor version number
}

/** The tag directive data (a "%TAG <handle> <prefix>" directive). */
type yaml_tag_directive_t struct {
	handle []byte // The tag handle
	prefix []byte // The tag prefix
}
/** The stream encoding. */
type yaml_encoding_t int

const (
	/** Let the parser choose the encoding. */
	yaml_ANY_ENCODING yaml_encoding_t = iota
	/** The default UTF-8 encoding. */
	yaml_UTF8_ENCODING
	/** The UTF-16-LE encoding with BOM. */
	yaml_UTF16LE_ENCODING
	/** The UTF-16-BE encoding with BOM. */
	yaml_UTF16BE_ENCODING
)

/** Line break types. */
type yaml_break_t int

const (
	yaml_ANY_BREAK  yaml_break_t = iota /** Let the parser choose the break type. */
	yaml_CR_BREAK                       /** Use CR for line breaks (Mac style). */
	yaml_LN_BREAK                       /** Use LN for line breaks (Unix style). */
	yaml_CRLN_BREAK                     /** Use CR LN for line breaks (DOS style). */
)
/** Many bad things could happen with the parser and emitter.
 *
 * Each stage of the pipeline (reader, scanner, parser, composer on the
 * input side; emitter, writer on the output side) has its own error
 * code so failures can be attributed to the stage that produced them. */
type YAML_error_type_t int

const (
	/** No error is produced. */
	yaml_NO_ERROR YAML_error_type_t = iota
	/** Cannot allocate or reallocate a block of memory. */
	yaml_MEMORY_ERROR
	/** Cannot read or decode the input stream. */
	yaml_READER_ERROR
	/** Cannot scan the input stream. */
	yaml_SCANNER_ERROR
	/** Cannot parse the input stream. */
	yaml_PARSER_ERROR
	/** Cannot compose a YAML document. */
	yaml_COMPOSER_ERROR
	/** Cannot write to the output stream. */
	yaml_WRITER_ERROR
	/** Cannot emit a YAML stream. */
	yaml_EMITTER_ERROR
)
/** The pointer position: a location within the input or output stream. */
type YAML_mark_t struct {
	/** The position index (byte offset). */
	index int

	/** The position line. */
	line int

	/** The position column. */
	column int
}

// String renders the mark as "line L, column C" for error messages.
// The byte index is intentionally omitted.
func (m YAML_mark_t) String() string {
	return fmt.Sprintf("line %d, column %d", m.line, m.column)
}
/** @} */

/**
 * @defgroup styles Node Styles
 * @{
 */

/** The base type for all node-style enumerations below. */
type yaml_style_t int

/** Scalar styles. */
type yaml_scalar_style_t yaml_style_t

const (
	/** Let the emitter choose the style. */
	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
	/** The plain scalar style. */
	yaml_PLAIN_SCALAR_STYLE
	/** The single-quoted scalar style. */
	yaml_SINGLE_QUOTED_SCALAR_STYLE
	/** The double-quoted scalar style. */
	yaml_DOUBLE_QUOTED_SCALAR_STYLE
	/** The literal scalar style. */
	yaml_LITERAL_SCALAR_STYLE
	/** The folded scalar style. */
	yaml_FOLDED_SCALAR_STYLE
)

/** Sequence styles. */
type yaml_sequence_style_t yaml_style_t

const (
	/** Let the emitter choose the style. */
	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
	/** The block sequence style. */
	yaml_BLOCK_SEQUENCE_STYLE
	/** The flow sequence style. */
	yaml_FLOW_SEQUENCE_STYLE
)

/** Mapping styles. */
type yaml_mapping_style_t yaml_style_t

const (
	/** Let the emitter choose the style. */
	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
	/** The block mapping style. */
	yaml_BLOCK_MAPPING_STYLE
	/** The flow mapping style. */
	yaml_FLOW_MAPPING_STYLE
	/* yaml_FLOW_SET_MAPPING_STYLE */
)
/** @} */

/**
 * @defgroup tokens Tokens
 * @{
 */

/** Token types. */
type yaml_token_type_t int

const (
	/** An empty token. */
	yaml_NO_TOKEN yaml_token_type_t = iota
	/** A STREAM-START token. */
	yaml_STREAM_START_TOKEN
	/** A STREAM-END token. */
	yaml_STREAM_END_TOKEN
	/** A VERSION-DIRECTIVE token. */
	yaml_VERSION_DIRECTIVE_TOKEN
	/** A TAG-DIRECTIVE token. */
	yaml_TAG_DIRECTIVE_TOKEN
	/** A DOCUMENT-START token. */
	yaml_DOCUMENT_START_TOKEN
	/** A DOCUMENT-END token. */
	yaml_DOCUMENT_END_TOKEN
	/** A BLOCK-SEQUENCE-START token. */
	yaml_BLOCK_SEQUENCE_START_TOKEN
	/** A BLOCK-MAPPING-START token. */
	yaml_BLOCK_MAPPING_START_TOKEN
	/** A BLOCK-END token. */
	yaml_BLOCK_END_TOKEN
	/** A FLOW-SEQUENCE-START token. */
	yaml_FLOW_SEQUENCE_START_TOKEN
	/** A FLOW-SEQUENCE-END token. */
	yaml_FLOW_SEQUENCE_END_TOKEN
	/** A FLOW-MAPPING-START token. */
	yaml_FLOW_MAPPING_START_TOKEN
	/** A FLOW-MAPPING-END token. */
	yaml_FLOW_MAPPING_END_TOKEN
	/** A BLOCK-ENTRY token. */
	yaml_BLOCK_ENTRY_TOKEN
	/** A FLOW-ENTRY token. */
	yaml_FLOW_ENTRY_TOKEN
	/** A KEY token. */
	yaml_KEY_TOKEN
	/** A VALUE token. */
	yaml_VALUE_TOKEN
	/** An ALIAS token. */
	yaml_ALIAS_TOKEN
	/** An ANCHOR token. */
	yaml_ANCHOR_TOKEN
	/** A TAG token. */
	yaml_TAG_TOKEN
	/** A SCALAR token. */
	yaml_SCALAR_TOKEN
)
/** The token structure.
 *
 * The C original keeps the per-token data in a union; this Go port
 * flattens every variant's fields into one struct, so only the fields
 * relevant to token_type are meaningful for a given token. */
type yaml_token_t struct {
	/** The token type. */
	token_type yaml_token_type_t

	/** The stream encoding (for yaml_STREAM_START_TOKEN). */
	encoding yaml_encoding_t

	/** The alias/anchor name, scalar value, or tag handle (for
	yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN and
	yaml_TAG_TOKEN respectively). */
	value []byte

	/** The tag suffix (for yaml_TAG_TOKEN). */
	suffix []byte

	/** The scalar style (for yaml_SCALAR_TOKEN). */
	style yaml_scalar_style_t

	/** The version directive (for yaml_VERSION_DIRECTIVE_TOKEN). */
	version_directive yaml_version_directive_t

	/** The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). */
	prefix []byte

	/** The beginning of the token. */
	start_mark YAML_mark_t

	/** The end of the token. */
	end_mark YAML_mark_t

	// NOTE(review): overlaps with version_directive for
	// yaml_VERSION_DIRECTIVE_TOKEN — confirm which one the scanner
	// actually populates.
	major, minor int
}
/**
 * @defgroup events Events
 * @{
 */

/** Event types. */
type yaml_event_type_t int

const (
	/** An empty event. */
	yaml_NO_EVENT yaml_event_type_t = iota
	/** A STREAM-START event. */
	yaml_STREAM_START_EVENT
	/** A STREAM-END event. */
	yaml_STREAM_END_EVENT
	/** A DOCUMENT-START event. */
	yaml_DOCUMENT_START_EVENT
	/** A DOCUMENT-END event. */
	yaml_DOCUMENT_END_EVENT
	/** An ALIAS event. */
	yaml_ALIAS_EVENT
	/** A SCALAR event. */
	yaml_SCALAR_EVENT
	/** A SEQUENCE-START event. */
	yaml_SEQUENCE_START_EVENT
	/** A SEQUENCE-END event. */
	yaml_SEQUENCE_END_EVENT
	/** A MAPPING-START event. */
	yaml_MAPPING_START_EVENT
	/** A MAPPING-END event. */
	yaml_MAPPING_END_EVENT
)
/** The event structure.
 *
 * As with yaml_token_t, the C union is flattened here: only the fields
 * relevant to event_type are meaningful for a given event. */
type yaml_event_t struct {
	/** The event type. */
	event_type yaml_event_type_t

	/** The stream encoding (for yaml_STREAM_START_EVENT). */
	encoding yaml_encoding_t

	/** The version directive (for yaml_DOCUMENT_START_EVENT);
	presumably nil when the document has no %YAML directive — confirm
	against the parser. */
	version_directive *yaml_version_directive_t

	/** The tag directives (for yaml_DOCUMENT_START_EVENT). */
	tag_directives []yaml_tag_directive_t

	/** Is the document indicator implicit? (for
	yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT,
	yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */
	implicit bool

	/** The anchor (for yaml_ALIAS_EVENT, yaml_SCALAR_EVENT,
	yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */
	anchor []byte

	/** The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT,
	yaml_MAPPING_START_EVENT). */
	tag []byte

	/** The scalar value (for yaml_SCALAR_EVENT). */
	value []byte

	/** Is the tag optional for the plain style? */
	plain_implicit bool

	/** Is the tag optional for any non-plain style? */
	quoted_implicit bool

	/** The node style; interpreted as a scalar, sequence, or mapping
	style depending on event_type. */
	style yaml_style_t

	/** The beginning and end of the event. */
	start_mark, end_mark YAML_mark_t
}
/**
 * @defgroup nodes Nodes
 * @{
 */

const (
	/** The tag @c !!null with the only possible value: @c null. */
	yaml_NULL_TAG = "tag:yaml.org,2002:null"
	/** The tag @c !!bool with the values: @c true and @c false. */
	yaml_BOOL_TAG = "tag:yaml.org,2002:bool"
	/** The tag @c !!str for string values. */
	yaml_STR_TAG = "tag:yaml.org,2002:str"
	/** The tag @c !!int for integer values. */
	yaml_INT_TAG = "tag:yaml.org,2002:int"
	/** The tag @c !!float for float values. */
	yaml_FLOAT_TAG = "tag:yaml.org,2002:float"
	/** The tag @c !!timestamp for date and time values. */
	yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp"
	/** The tag @c !!seq is used to denote sequences. */
	yaml_SEQ_TAG = "tag:yaml.org,2002:seq"
	/** The tag @c !!map is used to denote mapping. */
	yaml_MAP_TAG = "tag:yaml.org,2002:map"
	/** The default scalar tag is @c !!str. */
	yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG
	/** The default sequence tag is @c !!seq. */
	yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG
	/** The default mapping tag is @c !!map. */
	yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG
	/** The tag @c !!binary for base64-encoded binary values. */
	yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
)
/** Node types. */
type yaml_node_type_t int

const (
	/** An empty node. */
	yaml_NO_NODE yaml_node_type_t = iota
	/** A scalar node. */
	yaml_SCALAR_NODE
	/** A sequence node. */
	yaml_SEQUENCE_NODE
	/** A mapping node. */
	yaml_MAPPING_NODE
)

/** An element of a sequence node: an index into the document's node list. */
type yaml_node_item_t int

/** An element of a mapping node: a pair of indices into the document's
 * node list. */
type yaml_node_pair_t struct {
	/** The key of the element. */
	key int
	/** The value of the element. */
	value int
}
/** The node structure.
 *
 * Exactly one of the scalar, sequence, or mapping members is meaningful,
 * selected by node_type. */
type yaml_node_t struct {
	/** The node type. */
	node_type yaml_node_type_t

	/** The node tag. */
	tag []byte

	/** The scalar parameters (for @c yaml_SCALAR_NODE). */
	scalar struct {
		/** The scalar value. */
		value []byte
		/** The scalar style. */
		style yaml_scalar_style_t
	}

	/** The sequence parameters (for @c yaml_SEQUENCE_NODE). */
	sequence struct {
		/** The stack of sequence items. */
		items []yaml_node_item_t
		/** The sequence style. */
		style yaml_sequence_style_t
	}

	/** The mapping parameters (for @c yaml_MAPPING_NODE). */
	mapping struct {
		/** The stack of mapping pairs (key, value). */
		pairs []yaml_node_pair_t
		/** The mapping style. */
		style yaml_mapping_style_t
	}

	/** The beginning of the node. */
	start_mark YAML_mark_t

	/** The end of the node. */
	end_mark YAML_mark_t
}
/** The document structure. */
type yaml_document_t struct {
	/** The document nodes. */
	nodes []yaml_node_t

	/** The version directive; presumably nil when the document carries
	no %YAML directive — confirm against the composer. */
	version_directive *yaml_version_directive_t

	/** The list of tag directives. */
	tags []yaml_tag_directive_t

	/** Is the document start indicator implicit? */
	start_implicit bool

	/** Is the document end indicator implicit? */
	end_implicit bool

	/** The beginning of the document. */
	start_mark YAML_mark_t

	/** The end of the document. */
	end_mark YAML_mark_t
}
/**
 * The prototype of a read handler.
 *
 * The read handler is called when the parser needs more bytes from the
 * input source. It should write at most len(buffer) bytes into buffer
 * and return the number of bytes actually written.
 *
 * Unlike the C prototype this comment was adapted from (which took a
 * data pointer, a size, and an out-parameter for the bytes read), the
 * Go handler reports failure through err and presumably signals end of
 * input via io.EOF, following the io.Reader convention.
 * NOTE(review): confirm the EOF convention against the parser's read
 * loop.
 */
type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
/**
 * This structure holds information about a potential simple key
 * (a key that requires no '?' indicator) tracked by the scanner.
 */
type yaml_simple_key_t struct {
	/** Is a simple key possible at this position? */
	possible bool
	/** Is a simple key required? */
	required bool
	/** The number of the token the key would precede. */
	token_number int
	/** The position mark. */
	mark YAML_mark_t
}
/**
 * The states of the parser state machine; each constant names the event the
 * parser expects to produce next.
 */
type yaml_parser_state_t int

const (
	/** Expect STREAM-START. */
	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
	/** Expect the beginning of an implicit document. */
	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
	/** Expect DOCUMENT-START. */
	yaml_PARSE_DOCUMENT_START_STATE
	/** Expect the content of a document. */
	yaml_PARSE_DOCUMENT_CONTENT_STATE
	/** Expect DOCUMENT-END. */
	yaml_PARSE_DOCUMENT_END_STATE
	/** Expect a block node. */
	yaml_PARSE_BLOCK_NODE_STATE
	/** Expect a block node or indentless sequence. */
	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE
	/** Expect a flow node. */
	yaml_PARSE_FLOW_NODE_STATE
	/** Expect the first entry of a block sequence. */
	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
	/** Expect an entry of a block sequence. */
	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
	/** Expect an entry of an indentless sequence. */
	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
	/** Expect the first key of a block mapping. */
	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
	/** Expect a block mapping key. */
	yaml_PARSE_BLOCK_MAPPING_KEY_STATE
	/** Expect a block mapping value. */
	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
	/** Expect the first entry of a flow sequence. */
	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
	/** Expect an entry of a flow sequence. */
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
	/** Expect a key of an ordered mapping. */
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
	/** Expect a value of an ordered mapping. */
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
	/** Expect the end of an ordered mapping entry. */
	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
	/** Expect the first key of a flow mapping. */
	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
	/** Expect a key of a flow mapping. */
	yaml_PARSE_FLOW_MAPPING_KEY_STATE
	/** Expect a value of a flow mapping. */
	yaml_PARSE_FLOW_MAPPING_VALUE_STATE
	/** Expect an empty value of a flow mapping. */
	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE
	/** Expect nothing (the stream is finished). */
	yaml_PARSE_END_STATE
)
/**
 * This structure holds aliases data: the mapping from an anchor name to the
 * node it refers to, used when resolving alias (*ref) events.
 */
type yaml_alias_data_t struct {
	/** The anchor. */
	anchor []byte
	/** The node id the anchor refers to. */
	index int
	/** The anchor mark. */
	mark YAML_mark_t
}
/**
 * The parser structure.
 *
 * All members are internal. Manage the structure using the @c yaml_parser_
 * family of functions.
 */
type yaml_parser_t struct {
	/**
	 * @name Error handling
	 * @{
	 */
	/** Error type. */
	error YAML_error_type_t
	/** Error description. */
	problem string
	/** The byte about which the problem occurred. */
	problem_offset int
	/** The problematic value (@c -1 is none). */
	problem_value int
	/** The problem position. */
	problem_mark YAML_mark_t
	/** The error context. */
	context string
	/** The context position. */
	context_mark YAML_mark_t
	/**
	 * @}
	 */
	/**
	 * @name Reader stuff
	 * @{
	 */
	/** Read handler. */
	read_handler yaml_read_handler_t
	/** Reader input data. */
	input_reader io.Reader
	// In-memory input and the read cursor into it (used instead of
	// input_reader when parsing from a byte slice).
	input     []byte
	input_pos int
	/** EOF flag */
	eof bool
	/** The working buffer (decoded characters) and the read cursor into it. */
	buffer     []byte
	buffer_pos int
	/* The number of unread characters in the buffer. */
	unread int
	/** The raw (undecoded) buffer and the read cursor into it. */
	raw_buffer     []byte
	raw_buffer_pos int
	/** The input encoding. */
	encoding yaml_encoding_t
	/** The offset of the current position (in bytes). */
	offset int
	/** The mark of the current position. */
	mark YAML_mark_t
	/**
	 * @}
	 */
	/**
	 * @name Scanner stuff
	 * @{
	 */
	/** Have we started to scan the input stream? */
	stream_start_produced bool
	/** Have we reached the end of the input stream? */
	stream_end_produced bool
	/** The number of unclosed '[' and '{' indicators. */
	flow_level int
	/** The tokens queue and the index of its first live element. */
	tokens      []yaml_token_t
	tokens_head int
	/** The number of tokens fetched from the queue. */
	tokens_parsed int
	/* Does the tokens queue contain a token ready for dequeueing. */
	token_available bool
	/** The indentation levels stack. */
	indents []int
	/** The current indentation level. */
	indent int
	/** May a simple key occur at the current position? */
	simple_key_allowed bool
	/** The stack of simple keys, one per flow level. */
	simple_keys []yaml_simple_key_t
	/**
	 * @}
	 */
	/**
	 * @name Parser stuff
	 * @{
	 */
	/** The parser states stack. */
	states []yaml_parser_state_t
	/** The current parser state. */
	state yaml_parser_state_t
	/** The stack of marks. */
	marks []YAML_mark_t
	/** The list of TAG directives. */
	tag_directives []yaml_tag_directive_t
	/**
	 * @}
	 */
	/**
	 * @name Dumper stuff
	 * @{
	 */
	/** The alias data. */
	aliases []yaml_alias_data_t
	/** The currently parsed document. */
	document *yaml_document_t
	/**
	 * @}
	 */
}
/**
 * The prototype of a write handler.
 *
 * The write handler is called when the emitter needs to flush the accumulated
 * characters to the output. The handler should write @a size bytes of the
 * @a buffer to the output.
 *
 * @param[in,out] data A pointer to an application data specified by
 *      yaml_emitter_set_output().
 * @param[in] buffer The buffer with bytes to be written.
 * @param[in] size The size of the buffer.
 *
 * @returns On success, the handler should return @c 1. If the handler failed,
 * the returned value should be @c 0.
 */
// NOTE: in this Go port the C (data, buffer, size) signature and int result
// are replaced by (emitter, buffer) returning error.
type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
/** The emitter states; each constant names the event the emitter expects next. */
type yaml_emitter_state_t int

const (
	/** Expect STREAM-START. */
	yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
	/** Expect the first DOCUMENT-START or STREAM-END. */
	yaml_EMIT_FIRST_DOCUMENT_START_STATE
	/** Expect DOCUMENT-START or STREAM-END. */
	yaml_EMIT_DOCUMENT_START_STATE
	/** Expect the content of a document. */
	yaml_EMIT_DOCUMENT_CONTENT_STATE
	/** Expect DOCUMENT-END. */
	yaml_EMIT_DOCUMENT_END_STATE
	/** Expect the first item of a flow sequence. */
	yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
	/** Expect an item of a flow sequence. */
	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE
	/** Expect the first key of a flow mapping. */
	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
	/** Expect a key of a flow mapping. */
	yaml_EMIT_FLOW_MAPPING_KEY_STATE
	/** Expect a value for a simple key of a flow mapping. */
	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE
	/** Expect a value of a flow mapping. */
	yaml_EMIT_FLOW_MAPPING_VALUE_STATE
	/** Expect the first item of a block sequence. */
	yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
	/** Expect an item of a block sequence. */
	yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE
	/** Expect the first key of a block mapping. */
	yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
	/** Expect the key of a block mapping. */
	yaml_EMIT_BLOCK_MAPPING_KEY_STATE
	/** Expect a value for a simple key of a block mapping. */
	yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE
	/** Expect a value of a block mapping. */
	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE
	/** Expect nothing (the stream is finished). */
	yaml_EMIT_END_STATE
)
/**
 * The emitter structure.
 *
 * All members are internal. Manage the structure using the @c yaml_emitter_
 * family of functions.
 */
type yaml_emitter_t struct {
	/**
	 * @name Error handling
	 * @{
	 */
	/** Error type. */
	error YAML_error_type_t
	/** Error description. */
	problem string
	/**
	 * @}
	 */
	/**
	 * @name Writer stuff
	 * @{
	 */
	/** Write handler. */
	write_handler yaml_write_handler_t
	/** Standard (string or file) output data. */
	output_buffer *[]byte
	output_writer io.Writer
	/** The working buffer (encoded output) and the write cursor into it. */
	buffer     []byte
	buffer_pos int
	/** The raw buffer and the write cursor into it. */
	raw_buffer     []byte
	raw_buffer_pos int
	/** The stream encoding. */
	encoding yaml_encoding_t
	/**
	 * @}
	 */
	/**
	 * @name Emitter stuff
	 * @{
	 */
	/** If the output is in the canonical style? */
	canonical bool
	/** The number of indentation spaces. */
	best_indent int
	/** The preferred width of the output lines. */
	best_width int
	/** Allow unescaped non-ASCII characters? */
	unicode bool
	/** The preferred line break. */
	line_break yaml_break_t
	/** The stack of states. */
	states []yaml_emitter_state_t
	/** The current emitter state. */
	state yaml_emitter_state_t
	/** The event queue and the index of its first live element. */
	events      []yaml_event_t
	events_head int
	/** The stack of indentation levels. */
	indents []int
	/** The list of tag directives. */
	tag_directives []yaml_tag_directive_t
	/** The current indentation level. */
	indent int
	/** The current flow level. */
	flow_level int
	/** Is it the document root context? */
	root_context bool
	/** Is it a sequence context? */
	sequence_context bool
	/** Is it a mapping context? */
	mapping_context bool
	/** Is it a simple mapping key context? */
	simple_key_context bool
	/** The current line. */
	line int
	/** The current column. */
	column int
	/** If the last character was a whitespace? */
	whitespace bool
	/** If the last character was an indentation character (' ', '-', '?', ':')? */
	indention bool
	/** If an explicit document end is required? */
	open_ended bool
	/** Anchor analysis. */
	anchor_data struct {
		/** The anchor value. */
		anchor []byte
		/** Is it an alias? */
		alias bool
	}
	/** Tag analysis. */
	tag_data struct {
		/** The tag handle. */
		handle []byte
		/** The tag suffix. */
		suffix []byte
	}
	/** Scalar analysis. */
	scalar_data struct {
		/** The scalar value. */
		value []byte
		/** Does the scalar contain line breaks? */
		multiline bool
		/** Can the scalar be expressed in the flow plain style? */
		flow_plain_allowed bool
		/** Can the scalar be expressed in the block plain style? */
		block_plain_allowed bool
		/** Can the scalar be expressed in the single quoted style? */
		single_quoted_allowed bool
		/** Can the scalar be expressed in the literal or folded styles? */
		block_allowed bool
		/** The output style. */
		style yaml_scalar_style_t
	}
	/**
	 * @}
	 */
	/**
	 * @name Dumper stuff
	 * @{
	 */
	/** If the stream was already opened? */
	opened bool
	/** If the stream was already closed? */
	closed bool
	/** The information associated with the document nodes. */
	anchors *struct {
		/** The number of references. */
		references int
		/** The anchor id. */
		anchor int
		/** If the node has been emitted? */
		serialized bool
	}
	/** The last assigned anchor id. */
	last_anchor_id int
	/** The currently emitted document. */
	document *yaml_document_t
	/**
	 * @}
	 */
}
================================================
FILE: vendor/github.com/davecgh/go-spew/.gitignore
================================================
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
================================================
FILE: vendor/github.com/davecgh/go-spew/.travis.yml
================================================
language: go
go: 1.2
install:
- go get -v code.google.com/p/go.tools/cmd/cover
script:
- go test -v -tags=testcgo ./spew -covermode=count -coverprofile=profile.cov
after_success:
- go get -v github.com/mattn/goveralls
- export PATH=$PATH:$HOME/gopath/bin
- goveralls -coverprofile=profile.cov -service=travis-ci
================================================
FILE: vendor/github.com/davecgh/go-spew/LICENSE
================================================
Copyright (c) 2012-2013 Dave Collins
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
================================================
FILE: vendor/github.com/davecgh/go-spew/README.md
================================================
go-spew
=======
[![Build Status](https://travis-ci.org/davecgh/go-spew.png?branch=master)]
(https://travis-ci.org/davecgh/go-spew) [![Coverage Status]
(https://coveralls.io/repos/davecgh/go-spew/badge.png?branch=master)]
(https://coveralls.io/r/davecgh/go-spew?branch=master)
Go-spew implements a deep pretty printer for Go data structures to aid in
debugging. A comprehensive suite of tests with 100% test coverage is provided
to ensure proper functionality. See `test_coverage.txt` for the gocov coverage
report. Go-spew is licensed under the liberal ISC license, so it may be used in
open source or commercial projects.
If you're interested in reading about how this package came to life and some
of the challenges involved in providing a deep pretty printer, there is a blog
post about it
[here](https://blog.cyphertite.com/go-spew-a-journey-into-dumping-go-data-structures/).
## Documentation
[![GoDoc](https://godoc.org/github.com/davecgh/go-spew/spew?status.png)]
(http://godoc.org/github.com/davecgh/go-spew/spew)
Full `go doc` style documentation for the project can be viewed online without
installing this package by using the excellent GoDoc site here:
http://godoc.org/github.com/davecgh/go-spew/spew
You can also view the documentation locally once the package is installed with
the `godoc` tool by running `godoc -http=":6060"` and pointing your browser to
http://localhost:6060/pkg/github.com/davecgh/go-spew/spew
## Installation
```bash
$ go get -u github.com/davecgh/go-spew/spew
```
## Quick Start
To dump a variable with full newlines, indentation, type, and pointer
information use Dump, Fdump, or Sdump:
```Go
spew.Dump(myVar1, myVar2, ...)
spew.Fdump(someWriter, myVar1, myVar2, ...)
str := spew.Sdump(myVar1, myVar2, ...)
```
Alternatively, if you would prefer to use format strings with a compacted inline
printing style, use the convenience wrappers Printf, Fprintf, etc with %v (most
compact), %+v (adds pointer addresses), %#v (adds types), or %#+v (adds types
and pointer addresses):
```Go
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
```
## Sample Dump Output
```
(main.Foo) {
unexportedField: (*main.Bar)(0xf84002e210)({
flag: (main.Flag) flagTwo,
data: (uintptr) &lt;nil&gt;
}),
ExportedField: (map[interface {}]interface {}) {
(string) "one": (bool) true
}
}
([]uint8) {
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
00000020 31 32 |12|
}
```
## Sample Formatter Output
Double pointer to a uint8:
```
%v: <**>5
%+v: <**>(0xf8400420d0->0xf8400420c8)5
%#v: (**uint8)5
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
```
Pointer to circular struct with a uint8 field and a pointer to itself:
```
%v: <*>{1 <*>}
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)}
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)}
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)}
```
## Configuration Options
Configuration of spew is handled by fields in the ConfigState type. For
convenience, all of the top-level functions use a global state available via the
spew.Config global.
It is also possible to create a ConfigState instance that provides methods
equivalent to the top-level functions. This allows concurrent configuration
options. See the ConfigState documentation for more details.
```
* Indent
String to use for each indentation level for Dump functions.
It is a single space by default. A popular alternative is "\t".
* MaxDepth
Maximum number of levels to descend into nested data structures.
There is no limit by default.
* DisableMethods
Disables invocation of error and Stringer interface methods.
Method invocation is enabled by default.
* DisablePointerMethods
Disables invocation of error and Stringer interface methods on types
which only accept pointer receivers from non-pointer variables.
Pointer method invocation is enabled by default.
* ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default.
* SortKeys
Specifies map keys should be sorted before being printed. Use
this to have a more deterministic, diffable output. Note that
only native types (bool, int, uint, floats, uintptr and string)
and types which implement error or Stringer interfaces are supported,
with other types sorted according to the reflect.Value.String() output
which guarantees display stability. Natural map order is used by
default.
* SpewKeys
SpewKeys specifies that, as a last resort attempt, map keys should be
spewed to strings and sorted by those strings. This is only considered
if SortKeys is true.
```
## License
Go-spew is licensed under the liberal ISC License.
================================================
FILE: vendor/github.com/davecgh/go-spew/cov_report.sh
================================================
#!/bin/sh
# This script uses gocov to generate a test coverage report.
# The gocov tool may be obtained with the following command:
# go get github.com/axw/gocov/gocov
#
# It will be installed to $GOPATH/bin, so ensure that location is in your $PATH.
# Check for gocov.
if ! type gocov >/dev/null 2>&1; then
echo >&2 "This script requires the gocov tool."
echo >&2 "You may obtain it with the following command:"
echo >&2 "go get github.com/axw/gocov/gocov"
exit 1
fi
# Only run the cgo tests if gcc is installed.
if type gcc >/dev/null 2>&1; then
(cd spew && gocov test -tags testcgo | gocov report)
else
(cd spew && gocov test | gocov report)
fi
================================================
FILE: vendor/github.com/davecgh/go-spew/spew/common.go
================================================
/*
* Copyright (c) 2013 Dave Collins
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"io"
"reflect"
"sort"
"strconv"
"unsafe"
)
const (
	// ptrSize is the size of a pointer on the current arch, derived from
	// unsafe.Sizeof so it is correct on both 32- and 64-bit platforms.
	ptrSize = unsafe.Sizeof((*byte)(nil))
)
var (
	// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
	// internal reflect.Value fields. These values are valid before golang
	// commit ecccf07e7f9d which changed the format. They are also valid
	// after commit 82f48826c6c7 which changed the format again to mirror
	// the original format. Code in the init function updates these offsets
	// as necessary.
	offsetPtr    = uintptr(ptrSize)
	offsetScalar = uintptr(0)
	offsetFlag   = uintptr(ptrSize * 2)

	// flagKindWidth and flagKindShift indicate various bits that the
	// reflect package uses internally to track kind information.
	//
	// flagRO indicates whether or not the value field of a reflect.Value is
	// read-only.
	//
	// flagIndir indicates whether the value field of a reflect.Value is
	// the actual data or a pointer to the data.
	//
	// These values are valid before golang commit 90a7c3c86944 which
	// changed their positions. Code in the init function updates these
	// flags as necessary.
	flagKindWidth = uintptr(5)
	flagKindShift = uintptr(flagKindWidth - 1)
	flagRO        = uintptr(1 << 0)
	flagIndir     = uintptr(1 << 1)
)
func init() {
// Older versions of reflect.Value stored small integers directly in the
// ptr field (which is named val in the older versions). Versions
// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
// scalar for this purpose which unfortunately came before the flag
// field, so the offset of the flag field is different for those
// versions.
//
// This code constructs a new reflect.Value from a known small integer
// and checks if the size of the reflect.Value struct indicates it has
// the scalar field. When it does, the offsets are updated accordingly.
vv := reflect.ValueOf(0xf00)
if unsafe.Sizeof(vv) == (ptrSize * 4) {
offsetScalar = ptrSize * 2
offsetFlag = ptrSize * 3
}
// Commit 90a7c3c86944 changed the flag positions such that the low
// order bits are the kind. This code extracts the kind from the flags
// field and ensures it's the correct type. When it's not, the flag
// order has been changed to the newer format, so the flags are updated
// accordingly.
upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
upfv := *(*uintptr)(upf)
flagKindMask := uintptr((1<>flagKindShift != uintptr(reflect.Int) {
flagKindShift = 0
flagRO = 1 << 5
flagIndir = 1 << 6
}
}
// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
// the typical safety restrictions preventing access to unaddressable and
// unexported data. It works by digging the raw pointer to the underlying
// value out of the protected value and generating a new unprotected (unsafe)
// reflect.Value to it.
//
// This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields.
//
// NOTE(review): the exact statement order here matters — the flag word must be
// read before deciding between the indirect and scalar layouts, and the
// offsets consulted are the ones tuned by init() for the running Go release.
func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
	// Number of times the final value must be dereferenced; one extra level
	// is added below when the value is stored indirectly.
	indirects := 1
	vt := v.Type()
	// Raw pointer to the value's data word inside the reflect.Value header.
	upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
	// The flag word, used to detect the indirect-storage case.
	rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
	if rvf&flagIndir != 0 {
		// Data word holds a pointer to the value, not the value itself.
		vt = reflect.PtrTo(v.Type())
		indirects++
	} else if offsetScalar != 0 {
		// The value is in the scalar field when it's not one of the
		// reference types.
		switch vt.Kind() {
		case reflect.Uintptr:
		case reflect.Chan:
		case reflect.Func:
		case reflect.Map:
		case reflect.Ptr:
		case reflect.UnsafePointer:
		default:
			upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
				offsetScalar)
		}
	}
	// Rebuild an addressable, exported view of the same storage and peel
	// off the pointer levels introduced above.
	pv := reflect.NewAt(vt, upv)
	rv = pv
	for i := 0; i < indirects; i++ {
		rv = rv.Elem()
	}
	return rv
}
// Some constants in the form of bytes to avoid string overhead. This mirrors
// the technique used in the fmt package.
// Pre-built byte slices for the tokens spew writes most often, avoiding a
// string-to-[]byte conversion per write (mirrors the fmt package's technique).
//
// DEFECT FIXED: the angle-bracketed contents of several literals (e.g.
// "<nil>", "<max depth reached>") had been stripped by HTML-unaware text
// extraction, leaving empty strings; they are restored to the upstream
// go-spew values.
var (
	panicBytes            = []byte("(PANIC=")
	plusBytes             = []byte("+")
	iBytes                = []byte("i")
	trueBytes             = []byte("true")
	falseBytes            = []byte("false")
	interfaceBytes        = []byte("(interface {})")
	commaNewlineBytes     = []byte(",\n")
	newlineBytes          = []byte("\n")
	openBraceBytes        = []byte("{")
	openBraceNewlineBytes = []byte("{\n")
	closeBraceBytes       = []byte("}")
	asteriskBytes         = []byte("*")
	colonBytes            = []byte(":")
	colonSpaceBytes       = []byte(": ")
	openParenBytes        = []byte("(")
	closeParenBytes       = []byte(")")
	spaceBytes            = []byte(" ")
	pointerChainBytes     = []byte("->")
	nilAngleBytes         = []byte("<nil>")
	maxNewlineBytes       = []byte("<max depth reached>\n")
	maxShortBytes         = []byte("<max>")
	circularBytes         = []byte("<already shown>")
	circularShortBytes    = []byte("<shown>")
	invalidAngleBytes     = []byte("<invalid>")
	openBracketBytes      = []byte("[")
	closeBracketBytes     = []byte("]")
	percentBytes          = []byte("%")
	precisionBytes        = []byte(".")
	openAngleBytes        = []byte("<")
	closeAngleBytes       = []byte(">")
	openMapBytes          = []byte("map[")
	closeMapBytes         = []byte("]")
	lenEqualsBytes        = []byte("len=")
	capEqualsBytes        = []byte("cap=")
)
// hexDigits maps a value 0-15 to its lowercase hexadecimal digit; used by
// printHexPtr when rendering pointer addresses.
var hexDigits = "0123456789abcdef"
// catchPanic is installed via defer around handleMethods' calls into
// user-supplied Error/String methods; when such a method panics, it renders
// the recovered value as "(PANIC=<value>)" on w instead of crashing the dump.
// The reflect.Value argument is kept for interface symmetry and is unused.
func catchPanic(w io.Writer, v reflect.Value) {
	recovered := recover()
	if recovered == nil {
		return
	}
	w.Write(panicBytes)
	fmt.Fprintf(w, "%v", recovered)
	w.Write(closeParenBytes)
}
// handleMethods attempts to call the Error and String methods on the underlying
// type the passed reflect.Value represents and outputs the result to Writer w.
//
// It handles panics in any called methods by catching and displaying the error
// as the formatted value. It reports whether the value was fully handled
// (true) or whether the caller should continue formatting it (false).
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
	// We need an interface to check if the type implements the error or
	// Stringer interface. However, the reflect package won't give us an
	// interface on certain things like unexported struct fields in order
	// to enforce visibility rules. We use unsafe to bypass these restrictions
	// since this package does not mutate the values.
	if !v.CanInterface() {
		v = unsafeReflectValue(v)
	}

	// Choose whether or not to do error and Stringer interface lookups against
	// the base type or a pointer to the base type depending on settings.
	// Technically calling one of these methods with a pointer receiver can
	// mutate the value, however, types which choose to satisfy an error or
	// Stringer interface with a pointer receiver should not be mutating their
	// state inside these interface methods.
	var viface interface{}
	if !cs.DisablePointerMethods {
		if !v.CanAddr() {
			v = unsafeReflectValue(v)
		}
		viface = v.Addr().Interface()
	} else {
		if v.CanAddr() {
			v = v.Addr()
		}
		viface = v.Interface()
	}

	// Is it an error or Stringer?
	switch iface := viface.(type) {
	case error:
		// Guard against panics raised inside the user's Error method.
		defer catchPanic(w, v)
		if cs.ContinueOnMethod {
			// Emit "(message) " and let the caller keep formatting.
			w.Write(openParenBytes)
			w.Write([]byte(iface.Error()))
			w.Write(closeParenBytes)
			w.Write(spaceBytes)
			return false
		}
		w.Write([]byte(iface.Error()))
		return true

	case fmt.Stringer:
		// Guard against panics raised inside the user's String method.
		defer catchPanic(w, v)
		if cs.ContinueOnMethod {
			// Emit "(string) " and let the caller keep formatting.
			w.Write(openParenBytes)
			w.Write([]byte(iface.String()))
			w.Write(closeParenBytes)
			w.Write(spaceBytes)
			return false
		}
		w.Write([]byte(iface.String()))
		return true
	}
	return false
}
// printBool outputs a boolean value as true or false to Writer w.
func printBool(w io.Writer, val bool) {
if val {
w.Write(trueBytes)
} else {
w.Write(falseBytes)
}
}
// printInt outputs a signed integer value to Writer w.
func printInt(w io.Writer, val int64, base int) {
w.Write([]byte(strconv.FormatInt(val, base)))
}
// printUint outputs an unsigned integer value to Writer w.
func printUint(w io.Writer, val uint64, base int) {
w.Write([]byte(strconv.FormatUint(val, base)))
}
// printFloat outputs a floating point value using the specified precision,
// which is expected to be 32 or 64bit, to Writer w.
func printFloat(w io.Writer, val float64, precision int) {
w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
}
// printComplex writes c to w as "(re+imi)" — parenthesised real part, signed
// imaginary part, trailing 'i' — using %g float formatting at floatPrecision
// bits for both parts. A '+' is written explicitly for non-negative imaginary
// parts; negative ones carry their own '-' from the formatter.
func printComplex(w io.Writer, c complex128, floatPrecision int) {
	w.Write(openParenBytes)
	w.Write(strconv.AppendFloat(nil, real(c), 'g', -1, floatPrecision))
	im := imag(c)
	if im >= 0 {
		w.Write(plusBytes)
	}
	w.Write(strconv.AppendFloat(nil, im, 'g', -1, floatPrecision))
	w.Write(iBytes)
	w.Write(closeParenBytes)
}
// printHexPtr writes p to w as lowercase hexadecimal with a leading "0x"
// prefix; a zero (null) pointer is rendered with the nil marker instead.
func printHexPtr(w io.Writer, p uintptr) {
	num := uint64(p)
	if num == 0 {
		// Null pointer.
		w.Write(nilAngleBytes)
		return
	}

	// "0x" + up to 16 lowercase hex digits for a 64-bit value.
	out := make([]byte, 0, 18)
	out = append(out, '0', 'x')
	out = strconv.AppendUint(out, num, 16)
	w.Write(out)
}
// valuesSorter implements sort.Interface to allow a slice of reflect.Value
// elements to be sorted.
type valuesSorter struct {
	values  []reflect.Value
	strings []string // either nil or same len as values; surrogate sort keys
	cs      *ConfigState
}
// newValuesSorter initializes a valuesSorter instance, which holds a set of
// surrogate keys on which the data should be sorted. It uses flags in
// ConfigState to decide if and how to populate those surrogate keys.
//
// The fallback order is significant: primitive kinds sort directly; otherwise
// error/Stringer output is tried (unless DisableMethods), and finally Sprintf
// spewing (if SpewKeys). A nil strings slice means "compare values directly".
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
	vs := &valuesSorter{values: values, cs: cs}
	// Primitive kinds need no surrogate keys at all.
	if canSortSimply(vs.values[0].Kind()) {
		return vs
	}
	if !cs.DisableMethods {
		vs.strings = make([]string, len(values))
		for i := range vs.values {
			b := bytes.Buffer{}
			if !handleMethods(cs, &b, vs.values[i]) {
				// One unhandled value invalidates the whole
				// surrogate set; fall through to the next option.
				vs.strings = nil
				break
			}
			vs.strings[i] = b.String()
		}
	}
	if vs.strings == nil && cs.SpewKeys {
		// Last resort: spew each key to a string and sort on that.
		vs.strings = make([]string, len(values))
		for i := range vs.values {
			vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
		}
	}
	return vs
}
// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
// directly, or whether it should be considered for sorting by surrogate keys
// (if the ConfigState allows it).
func canSortSimply(kind reflect.Kind) bool {
// This switch parallels valueSortLess, except for the default case.
switch kind {
case reflect.Bool:
return true
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return true
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
return true
case reflect.Float32, reflect.Float64:
return true
case reflect.String:
return true
case reflect.Uintptr:
return true
case reflect.Array:
return true
}
return false
}
// Len returns the number of values in the slice. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Len() int {
	return len(s.values)
}
// Swap swaps the values at the passed indices, keeping the surrogate string
// keys (when present) in step with the values. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Swap(i, j int) {
	s.values[i], s.values[j] = s.values[j], s.values[i]
	if s.strings != nil {
		s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
	}
}
// valueSortLess returns whether the first value should sort before the second
// value. It is used by valueSorter.Less as part of the sort.Interface
// implementation.
func valueSortLess(a, b reflect.Value) bool {
switch a.Kind() {
case reflect.Bool:
return !a.Bool() && b.Bool()
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return a.Int() < b.Int()
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
return a.Uint() < b.Uint()
case reflect.Float32, reflect.Float64:
return a.Float() < b.Float()
case reflect.String:
return a.String() < b.String()
case reflect.Uintptr:
return a.Uint() < b.Uint()
case reflect.Array:
// Compare the contents of both arrays.
l := a.Len()
for i := 0; i < l; i++ {
av := a.Index(i)
bv := b.Index(i)
if av.Interface() == bv.Interface() {
continue
}
return valueSortLess(av, bv)
}
}
return a.String() < b.String()
}
// Less returns whether the value at index i should sort before the
// value at index j, comparing surrogate string keys when they were built and
// raw values otherwise. It is part of the sort.Interface implementation.
func (s *valuesSorter) Less(i, j int) bool {
	if s.strings == nil {
		return valueSortLess(s.values[i], s.values[j])
	}
	return s.strings[i] < s.strings[j]
}
// sortValues sorts values in place. It handles both native types and any type
// that can be converted to error or Stringer; other inputs are ordered by
// their Value.String() output to keep display stable. An empty slice is a
// no-op.
func sortValues(values []reflect.Value, cs *ConfigState) {
	if len(values) > 0 {
		sort.Sort(newValuesSorter(values, cs))
	}
}
================================================
FILE: vendor/github.com/davecgh/go-spew/spew/common_test.go
================================================
/*
* Copyright (c) 2013 Dave Collins
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew_test
import (
"fmt"
"reflect"
"testing"
"github.com/davecgh/go-spew/spew"
)
// stringer is a custom type used to test invocation of the Stringer
// interface when it is implemented on a non-pointer receiver.
type stringer string

// String implements the Stringer interface for testing invocation of custom
// stringers on types with non-pointer receivers.
func (s stringer) String() string {
	return "stringer " + string(s)
}

// pstringer is a custom type used to test invocation of the Stringer
// interface when it is implemented only on a pointer receiver.
type pstringer string

// String implements the Stringer interface for testing invocation of custom
// stringers on types with only pointer receivers.
func (s *pstringer) String() string {
	return "stringer " + string(*s)
}
// xref1 and xref2 are cross referencing structs for testing circular reference
// detection: each holds a pointer to the other.
type xref1 struct {
	ps2 *xref2
}
type xref2 struct {
	ps1 *xref1
}

// indirCir1, indirCir2, and indirCir3 are used to generate an indirect
// (three-hop) circular reference for testing detection.
type indirCir1 struct {
	ps2 *indirCir2
}
type indirCir2 struct {
	ps3 *indirCir3
}
type indirCir3 struct {
	ps1 *indirCir1
}

// embed is used to test embedded structures.
type embed struct {
	a string
}

// embedwrap is used to test embedded structures; it carries both an
// anonymous embedded *embed and a named *embed field.
type embedwrap struct {
	*embed
	e *embed
}
// panicer is used to intentionally cause a panic for testing that spew
// properly recovers from panics raised inside custom Stringer methods.
type panicer int

// String panics unconditionally so tests can exercise spew's panic handling.
func (p panicer) String() string {
	panic("test panic")
}

// customError is used to test custom error interface invocation.
type customError int

// Error implements the error interface, rendering the underlying integer.
func (e customError) Error() string {
	return fmt.Sprintf("error: %d", int(e))
}
// stringizeWants converts a slice of wanted test output into a format suitable
// for a test error message. The first entry is prefixed "want: " and each
// subsequent entry is prefixed "want<N>: " with its 1-based index.
func stringizeWants(wants []string) string {
	out := ""
	for idx, w := range wants {
		if idx == 0 {
			out += "want: " + w
		} else {
			out += fmt.Sprintf("want%d: %s", idx+1, w)
		}
	}
	return out
}
// testFailed reports whether a test failed: the result is a failure unless
// it exactly matches at least one entry in the slice of wanted strings.
func testFailed(result string, wants []string) bool {
	matched := false
	for _, w := range wants {
		if w == result {
			matched = true
			break
		}
	}
	return !matched
}
// sortableStruct implements Stringer, so spew's key sorting can order it
// when method invocation is enabled.
type sortableStruct struct {
	x int
}

// String returns "ss.<x>" so sorted ordering can be asserted in tests.
func (ss sortableStruct) String() string {
	return fmt.Sprintf("ss.%d", ss.x)
}

// unsortableStruct has no String or Error method, so it can only be ordered
// via spewed-string keys (the SpewKeys option).
type unsortableStruct struct {
	x int
}

// sortTestCase pairs an input slice of reflect.Values with the order
// expected after sorting.
type sortTestCase struct {
	input    []reflect.Value
	expected []reflect.Value
}
// helpTestSortValues runs spew.SortValues over every test case and verifies
// the input ends up in the expected order, reporting mismatches through t.
func helpTestSortValues(tests []sortTestCase, cs *spew.ConfigState, t *testing.T) {
	// reflect.DeepEqual cannot reliably compare reflect.Value instances,
	// probably because of all the pointer tricks. For instance,
	// v(2.0) != v(2.0) on a 32-bit system. Unwrap each Value into a plain
	// interface{} before comparing instead.
	unwrap := func(vals []reflect.Value) []interface{} {
		out := []interface{}{}
		for _, val := range vals {
			out = append(out, val.Interface())
		}
		return out
	}

	for _, tc := range tests {
		spew.SortValues(tc.input, cs)
		got := unwrap(tc.input)
		want := unwrap(tc.expected)
		if !reflect.DeepEqual(got, want) {
			t.Errorf("Sort mismatch:\n %v != %v", got, want)
		}
	}
}
// TestSortValues ensures the sort functionality for reflect.Value based
// sorting works as intended when both method invocation and spewed keys
// are disabled.
func TestSortValues(t *testing.T) {
	v := reflect.ValueOf

	a := v("a")
	b := v("b")
	c := v("c")
	embedA := v(embed{"a"})
	embedB := v(embed{"b"})
	embedC := v(embed{"c"})
	tests := []sortTestCase{
		// No values.
		{
			[]reflect.Value{},
			[]reflect.Value{},
		},
		// Bools.
		{
			[]reflect.Value{v(false), v(true), v(false)},
			[]reflect.Value{v(false), v(false), v(true)},
		},
		// Ints.
		{
			[]reflect.Value{v(2), v(1), v(3)},
			[]reflect.Value{v(1), v(2), v(3)},
		},
		// Uints.
		{
			[]reflect.Value{v(uint8(2)), v(uint8(1)), v(uint8(3))},
			[]reflect.Value{v(uint8(1)), v(uint8(2)), v(uint8(3))},
		},
		// Floats.
		{
			[]reflect.Value{v(2.0), v(1.0), v(3.0)},
			[]reflect.Value{v(1.0), v(2.0), v(3.0)},
		},
		// Strings.
		{
			[]reflect.Value{b, a, c},
			[]reflect.Value{a, b, c},
		},
		// Array (compared element by element).
		{
			[]reflect.Value{v([3]int{3, 2, 1}), v([3]int{1, 3, 2}), v([3]int{1, 2, 3})},
			[]reflect.Value{v([3]int{1, 2, 3}), v([3]int{1, 3, 2}), v([3]int{3, 2, 1})},
		},
		// Uintptrs.
		{
			[]reflect.Value{v(uintptr(2)), v(uintptr(1)), v(uintptr(3))},
			[]reflect.Value{v(uintptr(1)), v(uintptr(2)), v(uintptr(3))},
		},
		// SortableStructs.
		{
			// Note: not sorted - DisableMethods is set.
			[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
			[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
		},
		// UnsortableStructs.
		{
			// Note: not sorted - SpewKeys is false.
			[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
			[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
		},
		// Invalid.
		{
			[]reflect.Value{embedB, embedA, embedC},
			[]reflect.Value{embedB, embedA, embedC},
		},
	}
	cs := spew.ConfigState{DisableMethods: true, SpewKeys: false}
	helpTestSortValues(tests, &cs, t)
}
// TestSortValuesWithMethods ensures the sort functionality for reflect.Value
// based sorting works as intended when using string methods (Stringer/error
// invocation enabled, spewed keys disabled).
func TestSortValuesWithMethods(t *testing.T) {
	v := reflect.ValueOf

	a := v("a")
	b := v("b")
	c := v("c")
	tests := []sortTestCase{
		// Ints.
		{
			[]reflect.Value{v(2), v(1), v(3)},
			[]reflect.Value{v(1), v(2), v(3)},
		},
		// Strings.
		{
			[]reflect.Value{b, a, c},
			[]reflect.Value{a, b, c},
		},
		// SortableStructs (sorted via their String method).
		{
			[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
			[]reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},
		},
		// UnsortableStructs.
		{
			// Note: not sorted - SpewKeys is false.
			[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
			[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
		},
	}
	cs := spew.ConfigState{DisableMethods: false, SpewKeys: false}
	helpTestSortValues(tests, &cs, t)
}
// TestSortValuesWithSpew ensures the sort functionality for reflect.Value
// based sorting works as intended when using spew to stringify keys
// (methods disabled, SpewKeys enabled).
func TestSortValuesWithSpew(t *testing.T) {
	v := reflect.ValueOf

	a := v("a")
	b := v("b")
	c := v("c")
	tests := []sortTestCase{
		// Ints.
		{
			[]reflect.Value{v(2), v(1), v(3)},
			[]reflect.Value{v(1), v(2), v(3)},
		},
		// Strings.
		{
			[]reflect.Value{b, a, c},
			[]reflect.Value{a, b, c},
		},
		// SortableStructs.
		{
			[]reflect.Value{v(sortableStruct{2}), v(sortableStruct{1}), v(sortableStruct{3})},
			[]reflect.Value{v(sortableStruct{1}), v(sortableStruct{2}), v(sortableStruct{3})},
		},
		// UnsortableStructs (sorted because SpewKeys is true).
		{
			[]reflect.Value{v(unsortableStruct{2}), v(unsortableStruct{1}), v(unsortableStruct{3})},
			[]reflect.Value{v(unsortableStruct{1}), v(unsortableStruct{2}), v(unsortableStruct{3})},
		},
	}
	cs := spew.ConfigState{DisableMethods: true, SpewKeys: true}
	helpTestSortValues(tests, &cs, t)
}
================================================
FILE: vendor/github.com/davecgh/go-spew/spew/config.go
================================================
/*
* Copyright (c) 2013 Dave Collins
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"io"
"os"
)
// ConfigState houses the configuration options used by spew to format and
// display values. There is a global instance, Config, that is used to control
// all top-level Formatter and Dump functionality. Each ConfigState instance
// provides methods equivalent to the top-level functions.
//
// The zero value for ConfigState provides no indentation. You would typically
// want to set it to a space or a tab.
//
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
// with default settings. See the documentation of NewDefaultConfig for default
// values.
type ConfigState struct {
	// Indent specifies the string to use for each indentation level. The
	// global config instance that all top-level functions use set this to a
	// single space by default. If you would like more indentation, you might
	// set this to a tab with "\t" or perhaps two spaces with "  ".
	Indent string

	// MaxDepth controls the maximum number of levels to descend into nested
	// data structures. The default, 0, means there is no limit.
	//
	// NOTE: Circular data structures are properly detected, so it is not
	// necessary to set this value unless you specifically want to limit deeply
	// nested data structures.
	MaxDepth int

	// DisableMethods specifies whether or not error and Stringer interfaces are
	// invoked for types that implement them.
	DisableMethods bool

	// DisablePointerMethods specifies whether or not to check for and invoke
	// error and Stringer interfaces on types which only accept a pointer
	// receiver when the current type is not a pointer.
	//
	// NOTE: This might be an unsafe action since calling one of these methods
	// with a pointer receiver could technically mutate the value, however,
	// in practice, types which choose to satisfy an error or Stringer
	// interface with a pointer receiver should not be mutating their state
	// inside these interface methods.
	DisablePointerMethods bool

	// ContinueOnMethod specifies whether or not recursion should continue once
	// a custom error or Stringer interface is invoked. The default, false,
	// means it will print the results of invoking the custom error or Stringer
	// interface and return immediately instead of continuing to recurse into
	// the internals of the data type.
	//
	// NOTE: This flag does not have any effect if method invocation is disabled
	// via the DisableMethods or DisablePointerMethods options.
	ContinueOnMethod bool

	// SortKeys specifies map keys should be sorted before being printed. Use
	// this to have a more deterministic, diffable output. Note that only
	// native types (bool, int, uint, floats, uintptr and string) and types
	// that support the error or Stringer interfaces (if methods are
	// enabled) are supported, with other types sorted according to the
	// reflect.Value.String() output which guarantees display stability.
	SortKeys bool

	// SpewKeys specifies that, as a last resort attempt, map keys should
	// be spewed to strings and sorted by those strings. This is only
	// considered if SortKeys is true.
	SpewKeys bool
}

// Config is the active configuration of the top-level functions.
// The configuration can be changed by modifying the contents of spew.Config.
var Config = ConfigState{Indent: " "}
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the formatted string as a value that satisfies error. See NewFormatter
// for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
	wrapped := c.convertArgs(a)
	return fmt.Errorf(format, wrapped...)
}

// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
	wrapped := c.convertArgs(a)
	return fmt.Fprint(w, wrapped...)
}

// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
	wrapped := c.convertArgs(a)
	return fmt.Fprintf(w, format, wrapped...)
}

// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// were passed with a Formatter interface returned by c.NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
	wrapped := c.convertArgs(a)
	return fmt.Fprintln(w, wrapped...)
}

// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
	wrapped := c.convertArgs(a)
	return fmt.Print(wrapped...)
}

// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
	wrapped := c.convertArgs(a)
	return fmt.Printf(format, wrapped...)
}

// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
	wrapped := c.convertArgs(a)
	return fmt.Println(wrapped...)
}

// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprint(a ...interface{}) string {
	wrapped := c.convertArgs(a)
	return fmt.Sprint(wrapped...)
}

// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
	wrapped := c.convertArgs(a)
	return fmt.Sprintf(format, wrapped...)
}

// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a Formatter interface returned by c.NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintln(a ...interface{}) string {
	wrapped := c.convertArgs(a)
	return fmt.Sprintln(wrapped...)
}
/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.

The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).

Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
c.Printf, c.Println, or c.Sprintf.
*/
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
	return newFormatter(c, v)
}
// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump; see Dump for the list of supported features and
// ConfigState for the options that control the output.
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
	fdump(c, w, a...)
}
/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:

	* Pointers are dereferenced and followed
	* Circular data structures are detected and handled properly
	* Custom Stringer/error interfaces are optionally invoked, including
	  on unexported types
	* Custom types which only implement the Stringer/error interfaces via
	  a pointer receiver are optionally invoked when passing non-pointer
	  variables
	* Byte arrays and slices are dumped like the hexdump -C command which
	  includes offsets, byte values in hex, and ASCII output

The configuration options are controlled by modifying the public members
of c. See ConfigState for options documentation.

See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func (c *ConfigState) Dump(a ...interface{}) {
	fdump(c, os.Stdout, a...)
}
// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump. See Dump for the list of supported features.
func (c *ConfigState) Sdump(a ...interface{}) string {
	var sb bytes.Buffer
	fdump(c, &sb, a...)
	return sb.String()
}
// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument wrapped in a spew Formatter that carries the
// ConfigState associated with c.
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
	formatters = make([]interface{}, 0, len(args))
	for _, arg := range args {
		formatters = append(formatters, newFormatter(c, arg))
	}
	return formatters
}
// NewDefaultConfig returns a ConfigState with the following default settings.
//
//	Indent: " "
//	MaxDepth: 0
//	DisableMethods: false
//	DisablePointerMethods: false
//	ContinueOnMethod: false
//	SortKeys: false
//	SpewKeys: false
func NewDefaultConfig() *ConfigState {
	return &ConfigState{Indent: " "}
}
================================================
FILE: vendor/github.com/davecgh/go-spew/spew/doc.go
================================================
/*
* Copyright (c) 2013 Dave Collins
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
Package spew implements a deep pretty printer for Go data structures to aid in
debugging.
A quick overview of the additional features spew provides over the built-in
printing facilities for Go data types are as follows:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output (only when using
Dump style)
There are two different approaches spew allows for dumping Go data structures:
* Dump style which prints with newlines, customizable indentation,
and additional debug information such as types and all pointer addresses
used to indirect to the final value
* A custom Formatter interface that integrates cleanly with the standard fmt
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
similar to the default %v while providing the additional functionality
outlined above and passing unsupported format verbs such as %x and %q
along to fmt
Quick Start
This section demonstrates how to quickly get started with spew. See the
sections below for further details on formatting and configuration options.
To dump a variable with full newlines, indentation, type, and pointer
information use Dump, Fdump, or Sdump:
spew.Dump(myVar1, myVar2, ...)
spew.Fdump(someWriter, myVar1, myVar2, ...)
str := spew.Sdump(myVar1, myVar2, ...)
Alternatively, if you would prefer to use format strings with a compacted inline
printing style, use the convenience wrappers Printf, Fprintf, etc with
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
%#+v (adds types and pointer addresses):
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
Configuration Options
Configuration of spew is handled by fields in the ConfigState type. For
convenience, all of the top-level functions use a global state available
via the spew.Config global.
It is also possible to create a ConfigState instance that provides methods
equivalent to the top-level functions. This allows concurrent configuration
options. See the ConfigState documentation for more details.
The following configuration options are available:
* Indent
String to use for each indentation level for Dump functions.
It is a single space by default. A popular alternative is "\t".
* MaxDepth
Maximum number of levels to descend into nested data structures.
There is no limit by default.
* DisableMethods
Disables invocation of error and Stringer interface methods.
Method invocation is enabled by default.
* DisablePointerMethods
Disables invocation of error and Stringer interface methods on types
which only accept pointer receivers from non-pointer variables.
Pointer method invocation is enabled by default.
* ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default.
* SortKeys
Specifies map keys should be sorted before being printed. Use
this to have a more deterministic, diffable output. Note that
only native types (bool, int, uint, floats, uintptr and string)
and types which implement error or Stringer interfaces are
supported with other types sorted according to the
reflect.Value.String() output which guarantees display
stability. Natural map order is used by default.
* SpewKeys
Specifies that, as a last resort attempt, map keys should be
spewed to strings and sorted by those strings. This is only
considered if SortKeys is true.
Dump Usage
Simply call spew.Dump with a list of variables you want to dump:
spew.Dump(myVar1, myVar2, ...)
You may also call spew.Fdump if you would prefer to output to an arbitrary
io.Writer. For example, to dump to standard error:
spew.Fdump(os.Stderr, myVar1, myVar2, ...)
A third option is to call spew.Sdump to get the formatted output as a string:
str := spew.Sdump(myVar1, myVar2, ...)
Sample Dump Output
See the Dump example for details on the setup of the types and variables being
shown here.
(main.Foo) {
unexportedField: (*main.Bar)(0xf84002e210)({
flag: (main.Flag) flagTwo,
data: (uintptr)
}),
ExportedField: (map[interface {}]interface {}) (len=1) {
(string) (len=3) "one": (bool) true
}
}
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
command as shown.
([]uint8) (len=32 cap=32) {
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
00000020 31 32 |12|
}
Custom Formatter
Spew provides a custom formatter that implements the fmt.Formatter interface
so that it integrates cleanly with standard fmt package printing functions. The
formatter is useful for inline printing of smaller data types similar to the
standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Custom Formatter Usage
The simplest way to make use of the spew custom formatter is to call one of the
convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
functions have syntax you are most likely already familiar with:
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Println(myVar, myVar2)
spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
See the Index for the full list of convenience functions.
Sample Formatter Output
Double pointer to a uint8:
%v: <**>5
%+v: <**>(0xf8400420d0->0xf8400420c8)5
%#v: (**uint8)5
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
Pointer to circular struct with a uint8 field and a pointer to itself:
%v: <*>{1 <*>}
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)}
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)}
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)}
See the Printf example for details on the setup of variables being shown
here.
Errors
Since it is possible for custom Stringer/error interfaces to panic, spew
detects them and handles them internally by printing the panic information
inline with the output. Since spew is intended to provide deep pretty printing
capabilities on structures, it intentionally does not return any errors.
*/
package spew
================================================
FILE: vendor/github.com/davecgh/go-spew/spew/dump.go
================================================
/*
* Copyright (c) 2013 Dave Collins
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"encoding/hex"
"fmt"
"io"
"os"
"reflect"
"regexp"
"strconv"
"strings"
)
var (
// uint8Type is a reflect.Type representing a uint8. It is used to
// convert cgo types to uint8 slices for hexdumping.
uint8Type = reflect.TypeOf(uint8(0))
// cCharRE is a regular expression that matches a cgo char.
// It is used to detect character arrays to hexdump them.
cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
// char. It is used to detect unsigned character arrays to hexdump
// them.
cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
// It is used to detect uint8_t arrays to hexdump them.
cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
)
// dumpState contains information about the state of a dump operation.
type dumpState struct {
	w     io.Writer
	depth int // current nesting level; drives indentation width

	// pointers maps each dereferenced pointer address to the depth at
	// which it was first seen; dumpPtr uses it for circular-reference
	// detection.
	pointers map[uintptr]int

	// ignoreNextType suppresses the next type annotation; dumpPtr sets it
	// after it has already printed the type of a dereferenced value.
	ignoreNextType bool

	// ignoreNextIndent makes the next indent() call a no-op (one-shot).
	ignoreNextIndent bool

	cs *ConfigState
}
// indent writes the indentation for the current depth level using the
// cs.Indent option, unless the one-shot ignoreNextIndent flag is set, in
// which case the flag is cleared and nothing is written.
func (d *dumpState) indent() {
	if !d.ignoreNextIndent {
		d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
		return
	}
	d.ignoreNextIndent = false
}
// unpackValue returns the value stored inside v when v is a non-nil
// interface; otherwise v is returned unchanged. This is useful for data
// types like structs, arrays, slices, and maps which can contain varying
// types packed inside an interface.
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
	if v.Kind() != reflect.Interface || v.IsNil() {
		return v
	}
	return v.Elem()
}
// dumpPtr handles formatting of pointers by indirecting them as necessary.
// It walks chains of pointers (and interfaces wrapping pointers), recording
// each dereferenced address for circular-reference detection, then prints
// the type with one asterisk per indirection, the pointer chain, and finally
// the dereferenced value (or <nil>/circular markers).
func (d *dumpState) dumpPtr(v reflect.Value) {
	// Remove pointers at or below the current depth from map used to detect
	// circular refs.
	for k, depth := range d.pointers {
		if depth >= d.depth {
			delete(d.pointers, k)
		}
	}

	// Keep list of all dereferenced pointers to show later.
	pointerChain := make([]uintptr, 0)

	// Figure out how many levels of indirection there are by dereferencing
	// pointers and unpacking interfaces down the chain while detecting circular
	// references.
	nilFound := false
	cycleFound := false
	indirects := 0
	ve := v
	for ve.Kind() == reflect.Ptr {
		if ve.IsNil() {
			nilFound = true
			break
		}
		indirects++
		addr := ve.Pointer()
		pointerChain = append(pointerChain, addr)
		// A pointer already recorded at a shallower depth means we have
		// come back around to it: a cycle.
		if pd, ok := d.pointers[addr]; ok && pd < d.depth {
			cycleFound = true
			indirects--
			break
		}
		d.pointers[addr] = d.depth

		ve = ve.Elem()
		if ve.Kind() == reflect.Interface {
			if ve.IsNil() {
				nilFound = true
				break
			}
			ve = ve.Elem()
		}
	}

	// Display type information.
	d.w.Write(openParenBytes)
	d.w.Write(bytes.Repeat(asteriskBytes, indirects))
	d.w.Write([]byte(ve.Type().String()))
	d.w.Write(closeParenBytes)

	// Display pointer information.
	if len(pointerChain) > 0 {
		d.w.Write(openParenBytes)
		for i, addr := range pointerChain {
			if i > 0 {
				d.w.Write(pointerChainBytes)
			}
			printHexPtr(d.w, addr)
		}
		d.w.Write(closeParenBytes)
	}

	// Display dereferenced value.
	d.w.Write(openParenBytes)
	switch {
	// Idiomatic form: comparing a bool against true is redundant
	// (staticcheck S1002).
	case nilFound:
		d.w.Write(nilAngleBytes)
	case cycleFound:
		d.w.Write(circularBytes)
	default:
		d.ignoreNextType = true
		d.dump(ve)
	}
	d.w.Write(closeParenBytes)
}
// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
// reflection) arrays and slices are dumped in hexdump -C fashion; all other
// element types are dumped recursively, one element per line.
func (d *dumpState) dumpSlice(v reflect.Value) {
	// Determine whether this type should be hex dumped or not. Also,
	// for types which should be hexdumped, try to use the underlying data
	// first, then fall back to trying to convert them to a uint8 slice.
	var buf []uint8
	doConvert := false
	doHexDump := false
	numEntries := v.Len()
	if numEntries > 0 {
		vt := v.Index(0).Type()
		vts := vt.String()
		switch {
		// C types that need to be converted.
		case cCharRE.MatchString(vts):
			fallthrough
		case cUnsignedCharRE.MatchString(vts):
			fallthrough
		case cUint8tCharRE.MatchString(vts):
			doConvert = true

		// Try to use existing uint8 slices and fall back to converting
		// and copying if that fails.
		case vt.Kind() == reflect.Uint8:
			// We need an addressable interface to convert the type back
			// into a byte slice. However, the reflect package won't give
			// us an interface on certain things like unexported struct
			// fields in order to enforce visibility rules. We use unsafe
			// to bypass these restrictions since this package does not
			// mutate the values.
			vs := v
			if !vs.CanInterface() || !vs.CanAddr() {
				vs = unsafeReflectValue(vs)
			}
			vs = vs.Slice(0, numEntries)

			// Use the existing uint8 slice if it can be type
			// asserted.
			iface := vs.Interface()
			if slice, ok := iface.([]uint8); ok {
				buf = slice
				doHexDump = true
				break
			}

			// The underlying data needs to be converted if it can't
			// be type asserted to a uint8 slice.
			doConvert = true
		}

		// Copy and convert the underlying type if needed.
		if doConvert && vt.ConvertibleTo(uint8Type) {
			// Convert and copy each element into a uint8 byte
			// slice.
			buf = make([]uint8, numEntries)
			for i := 0; i < numEntries; i++ {
				vv := v.Index(i)
				buf[i] = uint8(vv.Convert(uint8Type).Uint())
			}
			doHexDump = true
		}
	}

	// Hexdump the entire slice as needed, indenting every line of the
	// hexdump output to the current depth.
	if doHexDump {
		indent := strings.Repeat(d.cs.Indent, d.depth)
		str := indent + hex.Dump(buf)
		str = strings.Replace(str, "\n", "\n"+indent, -1)
		str = strings.TrimRight(str, d.cs.Indent)
		d.w.Write([]byte(str))
		return
	}

	// Recursively call dump for each item, separating entries with commas
	// and terminating the final entry with a bare newline.
	for i := 0; i < numEntries; i++ {
		d.dump(d.unpackValue(v.Index(i)))
		if i < (numEntries - 1) {
			d.w.Write(commaNewlineBytes)
		} else {
			d.w.Write(newlineBytes)
		}
	}
}
// dump is the main workhorse for dumping a value. It uses the passed reflect
// value to figure out what kind of object we are dealing with and formats it
// appropriately. It is a recursive function, however circular data structures
// are detected and handled properly.
//
// Output shape per value: optional indent, "(type)", optional "(len=N cap=M)",
// then the value itself. Pointers are delegated to dumpPtr, which sets
// d.ignoreNextType before recursing so the dereferenced value's type is not
// printed twice.
func (d *dumpState) dump(v reflect.Value) {
	// Handle invalid reflect values immediately.
	kind := v.Kind()
	if kind == reflect.Invalid {
		d.w.Write(invalidAngleBytes)
		return
	}

	// Handle pointers specially.
	if kind == reflect.Ptr {
		d.indent()
		d.dumpPtr(v)
		return
	}

	// Print type information unless already handled elsewhere.
	if !d.ignoreNextType {
		d.indent()
		d.w.Write(openParenBytes)
		d.w.Write([]byte(v.Type().String()))
		d.w.Write(closeParenBytes)
		d.w.Write(spaceBytes)
	}
	d.ignoreNextType = false

	// Display length and capacity if the built-in len and cap functions
	// work with the value's kind and the len/cap itself is non-zero.
	valueLen, valueCap := 0, 0
	switch v.Kind() {
	case reflect.Array, reflect.Slice, reflect.Chan:
		valueLen, valueCap = v.Len(), v.Cap()
	case reflect.Map, reflect.String:
		// cap() is not defined for maps and strings, so only len is
		// reported for these kinds.
		valueLen = v.Len()
	}
	if valueLen != 0 || valueCap != 0 {
		d.w.Write(openParenBytes)
		if valueLen != 0 {
			d.w.Write(lenEqualsBytes)
			printInt(d.w, int64(valueLen), 10)
		}
		if valueCap != 0 {
			if valueLen != 0 {
				d.w.Write(spaceBytes)
			}
			d.w.Write(capEqualsBytes)
			printInt(d.w, int64(valueCap), 10)
		}
		d.w.Write(closeParenBytes)
		d.w.Write(spaceBytes)
	}

	// Call Stringer/error interfaces if they exist and the handle methods flag
	// is enabled
	if !d.cs.DisableMethods {
		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
			if handled := handleMethods(d.cs, d.w, v); handled {
				return
			}
		}
	}

	switch kind {
	case reflect.Invalid:
		// Do nothing. We should never get here since invalid has already
		// been handled above.

	case reflect.Bool:
		printBool(d.w, v.Bool())

	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
		printInt(d.w, v.Int(), 10)

	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
		printUint(d.w, v.Uint(), 10)

	case reflect.Float32:
		printFloat(d.w, v.Float(), 32)

	case reflect.Float64:
		printFloat(d.w, v.Float(), 64)

	case reflect.Complex64:
		printComplex(d.w, v.Complex(), 32)

	case reflect.Complex128:
		printComplex(d.w, v.Complex(), 64)

	case reflect.Slice:
		// nil slices print as an empty angle marker rather than "{}".
		if v.IsNil() {
			d.w.Write(nilAngleBytes)
			break
		}
		fallthrough

	case reflect.Array:
		d.w.Write(openBraceNewlineBytes)
		d.depth++
		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
			d.indent()
			d.w.Write(maxNewlineBytes)
		} else {
			d.dumpSlice(v)
		}
		d.depth--
		d.indent()
		d.w.Write(closeBraceBytes)

	case reflect.String:
		d.w.Write([]byte(strconv.Quote(v.String())))

	case reflect.Interface:
		// The only time we should get here is for nil interfaces due to
		// unpackValue calls.
		if v.IsNil() {
			d.w.Write(nilAngleBytes)
		}

	case reflect.Ptr:
		// Do nothing. We should never get here since pointers have already
		// been handled above.

	case reflect.Map:
		// nil maps should be indicated as different than empty maps
		if v.IsNil() {
			d.w.Write(nilAngleBytes)
			break
		}

		d.w.Write(openBraceNewlineBytes)
		d.depth++
		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
			d.indent()
			d.w.Write(maxNewlineBytes)
		} else {
			numEntries := v.Len()
			keys := v.MapKeys()
			// SortKeys makes map output deterministic; otherwise keys
			// come back in Go's randomized map iteration order.
			if d.cs.SortKeys {
				sortValues(keys, d.cs)
			}
			for i, key := range keys {
				d.dump(d.unpackValue(key))
				d.w.Write(colonSpaceBytes)
				// The value follows the key on the same line, so skip
				// the indent the recursive dump would otherwise emit.
				d.ignoreNextIndent = true
				d.dump(d.unpackValue(v.MapIndex(key)))
				if i < (numEntries - 1) {
					d.w.Write(commaNewlineBytes)
				} else {
					d.w.Write(newlineBytes)
				}
			}
		}
		d.depth--
		d.indent()
		d.w.Write(closeBraceBytes)

	case reflect.Struct:
		d.w.Write(openBraceNewlineBytes)
		d.depth++
		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
			d.indent()
			d.w.Write(maxNewlineBytes)
		} else {
			vt := v.Type()
			numFields := v.NumField()
			for i := 0; i < numFields; i++ {
				d.indent()
				vtf := vt.Field(i)
				d.w.Write([]byte(vtf.Name))
				d.w.Write(colonSpaceBytes)
				// Field value shares the field-name line; suppress the
				// recursive indent as with map values above.
				d.ignoreNextIndent = true
				d.dump(d.unpackValue(v.Field(i)))
				if i < (numFields - 1) {
					d.w.Write(commaNewlineBytes)
				} else {
					d.w.Write(newlineBytes)
				}
			}
		}
		d.depth--
		d.indent()
		d.w.Write(closeBraceBytes)

	case reflect.Uintptr:
		printHexPtr(d.w, uintptr(v.Uint()))

	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
		printHexPtr(d.w, v.Pointer())

	// There were not any other types at the time this code was written, but
	// fall back to letting the default fmt package handle it in case any new
	// types are added.
	default:
		if v.CanInterface() {
			fmt.Fprintf(d.w, "%v", v.Interface())
		} else {
			fmt.Fprintf(d.w, "%v", v.String())
		}
	}
}
// fdump is a helper function to consolidate the logic from the various public
// methods which take varying writers and config states.
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
	for _, arg := range a {
		if arg == nil {
			// An untyped nil carries no type information at all, so
			// write the nil-interface placeholder directly instead of
			// going through dump.
			w.Write(interfaceBytes)
			w.Write(spaceBytes)
			w.Write(nilAngleBytes)
			w.Write(newlineBytes)
			continue
		}

		// Each argument gets a fresh dump state so pointer-cycle
		// tracking never leaks between arguments.
		d := dumpState{
			w:        w,
			cs:       cs,
			pointers: make(map[uintptr]int),
		}
		d.dump(reflect.ValueOf(arg))
		d.w.Write(newlineBytes)
	}
}
// Fdump writes a spew dump of every argument in a to the writer w, using the
// package-level spew.Config for formatting options. Its output is identical
// to Dump in every respect other than the destination.
func Fdump(w io.Writer, a ...interface{}) {
	fdump(&Config, w, a...)
}
// Sdump returns the spew dump of the passed arguments as a string. The
// formatting is identical to Dump; only the destination differs.
func Sdump(a ...interface{}) string {
	// Accumulate the dump in memory and hand back the rendered text.
	buf := new(bytes.Buffer)
	fdump(&Config, buf, a...)
	return buf.String()
}
// Dump displays the passed parameters to standard out with newlines,
// customizable indentation, and additional debug information such as complete
// types and all pointer addresses used to indirect to the final value. It
// provides the following features over the built-in printing facilities
// provided by the fmt package:
//
//	* Pointers are dereferenced and followed
//	* Circular data structures are detected and handled properly
//	* Custom Stringer/error interfaces are optionally invoked, including
//	  on unexported types
//	* Custom types which only implement the Stringer/error interfaces via
//	  a pointer receiver are optionally invoked when passing non-pointer
//	  variables
//	* Byte arrays and slices are dumped like the hexdump -C command which
//	  includes offsets, byte values in hex, and ASCII output
//
// The configuration options are controlled by an exported package global,
// spew.Config. See ConfigState for options documentation.
//
// See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
// get the formatted result as a string.
func Dump(a ...interface{}) {
	fdump(&Config, os.Stdout, a...)
}
================================================
FILE: vendor/github.com/davecgh/go-spew/spew/dump_test.go
================================================
/*
* Copyright (c) 2013 Dave Collins
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
Test Summary:
NOTE: For each test, a nil pointer, a single pointer and double pointer to the
base test element are also tested to ensure proper indirection across all types.
- Max int8, int16, int32, int64, int
- Max uint8, uint16, uint32, uint64, uint
- Boolean true and false
- Standard complex64 and complex128
- Array containing standard ints
- Array containing type with custom formatter on pointer receiver only
- Array containing interfaces
- Array containing bytes
- Slice containing standard float32 values
- Slice containing type with custom formatter on pointer receiver only
- Slice containing interfaces
- Slice containing bytes
- Nil slice
- Standard string
- Nil interface
- Sub-interface
- Map with string keys and int vals
- Map with custom formatter type on pointer receiver only keys and vals
- Map with interface keys and values
- Map with nil interface value
- Struct with primitives
- Struct that contains another struct
- Struct that contains custom type with Stringer pointer interface via both
exported and unexported fields
- Struct that contains embedded struct and field to same struct
- Uintptr to 0 (null pointer)
- Uintptr address of real variable
- Unsafe.Pointer to 0 (null pointer)
- Unsafe.Pointer to address of real variable
- Nil channel
- Standard int channel
- Function with no params and no returns
- Function with param and no returns
- Function with multiple params and multiple returns
- Struct that is circular through self referencing
- Structs that are circular through cross referencing
- Structs that are indirectly circular
- Type that panics in its Stringer interface
*/
package spew_test
import (
"bytes"
"fmt"
"testing"
"unsafe"
"github.com/davecgh/go-spew/spew"
)
// dumpTest is used to describe a test to be performed against the Dump method.
// in is the value passed to Dump; wants lists every acceptable rendering (more
// than one entry is used when output ordering is nondeterministic, e.g. maps).
type dumpTest struct {
	in    interface{}
	wants []string
}

// dumpTests houses all of the tests to be performed against the Dump method.
// A nil slice is the idiomatic zero value here: append and len both work on
// nil, so the previous make([]dumpTest, 0) allocation was unnecessary.
var dumpTests []dumpTest

// addDumpTest is a helper method to append the passed input and desired result
// to dumpTests.
func addDumpTest(in interface{}, wants ...string) {
	dumpTests = append(dumpTests, dumpTest{in: in, wants: wants})
}
// addIntDumpTests registers Dump expectations for the maximum value of each
// signed integer width. Every base value is also exercised through a single
// pointer, a double pointer, and a typed nil pointer to verify indirection
// handling (v / pv / &pv / nv pattern, repeated for each width).
func addIntDumpTests() {
	// Max int8.
	v := int8(127)
	nv := (*int8)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "int8"
	vs := "127"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")()\n")

	// Max int16.
	v2 := int16(32767)
	nv2 := (*int16)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "int16"
	v2s := "32767"
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*"+v2t+")()\n")

	// Max int32.
	v3 := int32(2147483647)
	nv3 := (*int32)(nil)
	pv3 := &v3
	v3Addr := fmt.Sprintf("%p", pv3)
	pv3Addr := fmt.Sprintf("%p", &pv3)
	v3t := "int32"
	v3s := "2147483647"
	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
	addDumpTest(nv3, "(*"+v3t+")()\n")

	// Max int64.
	v4 := int64(9223372036854775807)
	nv4 := (*int64)(nil)
	pv4 := &v4
	v4Addr := fmt.Sprintf("%p", pv4)
	pv4Addr := fmt.Sprintf("%p", &pv4)
	v4t := "int64"
	v4s := "9223372036854775807"
	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
	addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
	addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
	addDumpTest(nv4, "(*"+v4t+")()\n")

	// Max int. Note the expected value matches int32's max; presumably
	// these tests were written for 32-bit platforms -- TODO confirm
	// behavior on 64-bit builds where max int is larger.
	v5 := int(2147483647)
	nv5 := (*int)(nil)
	pv5 := &v5
	v5Addr := fmt.Sprintf("%p", pv5)
	pv5Addr := fmt.Sprintf("%p", &pv5)
	v5t := "int"
	v5s := "2147483647"
	addDumpTest(v5, "("+v5t+") "+v5s+"\n")
	addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n")
	addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n")
	addDumpTest(nv5, "(*"+v5t+")()\n")
}
// addUintDumpTests registers Dump expectations for the maximum value of each
// unsigned integer width, with single-pointer, double-pointer, and typed-nil
// variants for each (mirrors the structure of addIntDumpTests).
func addUintDumpTests() {
	// Max uint8.
	v := uint8(255)
	nv := (*uint8)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "uint8"
	vs := "255"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")()\n")

	// Max uint16.
	v2 := uint16(65535)
	nv2 := (*uint16)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "uint16"
	v2s := "65535"
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*"+v2t+")()\n")

	// Max uint32.
	v3 := uint32(4294967295)
	nv3 := (*uint32)(nil)
	pv3 := &v3
	v3Addr := fmt.Sprintf("%p", pv3)
	pv3Addr := fmt.Sprintf("%p", &pv3)
	v3t := "uint32"
	v3s := "4294967295"
	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
	addDumpTest(nv3, "(*"+v3t+")()\n")

	// Max uint64.
	v4 := uint64(18446744073709551615)
	nv4 := (*uint64)(nil)
	pv4 := &v4
	v4Addr := fmt.Sprintf("%p", pv4)
	pv4Addr := fmt.Sprintf("%p", &pv4)
	v4t := "uint64"
	v4s := "18446744073709551615"
	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
	addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
	addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
	addDumpTest(nv4, "(*"+v4t+")()\n")

	// Max uint. As with int, the 32-bit maximum is used -- TODO confirm
	// this expectation on 64-bit builds.
	v5 := uint(4294967295)
	nv5 := (*uint)(nil)
	pv5 := &v5
	v5Addr := fmt.Sprintf("%p", pv5)
	pv5Addr := fmt.Sprintf("%p", &pv5)
	v5t := "uint"
	v5s := "4294967295"
	addDumpTest(v5, "("+v5t+") "+v5s+"\n")
	addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n")
	addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n")
	addDumpTest(nv5, "(*"+v5t+")()\n")
}
// addBoolDumpTests registers Dump expectations for boolean true and false,
// each via value, single pointer, and double pointer. The typed nil-pointer
// case is only registered once (for true) since it is type-level, not
// value-level.
func addBoolDumpTests() {
	// Boolean true.
	v := bool(true)
	nv := (*bool)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "bool"
	vs := "true"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")()\n")

	// Boolean false.
	v2 := bool(false)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "bool"
	v2s := "false"
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
}
// addFloatDumpTests registers Dump expectations for float32 and float64
// values, each via value, single pointer, double pointer, and typed nil
// pointer.
func addFloatDumpTests() {
	// Standard float32.
	v := float32(3.1415)
	nv := (*float32)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "float32"
	vs := "3.1415"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")()\n")

	// Standard float64.
	v2 := float64(3.1415926)
	nv2 := (*float64)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "float64"
	v2s := "3.1415926"
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*"+v2t+")()\n")
}
// addComplexDumpTests registers Dump expectations for complex64 and
// complex128 values, each via value, single pointer, double pointer, and
// typed nil pointer. Expected output uses Go's "(a+bi)" rendering.
func addComplexDumpTests() {
	// Standard complex64.
	v := complex(float32(6), -2)
	nv := (*complex64)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "complex64"
	vs := "(6-2i)"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")()\n")

	// Standard complex128.
	v2 := complex(float64(-6), 2)
	nv2 := (*complex128)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "complex128"
	v2s := "(-6+2i)"
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*"+v2t+")()\n")
}
// addArrayDumpTests registers Dump expectations for arrays: plain ints, a
// type with a pointer-receiver Stringer (pstringer, declared elsewhere in
// the test package), interfaces, and bytes. The byte array's expected output
// is the hexdump -C style rendering produced by dumpSlice.
func addArrayDumpTests() {
	// Array containing standard ints.
	v := [3]int{1, 2, 3}
	vLen := fmt.Sprintf("%d", len(v))
	vCap := fmt.Sprintf("%d", cap(v))
	nv := (*[3]int)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "int"
	vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 1,\n (" +
		vt + ") 2,\n (" + vt + ") 3\n}"
	addDumpTest(v, "([3]"+vt+") "+vs+"\n")
	addDumpTest(pv, "(*[3]"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**[3]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*[3]"+vt+")()\n")

	// Array containing type with custom formatter on pointer receiver only.
	v2i0 := pstringer("1")
	v2i1 := pstringer("2")
	v2i2 := pstringer("3")
	v2 := [3]pstringer{v2i0, v2i1, v2i2}
	v2i0Len := fmt.Sprintf("%d", len(v2i0))
	v2i1Len := fmt.Sprintf("%d", len(v2i1))
	v2i2Len := fmt.Sprintf("%d", len(v2i2))
	v2Len := fmt.Sprintf("%d", len(v2))
	v2Cap := fmt.Sprintf("%d", cap(v2))
	nv2 := (*[3]pstringer)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "spew_test.pstringer"
	v2s := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + ") (len=" +
		v2i0Len + ") stringer 1,\n (" + v2t + ") (len=" + v2i1Len +
		") stringer 2,\n (" + v2t + ") (len=" + v2i2Len + ") " +
		"stringer 3\n}"
	addDumpTest(v2, "([3]"+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*[3]"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**[3]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*[3]"+v2t+")()\n")

	// Array containing interfaces. Each element's concrete type is what
	// appears in the output, not the interface type.
	v3i0 := "one"
	v3 := [3]interface{}{v3i0, int(2), uint(3)}
	v3i0Len := fmt.Sprintf("%d", len(v3i0))
	v3Len := fmt.Sprintf("%d", len(v3))
	v3Cap := fmt.Sprintf("%d", cap(v3))
	nv3 := (*[3]interface{})(nil)
	pv3 := &v3
	v3Addr := fmt.Sprintf("%p", pv3)
	pv3Addr := fmt.Sprintf("%p", &pv3)
	v3t := "[3]interface {}"
	v3t2 := "string"
	v3t3 := "int"
	v3t4 := "uint"
	v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " +
		"(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" +
		v3t4 + ") 3\n}"
	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
	addDumpTest(nv3, "(*"+v3t+")()\n")

	// Array containing bytes. 34 elements forces a partial final hexdump
	// row to verify padding/alignment of the ASCII column.
	v4 := [34]byte{
		0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
		0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
		0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
		0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
		0x31, 0x32,
	}
	v4Len := fmt.Sprintf("%d", len(v4))
	v4Cap := fmt.Sprintf("%d", cap(v4))
	nv4 := (*[34]byte)(nil)
	pv4 := &v4
	v4Addr := fmt.Sprintf("%p", pv4)
	pv4Addr := fmt.Sprintf("%p", &pv4)
	v4t := "[34]uint8"
	v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " +
		"{\n 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20" +
		" |............... |\n" +
		" 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30" +
		" |!\"#$%&'()*+,-./0|\n" +
		" 00000020 31 32 " +
		" |12|\n}"
	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
	addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
	addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
	addDumpTest(nv4, "(*"+v4t+")()\n")
}
// addSliceDumpTests registers Dump expectations for slices: float32s, a
// pointer-receiver Stringer type, interfaces (including a nil element),
// bytes (hexdump rendering), and the nil slice itself.
func addSliceDumpTests() {
	// Slice containing standard float32 values.
	v := []float32{3.14, 6.28, 12.56}
	vLen := fmt.Sprintf("%d", len(v))
	vCap := fmt.Sprintf("%d", cap(v))
	nv := (*[]float32)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "float32"
	vs := "(len=" + vLen + " cap=" + vCap + ") {\n (" + vt + ") 3.14,\n (" +
		vt + ") 6.28,\n (" + vt + ") 12.56\n}"
	addDumpTest(v, "([]"+vt+") "+vs+"\n")
	addDumpTest(pv, "(*[]"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**[]"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*[]"+vt+")()\n")

	// Slice containing type with custom formatter on pointer receiver only.
	v2i0 := pstringer("1")
	v2i1 := pstringer("2")
	v2i2 := pstringer("3")
	v2 := []pstringer{v2i0, v2i1, v2i2}
	v2i0Len := fmt.Sprintf("%d", len(v2i0))
	v2i1Len := fmt.Sprintf("%d", len(v2i1))
	v2i2Len := fmt.Sprintf("%d", len(v2i2))
	v2Len := fmt.Sprintf("%d", len(v2))
	v2Cap := fmt.Sprintf("%d", cap(v2))
	nv2 := (*[]pstringer)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "spew_test.pstringer"
	v2s := "(len=" + v2Len + " cap=" + v2Cap + ") {\n (" + v2t + ") (len=" +
		v2i0Len + ") stringer 1,\n (" + v2t + ") (len=" + v2i1Len +
		") stringer 2,\n (" + v2t + ") (len=" + v2i2Len + ") " +
		"stringer 3\n}"
	addDumpTest(v2, "([]"+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*[]"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**[]"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*[]"+v2t+")()\n")

	// Slice containing interfaces. The trailing nil element is expected to
	// render as a bare "(interface {})" entry.
	v3i0 := "one"
	v3 := []interface{}{v3i0, int(2), uint(3), nil}
	v3i0Len := fmt.Sprintf("%d", len(v3i0))
	v3Len := fmt.Sprintf("%d", len(v3))
	v3Cap := fmt.Sprintf("%d", cap(v3))
	nv3 := (*[]interface{})(nil)
	pv3 := &v3
	v3Addr := fmt.Sprintf("%p", pv3)
	pv3Addr := fmt.Sprintf("%p", &pv3)
	v3t := "[]interface {}"
	v3t2 := "string"
	v3t3 := "int"
	v3t4 := "uint"
	v3t5 := "interface {}"
	v3s := "(len=" + v3Len + " cap=" + v3Cap + ") {\n (" + v3t2 + ") " +
		"(len=" + v3i0Len + ") \"one\",\n (" + v3t3 + ") 2,\n (" +
		v3t4 + ") 3,\n (" + v3t5 + ") \n}"
	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
	addDumpTest(nv3, "(*"+v3t+")()\n")

	// Slice containing bytes. 34 elements forces a partial final hexdump
	// row, matching the array variant above.
	v4 := []byte{
		0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
		0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
		0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
		0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
		0x31, 0x32,
	}
	v4Len := fmt.Sprintf("%d", len(v4))
	v4Cap := fmt.Sprintf("%d", cap(v4))
	nv4 := (*[]byte)(nil)
	pv4 := &v4
	v4Addr := fmt.Sprintf("%p", pv4)
	pv4Addr := fmt.Sprintf("%p", &pv4)
	v4t := "[]uint8"
	v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " +
		"{\n 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20" +
		" |............... |\n" +
		" 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30" +
		" |!\"#$%&'()*+,-./0|\n" +
		" 00000020 31 32 " +
		" |12|\n}"
	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
	addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
	addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
	addDumpTest(nv4, "(*"+v4t+")()\n")

	// Nil slice. Expected to dump as an empty value (no braces), distinct
	// from an empty-but-non-nil slice.
	v5 := []int(nil)
	nv5 := (*[]int)(nil)
	pv5 := &v5
	v5Addr := fmt.Sprintf("%p", pv5)
	pv5Addr := fmt.Sprintf("%p", &pv5)
	v5t := "[]int"
	v5s := ""
	addDumpTest(v5, "("+v5t+") "+v5s+"\n")
	addDumpTest(pv5, "(*"+v5t+")("+v5Addr+")("+v5s+")\n")
	addDumpTest(&pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")("+v5s+")\n")
	addDumpTest(nv5, "(*"+v5t+")()\n")
}
// addStringDumpTests registers Dump expectations for a standard string via
// value, single pointer, double pointer, and typed nil pointer. Strings are
// expected to carry a len annotation and be quoted.
func addStringDumpTests() {
	// Standard string.
	v := "test"
	vLen := fmt.Sprintf("%d", len(v))
	nv := (*string)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "string"
	vs := "(len=" + vLen + ") \"test\""
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")()\n")
}
// addInterfaceDumpTests registers Dump expectations for a nil interface and
// for an interface holding a concrete value. Note that dumping an interface
// holding uint16 is expected to show the concrete type, not "interface {}".
func addInterfaceDumpTests() {
	// Nil interface.
	var v interface{}
	nv := (*interface{})(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "interface {}"
	vs := ""
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")()\n")

	// Sub-interface.
	v2 := interface{}(uint16(65535))
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "uint16"
	v2s := "65535"
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
}
// addMapDumpTests registers Dump expectations for maps. Because map
// iteration order is not deterministic, two-entry maps register two
// acceptable outputs (ms and ms2 differ only in entry order). Nil maps are
// expected to dump as empty, distinct from empty-but-non-nil maps.
func addMapDumpTests() {
	// Map with string keys and int vals.
	k := "one"
	kk := "two"
	m := map[string]int{k: 1, kk: 2}
	klen := fmt.Sprintf("%d", len(k)) // not kLen to shut golint up
	kkLen := fmt.Sprintf("%d", len(kk))
	mLen := fmt.Sprintf("%d", len(m))
	nilMap := map[string]int(nil)
	nm := (*map[string]int)(nil)
	pm := &m
	mAddr := fmt.Sprintf("%p", pm)
	pmAddr := fmt.Sprintf("%p", &pm)
	mt := "map[string]int"
	mt1 := "string"
	mt2 := "int"
	// ms and ms2 are the two possible iteration orders of a two-entry map.
	ms := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + klen + ") " +
		"\"one\": (" + mt2 + ") 1,\n (" + mt1 + ") (len=" + kkLen +
		") \"two\": (" + mt2 + ") 2\n}"
	ms2 := "(len=" + mLen + ") {\n (" + mt1 + ") (len=" + kkLen + ") " +
		"\"two\": (" + mt2 + ") 2,\n (" + mt1 + ") (len=" + klen +
		") \"one\": (" + mt2 + ") 1\n}"
	addDumpTest(m, "("+mt+") "+ms+"\n", "("+mt+") "+ms2+"\n")
	addDumpTest(pm, "(*"+mt+")("+mAddr+")("+ms+")\n",
		"(*"+mt+")("+mAddr+")("+ms2+")\n")
	addDumpTest(&pm, "(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms+")\n",
		"(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms2+")\n")
	addDumpTest(nm, "(*"+mt+")()\n")
	addDumpTest(nilMap, "("+mt+") \n")

	// Map with custom formatter type on pointer receiver only keys and vals.
	k2 := pstringer("one")
	v2 := pstringer("1")
	m2 := map[pstringer]pstringer{k2: v2}
	k2Len := fmt.Sprintf("%d", len(k2))
	v2Len := fmt.Sprintf("%d", len(v2))
	m2Len := fmt.Sprintf("%d", len(m2))
	nilMap2 := map[pstringer]pstringer(nil)
	nm2 := (*map[pstringer]pstringer)(nil)
	pm2 := &m2
	m2Addr := fmt.Sprintf("%p", pm2)
	pm2Addr := fmt.Sprintf("%p", &pm2)
	m2t := "map[spew_test.pstringer]spew_test.pstringer"
	m2t1 := "spew_test.pstringer"
	m2t2 := "spew_test.pstringer"
	m2s := "(len=" + m2Len + ") {\n (" + m2t1 + ") (len=" + k2Len + ") " +
		"stringer one: (" + m2t2 + ") (len=" + v2Len + ") stringer 1\n}"
	addDumpTest(m2, "("+m2t+") "+m2s+"\n")
	addDumpTest(pm2, "(*"+m2t+")("+m2Addr+")("+m2s+")\n")
	addDumpTest(&pm2, "(**"+m2t+")("+pm2Addr+"->"+m2Addr+")("+m2s+")\n")
	addDumpTest(nm2, "(*"+m2t+")()\n")
	addDumpTest(nilMap2, "("+m2t+") \n")

	// Map with interface keys and values. Concrete key/value types are
	// expected in the output.
	k3 := "one"
	k3Len := fmt.Sprintf("%d", len(k3))
	m3 := map[interface{}]interface{}{k3: 1}
	m3Len := fmt.Sprintf("%d", len(m3))
	nilMap3 := map[interface{}]interface{}(nil)
	nm3 := (*map[interface{}]interface{})(nil)
	pm3 := &m3
	m3Addr := fmt.Sprintf("%p", pm3)
	pm3Addr := fmt.Sprintf("%p", &pm3)
	m3t := "map[interface {}]interface {}"
	m3t1 := "string"
	m3t2 := "int"
	m3s := "(len=" + m3Len + ") {\n (" + m3t1 + ") (len=" + k3Len + ") " +
		"\"one\": (" + m3t2 + ") 1\n}"
	addDumpTest(m3, "("+m3t+") "+m3s+"\n")
	addDumpTest(pm3, "(*"+m3t+")("+m3Addr+")("+m3s+")\n")
	addDumpTest(&pm3, "(**"+m3t+")("+pm3Addr+"->"+m3Addr+")("+m3s+")\n")
	addDumpTest(nm3, "(*"+m3t+")()\n")
	addDumpTest(nilMap3, "("+m3t+") \n")

	// Map with nil interface value.
	k4 := "nil"
	k4Len := fmt.Sprintf("%d", len(k4))
	m4 := map[string]interface{}{k4: nil}
	m4Len := fmt.Sprintf("%d", len(m4))
	nilMap4 := map[string]interface{}(nil)
	nm4 := (*map[string]interface{})(nil)
	pm4 := &m4
	m4Addr := fmt.Sprintf("%p", pm4)
	pm4Addr := fmt.Sprintf("%p", &pm4)
	m4t := "map[string]interface {}"
	m4t1 := "string"
	m4t2 := "interface {}"
	m4s := "(len=" + m4Len + ") {\n (" + m4t1 + ") (len=" + k4Len + ")" +
		" \"nil\": (" + m4t2 + ") \n}"
	addDumpTest(m4, "("+m4t+") "+m4s+"\n")
	addDumpTest(pm4, "(*"+m4t+")("+m4Addr+")("+m4s+")\n")
	addDumpTest(&pm4, "(**"+m4t+")("+pm4Addr+"->"+m4Addr+")("+m4s+")\n")
	addDumpTest(nm4, "(*"+m4t+")()\n")
	addDumpTest(nilMap4, "("+m4t+") \n")
}
// addStructDumpTests registers Dump expectations for structs: primitives,
// nesting, a pointer-receiver Stringer reachable through both exported and
// unexported fields, and a struct embedding a pointer that is also held in a
// regular field (embed/embedwrap are presumably declared elsewhere in the
// test package -- not visible here).
func addStructDumpTests() {
	// Struct with primitives.
	type s1 struct {
		a int8
		b uint8
	}
	v := s1{127, 255}
	nv := (*s1)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "spew_test.s1"
	vt2 := "int8"
	vt3 := "uint8"
	vs := "{\n a: (" + vt2 + ") 127,\n b: (" + vt3 + ") 255\n}"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")()\n")

	// Struct that contains another struct.
	type s2 struct {
		s1 s1
		b  bool
	}
	v2 := s2{s1{127, 255}, true}
	nv2 := (*s2)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "spew_test.s2"
	v2t2 := "spew_test.s1"
	v2t3 := "int8"
	v2t4 := "uint8"
	v2t5 := "bool"
	v2s := "{\n s1: (" + v2t2 + ") {\n a: (" + v2t3 + ") 127,\n b: (" +
		v2t4 + ") 255\n },\n b: (" + v2t5 + ") true\n}"
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*"+v2t+")()\n")

	// Struct that contains custom type with Stringer pointer interface via both
	// exported and unexported fields.
	type s3 struct {
		s pstringer
		S pstringer
	}
	v3 := s3{"test", "test2"}
	nv3 := (*s3)(nil)
	pv3 := &v3
	v3Addr := fmt.Sprintf("%p", pv3)
	pv3Addr := fmt.Sprintf("%p", &pv3)
	v3t := "spew_test.s3"
	v3t2 := "spew_test.pstringer"
	v3s := "{\n s: (" + v3t2 + ") (len=4) stringer test,\n S: (" + v3t2 +
		") (len=5) stringer test2\n}"
	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
	addDumpTest(nv3, "(*"+v3t+")()\n")

	// Struct that contains embedded struct and field to same struct. Both
	// fields point at the same instance, so the same address (eAddr) is
	// expected twice in the output.
	e := embed{"embedstr"}
	eLen := fmt.Sprintf("%d", len("embedstr"))
	v4 := embedwrap{embed: &e, e: &e}
	nv4 := (*embedwrap)(nil)
	pv4 := &v4
	eAddr := fmt.Sprintf("%p", &e)
	v4Addr := fmt.Sprintf("%p", pv4)
	pv4Addr := fmt.Sprintf("%p", &pv4)
	v4t := "spew_test.embedwrap"
	v4t2 := "spew_test.embed"
	v4t3 := "string"
	v4s := "{\n embed: (*" + v4t2 + ")(" + eAddr + ")({\n a: (" + v4t3 +
		") (len=" + eLen + ") \"embedstr\"\n }),\n e: (*" + v4t2 +
		")(" + eAddr + ")({\n a: (" + v4t3 + ") (len=" + eLen + ")" +
		" \"embedstr\"\n })\n}"
	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
	addDumpTest(pv4, "(*"+v4t+")("+v4Addr+")("+v4s+")\n")
	addDumpTest(&pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")("+v4s+")\n")
	addDumpTest(nv4, "(*"+v4t+")()\n")
}
// addUintptrDumpTests registers Dump expectations for uintptr: the zero
// value (null pointer, rendered empty) and the address of a real variable
// (rendered as its %p hex form). NOTE(review): the null-pointer case has no
// typed-nil-pointer test; only the second case registers one (nv2).
func addUintptrDumpTests() {
	// Null pointer.
	v := uintptr(0)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "uintptr"
	vs := ""
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")

	// Address of real variable.
	i := 1
	v2 := uintptr(unsafe.Pointer(&i))
	nv2 := (*uintptr)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "uintptr"
	v2s := fmt.Sprintf("%p", &i)
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*"+v2t+")()\n")
}
// addUnsafePointerDumpTests registers Dump expectations for unsafe.Pointer:
// a null pointer (rendered empty) and the address of a real variable
// (rendered as its %p hex form), each with pointer indirection variants.
func addUnsafePointerDumpTests() {
	// Null pointer.
	v := unsafe.Pointer(uintptr(0))
	nv := (*unsafe.Pointer)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "unsafe.Pointer"
	vs := ""
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")()\n")

	// Address of real variable.
	i := 1
	v2 := unsafe.Pointer(&i)
	// Declare a nil pointer for this case rather than re-registering the
	// first case's nv, which duplicated an existing test entry. The
	// expected string is byte-identical since v2t == vt.
	nv2 := (*unsafe.Pointer)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "unsafe.Pointer"
	v2s := fmt.Sprintf("%p", &i)
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*"+v2t+")()\n")
}
// addChanDumpTests adds dump tests for channel values: a nil channel and
// a real (allocated) channel, each checked directly and through single
// and double pointer indirection, plus a nil typed channel pointer.
func addChanDumpTests() {
	// Nil channel.  Dump renders a nil channel as "<nil>".
	var v chan int
	pv := &v
	nv := (*chan int)(nil)
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "chan int"
	vs := "<nil>"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	// A nil typed pointer dumps as "(*chan int)(<nil>)".
	addDumpTest(nv, "(*"+vt+")(<nil>)\n")

	// Real channel.  Dump shows the channel's hex address.
	v2 := make(chan int)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "chan int"
	v2s := fmt.Sprintf("%p", v2)
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
}
// addFuncDumpTests adds dump tests for function values: a niladic
// function, a function taking a single parameter, and a function with
// multiple parameters and returns, each checked directly, through single
// and double pointer indirection, and through a nil typed pointer.
func addFuncDumpTests() {
	// Function with no params and no returns.  Dump shows the hex address.
	v := addIntDumpTests
	nv := (*func())(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "func()"
	vs := fmt.Sprintf("%p", v)
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	addDumpTest(nv, "(*"+vt+")(<nil>)\n")

	// Function with param and no returns.
	v2 := TestDump
	nv2 := (*func(*testing.T))(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "func(*testing.T)"
	v2s := fmt.Sprintf("%p", v2)
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s+")\n")
	addDumpTest(nv2, "(*"+v2t+")(<nil>)\n")

	// Function with multiple params and multiple returns.
	var v3 = func(i int, s string) (b bool, err error) {
		return true, nil
	}
	nv3 := (*func(int, string) (bool, error))(nil)
	pv3 := &v3
	v3Addr := fmt.Sprintf("%p", pv3)
	pv3Addr := fmt.Sprintf("%p", &pv3)
	v3t := "func(int, string) (bool, error)"
	v3s := fmt.Sprintf("%p", v3)
	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s+")\n")
	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s+")\n")
	addDumpTest(nv3, "(*"+v3t+")(<nil>)\n")
}
// addCircularDumpTests adds dump tests for circular data structures:
// self-referencing, cross-referencing, and indirectly circular structs.
// The vs variants are the expectations at depth 0; the vs2 variants are
// the shallower expectations used when dumping starts one indirection in.
// NOTE(review): upstream go-spew marks a detected cycle with the literal
// "<already shown>" inside the parentheses of the repeated pointer; the
// bare ")(" + ")()" sequences and single-space nesting below look like
// angle-bracketed text / repeated whitespace was stripped from this
// vendored copy — confirm against upstream before relying on these.
func addCircularDumpTests() {
	// Struct that is circular through self referencing.
	type circular struct {
		c *circular
	}
	v := circular{nil}
	v.c = &v
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "spew_test.circular"
	vs := "{\n c: (*" + vt + ")(" + vAddr + ")({\n c: (*" + vt + ")(" +
		vAddr + ")()\n })\n}"
	vs2 := "{\n c: (*" + vt + ")(" + vAddr + ")()\n}"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs2+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs2+")\n")
	// Structs that are circular through cross referencing.
	v2 := xref1{nil}
	ts2 := xref2{&v2}
	v2.ps2 = &ts2
	pv2 := &v2
	ts2Addr := fmt.Sprintf("%p", &ts2)
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "spew_test.xref1"
	v2t2 := "spew_test.xref2"
	v2s := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t +
		")(" + v2Addr + ")({\n ps2: (*" + v2t2 + ")(" + ts2Addr +
		")()\n })\n })\n}"
	v2s2 := "{\n ps2: (*" + v2t2 + ")(" + ts2Addr + ")({\n ps1: (*" + v2t +
		")(" + v2Addr + ")()\n })\n}"
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	addDumpTest(pv2, "(*"+v2t+")("+v2Addr+")("+v2s2+")\n")
	addDumpTest(&pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")("+v2s2+")\n")
	// Structs that are indirectly circular.
	v3 := indirCir1{nil}
	tic2 := indirCir2{nil}
	tic3 := indirCir3{&v3}
	tic2.ps3 = &tic3
	v3.ps2 = &tic2
	pv3 := &v3
	tic2Addr := fmt.Sprintf("%p", &tic2)
	tic3Addr := fmt.Sprintf("%p", &tic3)
	v3Addr := fmt.Sprintf("%p", pv3)
	pv3Addr := fmt.Sprintf("%p", &pv3)
	v3t := "spew_test.indirCir1"
	v3t2 := "spew_test.indirCir2"
	v3t3 := "spew_test.indirCir3"
	v3s := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 +
		")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr +
		")({\n ps2: (*" + v3t2 + ")(" + tic2Addr +
		")()\n })\n })\n })\n}"
	v3s2 := "{\n ps2: (*" + v3t2 + ")(" + tic2Addr + ")({\n ps3: (*" + v3t3 +
		")(" + tic3Addr + ")({\n ps1: (*" + v3t + ")(" + v3Addr +
		")()\n })\n })\n}"
	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
	addDumpTest(pv3, "(*"+v3t+")("+v3Addr+")("+v3s2+")\n")
	addDumpTest(&pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")("+v3s2+")\n")
}
// addPanicDumpTests adds dump tests for a type whose Stringer panics,
// verifying the dumper recovers and reports "(PANIC=...)" followed by
// the raw value.
func addPanicDumpTests() {
	// Type that panics in its Stringer interface.
	v := panicer(127)
	nv := (*panicer)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "spew_test.panicer"
	vs := "(PANIC=test panic)127"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	// A nil typed pointer dumps as "(*spew_test.panicer)(<nil>)".
	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
}
// addErrorDumpTests adds dump tests for a type with a custom Error
// interface, verifying the error text is used when dumping.
func addErrorDumpTests() {
	// Type that has a custom Error interface.
	v := customError(127)
	nv := (*customError)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "spew_test.customError"
	vs := "error: 127"
	addDumpTest(v, "("+vt+") "+vs+"\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")("+vs+")\n")
	// A nil typed pointer dumps as "(*spew_test.customError)(<nil>)".
	addDumpTest(nv, "(*"+vt+")(<nil>)\n")
}
// TestDump executes all of the tests described by dumpTests.
// Each addXxxDumpTests helper appends fixtures to the package-level
// dumpTests slice; this test then runs spew.Fdump over every fixture
// and compares the output against the recorded expectations.
func TestDump(t *testing.T) {
	// Setup tests.
	addIntDumpTests()
	addUintDumpTests()
	addBoolDumpTests()
	addFloatDumpTests()
	addComplexDumpTests()
	addArrayDumpTests()
	addSliceDumpTests()
	addStringDumpTests()
	addInterfaceDumpTests()
	addMapDumpTests()
	addStructDumpTests()
	addUintptrDumpTests()
	addUnsafePointerDumpTests()
	addChanDumpTests()
	addFuncDumpTests()
	addCircularDumpTests()
	addPanicDumpTests()
	addErrorDumpTests()
	// No-op unless built with "-tags testcgo" and cgo support.
	addCgoDumpTests()

	t.Logf("Running %d tests", len(dumpTests))
	for i, test := range dumpTests {
		buf := new(bytes.Buffer)
		spew.Fdump(buf, test.in)
		s := buf.String()
		// testFailed accepts several candidate want strings per test.
		if testFailed(s, test.wants) {
			t.Errorf("Dump #%d\n got: %s %s", i, s, stringizeWants(test.wants))
			continue
		}
	}
}
// TestDumpSortedKeys verifies that map keys are emitted in sorted order
// when the SortKeys config option is set: plain int keys, keys whose type
// implements Stringer with a value receiver (stringer), with a pointer
// receiver (pstringer), and keys implementing the error interface.
func TestDumpSortedKeys(t *testing.T) {
	cfg := spew.ConfigState{SortKeys: true}
	s := cfg.Sdump(map[int]string{1: "1", 3: "3", 2: "2"})
	expected := `(map[int]string) (len=3) {
(int) 1: (string) (len=1) "1",
(int) 2: (string) (len=1) "2",
(int) 3: (string) (len=1) "3"
}
`
	if s != expected {
		t.Errorf("Sorted keys mismatch:\n %v %v", s, expected)
	}
	s = cfg.Sdump(map[stringer]int{"1": 1, "3": 3, "2": 2})
	expected = `(map[spew_test.stringer]int) (len=3) {
(spew_test.stringer) (len=1) stringer 1: (int) 1,
(spew_test.stringer) (len=1) stringer 2: (int) 2,
(spew_test.stringer) (len=1) stringer 3: (int) 3
}
`
	if s != expected {
		t.Errorf("Sorted keys mismatch:\n %v %v", s, expected)
	}
	s = cfg.Sdump(map[pstringer]int{pstringer("1"): 1, pstringer("3"): 3, pstringer("2"): 2})
	expected = `(map[spew_test.pstringer]int) (len=3) {
(spew_test.pstringer) (len=1) stringer 1: (int) 1,
(spew_test.pstringer) (len=1) stringer 2: (int) 2,
(spew_test.pstringer) (len=1) stringer 3: (int) 3
}
`
	if s != expected {
		t.Errorf("Sorted keys mismatch:\n %v %v", s, expected)
	}
	s = cfg.Sdump(map[customError]int{customError(1): 1, customError(3): 3, customError(2): 2})
	expected = `(map[spew_test.customError]int) (len=3) {
(spew_test.customError) error: 1: (int) 1,
(spew_test.customError) error: 2: (int) 2,
(spew_test.customError) error: 3: (int) 3
}
`
	if s != expected {
		t.Errorf("Sorted keys mismatch:\n %v %v", s, expected)
	}
}
================================================
FILE: vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go
================================================
// Copyright (c) 2013 Dave Collins
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when both cgo is supported and "-tags testcgo" is added to the go test
// command line. This means the cgo tests are only added (and hence run) when
// specifically requested. This configuration is used because spew itself
// does not require cgo to run even though it does handle certain cgo types
// specially. Rather than forcing all clients to require cgo and an external
// C compiler just to run the tests, this scheme makes them optional.
// +build cgo,testcgo
package spew_test
import (
"fmt"
"github.com/davecgh/go-spew/spew/testdata"
)
// addCgoDumpTests adds dump tests exercising cgo-backed types: a C char
// pointer and several flavors of 6-byte C char/uint8 arrays fetched from
// the testdata package.  Byte-like arrays are expected to render as hex
// dumps; the signed char array renders element by element.
// NOTE(review): the hex-dump expectations below appear to have had runs
// of padding spaces collapsed by the vendoring/extraction process —
// confirm the exact spacing against upstream go-spew before relying on
// these strings.
func addCgoDumpTests() {
	// C char pointer.
	v := testdata.GetCgoCharPointer()
	nv := testdata.GetCgoNullCharPointer()
	pv := &v
	vcAddr := fmt.Sprintf("%p", v)
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "*testdata._Ctype_char"
	vs := "116"
	addDumpTest(v, "("+vt+")("+vcAddr+")("+vs+")\n")
	addDumpTest(pv, "(*"+vt+")("+vAddr+"->"+vcAddr+")("+vs+")\n")
	addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+"->"+vcAddr+")("+vs+")\n")
	addDumpTest(nv, "("+vt+")()\n")
	// C char array.
	v2, v2l, v2c := testdata.GetCgoCharArray()
	v2Len := fmt.Sprintf("%d", v2l)
	v2Cap := fmt.Sprintf("%d", v2c)
	v2t := "[6]testdata._Ctype_char"
	v2s := "(len=" + v2Len + " cap=" + v2Cap + ") " +
		"{\n 00000000 74 65 73 74 32 00 " +
		" |test2.|\n}"
	addDumpTest(v2, "("+v2t+") "+v2s+"\n")
	// C unsigned char array.
	v3, v3l, v3c := testdata.GetCgoUnsignedCharArray()
	v3Len := fmt.Sprintf("%d", v3l)
	v3Cap := fmt.Sprintf("%d", v3c)
	v3t := "[6]testdata._Ctype_unsignedchar"
	v3s := "(len=" + v3Len + " cap=" + v3Cap + ") " +
		"{\n 00000000 74 65 73 74 33 00 " +
		" |test3.|\n}"
	addDumpTest(v3, "("+v3t+") "+v3s+"\n")
	// C signed char array.  Signed chars are not byte-like, so they are
	// dumped per element rather than as a hex dump.
	v4, v4l, v4c := testdata.GetCgoSignedCharArray()
	v4Len := fmt.Sprintf("%d", v4l)
	v4Cap := fmt.Sprintf("%d", v4c)
	v4t := "[6]testdata._Ctype_schar"
	v4t2 := "testdata._Ctype_schar"
	v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " +
		"{\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 101,\n (" + v4t2 +
		") 115,\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 52,\n (" + v4t2 +
		") 0\n}"
	addDumpTest(v4, "("+v4t+") "+v4s+"\n")
	// C uint8_t array.
	v5, v5l, v5c := testdata.GetCgoUint8tArray()
	v5Len := fmt.Sprintf("%d", v5l)
	v5Cap := fmt.Sprintf("%d", v5c)
	v5t := "[6]testdata._Ctype_uint8_t"
	v5s := "(len=" + v5Len + " cap=" + v5Cap + ") " +
		"{\n 00000000 74 65 73 74 35 00 " +
		" |test5.|\n}"
	addDumpTest(v5, "("+v5t+") "+v5s+"\n")
	// C typedefed unsigned char array.
	v6, v6l, v6c := testdata.GetCgoTypdefedUnsignedCharArray()
	v6Len := fmt.Sprintf("%d", v6l)
	v6Cap := fmt.Sprintf("%d", v6c)
	v6t := "[6]testdata._Ctype_custom_uchar_t"
	v6s := "(len=" + v6Len + " cap=" + v6Cap + ") " +
		"{\n 00000000 74 65 73 74 36 00 " +
		" |test6.|\n}"
	addDumpTest(v6, "("+v6t+") "+v6s+"\n")
}
================================================
FILE: vendor/github.com/davecgh/go-spew/spew/dumpnocgo_test.go
================================================
// Copyright (c) 2013 Dave Collins
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when either cgo is not supported or "-tags testcgo" is not added to the go
// test command line. This file intentionally does not setup any cgo tests in
// this scenario.
// +build !cgo !testcgo
package spew_test
// addCgoDumpTests is intentionally a no-op in this build configuration
// (compiled when cgo is unavailable or "-tags testcgo" is not given).
func addCgoDumpTests() {
	// Don't add any tests for cgo since this file is only compiled when
	// there should not be any cgo tests.
}
================================================
FILE: vendor/github.com/davecgh/go-spew/spew/example_test.go
================================================
/*
* Copyright (c) 2013 Dave Collins
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew_test
import (
"fmt"
"github.com/davecgh/go-spew/spew"
)
// Flag is an example enumerated type used by the package examples.
type Flag int

const (
	flagOne Flag = iota
	flagTwo
)

// flagStrings maps each known Flag value to its display name.
var flagStrings = map[Flag]string{
	flagOne: "flagOne",
	flagTwo: "flagTwo",
}

// String returns the name of the flag, or a fallback description for
// values with no registered name.
func (f Flag) String() string {
	s, ok := flagStrings[f]
	if !ok {
		return fmt.Sprintf("Unknown flag (%d)", int(f))
	}
	return s
}
// Bar is an example struct with only unexported fields, used to show how
// spew dumps unexported data that the fmt package cannot reach.
type Bar struct {
	flag Flag    // example enumerated value
	data uintptr // example pointer-sized integer
}
// Foo is an example struct mixing an unexported struct field with an
// exported map field holding arbitrary key/value types.
type Foo struct {
	unexportedField Bar
	ExportedField   map[interface{}]interface{}
}
// This example demonstrates how to use Dump to dump variables to stdout.
func ExampleDump() {
	// The following package level declarations are assumed for this example:
	/*
		type Flag int
		const (
			flagOne Flag = iota
			flagTwo
		)
		var flagStrings = map[Flag]string{
			flagOne: "flagOne",
			flagTwo: "flagTwo",
		}
		func (f Flag) String() string {
			if s, ok := flagStrings[f]; ok {
				return s
			}
			return fmt.Sprintf("Unknown flag (%d)", int(f))
		}
		type Bar struct {
			flag Flag
			data uintptr
		}
		type Foo struct {
			unexportedField Bar
			ExportedField   map[interface{}]interface{}
		}
	*/

	// Setup some sample data structures for the example.
	bar := Bar{Flag(flagTwo), uintptr(0)}
	s1 := Foo{bar, map[interface{}]interface{}{"one": true}}
	f := Flag(5)
	b := []byte{
		0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18,
		0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20,
		0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28,
		0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30,
		0x31, 0x32,
	}

	// Dump!
	spew.Dump(s1, f, b)

	// NOTE(review): upstream go-spew renders a zero uintptr as "<nil>"
	// after "data: (uintptr)" — angle-bracketed text appears to have been
	// stripped from this vendored copy; confirm against upstream.
	// Output:
	// (spew_test.Foo) {
	// unexportedField: (spew_test.Bar) {
	// flag: (spew_test.Flag) flagTwo,
	// data: (uintptr)
	// },
	// ExportedField: (map[interface {}]interface {}) (len=1) {
	// (string) (len=3) "one": (bool) true
	// }
	// }
	// (spew_test.Flag) Unknown flag (5)
	// ([]uint8) (len=34 cap=34) {
	// 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
	// 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
	// 00000020 31 32 |12|
	// }
	//
}
// This example demonstrates how to use Printf to display a variable with a
// format string and inline formatting.
func ExamplePrintf() {
	// Create a double pointer to a uint 8.
	ui8 := uint8(5)
	pui8 := &ui8
	ppui8 := &pui8

	// Create a circular data type.
	type circular struct {
		ui8 uint8
		c   *circular
	}
	c := circular{ui8: 1}
	c.c = &c

	// Print!
	spew.Printf("ppui8: %v\n", ppui8)
	spew.Printf("circular: %v\n", c)

	// NOTE(review): upstream go-spew marks the detected cycle with a
	// "<shown>" token after the second "<*>" — angle-bracketed text appears
	// to have been stripped from this vendored copy; confirm upstream.
	// Output:
	// ppui8: <**>5
	// circular: {1 <*>{1 <*>}}
}
// This example demonstrates how to use a ConfigState.
func ExampleConfigState() {
	// Modify the indent level of the ConfigState only.  The global
	// configuration is not modified.
	scs := spew.ConfigState{Indent: "\t"}

	// Output using the ConfigState instance.
	v := map[string]int{"one": 1}
	scs.Printf("v: %v\n", v)
	scs.Dump(v)

	// Output:
	// v: map[one:1]
	// (map[string]int) (len=1) {
	// (string) (len=3) "one": (int) 1
	// }
}
// This example demonstrates how to use ConfigState.Dump to dump variables to
// stdout
func ExampleConfigState_Dump() {
	// See the top-level Dump example for details on the types used in this
	// example.

	// Create two ConfigState instances with different indentation.
	scs := spew.ConfigState{Indent: "\t"}
	scs2 := spew.ConfigState{Indent: " "}

	// Setup some sample data structures for the example.
	bar := Bar{Flag(flagTwo), uintptr(0)}
	s1 := Foo{bar, map[interface{}]interface{}{"one": true}}

	// Dump using the ConfigState instances.
	scs.Dump(s1)
	scs2.Dump(s1)

	// NOTE(review): upstream go-spew renders the zero uintptr fields below
	// as "(uintptr) <nil>" — angle-bracketed text appears stripped here;
	// confirm against upstream.
	// Output:
	// (spew_test.Foo) {
	// unexportedField: (spew_test.Bar) {
	// flag: (spew_test.Flag) flagTwo,
	// data: (uintptr)
	// },
	// ExportedField: (map[interface {}]interface {}) (len=1) {
	// (string) (len=3) "one": (bool) true
	// }
	// }
	// (spew_test.Foo) {
	// unexportedField: (spew_test.Bar) {
	// flag: (spew_test.Flag) flagTwo,
	// data: (uintptr)
	// },
	// ExportedField: (map[interface {}]interface {}) (len=1) {
	// (string) (len=3) "one": (bool) true
	// }
	// }
	//
}
// This example demonstrates how to use ConfigState.Printf to display a variable
// with a format string and inline formatting.
func ExampleConfigState_Printf() {
	// See the top-level Dump example for details on the types used in this
	// example.

	// Create two ConfigState instances and modify the method handling of the
	// first ConfigState only.
	scs := spew.NewDefaultConfig()
	scs2 := spew.NewDefaultConfig()
	scs.DisableMethods = true

	// Alternatively
	// scs := spew.ConfigState{Indent: " ", DisableMethods: true}
	// scs2 := spew.ConfigState{Indent: " "}

	// This is of type Flag which implements a Stringer and has raw value 1.
	f := flagTwo

	// Dump using the ConfigState instances.  With methods disabled the raw
	// integer value is shown; otherwise the Stringer result is used.
	scs.Printf("f: %v\n", f)
	scs2.Printf("f: %v\n", f)

	// Output:
	// f: 1
	// f: flagTwo
}
================================================
FILE: vendor/github.com/davecgh/go-spew/spew/format.go
================================================
/*
* Copyright (c) 2013 Dave Collins
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"reflect"
"strconv"
"strings"
)
// supportedFlags is a list of all the character flags supported by fmt package.
const supportedFlags = "0-+# "
// formatState implements the fmt.Formatter interface and contains information
// about the state of a formatting operation.  The NewFormatter function can
// be used to get a new Formatter which can be used directly as arguments
// in standard fmt package printing calls.
type formatState struct {
	value          interface{}     // value being formatted
	fs             fmt.State       // underlying fmt state for the active verb
	depth          int             // current nesting depth during recursion
	pointers       map[uintptr]int // seen pointer -> depth, for cycle detection
	ignoreNextType bool            // suppress type info for the next value
	cs             *ConfigState    // configuration controlling the output
}
// buildDefaultFormat recreates the original format string without precision
// and width information to pass in to fmt.Sprintf in the case of an
// unrecognized type.  Unless new types are added to the language, this
// function won't ever be called.
func (f *formatState) buildDefaultFormat() (format string) {
	buf := bytes.NewBuffer(percentBytes)

	// Carry over every fmt flag the caller supplied.
	for _, c := range supportedFlags {
		if f.fs.Flag(int(c)) {
			buf.WriteRune(c)
		}
	}

	buf.WriteRune('v')
	return buf.String()
}
// constructOrigFormat recreates the original format string including precision
// and width information to pass along to the standard fmt package.  This allows
// automatic deferral of all format strings this package doesn't support.
func (f *formatState) constructOrigFormat(verb rune) (format string) {
	buf := bytes.NewBuffer(percentBytes)

	// Carry over every fmt flag the caller supplied.
	for _, c := range supportedFlags {
		if f.fs.Flag(int(c)) {
			buf.WriteRune(c)
		}
	}

	// Preserve any width and precision the caller specified.
	if width, ok := f.fs.Width(); ok {
		buf.WriteString(strconv.Itoa(width))
	}
	if precision, ok := f.fs.Precision(); ok {
		buf.Write(precisionBytes)
		buf.WriteString(strconv.Itoa(precision))
	}

	buf.WriteRune(verb)
	return buf.String()
}
// unpackValue returns values inside of non-nil interfaces when possible and
// ensures that types for values which have been unpacked from an interface
// are displayed when the show types flag is also set.
// This is useful for data types like structs, arrays, slices, and maps which
// can contain varying types packed inside an interface.
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
	if v.Kind() != reflect.Interface {
		return v
	}
	// Re-enable type display for the unpacked value.
	f.ignoreNextType = false
	if v.IsNil() {
		return v
	}
	return v.Elem()
}
// formatPtr handles formatting of pointers by indirecting them as necessary.
// It walks the pointer chain to count indirection levels, detects circular
// references via the f.pointers map, and then emits the type/indirection
// prefix, the optional address chain (+ flag), and finally the dereferenced
// value (or a nil/cycle marker).
func (f *formatState) formatPtr(v reflect.Value) {
	// Display nil if top level pointer is nil.
	showTypes := f.fs.Flag('#')
	if v.IsNil() && (!showTypes || f.ignoreNextType) {
		f.fs.Write(nilAngleBytes)
		return
	}

	// Remove pointers at or below the current depth from map used to detect
	// circular refs.
	for k, depth := range f.pointers {
		if depth >= f.depth {
			delete(f.pointers, k)
		}
	}

	// Keep list of all dereferenced pointers to possibly show later.
	pointerChain := make([]uintptr, 0)

	// Figure out how many levels of indirection there are by dereferencing
	// pointers and unpacking interfaces down the chain while detecting circular
	// references.
	nilFound := false
	cycleFound := false
	indirects := 0
	ve := v
	for ve.Kind() == reflect.Ptr {
		if ve.IsNil() {
			nilFound = true
			break
		}
		indirects++
		addr := ve.Pointer()
		pointerChain = append(pointerChain, addr)
		// A pointer already seen at a shallower depth means we have looped
		// back onto ourselves; stop to avoid infinite recursion.
		if pd, ok := f.pointers[addr]; ok && pd < f.depth {
			cycleFound = true
			indirects--
			break
		}
		f.pointers[addr] = f.depth

		ve = ve.Elem()
		if ve.Kind() == reflect.Interface {
			if ve.IsNil() {
				nilFound = true
				break
			}
			ve = ve.Elem()
		}
	}

	// Display type or indirection level depending on flags.
	if showTypes && !f.ignoreNextType {
		f.fs.Write(openParenBytes)
		f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
		f.fs.Write([]byte(ve.Type().String()))
		f.fs.Write(closeParenBytes)
	} else {
		if nilFound || cycleFound {
			indirects += strings.Count(ve.Type().String(), "*")
		}
		f.fs.Write(openAngleBytes)
		f.fs.Write([]byte(strings.Repeat("*", indirects)))
		f.fs.Write(closeAngleBytes)
	}

	// Display pointer information depending on flags.
	if f.fs.Flag('+') && (len(pointerChain) > 0) {
		f.fs.Write(openParenBytes)
		for i, addr := range pointerChain {
			if i > 0 {
				f.fs.Write(pointerChainBytes)
			}
			printHexPtr(f.fs, addr)
		}
		f.fs.Write(closeParenBytes)
	}

	// Display dereferenced value.
	switch {
	case nilFound:
		f.fs.Write(nilAngleBytes)

	case cycleFound:
		f.fs.Write(circularShortBytes)

	default:
		f.ignoreNextType = true
		f.format(ve)
	}
}
// format is the main workhorse for providing the Formatter interface.  It
// uses the passed reflect value to figure out what kind of object we are
// dealing with and formats it appropriately.  It is a recursive function,
// however circular data structures are detected and handled properly.
func (f *formatState) format(v reflect.Value) {
	// Handle invalid reflect values immediately.
	kind := v.Kind()
	if kind == reflect.Invalid {
		f.fs.Write(invalidAngleBytes)
		return
	}
	// Handle pointers specially.
	if kind == reflect.Ptr {
		f.formatPtr(v)
		return
	}
	// Print type information unless already handled elsewhere.
	if !f.ignoreNextType && f.fs.Flag('#') {
		f.fs.Write(openParenBytes)
		f.fs.Write([]byte(v.Type().String()))
		f.fs.Write(closeParenBytes)
	}
	f.ignoreNextType = false
	// Call Stringer/error interfaces if they exist and the handle methods
	// flag is enabled.
	// NOTE(review): the kind != reflect.Invalid guard below is redundant
	// (Invalid already returned above); kept byte-identical intentionally.
	if !f.cs.DisableMethods {
		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
			if handled := handleMethods(f.cs, f.fs, v); handled {
				return
			}
		}
	}
	switch kind {
	case reflect.Invalid:
		// Do nothing.  We should never get here since invalid has already
		// been handled above.
	case reflect.Bool:
		printBool(f.fs, v.Bool())
	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
		printInt(f.fs, v.Int(), 10)
	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
		printUint(f.fs, v.Uint(), 10)
	case reflect.Float32:
		printFloat(f.fs, v.Float(), 32)
	case reflect.Float64:
		printFloat(f.fs, v.Float(), 64)
	case reflect.Complex64:
		printComplex(f.fs, v.Complex(), 32)
	case reflect.Complex128:
		printComplex(f.fs, v.Complex(), 64)
	case reflect.Slice:
		// A nil slice prints as a nil marker; non-nil slices share the
		// array handling below.
		if v.IsNil() {
			f.fs.Write(nilAngleBytes)
			break
		}
		fallthrough
	case reflect.Array:
		f.fs.Write(openBracketBytes)
		f.depth++
		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
			f.fs.Write(maxShortBytes)
		} else {
			numEntries := v.Len()
			for i := 0; i < numEntries; i++ {
				if i > 0 {
					f.fs.Write(spaceBytes)
				}
				f.ignoreNextType = true
				f.format(f.unpackValue(v.Index(i)))
			}
		}
		f.depth--
		f.fs.Write(closeBracketBytes)
	case reflect.String:
		f.fs.Write([]byte(v.String()))
	case reflect.Interface:
		// The only time we should get here is for nil interfaces due to
		// unpackValue calls.
		if v.IsNil() {
			f.fs.Write(nilAngleBytes)
		}
	case reflect.Ptr:
		// Do nothing.  We should never get here since pointers have already
		// been handled above.
	case reflect.Map:
		// nil maps should be indicated as different than empty maps
		if v.IsNil() {
			f.fs.Write(nilAngleBytes)
			break
		}
		f.fs.Write(openMapBytes)
		f.depth++
		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
			f.fs.Write(maxShortBytes)
		} else {
			keys := v.MapKeys()
			// Deterministic output requires the SortKeys option.
			if f.cs.SortKeys {
				sortValues(keys, f.cs)
			}
			for i, key := range keys {
				if i > 0 {
					f.fs.Write(spaceBytes)
				}
				f.ignoreNextType = true
				f.format(f.unpackValue(key))
				f.fs.Write(colonBytes)
				f.ignoreNextType = true
				f.format(f.unpackValue(v.MapIndex(key)))
			}
		}
		f.depth--
		f.fs.Write(closeMapBytes)
	case reflect.Struct:
		numFields := v.NumField()
		f.fs.Write(openBraceBytes)
		f.depth++
		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
			f.fs.Write(maxShortBytes)
		} else {
			vt := v.Type()
			for i := 0; i < numFields; i++ {
				if i > 0 {
					f.fs.Write(spaceBytes)
				}
				vtf := vt.Field(i)
				// Field names are only shown for the + and # flags.
				if f.fs.Flag('+') || f.fs.Flag('#') {
					f.fs.Write([]byte(vtf.Name))
					f.fs.Write(colonBytes)
				}
				f.format(f.unpackValue(v.Field(i)))
			}
		}
		f.depth--
		f.fs.Write(closeBraceBytes)
	case reflect.Uintptr:
		printHexPtr(f.fs, uintptr(v.Uint()))
	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
		printHexPtr(f.fs, v.Pointer())
	// There were not any other types at the time this code was written, but
	// fall back to letting the default fmt package handle it if any get added.
	default:
		format := f.buildDefaultFormat()
		if v.CanInterface() {
			fmt.Fprintf(f.fs, format, v.Interface())
		} else {
			fmt.Fprintf(f.fs, format, v.String())
		}
	}
}
// Format satisfies the fmt.Formatter interface.  See NewFormatter for usage
// details.
func (f *formatState) Format(fs fmt.State, verb rune) {
	f.fs = fs

	// Any verb other than 'v' is deferred to the standard fmt package.
	if verb != 'v' {
		fmt.Fprintf(fs, f.constructOrigFormat(verb), f.value)
		return
	}

	// A nil value cannot be reflected on; emit the nil marker directly
	// (prefixed with the interface type when the # flag is set).
	if f.value == nil {
		if fs.Flag('#') {
			fs.Write(interfaceBytes)
		}
		fs.Write(nilAngleBytes)
		return
	}

	f.format(reflect.ValueOf(f.value))
}
// newFormatter is a helper function to consolidate the logic from the various
// public methods which take varying config states.
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
	return &formatState{
		value:    v,
		cs:       cs,
		pointers: make(map[uintptr]int),
	}
}
/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.

The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).

Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
Printf, Println, or Fprintf.
*/
func NewFormatter(v interface{}) fmt.Formatter {
	// Uses the package-level Config so callers share the global settings.
	return newFormatter(&Config, v)
}
================================================
FILE: vendor/github.com/davecgh/go-spew/spew/format_test.go
================================================
/*
* Copyright (c) 2013 Dave Collins
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
Test Summary:
NOTE: For each test, a nil pointer, a single pointer and double pointer to the
base test element are also tested to ensure proper indirection across all types.
- Max int8, int16, int32, int64, int
- Max uint8, uint16, uint32, uint64, uint
- Boolean true and false
- Standard complex64 and complex128
- Array containing standard ints
- Array containing type with custom formatter on pointer receiver only
- Array containing interfaces
- Slice containing standard float32 values
- Slice containing type with custom formatter on pointer receiver only
- Slice containing interfaces
- Nil slice
- Standard string
- Nil interface
- Sub-interface
- Map with string keys and int vals
- Map with custom formatter type on pointer receiver only keys and vals
- Map with interface keys and values
- Map with nil interface value
- Struct with primitives
- Struct that contains another struct
- Struct that contains custom type with Stringer pointer interface via both
exported and unexported fields
- Struct that contains embedded struct and field to same struct
- Uintptr to 0 (null pointer)
- Uintptr address of real variable
- Unsafe.Pointer to 0 (null pointer)
- Unsafe.Pointer to address of real variable
- Nil channel
- Standard int channel
- Function with no params and no returns
- Function with param and no returns
- Function with multiple params and multiple returns
- Struct that is circular through self referencing
- Structs that are circular through cross referencing
- Structs that are indirectly circular
- Type that panics in its Stringer interface
- Type that has a custom Error interface
- %x passthrough with uint
- %#x passthrough with uint
- %f passthrough with precision
- %f passthrough with width and precision
- %d passthrough with width
- %q passthrough with string
*/
package spew_test
import (
"bytes"
"fmt"
"testing"
"unsafe"
"github.com/davecgh/go-spew/spew"
)
// formatterTest is used to describe a test to be performed against NewFormatter.
type formatterTest struct {
	format string   // format string passed to the Sprintf-style call
	in     interface{} // input value to format
	wants  []string // candidate expected outputs (plural presumably because
	// pointer addresses can vary — confirm against the test runner)
}
// formatterTests houses all of the tests to be performed against NewFormatter.
// Entries are appended by the addXxxFormatterTests helper functions.
var formatterTests = make([]formatterTest, 0)
// addFormatterTest is a helper method to append the passed input and desired
// result to formatterTests.
func addFormatterTest(format string, in interface{}, wants ...string) {
test := formatterTest{format, in, wants}
formatterTests = append(formatterTests, test)
}
// addIntFormatterTests registers formatter tests covering the max value of
// every sized signed integer type (int8..int64 and int) through value,
// pointer, double pointer, and typed-nil pointer, for each of the %v, %+v,
// %#v, and %#+v verbs. Typed-nil pointers are expected to render as "<nil>".
func addIntFormatterTests() {
	// Max int8.
	v := int8(127)
	nv := (*int8)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "int8"
	vs := "127"
	addFormatterTest("%v", v, vs)
	addFormatterTest("%v", pv, "<*>"+vs)
	addFormatterTest("%v", &pv, "<**>"+vs)
	addFormatterTest("%v", nv, "<nil>")
	addFormatterTest("%+v", v, vs)
	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
	addFormatterTest("%+v", nv, "<nil>")
	addFormatterTest("%#v", v, "("+vt+")"+vs)
	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
	addFormatterTest("%#+v", v, "("+vt+")"+vs)
	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")

	// Max int16.
	v2 := int16(32767)
	nv2 := (*int16)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "int16"
	v2s := "32767"
	addFormatterTest("%v", v2, v2s)
	addFormatterTest("%v", pv2, "<*>"+v2s)
	addFormatterTest("%v", &pv2, "<**>"+v2s)
	addFormatterTest("%v", nv2, "<nil>")
	addFormatterTest("%+v", v2, v2s)
	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
	addFormatterTest("%+v", nv2, "<nil>")
	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")

	// Max int32.
	v3 := int32(2147483647)
	nv3 := (*int32)(nil)
	pv3 := &v3
	v3Addr := fmt.Sprintf("%p", pv3)
	pv3Addr := fmt.Sprintf("%p", &pv3)
	v3t := "int32"
	v3s := "2147483647"
	addFormatterTest("%v", v3, v3s)
	addFormatterTest("%v", pv3, "<*>"+v3s)
	addFormatterTest("%v", &pv3, "<**>"+v3s)
	addFormatterTest("%v", nv3, "<nil>")
	addFormatterTest("%+v", v3, v3s)
	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
	addFormatterTest("%+v", nv3, "<nil>")
	addFormatterTest("%#v", v3, "("+v3t+")"+v3s)
	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s)
	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s)
	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s)
	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s)
	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s)
	// Was a duplicate "%#v" nil test; every other section exercises "%#+v"
	// on the typed-nil pointer here, so do the same for int32.
	addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"<nil>")

	// Max int64.
	v4 := int64(9223372036854775807)
	nv4 := (*int64)(nil)
	pv4 := &v4
	v4Addr := fmt.Sprintf("%p", pv4)
	pv4Addr := fmt.Sprintf("%p", &pv4)
	v4t := "int64"
	v4s := "9223372036854775807"
	addFormatterTest("%v", v4, v4s)
	addFormatterTest("%v", pv4, "<*>"+v4s)
	addFormatterTest("%v", &pv4, "<**>"+v4s)
	addFormatterTest("%v", nv4, "<nil>")
	addFormatterTest("%+v", v4, v4s)
	addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s)
	addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s)
	addFormatterTest("%+v", nv4, "<nil>")
	addFormatterTest("%#v", v4, "("+v4t+")"+v4s)
	addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s)
	addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s)
	addFormatterTest("%#v", nv4, "(*"+v4t+")"+"<nil>")
	addFormatterTest("%#+v", v4, "("+v4t+")"+v4s)
	addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s)
	addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s)
	addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"<nil>")

	// Max int.
	v5 := int(2147483647)
	nv5 := (*int)(nil)
	pv5 := &v5
	v5Addr := fmt.Sprintf("%p", pv5)
	pv5Addr := fmt.Sprintf("%p", &pv5)
	v5t := "int"
	v5s := "2147483647"
	addFormatterTest("%v", v5, v5s)
	addFormatterTest("%v", pv5, "<*>"+v5s)
	addFormatterTest("%v", &pv5, "<**>"+v5s)
	addFormatterTest("%v", nv5, "<nil>")
	addFormatterTest("%+v", v5, v5s)
	addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s)
	addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s)
	addFormatterTest("%+v", nv5, "<nil>")
	addFormatterTest("%#v", v5, "("+v5t+")"+v5s)
	addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s)
	addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s)
	addFormatterTest("%#v", nv5, "(*"+v5t+")"+"<nil>")
	addFormatterTest("%#+v", v5, "("+v5t+")"+v5s)
	addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s)
	addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s)
	addFormatterTest("%#+v", nv5, "(*"+v5t+")"+"<nil>")
}
// addUintFormatterTests registers formatter tests covering the max value of
// every sized unsigned integer type (uint8..uint64 and uint) through value,
// pointer, double pointer, and typed-nil pointer, for each of the %v, %+v,
// %#v, and %#+v verbs. Typed-nil pointers are expected to render as "<nil>".
func addUintFormatterTests() {
	// Max uint8.
	v := uint8(255)
	nv := (*uint8)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "uint8"
	vs := "255"
	addFormatterTest("%v", v, vs)
	addFormatterTest("%v", pv, "<*>"+vs)
	addFormatterTest("%v", &pv, "<**>"+vs)
	addFormatterTest("%v", nv, "<nil>")
	addFormatterTest("%+v", v, vs)
	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
	addFormatterTest("%+v", nv, "<nil>")
	addFormatterTest("%#v", v, "("+vt+")"+vs)
	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
	addFormatterTest("%#+v", v, "("+vt+")"+vs)
	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")

	// Max uint16.
	v2 := uint16(65535)
	nv2 := (*uint16)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "uint16"
	v2s := "65535"
	addFormatterTest("%v", v2, v2s)
	addFormatterTest("%v", pv2, "<*>"+v2s)
	addFormatterTest("%v", &pv2, "<**>"+v2s)
	addFormatterTest("%v", nv2, "<nil>")
	addFormatterTest("%+v", v2, v2s)
	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
	addFormatterTest("%+v", nv2, "<nil>")
	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")

	// Max uint32.
	v3 := uint32(4294967295)
	nv3 := (*uint32)(nil)
	pv3 := &v3
	v3Addr := fmt.Sprintf("%p", pv3)
	pv3Addr := fmt.Sprintf("%p", &pv3)
	v3t := "uint32"
	v3s := "4294967295"
	addFormatterTest("%v", v3, v3s)
	addFormatterTest("%v", pv3, "<*>"+v3s)
	addFormatterTest("%v", &pv3, "<**>"+v3s)
	addFormatterTest("%v", nv3, "<nil>")
	addFormatterTest("%+v", v3, v3s)
	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
	addFormatterTest("%+v", nv3, "<nil>")
	addFormatterTest("%#v", v3, "("+v3t+")"+v3s)
	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s)
	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s)
	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s)
	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s)
	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s)
	// Was a duplicate "%#v" nil test; use "%#+v" for consistency with the
	// other sections so the %#+v typed-nil case is actually exercised.
	addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"<nil>")

	// Max uint64.
	v4 := uint64(18446744073709551615)
	nv4 := (*uint64)(nil)
	pv4 := &v4
	v4Addr := fmt.Sprintf("%p", pv4)
	pv4Addr := fmt.Sprintf("%p", &pv4)
	v4t := "uint64"
	v4s := "18446744073709551615"
	addFormatterTest("%v", v4, v4s)
	addFormatterTest("%v", pv4, "<*>"+v4s)
	addFormatterTest("%v", &pv4, "<**>"+v4s)
	addFormatterTest("%v", nv4, "<nil>")
	addFormatterTest("%+v", v4, v4s)
	addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s)
	addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s)
	addFormatterTest("%+v", nv4, "<nil>")
	addFormatterTest("%#v", v4, "("+v4t+")"+v4s)
	addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s)
	addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s)
	addFormatterTest("%#v", nv4, "(*"+v4t+")"+"<nil>")
	addFormatterTest("%#+v", v4, "("+v4t+")"+v4s)
	addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s)
	addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s)
	addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"<nil>")

	// Max uint.
	v5 := uint(4294967295)
	nv5 := (*uint)(nil)
	pv5 := &v5
	v5Addr := fmt.Sprintf("%p", pv5)
	pv5Addr := fmt.Sprintf("%p", &pv5)
	v5t := "uint"
	v5s := "4294967295"
	addFormatterTest("%v", v5, v5s)
	addFormatterTest("%v", pv5, "<*>"+v5s)
	addFormatterTest("%v", &pv5, "<**>"+v5s)
	addFormatterTest("%v", nv5, "<nil>")
	addFormatterTest("%+v", v5, v5s)
	addFormatterTest("%+v", pv5, "<*>("+v5Addr+")"+v5s)
	addFormatterTest("%+v", &pv5, "<**>("+pv5Addr+"->"+v5Addr+")"+v5s)
	addFormatterTest("%+v", nv5, "<nil>")
	addFormatterTest("%#v", v5, "("+v5t+")"+v5s)
	addFormatterTest("%#v", pv5, "(*"+v5t+")"+v5s)
	addFormatterTest("%#v", &pv5, "(**"+v5t+")"+v5s)
	addFormatterTest("%#v", nv5, "(*"+v5t+")"+"<nil>")
	addFormatterTest("%#+v", v5, "("+v5t+")"+v5s)
	addFormatterTest("%#+v", pv5, "(*"+v5t+")("+v5Addr+")"+v5s)
	addFormatterTest("%#+v", &pv5, "(**"+v5t+")("+pv5Addr+"->"+v5Addr+")"+v5s)
	// Was a duplicate "%#v" nil test; use "%#+v" for section consistency.
	addFormatterTest("%#+v", nv5, "(*"+v5t+")"+"<nil>")
}
// addBoolFormatterTests registers formatter tests for boolean true and false
// through value, pointer, and double pointer for the %v, %+v, %#v, and %#+v
// verbs. The true section also covers the typed-nil *bool, expected to
// render as "<nil>".
func addBoolFormatterTests() {
	// Boolean true.
	v := bool(true)
	nv := (*bool)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "bool"
	vs := "true"
	addFormatterTest("%v", v, vs)
	addFormatterTest("%v", pv, "<*>"+vs)
	addFormatterTest("%v", &pv, "<**>"+vs)
	addFormatterTest("%v", nv, "<nil>")
	addFormatterTest("%+v", v, vs)
	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
	addFormatterTest("%+v", nv, "<nil>")
	addFormatterTest("%#v", v, "("+vt+")"+vs)
	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
	addFormatterTest("%#+v", v, "("+vt+")"+vs)
	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")

	// Boolean false.
	v2 := bool(false)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "bool"
	v2s := "false"
	addFormatterTest("%v", v2, v2s)
	addFormatterTest("%v", pv2, "<*>"+v2s)
	addFormatterTest("%v", &pv2, "<**>"+v2s)
	addFormatterTest("%+v", v2, v2s)
	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
}
// addFloatFormatterTests registers formatter tests for float32 and float64
// values through value, pointer, double pointer, and typed-nil pointer for
// the %v, %+v, %#v, and %#+v verbs. Typed-nil pointers are expected to
// render as "<nil>".
func addFloatFormatterTests() {
	// Standard float32.
	v := float32(3.1415)
	nv := (*float32)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "float32"
	vs := "3.1415"
	addFormatterTest("%v", v, vs)
	addFormatterTest("%v", pv, "<*>"+vs)
	addFormatterTest("%v", &pv, "<**>"+vs)
	addFormatterTest("%v", nv, "<nil>")
	addFormatterTest("%+v", v, vs)
	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
	addFormatterTest("%+v", nv, "<nil>")
	addFormatterTest("%#v", v, "("+vt+")"+vs)
	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
	addFormatterTest("%#+v", v, "("+vt+")"+vs)
	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")

	// Standard float64.
	v2 := float64(3.1415926)
	nv2 := (*float64)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "float64"
	v2s := "3.1415926"
	addFormatterTest("%v", v2, v2s)
	addFormatterTest("%v", pv2, "<*>"+v2s)
	addFormatterTest("%v", &pv2, "<**>"+v2s)
	addFormatterTest("%+v", nv2, "<nil>")
	addFormatterTest("%+v", v2, v2s)
	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
	addFormatterTest("%+v", nv2, "<nil>")
	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
}
// addComplexFormatterTests registers formatter tests for complex64 and
// complex128 values through value, pointer, double pointer, and typed-nil
// pointer for the %v, %+v, %#v, and %#+v verbs. Typed-nil pointers are
// expected to render as "<nil>".
func addComplexFormatterTests() {
	// Standard complex64.
	v := complex(float32(6), -2)
	nv := (*complex64)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "complex64"
	vs := "(6-2i)"
	addFormatterTest("%v", v, vs)
	addFormatterTest("%v", pv, "<*>"+vs)
	addFormatterTest("%v", &pv, "<**>"+vs)
	addFormatterTest("%+v", nv, "<nil>")
	addFormatterTest("%+v", v, vs)
	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
	addFormatterTest("%+v", nv, "<nil>")
	addFormatterTest("%#v", v, "("+vt+")"+vs)
	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
	addFormatterTest("%#+v", v, "("+vt+")"+vs)
	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")

	// Standard complex128.
	v2 := complex(float64(-6), 2)
	nv2 := (*complex128)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "complex128"
	v2s := "(-6+2i)"
	addFormatterTest("%v", v2, v2s)
	addFormatterTest("%v", pv2, "<*>"+v2s)
	addFormatterTest("%v", &pv2, "<**>"+v2s)
	addFormatterTest("%+v", nv2, "<nil>")
	addFormatterTest("%+v", v2, v2s)
	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
	addFormatterTest("%+v", nv2, "<nil>")
	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")
}
// addArrayFormatterTests registers formatter tests for arrays of ints, of a
// type whose custom Stringer is on the pointer receiver only, and of mixed
// interface values, through value, pointer, double pointer, and typed-nil
// pointer for the %v, %+v, %#v, and %#+v verbs. Typed-nil pointers are
// expected to render as "<nil>".
func addArrayFormatterTests() {
	// Array containing standard ints.
	v := [3]int{1, 2, 3}
	nv := (*[3]int)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "[3]int"
	vs := "[1 2 3]"
	addFormatterTest("%v", v, vs)
	addFormatterTest("%v", pv, "<*>"+vs)
	addFormatterTest("%v", &pv, "<**>"+vs)
	addFormatterTest("%+v", nv, "<nil>")
	addFormatterTest("%+v", v, vs)
	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
	addFormatterTest("%+v", nv, "<nil>")
	addFormatterTest("%#v", v, "("+vt+")"+vs)
	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
	addFormatterTest("%#+v", v, "("+vt+")"+vs)
	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")

	// Array containing type with custom formatter on pointer receiver only.
	v2 := [3]pstringer{"1", "2", "3"}
	nv2 := (*[3]pstringer)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "[3]spew_test.pstringer"
	v2s := "[stringer 1 stringer 2 stringer 3]"
	addFormatterTest("%v", v2, v2s)
	addFormatterTest("%v", pv2, "<*>"+v2s)
	addFormatterTest("%v", &pv2, "<**>"+v2s)
	addFormatterTest("%+v", nv2, "<nil>")
	addFormatterTest("%+v", v2, v2s)
	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
	addFormatterTest("%+v", nv2, "<nil>")
	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")

	// Array containing interfaces.
	v3 := [3]interface{}{"one", int(2), uint(3)}
	nv3 := (*[3]interface{})(nil)
	pv3 := &v3
	v3Addr := fmt.Sprintf("%p", pv3)
	pv3Addr := fmt.Sprintf("%p", &pv3)
	v3t := "[3]interface {}"
	v3t2 := "string"
	v3t3 := "int"
	v3t4 := "uint"
	v3s := "[one 2 3]"
	v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3]"
	addFormatterTest("%v", v3, v3s)
	addFormatterTest("%v", pv3, "<*>"+v3s)
	addFormatterTest("%v", &pv3, "<**>"+v3s)
	addFormatterTest("%+v", nv3, "<nil>")
	addFormatterTest("%+v", v3, v3s)
	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
	addFormatterTest("%+v", nv3, "<nil>")
	addFormatterTest("%#v", v3, "("+v3t+")"+v3s2)
	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2)
	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2)
	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2)
	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2)
	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2)
	addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"<nil>")
}
// addSliceFormatterTests registers formatter tests for slices of float32, of
// a pointer-receiver-only Stringer type, of mixed interface values (including
// a nil element), and for a nil slice, through value, pointer, double
// pointer, and typed-nil pointer for the %v, %+v, %#v, and %#+v verbs. Nil
// values are expected to render as "<nil>".
func addSliceFormatterTests() {
	// Slice containing standard float32 values.
	v := []float32{3.14, 6.28, 12.56}
	nv := (*[]float32)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "[]float32"
	vs := "[3.14 6.28 12.56]"
	addFormatterTest("%v", v, vs)
	addFormatterTest("%v", pv, "<*>"+vs)
	addFormatterTest("%v", &pv, "<**>"+vs)
	addFormatterTest("%+v", nv, "<nil>")
	addFormatterTest("%+v", v, vs)
	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
	addFormatterTest("%+v", nv, "<nil>")
	addFormatterTest("%#v", v, "("+vt+")"+vs)
	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
	addFormatterTest("%#+v", v, "("+vt+")"+vs)
	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")

	// Slice containing type with custom formatter on pointer receiver only.
	v2 := []pstringer{"1", "2", "3"}
	nv2 := (*[]pstringer)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "[]spew_test.pstringer"
	v2s := "[stringer 1 stringer 2 stringer 3]"
	addFormatterTest("%v", v2, v2s)
	addFormatterTest("%v", pv2, "<*>"+v2s)
	addFormatterTest("%v", &pv2, "<**>"+v2s)
	addFormatterTest("%+v", nv2, "<nil>")
	addFormatterTest("%+v", v2, v2s)
	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
	addFormatterTest("%+v", nv2, "<nil>")
	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")

	// Slice containing interfaces.
	v3 := []interface{}{"one", int(2), uint(3), nil}
	nv3 := (*[]interface{})(nil)
	pv3 := &v3
	v3Addr := fmt.Sprintf("%p", pv3)
	pv3Addr := fmt.Sprintf("%p", &pv3)
	v3t := "[]interface {}"
	v3t2 := "string"
	v3t3 := "int"
	v3t4 := "uint"
	v3t5 := "interface {}"
	v3s := "[one 2 3 <nil>]"
	v3s2 := "[(" + v3t2 + ")one (" + v3t3 + ")2 (" + v3t4 + ")3 (" + v3t5 +
		")<nil>]"
	addFormatterTest("%v", v3, v3s)
	addFormatterTest("%v", pv3, "<*>"+v3s)
	addFormatterTest("%v", &pv3, "<**>"+v3s)
	addFormatterTest("%+v", nv3, "<nil>")
	addFormatterTest("%+v", v3, v3s)
	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
	addFormatterTest("%+v", nv3, "<nil>")
	addFormatterTest("%#v", v3, "("+v3t+")"+v3s2)
	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2)
	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2)
	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2)
	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2)
	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2)
	addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"<nil>")

	// Nil slice.
	var v4 []int
	nv4 := (*[]int)(nil)
	pv4 := &v4
	v4Addr := fmt.Sprintf("%p", pv4)
	pv4Addr := fmt.Sprintf("%p", &pv4)
	v4t := "[]int"
	v4s := "<nil>"
	addFormatterTest("%v", v4, v4s)
	addFormatterTest("%v", pv4, "<*>"+v4s)
	addFormatterTest("%v", &pv4, "<**>"+v4s)
	addFormatterTest("%+v", nv4, "<nil>")
	addFormatterTest("%+v", v4, v4s)
	addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s)
	addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s)
	addFormatterTest("%+v", nv4, "<nil>")
	addFormatterTest("%#v", v4, "("+v4t+")"+v4s)
	addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s)
	addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s)
	addFormatterTest("%#v", nv4, "(*"+v4t+")"+"<nil>")
	addFormatterTest("%#+v", v4, "("+v4t+")"+v4s)
	addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s)
	addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s)
	addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"<nil>")
}
// addStringFormatterTests registers formatter tests for a standard string
// through value, pointer, double pointer, and typed-nil pointer for the %v,
// %+v, %#v, and %#+v verbs. The typed-nil *string is expected to render as
// "<nil>".
func addStringFormatterTests() {
	// Standard string.
	v := "test"
	nv := (*string)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "string"
	vs := "test"
	addFormatterTest("%v", v, vs)
	addFormatterTest("%v", pv, "<*>"+vs)
	addFormatterTest("%v", &pv, "<**>"+vs)
	addFormatterTest("%+v", nv, "<nil>")
	addFormatterTest("%+v", v, vs)
	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
	addFormatterTest("%+v", nv, "<nil>")
	addFormatterTest("%#v", v, "("+vt+")"+vs)
	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
	addFormatterTest("%#+v", v, "("+vt+")"+vs)
	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
}
// addInterfaceFormatterTests registers formatter tests for a nil interface
// and an interface holding a concrete uint16, through value, pointer, and
// double pointer for the %v, %+v, %#v, and %#+v verbs. Nil values are
// expected to render as "<nil>".
func addInterfaceFormatterTests() {
	// Nil interface.
	var v interface{}
	nv := (*interface{})(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "interface {}"
	vs := "<nil>"
	addFormatterTest("%v", v, vs)
	addFormatterTest("%v", pv, "<*>"+vs)
	addFormatterTest("%v", &pv, "<**>"+vs)
	addFormatterTest("%+v", nv, "<nil>")
	addFormatterTest("%+v", v, vs)
	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs)
	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs)
	addFormatterTest("%+v", nv, "<nil>")
	addFormatterTest("%#v", v, "("+vt+")"+vs)
	addFormatterTest("%#v", pv, "(*"+vt+")"+vs)
	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs)
	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
	addFormatterTest("%#+v", v, "("+vt+")"+vs)
	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs)
	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs)
	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")

	// Sub-interface.
	v2 := interface{}(uint16(65535))
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "uint16"
	v2s := "65535"
	addFormatterTest("%v", v2, v2s)
	addFormatterTest("%v", pv2, "<*>"+v2s)
	addFormatterTest("%v", &pv2, "<**>"+v2s)
	addFormatterTest("%+v", v2, v2s)
	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
}
// addMapFormatterTests registers formatter tests for maps with string keys,
// pointer-receiver-only Stringer keys/values, interface keys/values, and a
// nil interface value, through value, pointer, double pointer, nil map, and
// typed-nil pointer for the %v, %+v, %#v, and %#+v verbs. Because Go map
// iteration order is unspecified, two-entry maps list both orderings as
// acceptable wants. Nil values are expected to render as "<nil>".
func addMapFormatterTests() {
	// Map with string keys and int vals.
	v := map[string]int{"one": 1, "two": 2}
	nilMap := map[string]int(nil)
	nv := (*map[string]int)(nil)
	pv := &v
	vAddr := fmt.Sprintf("%p", pv)
	pvAddr := fmt.Sprintf("%p", &pv)
	vt := "map[string]int"
	vs := "map[one:1 two:2]"
	vs2 := "map[two:2 one:1]"
	addFormatterTest("%v", v, vs, vs2)
	addFormatterTest("%v", pv, "<*>"+vs, "<*>"+vs2)
	addFormatterTest("%v", &pv, "<**>"+vs, "<**>"+vs2)
	addFormatterTest("%+v", nilMap, "<nil>")
	addFormatterTest("%+v", nv, "<nil>")
	addFormatterTest("%+v", v, vs, vs2)
	addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs, "<*>("+vAddr+")"+vs2)
	addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs,
		"<**>("+pvAddr+"->"+vAddr+")"+vs2)
	addFormatterTest("%+v", nilMap, "<nil>")
	addFormatterTest("%+v", nv, "<nil>")
	addFormatterTest("%#v", v, "("+vt+")"+vs, "("+vt+")"+vs2)
	addFormatterTest("%#v", pv, "(*"+vt+")"+vs, "(*"+vt+")"+vs2)
	addFormatterTest("%#v", &pv, "(**"+vt+")"+vs, "(**"+vt+")"+vs2)
	addFormatterTest("%#v", nilMap, "("+vt+")"+"<nil>")
	addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
	addFormatterTest("%#+v", v, "("+vt+")"+vs, "("+vt+")"+vs2)
	addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs,
		"(*"+vt+")("+vAddr+")"+vs2)
	addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs,
		"(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs2)
	addFormatterTest("%#+v", nilMap, "("+vt+")"+"<nil>")
	addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")

	// Map with custom formatter type on pointer receiver only keys and vals.
	v2 := map[pstringer]pstringer{"one": "1"}
	nv2 := (*map[pstringer]pstringer)(nil)
	pv2 := &v2
	v2Addr := fmt.Sprintf("%p", pv2)
	pv2Addr := fmt.Sprintf("%p", &pv2)
	v2t := "map[spew_test.pstringer]spew_test.pstringer"
	v2s := "map[stringer one:stringer 1]"
	addFormatterTest("%v", v2, v2s)
	addFormatterTest("%v", pv2, "<*>"+v2s)
	addFormatterTest("%v", &pv2, "<**>"+v2s)
	addFormatterTest("%+v", nv2, "<nil>")
	addFormatterTest("%+v", v2, v2s)
	addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s)
	addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s)
	addFormatterTest("%+v", nv2, "<nil>")
	addFormatterTest("%#v", v2, "("+v2t+")"+v2s)
	addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s)
	addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s)
	addFormatterTest("%#v", nv2, "(*"+v2t+")"+"<nil>")
	addFormatterTest("%#+v", v2, "("+v2t+")"+v2s)
	addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s)
	addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s)
	addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"<nil>")

	// Map with interface keys and values.
	v3 := map[interface{}]interface{}{"one": 1}
	nv3 := (*map[interface{}]interface{})(nil)
	pv3 := &v3
	v3Addr := fmt.Sprintf("%p", pv3)
	pv3Addr := fmt.Sprintf("%p", &pv3)
	v3t := "map[interface {}]interface {}"
	v3t1 := "string"
	v3t2 := "int"
	v3s := "map[one:1]"
	v3s2 := "map[(" + v3t1 + ")one:(" + v3t2 + ")1]"
	addFormatterTest("%v", v3, v3s)
	addFormatterTest("%v", pv3, "<*>"+v3s)
	addFormatterTest("%v", &pv3, "<**>"+v3s)
	addFormatterTest("%+v", nv3, "<nil>")
	addFormatterTest("%+v", v3, v3s)
	addFormatterTest("%+v", pv3, "<*>("+v3Addr+")"+v3s)
	addFormatterTest("%+v", &pv3, "<**>("+pv3Addr+"->"+v3Addr+")"+v3s)
	addFormatterTest("%+v", nv3, "<nil>")
	addFormatterTest("%#v", v3, "("+v3t+")"+v3s2)
	addFormatterTest("%#v", pv3, "(*"+v3t+")"+v3s2)
	addFormatterTest("%#v", &pv3, "(**"+v3t+")"+v3s2)
	addFormatterTest("%#v", nv3, "(*"+v3t+")"+"<nil>")
	addFormatterTest("%#+v", v3, "("+v3t+")"+v3s2)
	addFormatterTest("%#+v", pv3, "(*"+v3t+")("+v3Addr+")"+v3s2)
	addFormatterTest("%#+v", &pv3, "(**"+v3t+")("+pv3Addr+"->"+v3Addr+")"+v3s2)
	addFormatterTest("%#+v", nv3, "(*"+v3t+")"+"<nil>")

	// Map with nil interface value
	v4 := map[string]interface{}{"nil": nil}
	nv4 := (*map[string]interface{})(nil)
	pv4 := &v4
	v4Addr := fmt.Sprintf("%p", pv4)
	pv4Addr := fmt.Sprintf("%p", &pv4)
	v4t := "map[string]interface {}"
	v4t1 := "interface {}"
	v4s := "map[nil:<nil>]"
	v4s2 := "map[nil:(" + v4t1 + ")<nil>]"
	addFormatterTest("%v", v4, v4s)
	addFormatterTest("%v", pv4, "<*>"+v4s)
	addFormatterTest("%v", &pv4, "<**>"+v4s)
	addFormatterTest("%+v", nv4, "<nil>")
	addFormatterTest("%+v", v4, v4s)
	addFormatterTest("%+v", pv4, "<*>("+v4Addr+")"+v4s)
	addFormatterTest("%+v", &pv4, "<**>("+pv4Addr+"->"+v4Addr+")"+v4s)
	addFormatterTest("%+v", nv4, "<nil>")
	addFormatterTest("%#v", v4, "("+v4t+")"+v4s2)
	addFormatterTest("%#v", pv4, "(*"+v4t+")"+v4s2)
	addFormatterTest("%#v", &pv4, "(**"+v4t+")"+v4s2)
	addFormatterTest("%#v", nv4, "(*"+v4t+")"+"<nil>")
	addFormatterTest("%#+v", v4, "("+v4t+")"+v4s2)
	addFormatterTest("%#+v", pv4, "(*"+v4t+")("+v4Addr+")"+v4s2)
	addFormatterTest("%#+v", &pv4, "(**"+v4t+")("+pv4Addr+"->"+v4Addr+")"+v4s2)
	addFormatterTest("%#+v", nv4, "(*"+v4t+")"+"<nil>")
}
func addStructFormatterTests() {
// Struct with primitives.
type s1 struct {
a int8
b uint8
}
v := s1{127, 255}
nv := (*s1)(nil)
pv := &v
vAddr := fmt.Sprintf("%p", pv)
pvAddr := fmt.Sprintf("%p", &pv)
vt := "spew_test.s1"
vt2 := "int8"
vt3 := "uint8"
vs := "{127 255}"
vs2 := "{a:127 b:255}"
vs3 := "{a:(" + vt2 + ")127 b:(" + vt3 + ")255}"
addFormatterTest("%v", v, vs)
addFormatterTest("%v", pv, "<*>"+vs)
addFormatterTest("%v", &pv, "<**>"+vs)
addFormatterTest("%+v", nv, "")
addFormatterTest("%+v", v, vs2)
addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs2)
addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs2)
addFormatterTest("%+v", nv, "")
addFormatterTest("%#v", v, "("+vt+")"+vs3)
addFormatterTest("%#v", pv, "(*"+vt+")"+vs3)
addFormatterTest("%#v", &pv, "(**"+vt+")"+vs3)
addFormatterTest("%#v", nv, "(*"+vt+")"+"")
addFormatterTest("%#+v", v, "("+vt+")"+vs3)
addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs3)
addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs3)
addFormatterTest("%#+v", nv, "(*"+vt+")"+"")
// Struct that contains another struct.
type s2 struct {
s1 s1
b bool
}
v2 := s2{s1{127, 255}, true}
nv2 := (*s2)(nil)
pv2 := &v2
v2Addr := fmt.Sprintf("%p", pv2)
pv2Addr := fmt.Sprintf("%p", &pv2)
v2t := "spew_test.s2"
v2t2 := "spew_test.s1"
v2t3 := "int8"
v2t4 := "uint8"
v2t5 := "bool"
v2s := "{{127 255} true}"
v2s2 := "{s1:{a:127 b:255} b:true}"
v2s3 := "{s1:(" + v2t2 + "){a:(" + v2t3 + ")127 b:(" + v2t4 + ")255} b:(" +
v2t5 + ")true}"
addFormatterTest("%v", v2, v2s)
addFormatterTest("%v", pv2, "<*>"+v2s)
addFormatterTest("%v", &pv2, "<**>"+v2s)
addFormatterTest("%+v", nv2, "")
addFormatterTest("%+v", v2, v2s2)
addFormatterTest("%+v", pv2, "<*>("+v2Addr+")"+v2s2)
addFormatterTest("%+v", &pv2, "<**>("+pv2Addr+"->"+v2Addr+")"+v2s2)
addFormatterTest("%+v", nv2, "")
addFormatterTest("%#v", v2, "("+v2t+")"+v2s3)
addFormatterTest("%#v", pv2, "(*"+v2t+")"+v2s3)
addFormatterTest("%#v", &pv2, "(**"+v2t+")"+v2s3)
addFormatterTest("%#v", nv2, "(*"+v2t+")"+"")
addFormatterTest("%#+v", v2, "("+v2t+")"+v2s3)
addFormatterTest("%#+v", pv2, "(*"+v2t+")("+v2Addr+")"+v2s3)
addFormatterTest("%#+v", &pv2, "(**"+v2t+")("+pv2Addr+"->"+v2Addr+")"+v2s3)
addFormatterTest("%#+v", nv2, "(*"+v2t+")"+"")
// Struct that contains custom type with Stringer pointer interface via both
// exported and unexported fields.
type s3 struct {
s pstringer
S pstringer
}
v3 := s3{"test", "test2"}
nv3 := (*s3)(nil)
pv3 := &v3
v3Addr := fmt.Sprintf("%p", pv3)
pv3Addr := fmt.Sprintf("%p", &pv3)
v3t := "spew_test.s3"
v3t2 := "spew_test.pstringer"
v3s := "{stringer test stringer test2}"
v3s2 := "{s:stringer test S:stringer test2}"
v3s3 := "{s:(" + v3t2 + ")stringer test S:(" + v3t2 + ")stringer test2}"
addFormatterTest("%v", v3, v3s)
addFormatterTest("%v", pv3, "<*>"+v3s)
addFormatterTest("%v", &pv3, "<**>"+v3s)
addFormatterTest("%+v", nv3, "