Repository: EagerIO/Stout Branch: master Commit: e50d63a2f523 Files: 97 Total size: 1.3 MB Directory structure: gitextract_dxdit0ui/ ├── .github/ │ └── workflows/ │ └── semgrep.yml ├── .gitignore ├── Godeps/ │ ├── Godeps.json │ └── Readme ├── LICENSE ├── README.md ├── docs/ │ └── getting-started.md ├── src/ │ ├── admin.go │ ├── cli.go │ ├── deploy.go │ ├── rollback.go │ └── utils.go ├── utils/ │ ├── build.sh │ ├── create_site.sh │ ├── release.sh │ └── xc.sh └── vendor/ ├── github.com/ │ ├── cenk/ │ │ └── backoff/ │ │ ├── .gitignore │ │ ├── .travis.yml │ │ ├── LICENSE │ │ ├── README.md │ │ ├── backoff.go │ │ ├── exponential.go │ │ ├── retry.go │ │ └── ticker.go │ ├── imdario/ │ │ └── mergo/ │ │ ├── .travis.yml │ │ ├── LICENSE │ │ ├── README.md │ │ ├── doc.go │ │ ├── map.go │ │ ├── merge.go │ │ └── mergo.go │ ├── mitchellh/ │ │ └── go-homedir/ │ │ ├── LICENSE │ │ ├── README.md │ │ └── homedir.go │ ├── wsxiaoys/ │ │ └── terminal/ │ │ ├── LICENSE │ │ └── color/ │ │ └── color.go │ └── zackbloom/ │ ├── go-ini/ │ │ ├── .gitignore │ │ ├── README.md │ │ ├── decode.go │ │ └── stack.go │ └── goamz/ │ ├── LICENSE │ ├── aws/ │ │ ├── attempt.go │ │ ├── aws.go │ │ ├── client.go │ │ ├── regions.go │ │ ├── retry.go │ │ └── sign.go │ ├── cloudfront/ │ │ └── cloudfront.go │ ├── iam/ │ │ ├── iam.go │ │ └── sign.go │ ├── route53/ │ │ └── route53.go │ └── s3/ │ ├── lifecycle.go │ ├── multi.go │ ├── s3.go │ └── sign.go ├── golang.org/ │ └── x/ │ ├── crypto/ │ │ ├── LICENSE │ │ ├── PATENTS │ │ └── ssh/ │ │ └── terminal/ │ │ ├── terminal.go │ │ ├── util.go │ │ ├── util_bsd.go │ │ ├── util_linux.go │ │ ├── util_plan9.go │ │ └── util_windows.go │ └── net/ │ ├── LICENSE │ ├── PATENTS │ ├── html/ │ │ ├── atom/ │ │ │ ├── atom.go │ │ │ ├── gen.go │ │ │ └── table.go │ │ ├── const.go │ │ ├── doc.go │ │ ├── doctype.go │ │ ├── entity.go │ │ ├── escape.go │ │ ├── foreign.go │ │ ├── node.go │ │ ├── parse.go │ │ ├── render.go │ │ └── token.go │ └── publicsuffix/ │ ├── gen.go │ ├── list.go │ └── 
table.go └── gopkg.in/ └── yaml.v1/ ├── LICENSE ├── LICENSE.libyaml ├── README.md ├── apic.go ├── decode.go ├── emitterc.go ├── encode.go ├── parserc.go ├── readerc.go ├── resolve.go ├── scannerc.go ├── sorter.go ├── writerc.go ├── yaml.go ├── yamlh.go └── yamlprivateh.go ================================================ FILE CONTENTS ================================================ ================================================ FILE: .github/workflows/semgrep.yml ================================================ on: pull_request: {} workflow_dispatch: {} push: branches: - main - master schedule: - cron: '0 0 * * *' name: Semgrep config jobs: semgrep: name: semgrep/ci runs-on: ubuntu-latest env: SEMGREP_APP_TOKEN: ${{ secrets.SEMGREP_APP_TOKEN }} SEMGREP_URL: https://cloudflare.semgrep.dev SEMGREP_APP_URL: https://cloudflare.semgrep.dev SEMGREP_VERSION_CHECK_URL: https://cloudflare.semgrep.dev/api/check-version container: image: semgrep/semgrep steps: - uses: actions/checkout@v4 - run: semgrep ci ================================================ FILE: .gitignore ================================================ .DS_Store builds/ debian/ ================================================ FILE: Godeps/Godeps.json ================================================ { "ImportPath": "github.com/eagerio/stout", "GoVersion": "go1.6", "GodepVersion": "v63", "Packages": [ "./..." 
], "Deps": [ { "ImportPath": "github.com/cenk/backoff", "Comment": "v1.0.0-7-gcdf48bb", "Rev": "cdf48bbc1eb78d1349cbda326a4a037f7ba565c6" }, { "ImportPath": "github.com/imdario/mergo", "Comment": "0.2.2-6-g50d4dbd", "Rev": "50d4dbd4eb0e84778abe37cefef140271d96fade" }, { "ImportPath": "github.com/mitchellh/go-homedir", "Rev": "1111e456ffea841564ac0fa5f69c26ef44dafec9" }, { "ImportPath": "github.com/wsxiaoys/terminal/color", "Rev": "0940f3fc43a0ed42d04916b1c04578462c650b09" }, { "ImportPath": "github.com/zackbloom/go-ini", "Rev": "3db81f263990ac57212cfdc3ce9a65e8af8af966" }, { "ImportPath": "github.com/zackbloom/goamz/aws", "Rev": "0f589c21f1937992b2952e1f4ce0d31f132b685d" }, { "ImportPath": "github.com/zackbloom/goamz/cloudfront", "Rev": "0f589c21f1937992b2952e1f4ce0d31f132b685d" }, { "ImportPath": "github.com/zackbloom/goamz/iam", "Rev": "0f589c21f1937992b2952e1f4ce0d31f132b685d" }, { "ImportPath": "github.com/zackbloom/goamz/route53", "Rev": "0f589c21f1937992b2952e1f4ce0d31f132b685d" }, { "ImportPath": "github.com/zackbloom/goamz/s3", "Rev": "0f589c21f1937992b2952e1f4ce0d31f132b685d" }, { "ImportPath": "golang.org/x/crypto/ssh/terminal", "Rev": "5bcd134fee4dd1475da17714aac19c0aa0142e2f" }, { "ImportPath": "golang.org/x/net/html", "Rev": "8a52c78636f6b7be1b1e5cb58b01a85f1e082659" }, { "ImportPath": "golang.org/x/net/html/atom", "Rev": "8a52c78636f6b7be1b1e5cb58b01a85f1e082659" }, { "ImportPath": "golang.org/x/net/publicsuffix", "Rev": "8a52c78636f6b7be1b1e5cb58b01a85f1e082659" }, { "ImportPath": "gopkg.in/yaml.v1", "Rev": "9f9df34309c04878acc86042b16630b0f696e1de" } ] } ================================================ FILE: Godeps/Readme ================================================ This directory tree is generated automatically by godep. Please do not edit. See https://github.com/tools/godep for more information. ================================================ FILE: LICENSE ================================================ Copyright (c) 2014 Eager Platform Co. 
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: README.md ================================================ # Stout #### [View the site →](http://stout.is/) Stout is a deploy tool for static websites. It takes a website and uploads it to S3 in a more reliable way than other tools. It can be an alternative to paid services like Divshot, to dynamic web servers like Rails, or to manually uploading your site to S3 or an FTP server. ## Why You Need Stout Traditionally uploading your files to S3 introduces a serious caching issue we ran into in practice at [Eager](https://eager.io). The cache for the various files your site depends on can expire at different times, meaning your users get an inconsistent (broken) set of files for a time after every single deploy. Further, traditional static site deployments don't offer any method of rolling back a previous deploy. We built Stout to fix these issues. 
### Features - Versions script and style files to ensure your pages don't use an inconsistent set of files during or after a deploy - Supports rollback to any previous version - Does not depend on any specific build tool or workflow (it is a standalone executable written in Go) - Does not require a datastore of any kind to maintain state or history - Can be used by multiple developers simultaneously without locking or a danger of inconsistent state - Properly handles caching headers - Supports deploying multiple projects to various subdirectories of the same site without conflicts - Compresses files for faster delivery ### Limitations - Stout doesn't currently support rolling back files that aren't HTML, JS or CSS (images, videos, etc.). See the Versioning section for more information. - All-or-nothing consistency is only guarenteed on a per-html-file basis, not for the entire deploy. See the Consistency section for more information. ## Getting Started [Download](https://github.com/EagerIO/Stout/releases) the `stout` executable for your system from our latest release into a directory on your `$PATH`, like `/usr/local/bin`. You can use the `create` command to create a new site. It automatically creates an S3 bucket, a CloudFront distribution, and a user account for deployment. It therefore requires credentials for an Amazon AWS account which has permissions to those services along with Route 53. 
```sh stout create --bucket my.website.com --key MY_AWS_KEY --secret MY_AWS_SECRET ``` You can then deploy your project: ```sh stout deploy --bucket my.website.com --key MY_AWS_KEY --secret MY_AWS_SECRET ``` If your built files are in another directory, add the `--root` option: ```sh stout deploy --bucket my.website.com --key MY_AWS_KEY --secret MY_AWS_SECRET --root ./build ``` If your bucket located not in the default region, which is us-east-1, add the `--region` option: ```sh stout deploy --bucket my.website.com --key MY_AWS_KEY --secret MY_AWS_SECRET --region us-west-1 ``` If you don't want to deploy all the files in your folder, use the files argument. ```sh stout deploy --bucket my.website.com --key MY_AWS_KEY --secret MY_AWS_SECRET --root ./build --files "*.html,images/*" ``` Javascript and CSS included in your HTML files will always be included automatically. The deploy command will give you a deploy id you can use in the future to rollback if you have to: ```sh stout rollback --bucket my.website.com --key MY_AWS_KEY --secret MY_AWS_SECRET a3b8ff290c33 ``` Eventually you'll probably want to move your config to a deploy.yaml file, rather than specifying it in the command every time. Using the info below you can learn about what the deploy/rollback tools actually do, deploying to subfolders, deploying from your build tool, and rolling back. ## Backstory We wrote Stout because we couldn't find an open-source way to reliably and efficiently deploy our static sites (including our app and blog). We used a traditional upload-to-s3 tool, but caching meant a user could get the new html and styles, but the old scripts, for example, causing sporatic and random errors. It also didn't support reliably rolling back when necessary. We built Stout to be the reliable, production-ready choice for static deploys. ## Function Stout is an executable file built from Go code. The `deploy` command deploys one or more html files and their dependencies to a specified location in S3. 
The `rollback` command takes a deploy id and rolls the project back to that version. ### Deploy The deploy process works by parsing the script and style tags out of one or more html files. It then hashes those files, uploads them prefixed with their hashes, and updates the location of the original script and link tags with the hashed locations. It generates a deploy id by hashing all of the files in the deploy, and uploads the html files to a location prefixed by the deploy id. When the uploads are successful, the prefixed html files are atomically copied to their unprefixed paths, completing the deploy. ### Rollback A rollback simply copies the html files prefixed with the specified deploy id to the unprefixed paths. ### Deploy Configuration You can configure the deploy tool with any combination of command line flags or arguments provided in a configuration yaml file. The options are: ##### `bucket` The S3 bucket to deploy to. In most configurations this bucket should be the origin for the CDN which actually serves your site. It usually makes sense to make this the url you are going to host your site from (i.e. `"example.com"`) ##### `config` ("./deploy.yaml") The location of a yaml file to read any otherwise unspecified configuration from. ##### `dest` ("./") The destination directory to write files to in the S3 bucket. For example if you wanted your this project to end up hosted at `yoursite.com/blog`, you would specify `--dest blog`. ##### `root` ("./") The local directory where the files to be uploaded lives. It's common to make this your "./build" directory or the like. ##### `files` ("*") Comma-seperated glob patterns of the files to be deployed (within the `--root`). HTML files will be parsed, and the CSS/JS they point to will be included (versioned) automatically. If you also include those files in your glob pattern they will be uploaded twice, once with a versioning hash in the URL, again without. 
Be sure to include any additional files you would like deployed like images, videos, font files, etc. You can use relative paths which break out of the `root`. If you prefix the path with `-/`, it will be interpreted as relative to the project directory, not the `root`. ##### `env` The config file can contain configurations for multiple environments (production, staging, etc.). This specifies which is used. See the "YAML Config" section for more information. ##### `key` The AWS key to use. The create command will create an IAM user for each project with access only to the relevant bucket. See the Permissions section for more information. ##### `secret` The AWS secret of the provided key. ##### `region` ("us-east-1") The AWS region the S3 bucket is located in. If you are getting a `The bucket you are attempting to access must be addressed using the specified endpoint. Please send all future requests to this endpoint.` error, specify your bucket `--region`. ### YAML Config You can provide a yaml file which specifies configuration defaults for the project being deployed. We include this file in each project which will be deployed. This file can have multiple configurations for different environments, along with a default section. For example, the `deploy.yaml` for one of our projects looks like: ```yaml default: root: 'build/' production: key: 'XXX' secret: 'XXX' bucket: 'eager.io' development: key: 'XXX' secret: 'XXX' bucket: 'next.eager.io' ``` Replacing the "XXX"s with our actual credentials. To deploy to development we run (from the directory with the deploy.yaml file in it): ```bash deploy --env development ``` A rollback of development would be: ```bash rollback --env development $DEPLOY_ID ``` Where the deploy id is taken from the output of the deploy you wish to rollback to. 
Our public projects use a similar config, but they specify the Amazon credentials as environment vars from the build system, passed in as flags: ```bash deploy --env development --key $AMAZON_KEY_DEV --secret $AMAZON_SECRET_DEV ``` Never commit Amazon credentials to a file in a public repo. Keep them on your local machine, or in your build system's configuration. ### Clean URLS It's not specific to Stout, but it's worth mentioning that we recommend you structure your built folder to use a folder with an index.html file for each page. For example, if you want a root and a page at `/blog`, you would have: ``` index.html blog/ index.html ``` That way, assuming S3 and CloudFront are configured properly, you'll be able to use the clean URLs `/` and `/blog/`. ### SSL Cloudfront has the ability to serve your site using SSL. The general procedure for setting it up is: 1. Get an SSL certificate for your domain 2. Upload it to Amazon 3. Select that certificate in the configuration for the CloudFront distribution Stout creates for you You will absolutely need more detailed instructions, which you can find [here](https://bryce.fisher-fleig.org/blog/setting-up-ssl-on-aws-cloudfront-and-s3/). Selecting a certificate for you is one of the few things the `create` command does not do, as it's not always possible to decide which certificate is appropriate. If you need SSL support, you will have to remember to select the cert in the Amazon Console or CLI after running the `create` command. ### Permissions The AWS user which is used for Stout should have the `GetObject`, `PutObject`, `DeleteObject`, and `ListBucket` permissions. The `create` command will set this up for you if you use it. 
This is an example policy config which works: ```json { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "s3:DeleteObject", "s3:ListBucket", "s3:PutObject", "s3:PutObjectAcl", "s3:GetObject" ], "Resource": [ "arn:aws:s3:::BUCKET", "arn:aws:s3:::BUCKET/*" ] } ] } ``` Be sure to replace `BUCKET` with your bucket's actual name. ### Deploying with CircleCI Deploying with CircleCI is simply a matter of installing the deploy tool and running it as you would locally. Here's an excerpt of a working circle.yml: ```yaml dependencies: post: - go get github.com/tools/godep - git clone git@github.com:EagerIO/Stout.git - cd Stout; godep go build -o ../stout src/*.go deployment: development: branch: dev commands: - ./stout deploy --env development --key $AMAZON_KEY_DEV --secret $AMAZON_SECRET_DEV production: branch: master commands: - ./stout deploy --env production --key $AMAZON_KEY_PROD --secret $AMAZON_SECRET_PROD ``` If you use environment vars for your credentials, make sure to add them to your Circle config. If your repo is private, you can specify your Amazon key and secret in your deploy.yaml file, removing the need to specify them in the commands. ### Caching All versioned files (include a hash of their contents in the path) are configured to cache for one year. All unversioned files are configured to cache for 60 seconds. This means it will take up to 60 seconds for users to see changes made to your site. ### Versioning Only JS and CSS files which are pointed to in HTML files are hashed, as we need to be able to update the HTML to point to our new, versioned, files. Any other file included in your `--files` argument will be uploaded, but not versioned, meaning a rollback will not effect these files. This is something we'd like to improve. ### Consistency As the final step of the deploy is atomic, multiple actors can trigger deploys simultaneously without any danger of inconsistent state. 
Whichever process triggers the final 'copy' step for a given file will win, with it's specified dependencies guarenteed to be used in their entirity. Note that this consistency is only guarenteed on a per-html-file level, you may end up with some html files from one deployer, and others from another, but all files will point to their correct dependencies. ### Deploying Multiple Projects To One Site You can deploy multiple projects to the same domain simply by specifying the appropriate `dest` for each one. For example your homepage might have the dest `./`, and your blog `./blog`. Your homepage will be hosted at `your-site.com`, your blog `your-site.com/blog`. ### Using Client-side Routers It is possible to use a client-side router (where you have multiple request URLs point to the same HTML file) by configuring your CloudFront distribution to serve your index.html file in response to 403s and 404s. ![CF](https://raw.githubusercontent.com/EagerIO/Stout/master/docs/images/cf-screenshot.png) ### Installing - Download the release for your system type from our [releases](https://github.com/EagerIO/Stout/releases) - Copy or symlink the `stout` binary contained in the archive into your path (for example, into `/usr/local/bin`) ### Building - Install go and godep - Run `godep restore ./...` - Run `go build -o ../stout src/*` #### For a Release (Cross Compiling) - Run `go get github.com/laher/goxc` - Run `go get code.google.com/p/go.tools/cmd/vet` - Run `./utils/xc.sh` The first run will take significantly longer than future runs. The built files will be placed in the `./builds` directory. ### Running To run the commands for development purposes, run: `go run src/*`, followed by any command line args you would normally give to the command. ### Contributing Please do, we would love for this to become a project of the community. Feel free to open an issue, submit a PR or contribute to the wiki. 
================================================ FILE: docs/getting-started.md ================================================ ### Setting Up Your Site If you don't already have an S3-hosted site, start here. We're going to create a basic site config which uses CloudFront's CDN to deliver high performance at a minimal cost. Once you run the setup, you'll end up with a configuration which looks like this: ``` The Deploy Tool -> S3 <- CloudFront's Global CDN <- DNS <- Your Users ``` The simplest way to get started is to run the `create_site.sh` script in the utils folder. After installing the [aws command line tools](http://aws.amazon.com/cli/), run: ```bash ./utils/create_site.sh subdomain.my-site.com ``` Feel free to leave out the subdomain if you'd like to host it at the root of your domain. This will: - Create an S3 bucket for this site with the correct security policy and website hosting - Create a CloudFront distribution pointed at that bucket - Create a user with the appropriate permissions to upload to that bucket - Create an access key for that user Once that's done, copy the access key, secret key (from the JSON blob the access key request spits out) and bucket (the bucket's name is just the url you provided) it printed to your `deploy.yaml`, or save them to use with the `stout deploy` as arguments. The final step is to point your DNS records to the new CloudFront distribution. If you use Route 53 you want to create an alias to the distribution (it will be named the same as the new domain). If you use another DNS service, you'll want to create a CNAME to the CloudFront distribution's hostname. Please note that it will take up to twenty minutes for the CloudFront distribution to initialize. Additionally it may take some time for your DNS records to update. If you'd like development or staging environments, just run the command again with the URL you'd like them to have, and add the new credentials as before. 
See the "YAML Config" section of the README for an example of how to configure multiple environments. Be very careful to never commit a file to a public repo that contains your AWS credentials. If you are deploying a public repo, either keep the credentials on your local machine you deploy from, or in the build service (like CircleCI) you're using. #### Step-by-step Instructions 1. Install Amazon's AWS Command-Line Tools (and create an AWS account if you don't have one) 1. Run the `create_site.sh` tool with the URL of the site you'd like to deploy 1. Take note of the AWS key and secret in the final JSON blob outputted by the script 1. Download the executable from this project 1. Run `stout deploy --bucket subdomain.your-site.com --key YOUR_NEW_AWS_KEY --secret YOUR_NEW_AWS_SECRET` to deploy 1. Add the `--root` argument if your built files are in a subdirectory. 1. Visit the cloudfront url of your new distribution to see how your site currently looks, include any new files you may have missed and deploy again 1. Optionally, Move any configuration options you don't mind being committed to your repository to a deploy.yaml file 1. Optionally, Run `create_site.sh` again to create staging or development sites, and add their configuration to your deploy.yaml as well 1. Optionally, Deploy more projects to this same site by running deploy with the `--dest` argument 1. 
Optionally, Add the deploy step to your build tool

================================================
FILE: src/admin.go
================================================
package main

import (
	"fmt"
	"os"
	"os/exec"
	"strings"

	"github.com/zackbloom/goamz/cloudfront"
	"github.com/zackbloom/goamz/iam"
	"github.com/zackbloom/goamz/route53"
	"github.com/zackbloom/goamz/s3"
	"golang.org/x/crypto/ssh/terminal"
	"golang.org/x/net/publicsuffix"
)

// CreateBucket creates the S3 bucket named by options.Bucket, configures it
// for public-read static website hosting (index.html / error.html), and
// attaches a bucket policy granting anonymous s3:GetObject on every object.
// s3Session is a package-level session opened elsewhere in this package.
func CreateBucket(options Options) error {
	bucket := s3Session.Bucket(options.Bucket)

	err := bucket.PutBucket("public-read")
	if err != nil {
		return err
	}

	err = bucket.PutBucketWebsite(s3.WebsiteConfiguration{
		IndexDocument: &s3.IndexDocument{"index.html"},
		ErrorDocument: &s3.ErrorDocument{"error.html"},
	})
	if err != nil {
		return err
	}

	// The policy JSON is assembled by string concatenation; assumes
	// options.Bucket contains no characters that would break the JSON.
	err = bucket.PutPolicy([]byte(`{ "Version": "2008-10-17", "Statement": [ { "Sid": "PublicReadForGetBucketObjects", "Effect": "Allow", "Principal": { "AWS": "*" }, "Action": "s3:GetObject", "Resource": "arn:aws:s3:::`+options.Bucket+`/*" } ] }`,
	))
	if err != nil {
		return err
	}

	return nil
}

// GetDistribution returns the CloudFront distribution whose alias matches
// options.Bucket, creating one (pointed at the bucket's S3 website endpoint)
// if none exists. An existing distribution is assumed to be configured
// correctly and is returned as-is.
func GetDistribution(options Options) (dist cloudfront.DistributionSummary, err error) {
	distP, err := cfSession.FindDistributionByAlias(options.Bucket)
	if err != nil {
		return
	}
	if distP != nil {
		fmt.Println("CloudFront distribution found with the provided bucket name, assuming config matches.")
		fmt.Println("If you run into issues, delete the distribution and rerun this command.")
		dist = *distP
		return
	}

	conf := cloudfront.DistributionConfig{
		Origins: cloudfront.Origins{
			cloudfront.Origin{
				Id: "S3-" + options.Bucket,
				// The S3 *website* endpoint (not the REST endpoint), so S3
				// serves index documents and the custom error page.
				DomainName: options.Bucket + ".s3-website-" + options.AWSRegion + ".amazonaws.com",
				CustomOriginConfig: &cloudfront.CustomOriginConfig{
					HTTPPort:             80,
					HTTPSPort:            443,
					OriginProtocolPolicy: "http-only",
				},
			},
		},
		DefaultRootObject: "index.html",
		PriceClass:        "PriceClass_All",
		Enabled:           true,
		DefaultCacheBehavior: cloudfront.CacheBehavior{
			TargetOriginId:       "S3-" + options.Bucket,
			ViewerProtocolPolicy: "allow-all",
			AllowedMethods: cloudfront.AllowedMethods{
				Allowed: []string{"GET", "HEAD"},
				Cached:  []string{"GET", "HEAD"},
			},
		},
		ViewerCertificate: &cloudfront.ViewerCertificate{
			CloudFrontDefaultCertificate: true,
			MinimumProtocolVersion:       "TLSv1",
			SSLSupportMethod:             "sni-only",
		},
		CustomErrorResponses: cloudfront.CustomErrorResponses{
			// This adds support for single-page apps: 403/404 from S3 are
			// rewritten to index.html with a 200 so client-side routers work.
			cloudfront.CustomErrorResponse{
				ErrorCode:          403,
				ResponsePagePath:   "/index.html",
				ResponseCode:       200,
				ErrorCachingMinTTL: 60,
			},
			cloudfront.CustomErrorResponse{
				ErrorCode:          404,
				ResponsePagePath:   "/index.html",
				ResponseCode:       200,
				ErrorCachingMinTTL: 60,
			},
		},
		Aliases: cloudfront.Aliases{
			options.Bucket,
		},
	}

	return cfSession.Create(conf)
}

// CreateUser creates (or reuses) an IAM user named "<bucket>_deploy",
// attaches an inline policy limited to the S3 actions needed for deploy and
// rollback on this bucket, and returns a newly created access key.
// NOTE(review): a new access key is created on every run even when the user
// already exists; IAM allows at most two keys per user, so repeated runs can
// fail — confirm whether old keys should be reaped.
func CreateUser(options Options) (key iam.AccessKey, err error) {
	name := options.Bucket + "_deploy"

	_, err = iamSession.CreateUser(name, "/")
	if err != nil {
		// An already-existing user is fine; any other IAM error aborts.
		iamErr, ok := err.(*iam.Error)
		if ok && iamErr.Code == "EntityAlreadyExists" {
			err = nil
		} else {
			return
		}
	}

	_, err = iamSession.PutUserPolicy(name, name, `{ "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "s3:DeleteObject", "s3:ListBucket", "s3:PutObject", "s3:PutObjectAcl", "s3:GetObject" ], "Resource": [ "arn:aws:s3:::`+options.Bucket+`", "arn:aws:s3:::`+options.Bucket+`/*" ] } ] }`,
	)
	if err != nil {
		return
	}

	keyResp, err := iamSession.CreateAccessKey(name)
	if err != nil {
		return
	}

	return keyResp.AccessKey, nil
}

// UpdateRoute points DNS for options.Bucket at the CloudFront distribution.
// If a Route 53 hosted zone exists for the domain's eTLD+1, an A-record alias
// is created; otherwise manual instructions are printed and nil is returned
// (using a third-party DNS provider is a supported configuration, not an
// error).
func UpdateRoute(options Options, dist cloudfront.DistributionSummary) error {
	zoneName, err := publicsuffix.EffectiveTLDPlusOne(options.Bucket)
	if err != nil {
		return err
	}
	// Route 53 zone names are fully qualified (trailing dot).
	zoneName = zoneName + "."

	resp, err := r53Session.ListHostedZonesByName(zoneName, "", 100)
	if err != nil {
		return err
	}

	if resp.IsTruncated {
		panic("More than 100 zones in the account")
	}

	var zone *route53.HostedZone
	for _, z := range resp.HostedZones {
		if z.Name == zoneName {
			// Taking the address of the loop variable is safe only because
			// we break immediately after.
			zone = &z
			break
		}
	}

	if zone == nil {
		fmt.Printf("A Route 53 hosted zone was not found for %s\n", zoneName)
		// NOTE(review): zoneName carries a trailing dot while options.Bucket
		// does not, so this comparison looks always-true; presumably it is
		// meant to detect "bucket is the apex domain" — verify.
		if zoneName != options.Bucket {
			fmt.Println("If you would like to use Route 53 to manage your DNS, create a zone for this domain, and update your registrar's configuration to point to the DNS servers Amazon provides and rerun this command. Note that you must copy any existing DNS configuration you have to Route 53 if you do not wish existing services hosted on this domain to stop working.")
			fmt.Printf("If you would like to continue to use your existing DNS, create a CNAME record pointing %s to %s and the site setup will be finished.\n", options.Bucket, dist.DomainName)
		} else {
			fmt.Println("Since you are hosting the root of your domain, using an alternative DNS host is unfortunately not possible.")
			fmt.Println("If you wish to host your site at the root of your domain, you must switch your sites DNS to Amazon's Route 53 and retry this command.")
		}
		return nil
	}

	fmt.Printf("Adding %s to %s Route 53 zone\n", options.Bucket, zone.Name)

	// zone.Id has the form "/hostedzone/<id>"; the API wants just <id>.
	parts := strings.Split(zone.Id, "/")
	idValue := parts[2]

	_, err = r53Session.ChangeResourceRecordSet(&route53.ChangeResourceRecordSetsRequest{
		Changes: []route53.Change{
			route53.Change{
				Action: "CREATE",
				Name:   options.Bucket,
				Type:   "A",
				AliasTarget: route53.AliasTarget{
					// Z2FDTNDATAQYW2 is the fixed hosted zone id Amazon
					// assigns to all CloudFront distributions.
					HostedZoneId:         "Z2FDTNDATAQYW2",
					DNSName:              dist.DomainName,
					EvaluateTargetHealth: false,
				},
			},
		},
	}, idValue)
	if err != nil {
		// CREATE fails if the record exists; treat that as success since the
		// route is presumably already pointing somewhere sensible.
		if strings.Contains(err.Error(), "it already exists") {
			fmt.Println("Existing route found, assuming it is correct")
			fmt.Printf("If you run into trouble, you may need to delete the %s route in Route53 and try again\n", options.Bucket)
			return nil
		}
		return err
	}

	return nil
}

// Create provisions everything needed to host a site: the S3 bucket, a
// CloudFront distribution, a Route 53 alias, and (unless options.NoUser is
// set) a deploy-only IAM user whose fresh credentials are printed for the
// operator. Package-level AWS sessions are opened lazily from the options.
func Create(options Options) {
	if s3Session == nil {
		s3Session = openS3(options.AWSKey, options.AWSSecret, options.AWSRegion, options.S3Host)
	}
	if iamSession == nil {
		iamSession = openIAM(options.AWSKey, options.AWSSecret, options.AWSRegion)
	}
	if r53Session == nil {
		r53Session = openRoute53(options.AWSKey, options.AWSSecret)
	}
	if cfSession == nil {
		cfSession = openCloudFront(options.AWSKey, options.AWSSecret)
	}

	// NOTE(review): a missing aws CLI only prints a warning; setup continues
	// regardless — confirm whether the CLI is actually required here.
	_, err := exec.LookPath("aws")
	if err != nil {
		fmt.Println("The aws CLI executable was not found in the PATH")
		fmt.Println("Install it from http://aws.amazon.com/cli/ and try again")
	}

	fmt.Println("Creating Bucket")
	err = CreateBucket(options)
	if err != nil {
		fmt.Println("Error creating S3 bucket")
		fmt.Println(err)
		return
	}

	fmt.Println("Loading/Creating CloudFront Distribution")
	dist, err := GetDistribution(options)
	if err != nil {
		fmt.Println("Error loading/creating CloudFront distribution")
		fmt.Println(err)
		return
	}

	fmt.Println("Adding Route")
	err = UpdateRoute(options, dist)
	if err != nil {
		fmt.Println("Error adding route to Route53 DNS config")
		fmt.Println(err)
		return
	}

	if !options.NoUser {
		key, err := CreateUser(options)
		if err != nil {
			fmt.Println("Error creating user")
			fmt.Println(err)
			return
		}

		fmt.Println("An access key has been created with just the permissions required to deploy / rollback this site")
		fmt.Println("It is strongly recommended you use this limited account to deploy this project in the future\n")
		fmt.Printf("ACCESS_KEY_ID=%s\n", key.Id)
		fmt.Printf("ACCESS_KEY_SECRET=%s\n\n", key.Secret)

		// Only print the long interactive walkthrough when stdin is a TTY
		// (i.e. a human is running this, not a CI system).
		if terminal.IsTerminal(int(os.Stdin.Fd())) {
			fmt.Println(`You can either add these credentials to the deploy.yaml file, or specify them as arguments to the stout deploy / stout rollback commands. You MUST NOT add them to the deploy.yaml file if this project is public (i.e. a public GitHub repo). If you can't add them to the deploy.yaml file, you can specify them as arguments on the command line. 
If you use a build system like CircleCI, you can add them as environment variables and pass those variables to the deploy commands (see the README). Your first deploy command might be: stout deploy --bucket ` + options.Bucket + ` --key ` + key.Id + ` --secret '` + key.Secret + `' `)
		}
	}

	fmt.Println("You can begin deploying now, but it can take up to ten minutes for your site to begin to work")
	fmt.Println("Depending on the configuration of your site, you might need to set the 'root', 'dest' or 'files' options to get your deploys working as you wish. See the README for details.")
	fmt.Println("It's also a good idea to look into the 'env' option, as in real-world situations it usually makes sense to have a development and/or staging site for each of your production sites.")
}

// createCmd is the CLI entry point for "stout create": merge flag, config
// file and AWS defaults, validate the required settings, then run Create.
func createCmd() {
	options, _ := parseOptions()
	loadConfigFile(&options)
	addAWSConfig(&options)

	if options.Bucket == "" {
		panic("You must specify a bucket")
	}
	if options.AWSKey == "" || options.AWSSecret == "" {
		panic("You must specify your AWS credentials")
	}

	Create(options)
}

================================================
FILE: src/cli.go
================================================
package main

import (
	"flag"
	"fmt"
)

// printUsage writes the top-level CLI help text to stdout.
func printUsage() {
	fmt.Println(`Stout Static Deploy Tool Supports three commands, create, deploy and rollback. Example Usage: To create a site which will be hosted at my.awesome.website: stout create --bucket my.awesome.website --key AWS_KEY --secret AWS_SECRET To deploy the current folder to the root of the my.awesome.website site: stout deploy --bucket my.awesome.website --key AWS_KEY --secret AWS_SECRET To rollback to a specific deploy: stout rollback --bucket my.awesome.website --key AWS_KEY --secret AWS_SECRET c4a22bf94de1 See the README for more configuration information. 
`)
}

// main dispatches on the first positional argument to the matching
// subcommand; unknown commands print the usage text.
func main() {
	flag.Parse()

	command := flag.Arg(0)

	switch command {
	case "help":
		printUsage()
	case "deploy":
		deployCmd()
	case "rollback":
		rollbackCmd()
	case "create":
		createCmd()
	default:
		fmt.Println("Command not understood")
		fmt.Println("")
		printUsage()
	}
}

================================================
FILE: src/deploy.go
================================================
package main

import (
	"bytes"
	"compress/gzip"
	"crypto/md5"
	"encoding/base64"
	"fmt"
	"io"
	"math/big"
	"mime"
	"net/url"
	"os"
	"os/exec"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/cenk/backoff"
	"golang.org/x/net/html"

	"log"

	"github.com/wsxiaoys/terminal/color"
	"github.com/zackbloom/goamz/s3"
)

// SCRIPT and STYLE tag the two kinds of versioned assets referenced by HTML.
const (
	SCRIPT = iota
	STYLE
)

// UPLOAD_WORKERS is the number of concurrent S3 upload workers.
const UPLOAD_WORKERS = 20

// NO_GZIP lists extensions of formats that are already compressed; gzipping
// them again would cost CPU for no size win.
var NO_GZIP = []string{
	"mp4",
	"webm",
	"ogg",
}

// hashFile returns the MD5 digest of the file's path, a newline, and the
// file's contents — so identical bytes at different paths hash differently.
// Panics (via must) if the file cannot be opened or read.
func hashFile(path string) []byte {
	hash := md5.New()
	io.WriteString(hash, path)
	io.WriteString(hash, "\n")

	// TODO: Encode type?

	ref := must(os.Open(path)).(*os.File)
	defer ref.Close()

	must(io.Copy(hash, ref))

	return hash.Sum(nil)
}

// hashBytes returns the MD5 digest of data.
func hashBytes(data []byte) []byte {
	hash := md5.New()
	must(io.Copy(hash, bytes.NewReader(data)))
	return hash.Sum(nil)
}

// hashFiles XORs the per-file digests into a single order-independent value
// and renders it as lowercase hex; this is the deploy id.
func hashFiles(files []string) string {
	hash := new(big.Int)
	for _, file := range files {
		val := new(big.Int)
		val.SetBytes(hashFile(file))

		hash = hash.Xor(hash, val)
	}

	return fmt.Sprintf("%x", hash)
}

// getRef returns the current git HEAD commit hash, panicking if git is not
// installed or the command fails.
// NOTE(review): the result includes the trailing newline emitted by
// `git rev-parse` — confirm callers expect that.
func getRef() string {
	gitPath := mustString(exec.LookPath("git"))

	cmd := exec.Command(gitPath, "rev-parse", "--verify", "HEAD")

	out := bytes.Buffer{}
	cmd.Stdout = &out

	panicIf(cmd.Run())

	return string(out.Bytes())
}

// guessContentType maps a filename to a MIME type by extension; returns ""
// when the extension is unknown.
func guessContentType(file string) string {
	return mime.TypeByExtension(filepath.Ext(file))
}

// shouldCompress reports whether the file should be gzipped before upload
// (true for everything except the NO_GZIP extensions).
func shouldCompress(file string) bool {
	ext := filepath.Ext(file)
	for _, e := range NO_GZIP {
		if "."+e == ext {
			return false
		}
	}

	return true
}

// UploadFileRequest describes a single object to store in S3.
type UploadFileRequest struct {
	Bucket       *s3.Bucket // destination bucket
	Reader       io.Reader  // file contents
	Path         string     // path relative to the deploy root
	Dest         string     // destination directory prefix within the bucket
	IncludeHash  bool       // prefix the key with a content hash (versioned asset)
	CacheSeconds int        // Cache-Control max-age value
}

// uploadFile gzips (when worthwhile) and uploads one file to S3 with a
// public-read ACL, a Content-MD5 integrity header, and the requested
// Cache-Control. When req.IncludeHash is set, the key is prefixed with the
// first 12 hex characters of the content hash. Uploads are retried with
// exponential backoff for up to 30 seconds; a final failure panics.
// Returns the S3 key that was written.
func uploadFile(req UploadFileRequest) (remotePath string) {
	buffer := bytes.NewBuffer([]byte{})

	compress := shouldCompress(req.Path)
	if compress {
		writer := gzip.NewWriter(buffer)
		must(io.Copy(writer, req.Reader))
		writer.Close()
	} else {
		must(io.Copy(buffer, req.Reader))
	}

	data := buffer.Bytes()

	// The hash covers the stored (possibly gzipped) bytes, matching what S3
	// will verify against Content-MD5.
	hash := hashBytes(data)
	hashPrefix := fmt.Sprintf("%x", hash)[:12]

	s3Opts := s3.Options{
		ContentMD5:   base64.StdEncoding.EncodeToString(hash),
		CacheControl: fmt.Sprintf("public, max-age=%d", req.CacheSeconds),
	}

	if compress {
		s3Opts.ContentEncoding = "gzip"
	}

	dest := req.Path
	if req.IncludeHash {
		dest = hashPrefix + "_" + dest
	}
	dest = filepath.Join(req.Dest, dest)

	log.Printf("Uploading to %s in %s (%s) [%d]\n", dest, req.Bucket.Name, hashPrefix, req.CacheSeconds)

	op := func() error {
		// We need to create a new reader each time, as we might be doing this more than once (if it fails)
		return req.Bucket.PutReader(dest, bytes.NewReader(data), int64(len(data)), guessContentType(dest)+"; charset=utf-8", s3.PublicRead, s3Opts)
	}

	back := backoff.NewExponentialBackOff()
	back.MaxElapsedTime = 30 * time.Second

	err := backoff.RetryNotify(op, back, func(err error, next time.Duration) {
		log.Println("Error uploading", err, "retrying in", next)
	})
	panicIf(err)

	return dest
}

// FileRef tracks one local file through a deploy: where it lives on disk,
// its logical remote path, and the S3 key it was actually uploaded to.
type FileRef struct {
	LocalPath  string
	RemotePath string

	UploadedPath string
}

// FileInst is one occurrence of a FileRef inside a particular HTML file.
type FileInst struct {
	File *FileRef

	InstPath string
}

// writeFiles uploads every FileRef received on the channel and records the
// uploaded key back into the ref. Hashed (versioned) files get the FOREVER
// ttl, unversioned files the LIMITED ttl (constants defined elsewhere in
// this package).
// NOTE(review): the deferred handle.Close() calls inside the loop all run at
// function return, so open file handles accumulate until the channel closes.
func writeFiles(options Options, includeHash bool, files chan *FileRef) {
	bucket := s3Session.Bucket(options.Bucket)

	for file := range files {
		handle := must(os.Open(file.LocalPath)).(*os.File)
		defer handle.Close()

		var ttl int
		ttl = FOREVER
		if !includeHash {
			ttl = LIMITED
		}

		// Remote paths are stored without a leading slash.
		remote := file.RemotePath
		if strings.HasPrefix(remote, "/") {
			remote = remote[1:]
		}

		partialPath, err := filepath.Rel(options.Dest, remote)
		if err != nil {
			panic(err)
		}

		(*file).UploadedPath = uploadFile(UploadFileRequest{
			Bucket:       bucket,
			Reader:       handle,
			Path:         partialPath,
			Dest:         options.Dest,
			IncludeHash:  includeHash,
			CacheSeconds: ttl,
		})
	}
}

func deployFiles(options Options, includeHash
bool, files []*FileRef) { ch := make(chan *FileRef) wg := new(sync.WaitGroup) for i := 0; i < UPLOAD_WORKERS; i++ { wg.Add(1) go func() { writeFiles(options, includeHash, ch) wg.Done() }() } for _, file := range files { if !includeHash && strings.HasSuffix(file.RemotePath, ".html") { panic(fmt.Sprintf("Cowardly refusing to deploy an html file (%s) without versioning.", file.RemotePath)) } ch <- file } close(ch) wg.Wait() } func addFiles(form uint8, parent *html.Node, files []string) { for _, file := range files { node := html.Node{ Type: html.ElementNode, } switch form { case SCRIPT: node.Data = "script" node.Attr = []html.Attribute{ html.Attribute{ Key: "src", Val: file, }, } case STYLE: node.Data = "link" node.Attr = []html.Attribute{ html.Attribute{ Key: "rel", Val: "stylesheet", }, html.Attribute{ Key: "href", Val: file, }, } default: panic("Type not understood") } parent.AppendChild(&node) } } func isLocal(href string) bool { parsed := must(url.Parse(href)).(*url.URL) return parsed.Host == "" } func formatHref(path string) string { if !strings.HasPrefix(path, "/") { path = "/" + path } return path } func renderHTML(options Options, file HTMLFile) string { handle := must(os.Open(file.File.LocalPath)).(*os.File) defer handle.Close() doc := must(html.Parse(handle)).(*html.Node) var f func(*html.Node) f = func(n *html.Node) { for c := n.FirstChild; c != nil; c = c.NextSibling { f(c) } if n.Type == html.ElementNode { switch n.Data { case "script": for i, a := range n.Attr { if a.Key == "src" { for _, dep := range file.Deps { if dep.InstPath == a.Val { n.Attr[i].Val = formatHref(dep.File.UploadedPath) break } } } } case "link": stylesheet := false for _, a := range n.Attr { if a.Key == "rel" { stylesheet = a.Val == "stylesheet" break } } if !stylesheet { return } for i, a := range n.Attr { if a.Key == "href" { for _, dep := range file.Deps { if dep.InstPath == a.Val { n.Attr[i].Val = formatHref(dep.File.UploadedPath) break } } } } } } } f(doc) buf := 
bytes.NewBuffer([]byte{}) panicIf(html.Render(buf, doc)) return buf.String() } func parseHTML(options Options, path string) (files []string, base string) { files = make([]string, 0) handle := must(os.Open(path)).(*os.File) defer handle.Close() doc := must(html.Parse(handle)).(*html.Node) var f func(*html.Node) f = func(n *html.Node) { for c := n.FirstChild; c != nil; c = c.NextSibling { f(c) } if n.Type == html.ElementNode { switch n.Data { case "base": for _, a := range n.Attr { if a.Key == "href" { base = a.Val } } case "script": for _, a := range n.Attr { if a.Key == "src" { if isLocal(a.Val) { files = append(files, a.Val) } } } case "link": local := false stylesheet := false href := "" for _, a := range n.Attr { switch a.Key { case "href": local = isLocal(a.Val) href = a.Val case "rel": stylesheet = a.Val == "stylesheet" } } if local && stylesheet { files = append(files, href) } } } } f(doc) return } func deployHTML(options Options, id string, file HTMLFile) { data := renderHTML(options, file) internalPath, err := filepath.Rel(options.Root, file.File.LocalPath) if err != nil { panic(err) } permPath := joinPath(options.Dest, id, internalPath) curPath := joinPath(options.Dest, internalPath) bucket := s3Session.Bucket(options.Bucket) uploadFile(UploadFileRequest{ Bucket: bucket, Reader: strings.NewReader(data), Path: permPath, IncludeHash: false, CacheSeconds: FOREVER, }) log.Println("Copying", permPath, "to", curPath) copyFile(bucket, permPath, curPath, "text/html; charset=utf-8", LIMITED) } func expandFiles(root string, glob string) []string { out := make([]string, 0) cases := strings.Split(glob, ",") for _, pattern := range cases { if strings.HasPrefix(pattern, "-/") { pattern = pattern[2:] } else { pattern = joinPath(root, pattern) } list := must(filepath.Glob(pattern)).([]string) for _, file := range list { info := must(os.Stat(file)).(os.FileInfo) if info.IsDir() { filepath.Walk(file, func(path string, info os.FileInfo, err error) error { panicIf(err) if 
!info.IsDir() { out = append(out, path) } return nil }) } else { out = append(out, file) } } } return out } func listFiles(options Options) []*FileRef { filePaths := expandFiles(options.Root, options.Files) files := make([]*FileRef, len(filePaths)) for i, path := range filePaths { remotePath := joinPath(options.Dest, mustString(filepath.Rel(options.Root, path))) for strings.HasPrefix(remotePath, "../") { remotePath = remotePath[3:] } files[i] = &FileRef{ LocalPath: path, RemotePath: remotePath, } } return files } func ignoreFiles(full []*FileRef, rem []*FileRef) []*FileRef { out := make([]*FileRef, 0, len(full)) for _, file := range full { ignore := false path := filepath.Clean(file.LocalPath) for _, remFile := range rem { if filepath.Clean(remFile.LocalPath) == path { ignore = true break } } if !ignore { out = append(out, file) } } return out } func extractFileList(options Options, pattern string) (files []string) { files = make([]string, 0) parts := strings.Split(pattern, ",") for _, part := range parts { matches, err := filepath.Glob(joinPath(options.Root, part)) if err != nil { panic(err) } if matches == nil { panic(fmt.Sprintf("Pattern %s did not match any files", part)) } files = append(files, matches...) 
} return files } func filesWithExtension(files []*FileRef, ext string) (outFiles []*FileRef) { outFiles = make([]*FileRef, 0) for _, file := range files { if filepath.Ext(file.LocalPath) == ext { outFiles = append(outFiles, file) } } return } type HTMLFile struct { File FileRef Deps []FileInst Base string } func (f HTMLFile) GetLocalPath() string { return f.File.LocalPath } func Deploy(options Options) { if s3Session == nil { s3Session = openS3(options.AWSKey, options.AWSSecret, options.AWSRegion, options.S3Host) } files := listFiles(options) htmlFileRefs := filesWithExtension(files, ".html") var htmlFiles []HTMLFile var id string if len(htmlFileRefs) == 0 { log.Println("No HTML files found") } else { inclFiles := make(map[string]*FileRef) htmlFiles = make([]HTMLFile, len(htmlFileRefs)) for i, file := range htmlFileRefs { dir := filepath.Dir(file.LocalPath) rel, err := filepath.Rel(options.Root, dir) if err != nil { panic(err) } paths, base := parseHTML(options, file.LocalPath) if strings.HasPrefix(strings.ToLower(base), "http") || strings.HasPrefix(base, "//") { panic("Absolute base tags are not supported") } if strings.HasSuffix(base, "/") { base = base[:len(base)-1] } htmlFiles[i] = HTMLFile{ File: *file, Deps: make([]FileInst, len(paths)), Base: base, } var dest string if strings.HasPrefix(base, "/") && strings.HasPrefix(base, "/"+options.Dest) { dest = base } else { dest = joinPath(options.Dest, base) } var root string if strings.HasPrefix(base, "/") && strings.HasSuffix(options.Root, base) { root = options.Root } else { root = joinPath(options.Root, base) } for j, path := range paths { var local, remote string if strings.HasPrefix(path, "/") { local = joinPath(options.Root, path) remote = joinPath(options.Dest, path) } else { if strings.HasPrefix(base, "/") { local = joinPath(root, path) remote = joinPath(dest, path) } else { local = joinPath(options.Root, rel, base, path) remote = joinPath(options.Dest, rel, base, path) } } for strings.HasPrefix(remote, 
"../") { remote = remote[3:] } ref, ok := inclFiles[local] if !ok { ref = &FileRef{ LocalPath: local, RemotePath: remote, // Filled in after the deploy: UploadedPath: "", } inclFiles[local] = ref } use := FileInst{ File: ref, InstPath: path, } htmlFiles[i].Deps[j] = use } } inclFileList := make([]*FileRef, len(inclFiles)) i := 0 for _, ref := range inclFiles { inclFileList[i] = ref i++ } hashPaths := make([]string, 0) for _, item := range inclFileList { hashPaths = append(hashPaths, item.LocalPath) } for _, item := range htmlFiles { hashPaths = append(hashPaths, item.File.LocalPath) } hash := hashFiles(hashPaths) id = hash[:12] deployFiles(options, true, inclFileList) } deployFiles(options, false, ignoreFiles(files, htmlFileRefs)) if len(htmlFileRefs) != 0 { // Ensure that the new files exist in s3 // Time based on "Eventual Consistency: How soon is eventual?" time.Sleep(1500 * time.Millisecond) wg := sync.WaitGroup{} for _, file := range htmlFiles { wg.Add(1) go func(file HTMLFile) { defer wg.Done() deployHTML(options, id, file) }(file) } wg.Wait() } visId := id if id == "" { visId = "0 HTML Files" } color.Printf(` +------------------------------------+ | @{g}Deploy Successful!@{|} | | | | Deploy ID: @{?}%s@{|} | +------------------------------------+ `, visId) } func deployCmd() { options, _ := parseOptions() loadConfigFile(&options) addAWSConfig(&options) if options.Bucket == "" { panic("You must specify a bucket") } if options.AWSKey == "" || options.AWSSecret == "" { panic("You must specify your AWS credentials") } Deploy(options) } ================================================ FILE: src/rollback.go ================================================ package main import ( "fmt" "log" "path/filepath" "sync" "github.com/zackbloom/goamz/s3" ) func Rollback(options Options, version string) { if s3Session == nil { s3Session = openS3(options.AWSKey, options.AWSSecret, options.AWSRegion, options.S3Host) } bucket := s3Session.Bucket(options.Bucket) // List files with 
the correct prefix in bucket // Remove their prefix with a copy. prefix := filepath.Join(options.Dest, version) + "/" list, err := bucket.List(prefix, "", "", 1000) panicIf(err) if list.IsTruncated { panic(fmt.Sprintf("More than %d HTML files in version, rollback is not supported. Consider filing a GitHub issue if you need support for this.", list.MaxKeys)) } if len(list.Contents) == 0 { log.Printf("A deploy with the provided id (%s) was not found in the specified bucket", version) return } wg := sync.WaitGroup{} count := 0 for _, file := range list.Contents { wg.Add(1) go func(file s3.Key) { defer wg.Done() path := file.Key if filepath.Ext(path) != ".html" { log.Printf("Skipping non-html file %s", path) return } newPath := filepath.Join(options.Dest, path[len(prefix):]) log.Printf("Aliasing %s to %s", path, newPath) copyFile(bucket, path, newPath, "text/html", LIMITED) count++ }(file) } wg.Wait() log.Printf("Reverted %d HTML files to version %s", count, version) } func rollbackCmd() { options, set := parseOptions() version := set.Arg(0) loadConfigFile(&options) addAWSConfig(&options) if options.Bucket == "" { panic("You must specify a bucket") } if options.AWSKey == "" || options.AWSSecret == "" { panic("You must specify your AWS credentials") } if version == "" { panic("You must specify a version to rollback to") } Rollback(options, version) } ================================================ FILE: src/utils.go ================================================ package main import ( "flag" "fmt" "io/ioutil" "log" "os" "path/filepath" "regexp" "strings" "github.com/imdario/mergo" homedir "github.com/mitchellh/go-homedir" ini "github.com/sspencer/go-ini" "github.com/zackbloom/goamz/aws" "github.com/zackbloom/goamz/cloudfront" "github.com/zackbloom/goamz/iam" "github.com/zackbloom/goamz/route53" "github.com/zackbloom/goamz/s3" "gopkg.in/yaml.v1" ) const ( LIMITED = 60 FOREVER = 31556926 ) var s3Session *s3.S3 var iamSession *iam.IAM var r53Session *route53.Route53 var 
cfSession *cloudfront.CloudFront func getRegion(region string, s3Host string) aws.Region { regionS, ok := aws.Regions[region] if !ok { panic("Region not found") } log.Println("HOST", s3Host) if s3Host != "" { regionS.S3Endpoint = "https://" + s3Host regionS.S3BucketEndpoint = "https://${bucket}." + s3Host } return regionS } func openS3(key, secret, region, s3Host string) *s3.S3 { regionS := getRegion(region, s3Host) auth := aws.Auth{ AccessKey: key, SecretKey: secret, } return s3.New(auth, regionS) } func openIAM(key, secret, region string) *iam.IAM { regionS := getRegion(region, "") auth := aws.Auth{ AccessKey: key, SecretKey: secret, } return iam.New(auth, regionS) } func openCloudFront(key, secret string) *cloudfront.CloudFront { auth := aws.Auth{ AccessKey: key, SecretKey: secret, } return cloudfront.NewCloudFront(auth) } func openRoute53(key, secret string) *route53.Route53 { auth := aws.Auth{ AccessKey: key, SecretKey: secret, } r53, _ := route53.NewRoute53(auth) return r53 } func panicIf(err error) { if err != nil { panic(err) } } func must(val interface{}, err error) interface{} { if err != nil { panic(err) } return val } func mustString(val string, err error) string { panicIf(err) return val } func mustInt(val int, err error) int { panicIf(err) return val } type Options struct { Files string `yaml:"files"` Root string `yaml:"root"` Dest string `yaml:"dest"` ConfigFile string `yaml:"-"` Env string `yaml:"-"` Bucket string `yaml:"bucket"` AWSKey string `yaml:"key"` AWSSecret string `yaml:"secret"` AWSRegion string `yaml:"region"` S3Host string `yaml:"s3Host"` NoUser bool `yaml:"-"` } func parseOptions() (o Options, set *flag.FlagSet) { set = flag.NewFlagSet(os.Args[1], flag.ExitOnError) //TODO: Set set.Usage set.StringVar(&o.Files, "files", "*", "Comma-seperated glob patterns of files to deploy (within root)") set.StringVar(&o.Root, "root", "./", "The local directory to deploy") set.StringVar(&o.Dest, "dest", "./", "The destination directory to write files 
to in the S3 bucket")
	set.StringVar(&o.ConfigFile, "config", "", "A yaml file to read configuration from")
	set.StringVar(&o.Env, "env", "", "The env to read from the config file")
	set.StringVar(&o.Bucket, "bucket", "", "The bucket to deploy to")
	set.StringVar(&o.AWSKey, "key", "", "The AWS key to use")
	set.StringVar(&o.AWSSecret, "secret", "", "The AWS secret of the provided key")
	set.StringVar(&o.AWSRegion, "region", "us-east-1", "The AWS region the S3 bucket is in")
	set.StringVar(&o.S3Host, "s3-host", "s3.amazonaws.com", "The hostname of an S3 implementation, overrides region")
	set.BoolVar(&o.NoUser, "no-user", false, "When creating, should we make a user account?")
	set.Parse(os.Args[2:])
	return
}

// ConfigFile maps an env name (e.g. "default", "production") to its Options.
type ConfigFile map[string]Options

// loadConfigFile merges settings from a yaml config file into o: first the
// "default" section, then (if o.Env is set) the named env section, each
// overwriting values already in o.
func loadConfigFile(o *Options) {
	isDefault := false
	configPath := o.ConfigFile
	if o.ConfigFile == "" {
		isDefault = true
		configPath = "./deploy.yaml"
	}

	data, err := ioutil.ReadFile(configPath)
	if err != nil {
		// A missing file is only fatal when the user explicitly asked for it.
		if os.IsNotExist(err) && isDefault {
			return
		}
		panic(err)
	}

	var file ConfigFile
	err = yaml.Unmarshal(data, &file)
	panicIf(err)

	var envCfg Options
	if o.Env != "" {
		var ok bool
		envCfg, ok = file[o.Env]
		if !ok {
			panic("Config for specified env not found")
		}
	}

	// A missing "default" section simply yields the zero Options value
	// (the ", _" blank identifier previously used here was unnecessary).
	defCfg := file["default"]

	panicIf(mergo.MergeWithOverwrite(o, defCfg))
	panicIf(mergo.MergeWithOverwrite(o, envCfg))
}

// addAWSConfig fills in AWS credentials from the ~/.aws config files when
// neither the key nor the secret was provided via flags or the config file.
func addAWSConfig(o *Options) {
	if o.AWSKey == "" && o.AWSSecret == "" {
		o.AWSKey, o.AWSSecret = loadAWSConfig()
	}
}

// AWSConfig mirrors the [default] section of the AWS CLI credential files.
type AWSConfig struct {
	Default struct {
		AccessKey string `ini:"aws_access_key_id"`
		SecretKey string `ini:"aws_secret_access_key"`
	} `ini:"[default]"`
}

// loadAWSConfig reads credentials from ~/.aws/config or ~/.aws/credentials,
// returning the first access key / secret pair found (empty strings if none).
func loadAWSConfig() (access string, secret string) {
	cfg := AWSConfig{}

	for _, file := range []string{"~/.aws/config", "~/.aws/credentials"} {
		path, err := homedir.Expand(file)
		if err != nil {
			continue
		}

		content, err := ioutil.ReadFile(path)
		if err != nil {
			continue
		}

		// Best-effort parse: the error is deliberately ignored and the next
		// candidate file is tried when no access key was found.
		ini.Unmarshal(content, &cfg)

		if cfg.Default.AccessKey != "" {
			break
		}
	}

	return cfg.Default.AccessKey, cfg.Default.SecretKey
}
func copyFile(bucket *s3.Bucket, from string, to string, contentType string, maxAge int) { copyOpts := s3.CopyOptions{ MetadataDirective: "REPLACE", ContentType: contentType, Options: s3.Options{ CacheControl: fmt.Sprintf("public, max-age=%d", maxAge), ContentEncoding: "gzip", }, } _, err := bucket.PutCopy(to, s3.PublicRead, copyOpts, joinPath(bucket.Name, from)) if err != nil { panic(err) } } var pathRe = regexp.MustCompile("/{2,}") func joinPath(parts ...string) string { // Like filepath.Join, but always uses '/' out := filepath.Join(parts...) if os.PathSeparator != '/' { out = strings.Replace(out, string(os.PathSeparator), "/", -1) } return out } ================================================ FILE: utils/build.sh ================================================ GOOS=linux GOARCH=amd64 go build -o stout-linux src/* GOOS=darwin GOARCH=amd64 go build -o stout-osx src/* GOOS=windows GOARCH=amd64 go build -o stout-windows.exe src/* ================================================ FILE: utils/create_site.sh ================================================ #set -e export HOST=$1 export DEPLOY_USER=${HOST}_deploy aws s3 mb s3://$HOST --region us-east-1 aws s3 website s3://$HOST --index-document index.html --error-document error.html aws s3api put-bucket-policy --bucket $HOST --policy "{ \"Version\": \"2008-10-17\", \"Statement\": [ { \"Sid\": \"PublicReadForGetBucketObjects\", \"Effect\": \"Allow\", \"Principal\": { \"AWS\": \"*\" }, \"Action\": \"s3:GetObject\", \"Resource\": \"arn:aws:s3:::$HOST/*\" } ] }" export CALLER=`date +"%T"` aws cloudfront create-distribution --distribution-config " { \"CallerReference\": \"$CALLER\", \"Comment\": null, \"CacheBehaviors\": { \"Quantity\": 0 }, \"Logging\": { \"Bucket\": null, \"Prefix\": null, \"Enabled\": false, \"IncludeCookies\": false }, \"Origins\": { \"Items\": [ { \"S3OriginConfig\": { \"OriginAccessIdentity\": null }, \"Id\": \"S3-$HOST\", \"DomainName\": \"$HOST.s3.amazonaws.com\" } ], \"Quantity\": 1 }, 
\"DefaultRootObject\": \"index.html\", \"PriceClass\": \"PriceClass_All\", \"Enabled\": true, \"DefaultCacheBehavior\": { \"TrustedSigners\": { \"Enabled\": false, \"Quantity\": 0 }, \"TargetOriginId\": \"S3-$HOST\", \"ViewerProtocolPolicy\": \"allow-all\", \"ForwardedValues\": { \"Cookies\": { \"Forward\": \"none\" }, \"QueryString\": false }, \"AllowedMethods\": { \"Items\": [ \"GET\", \"HEAD\" ], \"Quantity\": 2 }, \"MinTTL\": 0 }, \"ViewerCertificate\": { \"CloudFrontDefaultCertificate\": true }, \"CustomErrorResponses\": { \"Quantity\": 0 }, \"Restrictions\": { \"GeoRestriction\": { \"RestrictionType\": \"none\", \"Quantity\": 0 } }, \"Aliases\": { \"Items\": [ \"$HOST\" ], \"Quantity\": 1 } }" aws iam create-user --user-name $DEPLOY_USER aws iam put-user-policy --user-name $DEPLOY_USER --policy-name $DEPLOY_USER --policy-document "{ \"Version\": \"2012-10-17\", \"Statement\": [ { \"Effect\": \"Allow\", \"Action\": [ \"s3:DeleteObject\", \"s3:ListBucket\", \"s3:PutObject\", \"s3:PutObjectAcl\", \"s3:GetObject\" ], \"Resource\": [ \"arn:aws:s3:::$HOST\", \"arn:aws:s3:::$HOST/*\" ] } ] }" aws iam create-access-key --user-name $DEPLOY_USER | cat echo "Select a SSL Cert in CloudFront if applicable" echo "Site set up. You must now manually add the cloudfront distribution to your DNS configuration." 
================================================ FILE: utils/release.sh ================================================ set -e # Install jq with brew install jq RELEASE=$1 git tag $RELEASE git push origin master --tags UPLOAD_URL=$(curl -X POST "https://api.github.com/repos/EagerIO/Stout/releases" \ -H "Accept: application/vnd.github.v3+json" \ -H "Authorization: token $GITHUB_AUTH" \ -H "Content-Type: application/json" \ -d " { \"tag_name\": \"$RELEASE\" }" | jq -r '.upload_url' | cut -d { -f 1) mkdir -p debian echo " Package: stout Source: stout Version: $RELEASE Architecture: all Maintainer: Zack Bloom Description: The reliable static website deploy tool " > `dirname $0`/../control `dirname $0`/xc.sh upload () { local archive=$1 local filename=$(basename "$archive") local extension="${filename##*.}" if [ "$extension" == "md" ]; then return fi curl -X POST "$UPLOAD_URL?name=$filename" \ -H "Content-Type: application/octet-stream" \ -H "Authorization: token $GITHUB_AUTH" \ --data-binary @$archive } for f in builds/snapshot/*; do upload "$f" & done wait ================================================ FILE: utils/xc.sh ================================================ goxc -tasks-=downloads-page -d=./builds ================================================ FILE: vendor/github.com/cenk/backoff/.gitignore ================================================ # Compiled Object files, Static and Dynamic libs (Shared Objects) *.o *.a *.so # Folders _obj _test # Architecture specific extensions/prefixes *.[568vq] [568vq].out *.cgo1.go *.cgo2.c _cgo_defun.c _cgo_gotypes.go _cgo_export.* _testmain.go *.exe ================================================ FILE: vendor/github.com/cenk/backoff/.travis.yml ================================================ language: go go: - 1.3.3 - tip before_install: - go get github.com/mattn/goveralls - go get golang.org/x/tools/cmd/cover script: - $HOME/gopath/bin/goveralls -service=travis-ci ================================================ FILE: 
vendor/github.com/cenk/backoff/LICENSE ================================================ The MIT License (MIT) Copyright (c) 2014 Cenk Altı Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: vendor/github.com/cenk/backoff/README.md ================================================ # Exponential Backoff [![GoDoc][godoc image]][godoc] [![Build Status][travis image]][travis] [![Coverage Status][coveralls image]][coveralls] This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client]. [Exponential backoff][exponential backoff wiki] is an algorithm that uses feedback to multiplicatively decrease the rate of some process, in order to gradually find an acceptable rate. The retries exponentially increase and stop increasing when a certain threshold is met. ## How To We define two functions, `Retry()` and `RetryNotify()`. 
They receive an `Operation` to execute, a `BackOff` algorithm, and an optional `Notify` error handler. The operation will be executed, and will be retried on failure with delay as given by the backoff algorithm. The backoff algorithm can also decide when to stop retrying. In addition, the notify error handler will be called after each failed attempt, except for the last time, whose error should be handled by the caller. ```go // An Operation is executing by Retry() or RetryNotify(). // The operation will be retried using a backoff policy if it returns an error. type Operation func() error // Notify is a notify-on-error function. It receives an operation error and // backoff delay if the operation failed (with an error). // // NOTE that if the backoff policy stated to stop retrying, // the notify function isn't called. type Notify func(error, time.Duration) func Retry(Operation, BackOff) error func RetryNotify(Operation, BackOff, Notify) ``` ## Examples ### Retry Simple retry helper that uses the default exponential backoff algorithm: ```go operation := func() error { // An operation that might fail. return nil // or return errors.New("some error") } err := Retry(operation, NewExponentialBackOff()) if err != nil { // Handle error. return err } // Operation is successful. return nil ``` ### Ticker Ticker is for using backoff algorithms with channels. ```go operation := func() error { // An operation that might fail return nil // or return errors.New("some error") } b := NewExponentialBackOff() ticker := NewTicker(b) var err error // Ticks will continue to arrive when the previous operation is still running, // so operations that take a while to fail could run in quick succession. for range ticker.C { if err = operation(); err != nil { log.Println(err, "will retry...") continue } ticker.Stop() break } if err != nil { // Operation has failed. return err } // Operation is successful. 
return nil
```

## Getting Started

```bash
# install
$ go get github.com/cenk/backoff

# test
$ cd $GOPATH/src/github.com/cenk/backoff
$ go get -t ./...
$ go test -v -cover
```

[godoc]: https://godoc.org/github.com/cenk/backoff
[godoc image]: https://godoc.org/github.com/cenk/backoff?status.png
[travis]: https://travis-ci.org/cenk/backoff
[travis image]: https://travis-ci.org/cenk/backoff.png?branch=master
[coveralls]: https://coveralls.io/github/cenk/backoff?branch=master
[coveralls image]: https://coveralls.io/repos/github/cenk/backoff/badge.svg?branch=master
[google-http-java-client]: https://github.com/google/google-http-java-client
[exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff
[advanced example]: https://godoc.org/github.com/cenk/backoff#example_

================================================
FILE: vendor/github.com/cenk/backoff/backoff.go
================================================
// Package backoff implements backoff algorithms for retrying operations.
//
// Also has a Retry() helper for retrying operations that may fail.
package backoff

import "time"

// BackOff is a backoff policy for retrying an operation.
type BackOff interface {
	// NextBackOff returns the duration to wait before retrying the operation,
	// or backoff.Stop to indicate that no more retries should be made.
	//
	// Example usage:
	//
	// 	duration := backoff.NextBackOff();
	// 	if (duration == backoff.Stop) {
	// 		// Do not retry operation.
	// 	} else {
	// 		// Sleep for duration and retry operation.
	// 	}
	//
	NextBackOff() time.Duration

	// Reset to initial state.
	Reset()
}

// Stop indicates that no more retries should be made for use in NextBackOff().
const Stop time.Duration = -1

// ZeroBackOff is a fixed backoff policy whose backoff time is always zero,
// meaning that the operation is retried immediately without waiting, indefinitely.
type ZeroBackOff struct{} func (b *ZeroBackOff) Reset() {} func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } // StopBackOff is a fixed backoff policy that always returns backoff.Stop for // NextBackOff(), meaning that the operation should never be retried. type StopBackOff struct{} func (b *StopBackOff) Reset() {} func (b *StopBackOff) NextBackOff() time.Duration { return Stop } // ConstantBackOff is a backoff policy that always returns the same backoff delay. // This is in contrast to an exponential backoff policy, // which returns a delay that grows longer as you call NextBackOff() over and over again. type ConstantBackOff struct { Interval time.Duration } func (b *ConstantBackOff) Reset() {} func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } func NewConstantBackOff(d time.Duration) *ConstantBackOff { return &ConstantBackOff{Interval: d} } ================================================ FILE: vendor/github.com/cenk/backoff/exponential.go ================================================ package backoff import ( "math/rand" "time" ) /* ExponentialBackOff is a backoff implementation that increases the backoff period for each retry attempt using a randomization function that grows exponentially. NextBackOff() is calculated using the following formula: randomized interval = RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) In other words NextBackOff() will range between the randomization factor percentage below and above the retry interval. For example, given the following parameters: RetryInterval = 2 RandomizationFactor = 0.5 Multiplier = 2 the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, multiplied by the exponential, that is, between 2 and 6 seconds. Note: MaxInterval caps the RetryInterval and not the randomized interval. 
If the time elapsed since an ExponentialBackOff instance is created goes past the MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. The elapsed time can be reset by calling Reset(). Example: Given the following default arguments, for 10 tries the sequence will be, and assuming we go over the MaxElapsedTime on the 10th try: Request # RetryInterval (seconds) Randomized Interval (seconds) 1 0.5 [0.25, 0.75] 2 0.75 [0.375, 1.125] 3 1.125 [0.562, 1.687] 4 1.687 [0.8435, 2.53] 5 2.53 [1.265, 3.795] 6 3.795 [1.897, 5.692] 7 5.692 [2.846, 8.538] 8 8.538 [4.269, 12.807] 9 12.807 [6.403, 19.210] 10 19.210 backoff.Stop Note: Implementation is not thread-safe. */ type ExponentialBackOff struct { InitialInterval time.Duration RandomizationFactor float64 Multiplier float64 MaxInterval time.Duration // After MaxElapsedTime the ExponentialBackOff stops. // It never stops if MaxElapsedTime == 0. MaxElapsedTime time.Duration Clock Clock currentInterval time.Duration startTime time.Time } // Clock is an interface that returns current time for BackOff. type Clock interface { Now() time.Time } // Default values for ExponentialBackOff. const ( DefaultInitialInterval = 500 * time.Millisecond DefaultRandomizationFactor = 0.5 DefaultMultiplier = 1.5 DefaultMaxInterval = 60 * time.Second DefaultMaxElapsedTime = 15 * time.Minute ) // NewExponentialBackOff creates an instance of ExponentialBackOff using default values. 
func NewExponentialBackOff() *ExponentialBackOff { b := &ExponentialBackOff{ InitialInterval: DefaultInitialInterval, RandomizationFactor: DefaultRandomizationFactor, Multiplier: DefaultMultiplier, MaxInterval: DefaultMaxInterval, MaxElapsedTime: DefaultMaxElapsedTime, Clock: SystemClock, } if b.RandomizationFactor < 0 { b.RandomizationFactor = 0 } else if b.RandomizationFactor > 1 { b.RandomizationFactor = 1 } b.Reset() return b } type systemClock struct{} func (t systemClock) Now() time.Time { return time.Now() } // SystemClock implements Clock interface that uses time.Now(). var SystemClock = systemClock{} // Reset the interval back to the initial retry interval and restarts the timer. func (b *ExponentialBackOff) Reset() { b.currentInterval = b.InitialInterval b.startTime = b.Clock.Now() } // NextBackOff calculates the next backoff interval using the formula: // Randomized interval = RetryInterval +/- (RandomizationFactor * RetryInterval) func (b *ExponentialBackOff) NextBackOff() time.Duration { // Make sure we have not gone over the maximum elapsed time. if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime { return Stop } defer b.incrementCurrentInterval() return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) } // GetElapsedTime returns the elapsed time since an ExponentialBackOff instance // is created and is reset when Reset() is called. // // The elapsed time is computed using time.Now().UnixNano(). func (b *ExponentialBackOff) GetElapsedTime() time.Duration { return b.Clock.Now().Sub(b.startTime) } // Increments the current interval by multiplying it with the multiplier. func (b *ExponentialBackOff) incrementCurrentInterval() { // Check for overflow, if overflow is detected set the current interval to the max interval. 
	if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
		b.currentInterval = b.MaxInterval
	} else {
		b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
	}
}

// Returns a random value from the following interval:
// [currentInterval - randomizationFactor * currentInterval,
//  currentInterval + randomizationFactor * currentInterval].
func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
	var delta = randomizationFactor * float64(currentInterval)
	var minInterval = float64(currentInterval) - delta
	var maxInterval = float64(currentInterval) + delta

	// Get a random value from the range [minInterval, maxInterval].
	// The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
	// we want a 33% chance for selecting either 1, 2 or 3.
	return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
}

================================================
FILE: vendor/github.com/cenk/backoff/retry.go
================================================
package backoff

import "time"

// An Operation is executed by Retry() or RetryNotify().
// The operation will be retried using a backoff policy if it returns an error.
type Operation func() error

// Notify is a notify-on-error function. It receives an operation error and
// backoff delay if the operation failed (with an error).
//
// NOTE that if the backoff policy stated to stop retrying,
// the notify function isn't called.
type Notify func(error, time.Duration)

// Retry the operation o until it does not return error or BackOff stops.
// o is guaranteed to be run at least once.
// It is the caller's responsibility to reset b after Retry returns.
//
// Retry sleeps the goroutine for the duration returned by BackOff after a
// failed operation returns.
func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) } // RetryNotify calls notify function with the error and wait duration // for each failed attempt before sleep. func RetryNotify(operation Operation, b BackOff, notify Notify) error { var err error var next time.Duration b.Reset() for { if err = operation(); err == nil { return nil } if next = b.NextBackOff(); next == Stop { return err } if notify != nil { notify(err, next) } time.Sleep(next) } } ================================================ FILE: vendor/github.com/cenk/backoff/ticker.go ================================================ package backoff import ( "runtime" "sync" "time" ) // Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. // // Ticks will continue to arrive when the previous operation is still running, // so operations that take a while to fail could run in quick succession. type Ticker struct { C <-chan time.Time c chan time.Time b BackOff stop chan struct{} stopOnce sync.Once } // NewTicker returns a new Ticker containing a channel that will send the time at times // specified by the BackOff argument. Ticker is guaranteed to tick at least once. // The channel is closed when Stop method is called or BackOff stops. func NewTicker(b BackOff) *Ticker { c := make(chan time.Time) t := &Ticker{ C: c, c: c, b: b, stop: make(chan struct{}), } go t.run() runtime.SetFinalizer(t, (*Ticker).Stop) return t } // Stop turns off a ticker. After Stop, no more ticks will be sent. func (t *Ticker) Stop() { t.stopOnce.Do(func() { close(t.stop) }) } func (t *Ticker) run() { c := t.c defer close(c) t.b.Reset() // Ticker is guaranteed to tick at least once. afterC := t.send(time.Now()) for { if afterC == nil { return } select { case tick := <-afterC: afterC = t.send(tick) case <-t.stop: t.c = nil // Prevent future ticks from being sent to the channel. 
return } } } func (t *Ticker) send(tick time.Time) <-chan time.Time { select { case t.c <- tick: case <-t.stop: return nil } next := t.b.NextBackOff() if next == Stop { t.Stop() return nil } return time.After(next) } ================================================ FILE: vendor/github.com/imdario/mergo/.travis.yml ================================================ language: go install: go get -t ================================================ FILE: vendor/github.com/imdario/mergo/LICENSE ================================================ Copyright (c) 2013 Dario Castañé. All rights reserved. Copyright (c) 2012 The Go Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================ FILE: vendor/github.com/imdario/mergo/README.md ================================================ # Mergo A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region Marche. ![Mergo dall'alto](http://www.comune.mergo.an.it/Siti/Mergo/Immagini/Foto/mergo_dall_alto.jpg) ## Status It is ready for production use. It works fine after extensive use in the wild. [![Build Status][1]][2] [![GoDoc][3]][4] [![GoCard][5]][6] [1]: https://travis-ci.org/imdario/mergo.png [2]: https://travis-ci.org/imdario/mergo [3]: https://godoc.org/github.com/imdario/mergo?status.svg [4]: https://godoc.org/github.com/imdario/mergo [5]: https://goreportcard.com/badge/imdario/mergo [6]: https://goreportcard.com/report/github.com/imdario/mergo ### Important note Mergo is intended to assign **only** zero value fields on destination with source value. Since April 6th it works like this. Before it didn't work properly, causing some random overwrites. After some issues and PRs I found it didn't merge as I designed it. Thanks to [imdario/mergo#8](https://github.com/imdario/mergo/pull/8) overwriting functions were added and the wrong behavior was clearly detected. 
If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0). ### Mergo in the wild - [docker/docker](https://github.com/docker/docker/) - [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) - [imdario/zas](https://github.com/imdario/zas) - [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) - [EagerIO/Stout](https://github.com/EagerIO/Stout) - [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) - [russross/canvasassignments](https://github.com/russross/canvasassignments) - [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) - [casualjim/exeggutor](https://github.com/casualjim/exeggutor) - [divshot/gitling](https://github.com/divshot/gitling) - [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) - [andrerocker/deploy42](https://github.com/andrerocker/deploy42) - [elwinar/rambler](https://github.com/elwinar/rambler) - [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman) - [jfbus/impressionist](https://github.com/jfbus/impressionist) - [Jmeyering/zealot](https://github.com/Jmeyering/zealot) - [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host) - [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go) - [thoas/picfit](https://github.com/thoas/picfit) - [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) - [jnuthong/item_search](https://github.com/jnuthong/item_search) - [Iris Web Framework](https://github.com/kataras/iris) ## Installation go get github.com/imdario/mergo // use in your .go code import ( "github.com/imdario/mergo" ) ## Usage You can only merge same-type structs with exported fields initialized as zero value of their type and 
same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection).

    if err := mergo.Merge(&dst, src); err != nil {
        // ...
    }

Additionally, you can map a map[string]interface{} to a struct (and otherwise, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field.

    if err := mergo.Map(&dst, srcMap); err != nil {
        // ...
    }

Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will be just assigned as values.

More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo).

### Nice example

```go
package main

import (
	"fmt"
	"github.com/imdario/mergo"
)

type Foo struct {
	A string
	B int64
}

func main() {
	src := Foo{
		A: "one",
	}

	dest := Foo{
		A: "two",
		B: 2,
	}

	mergo.Merge(&dest, src)

	fmt.Println(dest)
	// Will print
	// {two 2}
}
```

Note: if tests are failing due to a missing package, please execute:

    go get gopkg.in/yaml.v1

## Contact me

If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)

## About

Written by [Dario Castañé](http://dario.im).

## License

[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE).

================================================
FILE: vendor/github.com/imdario/mergo/doc.go
================================================
// Copyright 2013 Dario Castañé. All rights reserved.
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/* Package mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). Usage From my own work-in-progress project: type networkConfig struct { Protocol string Address string ServerType string `json: "server_type"` Port uint16 } type FssnConfig struct { Network networkConfig } var fssnDefault = FssnConfig { networkConfig { "tcp", "127.0.0.1", "http", 31560, }, } // Inside a function [...] if err := mergo.Merge(&config, fssnDefault); err != nil { log.Fatal(err) } // More code [...] */ package mergo ================================================ FILE: vendor/github.com/imdario/mergo/map.go ================================================ // Copyright 2014 Dario Castañé. All rights reserved. // Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Based on src/pkg/reflect/deepequal.go from official // golang's stdlib. package mergo import ( "fmt" "reflect" "unicode" "unicode/utf8" ) func changeInitialCase(s string, mapper func(rune) rune) string { if s == "" { return s } r, n := utf8.DecodeRuneInString(s) return string(mapper(r)) + s[n:] } func isExported(field reflect.StructField) bool { r, _ := utf8.DecodeRuneInString(field.Name) return r >= 'A' && r <= 'Z' } // Traverses recursively both values, assigning src's fields values to dst. // The map argument tracks comparisons that have already been seen, which allows // short circuiting on recursive types. 
func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, overwrite bool) (err error) { if dst.CanAddr() { addr := dst.UnsafeAddr() h := 17 * addr seen := visited[h] typ := dst.Type() for p := seen; p != nil; p = p.next { if p.ptr == addr && p.typ == typ { return nil } } // Remember, remember... visited[h] = &visit{addr, typ, seen} } zeroValue := reflect.Value{} switch dst.Kind() { case reflect.Map: dstMap := dst.Interface().(map[string]interface{}) for i, n := 0, src.NumField(); i < n; i++ { srcType := src.Type() field := srcType.Field(i) if !isExported(field) { continue } fieldName := field.Name fieldName = changeInitialCase(fieldName, unicode.ToLower) if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) { dstMap[fieldName] = src.Field(i).Interface() } } case reflect.Struct: srcMap := src.Interface().(map[string]interface{}) for key := range srcMap { srcValue := srcMap[key] fieldName := changeInitialCase(key, unicode.ToUpper) dstElement := dst.FieldByName(fieldName) if dstElement == zeroValue { // We discard it because the field doesn't exist. continue } srcElement := reflect.ValueOf(srcValue) dstKind := dstElement.Kind() srcKind := srcElement.Kind() if srcKind == reflect.Ptr && dstKind != reflect.Ptr { srcElement = srcElement.Elem() srcKind = reflect.TypeOf(srcElement.Interface()).Kind() } else if dstKind == reflect.Ptr { // Can this work? I guess it can't. 
if srcKind != reflect.Ptr && srcElement.CanAddr() { srcPtr := srcElement.Addr() srcElement = reflect.ValueOf(srcPtr) srcKind = reflect.Ptr } } if !srcElement.IsValid() { continue } if srcKind == dstKind { if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil { return } } else { if srcKind == reflect.Map { if err = deepMap(dstElement, srcElement, visited, depth+1, overwrite); err != nil { return } } else { return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) } } } } return } // Map sets fields' values in dst from src. // src can be a map with string keys or a struct. dst must be the opposite: // if src is a map, dst must be a valid pointer to struct. If src is a struct, // dst must be map[string]interface{}. // It won't merge unexported (private) fields and will do recursively // any exported field. // If dst is a map, keys will be src fields' names in lower camel case. // Missing key in src that doesn't match a field in dst will be skipped. This // doesn't apply if dst is a map. // This is separated method from Merge because it is cleaner and it keeps sane // semantics: merging equal types, mapping different (restricted) types. func Map(dst, src interface{}) error { return _map(dst, src, false) } // MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overriden by // non-empty src attribute values. func MapWithOverwrite(dst, src interface{}) error { return _map(dst, src, true) } func _map(dst, src interface{}, overwrite bool) error { var ( vDst, vSrc reflect.Value err error ) if vDst, vSrc, err = resolveValues(dst, src); err != nil { return err } // To be friction-less, we redirect equal-type arguments // to deepMerge. Only because arguments can be anything. 
if vSrc.Kind() == vDst.Kind() { return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite) } switch vSrc.Kind() { case reflect.Struct: if vDst.Kind() != reflect.Map { return ErrExpectedMapAsDestination } case reflect.Map: if vDst.Kind() != reflect.Struct { return ErrExpectedStructAsDestination } default: return ErrNotSupported } return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite) } ================================================ FILE: vendor/github.com/imdario/mergo/merge.go ================================================ // Copyright 2013 Dario Castañé. All rights reserved. // Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Based on src/pkg/reflect/deepequal.go from official // golang's stdlib. package mergo import ( "reflect" ) // Traverses recursively both values, assigning src's fields values to dst. // The map argument tracks comparisons that have already been seen, which allows // short circuiting on recursive types. func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, overwrite bool) (err error) { if !src.IsValid() { return } if dst.CanAddr() { addr := dst.UnsafeAddr() h := 17 * addr seen := visited[h] typ := dst.Type() for p := seen; p != nil; p = p.next { if p.ptr == addr && p.typ == typ { return nil } } // Remember, remember... 
visited[h] = &visit{addr, typ, seen} } switch dst.Kind() { case reflect.Struct: for i, n := 0, dst.NumField(); i < n; i++ { if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, overwrite); err != nil { return } } case reflect.Map: for _, key := range src.MapKeys() { srcElement := src.MapIndex(key) if !srcElement.IsValid() { continue } dstElement := dst.MapIndex(key) switch srcElement.Kind() { case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Slice: if srcElement.IsNil() { continue } fallthrough default: if !srcElement.CanInterface() { continue } switch reflect.TypeOf(srcElement.Interface()).Kind() { case reflect.Struct: fallthrough case reflect.Ptr: fallthrough case reflect.Map: if err = deepMerge(dstElement, srcElement, visited, depth+1, overwrite); err != nil { return } } } if !isEmptyValue(srcElement) && (overwrite || (!dstElement.IsValid() || isEmptyValue(dst))) { if dst.IsNil() { dst.Set(reflect.MakeMap(dst.Type())) } dst.SetMapIndex(key, srcElement) } } case reflect.Ptr: fallthrough case reflect.Interface: if src.IsNil() { break } else if dst.IsNil() || overwrite { if dst.CanSet() && (overwrite || isEmptyValue(dst)) { dst.Set(src) } } else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, overwrite); err != nil { return } default: if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) { dst.Set(src) } } return } // Merge will fill any empty for value type attributes on the dst struct using corresponding // src attributes if they themselves are not empty. dst and src must be valid same-type structs // and dst must be a pointer to struct. // It won't merge unexported (private) fields and will do recursively any exported field. func Merge(dst, src interface{}) error { return merge(dst, src, false) } // MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overriden by // non-empty src attribute values. 
func MergeWithOverwrite(dst, src interface{}) error { return merge(dst, src, true) } func merge(dst, src interface{}, overwrite bool) error { var ( vDst, vSrc reflect.Value err error ) if vDst, vSrc, err = resolveValues(dst, src); err != nil { return err } if vDst.Type() != vSrc.Type() { return ErrDifferentArgumentsTypes } return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, overwrite) } ================================================ FILE: vendor/github.com/imdario/mergo/mergo.go ================================================ // Copyright 2013 Dario Castañé. All rights reserved. // Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Based on src/pkg/reflect/deepequal.go from official // golang's stdlib. package mergo import ( "errors" "reflect" ) // Errors reported by Mergo when it finds invalid arguments. var ( ErrNilArguments = errors.New("src and dst must not be nil") ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") ErrNotSupported = errors.New("only structs and maps are supported") ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") ) // During deepMerge, must keep track of checks that are // in progress. The comparison algorithm assumes that all // checks in progress are true when it reencounters them. // Visited are stored in a map indexed by 17 * a1 + a2; type visit struct { ptr uintptr typ reflect.Type next *visit } // From src/pkg/encoding/json. 
func isEmptyValue(v reflect.Value) bool { switch v.Kind() { case reflect.Array, reflect.Map, reflect.Slice, reflect.String: return v.Len() == 0 case reflect.Bool: return !v.Bool() case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: return v.Int() == 0 case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return v.Uint() == 0 case reflect.Float32, reflect.Float64: return v.Float() == 0 case reflect.Interface, reflect.Ptr: return v.IsNil() } return false } func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { if dst == nil || src == nil { err = ErrNilArguments return } vDst = reflect.ValueOf(dst).Elem() if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { err = ErrNotSupported return } vSrc = reflect.ValueOf(src) // We check if vSrc is a pointer to dereference it. if vSrc.Kind() == reflect.Ptr { vSrc = vSrc.Elem() } return } // Traverses recursively both values, assigning src's fields values to dst. // The map argument tracks comparisons that have already been seen, which allows // short circuiting on recursive types. func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { if dst.CanAddr() { addr := dst.UnsafeAddr() h := 17 * addr seen := visited[h] typ := dst.Type() for p := seen; p != nil; p = p.next { if p.ptr == addr && p.typ == typ { return nil } } // Remember, remember... 
visited[h] = &visit{addr, typ, seen} } return // TODO refactor } ================================================ FILE: vendor/github.com/mitchellh/go-homedir/LICENSE ================================================ The MIT License (MIT) Copyright (c) 2013 Mitchell Hashimoto Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: vendor/github.com/mitchellh/go-homedir/README.md ================================================ # go-homedir This is a Go library for detecting the user's home directory without the use of cgo, so the library can be used in cross-compilation environments. Usage is incredibly simple, just call `homedir.Dir()` to get the home directory for a user, and `homedir.Expand()` to expand the `~` in a path to the home directory. **Why not just use `os/user`?** The built-in `os/user` package requires cgo on Darwin systems. This means that any Go code that uses that package cannot cross compile. 
But 99% of the time the use for `os/user` is just to retrieve the home directory, which we can do for the current user without cgo. This library does that, enabling cross-compilation. ================================================ FILE: vendor/github.com/mitchellh/go-homedir/homedir.go ================================================ package homedir import ( "bytes" "errors" "os" "os/exec" "path/filepath" "runtime" "strconv" "strings" "sync" ) // DisableCache will disable caching of the home directory. Caching is enabled // by default. var DisableCache bool var homedirCache string var cacheLock sync.RWMutex // Dir returns the home directory for the executing user. // // This uses an OS-specific method for discovering the home directory. // An error is returned if a home directory cannot be detected. func Dir() (string, error) { if !DisableCache { cacheLock.RLock() cached := homedirCache cacheLock.RUnlock() if cached != "" { return cached, nil } } cacheLock.Lock() defer cacheLock.Unlock() var result string var err error if runtime.GOOS == "windows" { result, err = dirWindows() } else { // Unix-like system, so just assume Unix result, err = dirUnix() } if err != nil { return "", err } homedirCache = result return result, nil } // Expand expands the path to include the home directory if the path // is prefixed with `~`. If it isn't prefixed with `~`, the path is // returned as-is. 
func Expand(path string) (string, error) { if len(path) == 0 { return path, nil } if path[0] != '~' { return path, nil } if len(path) > 1 && path[1] != '/' && path[1] != '\\' { return "", errors.New("cannot expand user-specific home dir") } dir, err := Dir() if err != nil { return "", err } return filepath.Join(dir, path[1:]), nil } func dirUnix() (string, error) { // First prefer the HOME environmental variable if home := os.Getenv("HOME"); home != "" { return home, nil } // If that fails, try getent var stdout bytes.Buffer cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) cmd.Stdout = &stdout if err := cmd.Run(); err != nil { // If "getent" is missing, ignore it if err == exec.ErrNotFound { return "", err } } else { if passwd := strings.TrimSpace(stdout.String()); passwd != "" { // username:password:uid:gid:gecos:home:shell passwdParts := strings.SplitN(passwd, ":", 7) if len(passwdParts) > 5 { return passwdParts[5], nil } } } // If all else fails, try the shell stdout.Reset() cmd = exec.Command("sh", "-c", "cd && pwd") cmd.Stdout = &stdout if err := cmd.Run(); err != nil { return "", err } result := strings.TrimSpace(stdout.String()) if result == "" { return "", errors.New("blank output when reading home directory") } return result, nil } func dirWindows() (string, error) { // First prefer the HOME environmental variable if home := os.Getenv("HOME"); home != "" { return home, nil } drive := os.Getenv("HOMEDRIVE") path := os.Getenv("HOMEPATH") home := drive + path if drive == "" || path == "" { home = os.Getenv("USERPROFILE") } if home == "" { return "", errors.New("HOMEDRIVE, HOMEPATH, and USERPROFILE are blank") } return home, nil } ================================================ FILE: vendor/github.com/wsxiaoys/terminal/LICENSE ================================================ Copyright (c) 2013 Meng Zhang. All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================ FILE: vendor/github.com/wsxiaoys/terminal/color/color.go ================================================ // The colors package provide a simple way to bring colorful characters to terminal interface. 
// // This example will output the text with a Blue foreground and a Black background // color.Println("@{bK}Example Text") // // This one will output the text with a red foreground // color.Println("@rExample Text") // // This one will escape the @ // color.Println("@@") // // Full color syntax code // @{rgbcmykwRGBCMYKW} foreground/background color // r/R: Red // g/G: Green // b/B: Blue // c/C: Cyan // m/M: Magenta // y/Y: Yellow // k/K: Black // w/W: White // @{|} Reset format style // @{!./_} Bold / Dim / Italic / Underline // @{^&} Blink / Fast blink // @{*} High intensity foreground color // @{?} Reverse the foreground and background color // @{-} Hide the text // Note some of the functions are not widely supported, like "Fast blink" and "Italic". package color import ( "bytes" "errors" "fmt" "io" "log" ) const ( EscapeChar = '@' // Escape character for color syntax ResetCode = "\033[0m" // Short for reset to default style ) // Mapping from character to concrete escape code. var codeMap = map[int]int{ '|': 0, '!': 1, '.': 2, '/': 3, '_': 4, '^': 5, '&': 6, '?': 7, '-': 8, '*': 60, 'k': 30, 'r': 31, 'g': 32, 'y': 33, 'b': 34, 'm': 35, 'c': 36, 'w': 37, 'd': 39, 'K': 40, 'R': 41, 'G': 42, 'Y': 43, 'B': 44, 'M': 45, 'C': 46, 'W': 47, 'D': 49, } // Compile color syntax string like "rG" to escape code. 
func Colorize(x string) string { attr := 0 fg := 39 bg := 49 for _, key := range x { c, ok := codeMap[int(key)] switch { case !ok: log.Printf("Wrong color syntax: %c", key) case 0 <= c && c <= 8: attr = c case 30 <= c && c <= 37: fg = c case 40 <= c && c <= 47: bg = c case c == 60: fg += c } } return fmt.Sprintf("\033[%d;%d;%dm", attr, fg, bg) } // Handle state after meeting one '@' func compileColorSyntax(input, output *bytes.Buffer) { i, _, err := input.ReadRune() if err != nil { // EOF got log.Print("Parse failed on color syntax") return } switch i { default: output.WriteString(Colorize(string(i))) case '{': color := bytes.NewBufferString("") for { i, _, err := input.ReadRune() if err != nil { log.Print("Parse failed on color syntax") break } if i == '}' { break } color.WriteRune(i) } output.WriteString(Colorize(color.String())) case EscapeChar: output.WriteRune(EscapeChar) } } // Compile the string and replace color syntax with concrete escape code. func compile(x string) string { if x == "" { return "" } input := bytes.NewBufferString(x) output := bytes.NewBufferString("") for { i, _, err := input.ReadRune() if err != nil { break } switch i { default: output.WriteRune(i) case EscapeChar: compileColorSyntax(input, output) } } return output.String() } // Compile multiple values, only do compiling on string type. func compileValues(a *[]interface{}) { for i, x := range *a { if str, ok := x.(string); ok { (*a)[i] = compile(str) } } } // Similar to fmt.Print, will reset the color at the end. func Print(a ...interface{}) (int, error) { a = append(a, ResetCode) compileValues(&a) return fmt.Print(a...) } // Similar to fmt.Println, will reset the color at the end. func Println(a ...interface{}) (int, error) { a = append(a, ResetCode) compileValues(&a) return fmt.Println(a...) } // Similar to fmt.Printf, will reset the color at the end. 
func Printf(format string, a ...interface{}) (int, error) { format += ResetCode format = compile(format) return fmt.Printf(format, a...) } // Similar to fmt.Fprint, will reset the color at the end. func Fprint(w io.Writer, a ...interface{}) (int, error) { a = append(a, ResetCode) compileValues(&a) return fmt.Fprint(w, a...) } // Similar to fmt.Fprintln, will reset the color at the end. func Fprintln(w io.Writer, a ...interface{}) (int, error) { a = append(a, ResetCode) compileValues(&a) return fmt.Fprintln(w, a...) } // Similar to fmt.Fprintf, will reset the color at the end. func Fprintf(w io.Writer, format string, a ...interface{}) (int, error) { format += ResetCode format = compile(format) return fmt.Fprintf(w, format, a...) } // Similar to fmt.Sprint, will reset the color at the end. func Sprint(a ...interface{}) string { a = append(a, ResetCode) compileValues(&a) return fmt.Sprint(a...) } // Similar to fmt.Sprintf, will reset the color at the end. func Sprintf(format string, a ...interface{}) string { format += ResetCode format = compile(format) return fmt.Sprintf(format, a...) } // Similar to fmt.Errorf, will reset the color at the end. func Errorf(format string, a ...interface{}) error { return errors.New(Sprintf(format, a...)) } ================================================ FILE: vendor/github.com/zackbloom/go-ini/.gitignore ================================================ # Compiled Object files, Static and Dynamic libs (Shared Objects) *.o *.a *.so # Folders _obj _test # Architecture specific extensions/prefixes *.[568vq] [568vq].out *.cgo1.go *.cgo2.c _cgo_defun.c _cgo_gotypes.go _cgo_export.* _testmain.go *.exe *.test *.prof *.nope ================================================ FILE: vendor/github.com/zackbloom/go-ini/README.md ================================================ go-ini ====== INI file decoder for Go lang. Idea is to have an unmarshaller similar to JSON - specify parts of the file you want coded with structs and tags. 
For example, for an INI file like this: [Pdo_mysql] cache_size = 2000 default_socket = /tmp/mysql.sock [Mysql] default_socket = /tmp/mysql.sock Decode into a structure like this: type MyIni struct { PdoMysql struct { CacheSize int `ini:"cache_size"` DefaultSocket string `ini:"default_socket"` } `ini:"[Pdo_mysql]"` Mysql struct { DefaultSocket string `ini:"default_socket"` } `ini:"[Mysql]"` } With code like this: var config MyIni var b []byte // config file stored here err := ini.Unmarshal(b, &config) Advanced Types ============== Over the years, INI files have grown from simple `name=value` lists of properties to files that support arrays and arrays of structures. For example, to support playlists a music config file may look like this: [CREATE SONG] SongId=21348 Title=Long Way to Go Artist=The Coach [CREATE SONG] SongId=9855 Title=The Falcon Lead Artist=It Wasn't Safe [CREATE PLAYLIST] PlaylistId=438432 Title=Acid Jazz Song=21348 Song=482 Song=9855 [CREATE PLAYLIST] PlaylistId=2585 Title=Lounge Song=7558 Song=25828 With GO-INI, parsing is as simple as defining the structure and unmarshalling it.
package main import ( "encoding/json" "github.com/sspencer/go-ini" "io/ioutil" "log" "os" ) type TunePlayer struct { Songs []struct { SongId int Title string Artist string } `ini:"[CREATE SONG]"` Playlists []struct { PlaylistId int Title string SongIds []int `ini:"Song"` } `ini:[CREATE PLAYLIST]` } func main() { var player TunePlayer content, err := ioutil.ReadFile("./tunes.ini") if err != nil { log.Fatal(err) } err = ini.Unmarshal(content, &player) if err != nil { log.Fatal(err) } // Output same struct as JSON to verify parsing worked enc := json.NewEncoder(os.Stdout) if err := enc.Encode(&player); err != nil { log.Println(err) } } Todo ===== Need to parse inner array of structs struct { Playlists []struct { Id int Title string Programs []struct { Id int Mix string Separation int } `ini:"Play Program"` } `ini:"[CREATE PLAYLIST]"` } [CREATE PLAYLIST] ID=6524 Title=Pop Start Schedule Play Program ID=391 Mix=RAND Play Program ID=3912 Separation=10 End Schedule ================================================ FILE: vendor/github.com/zackbloom/go-ini/decode.go ================================================ // Decode INI files with a syntax similar to JSON decoding package ini import ( "bufio" "bytes" "fmt" "io" "io/ioutil" "reflect" "strconv" "strings" ) type Unmatched struct { lineNum int line string } type IniError struct { lineNum int line string error string } // decodeState represents the state while decoding a INI value. type decodeState struct { lineNum int line string scanner *bufio.Scanner savedError error unmatched []Unmatched } type property struct { tag string value reflect.Value children propertyMap isArray bool //array []interface{} isInitialized bool } type propertyMap map[string]property //------------------------------------------------------------------ // NewStack returns a new stack. func NewPropMapStack() *PropMapStack { return &PropMapStack{} } // Stack is a basic LIFO stack that resizes as needed. 
type PropMapStack struct { items []propertyMap count int } // Push adds an iterm to the top of the stack func (s *PropMapStack) Push(item propertyMap) { s.items = append(s.items[:s.count], item) s.count++ } // Pop removes the top item (LIFO) from the stack func (s *PropMapStack) Pop() propertyMap { if s.count == 0 { return nil } s.count-- return s.items[s.count] } // Peek returns item at top of stack without removing it func (s *PropMapStack) Peek() propertyMap { if s.count == 0 { return nil } return s.items[s.count-1] } // Empty returns true when stack is empty, false otherwise func (s *PropMapStack) Empty() bool { return s.count == 0 } // Size returns the number of items in the stack func (s *PropMapStack) Size() int { return s.count } /* * Unmarshal parses the INI-encoded data and stores the result * in the value pointed to by v. */ func Unmarshal(data []byte, v interface{}) error { var d decodeState d.init(data) return d.unmarshal(v) } /* * String interfacer for Unmatched */ func (u Unmatched) String() string { return fmt.Sprintf("%d %s", u.lineNum, u.line) } /* * Conform to Error Interfacer */ func (e *IniError) Error() string { if e.lineNum > 0 { return fmt.Sprintf("%s on line %d: \"%s\"", e.error, e.lineNum, e.line) } else { return e.error } } /* * Stringer interface for property */ func (p property) String() string { return fmt.Sprintf("", p.tag, p.isArray) } /* * Convenience function to prep for decoding byte array. */ func (d *decodeState) init(data []byte) *decodeState { d.lineNum = 0 d.line = "" d.scanner = bufio.NewScanner(bytes.NewReader(data)) d.savedError = nil return d } /* * saveError saves the first err it is called with, * for reporting at the end of the unmarshal. */ func (d *decodeState) saveError(err error) { if d.savedError == nil { d.savedError = err } } /* * Recursive function to map data types in the describing structs * to string markers (tags) in the INI file. 
*/ func (d *decodeState) generateMap(m propertyMap, v reflect.Value) { if v.Type().Kind() == reflect.Ptr { d.generateMap(m, v.Elem()) } else if v.Kind() == reflect.Struct { typ := v.Type() for i := 0; i < typ.NumField(); i++ { sf := typ.Field(i) f := v.Field(i) kind := f.Type().Kind() tag := sf.Tag.Get("ini") if len(tag) == 0 { tag = sf.Name } tag = strings.TrimSpace(strings.ToLower(tag)) st := property{tag, f, make(propertyMap), kind == reflect.Slice, true} // some structures are just for organizing data if tag != "-" { m[tag] = st } if kind == reflect.Struct { if tag == "-" { d.generateMap(m, f) } else { // little namespacing here so property names can // be the same under different sections //fmt.Printf("Struct tag: %s, type: %s\n", tag, f.Type()) d.generateMap(st.children, f) } } else if kind == reflect.Slice { d.generateMap(st.children, reflect.New(f.Type().Elem())) } } } } /* * Iterates line-by-line through INI file setting values into a struct. */ func (d *decodeState) unmarshal(x interface{}) error { var topMap propertyMap topMap = make(propertyMap) d.generateMap(topMap, reflect.ValueOf(x)) propStack := NewPropMapStack() propStack.Push(topMap) // for every line in file for d.scanner.Scan() { if d.savedError != nil { break // breaks on first error } d.line = d.scanner.Text() d.lineNum++ line := strings.TrimSpace(d.line) if len(line) < 1 || line[0] == ';' || line[0] == '#' { continue // skip comments } // Two types of lines: // 1. NAME=VALUE (at least one equal sign - breaks on first) // 2. 
[HEADER] (no equals sign, square brackets NOT required) matches := strings.SplitN(line, "=", 2) matched := false pn := "" pv := "" if len(matches) == 2 { // NAME=VALUE pn = strings.ToLower(strings.TrimSpace(matches[0])) pv = strings.TrimSpace(matches[1]) prop := propStack.Peek()[pn] if prop.isInitialized { if prop.isArray { value := reflect.New(prop.value.Type().Elem()) d.setValue(reflect.Indirect(value), pv) appendValue(prop.value, value) } else { d.setValue(prop.value, pv) } matched = true } // What if property is umatched - keep popping the stack // until a potential map is found or stay within current section? // Think answer is pop. // NOPE // Section could have unmatched name=value if user doesn't // care about certain values - only stack crawling happens // during numMatches==1 time? // This means if there is > 1 section, there needs to be // section breaks for everything } else { // [Header] section pn = strings.ToLower(strings.TrimSpace(matches[0])) for propStack.Size() > 0 { prop := propStack.Peek()[pn] if prop.isInitialized { propStack.Push(prop.children) matched = true break } else if propStack.Size() > 1 { _ = propStack.Pop() } else { break } } } if !matched { d.unmatched = append(d.unmatched, Unmatched{d.lineNum, d.line}) } } return d.savedError } func (d *decodeState) unmarshal2(x interface{}) error { var sectionMap propertyMap = make(propertyMap) var tempMap propertyMap = make(propertyMap) var section, nextSection property var inSection, nextHasSection bool = false, false var tempValue reflect.Value // "temp" is for filling in array of structs var numTempValue int d.generateMap(sectionMap, reflect.ValueOf(x)) for d.scanner.Scan() { if d.savedError != nil { break } d.line = d.scanner.Text() d.lineNum++ //fmt.Printf("%03d: %s\n", d.lineNum, d.line) line := strings.ToLower(strings.TrimSpace(d.line)) if len(line) < 1 || line[0] == ';' || line[0] == '#' { continue // skip comments } // [Sections] could appear at any time (square brackets not required) // 
When in a section, also look in children map nextSection, nextHasSection = sectionMap[line] if nextHasSection { if numTempValue > 0 && section.isArray { appendValue(section.value, tempValue) } section = nextSection inSection = true if section.isArray { tempValue = reflect.New(section.value.Type().Elem()) d.generateMap(tempMap, tempValue) } numTempValue = 0 continue } // unrecognized section - exit out of current section if line[0] == '[' && line[len(line)-1] == ']' { inSection = false continue } matches := strings.SplitN(d.line, "=", 2) matched := false // potential property=value if len(matches) == 2 { n := strings.ToLower(strings.TrimSpace(matches[0])) s := strings.TrimSpace(matches[1]) if inSection { // child property, within a section childProperty, hasProp := section.children[n] if hasProp { if section.isArray { tempProperty := tempMap[n] numTempValue++ d.setValue(tempProperty.value, s) } else { d.setValue(childProperty.value, s) } matched = true } } if !matched { // top level property topLevelProperty, hasProp := sectionMap[n] if hasProp { // just encountered a top level property - switch out of section mode inSection = false matched = true d.setValue(topLevelProperty.value, s) } } } if !matched { d.unmatched = append(d.unmatched, Unmatched{d.lineNum, d.line}) } } if numTempValue > 0 { appendValue(section.value, tempValue) } return d.savedError } func appendValue(arr, val reflect.Value) { arr.Set(reflect.Append(arr, reflect.Indirect(val))) } // Set Value with given string func (d *decodeState) setValue(v reflect.Value, s string) { //fmt.Printf("SET(kind:%s, %s)\n", v.Kind(), s) switch v.Kind() { case reflect.String: v.SetString(s) case reflect.Bool: v.SetBool(boolValue(s)) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: n, err := strconv.ParseInt(s, 10, 64) if err != nil || v.OverflowInt(n) { d.saveError(&IniError{d.lineNum, d.line, "Invalid int"}) return } v.SetInt(n) case reflect.Uint, reflect.Uint8, reflect.Uint16, 
reflect.Uint32, reflect.Uint64, reflect.Uintptr: n, err := strconv.ParseUint(s, 10, 64) if err != nil || v.OverflowUint(n) { d.saveError(&IniError{d.lineNum, d.line, "Invalid uint"}) return } v.SetUint(n) case reflect.Float32, reflect.Float64: n, err := strconv.ParseFloat(s, v.Type().Bits()) if err != nil || v.OverflowFloat(n) { d.saveError(&IniError{d.lineNum, d.line, "Invalid float"}) return } v.SetFloat(n) case reflect.Slice: d.sliceValue(v, s) default: d.saveError(&IniError{d.lineNum, d.line, fmt.Sprintf("Can't set value of type %s", v.Kind())}) } } func (d *decodeState) sliceValue(v reflect.Value, s string) { //fmt.Printf(":SLICE(%s, %s)\n", v.Kind(), s) switch v.Type().Elem().Kind() { case reflect.String: v.Set(reflect.Append(v, reflect.ValueOf(s))) case reflect.Bool: v.Set(reflect.Append(v, reflect.ValueOf(boolValue(s)))) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: // Hardcoding of []int temporarily n, err := strconv.ParseInt(s, 10, 64) if err != nil { d.saveError(&IniError{d.lineNum, d.line, "Invalid int"}) return } n1 := reflect.ValueOf(n) n2 := n1.Convert(v.Type().Elem()) v.Set(reflect.Append(v, n2)) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: n, err := strconv.ParseUint(s, 10, 64) if err != nil { d.saveError(&IniError{d.lineNum, d.line, "Invalid uint"}) return } n1 := reflect.ValueOf(n) n2 := n1.Convert(v.Type().Elem()) v.Set(reflect.Append(v, n2)) case reflect.Float32, reflect.Float64: n, err := strconv.ParseFloat(s, 64) if err != nil { d.saveError(&IniError{d.lineNum, d.line, "Invalid float"}) return } n1 := reflect.ValueOf(n) n2 := n1.Convert(v.Type().Elem()) v.Set(reflect.Append(v, n2)) default: d.saveError(&IniError{d.lineNum, d.line, fmt.Sprintf("Can't set value in array of type %s", v.Type().Elem().Kind())}) } } // Returns true for truthy values like t/true/y/yes/1, false otherwise func boolValue(s string) bool { v := false switch strings.ToLower(s) { case "t", 
"true", "y", "yes", "1": v = true } return v } // A Decoder reads and decodes INI object from an input stream. type Decoder struct { r io.Reader d decodeState } // NewDecoder returns a new decoder that reads from r. // // The decoder introduces its own buffering and may // read data from r beyond the JSON values requested. func NewDecoder(r io.Reader) *Decoder { return &Decoder{r: r} } // Decode reads the INI file and stores it in the value pointed to by v. // // See the documentation for Unmarshal for details about // the conversion of an INI into a Go value. func (dec *Decoder) Decode(v interface{}) error { buf, readErr := ioutil.ReadAll(dec.r) if readErr != nil { return readErr } // Don't save err from unmarshal into dec.err: // the connection is still usable since we read a complete JSON // object from it before the error happened. dec.d.init(buf) err := dec.d.unmarshal(v) return err } // UnparsedLines returns an array of strings where each string is an // unparsed line from the file. func (dec *Decoder) Unmatched() []Unmatched { return dec.d.unmatched } ================================================ FILE: vendor/github.com/zackbloom/go-ini/stack.go ================================================ package ini // NewStack returns a new stack. func NewStack() *Stack { return &Stack{} } // Stack is a basic LIFO stack that resizes as needed. 
type Stack struct { items []interface{} count int } // Push adds an item to the top of the stack func (s *Stack) Push(item interface{}) { s.items = append(s.items[:s.count], item) s.count++ } // Pop removes the top item (LIFO) from the stack func (s *Stack) Pop() interface{} { if s.count == 0 { return nil } s.count-- return s.items[s.count] } // Peek returns item at top of stack without removing it func (s *Stack) Peek() interface{} { if s.count == 0 { return nil } return s.items[s.count-1] } // Empty returns true when stack is empty, false otherwise func (s *Stack) Empty() bool { return s.count == 0 } // Size returns the number of items in the stack func (s *Stack) Size() int { return s.count } ================================================ FILE: vendor/github.com/zackbloom/goamz/LICENSE ================================================ This software is licensed under the LGPLv3, included below. As a special exception to the GNU Lesser General Public License version 3 ("LGPL3"), the copyright holders of this Library give you permission to convey to a third party a Combined Work that links statically or dynamically to this Library without providing any Minimal Corresponding Source or Minimal Application Code as set out in 4d or providing the installation information set out in section 4e, provided that you comply with the other provisions of LGPL3 and provided that you meet, for the Application the terms and conditions of the license(s) which apply to the Application. Except as stated in this special exception, the provisions of LGPL3 will continue to comply in full to this Library. If you modify this Library, you may apply this exception to your version of this Library, but you are not obliged to do so. If you do not wish to do so, delete this exception statement from your version. This exception does not (and cannot) modify any license terms which apply to the Application, with which you must still comply.
GNU LESSER GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below. 0. Additional Definitions. As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License. "The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below. An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library. A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version". The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. 1. Exception to Section 3 of the GNU GPL. You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. 2. Conveying Modified Versions. 
If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. 3. Object Code Incorporating Material from Library Header Files. The object code form of an Application may incorporate material from a header file that is part of the Library. You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the object code with a copy of the GNU GPL and this license document. 4. Combined Works. You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the Combined Work with a copy of the GNU GPL and this license document. 
c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. d) Do one of the following: 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. 1) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) 5. Combined Libraries. 
You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 6. Revised Versions of the GNU Lesser General Public License. The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. 
================================================ FILE: vendor/github.com/zackbloom/goamz/aws/attempt.go ================================================ package aws import ( "time" ) // AttemptStrategy represents a strategy for waiting for an action // to complete successfully. This is an internal type used by the // implementation of other goamz packages. type AttemptStrategy struct { Total time.Duration // total duration of attempt. Delay time.Duration // interval between each try in the burst. Min int // minimum number of retries; overrides Total } type Attempt struct { strategy AttemptStrategy last time.Time end time.Time force bool count int } // Start begins a new sequence of attempts for the given strategy. func (s AttemptStrategy) Start() *Attempt { now := time.Now() return &Attempt{ strategy: s, last: now, end: now.Add(s.Total), force: true, } } // Next waits until it is time to perform the next attempt or returns // false if it is time to stop trying. func (a *Attempt) Next() bool { now := time.Now() sleep := a.nextSleep(now) if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count { return false } a.force = false if sleep > 0 && a.count > 0 { time.Sleep(sleep) now = time.Now() } a.count++ a.last = now return true } func (a *Attempt) nextSleep(now time.Time) time.Duration { sleep := a.strategy.Delay - now.Sub(a.last) if sleep < 0 { return 0 } return sleep } // HasNext returns whether another attempt will be made if the current // one fails. If it returns true, the following call to Next is // guaranteed to return true. func (a *Attempt) HasNext() bool { if a.force || a.strategy.Min > a.count { return true } now := time.Now() if now.Add(a.nextSleep(now)).Before(a.end) { a.force = true return true } return false } ================================================ FILE: vendor/github.com/zackbloom/goamz/aws/aws.go ================================================ // // goamz - Go packages to interact with the Amazon Web Services. 
//
// https://wiki.ubuntu.com/goamz
//
// Copyright (c) 2011 Canonical Ltd.
//
// Written by Gustavo Niemeyer
//

package aws

import (
	"encoding/json"
	"encoding/xml"
	"errors"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"net/url"
	"os"
	"os/user"
	"path"
	"regexp"
	"strings"
	"time"
)

// Regular expressions for INI files
var (
	// iniSectionRegexp matches a profile header such as "[default]".
	iniSectionRegexp = regexp.MustCompile(`^\s*\[([^\[\]]+)\]\s*$`)
	// iniSettingRegexp matches a "key = value" setting line.
	iniSettingRegexp = regexp.MustCompile(`^\s*(.+?)\s*=\s*(.*\S)\s*$`)
)

// Defines the valid signers
const (
	V2Signature      = iota
	V4Signature      = iota
	Route53Signature = iota
)

// Defines the service endpoint and correct Signer implementation to use
// to sign requests for this endpoint
type ServiceInfo struct {
	Endpoint string
	Signer   uint
}

// Region defines the URLs where AWS services may be accessed.
//
// See http://goo.gl/d8BP1 for more details.
type Region struct {
	Name                   string // the canonical name of this region.
	EC2Endpoint            string
	S3Endpoint             string
	S3BucketEndpoint       string // Not needed by AWS S3. Use ${bucket} for bucket name.
	S3LocationConstraint   bool   // true if this region requires a LocationConstraint declaration.
	S3LowercaseBucket      bool   // true if the region requires bucket names to be lower case.
	SDBEndpoint            string
	SNSEndpoint            string
	SQSEndpoint            string
	SESEndpoint            string
	IAMEndpoint            string
	ELBEndpoint            string
	DynamoDBEndpoint       string
	CloudWatchServicepoint ServiceInfo
	AutoScalingEndpoint    string
	RDSEndpoint            ServiceInfo
	KinesisEndpoint        string
	STSEndpoint            string
	CloudFormationEndpoint string
	ElastiCacheEndpoint    string
}

// Regions maps canonical region names (e.g. "us-east-1") to their Region
// definitions; the individual values are declared in regions.go.
var Regions = map[string]Region{
	APNortheast.Name:  APNortheast,
	APSoutheast.Name:  APSoutheast,
	APSoutheast2.Name: APSoutheast2,
	EUCentral.Name:    EUCentral,
	EUWest.Name:       EUWest,
	USEast.Name:       USEast,
	USWest.Name:       USWest,
	USWest2.Name:      USWest2,
	USGovWest.Name:    USGovWest,
	SAEast.Name:       SAEast,
}

// Designates a signer interface suitable for signing AWS requests, params
// should be appropriately encoded for the request before signing.
//
// A signer should be initialized with Auth and the appropriate endpoint.
type Signer interface {
	Sign(method, path string, params map[string]string)
}

// An AWS Service interface with the API to query the AWS service
//
// Supplied as an easy way to mock out service calls during testing.
type AWSService interface {
	// Queries the AWS service at a given method/path with the params and
	// returns an http.Response and error
	Query(method, path string, params map[string]string) (*http.Response, error)
	// Builds an error given an XML payload in the http.Response, can be used
	// to process an error if the status code is not 200 for example.
	BuildError(r *http.Response) error
}

// Implements a Server Query/Post API to easily query AWS services and build
// errors when desired
type Service struct {
	service ServiceInfo
	signer  Signer
}

// Create a base set of params for an action
func MakeParams(action string) map[string]string {
	params := make(map[string]string)
	params["Action"] = action
	return params
}

// Create a new AWS server to handle making requests
func NewService(auth Auth, service ServiceInfo) (s *Service, err error) {
	var signer Signer
	switch service.Signer {
	case V2Signature:
		signer, err = NewV2Signer(auth, service)
	// case V4Signature:
	// signer, err = NewV4Signer(auth, service, Regions["eu-west-1"])
	default:
		err = fmt.Errorf("Unsupported signer for service")
	}
	if err != nil {
		return
	}
	s = &Service{service: service, signer: signer}
	return
}

// Query signs the params and issues a GET or POST request against the
// service endpoint. Any other method returns a nil response and nil error.
func (s *Service) Query(method, path string, params map[string]string) (resp *http.Response, err error) {
	params["Timestamp"] = time.Now().UTC().Format(time.RFC3339)
	u, err := url.Parse(s.service.Endpoint)
	if err != nil {
		return nil, err
	}
	u.Path = path

	s.signer.Sign(method, path, params)
	if method == "GET" {
		u.RawQuery = multimap(params).Encode()
		resp, err = http.Get(u.String())
	} else if method == "POST" {
		resp, err = http.PostForm(u.String(), multimap(params))
	}

	return
}

// BuildError decodes an XML error payload from the response body into an
// *Error. The decode error is deliberately ignored: when no message can
// be extracted, the HTTP status text is used as a fallback.
func (s *Service) BuildError(r *http.Response) error {
	errors := ErrorResponse{}
	xml.NewDecoder(r.Body).Decode(&errors)
	var err Error
	err = errors.Errors
	err.RequestId = errors.RequestId
	err.StatusCode = r.StatusCode
	if err.Message == "" {
		err.Message = r.Status
	}
	return &err
}

// ServiceError is an error that also carries an AWS-assigned error code.
type ServiceError interface {
	error
	ErrorCode() string
}

// ErrorResponse is the XML envelope AWS wraps service errors in.
type ErrorResponse struct {
	Errors    Error  `xml:"Error"`
	RequestId string // A unique ID for tracking the request
}

// Error holds the details of a single AWS service error.
type Error struct {
	StatusCode int
	Type       string
	Code       string
	Message    string
	RequestId  string
}

func (err *Error) Error() string {
	return fmt.Sprintf("Type: %s, Code: %s, Message: %s",
		err.Type, err.Code, err.Message,
	)
}

func (err *Error) ErrorCode() string {
	return err.Code
}

// Auth holds a set of AWS credentials plus an optional session token and
// its expiration time.
type Auth struct {
	AccessKey, SecretKey string
	token                string
	expiration           time.Time
}

// Token returns the session token, or "" if none is set. When the
// credentials are within 30 seconds of expiring, the whole Auth is
// refreshed in place via GetAuth; the refresh error is discarded.
func (a *Auth) Token() string {
	if a.token == "" {
		return ""
	}
	if time.Since(a.expiration) >= -30*time.Second { //in an ideal world this should be zero assuming the instance is synching it's clock
		*a, _ = GetAuth("", "", "", time.Time{})
	}
	return a.token
}

// Expiration returns when these credentials expire.
func (a *Auth) Expiration() time.Time {
	return a.expiration
}

// To be used with other APIs that return auth credentials such as STS
func NewAuth(accessKey, secretKey, token string, expiration time.Time) *Auth {
	return &Auth{
		AccessKey:  accessKey,
		SecretKey:  secretKey,
		token:      token,
		expiration: expiration,
	}
}

// ResponseMetadata
type ResponseMetadata struct {
	RequestId string // A unique ID for tracking the request
}

type BaseResponse struct {
	ResponseMetadata ResponseMetadata
}

// unreserved marks ASCII bytes that need no percent-encoding (see Encode).
var unreserved = make([]bool, 128)
var hex = "0123456789ABCDEF"

func init() {
	// RFC3986
	u := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567890-_.~"
	for _, c := range u {
		unreserved[c] = true
	}
}

// multimap converts a flat param map into url.Values (one value per key).
func multimap(p map[string]string) url.Values {
	q := make(url.Values, len(p))
	for k, v := range p {
		q[k] = []string{v}
	}
	return q
}

// credentials mirrors the JSON document served by the EC2 instance
// metadata endpoint for an IAM role.
type credentials struct {
	Code            string
	LastUpdated     string
	Type            string
	AccessKeyId     string
	SecretAccessKey string
	Token           string
	Expiration      string
}

// GetMetaData retrieves instance metadata about the current machine.
//
// See http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AESDG-chapter-instancedata.html for more details.
func GetMetaData(path string) (contents []byte, err error) {
	// Short dial timeout and 5s deadline: the metadata endpoint is
	// link-local and should answer immediately or not at all.
	c := http.Client{
		Transport: &http.Transport{
			Dial: func(netw, addr string) (net.Conn, error) {
				deadline := time.Now().Add(5 * time.Second)
				c, err := net.DialTimeout(netw, addr, time.Second*2)
				if err != nil {
					return nil, err
				}
				c.SetDeadline(deadline)
				return c, nil
			},
		},
	}

	url := "http://169.254.169.254/latest/meta-data/" + path
	resp, err := c.Get(url)
	if err != nil {
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		err = fmt.Errorf("Code %d returned for url %s", resp.StatusCode, url)
		return
	}

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return
	}
	return []byte(body), err
}

// GetRegion looks up a Region by name; unknown names yield the zero Region.
func GetRegion(regionName string) (region Region) {
	region = Regions[regionName]
	return
}

// GetInstanceCredentials creates an Auth based on the instance's role credentials.
// If the running instance is not in EC2 or does not have a valid IAM role, an error will be returned.
// For more info about setting up IAM roles, see http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
func GetInstanceCredentials() (cred credentials, err error) {
	credentialPath := "iam/security-credentials/"

	// Get the instance role
	role, err := GetMetaData(credentialPath)
	if err != nil {
		return
	}

	// Get the instance role credentials
	credentialJSON, err := GetMetaData(credentialPath + string(role))
	if err != nil {
		return
	}

	err = json.Unmarshal([]byte(credentialJSON), &cred)
	return
}

// GetAuth creates an Auth based on either passed in credentials,
// environment information or instance based role credentials.
func GetAuth(accessKey string, secretKey, token string, expiration time.Time) (auth Auth, err error) { // First try passed in credentials if accessKey != "" && secretKey != "" { return Auth{accessKey, secretKey, token, expiration}, nil } // Next try to get auth from the environment auth, err = EnvAuth() if err == nil { // Found auth, return return } // Next try getting auth from the instance role cred, err := GetInstanceCredentials() if err == nil { // Found auth, return auth.AccessKey = cred.AccessKeyId auth.SecretKey = cred.SecretAccessKey auth.token = cred.Token exptdate, err := time.Parse("2006-01-02T15:04:05Z", cred.Expiration) if err != nil { err = fmt.Errorf("Error Parsing expiration date: cred.Expiration :%s , error: %s \n", cred.Expiration, err) } auth.expiration = exptdate return auth, err } // Next try getting auth from the credentials file auth, err = CredentialFileAuth("", "", time.Minute*5) if err == nil { return } //err = errors.New("No valid AWS authentication found") err = fmt.Errorf("No valid AWS authentication found: %s", err) return auth, err } // EnvAuth creates an Auth based on environment information. // The AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment // variables are used. func EnvAuth() (auth Auth, err error) { auth.AccessKey = os.Getenv("AWS_ACCESS_KEY_ID") if auth.AccessKey == "" { auth.AccessKey = os.Getenv("AWS_ACCESS_KEY") } auth.SecretKey = os.Getenv("AWS_SECRET_ACCESS_KEY") if auth.SecretKey == "" { auth.SecretKey = os.Getenv("AWS_SECRET_KEY") } if auth.AccessKey == "" { err = errors.New("AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment") } if auth.SecretKey == "" { err = errors.New("AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment") } return } // CredentialFileAuth creates and Auth based on a credentials file. The file // contains various authentication profiles for use with AWS. 
// // The credentials file, which is used by other AWS SDKs, is documented at // http://blogs.aws.amazon.com/security/post/Tx3D6U6WSFGOK2H/A-New-and-Standardized-Way-to-Manage-Credentials-in-the-AWS-SDKs func CredentialFileAuth(filePath string, profile string, expiration time.Duration) (auth Auth, err error) { if profile == "" { profile = "default" } if filePath == "" { u, err := user.Current() if err != nil { return auth, err } filePath = path.Join(u.HomeDir, ".aws", "credentials") } // read the file, then parse the INI contents, err := ioutil.ReadFile(filePath) if err != nil { return } profiles := parseINI(string(contents)) profileData, ok := profiles[profile] if !ok { err = errors.New("The credentials file did not contain the profile") return } keyId, ok := profileData["aws_access_key_id"] if !ok { err = errors.New("The credentials file did not contain required attribute aws_access_key_id") return } secretKey, ok := profileData["aws_secret_access_key"] if !ok { err = errors.New("The credentials file did not contain required attribute aws_secret_access_key") return } auth.AccessKey = keyId auth.SecretKey = secretKey if token, ok := profileData["aws_session_token"]; ok { auth.token = token } auth.expiration = time.Now().Add(expiration) return } // parseINI takes the contents of a credentials file and returns a map, whose keys // are the various profiles, and whose values are maps of the settings for the // profiles func parseINI(fileContents string) map[string]map[string]string { profiles := make(map[string]map[string]string) lines := strings.Split(fileContents, "\n") var currentSection map[string]string for _, line := range lines { // remove comments, which start with a semi-colon if split := strings.Split(line, ";"); len(split) > 1 { line = split[0] } // check if the line is the start of a profile. 
// // for example: // [default] // // otherwise, check for the proper setting // property=value if sectMatch := iniSectionRegexp.FindStringSubmatch(line); len(sectMatch) == 2 { currentSection = make(map[string]string) profiles[sectMatch[1]] = currentSection } else if setMatch := iniSettingRegexp.FindStringSubmatch(line); len(setMatch) == 3 && currentSection != nil { currentSection[setMatch[1]] = setMatch[2] } } return profiles } // Encode takes a string and URI-encodes it in a way suitable // to be used in AWS signatures. func Encode(s string) string { encode := false for i := 0; i != len(s); i++ { c := s[i] if c > 127 || !unreserved[c] { encode = true break } } if !encode { return s } e := make([]byte, len(s)*3) ei := 0 for i := 0; i != len(s); i++ { c := s[i] if c > 127 || !unreserved[c] { e[ei] = '%' e[ei+1] = hex[c>>4] e[ei+2] = hex[c&0xF] ei += 3 } else { e[ei] = c ei += 1 } } return string(e[:ei]) } func dialTimeout(network, addr string) (net.Conn, error) { return net.DialTimeout(network, addr, time.Duration(2*time.Second)) } func AvailabilityZone() string { transport := http.Transport{Dial: dialTimeout} client := http.Client{ Transport: &transport, } resp, err := client.Get("http://169.254.169.254/latest/meta-data/placement/availability-zone") if err != nil { return "unknown" } else { defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return "unknown" } else { return string(body) } } } func InstanceRegion() string { az := AvailabilityZone() if az == "unknown" { return az } else { region := az[:len(az)-1] return region } } func InstanceId() string { transport := http.Transport{Dial: dialTimeout} client := http.Client{ Transport: &transport, } resp, err := client.Get("http://169.254.169.254/latest/meta-data/instance-id") if err != nil { return "unknown" } else { defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return "unknown" } else { return string(body) } } } func InstanceType() string { transport := 
http.Transport{Dial: dialTimeout} client := http.Client{ Transport: &transport, } resp, err := client.Get("http://169.254.169.254/latest/meta-data/instance-type") if err != nil { return "unknown" } else { defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return "unknown" } else { return string(body) } } } func ServerLocalIp() string { transport := http.Transport{Dial: dialTimeout} client := http.Client{ Transport: &transport, } resp, err := client.Get("http://169.254.169.254/latest/meta-data/local-ipv4") if err != nil { return "127.0.0.1" } else { defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return "127.0.0.1" } else { return string(body) } } } func ServerPublicIp() string { transport := http.Transport{Dial: dialTimeout} client := http.Client{ Transport: &transport, } resp, err := client.Get("http://169.254.169.254/latest/meta-data/public-ipv4") if err != nil { return "127.0.0.1" } else { defer resp.Body.Close() body, err := ioutil.ReadAll(resp.Body) if err != nil { return "127.0.0.1" } else { return string(body) } } } ================================================ FILE: vendor/github.com/zackbloom/goamz/aws/client.go ================================================ package aws import ( "math" "net" "net/http" "time" ) type RetryableFunc func(*http.Request, *http.Response, error) bool type WaitFunc func(try int) type DeadlineFunc func() time.Time type ResilientTransport struct { // Timeout is the maximum amount of time a dial will wait for // a connect to complete. // // The default is no timeout. // // With or without a timeout, the operating system may impose // its own earlier timeout. For instance, TCP timeouts are // often around 3 minutes. DialTimeout time.Duration // MaxTries, if non-zero, specifies the number of times we will retry on // failure. Retries are only attempted for temporary network errors or known // safe failures. 
MaxTries int Deadline DeadlineFunc ShouldRetry RetryableFunc Wait WaitFunc transport *http.Transport } // Convenience method for creating an http client func NewClient(rt *ResilientTransport) *http.Client { rt.transport = &http.Transport{ Dial: func(netw, addr string) (net.Conn, error) { c, err := net.DialTimeout(netw, addr, rt.DialTimeout) if err != nil { return nil, err } c.SetDeadline(rt.Deadline()) return c, nil }, Proxy: http.ProxyFromEnvironment, } // TODO: Would be nice is ResilientTransport allowed clients to initialize // with http.Transport attributes. return &http.Client{ Transport: rt, } } var retryingTransport = &ResilientTransport{ Deadline: func() time.Time { return time.Now().Add(5 * time.Second) }, DialTimeout: 10 * time.Second, MaxTries: 3, ShouldRetry: awsRetry, Wait: ExpBackoff, } // Exported default client var RetryingClient = NewClient(retryingTransport) func (t *ResilientTransport) RoundTrip(req *http.Request) (*http.Response, error) { return t.tries(req) } // Retry a request a maximum of t.MaxTries times. // We'll only retry if the proper criteria are met. // If a wait function is specified, wait that amount of time // In between requests. func (t *ResilientTransport) tries(req *http.Request) (res *http.Response, err error) { for try := 0; try < t.MaxTries; try += 1 { res, err = t.transport.RoundTrip(req) if !t.ShouldRetry(req, res, err) { break } if res != nil { res.Body.Close() } if t.Wait != nil { t.Wait(try) } } return } func ExpBackoff(try int) { time.Sleep(100 * time.Millisecond * time.Duration(math.Exp2(float64(try)))) } func LinearBackoff(try int) { time.Sleep(time.Duration(try*100) * time.Millisecond) } // Decide if we should retry a request. // In general, the criteria for retrying a request is described here // http://docs.aws.amazon.com/general/latest/gr/api-retries.html func awsRetry(req *http.Request, res *http.Response, err error) bool { retry := false // Retry if there's a temporary network error. 
if neterr, ok := err.(net.Error); ok { if neterr.Temporary() { retry = true } } // Retry if we get a 5xx series error. if res != nil { if res.StatusCode >= 500 && res.StatusCode < 600 { retry = true } } return retry } ================================================ FILE: vendor/github.com/zackbloom/goamz/aws/regions.go ================================================ package aws var USGovWest = Region{ "us-gov-west-1", "https://ec2.us-gov-west-1.amazonaws.com", "https://s3-fips-us-gov-west-1.amazonaws.com", "", true, true, "", "https://sns.us-gov-west-1.amazonaws.com", "https://sqs.us-gov-west-1.amazonaws.com", "", "https://iam.us-gov.amazonaws.com", "https://elasticloadbalancing.us-gov-west-1.amazonaws.com", "https://dynamodb.us-gov-west-1.amazonaws.com", ServiceInfo{"https://monitoring.us-gov-west-1.amazonaws.com", V2Signature}, "https://autoscaling.us-gov-west-1.amazonaws.com", ServiceInfo{"https://rds.us-gov-west-1.amazonaws.com", V2Signature}, "", "https://sts.amazonaws.com", "https://cloudformation.us-gov-west-1.amazonaws.com", "", } var USEast = Region{ "us-east-1", "https://ec2.us-east-1.amazonaws.com", "https://s3.amazonaws.com", "", false, false, "https://sdb.amazonaws.com", "https://sns.us-east-1.amazonaws.com", "https://sqs.us-east-1.amazonaws.com", "https://email.us-east-1.amazonaws.com", "https://iam.amazonaws.com", "https://elasticloadbalancing.us-east-1.amazonaws.com", "https://dynamodb.us-east-1.amazonaws.com", ServiceInfo{"https://monitoring.us-east-1.amazonaws.com", V2Signature}, "https://autoscaling.us-east-1.amazonaws.com", ServiceInfo{"https://rds.us-east-1.amazonaws.com", V2Signature}, "https://kinesis.us-east-1.amazonaws.com", "https://sts.amazonaws.com", "https://cloudformation.us-east-1.amazonaws.com", "https://elasticache.us-east-1.amazonaws.com", } var USWest = Region{ "us-west-1", "https://ec2.us-west-1.amazonaws.com", "https://s3-us-west-1.amazonaws.com", "", true, true, "https://sdb.us-west-1.amazonaws.com", 
"https://sns.us-west-1.amazonaws.com", "https://sqs.us-west-1.amazonaws.com", "", "https://iam.amazonaws.com", "https://elasticloadbalancing.us-west-1.amazonaws.com", "https://dynamodb.us-west-1.amazonaws.com", ServiceInfo{"https://monitoring.us-west-1.amazonaws.com", V2Signature}, "https://autoscaling.us-west-1.amazonaws.com", ServiceInfo{"https://rds.us-west-1.amazonaws.com", V2Signature}, "https://kinesis.us-west-1.amazonaws.com", "https://sts.amazonaws.com", "https://cloudformation.us-west-1.amazonaws.com", "https://elasticache.us-west-1.amazonaws.com", } var USWest2 = Region{ "us-west-2", "https://ec2.us-west-2.amazonaws.com", "https://s3-us-west-2.amazonaws.com", "", true, true, "https://sdb.us-west-2.amazonaws.com", "https://sns.us-west-2.amazonaws.com", "https://sqs.us-west-2.amazonaws.com", "https://email.us-west-2.amazonaws.com", "https://iam.amazonaws.com", "https://elasticloadbalancing.us-west-2.amazonaws.com", "https://dynamodb.us-west-2.amazonaws.com", ServiceInfo{"https://monitoring.us-west-2.amazonaws.com", V2Signature}, "https://autoscaling.us-west-2.amazonaws.com", ServiceInfo{"https://rds.us-west-2.amazonaws.com", V2Signature}, "https://kinesis.us-west-2.amazonaws.com", "https://sts.amazonaws.com", "https://cloudformation.us-west-2.amazonaws.com", "https://elasticache.us-west-2.amazonaws.com", } var EUWest = Region{ "eu-west-1", "https://ec2.eu-west-1.amazonaws.com", "https://s3-eu-west-1.amazonaws.com", "", true, true, "https://sdb.eu-west-1.amazonaws.com", "https://sns.eu-west-1.amazonaws.com", "https://sqs.eu-west-1.amazonaws.com", "https://email.eu-west-1.amazonaws.com", "https://iam.amazonaws.com", "https://elasticloadbalancing.eu-west-1.amazonaws.com", "https://dynamodb.eu-west-1.amazonaws.com", ServiceInfo{"https://monitoring.eu-west-1.amazonaws.com", V2Signature}, "https://autoscaling.eu-west-1.amazonaws.com", ServiceInfo{"https://rds.eu-west-1.amazonaws.com", V2Signature}, "https://kinesis.eu-west-1.amazonaws.com", 
"https://sts.amazonaws.com", "https://cloudformation.eu-west-1.amazonaws.com", "https://elasticache.eu-west-1.amazonaws.com", } var EUCentral = Region{ "eu-central-1", "https://ec2.eu-central-1.amazonaws.com", "https://s3-eu-central-1.amazonaws.com", "", true, true, "https://sdb.eu-central-1.amazonaws.com", "https://sns.eu-central-1.amazonaws.com", "https://sqs.eu-central-1.amazonaws.com", "", "https://iam.amazonaws.com", "https://elasticloadbalancing.eu-central-1.amazonaws.com", "https://dynamodb.eu-central-1.amazonaws.com", ServiceInfo{"https://monitoring.eu-central-1.amazonaws.com", V2Signature}, "https://autoscaling.eu-central-1.amazonaws.com", ServiceInfo{"https://rds.eu-central-1.amazonaws.com", V2Signature}, "https://kinesis.eu-central-1.amazonaws.com", "https://sts.amazonaws.com", "https://cloudformation.eu-central-1.amazonaws.com", "", } var APSoutheast = Region{ "ap-southeast-1", "https://ec2.ap-southeast-1.amazonaws.com", "https://s3-ap-southeast-1.amazonaws.com", "", true, true, "https://sdb.ap-southeast-1.amazonaws.com", "https://sns.ap-southeast-1.amazonaws.com", "https://sqs.ap-southeast-1.amazonaws.com", "", "https://iam.amazonaws.com", "https://elasticloadbalancing.ap-southeast-1.amazonaws.com", "https://dynamodb.ap-southeast-1.amazonaws.com", ServiceInfo{"https://monitoring.ap-southeast-1.amazonaws.com", V2Signature}, "https://autoscaling.ap-southeast-1.amazonaws.com", ServiceInfo{"https://rds.ap-southeast-1.amazonaws.com", V2Signature}, "https://kinesis.ap-southeast-1.amazonaws.com", "https://sts.amazonaws.com", "https://cloudformation.ap-southeast-1.amazonaws.com", "https://elasticache.ap-southeast-1.amazonaws.com", } var APSoutheast2 = Region{ "ap-southeast-2", "https://ec2.ap-southeast-2.amazonaws.com", "https://s3-ap-southeast-2.amazonaws.com", "", true, true, "https://sdb.ap-southeast-2.amazonaws.com", "https://sns.ap-southeast-2.amazonaws.com", "https://sqs.ap-southeast-2.amazonaws.com", "", "https://iam.amazonaws.com", 
"https://elasticloadbalancing.ap-southeast-2.amazonaws.com", "https://dynamodb.ap-southeast-2.amazonaws.com", ServiceInfo{"https://monitoring.ap-southeast-2.amazonaws.com", V2Signature}, "https://autoscaling.ap-southeast-2.amazonaws.com", ServiceInfo{"https://rds.ap-southeast-2.amazonaws.com", V2Signature}, "https://kinesis.ap-southeast-2.amazonaws.com", "https://sts.amazonaws.com", "https://cloudformation.ap-southeast-2.amazonaws.com", "https://elasticache.ap-southeast-2.amazonaws.com", } var APNortheast = Region{ "ap-northeast-1", "https://ec2.ap-northeast-1.amazonaws.com", "https://s3-ap-northeast-1.amazonaws.com", "", true, true, "https://sdb.ap-northeast-1.amazonaws.com", "https://sns.ap-northeast-1.amazonaws.com", "https://sqs.ap-northeast-1.amazonaws.com", "", "https://iam.amazonaws.com", "https://elasticloadbalancing.ap-northeast-1.amazonaws.com", "https://dynamodb.ap-northeast-1.amazonaws.com", ServiceInfo{"https://monitoring.ap-northeast-1.amazonaws.com", V2Signature}, "https://autoscaling.ap-northeast-1.amazonaws.com", ServiceInfo{"https://rds.ap-northeast-1.amazonaws.com", V2Signature}, "https://kinesis.ap-northeast-1.amazonaws.com", "https://sts.amazonaws.com", "https://cloudformation.ap-northeast-1.amazonaws.com", "https://elasticache.ap-northeast-1.amazonaws.com", } var SAEast = Region{ "sa-east-1", "https://ec2.sa-east-1.amazonaws.com", "https://s3-sa-east-1.amazonaws.com", "", true, true, "https://sdb.sa-east-1.amazonaws.com", "https://sns.sa-east-1.amazonaws.com", "https://sqs.sa-east-1.amazonaws.com", "", "https://iam.amazonaws.com", "https://elasticloadbalancing.sa-east-1.amazonaws.com", "https://dynamodb.sa-east-1.amazonaws.com", ServiceInfo{"https://monitoring.sa-east-1.amazonaws.com", V2Signature}, "https://autoscaling.sa-east-1.amazonaws.com", ServiceInfo{"https://rds.sa-east-1.amazonaws.com", V2Signature}, "", "https://sts.amazonaws.com", "https://cloudformation.sa-east-1.amazonaws.com", "https://elasticache.sa-east-1.amazonaws.com", } 
================================================ FILE: vendor/github.com/zackbloom/goamz/aws/retry.go ================================================

package aws

import (
	"math/rand"
	"net"
	"net/http"
	"time"
)

const (
	maxDelay             = 20 * time.Second
	defaultScale         = 300 * time.Millisecond
	throttlingScale      = 500 * time.Millisecond
	throttlingScaleRange = throttlingScale / 4
	defaultMaxRetries    = 3
	dynamoDBScale        = 25 * time.Millisecond
	dynamoDBMaxRetries   = 10
)

// A RetryPolicy encapsulates a strategy for implementing client retries.
//
// Default implementations are provided which match the AWS SDKs.
type RetryPolicy interface {
	// ShouldRetry returns whether a client should retry a failed request.
	ShouldRetry(target string, r *http.Response, err error, numRetries int) bool

	// Delay returns the time a client should wait before issuing a retry.
	Delay(target string, r *http.Response, err error, numRetries int) time.Duration
}

// DefaultRetryPolicy implements the AWS SDK default retry policy.
//
// It will retry up to 3 times, and uses an exponential backoff with a scale
// factor of 300ms (300ms, 600ms, 1200ms). If the retry is because of
// throttling, the delay will also include some randomness.
//
// See https://github.com/aws/aws-sdk-java/blob/master/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L90.
type DefaultRetryPolicy struct {
}

// ShouldRetry implements the RetryPolicy ShouldRetry method.
func (policy DefaultRetryPolicy) ShouldRetry(target string, r *http.Response, err error, numRetries int) bool {
	return shouldRetry(r, err, numRetries, defaultMaxRetries)
}

// Delay implements the RetryPolicy Delay method.
func (policy DefaultRetryPolicy) Delay(target string, r *http.Response, err error, numRetries int) time.Duration {
	scale := defaultScale
	// Throttling errors get a larger scale plus jitter to spread retries out.
	if err, ok := err.(*Error); ok && isThrottlingException(err) {
		scale = throttlingScale + time.Duration(rand.Int63n(int64(throttlingScaleRange)))
	}
	return exponentialBackoff(numRetries, scale)
}

// DynamoDBRetryPolicy implements the AWS SDK DynamoDB retry policy.
//
// It will retry up to 10 times, and uses an exponential backoff with a scale
// factor of 25ms (25ms, 50ms, 100ms, ...).
//
// See https://github.com/aws/aws-sdk-java/blob/master/aws-java-sdk-core/src/main/java/com/amazonaws/retry/PredefinedRetryPolicies.java#L103.
type DynamoDBRetryPolicy struct {
}

// ShouldRetry implements the RetryPolicy ShouldRetry method.
func (policy DynamoDBRetryPolicy) ShouldRetry(target string, r *http.Response, err error, numRetries int) bool {
	return shouldRetry(r, err, numRetries, dynamoDBMaxRetries)
}

// Delay implements the RetryPolicy Delay method.
func (policy DynamoDBRetryPolicy) Delay(target string, r *http.Response, err error, numRetries int) time.Duration {
	return exponentialBackoff(numRetries, dynamoDBScale)
}

// NeverRetryPolicy never retries requests and returns immediately on failure.
type NeverRetryPolicy struct {
}

// ShouldRetry implements the RetryPolicy ShouldRetry method.
func (policy NeverRetryPolicy) ShouldRetry(target string, r *http.Response, err error, numRetries int) bool {
	return false
}

// Delay implements the RetryPolicy Delay method.
func (policy NeverRetryPolicy) Delay(target string, r *http.Response, err error, numRetries int) time.Duration {
	return time.Duration(0)
}

// shouldRetry determines if we should retry the request.
//
// See http://docs.aws.amazon.com/general/latest/gr/api-retries.html.
func shouldRetry(r *http.Response, err error, numRetries int, maxRetries int) bool {
	// Once we've exceeded the max retry attempts, game over.
	if numRetries >= maxRetries {
		return false
	}

	// Always retry temporary network errors.
	if err, ok := err.(net.Error); ok && err.Temporary() {
		return true
	}

	// Always retry 5xx responses.
	if r != nil && r.StatusCode >= 500 {
		return true
	}

	// Always retry throttling exceptions.
	if err, ok := err.(ServiceError); ok && isThrottlingException(err) {
		return true
	}

	// Other classes of failures indicate a problem with the request. Retrying
	// won't help.
	return false
}

// exponentialBackoff returns scale * 2^numRetries, capped at maxDelay.
func exponentialBackoff(numRetries int, scale time.Duration) time.Duration {
	if numRetries < 0 {
		return time.Duration(0)
	}

	delay := (1 << uint(numRetries)) * scale
	if delay > maxDelay {
		return maxDelay
	}
	return delay
}

// isThrottlingException reports whether the error code is one of the AWS
// throttling codes.
func isThrottlingException(err ServiceError) bool {
	switch err.ErrorCode() {
	case "Throttling", "ThrottlingException", "ProvisionedThroughputExceededException":
		return true
	default:
		return false
	}
}

================================================ FILE: vendor/github.com/zackbloom/goamz/aws/sign.go ================================================

package aws

import (
	"bytes"
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
	"io/ioutil"
	"net/http"
	"net/url"
	"path"
	"sort"
	"strings"
	"time"
)

// V2Signer signs requests using AWS Signature Version 2.
type V2Signer struct {
	auth    Auth
	service ServiceInfo
	host    string
}

var b64 = base64.StdEncoding

// NewV2Signer builds a V2Signer for the service endpoint; the endpoint
// host is part of the string-to-sign.
func NewV2Signer(auth Auth, service ServiceInfo) (*V2Signer, error) {
	u, err := url.Parse(service.Endpoint)
	if err != nil {
		return nil, err
	}
	return &V2Signer{auth: auth, service: service, host: u.Host}, nil
}

// Sign adds the Signature V2 parameters (including "Signature") to params.
func (s *V2Signer) Sign(method, path string, params map[string]string) {
	params["AWSAccessKeyId"] = s.auth.AccessKey
	params["SignatureVersion"] = "2"
	params["SignatureMethod"] = "HmacSHA256"
	if s.auth.Token() != "" {
		params["SecurityToken"] = s.auth.Token()
	}

	// AWS specifies that the parameters in a signed request must
	// be provided in the natural order of the keys. This is distinct
	// from the natural order of the encoded value of key=value.
	// Percent and gocheck.Equals affect the sorting order.
	var keys, sarray []string
	for k, _ := range params {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	for _, k := range keys {
		sarray = append(sarray, Encode(k)+"="+Encode(params[k]))
	}
	joined := strings.Join(sarray, "&")
	payload := method + "\n" + s.host + "\n" + path + "\n" + joined
	hash := hmac.New(sha256.New, []byte(s.auth.SecretKey))
	hash.Write([]byte(payload))
	signature := make([]byte, b64.EncodedLen(hash.Size()))
	b64.Encode(signature, hash.Sum(nil))

	params["Signature"] = string(signature)
}

// Common date formats for signing requests
const (
	ISO8601BasicFormat      = "20060102T150405Z"
	ISO8601BasicFormatShort = "20060102"
)

// Route53Signer signs requests for the Route 53 API (AWS3-HTTPS scheme).
type Route53Signer struct {
	auth Auth
}

func NewRoute53Signer(auth Auth) *Route53Signer {
	return &Route53Signer{auth: auth}
}

// getCurrentDate fetches the date stamp from the aws servers to
// ensure the auth headers are within 5 minutes of the server time
func (s *Route53Signer) getCurrentDate() string {
	response, err := http.Get("https://route53.amazonaws.com/date")
	if err != nil {
		fmt.Print("Unable to get date from amazon: ", err)
		return ""
	}

	response.Body.Close()
	return response.Header.Get("Date")
}

// Creates the authorize signature based on the date stamp and secret key
func (s *Route53Signer) getHeaderAuthorize(message string) string {
	hmacSha256 := hmac.New(sha256.New, []byte(s.auth.SecretKey))
	hmacSha256.Write([]byte(message))
	cryptedString := hmacSha256.Sum(nil)

	return base64.StdEncoding.EncodeToString(cryptedString)
}

// Adds all the required headers for AWS Route53 API to the request
// including the authorization
func (s *Route53Signer) Sign(req *http.Request) {
	date := s.getCurrentDate()
	authHeader := fmt.Sprintf("AWS3-HTTPS AWSAccessKeyId=%s,Algorithm=%s,Signature=%s",
		s.auth.AccessKey, "HmacSHA256", s.getHeaderAuthorize(date))

	req.Header.Set("Host", req.Host)
	req.Header.Set("X-Amzn-Authorization", authHeader)
	req.Header.Set("X-Amz-Date", date)
	req.Header.Set("Content-Type", "application/xml")
	if s.auth.Token() != "" {
		req.Header.Set("X-Amzn-Security-Token", s.auth.Token())
	}
}

/*
The V4Signer encapsulates all of the functionality to sign a request with the AWS
Signature Version 4 Signing Process. (http://goo.gl/u1OWZz)
*/
type V4Signer struct {
	auth        Auth
	serviceName string
	region      Region
	// Add the x-amz-content-sha256 header
	IncludeXAmzContentSha256 bool
}

/*
Return a new instance of a V4Signer capable of signing AWS requests.
*/
func NewV4Signer(auth Auth, serviceName string, region Region) *V4Signer {
	return &V4Signer{
		auth:        auth,
		serviceName: serviceName,
		region:      region,
		IncludeXAmzContentSha256: false,
	}
}

/*
Sign a request according to the AWS Signature Version 4 Signing Process. (http://goo.gl/u1OWZz)

The signed request will include an "x-amz-date" header with a current timestamp if a valid
"x-amz-date" or "date" header was not available in the original request. In addition, AWS
Signature Version 4 requires the "host" header to be a signed header, therefor the Sign
method will manually set a "host" header from the request.Host.

The signed request will include a new "Authorization" header indicating that the request
has been signed.

Any changes to the request after signing the request will invalidate the signature.
*/ func (s *V4Signer) Sign(req *http.Request) { req.Header.Set("host", req.Host) // host header must be included as a signed header t := s.requestTime(req) // Get request time payloadHash := "" if _, ok := req.Form["X-Amz-Expires"]; ok { // We are authenticating the the request by using query params // (also known as pre-signing a url, http://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html) payloadHash = "UNSIGNED-PAYLOAD" req.Header.Del("x-amz-date") req.Form["X-Amz-SignedHeaders"] = []string{s.signedHeaders(req.Header)} req.Form["X-Amz-Algorithm"] = []string{"AWS4-HMAC-SHA256"} req.Form["X-Amz-Credential"] = []string{s.auth.AccessKey + "/" + s.credentialScope(t)} req.Form["X-Amz-Date"] = []string{t.Format(ISO8601BasicFormat)} req.URL.RawQuery = req.Form.Encode() } else { payloadHash = s.payloadHash(req) if s.IncludeXAmzContentSha256 { req.Header.Set("x-amz-content-sha256", payloadHash) // x-amz-content-sha256 contains the payload hash } } creq := s.canonicalRequest(req, payloadHash) // Build canonical request sts := s.stringToSign(t, creq) // Build string to sign signature := s.signature(t, sts) // Calculate the AWS Signature Version 4 auth := s.authorization(req.Header, t, signature) // Create Authorization header value if _, ok := req.Form["X-Amz-Expires"]; ok { req.Form["X-Amz-Signature"] = []string{signature} } else { req.Header.Set("Authorization", auth) // Add Authorization header to request } return } /* requestTime method will parse the time from the request "x-amz-date" or "date" headers. If the "x-amz-date" header is present, that will take priority over the "date" header. If neither header is defined or we are unable to parse either header as a valid date then we will create a new "x-amz-date" header with the current time. 
*/ func (s *V4Signer) requestTime(req *http.Request) time.Time { // Get "x-amz-date" header date := req.Header.Get("x-amz-date") // Attempt to parse as ISO8601BasicFormat t, err := time.Parse(ISO8601BasicFormat, date) if err == nil { return t } // Attempt to parse as http.TimeFormat t, err = time.Parse(http.TimeFormat, date) if err == nil { req.Header.Set("x-amz-date", t.Format(ISO8601BasicFormat)) return t } // Get "date" header date = req.Header.Get("date") // Attempt to parse as http.TimeFormat t, err = time.Parse(http.TimeFormat, date) if err == nil { return t } // Create a current time header to be used t = time.Now().UTC() req.Header.Set("x-amz-date", t.Format(ISO8601BasicFormat)) return t } /* canonicalRequest method creates the canonical request according to Task 1 of the AWS Signature Version 4 Signing Process. (http://goo.gl/eUUZ3S) CanonicalRequest = HTTPRequestMethod + '\n' + CanonicalURI + '\n' + CanonicalQueryString + '\n' + CanonicalHeaders + '\n' + SignedHeaders + '\n' + HexEncode(Hash(Payload)) payloadHash is optional; use the empty string and it will be calculated from the request */ func (s *V4Signer) canonicalRequest(req *http.Request, payloadHash string) string { if payloadHash == "" { payloadHash = s.payloadHash(req) } c := new(bytes.Buffer) fmt.Fprintf(c, "%s\n", req.Method) fmt.Fprintf(c, "%s\n", s.canonicalURI(req.URL)) fmt.Fprintf(c, "%s\n", s.canonicalQueryString(req.URL)) fmt.Fprintf(c, "%s\n\n", s.canonicalHeaders(req.Header)) fmt.Fprintf(c, "%s\n", s.signedHeaders(req.Header)) fmt.Fprintf(c, "%s", payloadHash) return c.String() } func (s *V4Signer) canonicalURI(u *url.URL) string { u = &url.URL{Path: u.Path} canonicalPath := u.String() slash := strings.HasSuffix(canonicalPath, "/") canonicalPath = path.Clean(canonicalPath) if canonicalPath == "" || canonicalPath == "." 
{ canonicalPath = "/" } if canonicalPath != "/" && slash { canonicalPath += "/" } return canonicalPath } func (s *V4Signer) canonicalQueryString(u *url.URL) string { var a []string for k, vs := range u.Query() { k = url.QueryEscape(k) for _, v := range vs { if v == "" { a = append(a, k+"=") } else { v = url.QueryEscape(v) a = append(a, k+"="+v) } } } sort.Strings(a) return strings.Join(a, "&") } func (s *V4Signer) canonicalHeaders(h http.Header) string { i, a, lowerCase := 0, make([]string, len(h)), make(map[string][]string) for k, v := range h { lowerCase[strings.ToLower(k)] = v } var keys []string for k := range lowerCase { keys = append(keys, k) } sort.Strings(keys) for _, k := range keys { v := lowerCase[k] for j, w := range v { v[j] = strings.Trim(w, " ") } sort.Strings(v) a[i] = strings.ToLower(k) + ":" + strings.Join(v, ",") i++ } return strings.Join(a, "\n") } func (s *V4Signer) signedHeaders(h http.Header) string { i, a := 0, make([]string, len(h)) for k, _ := range h { a[i] = strings.ToLower(k) i++ } sort.Strings(a) return strings.Join(a, ";") } func (s *V4Signer) payloadHash(req *http.Request) string { var b []byte if req.Body == nil { b = []byte("") } else { var err error b, err = ioutil.ReadAll(req.Body) if err != nil { // TODO: I REALLY DON'T LIKE THIS PANIC!!!! panic(err) } } req.Body = ioutil.NopCloser(bytes.NewBuffer(b)) return s.hash(string(b)) } /* stringToSign method creates the string to sign accorting to Task 2 of the AWS Signature Version 4 Signing Process. 
(http://goo.gl/es1PAu) StringToSign = Algorithm + '\n' + RequestDate + '\n' + CredentialScope + '\n' + HexEncode(Hash(CanonicalRequest)) */ func (s *V4Signer) stringToSign(t time.Time, creq string) string { w := new(bytes.Buffer) fmt.Fprint(w, "AWS4-HMAC-SHA256\n") fmt.Fprintf(w, "%s\n", t.Format(ISO8601BasicFormat)) fmt.Fprintf(w, "%s\n", s.credentialScope(t)) fmt.Fprintf(w, "%s", s.hash(creq)) return w.String() } func (s *V4Signer) credentialScope(t time.Time) string { return fmt.Sprintf("%s/%s/%s/aws4_request", t.Format(ISO8601BasicFormatShort), s.region.Name, s.serviceName) } /* signature method calculates the AWS Signature Version 4 according to Task 3 of the AWS Signature Version 4 Signing Process. (http://goo.gl/j0Yqe1) signature = HexEncode(HMAC(derived-signing-key, string-to-sign)) */ func (s *V4Signer) signature(t time.Time, sts string) string { h := s.hmac(s.derivedKey(t), []byte(sts)) return fmt.Sprintf("%x", h) } /* derivedKey method derives a signing key to be used for signing a request. kSecret = Your AWS Secret Access Key kDate = HMAC("AWS4" + kSecret, Date) kRegion = HMAC(kDate, Region) kService = HMAC(kRegion, Service) kSigning = HMAC(kService, "aws4_request") */ func (s *V4Signer) derivedKey(t time.Time) []byte { h := s.hmac([]byte("AWS4"+s.auth.SecretKey), []byte(t.Format(ISO8601BasicFormatShort))) h = s.hmac(h, []byte(s.region.Name)) h = s.hmac(h, []byte(s.serviceName)) h = s.hmac(h, []byte("aws4_request")) return h } /* authorization method generates the authorization header value. 
*/ func (s *V4Signer) authorization(header http.Header, t time.Time, signature string) string { w := new(bytes.Buffer) fmt.Fprint(w, "AWS4-HMAC-SHA256 ") fmt.Fprintf(w, "Credential=%s/%s, ", s.auth.AccessKey, s.credentialScope(t)) fmt.Fprintf(w, "SignedHeaders=%s, ", s.signedHeaders(header)) fmt.Fprintf(w, "Signature=%s", signature) return w.String() } // hash method calculates the sha256 hash for a given string func (s *V4Signer) hash(in string) string { h := sha256.New() fmt.Fprintf(h, "%s", in) return fmt.Sprintf("%x", h.Sum(nil)) } // hmac method calculates the sha256 hmac for a given slice of bytes func (s *V4Signer) hmac(key, data []byte) []byte { h := hmac.New(sha256.New, key) h.Write(data) return h.Sum(nil) } ================================================ FILE: vendor/github.com/zackbloom/goamz/cloudfront/cloudfront.go ================================================ package cloudfront import ( "bytes" "crypto" "crypto/rsa" "crypto/sha1" "encoding/base64" "encoding/json" "encoding/xml" "fmt" "net/http" "net/url" "strconv" "strings" "time" "github.com/zackbloom/goamz/aws" ) const ( ServiceName = "cloudfront" ApiVersion = "2014-11-06" ) // TODO Reconcile with 'New' fn below func NewCloudFront(auth aws.Auth) *CloudFront { signer := aws.NewV4Signer(auth, "cloudfront", aws.USEast) return &CloudFront{ Signer: signer, Auth: auth, } } type CloudFront struct { Signer *aws.V4Signer Auth aws.Auth BaseURL string keyPairId string key *rsa.PrivateKey } type DistributionConfig struct { XMLName xml.Name `xml:"DistributionConfig"` CallerReference string Aliases Aliases DefaultRootObject string Origins Origins DefaultCacheBehavior CacheBehavior Comment string CacheBehaviors CacheBehaviors CustomErrorResponses CustomErrorResponses Restrictions *GeoRestriction `xml:"Restrictions>GeoRestriction,omitempty"` Logging Logging ViewerCertificate *ViewerCertificate `xml:",omitempty"` PriceClass string Enabled bool } type DistributionSummary struct { XMLName xml.Name 
`xml:"Distribution"` DistributionConfig DomainName string Status string Id string LastModifiedTime time.Time } type Aliases []string type EncodedAliases struct { Quantity int Items []string `xml:"Items>CNAME"` } func (a Aliases) MarshalXML(e *xml.Encoder, start xml.StartElement) error { enc := EncodedAliases{ Quantity: len(a), Items: []string(a), } return e.EncodeElement(enc, start) } func (n *Aliases) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { enc := EncodedAliases{} err := d.DecodeElement(&enc, &start) if err != nil { return err } *n = enc.Items return nil } type CustomErrorResponses []CustomErrorResponse type EncodedCustomErrorResponses struct { Quantity int Items []CustomErrorResponse `xml:"Items>CustomErrorResponse"` } func (a CustomErrorResponses) MarshalXML(e *xml.Encoder, start xml.StartElement) error { enc := EncodedCustomErrorResponses{ Quantity: len(a), Items: []CustomErrorResponse(a), } return e.EncodeElement(enc, start) } func (n *CustomErrorResponses) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { enc := EncodedCustomErrorResponses{} err := d.DecodeElement(&enc, &start) if err != nil { return err } *n = enc.Items return nil } type CacheBehaviors []CacheBehavior type EncodedCacheBehaviors struct { Quantity int Items []CacheBehavior `xml:"Items>CacheBehavior"` } func (a CacheBehaviors) MarshalXML(e *xml.Encoder, start xml.StartElement) error { enc := EncodedCacheBehaviors{ Quantity: len(a), Items: []CacheBehavior(a), } return e.EncodeElement(enc, start) } func (n *CacheBehaviors) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { enc := EncodedCacheBehaviors{} err := d.DecodeElement(&enc, &start) if err != nil { return err } *n = enc.Items return nil } type Logging struct { Enabled bool IncludeCookies bool Bucket string Prefix string } type ViewerCertificate struct { IAMCertificateId string `xml:",omitempty"` CloudFrontDefaultCertificate bool `xml:",omitempty"` SSLSupportMethod string MinimumProtocolVersion 
string } type GeoRestriction struct { RestrictionType string Locations []string } type EncodedGeoRestriction struct { RestrictionType string Quantity int Locations []string `xml:"Items>Location"` } func (a GeoRestriction) MarshalXML(e *xml.Encoder, start xml.StartElement) error { enc := EncodedGeoRestriction{ RestrictionType: a.RestrictionType, Quantity: len(a.Locations), Locations: []string(a.Locations), } return e.EncodeElement(enc, start) } func (n *GeoRestriction) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { enc := EncodedGeoRestriction{} err := d.DecodeElement(&enc, &start) if err != nil { return err } n.Locations = enc.Locations n.RestrictionType = enc.RestrictionType return nil } type CustomErrorResponse struct { XMLName xml.Name `xml:"CustomErrorResponse"` ErrorCode int ResponsePagePath string ResponseCode int ErrorCachingMinTTL int } type Origin struct { XMLName xml.Name `xml:"Origin"` Id string DomainName string OriginPath string `xml:"OriginPath,omitempty"` S3OriginConfig *S3OriginConfig `xml:",omitempty"` CustomOriginConfig *CustomOriginConfig `xml:",omitempty"` } type S3OriginConfig struct { OriginAccessIdentity string } type CustomOriginConfig struct { HTTPPort int HTTPSPort int OriginProtocolPolicy string } type Origins []Origin type EncodedOrigins struct { Quantity int Items []Origin `xml:"Items>Origin"` } func (o Origins) MarshalXML(e *xml.Encoder, start xml.StartElement) error { enc := EncodedOrigins{ Quantity: len(o), Items: []Origin(o), } return e.EncodeElement(enc, start) } func (o *Origins) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { enc := EncodedOrigins{} err := d.DecodeElement(&enc, &start) if err != nil { return err } *o = Origins(enc.Items) return nil } type CacheBehavior struct { TargetOriginId string PathPattern string `xml:",omitempty"` ForwardedValues ForwardedValues TrustedSigners TrustedSigners ViewerProtocolPolicy string MinTTL int AllowedMethods AllowedMethods SmoothStreaming bool } type 
ForwardedValues struct { QueryString bool Cookies *Cookies Headers Names } type Cookies struct { Forward string WhitelistedNames Names } var CookiesDefault = Cookies{ Forward: "none", WhitelistedNames: Names{}, } func cacheBehaviorDefault(cache *CacheBehavior) { if cache.ForwardedValues.Cookies == nil { clone := CookiesDefault cache.ForwardedValues.Cookies = &clone } } type Names []string type EncodedNames struct { Quantity int Items []string `xml:"Items>Name"` } func (w Names) MarshalXML(e *xml.Encoder, start xml.StartElement) error { enc := EncodedNames{ Quantity: len(w), Items: []string(w), } return e.EncodeElement(enc, start) } func (n *Names) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { enc := EncodedNames{} err := d.DecodeElement(&enc, &start) if err != nil { return err } *n = Names(enc.Items) return nil } type ItemsList []string type TrustedSigners struct { Enabled bool AWSAccountNumbers []string } type EncodedTrustedSigners struct { Enabled bool Quantity int Items []string `xml:"Items>AWSAccountNumber"` } func (n TrustedSigners) MarshalXML(e *xml.Encoder, start xml.StartElement) error { enc := EncodedTrustedSigners{ Enabled: n.Enabled, Quantity: len(n.AWSAccountNumbers), Items: n.AWSAccountNumbers, } return e.EncodeElement(enc, start) } func (n *TrustedSigners) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { enc := EncodedTrustedSigners{} err := d.DecodeElement(&enc, &start) if err != nil { return err } n.AWSAccountNumbers = enc.Items n.Enabled = enc.Enabled return nil } type AllowedMethods struct { Allowed []string `xml:"Items"` Cached []string `xml:"CachedMethods>Items,omitempty"` } type EncodedAllowedMethods struct { AllowedQuantity int `xml:"Quantity"` Allowed []string `xml:"Items>Method"` CachedQuantity int `xml:"CachedMethods>Quantity"` Cached []string `xml:"CachedMethods>Items>Method"` } func (n AllowedMethods) MarshalXML(e *xml.Encoder, start xml.StartElement) error { enc := EncodedAllowedMethods{ AllowedQuantity: 
len(n.Allowed), Allowed: n.Allowed, CachedQuantity: len(n.Cached), Cached: n.Cached, } return e.EncodeElement(enc, start) } func (n *AllowedMethods) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { enc := EncodedAllowedMethods{} err := d.DecodeElement(&enc, &start) if err != nil { return err } n.Allowed = enc.Allowed n.Cached = enc.Cached return nil } var base64Replacer = strings.NewReplacer("=", "_", "+", "-", "/", "~") func NewKeyLess(auth aws.Auth, baseurl string) *CloudFront { return &CloudFront{keyPairId: auth.AccessKey, BaseURL: baseurl} } func New(baseurl string, key *rsa.PrivateKey, keyPairId string) *CloudFront { return &CloudFront{ BaseURL: baseurl, keyPairId: keyPairId, key: key, } } type epochTime struct { EpochTime int64 `json:"AWS:EpochTime"` } type condition struct { DateLessThan epochTime } type statement struct { Resource string Condition condition } type policy struct { Statement []statement } func buildPolicy(resource string, expireTime time.Time) ([]byte, error) { p := &policy{ Statement: []statement{ statement{ Resource: resource, Condition: condition{ DateLessThan: epochTime{ EpochTime: expireTime.Truncate(time.Millisecond).Unix(), }, }, }, }, } return json.Marshal(p) } func (cf *CloudFront) generateSignature(policy []byte) (string, error) { hash := sha1.New() _, err := hash.Write(policy) if err != nil { return "", err } hashed := hash.Sum(nil) var signed []byte if cf.key.Validate() == nil { signed, err = rsa.SignPKCS1v15(nil, cf.key, crypto.SHA1, hashed) if err != nil { return "", err } } else { signed = hashed } encoded := base64Replacer.Replace(base64.StdEncoding.EncodeToString(signed)) return encoded, nil } // Create a CloudFront distribution // // Usage: // conf := cloudfront.DistributionConfig{ // Enabled: true, // // Origins: cloudfront.Origins{ // cloudfront.Origin{ // Id: "test", // DomainName: "example.com", // CustomOriginConfig: &cloudfront.CustomOriginConfig{ // HTTPPort: 80, // HTTPSPort: 443, // OriginProtocolPolicy: 
"http-only", // }, // }, // }, // // DefaultCacheBehavior: cloudfront.CacheBehavior{ // TargetOriginId: "test", // PathPattern: "/test", // ForwardedValues: cloudfront.ForwardedValues{ // QueryString: true, // Cookies: cloudfront.Cookies{ // Forward: "whitelist", // WhitelistedNames: cloudfront.Names{ // "cat", // "dog", // }, // }, // Headers: cloudfront.Names{ // "horse", // "pig", // }, // }, // ViewerProtocolPolicy: "allow-all", // MinTTL: 300, // AllowedMethods: cloudfront.AllowedMethods{ // Allowed: []string{"GET", "HEAD"}, // Cached: []string{"GET", "HEAD"}, // }, // }, // // Restrictions: cloudfront.GeoRestriction{ // RestrictionType: "blacklist", // Locations: []string{ // "CA", // "DE", // }, // }, // // CustomErrorResponses: cloudfront.CustomErrorResponses{ // cloudfront.CustomErrorResponse{ // ErrorCode: 404, // ResponseCode: 403, // ResponsePagePath: "/index.html", // }, // }, // // PriceClass: "PriceClass_All", // } // // cf := cloudfront.NewCloudFront(aws.Auth{ // AccessKey: // ... // SecretKey: // ... 
// }) // cf.CreateDistribution(conf) func (cf *CloudFront) Create(config DistributionConfig) (summary DistributionSummary, err error) { if config.CallerReference == "" { config.CallerReference = strconv.FormatInt(time.Now().Unix(), 10) } cacheBehaviorDefault(&config.DefaultCacheBehavior) for i, _ := range config.CacheBehaviors { cacheBehaviorDefault(&(config.CacheBehaviors[i])) } body, err := xml.Marshal(config) if err != nil { return } client := http.Client{} req, err := http.NewRequest("POST", "https://"+ServiceName+".amazonaws.com/"+ApiVersion+"/distribution", bytes.NewReader(body)) if err != nil { return } cf.Signer.Sign(req) resp, err := client.Do(req) if err != nil { return } defer resp.Body.Close() if resp.StatusCode >= 400 { errors := aws.ErrorResponse{} xml.NewDecoder(resp.Body).Decode(&errors) err := errors.Errors err.RequestId = errors.RequestId err.StatusCode = resp.StatusCode if err.Message == "" { err.Message = resp.Status } return summary, &err } else { err = xml.NewDecoder(resp.Body).Decode(&summary) } return } type DistributionItem struct { XMLName xml.Name `xml:"DistributionSummary"` DistributionSummary } type DistributionsResp struct { Items []DistributionItem `xml:"Items>DistributionSummary"` IsTruncated bool Marker string // Use this to get the next page of results if IsTruncated is true NextMarker string // Total number in account Quantity int MaxItems int } // Marker is an optional pointer to the NextMarker from the previous page of results // Max is the maximum number of results to return, max 100 func (cf *CloudFront) List(marker string, max int) (items *DistributionsResp, err error) { params := url.Values{ "MaxItems": []string{strconv.FormatInt(int64(max), 10)}, } if marker != "" { params["Marker"] = []string{marker} } uri, _ := url.Parse("https://" + ServiceName + ".amazonaws.com/" + ApiVersion + "/distribution") uri.RawQuery = params.Encode() client := http.Client{} req, err := http.NewRequest("GET", uri.String(), nil) if err != nil { 
return } cf.Signer.Sign(req) resp, err := client.Do(req) if err != nil { return } defer resp.Body.Close() if resp.StatusCode >= 400 { errors := aws.ErrorResponse{} xml.NewDecoder(resp.Body).Decode(&errors) errors.Errors.RequestId = errors.RequestId errors.Errors.StatusCode = resp.StatusCode if errors.Errors.Message == "" { errors.Errors.Message = resp.Status } err = &errors.Errors } else { items = &DistributionsResp{} err = xml.NewDecoder(resp.Body).Decode(items) } return } func (cf *CloudFront) FindDistributionByAlias(alias string) (dist *DistributionSummary, err error) { marker := "" for page := 0; page < 10; page++ { var resp *DistributionsResp resp, err = cf.List(marker, 100) if err != nil { return } if resp.Quantity > 1000 { panic("More than 1000 CloudFront distributions in account, not all will be correctly searched") } var item DistributionItem for _, item = range resp.Items { for _, _alias := range item.Aliases { if _alias == alias { dist = &(item.DistributionSummary) return } } } marker = resp.NextMarker if !resp.IsTruncated { break } } return } // Creates a signed url using RSAwithSHA1 as specified by // http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-creating-signed-url-canned-policy.html#private-content-canned-policy-creating-signature func (cf *CloudFront) CannedSignedURL(path, queryString string, expires time.Time) (string, error) { resource := cf.BaseURL + path if queryString != "" { resource = path + "?" 
+ queryString } policy, err := buildPolicy(resource, expires) if err != nil { return "", err } signature, err := cf.generateSignature(policy) if err != nil { return "", err } // TOOD: Do this once uri, err := url.Parse(cf.BaseURL) if err != nil { return "", err } uri.RawQuery = queryString if queryString != "" { uri.RawQuery += "&" } expireTime := expires.Truncate(time.Millisecond).Unix() uri.Path = path uri.RawQuery += fmt.Sprintf("Expires=%d&Signature=%s&Key-Pair-Id=%s", expireTime, signature, cf.keyPairId) return uri.String(), nil } func (cloudfront *CloudFront) SignedURL(path, querystrings string, expires time.Time) string { policy := `{"Statement":[{"Resource":"` + path + "?" + querystrings + `,"Condition":{"DateLessThan":{"AWS:EpochTime":` + strconv.FormatInt(expires.Truncate(time.Millisecond).Unix(), 10) + `}}}]}` hash := sha1.New() hash.Write([]byte(policy)) b := hash.Sum(nil) he := base64.StdEncoding.EncodeToString(b) policySha1 := he url := cloudfront.BaseURL + path + "?" + querystrings + "&Expires=" + strconv.FormatInt(expires.Unix(), 10) + "&Signature=" + policySha1 + "&Key-Pair-Id=" + cloudfront.keyPairId return url } ================================================ FILE: vendor/github.com/zackbloom/goamz/iam/iam.go ================================================ // The iam package provides types and functions for interaction with the AWS // Identity and Access Management (IAM) service. package iam import ( "encoding/xml" "github.com/zackbloom/goamz/aws" "net/http" "net/url" "strconv" "strings" "time" ) // The IAM type encapsulates operations operations with the IAM endpoint. type IAM struct { aws.Auth aws.Region } // New creates a new IAM instance. 
func New(auth aws.Auth, region aws.Region) *IAM {
	return &IAM{auth, region}
}

// query issues a signed GET request with the given form parameters against the
// region's IAM endpoint and decodes the XML response body into resp.
func (iam *IAM) query(params map[string]string, resp interface{}) error {
	params["Version"] = "2010-05-08"
	params["Timestamp"] = time.Now().In(time.UTC).Format(time.RFC3339)
	endpoint, err := url.Parse(iam.IAMEndpoint)
	if err != nil {
		return err
	}
	// sign mutates params in place, adding the V2 signature fields.
	sign(iam.Auth, "GET", "/", params, endpoint.Host)
	endpoint.RawQuery = multimap(params).Encode()
	r, err := http.Get(endpoint.String())
	if err != nil {
		return err
	}
	defer r.Body.Close()
	// Any status above 200 — including other 2xx codes — is treated as an
	// error response here.
	if r.StatusCode > 200 {
		return buildError(r)
	}
	return xml.NewDecoder(r.Body).Decode(resp)
}

// postQuery is like query but sends the parameters form-encoded in a POST
// body (presumably for parameters too large for a query string — confirm
// against the IAM API docs) and decodes the XML response into resp.
func (iam *IAM) postQuery(params map[string]string, resp interface{}) error {
	endpoint, err := url.Parse(iam.IAMEndpoint)
	if err != nil {
		return err
	}
	params["Version"] = "2010-05-08"
	params["Timestamp"] = time.Now().In(time.UTC).Format(time.RFC3339)
	// Note: signed as a POST to "/", matching how the request is sent below.
	sign(iam.Auth, "POST", "/", params, endpoint.Host)
	encoded := multimap(params).Encode()
	body := strings.NewReader(encoded)
	req, err := http.NewRequest("POST", endpoint.String(), body)
	if err != nil {
		return err
	}
	req.Header.Set("Host", endpoint.Host)
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	req.Header.Set("Content-Length", strconv.Itoa(len(encoded)))
	r, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer r.Body.Close()
	if r.StatusCode > 200 {
		return buildError(r)
	}
	return xml.NewDecoder(r.Body).Decode(resp)
}

// buildError converts an IAM error response into an *Error. The XML decode
// error is deliberately ignored: an empty xmlErrors still yields a usable
// *Error populated from the HTTP status line.
func buildError(r *http.Response) error {
	var (
		err    Error
		errors xmlErrors
	)
	xml.NewDecoder(r.Body).Decode(&errors)
	if len(errors.Errors) > 0 {
		err = errors.Errors[0]
	}
	err.StatusCode = r.StatusCode
	if err.Message == "" {
		err.Message = r.Status
	}
	return &err
}

// multimap converts a plain string map into url.Values (one value per key).
func multimap(p map[string]string) url.Values {
	q := make(url.Values, len(p))
	for k, v := range p {
		q[k] = []string{v}
	}
	return q
}

// Response to a CreateUser request.
//
// See http://goo.gl/JS9Gz for more details.
type CreateUserResp struct {
	RequestId string `xml:"ResponseMetadata>RequestId"`
	User      User   `xml:"CreateUserResult>User"`
}

// User encapsulates a user managed by IAM.
//
// See http://goo.gl/BwIQ3 for more details.
type User struct {
	Arn  string
	Path string
	Id   string `xml:"UserId"`
	Name string `xml:"UserName"`
}

// CreateUser creates a new user in IAM.
//
// See http://goo.gl/JS9Gz for more details.
func (iam *IAM) CreateUser(name, path string) (*CreateUserResp, error) {
	params := map[string]string{
		"Action":   "CreateUser",
		"Path":     path,
		"UserName": name,
	}
	resp := new(CreateUserResp)
	if err := iam.query(params, resp); err != nil {
		return nil, err
	}
	return resp, nil
}

// Response for GetUser requests.
//
// See http://goo.gl/ZnzRN for more details.
type GetUserResp struct {
	RequestId string `xml:"ResponseMetadata>RequestId"`
	User      User   `xml:"GetUserResult>User"`
}

// GetUser gets a user from IAM.
//
// See http://goo.gl/ZnzRN for more details.
func (iam *IAM) GetUser(name string) (*GetUserResp, error) {
	params := map[string]string{
		"Action": "GetUser",
	}
	// The UserName parameter is only sent when a name was supplied.
	if name != "" {
		params["UserName"] = name
	}
	resp := new(GetUserResp)
	if err := iam.query(params, resp); err != nil {
		return nil, err
	}
	return resp, nil
}

// DeleteUser deletes a user from IAM.
//
// See http://goo.gl/jBuCG for more details.
func (iam *IAM) DeleteUser(name string) (*SimpleResp, error) {
	params := map[string]string{
		"Action":   "DeleteUser",
		"UserName": name,
	}
	resp := new(SimpleResp)
	if err := iam.query(params, resp); err != nil {
		return nil, err
	}
	return resp, nil
}

// Response to a CreateGroup request.
//
// See http://goo.gl/n7NNQ for more details.
type CreateGroupResp struct {
	Group     Group  `xml:"CreateGroupResult>Group"`
	RequestId string `xml:"ResponseMetadata>RequestId"`
}

// Group encapsulates a group managed by IAM.
//
// See http://goo.gl/ae7Vs for more details.
type Group struct {
	Arn  string
	Id   string `xml:"GroupId"`
	Name string `xml:"GroupName"`
	Path string
}

// CreateGroup creates a new group in IAM.
//
// The path parameter can be used to identify which division or part of the
// organization the user belongs to.
//
// If path is unset ("") it defaults to "/".
//
// See http://goo.gl/n7NNQ for more details.
func (iam *IAM) CreateGroup(name string, path string) (*CreateGroupResp, error) {
	params := map[string]string{
		"Action":    "CreateGroup",
		"GroupName": name,
	}
	if path != "" {
		params["Path"] = path
	}
	resp := new(CreateGroupResp)
	if err := iam.query(params, resp); err != nil {
		return nil, err
	}
	return resp, nil
}

// Response to a ListGroups request.
//
// See http://goo.gl/W2TRj for more details.
type GroupsResp struct {
	Groups    []Group `xml:"ListGroupsResult>Groups>member"`
	RequestId string  `xml:"ResponseMetadata>RequestId"`
}

// Groups list the groups that have the specified path prefix.
//
// The parameter pathPrefix is optional. If pathPrefix is "", all groups are
// returned.
//
// See http://goo.gl/W2TRj for more details.
func (iam *IAM) Groups(pathPrefix string) (*GroupsResp, error) {
	params := map[string]string{
		"Action": "ListGroups",
	}
	if pathPrefix != "" {
		params["PathPrefix"] = pathPrefix
	}
	resp := new(GroupsResp)
	if err := iam.query(params, resp); err != nil {
		return nil, err
	}
	return resp, nil
}

// DeleteGroup deletes a group from IAM.
//
// See http://goo.gl/d5i2i for more details.
func (iam *IAM) DeleteGroup(name string) (*SimpleResp, error) {
	params := map[string]string{
		"Action":    "DeleteGroup",
		"GroupName": name,
	}
	resp := new(SimpleResp)
	if err := iam.query(params, resp); err != nil {
		return nil, err
	}
	return resp, nil
}

// Response to a CreateAccessKey request.
//
// See http://goo.gl/L46Py for more details.
type CreateAccessKeyResp struct {
	RequestId string    `xml:"ResponseMetadata>RequestId"`
	AccessKey AccessKey `xml:"CreateAccessKeyResult>AccessKey"`
}

// AccessKey encapsulates an access key generated for a user.
//
// See http://goo.gl/LHgZR for more details.
type AccessKey struct {
	UserName string
	Id       string `xml:"AccessKeyId"`
	Secret   string `xml:"SecretAccessKey,omitempty"`
	Status   string
}

// CreateAccessKey creates a new access key in IAM.
//
// See http://goo.gl/L46Py for more details.
func (iam *IAM) CreateAccessKey(userName string) (*CreateAccessKeyResp, error) {
	params := map[string]string{
		"Action":   "CreateAccessKey",
		"UserName": userName,
	}
	resp := new(CreateAccessKeyResp)
	if err := iam.query(params, resp); err != nil {
		return nil, err
	}
	return resp, nil
}

// Response to AccessKeys request.
//
// See http://goo.gl/Vjozx for more details.
type AccessKeysResp struct {
	RequestId  string      `xml:"ResponseMetadata>RequestId"`
	AccessKeys []AccessKey `xml:"ListAccessKeysResult>AccessKeyMetadata>member"`
}

// AccessKeys lists all access keys associated with a user.
//
// The userName parameter is optional. If set to "", the userName is determined
// implicitly based on the AWS Access Key ID used to sign the request.
//
// See http://goo.gl/Vjozx for more details.
func (iam *IAM) AccessKeys(userName string) (*AccessKeysResp, error) {
	params := map[string]string{
		"Action": "ListAccessKeys",
	}
	if userName != "" {
		params["UserName"] = userName
	}
	resp := new(AccessKeysResp)
	if err := iam.query(params, resp); err != nil {
		return nil, err
	}
	return resp, nil
}

// DeleteAccessKey deletes an access key from IAM.
//
// The userName parameter is optional. If set to "", the userName is determined
// implicitly based on the AWS Access Key ID used to sign the request.
//
// See http://goo.gl/hPGhw for more details.
func (iam *IAM) DeleteAccessKey(id, userName string) (*SimpleResp, error) { params := map[string]string{ "Action": "DeleteAccessKey", "AccessKeyId": id, } if userName != "" { params["UserName"] = userName } resp := new(SimpleResp) if err := iam.query(params, resp); err != nil { return nil, err } return resp, nil } // Response to a GetUserPolicy request. // // See http://goo.gl/BH04O for more details. type GetUserPolicyResp struct { Policy UserPolicy `xml:"GetUserPolicyResult"` RequestId string `xml:"ResponseMetadata>RequestId"` } // UserPolicy encapsulates an IAM group policy. // // See http://goo.gl/C7hgS for more details. type UserPolicy struct { Name string `xml:"PolicyName"` UserName string `xml:"UserName"` Document string `xml:"PolicyDocument"` } // GetUserPolicy gets a user policy in IAM. // // See http://goo.gl/BH04O for more details. func (iam *IAM) GetUserPolicy(userName, policyName string) (*GetUserPolicyResp, error) { params := map[string]string{ "Action": "GetUserPolicy", "UserName": userName, "PolicyName": policyName, } resp := new(GetUserPolicyResp) if err := iam.query(params, resp); err != nil { return nil, err } return resp, nil return nil, nil } // PutUserPolicy creates a user policy in IAM. // // See http://goo.gl/ldCO8 for more details. func (iam *IAM) PutUserPolicy(userName, policyName, policyDocument string) (*SimpleResp, error) { params := map[string]string{ "Action": "PutUserPolicy", "UserName": userName, "PolicyName": policyName, "PolicyDocument": policyDocument, } resp := new(SimpleResp) if err := iam.postQuery(params, resp); err != nil { return nil, err } return resp, nil } // DeleteUserPolicy deletes a user policy from IAM. // // See http://goo.gl/7Jncn for more details. 
func (iam *IAM) DeleteUserPolicy(userName, policyName string) (*SimpleResp, error) { params := map[string]string{ "Action": "DeleteUserPolicy", "PolicyName": policyName, "UserName": userName, } resp := new(SimpleResp) if err := iam.query(params, resp); err != nil { return nil, err } return resp, nil } type SimpleResp struct { RequestId string `xml:"ResponseMetadata>RequestId"` } type xmlErrors struct { Errors []Error `xml:"Error"` } // Error encapsulates an IAM error. type Error struct { // HTTP status code of the error. StatusCode int // AWS code of the error. Code string // Message explaining the error. Message string } func (e *Error) Error() string { var prefix string if e.Code != "" { prefix = e.Code + ": " } if prefix == "" && e.StatusCode > 0 { prefix = strconv.Itoa(e.StatusCode) + ": " } return prefix + e.Message } ================================================ FILE: vendor/github.com/zackbloom/goamz/iam/sign.go ================================================ package iam import ( "crypto/hmac" "crypto/sha256" "encoding/base64" "github.com/zackbloom/goamz/aws" "sort" "strings" ) // ---------------------------------------------------------------------------- // Version 2 signing (http://goo.gl/RSRp5) var b64 = base64.StdEncoding func sign(auth aws.Auth, method, path string, params map[string]string, host string) { params["AWSAccessKeyId"] = auth.AccessKey params["SignatureVersion"] = "2" params["SignatureMethod"] = "HmacSHA256" if auth.Token() != "" { params["SecurityToken"] = auth.Token() } var sarray []string for k, v := range params { sarray = append(sarray, aws.Encode(k)+"="+aws.Encode(v)) } sort.StringSlice(sarray).Sort() joined := strings.Join(sarray, "&") payload := method + "\n" + host + "\n" + path + "\n" + joined hash := hmac.New(sha256.New, []byte(auth.SecretKey)) hash.Write([]byte(payload)) signature := make([]byte, b64.EncodedLen(hash.Size())) b64.Encode(signature, hash.Sum(nil)) params["Signature"] = string(signature) } 
================================================ FILE: vendor/github.com/zackbloom/goamz/route53/route53.go ================================================ package route53 import ( "bytes" "encoding/xml" "fmt" "io" "net/http" "net/url" "strconv" "github.com/zackbloom/goamz/aws" ) type Route53 struct { Auth aws.Auth Endpoint string Signer *aws.Route53Signer Service *aws.Service } const route53_host = "https://route53.amazonaws.com" const route53_ver = "2013-04-01" // Factory for the route53 type func NewRoute53(auth aws.Auth) (*Route53, error) { signer := aws.NewRoute53Signer(auth) return &Route53{ Auth: auth, Signer: signer, Endpoint: route53_host + "/" + route53_ver + "/hostedzone", }, nil } // General Structs used in all types of requests type HostedZone struct { XMLName xml.Name `xml:"HostedZone"` Id string Name string VPC HostedZoneVPC `xml:"VPC,omitempty"` // used on CreateHostedZone CallerReference string Config Config ResourceRecordSetCount int } type Config struct { XMLName xml.Name `xml:"Config"` Comment string PrivateZone bool } // Structs for getting the existing Hosted Zones type ListHostedZonesResponse struct { XMLName xml.Name `xml:"ListHostedZonesResponse"` HostedZones []HostedZone `xml:"HostedZones>HostedZone"` Marker string IsTruncated bool NextMarker string MaxItems int } type ListHostedZonesByNameResponse struct { XMLName xml.Name `xml:"ListHostedZonesResponse"` HostedZones []HostedZone `xml:"HostedZones>HostedZone"` DNSName string HostedZoneId string NextDNSName string NextHostedZoneId string IsTruncated bool MaxItems int } // Structs for Creating a New Host type CreateHostedZoneRequest struct { XMLName xml.Name `xml:"CreateHostedZoneRequest"` Xmlns string `xml:"xmlns,attr"` Name string CallerReference string VPC HostedZoneVPC HostedZoneConfig HostedZoneConfig } type ResourceRecordValue struct { Value string `xml:"ResourceRecord>Value"` } type Change struct { Action string `xml:"Action"` Name string `xml:"ResourceRecordSet>Name"` Type string 
`xml:"ResourceRecordSet>Type"` TTL int `xml:"ResourceRecordSet>TTL,omitempty"` AliasTarget AliasTarget `xml:"ResourceRecordSet>AliasTarget,omitempty"` Values []ResourceRecordValue `xml:"ResourceRecordSet>ResourceRecords,omitempty"` } type ChangeResourceRecordSetsRequest struct { XMLName xml.Name `xml:"ChangeResourceRecordSetsRequest"` Xmlns string `xml:"xmlns,attr"` Changes []Change `xml:"ChangeBatch>Changes>Change"` } type AssociateVPCWithHostedZoneRequest struct { XMLName xml.Name `xml:"AssociateVPCWithHostedZoneRequest"` Xmlns string `xml:"xmlns,attr"` VPC HostedZoneVPC Comment string } type DisassociateVPCWithHostedZoneRequest struct { XMLName xml.Name `xml:"DisassociateVPCWithHostedZoneRequest"` Xmlns string `xml:"xmlns,attr"` VPC HostedZoneVPC Comment string } type HostedZoneConfig struct { XMLName xml.Name `xml:"HostedZoneConfig"` Comment string } type HostedZoneVPC struct { XMLName xml.Name `xml:"VPC"` VPCId string VPCRegion string } type CreateHostedZoneResponse struct { XMLName xml.Name `xml:"CreateHostedZoneResponse"` HostedZone HostedZone ChangeInfo ChangeInfo DelegationSet DelegationSet } type AliasTarget struct { HostedZoneId string DNSName string EvaluateTargetHealth bool } type ResourceRecord struct { XMLName xml.Name `xml:"ResourceRecord"` Value string } type ResourceRecords struct { XMLName xml.Name `xml:"ResourceRecords"` ResourceRecord []ResourceRecord } type ResourceRecordSet struct { XMLName xml.Name `xml:"ResourceRecordSet"` Name string Type string TTL int ResourceRecords []ResourceRecords HealthCheckId string Region string Failover string AliasTarget AliasTarget } type ResourceRecordSets struct { XMLName xml.Name `xml:"ResourceRecordSets"` ResourceRecordSet []ResourceRecordSet } type ListResourceRecordSetsResponse struct { XMLName xml.Name `xml:"ListResourceRecordSetsResponse"` ResourceRecordSets []ResourceRecordSets IsTruncated bool MaxItems int NextRecordName string NextRecordType string NextRecordIdentifier string } type 
ChangeResourceRecordSetsResponse struct { XMLName xml.Name `xml:"ChangeResourceRecordSetsResponse"` Id string `xml:"ChangeInfo>Id"` Status string `xml:"ChangeInfo>Status"` SubmittedAt string `xml:"ChangeInfo>SubmittedAt"` } type ChangeInfo struct { XMLName xml.Name `xml:"ChangeInfo"` Id string Status string SubmittedAt string } type DelegationSet struct { XMLName xml.Name `xml:"DelegationSet` NameServers NameServers } type NameServers struct { XMLName xml.Name `xml:"NameServers` NameServer []string } type GetHostedZoneResponse struct { XMLName xml.Name `xml:"GetHostedZoneResponse"` HostedZone HostedZone DelegationSet DelegationSet VPCs []HostedZoneVPC `xml:"VPCs>VPC"` } type DeleteHostedZoneResponse struct { XMLName xml.Name `xml:"DeleteHostedZoneResponse"` Xmlns string `xml:"xmlns,attr"` ChangeInfo ChangeInfo } type AssociateVPCWithHostedZoneResponse struct { XMLName xml.Name `xml:"AssociateVPCWithHostedZoneResponse"` Xmlns string `xml:"xmlns,attr"` ChangeInfo ChangeInfo } type DisassociateVPCWithHostedZoneResponse struct { XMLName xml.Name `xml:"DisassociateVPCWithHostedZoneResponse"` Xmlns string `xml:"xmlns,attr"` ChangeInfo ChangeInfo } // query sends the specified HTTP request to the path and signs the request // with the required authentication and headers based on the Auth. 
// // Automatically decodes the response into the the result interface func (r *Route53) query(method string, path string, body io.Reader, result interface{}) error { var err error // Create the POST request and sign the headers req, err := http.NewRequest(method, path, body) r.Signer.Sign(req) // Send the request and capture the response client := &http.Client{} res, err := client.Do(req) if err != nil { return err } if method == "POST" { defer req.Body.Close() } if res.StatusCode != 201 && res.StatusCode != 200 { err = r.Service.BuildError(res) return err } err = xml.NewDecoder(res.Body).Decode(result) return err } // CreateHostedZone send a creation request to the AWS Route53 API func (r *Route53) CreateHostedZone(hostedZoneReq *CreateHostedZoneRequest) (*CreateHostedZoneResponse, error) { xmlBytes, err := xml.Marshal(hostedZoneReq) if err != nil { return nil, err } result := new(CreateHostedZoneResponse) err = r.query("POST", r.Endpoint, bytes.NewBuffer(xmlBytes), result) return result, err } // ListResourceRecordSets fetches a collection of ResourceRecordSets through the AWS Route53 API func (r *Route53) ListResourceRecordSets(hostedZone string, name string, _type string, identifier string, maxitems int) (result *ListResourceRecordSetsResponse, err error) { var buffer bytes.Buffer addParam(&buffer, "name", name) addParam(&buffer, "type", _type) addParam(&buffer, "identifier", identifier) if maxitems > 0 { addParam(&buffer, "maxitems", strconv.Itoa(maxitems)) } path := fmt.Sprintf("%s/%s/rrset?%s", r.Endpoint, hostedZone, buffer.String()) fmt.Println(path) result = new(ListResourceRecordSetsResponse) err = r.query("GET", path, nil, result) return } func (response *ListResourceRecordSetsResponse) GetResourceRecordSets() []ResourceRecordSet { return response.ResourceRecordSets[0].ResourceRecordSet } func (recordset *ResourceRecordSet) GetValues() []string { if len(recordset.ResourceRecords) > 0 { result := make([]string, 
len(recordset.ResourceRecords[0].ResourceRecord)) for i, record := range recordset.ResourceRecords[0].ResourceRecord { result[i] = record.Value } return result } return make([]string, 0) } // ChangeResourceRecordSet send a change resource record request to the AWS Route53 API func (r *Route53) ChangeResourceRecordSet(req *ChangeResourceRecordSetsRequest, zoneId string) (*ChangeResourceRecordSetsResponse, error) { req.Xmlns = "https://route53.amazonaws.com/doc/" + route53_ver + "/" xmlBytes, err := xml.Marshal(req) if err != nil { return nil, err } xmlBytes = []byte(xml.Header + string(xmlBytes)) result := new(ChangeResourceRecordSetsResponse) path := fmt.Sprintf("%s/%s/rrset", r.Endpoint, zoneId) err = r.query("POST", path, bytes.NewBuffer(xmlBytes), result) return result, err } // ListedHostedZones fetches a collection of HostedZones through the AWS Route53 API func (r *Route53) ListHostedZones(marker string, maxItems int) (result *ListHostedZonesResponse, err error) { path := "" if marker == "" { path = fmt.Sprintf("%s?maxitems=%d", r.Endpoint, maxItems) } else { path = fmt.Sprintf("%s?marker=%v&maxitems=%d", r.Endpoint, marker, maxItems) } result = new(ListHostedZonesResponse) err = r.query("GET", path, nil, result) return } // ListedHostedZonesByName fetches a collection of HostedZones through the AWS Route53 API ordered, and optionally filtered by, a DNS name func (r *Route53) ListHostedZonesByName(DNSName string, nextHostedZoneId string, maxItems int) (result *ListHostedZonesByNameResponse, err error) { params := url.Values{} if DNSName != "" { params.Add("dnsname", DNSName) } if nextHostedZoneId != "" { params.Add("hostedzoneid", nextHostedZoneId) } if maxItems != 0 { params.Add("maxitems", strconv.FormatInt(int64(maxItems), 10)) } path := fmt.Sprintf("%s?%s", r.Endpoint, params.Encode()) result = new(ListHostedZonesByNameResponse) err = r.query("GET", path, nil, result) return } // GetHostedZone fetches a particular hostedzones DelegationSet by id func (r 
*Route53) GetHostedZone(id string) (result *GetHostedZoneResponse, err error) { result = new(GetHostedZoneResponse) err = r.query("GET", fmt.Sprintf("%s/%v", r.Endpoint, id), nil, result) return } // DeleteHostedZone deletes the hosted zone with the given id func (r *Route53) DeleteHostedZone(id string) (result *DeleteHostedZoneResponse, err error) { path := fmt.Sprintf("%s/%s", r.Endpoint, id) result = new(DeleteHostedZoneResponse) err = r.query("DELETE", path, nil, result) return } // AssociateVPCWithHostedZone associates a VPC with specified private hosted zone func (r *Route53) AssociateVPCWithHostedZone(zoneid string, req *AssociateVPCWithHostedZoneRequest) (result *AssociateVPCWithHostedZoneResponse, err error) { xmlBytes, err := xml.Marshal(req) if err != nil { return nil, err } xmlBytes = []byte(xml.Header + string(xmlBytes)) path := fmt.Sprintf("%s/%s/associatevpc", r.Endpoint, zoneid) result = new(AssociateVPCWithHostedZoneResponse) err = r.query("POST", path, bytes.NewBuffer(xmlBytes), result) return } // DisassociateVPCWithHostedZone disassociates a VPC from specified private hosted zone func (r *Route53) DisassociateVPCWithHostedZone(zoneid string, req *DisassociateVPCWithHostedZoneRequest) (result *DisassociateVPCWithHostedZoneResponse, err error) { xmlBytes, err := xml.Marshal(req) if err != nil { return nil, err } xmlBytes = []byte(xml.Header + string(xmlBytes)) path := fmt.Sprintf("%s/%s/disassociatevpc", r.Endpoint, zoneid) result = new(DisassociateVPCWithHostedZoneResponse) err = r.query("POST", path, bytes.NewBuffer(xmlBytes), result) return } func addParam(buffer *bytes.Buffer, name, value string) { if value != "" { if buffer.Len() > 0 { buffer.WriteString("&") } buffer.WriteString(fmt.Sprintf("%s=%s", name, value)) } } ================================================ FILE: vendor/github.com/zackbloom/goamz/s3/lifecycle.go ================================================ package s3 import ( "crypto/md5" "encoding/base64" "encoding/xml" 
"net/url" "strconv" "time" ) // Implements an interface for s3 bucket lifecycle configuration // See goo.gl/d0bbDf for details. const ( LifecycleRuleStatusEnabled = "Enabled" LifecycleRuleStatusDisabled = "Disabled" LifecycleRuleDateFormat = "2006-01-02" StorageClassGlacier = "GLACIER" ) type Expiration struct { Days *uint `xml:"Days,omitempty"` Date string `xml:"Date,omitempty"` } // Returns Date as a time.Time. func (r *Expiration) ParseDate() (time.Time, error) { return time.Parse(LifecycleRuleDateFormat, r.Date) } type Transition struct { Days *uint `xml:"Days,omitempty"` Date string `xml:"Date,omitempty"` StorageClass string `xml:"StorageClass"` } // Returns Date as a time.Time. func (r *Transition) ParseDate() (time.Time, error) { return time.Parse(LifecycleRuleDateFormat, r.Date) } type NoncurrentVersionExpiration struct { Days *uint `xml:"NoncurrentDays,omitempty"` } type NoncurrentVersionTransition struct { Days *uint `xml:"NoncurrentDays,omitempty"` StorageClass string `xml:"StorageClass"` } type LifecycleRule struct { ID string `xml:"ID"` Prefix string `xml:"Prefix"` Status string `xml:"Status"` NoncurrentVersionTransition *NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty"` NoncurrentVersionExpiration *NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty"` Transition *Transition `xml:"Transition,omitempty"` Expiration *Expiration `xml:"Expiration,omitempty"` } // Create a lifecycle rule with arbitrary identifier id and object name prefix // for which the rules should apply. func NewLifecycleRule(id, prefix string) *LifecycleRule { rule := &LifecycleRule{ ID: id, Prefix: prefix, Status: LifecycleRuleStatusEnabled, } return rule } // Adds a transition rule in days. Overwrites any previous transition rule. func (r *LifecycleRule) SetTransitionDays(days uint) { r.Transition = &Transition{ Days: &days, StorageClass: StorageClassGlacier, } } // Adds a transition rule as a date. Overwrites any previous transition rule. 
func (r *LifecycleRule) SetTransitionDate(date time.Time) { r.Transition = &Transition{ Date: date.Format(LifecycleRuleDateFormat), StorageClass: StorageClassGlacier, } } // Adds an expiration rule in days. Overwrites any previous expiration rule. // Days must be > 0. func (r *LifecycleRule) SetExpirationDays(days uint) { r.Expiration = &Expiration{ Days: &days, } } // Adds an expiration rule as a date. Overwrites any previous expiration rule. func (r *LifecycleRule) SetExpirationDate(date time.Time) { r.Expiration = &Expiration{ Date: date.Format(LifecycleRuleDateFormat), } } // Adds a noncurrent version transition rule. Overwrites any previous // noncurrent version transition rule. func (r *LifecycleRule) SetNoncurrentVersionTransitionDays(days uint) { r.NoncurrentVersionTransition = &NoncurrentVersionTransition{ Days: &days, StorageClass: StorageClassGlacier, } } // Adds a noncurrent version expiration rule. Days must be > 0. Overwrites // any previous noncurrent version expiration rule. func (r *LifecycleRule) SetNoncurrentVersionExpirationDays(days uint) { r.NoncurrentVersionExpiration = &NoncurrentVersionExpiration{ Days: &days, } } // Marks the rule as disabled. func (r *LifecycleRule) Disable() { r.Status = LifecycleRuleStatusDisabled } // Marks the rule as enabled (default). func (r *LifecycleRule) Enable() { r.Status = LifecycleRuleStatusEnabled } type LifecycleConfiguration struct { XMLName xml.Name `xml:"LifecycleConfiguration"` Rules *[]*LifecycleRule `xml:"Rule,omitempty"` } // Adds a LifecycleRule to the configuration. func (c *LifecycleConfiguration) AddRule(r *LifecycleRule) { var rules []*LifecycleRule if c.Rules != nil { rules = *c.Rules } rules = append(rules, r) c.Rules = &rules } // Sets the bucket's lifecycle configuration. 
func (b *Bucket) PutLifecycleConfiguration(c *LifecycleConfiguration) error { doc, err := xml.Marshal(c) if err != nil { return err } buf := makeXmlBuffer(doc) digest := md5.New() size, err := digest.Write(buf.Bytes()) if err != nil { return err } headers := map[string][]string{ "Content-Length": {strconv.FormatInt(int64(size), 10)}, "Content-MD5": {base64.StdEncoding.EncodeToString(digest.Sum(nil))}, } req := &request{ path: "/", method: "PUT", bucket: b.Name, headers: headers, payload: buf, params: url.Values{"lifecycle": {""}}, } return b.S3.queryV4Sign(req, nil) } // Retrieves the lifecycle configuration for the bucket. AWS returns an error // if no lifecycle found. func (b *Bucket) GetLifecycleConfiguration() (*LifecycleConfiguration, error) { req := &request{ method: "GET", bucket: b.Name, path: "/", params: url.Values{"lifecycle": {""}}, } conf := &LifecycleConfiguration{} err := b.S3.queryV4Sign(req, conf) return conf, err } // Delete the bucket's lifecycle configuration. func (b *Bucket) DeleteLifecycleConfiguration() error { req := &request{ method: "DELETE", bucket: b.Name, path: "/", params: url.Values{"lifecycle": {""}}, } return b.S3.queryV4Sign(req, nil) } ================================================ FILE: vendor/github.com/zackbloom/goamz/s3/multi.go ================================================ package s3 import ( "bytes" "crypto/md5" "encoding/base64" "encoding/hex" "encoding/xml" "errors" "io" "net/url" "sort" "strconv" "strings" ) // Multi represents an unfinished multipart upload. // // Multipart uploads allow sending big objects in smaller chunks. // After all parts have been sent, the upload must be explicitly // completed by calling Complete with the list of parts. // // See http://goo.gl/vJfTG for an overview of multipart uploads. type Multi struct { Bucket *Bucket Key string UploadId string } // That's the default. Here just for testing. 
var listMultiMax = 1000 type listMultiResp struct { NextKeyMarker string NextUploadIdMarker string IsTruncated bool Upload []Multi CommonPrefixes []string `xml:"CommonPrefixes>Prefix"` } // ListMulti returns the list of unfinished multipart uploads in b. // // The prefix parameter limits the response to keys that begin with the // specified prefix. You can use prefixes to separate a bucket into different // groupings of keys (to get the feeling of folders, for example). // // The delim parameter causes the response to group all of the keys that // share a common prefix up to the next delimiter in a single entry within // the CommonPrefixes field. You can use delimiters to separate a bucket // into different groupings of keys, similar to how folders would work. // // See http://goo.gl/ePioY for details. func (b *Bucket) ListMulti(prefix, delim string) (multis []*Multi, prefixes []string, err error) { params := map[string][]string{ "uploads": {""}, "max-uploads": {strconv.FormatInt(int64(listMultiMax), 10)}, "prefix": {prefix}, "delimiter": {delim}, } for attempt := attempts.Start(); attempt.Next(); { req := &request{ method: "GET", bucket: b.Name, params: params, } var resp listMultiResp err := b.S3.query(req, &resp) if shouldRetry(err) && attempt.HasNext() { continue } if err != nil { return nil, nil, err } for i := range resp.Upload { multi := &resp.Upload[i] multi.Bucket = b multis = append(multis, multi) } prefixes = append(prefixes, resp.CommonPrefixes...) if !resp.IsTruncated { return multis, prefixes, nil } params["key-marker"] = []string{resp.NextKeyMarker} params["upload-id-marker"] = []string{resp.NextUploadIdMarker} attempt = attempts.Start() // Last request worked. } panic("unreachable") } // Multi returns a multipart upload handler for the provided key // inside b. If a multipart upload exists for key, it is returned, // otherwise a new multipart upload is initiated with contType and perm. 
func (b *Bucket) Multi(key, contType string, perm ACL, options Options) (*Multi, error) { multis, _, err := b.ListMulti(key, "") if err != nil && !hasCode(err, "NoSuchUpload") { return nil, err } for _, m := range multis { if m.Key == key { return m, nil } } return b.InitMulti(key, contType, perm, options) } // InitMulti initializes a new multipart upload at the provided // key inside b and returns a value for manipulating it. // // See http://goo.gl/XP8kL for details. func (b *Bucket) InitMulti(key string, contType string, perm ACL, options Options) (*Multi, error) { headers := map[string][]string{ "Content-Type": {contType}, "Content-Length": {"0"}, "x-amz-acl": {string(perm)}, } options.addHeaders(headers) params := map[string][]string{ "uploads": {""}, } req := &request{ method: "POST", bucket: b.Name, path: key, headers: headers, params: params, } var err error var resp struct { UploadId string `xml:"UploadId"` } for attempt := attempts.Start(); attempt.Next(); { err = b.S3.query(req, &resp) if !shouldRetry(err) { break } } if err != nil { return nil, err } return &Multi{Bucket: b, Key: key, UploadId: resp.UploadId}, nil } func (m *Multi) PutPartCopy(n int, options CopyOptions, source string) (*CopyObjectResult, Part, error) { headers := map[string][]string{ "x-amz-copy-source": {url.QueryEscape(source)}, } options.addHeaders(headers) params := map[string][]string{ "uploadId": {m.UploadId}, "partNumber": {strconv.FormatInt(int64(n), 10)}, } sourceBucket := m.Bucket.S3.Bucket(strings.TrimRight(strings.SplitAfterN(source, "/", 2)[0], "/")) sourceMeta, err := sourceBucket.Head(strings.SplitAfterN(source, "/", 2)[1], nil) if err != nil { return nil, Part{}, err } for attempt := attempts.Start(); attempt.Next(); { req := &request{ method: "PUT", bucket: m.Bucket.Name, path: m.Key, headers: headers, params: params, } resp := &CopyObjectResult{} err = m.Bucket.S3.query(req, resp) if shouldRetry(err) && attempt.HasNext() { continue } if err != nil { return nil, 
Part{}, err } if resp.ETag == "" { return nil, Part{}, errors.New("part upload succeeded with no ETag") } return resp, Part{n, resp.ETag, sourceMeta.ContentLength}, nil } panic("unreachable") } // PutPart sends part n of the multipart upload, reading all the content from r. // Each part, except for the last one, must be at least 5MB in size. // // See http://goo.gl/pqZer for details. func (m *Multi) PutPart(n int, r io.ReadSeeker) (Part, error) { partSize, _, md5b64, err := seekerInfo(r) if err != nil { return Part{}, err } return m.putPart(n, r, partSize, md5b64) } func (m *Multi) putPart(n int, r io.ReadSeeker, partSize int64, md5b64 string) (Part, error) { headers := map[string][]string{ "Content-Length": {strconv.FormatInt(partSize, 10)}, "Content-MD5": {md5b64}, } params := map[string][]string{ "uploadId": {m.UploadId}, "partNumber": {strconv.FormatInt(int64(n), 10)}, } for attempt := attempts.Start(); attempt.Next(); { _, err := r.Seek(0, 0) if err != nil { return Part{}, err } req := &request{ method: "PUT", bucket: m.Bucket.Name, path: m.Key, headers: headers, params: params, payload: r, } err = m.Bucket.S3.prepare(req) if err != nil { return Part{}, err } resp, err := m.Bucket.S3.run(req, nil) if shouldRetry(err) && attempt.HasNext() { continue } if err != nil { return Part{}, err } etag := resp.Header.Get("ETag") if etag == "" { return Part{}, errors.New("part upload succeeded with no ETag") } return Part{n, etag, partSize}, nil } panic("unreachable") } func seekerInfo(r io.ReadSeeker) (size int64, md5hex string, md5b64 string, err error) { _, err = r.Seek(0, 0) if err != nil { return 0, "", "", err } digest := md5.New() size, err = io.Copy(digest, r) if err != nil { return 0, "", "", err } sum := digest.Sum(nil) md5hex = hex.EncodeToString(sum) md5b64 = base64.StdEncoding.EncodeToString(sum) return size, md5hex, md5b64, nil } type Part struct { N int `xml:"PartNumber"` ETag string Size int64 } type partSlice []Part func (s partSlice) Len() int { return 
len(s) } func (s partSlice) Less(i, j int) bool { return s[i].N < s[j].N } func (s partSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } type listPartsResp struct { NextPartNumberMarker string IsTruncated bool Part []Part } // That's the default. Here just for testing. var listPartsMax = 1000 // Kept for backcompatability. See the documentation for ListPartsFull func (m *Multi) ListParts() ([]Part, error) { return m.ListPartsFull(0, listPartsMax) } // ListParts returns the list of previously uploaded parts in m, // ordered by part number (Only parts with higher part numbers than // partNumberMarker will be listed). Only up to maxParts parts will be // returned. // // See http://goo.gl/ePioY for details. func (m *Multi) ListPartsFull(partNumberMarker int, maxParts int) ([]Part, error) { if maxParts > listPartsMax { maxParts = listPartsMax } params := map[string][]string{ "uploadId": {m.UploadId}, "max-parts": {strconv.FormatInt(int64(maxParts), 10)}, "part-number-marker": {strconv.FormatInt(int64(partNumberMarker), 10)}, } var parts partSlice for attempt := attempts.Start(); attempt.Next(); { req := &request{ method: "GET", bucket: m.Bucket.Name, path: m.Key, params: params, } var resp listPartsResp err := m.Bucket.S3.query(req, &resp) if shouldRetry(err) && attempt.HasNext() { continue } if err != nil { return nil, err } parts = append(parts, resp.Part...) if !resp.IsTruncated { sort.Sort(parts) return parts, nil } params["part-number-marker"] = []string{resp.NextPartNumberMarker} attempt = attempts.Start() // Last request worked. } panic("unreachable") } type ReaderAtSeeker interface { io.ReaderAt io.ReadSeeker } // PutAll sends all of r via a multipart upload with parts no larger // than partSize bytes, which must be set to at least 5MB. // Parts previously uploaded are either reused if their checksum // and size match the new part, or otherwise overwritten with the // new content. // PutAll returns all the parts of m (reused or not). 
func (m *Multi) PutAll(r ReaderAtSeeker, partSize int64) ([]Part, error) { old, err := m.ListParts() if err != nil && !hasCode(err, "NoSuchUpload") { return nil, err } reuse := 0 // Index of next old part to consider reusing. current := 1 // Part number of latest good part handled. totalSize, err := r.Seek(0, 2) if err != nil { return nil, err } first := true // Must send at least one empty part if the file is empty. var result []Part NextSection: for offset := int64(0); offset < totalSize || first; offset += partSize { first = false if offset+partSize > totalSize { partSize = totalSize - offset } section := io.NewSectionReader(r, offset, partSize) _, md5hex, md5b64, err := seekerInfo(section) if err != nil { return nil, err } for reuse < len(old) && old[reuse].N <= current { // Looks like this part was already sent. part := &old[reuse] etag := `"` + md5hex + `"` if part.N == current && part.Size == partSize && part.ETag == etag { // Checksum matches. Reuse the old part. result = append(result, *part) current++ continue NextSection } reuse++ } // Part wasn't found or doesn't match. Send it. part, err := m.putPart(current, section, partSize, md5b64) if err != nil { return nil, err } result = append(result, part) current++ } return result, nil } type completeUpload struct { XMLName xml.Name `xml:"CompleteMultipartUpload"` Parts completeParts `xml:"Part"` } type completePart struct { PartNumber int ETag string } type completeParts []completePart func (p completeParts) Len() int { return len(p) } func (p completeParts) Less(i, j int) bool { return p[i].PartNumber < p[j].PartNumber } func (p completeParts) Swap(i, j int) { p[i], p[j] = p[j], p[i] } // Complete assembles the given previously uploaded parts into the // final object. This operation may take several minutes. // // See http://goo.gl/2Z7Tw for details. 
func (m *Multi) Complete(parts []Part) error { params := map[string][]string{ "uploadId": {m.UploadId}, } c := completeUpload{} for _, p := range parts { c.Parts = append(c.Parts, completePart{p.N, p.ETag}) } sort.Sort(c.Parts) data, err := xml.Marshal(&c) if err != nil { return err } for attempt := attempts.Start(); attempt.Next(); { req := &request{ method: "POST", bucket: m.Bucket.Name, path: m.Key, params: params, payload: bytes.NewReader(data), } err := m.Bucket.S3.query(req, nil) if shouldRetry(err) && attempt.HasNext() { continue } return err } panic("unreachable") } // Abort deletes an unifinished multipart upload and any previously // uploaded parts for it. // // After a multipart upload is aborted, no additional parts can be // uploaded using it. However, if any part uploads are currently in // progress, those part uploads might or might not succeed. As a result, // it might be necessary to abort a given multipart upload multiple // times in order to completely free all storage consumed by all parts. // // NOTE: If the described scenario happens to you, please report back to // the goamz authors with details. In the future such retrying should be // handled internally, but it's not clear what happens precisely (Is an // error returned? Is the issue completely undetectable?). // // See http://goo.gl/dnyJw for details. func (m *Multi) Abort() error { params := map[string][]string{ "uploadId": {m.UploadId}, } for attempt := attempts.Start(); attempt.Next(); { req := &request{ method: "DELETE", bucket: m.Bucket.Name, path: m.Key, params: params, } err := m.Bucket.S3.query(req, nil) if shouldRetry(err) && attempt.HasNext() { continue } return err } panic("unreachable") } ================================================ FILE: vendor/github.com/zackbloom/goamz/s3/s3.go ================================================ // // goamz - Go packages to interact with the Amazon Web Services. // // https://wiki.ubuntu.com/goamz // // Copyright (c) 2011 Canonical Ltd. 
//
// Written by Gustavo Niemeyer
//

package s3

import (
	"bytes"
	"crypto/hmac"
	"crypto/md5"
	"crypto/sha1"
	"encoding/base64"
	"encoding/xml"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net"
	"net/http"
	"net/http/httputil"
	"net/url"
	"strconv"
	"strings"
	"time"

	"github.com/zackbloom/goamz/aws"
)

// debug enables request/response dumping via the log package.
const debug = false

// The S3 type encapsulates operations with an S3 region.
type S3 struct {
	aws.Auth
	aws.Region
	// ConnectTimeout and ReadTimeout bound dialing and the per-request
	// deadline in doHttpRequest; zero disables the respective timeout.
	ConnectTimeout time.Duration
	ReadTimeout    time.Duration
	// Signature selects the AWS signing scheme (compared against
	// aws.V2Signature throughout this file).
	Signature int
	private   byte // Reserve the right of using private data.
}

// The Bucket type encapsulates operations with an S3 bucket.
type Bucket struct {
	*S3
	Name string
}

// The Owner type represents the owner of the object in an S3 bucket.
type Owner struct {
	ID          string
	DisplayName string
}

// Fold options into an Options struct
//
type Options struct {
	SSE                  bool
	SSECustomerAlgorithm string
	SSECustomerKey       string
	SSECustomerKeyMD5    string
	Meta                 map[string][]string
	ContentEncoding      string
	CacheControl         string
	RedirectLocation     string
	ContentMD5           string
	ContentDisposition   string
	Range                string
	// What else?
	//// The following become headers so they are []strings rather than strings... I think
	// x-amz-storage-class []string
}

type CopyOptions struct {
	Options
	CopySourceOptions string
	MetadataDirective string
	ContentType       string
}

// CopyObjectResult is the output from a Copy request
type CopyObjectResult struct {
	ETag         string
	LastModified string
}

// attempts governs the retry policy used by the retriable operations below.
var attempts = aws.AttemptStrategy{
	Min:   5,
	Total: 5 * time.Second,
	Delay: 200 * time.Millisecond,
}

// New creates a new S3.
//
// NOTE(review): this positional composite literal appears to assign 0 to
// Signature and aws.V2Signature to the private field, given the field order
// declared above — verify the field order against upstream goamz before
// relying on the default signature scheme.
func New(auth aws.Auth, region aws.Region) *S3 {
	return &S3{auth, region, 0, 0, 0, aws.V2Signature}
}

// Bucket returns a Bucket with the given name.
func (s3 *S3) Bucket(name string) *Bucket {
	// Some endpoints only accept lowercase bucket names.
	if s3.Region.S3BucketEndpoint != "" || s3.Region.S3LowercaseBucket {
		name = strings.ToLower(name)
	}
	return &Bucket{s3, name}
}

type BucketInfo struct {
	Name         string
	CreationDate string
}

type GetServiceResp struct {
	Owner   Owner
	Buckets []BucketInfo `xml:">Bucket"`
}

// GetService gets a list of all buckets owned by an account.
//
// See http://goo.gl/wbHkGj for details.
func (s3 *S3) GetService() (*GetServiceResp, error) {
	bucket := s3.Bucket("")
	r, err := bucket.Get("")
	if err != nil {
		return nil, err
	}
	// Parse the XML response.
	var resp GetServiceResp
	if err = xml.Unmarshal(r, &resp); err != nil {
		return nil, err
	}
	return &resp, nil
}

// NOTE(review): the XML wrapper looks stripped from this template — upstream
// goamz wraps %s in <CreateBucketConfiguration><LocationConstraint> tags.
// Confirm against upstream before relying on the request body produced here.
var createBucketConfiguration = ` %s `

// locationConstraint returns an io.Reader specifying a LocationConstraint if
// required for the region.
//
// See http://goo.gl/bh9Kq for details.
func (s3 *S3) locationConstraint() io.Reader {
	constraint := ""
	if s3.Region.S3LocationConstraint {
		constraint = fmt.Sprintf(createBucketConfiguration, s3.Region.Name)
	}
	return strings.NewReader(constraint)
}

// ACL is a canned access control list name accepted by S3.
type ACL string

const (
	Private           = ACL("private")
	PublicRead        = ACL("public-read")
	PublicReadWrite   = ACL("public-read-write")
	AuthenticatedRead = ACL("authenticated-read")
	BucketOwnerRead   = ACL("bucket-owner-read")
	BucketOwnerFull   = ACL("bucket-owner-full-control")
)

// PutBucket creates a new bucket.
//
// See http://goo.gl/ndjnR for details.
func (b *Bucket) PutBucket(perm ACL) error {
	headers := map[string][]string{
		"x-amz-acl": {string(perm)},
	}
	req := &request{
		method:  "PUT",
		bucket:  b.Name,
		path:    "/",
		headers: headers,
		payload: b.locationConstraint(),
	}
	return b.S3.query(req, nil)
}

// DelBucket removes an existing S3 bucket. All objects in the bucket must
// be removed before the bucket itself can be removed.
//
// See http://goo.gl/GoBrY for details.
func (b *Bucket) DelBucket() (err error) { req := &request{ method: "DELETE", bucket: b.Name, path: "/", } for attempt := attempts.Start(); attempt.Next(); { err = b.S3.query(req, nil) if !shouldRetry(err) { break } } return err } // Get retrieves an object from an S3 bucket. // // See http://goo.gl/isCO7 for details. func (b *Bucket) Get(path string) (data []byte, err error) { body, err := b.GetReader(path) if err != nil { return nil, err } data, err = ioutil.ReadAll(body) body.Close() return data, err } // GetReader retrieves an object from an S3 bucket, // returning the body of the HTTP response. // It is the caller's responsibility to call Close on rc when // finished reading. func (b *Bucket) GetReader(path string) (rc io.ReadCloser, err error) { resp, err := b.GetResponse(path) if resp != nil { return resp.Body, err } return nil, err } // GetResponse retrieves an object from an S3 bucket, // returning the HTTP response. // It is the caller's responsibility to call Close on rc when // finished reading func (b *Bucket) GetResponse(path string) (resp *http.Response, err error) { return b.GetResponseWithHeaders(path, make(http.Header)) } // GetReaderWithHeaders retrieves an object from an S3 bucket // Accepts custom headers to be sent as the second parameter // returning the body of the HTTP response. // It is the caller's responsibility to call Close on rc when // finished reading func (b *Bucket) GetResponseWithHeaders(path string, headers map[string][]string) (resp *http.Response, err error) { req := &request{ bucket: b.Name, path: path, headers: headers, } err = b.S3.prepare(req) if err != nil { return nil, err } for attempt := attempts.Start(); attempt.Next(); { resp, err := b.S3.run(req, nil) if shouldRetry(err) && attempt.HasNext() { continue } if err != nil { return nil, err } return resp, nil } panic("unreachable") } // Exists checks whether or not an object exists on an S3 bucket using a HEAD request. 
// Exists issues a HEAD request for path and reports whether the object
// exists. A 403 or 404 response is treated as "does not exist" rather
// than an error.
func (b *Bucket) Exists(path string) (exists bool, err error) {
	req := &request{
		method: "HEAD",
		bucket: b.Name,
		path:   path,
	}
	err = b.S3.prepare(req)
	if err != nil {
		return
	}
	for attempt := attempts.Start(); attempt.Next(); {
		// Note: resp and err here shadow the named returns on purpose;
		// every exit from this loop returns explicitly.
		resp, err := b.S3.run(req, nil)

		if shouldRetry(err) && attempt.HasNext() {
			continue
		}

		if err != nil {
			// We can treat a 403 or 404 as non existance
			if e, ok := err.(*Error); ok && (e.StatusCode == 403 || e.StatusCode == 404) {
				return false, nil
			}
			return false, err
		}

		if resp.StatusCode/100 == 2 {
			exists = true
		}
		if resp.Body != nil {
			resp.Body.Close()
		}
		return exists, err
	}
	return false, fmt.Errorf("S3 Currently Unreachable")
}

// Head HEADs an object in the S3 bucket, returns the response with
// no body see http://bit.ly/17K1ylI
func (b *Bucket) Head(path string, headers map[string][]string) (*http.Response, error) {
	req := &request{
		method:  "HEAD",
		bucket:  b.Name,
		path:    path,
		headers: headers,
	}
	err := b.S3.prepare(req)
	if err != nil {
		return nil, err
	}

	for attempt := attempts.Start(); attempt.Next(); {
		resp, err := b.S3.run(req, nil)
		if shouldRetry(err) && attempt.HasNext() {
			continue
		}
		if err != nil {
			return nil, err
		}
		return resp, err
	}
	return nil, fmt.Errorf("S3 Currently Unreachable")
}

// Put inserts an object into the S3 bucket.
//
// See http://goo.gl/FEBPD for details.
func (b *Bucket) Put(path string, data []byte, contType string, perm ACL, options Options) error {
	body := bytes.NewBuffer(data)
	return b.PutReader(path, body, int64(len(data)), contType, perm, options)
}

// PutCopy puts a copy of an object given by the key path into bucket b using b.Path as the target key
func (b *Bucket) PutCopy(path string, perm ACL, options CopyOptions, source string) (*CopyObjectResult, error) {
	headers := map[string][]string{
		"x-amz-acl":         {string(perm)},
		"x-amz-copy-source": {url.QueryEscape(source)},
	}
	options.addHeaders(headers)
	req := &request{
		method:  "PUT",
		bucket:  b.Name,
		path:    path,
		headers: headers,
	}
	resp := &CopyObjectResult{}
	err := b.S3.query(req, resp)
	if err != nil {
		return resp, err
	}
	return resp, nil
}

// PutReader inserts an object into the S3 bucket by consuming data
// from r until EOF.
func (b *Bucket) PutReader(path string, r io.Reader, length int64, contType string, perm ACL, options Options) error {
	headers := map[string][]string{
		"Content-Length": {strconv.FormatInt(length, 10)},
		"Content-Type":   {contType},
		"x-amz-acl":      {string(perm)},
	}
	options.addHeaders(headers)
	req := &request{
		method:  "PUT",
		bucket:  b.Name,
		path:    path,
		headers: headers,
		payload: r,
	}
	return b.S3.query(req, nil)
}

// addHeaders adds o's specified fields to headers
func (o Options) addHeaders(headers map[string][]string) {
	if o.SSE {
		headers["x-amz-server-side-encryption"] = []string{"AES256"}
	} else if len(o.SSECustomerAlgorithm) != 0 && len(o.SSECustomerKey) != 0 && len(o.SSECustomerKeyMD5) != 0 {
		// Amazon-managed keys and customer-managed keys are mutually exclusive
		headers["x-amz-server-side-encryption-customer-algorithm"] = []string{o.SSECustomerAlgorithm}
		headers["x-amz-server-side-encryption-customer-key"] = []string{o.SSECustomerKey}
		headers["x-amz-server-side-encryption-customer-key-MD5"] = []string{o.SSECustomerKeyMD5}
	}
	if len(o.Range) != 0 {
		headers["Range"] = []string{o.Range}
	}
	if len(o.ContentEncoding) != 0 {
		headers["Content-Encoding"] = []string{o.ContentEncoding}
	}
	if len(o.CacheControl) != 0 {
		headers["Cache-Control"] = []string{o.CacheControl}
	}
	if len(o.ContentMD5) != 0 {
		headers["Content-MD5"] = []string{o.ContentMD5}
	}
	if len(o.RedirectLocation) != 0 {
		headers["x-amz-website-redirect-location"] = []string{o.RedirectLocation}
	}
	if len(o.ContentDisposition) != 0 {
		headers["Content-Disposition"] = []string{o.ContentDisposition}
	}
	// User metadata is passed through with the x-amz-meta- prefix.
	for k, v := range o.Meta {
		headers["x-amz-meta-"+k] = v
	}
}

// addHeaders adds o's specified fields to headers
func (o CopyOptions) addHeaders(headers map[string][]string) {
	o.Options.addHeaders(headers)
	if len(o.MetadataDirective) != 0 {
		headers["x-amz-metadata-directive"] = []string{o.MetadataDirective}
	}
	if len(o.CopySourceOptions) != 0 {
		headers["x-amz-copy-source-range"] = []string{o.CopySourceOptions}
	}
	if len(o.ContentType) != 0 {
		headers["Content-Type"] = []string{o.ContentType}
	}
}

// makeXmlBuffer prepends the standard XML header to doc.
func makeXmlBuffer(doc []byte) *bytes.Buffer {
	buf := new(bytes.Buffer)
	buf.WriteString(xml.Header)
	buf.Write(doc)
	return buf
}

type IndexDocument struct {
	Suffix string `xml:"Suffix"`
}

type ErrorDocument struct {
	Key string `xml:"Key"`
}

type RoutingRule struct {
	ConditionKeyPrefixEquals     string `xml:"Condition>KeyPrefixEquals"`
	RedirectReplaceKeyPrefixWith string `xml:"Redirect>ReplaceKeyPrefixWith,omitempty"`
	RedirectReplaceKeyWith       string `xml:"Redirect>ReplaceKeyWith,omitempty"`
}

type RedirectAllRequestsTo struct {
	HostName string `xml:"HostName"`
	Protocol string `xml:"Protocol,omitempty"`
}

type WebsiteConfiguration struct {
	XMLName               xml.Name               `xml:"http://s3.amazonaws.com/doc/2006-03-01/ WebsiteConfiguration"`
	IndexDocument         *IndexDocument         `xml:"IndexDocument,omitempty"`
	ErrorDocument         *ErrorDocument         `xml:"ErrorDocument,omitempty"`
	RoutingRules          *[]RoutingRule         `xml:"RoutingRules>RoutingRule,omitempty"`
	RedirectAllRequestsTo *RedirectAllRequestsTo `xml:"RedirectAllRequestsTo,omitempty"`
}

// PutBucketWebsite configures a bucket as a website.
//
// See http://goo.gl/TpRlUy for details.
func (b *Bucket) PutBucketWebsite(configuration WebsiteConfiguration) error {
	doc, err := xml.Marshal(configuration)
	if err != nil {
		return err
	}

	buf := makeXmlBuffer(doc)

	return b.PutBucketSubresource("website", buf, int64(buf.Len()))
}

// PutBucketSubresource PUTs the given payload to the named bucket
// subresource (e.g. "website", "lifecycle").
func (b *Bucket) PutBucketSubresource(subresource string, r io.Reader, length int64) error {
	headers := map[string][]string{
		"Content-Length": {strconv.FormatInt(length, 10)},
	}
	req := &request{
		path:    "/",
		method:  "PUT",
		bucket:  b.Name,
		headers: headers,
		payload: r,
		params:  url.Values{subresource: {""}},
	}

	return b.S3.query(req, nil)
}

// Del removes an object from the S3 bucket.
//
// See http://goo.gl/APeTt for details.
func (b *Bucket) Del(path string) error {
	req := &request{
		method: "DELETE",
		bucket: b.Name,
		path:   path,
	}
	return b.S3.query(req, nil)
}

type Delete struct {
	Quiet   bool     `xml:"Quiet,omitempty"`
	Objects []Object `xml:"Object"`
}

type Object struct {
	Key       string `xml:"Key"`
	VersionId string `xml:"VersionId,omitempty"`
}

// DelMulti removes up to 1000 objects from the S3 bucket.
//
// See http://goo.gl/jx6cWK for details.
func (b *Bucket) DelMulti(objects Delete) error {
	doc, err := xml.Marshal(objects)
	if err != nil {
		return err
	}

	buf := makeXmlBuffer(doc)
	// S3 requires a Content-MD5 header on multi-object delete requests.
	digest := md5.New()
	size, err := digest.Write(buf.Bytes())
	if err != nil {
		return err
	}

	headers := map[string][]string{
		"Content-Length": {strconv.FormatInt(int64(size), 10)},
		"Content-MD5":    {base64.StdEncoding.EncodeToString(digest.Sum(nil))},
		"Content-Type":   {"text/xml"},
	}
	req := &request{
		path:    "/",
		method:  "POST",
		params:  url.Values{"delete": {""}},
		bucket:  b.Name,
		headers: headers,
		payload: buf,
	}

	return b.S3.query(req, nil)
}

// The ListResp type holds the results of a List bucket operation.
type ListResp struct {
	Name      string
	Prefix    string
	Delimiter string
	Marker    string
	MaxKeys   int
	// IsTruncated is true if the results have been truncated because
	// there are more keys and prefixes than can fit in MaxKeys.
	// N.B. this is the opposite sense to that documented (incorrectly) in
	// http://goo.gl/YjQTc
	IsTruncated    bool
	Contents       []Key
	CommonPrefixes []string `xml:">Prefix"`
	// if IsTruncated is true, pass NextMarker as marker argument to List()
	// to get the next set of keys
	NextMarker string
}

// The Key type represents an item stored in an S3 bucket.
type Key struct {
	Key          string
	LastModified string
	Size         int64
	// ETag gives the hex-encoded MD5 sum of the contents,
	// surrounded with double-quotes.
	ETag         string
	StorageClass string
	Owner        Owner
}

// List returns information about objects in an S3 bucket.
//
// The prefix parameter limits the response to keys that begin with the
// specified prefix.
//
// The delim parameter causes the response to group all of the keys that
// share a common prefix up to the next delimiter in a single entry within
// the CommonPrefixes field. You can use delimiters to separate a bucket
// into different groupings of keys, similar to how folders would work.
//
// The marker parameter specifies the key to start with when listing objects
// in a bucket. Amazon S3 lists objects in alphabetical order and
// will return keys alphabetically greater than the marker.
//
// The max parameter specifies how many keys + common prefixes to return in
// the response. The default is 1000.
//
// For example, given these keys in a bucket:
//
//	index.html
//	index2.html
//	photos/2006/January/sample.jpg
//	photos/2006/February/sample2.jpg
//	photos/2006/February/sample3.jpg
//	photos/2006/February/sample4.jpg
//
// Listing this bucket with delimiter set to "/" would yield the
// following result:
//
//	&ListResp{
//	    Name:      "sample-bucket",
//	    MaxKeys:   1000,
//	    Delimiter: "/",
//	    Contents:  []Key{
//	        {Key: "index.html", "index2.html"},
//	    },
//	    CommonPrefixes: []string{
//	        "photos/",
//	    },
//	}
//
// Listing the same bucket with delimiter set to "/" and prefix set to
// "photos/2006/" would yield the following result:
//
//	&ListResp{
//	    Name:      "sample-bucket",
//	    MaxKeys:   1000,
//	    Delimiter: "/",
//	    Prefix:    "photos/2006/",
//	    CommonPrefixes: []string{
//	        "photos/2006/February/",
//	        "photos/2006/January/",
//	    },
//	}
//
// See http://goo.gl/YjQTc for details.
func (b *Bucket) List(prefix, delim, marker string, max int) (result *ListResp, err error) {
	params := map[string][]string{
		"prefix":    {prefix},
		"delimiter": {delim},
		"marker":    {marker},
	}
	if max != 0 {
		params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)}
	}
	req := &request{
		bucket: b.Name,
		params: params,
	}
	result = &ListResp{}
	for attempt := attempts.Start(); attempt.Next(); {
		err = b.S3.query(req, result)
		if !shouldRetry(err) {
			break
		}
	}
	if err != nil {
		return nil, err
	}

	// if NextMarker is not returned, it should be set to the name of last key,
	// so let's do it so that each caller doesn't have to
	if result.IsTruncated && result.NextMarker == "" {
		n := len(result.Contents)
		if n > 0 {
			result.NextMarker = result.Contents[n-1].Key
		}
	}

	return result, nil
}

// The VersionsResp type holds the results of a list bucket Versions operation.
type VersionsResp struct { Name string Prefix string KeyMarker string VersionIdMarker string MaxKeys int Delimiter string IsTruncated bool Versions []Version `xml:"Version"` CommonPrefixes []string `xml:">Prefix"` } // The Version type represents an object version stored in an S3 bucket. type Version struct { Key string VersionId string IsLatest bool LastModified string // ETag gives the hex-encoded MD5 sum of the contents, // surrounded with double-quotes. ETag string Size int64 Owner Owner StorageClass string } func (b *Bucket) Versions(prefix, delim, keyMarker string, versionIdMarker string, max int) (result *VersionsResp, err error) { params := map[string][]string{ "versions": {""}, "prefix": {prefix}, "delimiter": {delim}, } if len(versionIdMarker) != 0 { params["version-id-marker"] = []string{versionIdMarker} } if len(keyMarker) != 0 { params["key-marker"] = []string{keyMarker} } if max != 0 { params["max-keys"] = []string{strconv.FormatInt(int64(max), 10)} } req := &request{ bucket: b.Name, params: params, } result = &VersionsResp{} for attempt := attempts.Start(); attempt.Next(); { err = b.S3.query(req, result) if !shouldRetry(err) { break } } if err != nil { return nil, err } return result, nil } type GetLocationResp struct { Location string `xml:",innerxml"` } func (b *Bucket) Location() (string, error) { r, err := b.Get("/?location") if err != nil { return "", err } // Parse the XML response. 
var resp GetLocationResp if err = xml.Unmarshal(r, &resp); err != nil { return "", err } if resp.Location == "" { return "us-east-1", nil } else { return resp.Location, nil } } // Get bucket policy func (b *Bucket) GetPolicy() ([]byte, error) { req := &request{ bucket: b.Name, path: "/", method: "GET", params: url.Values{"policy": {""}}, } err := b.S3.prepare(req) if err != nil { return nil, err } for attempt := attempts.Start(); attempt.Next(); { resp, err := b.S3.run(req, nil) if shouldRetry(err) && attempt.HasNext() { continue } if err != nil { return nil, err } str, err := ioutil.ReadAll(resp.Body) return str, nil } panic("unreachable") } // Put bucket policy func (b *Bucket) PutPolicy(data []byte) error { req := &request{ bucket: b.Name, path: "/", method: "PUT", params: url.Values{"policy": {""}}, payload: bytes.NewReader(data), } return b.S3.query(req, nil) } // URL returns a non-signed URL that allows retriving the // object at path. It only works if the object is publicly // readable (see SignedURL). func (b *Bucket) URL(path string) string { req := &request{ bucket: b.Name, path: path, } err := b.S3.prepare(req) if err != nil { panic(err) } u, err := req.url() if err != nil { panic(err) } u.RawQuery = "" return u.String() } // SignedURL returns a signed URL that allows anyone holding the URL // to retrieve the object at path. The signature is valid until expires. func (b *Bucket) SignedURL(path string, expires time.Time) string { return b.SignedURLWithArgs(path, expires, nil, nil) } // SignedURLWithArgs returns a signed URL that allows anyone holding the URL // to retrieve the object at path. The signature is valid until expires. 
func (b *Bucket) SignedURLWithArgs(path string, expires time.Time, params url.Values, headers http.Header) string {
	return b.SignedURLWithMethod("GET", path, expires, params, headers)
}

// SignedURLWithMethod returns a signed URL that allows anyone holding the URL
// to either retrieve the object at path or make a HEAD request against it. The signature is valid until expires.
func (b *Bucket) SignedURLWithMethod(method, path string, expires time.Time, params url.Values, headers http.Header) string {
	var uv = url.Values{}

	if params != nil {
		uv = params
	}

	// V2 presigned URLs carry an absolute expiry timestamp; V4 carries a
	// validity duration relative to now.
	if b.S3.Signature == aws.V2Signature {
		uv.Set("Expires", strconv.FormatInt(expires.Unix(), 10))
	} else {
		uv.Set("X-Amz-Expires", strconv.FormatInt(expires.Unix()-time.Now().Unix(), 10))
	}

	req := &request{
		method:  method,
		bucket:  b.Name,
		path:    path,
		params:  uv,
		headers: headers,
	}
	err := b.S3.prepare(req)
	if err != nil {
		panic(err)
	}
	u, err := req.url()
	if err != nil {
		panic(err)
	}
	// V2 requests with a session token must expose it as a query parameter.
	if b.S3.Auth.Token() != "" && b.S3.Signature == aws.V2Signature {
		return u.String() + "&x-amz-security-token=" + url.QueryEscape(req.headers["X-Amz-Security-Token"][0])
	} else {
		return u.String()
	}
}

// UploadSignedURL returns a signed URL that allows anyone holding the URL
// to upload the object at path. The signature is valid until expires.
// contenttype is a string like image/png // path is the resource name in s3 terminalogy like images/ali.png [obviously exclusing the bucket name itself] func (b *Bucket) UploadSignedURL(path, method, content_type string, expires time.Time) string { expire_date := expires.Unix() if method != "POST" { method = "PUT" } a := b.S3.Auth tokenData := "" if a.Token() != "" { tokenData = "x-amz-security-token:" + a.Token() + "\n" } stringToSign := method + "\n\n" + content_type + "\n" + strconv.FormatInt(expire_date, 10) + "\n" + tokenData + "/" + b.Name + "/" + path secretKey := a.SecretKey accessId := a.AccessKey mac := hmac.New(sha1.New, []byte(secretKey)) mac.Write([]byte(stringToSign)) macsum := mac.Sum(nil) signature := base64.StdEncoding.EncodeToString([]byte(macsum)) signature = strings.TrimSpace(signature) signedurl, err := url.Parse("https://" + b.Name + ".s3.amazonaws.com/") if err != nil { log.Println("ERROR sining url for S3 upload", err) return "" } signedurl.Path += path params := url.Values{} params.Add("AWSAccessKeyId", accessId) params.Add("Expires", strconv.FormatInt(expire_date, 10)) params.Add("Signature", signature) if a.Token() != "" { params.Add("x-amz-security-token", a.Token()) } signedurl.RawQuery = params.Encode() return signedurl.String() } // PostFormArgs returns the action and input fields needed to allow anonymous // uploads to a bucket within the expiration limit // Additional conditions can be specified with conds func (b *Bucket) PostFormArgsEx(path string, expires time.Time, redirect string, conds []string) (action string, fields map[string]string) { conditions := make([]string, 0) fields = map[string]string{ "AWSAccessKeyId": b.Auth.AccessKey, "key": path, } if conds != nil { conditions = append(conditions, conds...) 
} conditions = append(conditions, fmt.Sprintf("{\"key\": \"%s\"}", path)) conditions = append(conditions, fmt.Sprintf("{\"bucket\": \"%s\"}", b.Name)) if redirect != "" { conditions = append(conditions, fmt.Sprintf("{\"success_action_redirect\": \"%s\"}", redirect)) fields["success_action_redirect"] = redirect } vExpiration := expires.Format("2006-01-02T15:04:05Z") vConditions := strings.Join(conditions, ",") policy := fmt.Sprintf("{\"expiration\": \"%s\", \"conditions\": [%s]}", vExpiration, vConditions) policy64 := base64.StdEncoding.EncodeToString([]byte(policy)) fields["policy"] = policy64 signer := hmac.New(sha1.New, []byte(b.Auth.SecretKey)) signer.Write([]byte(policy64)) fields["signature"] = base64.StdEncoding.EncodeToString(signer.Sum(nil)) action = fmt.Sprintf("%s/%s/", b.S3.Region.S3Endpoint, b.Name) return } // PostFormArgs returns the action and input fields needed to allow anonymous // uploads to a bucket within the expiration limit func (b *Bucket) PostFormArgs(path string, expires time.Time, redirect string) (action string, fields map[string]string) { return b.PostFormArgsEx(path, expires, redirect, nil) } type request struct { method string bucket string path string params url.Values headers http.Header baseurl string payload io.Reader prepared bool } func (req *request) url() (*url.URL, error) { u, err := url.Parse(req.baseurl) if err != nil { return nil, fmt.Errorf("bad S3 endpoint URL %q: %v", req.baseurl, err) } u.RawQuery = req.params.Encode() u.Path = req.path return u, nil } // query prepares and runs the req request. // If resp is not nil, the XML data contained in the response // body will be unmarshalled on it. func (s3 *S3) query(req *request, resp interface{}) error { err := s3.prepare(req) if err != nil { return err } r, err := s3.run(req, resp) if r != nil && r.Body != nil { r.Body.Close() } return err } // queryV4Signprepares and runs the req request, signed with aws v4 signatures. 
// If resp is not nil, the XML data contained in the response
// body will be unmarshalled on it.
func (s3 *S3) queryV4Sign(req *request, resp interface{}) error {
	if req.headers == nil {
		req.headers = map[string][]string{}
	}

	err := s3.setBaseURL(req)
	if err != nil {
		return err
	}

	hreq, err := s3.setupHttpRequest(req)
	if err != nil {
		return err
	}

	// req.Host must be set for V4 signature calculation
	hreq.Host = hreq.URL.Host

	signer := aws.NewV4Signer(s3.Auth, "s3", s3.Region)
	signer.IncludeXAmzContentSha256 = true
	signer.Sign(hreq)

	_, err = s3.doHttpRequest(hreq, resp)
	return err
}

// setBaseURL sets baseurl on req from the bucket name and the region
// endpoint, rewriting req.path for path-style addressing when no
// bucket endpoint template is configured.
func (s3 *S3) setBaseURL(req *request) error {
	if req.bucket == "" {
		req.baseurl = s3.Region.S3Endpoint
	} else {
		req.baseurl = s3.Region.S3BucketEndpoint
		if req.baseurl == "" {
			// Use the path method to address the bucket.
			req.baseurl = s3.Region.S3Endpoint
			req.path = "/" + req.bucket + req.path
		} else {
			// Just in case, prevent injection.
			if strings.IndexAny(req.bucket, "/:@") >= 0 {
				return fmt.Errorf("bad S3 bucket: %q", req.bucket)
			}
			req.baseurl = strings.Replace(req.baseurl, "${bucket}", req.bucket, -1)
		}
	}

	return nil
}

// partiallyEscapedPath partially escapes the S3 path allowing for all S3 REST API calls.
//
// Some commands including:
//      GET Bucket acl              http://goo.gl/aoXflF
//      GET Bucket cors             http://goo.gl/UlmBdx
//      GET Bucket lifecycle        http://goo.gl/8Fme7M
//      GET Bucket policy           http://goo.gl/ClXIo3
//      GET Bucket location         http://goo.gl/5lh8RD
//      GET Bucket Logging          http://goo.gl/sZ5ckF
//      GET Bucket notification     http://goo.gl/qSSZKD
//      GET Bucket tagging          http://goo.gl/QRvxnM
// require the first character after the bucket name in the path to be a literal '?' and
// not the escaped hex representation '%3F'.
func partiallyEscapedPath(path string) string {
	// Escape via url.URL, then restore the single literal '?' that some
	// subresource requests require after the bucket segment.
	pathEscapedAndSplit := strings.Split((&url.URL{Path: path}).String(), "/")
	if len(pathEscapedAndSplit) >= 3 {
		if len(pathEscapedAndSplit[2]) >= 3 {
			// Check for the one "?" that should not be escaped.
			if pathEscapedAndSplit[2][0:3] == "%3F" {
				pathEscapedAndSplit[2] = "?" + pathEscapedAndSplit[2][3:]
			}
		}
	}
	return strings.Replace(strings.Join(pathEscapedAndSplit, "/"), "+", "%2B", -1)
}

// prepare sets up req to be delivered to S3.
func (s3 *S3) prepare(req *request) error {
	// Copy so they can be mutated without affecting on retries.
	params := make(url.Values)
	headers := make(http.Header)
	for k, v := range req.params {
		params[k] = v
	}
	for k, v := range req.headers {
		headers[k] = v
	}
	req.params = params
	req.headers = headers

	// One-time normalization: default method, leading slash, base URL.
	if !req.prepared {
		req.prepared = true
		if req.method == "" {
			req.method = "GET"
		}

		if !strings.HasPrefix(req.path, "/") {
			req.path = "/" + req.path
		}

		err := s3.setBaseURL(req)
		if err != nil {
			return err
		}
	}

	// Session tokens travel in a header for V2 and a query param otherwise.
	if s3.Signature == aws.V2Signature && s3.Auth.Token() != "" {
		req.headers["X-Amz-Security-Token"] = []string{s3.Auth.Token()}
	} else if s3.Auth.Token() != "" {
		req.params.Set("X-Amz-Security-Token", s3.Auth.Token())
	}

	if s3.Signature == aws.V2Signature {
		// Always sign again as it's not clear how far the
		// server has handled a previous attempt.
		u, err := url.Parse(req.baseurl)
		if err != nil {
			return err
		}

		signpathPatiallyEscaped := partiallyEscapedPath(req.path)
		req.headers["Host"] = []string{u.Host}
		req.headers["Date"] = []string{time.Now().In(time.UTC).Format(time.RFC1123)}

		sign(s3.Auth, req.method, signpathPatiallyEscaped, req.params, req.headers)
	} else {
		hreq, err := s3.setupHttpRequest(req)
		if err != nil {
			return err
		}

		hreq.Host = hreq.URL.Host
		signer := aws.NewV4Signer(s3.Auth, "s3", s3.Region)
		signer.IncludeXAmzContentSha256 = true
		signer.Sign(hreq)

		req.payload = hreq.Body
		if _, ok := headers["Content-Length"]; ok {
			req.headers["Content-Length"] = headers["Content-Length"]
		}
	}
	return nil
}

// setupHttpRequest prepares an *http.Request for doHttpRequest.
func (s3 *S3) setupHttpRequest(req *request) (*http.Request, error) {
	// Copy so that signing the http request will not mutate it
	headers := make(http.Header)
	for k, v := range req.headers {
		headers[k] = v
	}
	req.headers = headers

	u, err := req.url()
	if err != nil {
		return nil, err
	}
	// Opaque preserves the partially escaped path exactly as built.
	u.Opaque = fmt.Sprintf("//%s%s", u.Host, partiallyEscapedPath(u.Path))

	hreq := http.Request{
		URL:        u,
		Method:     req.method,
		ProtoMajor: 1,
		ProtoMinor: 1,
		Close:      true,
		Header:     req.headers,
		Form:       req.params,
	}

	if v, ok := req.headers["Content-Length"]; ok {
		hreq.ContentLength, _ = strconv.ParseInt(v[0], 10, 64)
		delete(req.headers, "Content-Length")
	}

	if req.payload != nil {
		hreq.Body = ioutil.NopCloser(req.payload)
	}

	return &hreq, nil
}

// doHttpRequest sends hreq and returns the http response from the server.
// If resp is not nil, the XML data contained in the response
// body will be unmarshalled on it.
func (s3 *S3) doHttpRequest(hreq *http.Request, resp interface{}) (*http.Response, error) {
	// NOTE(review): a fresh Client/Transport is built per call, so there is
	// no connection reuse; consistent with Close:true on the request, but
	// worth confirming this is intentional before optimizing.
	c := http.Client{
		Transport: &http.Transport{
			Dial: func(netw, addr string) (c net.Conn, err error) {
				// Deadline is taken before dialing, so ReadTimeout bounds
				// connect+read together when ConnectTimeout is unset.
				deadline := time.Now().Add(s3.ReadTimeout)
				if s3.ConnectTimeout > 0 {
					c, err = net.DialTimeout(netw, addr, s3.ConnectTimeout)
				} else {
					c, err = net.Dial(netw, addr)
				}
				if err != nil {
					return
				}
				if s3.ReadTimeout > 0 {
					err = c.SetDeadline(deadline)
				}
				return
			},
			Proxy: http.ProxyFromEnvironment,
		},
	}

	hresp, err := c.Do(hreq)
	if err != nil {
		return nil, err
	}
	if debug {
		dump, _ := httputil.DumpResponse(hresp, true)
		log.Printf("} -> %s\n", dump)
	}
	// Anything other than 200/204/206 is turned into an *Error.
	if hresp.StatusCode != 200 && hresp.StatusCode != 204 && hresp.StatusCode != 206 {
		return nil, buildError(hresp)
	}
	if resp != nil {
		err = xml.NewDecoder(hresp.Body).Decode(resp)
		hresp.Body.Close()

		if debug {
			log.Printf("goamz.s3> decoded xml into %#v", resp)
		}

	}
	return hresp, err
}

// run sends req and returns the http response from the server.
// If resp is not nil, the XML data contained in the response
// body will be unmarshalled on it.
func (s3 *S3) run(req *request, resp interface{}) (*http.Response, error) {
	if debug {
		log.Printf("Running S3 request: %#v", req)
	}

	hreq, err := s3.setupHttpRequest(req)
	if err != nil {
		return nil, err
	}

	return s3.doHttpRequest(hreq, resp)
}

// Error represents an error in an operation with S3.
type Error struct {
	StatusCode int    // HTTP status code (200, 403, ...)
	Code       string // EC2 error code ("UnsupportedOperation", ...)
	Message    string // The human-oriented error message
	BucketName string
	RequestId  string
	HostId     string
}

func (e *Error) Error() string {
	return e.Message
}

// buildError decodes an S3 XML error response into an *Error, falling back
// to the HTTP status text when the body carries no message.
func buildError(r *http.Response) error {
	if debug {
		log.Printf("got error (status code %v)", r.StatusCode)
		data, err := ioutil.ReadAll(r.Body)
		if err != nil {
			log.Printf("\tread error: %v", err)
		} else {
			log.Printf("\tdata:\n%s\n\n", data)
		}
		// Restore the body so the decoder below can still read it.
		r.Body = ioutil.NopCloser(bytes.NewBuffer(data))
	}

	err := Error{}
	// TODO return error if Unmarshal fails?
	xml.NewDecoder(r.Body).Decode(&err)
	r.Body.Close()
	err.StatusCode = r.StatusCode
	if err.Message == "" {
		err.Message = r.Status
	}
	if debug {
		log.Printf("err: %#v\n", err)
	}
	return &err
}

// shouldRetry reports whether err looks transient enough for run's caller
// to retry the request.
func shouldRetry(err error) bool {
	if err == nil {
		return false
	}
	switch err {
	case io.ErrUnexpectedEOF, io.EOF:
		return true
	}
	switch e := err.(type) {
	case *net.DNSError:
		return true
	case *net.OpError:
		switch e.Op {
		case "read", "write":
			return true
		}
	case *url.Error:
		// url.Error can be returned either by net/url if a URL cannot be
		// parsed, or by net/http if the response is closed before the headers
		// are received or parsed correctly. In that later case, e.Op is set to
		// the HTTP method name with the first letter uppercased. We don't want
		// to retry on POST operations, since those are not idempotent, all the
		// other ones should be safe to retry.
		switch e.Op {
		case "Get", "Put", "Delete", "Head":
			return shouldRetry(e.Err)
		default:
			return false
		}
	case *Error:
		switch e.Code {
		case "InternalError", "NoSuchUpload", "NoSuchBucket":
			return true
		}
	}
	return false
}

// hasCode reports whether err is an S3 *Error carrying the given Code.
func hasCode(err error, code string) bool {
	s3err, ok := err.(*Error)
	return ok && s3err.Code == code
}

================================================
FILE: vendor/github.com/zackbloom/goamz/s3/sign.go
================================================
package s3

import (
	"crypto/hmac"
	"crypto/sha1"
	"encoding/base64"
	"github.com/zackbloom/goamz/aws"
	"log"
	"sort"
	"strings"
)

var b64 = base64.StdEncoding

// ----------------------------------------------------------------------------
// S3 signing (http://goo.gl/G1LrK)

// s3ParamsToSign is the set of sub-resource query parameters that must be
// included in the V2 string-to-sign.
var s3ParamsToSign = map[string]bool{
	"acl":                          true,
	"location":                     true,
	"logging":                      true,
	"notification":                 true,
	"partNumber":                   true,
	"policy":                       true,
	"requestPayment":               true,
	"torrent":                      true,
	"uploadId":                     true,
	"uploads":                      true,
	"versionId":                    true,
	"versioning":                   true,
	"versions":                     true,
	"response-content-type":        true,
	"response-content-language":    true,
	"response-expires":             true,
	"response-cache-control":       true,
	"response-content-disposition": true,
	"response-content-encoding":    true,
	"website":                      true,
	"delete":                       true,
}

// sign computes the AWS V2 (HMAC-SHA1) signature for the request and writes
// it either into params["Signature"] (pre-signed / "Expires" style requests)
// or into the Authorization header. It mutates params and headers in place.
func sign(auth aws.Auth, method, canonicalPath string, params, headers map[string][]string) {
	var md5, ctype, date, xamz string
	var xamzDate bool
	var keys, sarray []string
	xheaders := make(map[string]string)
	for k, v := range headers {
		k = strings.ToLower(k)
		switch k {
		case "content-md5":
			md5 = v[0]
		case "content-type":
			ctype = v[0]
		case "date":
			if !xamzDate {
				date = v[0]
			}
		default:
			if strings.HasPrefix(k, "x-amz-") {
				keys = append(keys, k)
				xheaders[k] = strings.Join(v, ",")
				// x-amz-date supersedes the Date header in the string-to-sign.
				if k == "x-amz-date" {
					xamzDate = true
					date = ""
				}
			}
		}
	}
	if len(keys) > 0 {
		sort.StringSlice(keys).Sort()
		for i := range keys {
			key := keys[i]
			value := xheaders[key]
			sarray = append(sarray, key+":"+value)
		}
		xamz = strings.Join(sarray, "\n") + "\n"
	}

	expires := false
	if v, ok := params["Expires"]; ok {
		// Query string request authentication alternative.
		expires = true
		date = v[0]
		params["AWSAccessKeyId"] = []string{auth.AccessKey}
	}

	// Reuse the backing array for the canonical sub-resource list.
	sarray = sarray[0:0]
	for k, v := range params {
		if s3ParamsToSign[k] {
			for _, vi := range v {
				if vi == "" {
					sarray = append(sarray, k)
				} else {
					// "When signing you do not encode these values."
					sarray = append(sarray, k+"="+vi)
				}
			}
		}
	}
	if len(sarray) > 0 {
		sort.StringSlice(sarray).Sort()
		canonicalPath = canonicalPath + "?" + strings.Join(sarray, "&")
	}

	payload := method + "\n" + md5 + "\n" + ctype + "\n" + date + "\n" + xamz + canonicalPath
	hash := hmac.New(sha1.New, []byte(auth.SecretKey))
	hash.Write([]byte(payload))
	signature := make([]byte, b64.EncodedLen(hash.Size()))
	b64.Encode(signature, hash.Sum(nil))

	if expires {
		params["Signature"] = []string{string(signature)}
	} else {
		headers["Authorization"] = []string{"AWS " + auth.AccessKey + ":" + string(signature)}
	}
	if debug {
		log.Printf("Signature payload: %q", payload)
		log.Printf("Signature: %q", signature)
	}
}

================================================
FILE: vendor/golang.org/x/crypto/LICENSE
================================================
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================ FILE: vendor/golang.org/x/crypto/PATENTS ================================================ Additional IP Rights Grant (Patents) "This implementation" means the copyrightable works distributed by Google as part of the Go project. Google hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, transfer and otherwise run, modify and propagate the contents of this implementation of Go, where such license applies only to those patent claims, both currently owned or controlled by Google and acquired in the future, licensable by Google that are necessarily infringed by this implementation of Go. This grant does not include claims that would be infringed only as a consequence of further modification of this implementation. 
If you or your agent or exclusive licensee institute or order or agree to the institution of patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that this implementation of Go or any code incorporated within this implementation of Go constitutes direct or contributory patent infringement, or inducement of patent infringement, then any patent rights granted to you under this License for this implementation of Go shall terminate as of the date such litigation is filed. ================================================ FILE: vendor/golang.org/x/crypto/ssh/terminal/terminal.go ================================================ // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package terminal import ( "bytes" "io" "sync" "unicode/utf8" ) // EscapeCodes contains escape sequences that can be written to the terminal in // order to achieve different styles of text. type EscapeCodes struct { // Foreground colors Black, Red, Green, Yellow, Blue, Magenta, Cyan, White []byte // Reset all attributes Reset []byte } var vt100EscapeCodes = EscapeCodes{ Black: []byte{keyEscape, '[', '3', '0', 'm'}, Red: []byte{keyEscape, '[', '3', '1', 'm'}, Green: []byte{keyEscape, '[', '3', '2', 'm'}, Yellow: []byte{keyEscape, '[', '3', '3', 'm'}, Blue: []byte{keyEscape, '[', '3', '4', 'm'}, Magenta: []byte{keyEscape, '[', '3', '5', 'm'}, Cyan: []byte{keyEscape, '[', '3', '6', 'm'}, White: []byte{keyEscape, '[', '3', '7', 'm'}, Reset: []byte{keyEscape, '[', '0', 'm'}, } // Terminal contains the state for running a VT100 terminal that is capable of // reading lines of input. type Terminal struct { // AutoCompleteCallback, if non-null, is called for each keypress with // the full input line and the current position of the cursor (in // bytes, as an index into |line|). If it returns ok=false, the key // press is processed normally. 
Otherwise it returns a replacement line // and the new cursor position. AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool) // Escape contains a pointer to the escape codes for this terminal. // It's always a valid pointer, although the escape codes themselves // may be empty if the terminal doesn't support them. Escape *EscapeCodes // lock protects the terminal and the state in this object from // concurrent processing of a key press and a Write() call. lock sync.Mutex c io.ReadWriter prompt []rune // line is the current line being entered. line []rune // pos is the logical position of the cursor in line pos int // echo is true if local echo is enabled echo bool // pasteActive is true iff there is a bracketed paste operation in // progress. pasteActive bool // cursorX contains the current X value of the cursor where the left // edge is 0. cursorY contains the row number where the first row of // the current line is 0. cursorX, cursorY int // maxLine is the greatest value of cursorY so far. maxLine int termWidth, termHeight int // outBuf contains the terminal data to be sent. outBuf []byte // remainder contains the remainder of any partial key sequences after // a read. It aliases into inBuf. remainder []byte inBuf [256]byte // history contains previously entered commands so that they can be // accessed with the up and down keys. history stRingBuffer // historyIndex stores the currently accessed history entry, where zero // means the immediately previous entry. historyIndex int // When navigating up and down the history it's possible to return to // the incomplete, initial line. That value is stored in // historyPending. historyPending string } // NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is // a local terminal, that terminal must first have been put into raw mode. // prompt is a string that is written at the start of each input line (i.e. // "> "). 
func NewTerminal(c io.ReadWriter, prompt string) *Terminal {
	return &Terminal{
		Escape:       &vt100EscapeCodes,
		c:            c,
		prompt:       []rune(prompt),
		termWidth:    80,
		termHeight:   24,
		echo:         true,
		historyIndex: -1,
	}
}

// Control keys and synthetic key codes. The synthetic codes live in the
// UTF-16 surrogate area so they can never collide with a real rune.
const (
	keyCtrlD     = 4
	keyCtrlU     = 21
	keyEnter     = '\r'
	keyEscape    = 27
	keyBackspace = 127
	keyUnknown   = 0xd800 /* UTF-16 surrogate area */ + iota
	keyUp
	keyDown
	keyLeft
	keyRight
	keyAltLeft
	keyAltRight
	keyHome
	keyEnd
	keyDeleteWord
	keyDeleteLine
	keyClearScreen
	keyPasteStart
	keyPasteEnd
)

// Bracketed-paste start/end escape sequences.
var pasteStart = []byte{keyEscape, '[', '2', '0', '0', '~'}
var pasteEnd = []byte{keyEscape, '[', '2', '0', '1', '~'}

// bytesToKey tries to parse a key sequence from b. If successful, it returns
// the key and the remainder of the input. Otherwise it returns utf8.RuneError.
func bytesToKey(b []byte, pasteActive bool) (rune, []byte) {
	if len(b) == 0 {
		return utf8.RuneError, nil
	}

	if !pasteActive {
		switch b[0] {
		case 1: // ^A
			return keyHome, b[1:]
		case 5: // ^E
			return keyEnd, b[1:]
		case 8: // ^H
			return keyBackspace, b[1:]
		case 11: // ^K
			return keyDeleteLine, b[1:]
		case 12: // ^L
			return keyClearScreen, b[1:]
		case 23: // ^W
			return keyDeleteWord, b[1:]
		}
	}

	if b[0] != keyEscape {
		if !utf8.FullRune(b) {
			// Partial rune: ask the caller for more bytes.
			return utf8.RuneError, b
		}
		r, l := utf8.DecodeRune(b)
		return r, b[l:]
	}

	if !pasteActive && len(b) >= 3 && b[0] == keyEscape && b[1] == '[' {
		switch b[2] {
		case 'A':
			return keyUp, b[3:]
		case 'B':
			return keyDown, b[3:]
		case 'C':
			return keyRight, b[3:]
		case 'D':
			return keyLeft, b[3:]
		case 'H':
			return keyHome, b[3:]
		case 'F':
			return keyEnd, b[3:]
		}
	}

	// ESC [ 1 ; 3 C/D are Alt+Right / Alt+Left.
	if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' {
		switch b[5] {
		case 'C':
			return keyAltRight, b[6:]
		case 'D':
			return keyAltLeft, b[6:]
		}
	}

	if !pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteStart) {
		return keyPasteStart, b[6:]
	}

	if pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteEnd) {
		return keyPasteEnd, b[6:]
	}

	// If we get here then we have a key that we don't recognise, or a
	// partial sequence. It's not clear how one should find the end of a
	// sequence without knowing them all, but it seems that [a-zA-Z~] only
	// appears at the end of a sequence.
	for i, c := range b[0:] {
		if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '~' {
			return keyUnknown, b[i+1:]
		}
	}

	return utf8.RuneError, b
}

// queue appends data to the end of t.outBuf
func (t *Terminal) queue(data []rune) {
	t.outBuf = append(t.outBuf, []byte(string(data))...)
}

var eraseUnderCursor = []rune{' ', keyEscape, '[', 'D'}
var space = []rune{' '}

// isPrintable reports whether key is a displayable rune (>= 32 and not one
// of our synthetic surrogate-area key codes).
func isPrintable(key rune) bool {
	isInSurrogateArea := key >= 0xd800 && key <= 0xdbff
	return key >= 32 && !isInSurrogateArea
}

// moveCursorToPos appends data to t.outBuf which will move the cursor to the
// given, logical position in the text.
func (t *Terminal) moveCursorToPos(pos int) {
	if !t.echo {
		return
	}

	// Translate the logical position into (x, y) screen coordinates,
	// accounting for the prompt and line wrapping.
	x := visualLength(t.prompt) + pos
	y := x / t.termWidth
	x = x % t.termWidth

	up := 0
	if y < t.cursorY {
		up = t.cursorY - y
	}

	down := 0
	if y > t.cursorY {
		down = y - t.cursorY
	}

	left := 0
	if x < t.cursorX {
		left = t.cursorX - x
	}

	right := 0
	if x > t.cursorX {
		right = x - t.cursorX
	}

	t.cursorX = x
	t.cursorY = y
	t.move(up, down, left, right)
}

// move queues VT100 cursor-movement sequences (3 bytes per step) for the
// given number of steps in each direction.
func (t *Terminal) move(up, down, left, right int) {
	movement := make([]rune, 3*(up+down+left+right))
	m := movement
	for i := 0; i < up; i++ {
		m[0] = keyEscape
		m[1] = '['
		m[2] = 'A'
		m = m[3:]
	}
	for i := 0; i < down; i++ {
		m[0] = keyEscape
		m[1] = '['
		m[2] = 'B'
		m = m[3:]
	}
	for i := 0; i < left; i++ {
		m[0] = keyEscape
		m[1] = '['
		m[2] = 'D'
		m = m[3:]
	}
	for i := 0; i < right; i++ {
		m[0] = keyEscape
		m[1] = '['
		m[2] = 'C'
		m = m[3:]
	}

	t.queue(movement)
}

// clearLineToRight queues the VT100 erase-to-end-of-line sequence.
func (t *Terminal) clearLineToRight() {
	op := []rune{keyEscape, '[', 'K'}
	t.queue(op)
}

const maxLineLength = 4096

// setLine replaces the current input line with newLine, repainting it (and
// blanking any leftover tail of the old line) when echo is on.
func (t *Terminal) setLine(newLine []rune, newPos int) {
	if t.echo {
		t.moveCursorToPos(0)
		t.writeLine(newLine)
		for i := len(newLine); i < len(t.line); i++ {
			t.writeLine(space)
		}
		t.moveCursorToPos(newPos)
	}
	t.line = newLine
	t.pos = newPos
}
func (t *Terminal) advanceCursor(places int) { t.cursorX += places t.cursorY += t.cursorX / t.termWidth if t.cursorY > t.maxLine { t.maxLine = t.cursorY } t.cursorX = t.cursorX % t.termWidth if places > 0 && t.cursorX == 0 { // Normally terminals will advance the current position // when writing a character. But that doesn't happen // for the last character in a line. However, when // writing a character (except a new line) that causes // a line wrap, the position will be advanced two // places. // // So, if we are stopping at the end of a line, we // need to write a newline so that our cursor can be // advanced to the next line. t.outBuf = append(t.outBuf, '\n') } } func (t *Terminal) eraseNPreviousChars(n int) { if n == 0 { return } if t.pos < n { n = t.pos } t.pos -= n t.moveCursorToPos(t.pos) copy(t.line[t.pos:], t.line[n+t.pos:]) t.line = t.line[:len(t.line)-n] if t.echo { t.writeLine(t.line[t.pos:]) for i := 0; i < n; i++ { t.queue(space) } t.advanceCursor(n) t.moveCursorToPos(t.pos) } } // countToLeftWord returns then number of characters from the cursor to the // start of the previous word. func (t *Terminal) countToLeftWord() int { if t.pos == 0 { return 0 } pos := t.pos - 1 for pos > 0 { if t.line[pos] != ' ' { break } pos-- } for pos > 0 { if t.line[pos] == ' ' { pos++ break } pos-- } return t.pos - pos } // countToRightWord returns then number of characters from the cursor to the // start of the next word. func (t *Terminal) countToRightWord() int { pos := t.pos for pos < len(t.line) { if t.line[pos] == ' ' { break } pos++ } for pos < len(t.line) { if t.line[pos] != ' ' { break } pos++ } return pos - t.pos } // visualLength returns the number of visible glyphs in s. 
// visualLength counts glyphs that actually occupy a column, skipping any
// embedded VT100 escape sequences (ESC ... up to the terminating letter).
func visualLength(runes []rune) int {
	inEscapeSeq := false
	length := 0

	for _, r := range runes {
		switch {
		case inEscapeSeq:
			if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') {
				inEscapeSeq = false
			}
		case r == '\x1b':
			inEscapeSeq = true
		default:
			length++
		}
	}

	return length
}

// handleKey processes the given key and, optionally, returns a line of text
// that the user has entered.
func (t *Terminal) handleKey(key rune) (line string, ok bool) {
	// During a bracketed paste everything except Enter is inserted verbatim.
	if t.pasteActive && key != keyEnter {
		t.addKeyToLine(key)
		return
	}

	switch key {
	case keyBackspace:
		if t.pos == 0 {
			return
		}
		t.eraseNPreviousChars(1)
	case keyAltLeft:
		// move left by a word.
		t.pos -= t.countToLeftWord()
		t.moveCursorToPos(t.pos)
	case keyAltRight:
		// move right by a word.
		t.pos += t.countToRightWord()
		t.moveCursorToPos(t.pos)
	case keyLeft:
		if t.pos == 0 {
			return
		}
		t.pos--
		t.moveCursorToPos(t.pos)
	case keyRight:
		if t.pos == len(t.line) {
			return
		}
		t.pos++
		t.moveCursorToPos(t.pos)
	case keyHome:
		if t.pos == 0 {
			return
		}
		t.pos = 0
		t.moveCursorToPos(t.pos)
	case keyEnd:
		if t.pos == len(t.line) {
			return
		}
		t.pos = len(t.line)
		t.moveCursorToPos(t.pos)
	case keyUp:
		entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1)
		if !ok {
			return "", false
		}
		if t.historyIndex == -1 {
			// Stash the in-progress line so keyDown can restore it.
			t.historyPending = string(t.line)
		}
		t.historyIndex++
		runes := []rune(entry)
		t.setLine(runes, len(runes))
	case keyDown:
		switch t.historyIndex {
		case -1:
			return
		case 0:
			runes := []rune(t.historyPending)
			t.setLine(runes, len(runes))
			t.historyIndex--
		default:
			entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1)
			if ok {
				t.historyIndex--
				runes := []rune(entry)
				t.setLine(runes, len(runes))
			}
		}
	case keyEnter:
		t.moveCursorToPos(len(t.line))
		t.queue([]rune("\r\n"))
		line = string(t.line)
		ok = true
		// Reset all per-line state for the next read.
		t.line = t.line[:0]
		t.pos = 0
		t.cursorX = 0
		t.cursorY = 0
		t.maxLine = 0
	case keyDeleteWord:
		// Delete zero or more spaces and then one or more characters.
		t.eraseNPreviousChars(t.countToLeftWord())
	case keyDeleteLine:
		// Delete everything from the current cursor position to the
		// end of line.
		for i := t.pos; i < len(t.line); i++ {
			t.queue(space)
			t.advanceCursor(1)
		}
		t.line = t.line[:t.pos]
		t.moveCursorToPos(t.pos)
	case keyCtrlD:
		// Erase the character under the current position.
		// The EOF case when the line is empty is handled in
		// readLine().
		if t.pos < len(t.line) {
			t.pos++
			t.eraseNPreviousChars(1)
		}
	case keyCtrlU:
		t.eraseNPreviousChars(t.pos)
	case keyClearScreen:
		// Erases the screen and moves the cursor to the home position.
		t.queue([]rune("\x1b[2J\x1b[H"))
		t.queue(t.prompt)
		t.cursorX, t.cursorY = 0, 0
		t.advanceCursor(visualLength(t.prompt))
		t.setLine(t.line, t.pos)
	default:
		if t.AutoCompleteCallback != nil {
			prefix := string(t.line[:t.pos])
			suffix := string(t.line[t.pos:])

			// Drop the lock while running user code.
			t.lock.Unlock()
			newLine, newPos, completeOk := t.AutoCompleteCallback(prefix+suffix, len(prefix), key)
			t.lock.Lock()

			if completeOk {
				t.setLine([]rune(newLine), utf8.RuneCount([]byte(newLine)[:newPos]))
				return
			}
		}
		if !isPrintable(key) {
			return
		}
		if len(t.line) == maxLineLength {
			return
		}
		t.addKeyToLine(key)
	}
	return
}

// addKeyToLine inserts the given key at the current position in the current
// line.
func (t *Terminal) addKeyToLine(key rune) { if len(t.line) == cap(t.line) { newLine := make([]rune, len(t.line), 2*(1+len(t.line))) copy(newLine, t.line) t.line = newLine } t.line = t.line[:len(t.line)+1] copy(t.line[t.pos+1:], t.line[t.pos:]) t.line[t.pos] = key if t.echo { t.writeLine(t.line[t.pos:]) } t.pos++ t.moveCursorToPos(t.pos) } func (t *Terminal) writeLine(line []rune) { for len(line) != 0 { remainingOnLine := t.termWidth - t.cursorX todo := len(line) if todo > remainingOnLine { todo = remainingOnLine } t.queue(line[:todo]) t.advanceCursor(visualLength(line[:todo])) line = line[todo:] } } func (t *Terminal) Write(buf []byte) (n int, err error) { t.lock.Lock() defer t.lock.Unlock() if t.cursorX == 0 && t.cursorY == 0 { // This is the easy case: there's nothing on the screen that we // have to move out of the way. return t.c.Write(buf) } // We have a prompt and possibly user input on the screen. We // have to clear it first. t.move(0 /* up */, 0 /* down */, t.cursorX /* left */, 0 /* right */) t.cursorX = 0 t.clearLineToRight() for t.cursorY > 0 { t.move(1 /* up */, 0, 0, 0) t.cursorY-- t.clearLineToRight() } if _, err = t.c.Write(t.outBuf); err != nil { return } t.outBuf = t.outBuf[:0] if n, err = t.c.Write(buf); err != nil { return } t.writeLine(t.prompt) if t.echo { t.writeLine(t.line) } t.moveCursorToPos(t.pos) if _, err = t.c.Write(t.outBuf); err != nil { return } t.outBuf = t.outBuf[:0] return } // ReadPassword temporarily changes the prompt and reads a password, without // echo, from the terminal. func (t *Terminal) ReadPassword(prompt string) (line string, err error) { t.lock.Lock() defer t.lock.Unlock() oldPrompt := t.prompt t.prompt = []rune(prompt) t.echo = false line, err = t.readLine() t.prompt = oldPrompt t.echo = true return } // ReadLine returns a line of input from the terminal. 
func (t *Terminal) ReadLine() (line string, err error) {
	t.lock.Lock()
	defer t.lock.Unlock()

	return t.readLine()
}

// readLine is the locked core of ReadLine/ReadPassword: it paints the prompt
// if needed, then loops decoding key sequences from t.c until a full line is
// entered or the reader fails.
func (t *Terminal) readLine() (line string, err error) {
	// t.lock must be held at this point

	if t.cursorX == 0 && t.cursorY == 0 {
		t.writeLine(t.prompt)
		t.c.Write(t.outBuf)
		t.outBuf = t.outBuf[:0]
	}

	lineIsPasted := t.pasteActive

	for {
		rest := t.remainder
		lineOk := false
		for !lineOk {
			var key rune
			key, rest = bytesToKey(rest, t.pasteActive)
			if key == utf8.RuneError {
				break
			}
			if !t.pasteActive {
				if key == keyCtrlD {
					// ^D on an empty line means EOF.
					if len(t.line) == 0 {
						return "", io.EOF
					}
				}
				if key == keyPasteStart {
					t.pasteActive = true
					if len(t.line) == 0 {
						lineIsPasted = true
					}
					continue
				}
			} else if key == keyPasteEnd {
				t.pasteActive = false
				continue
			}
			if !t.pasteActive {
				lineIsPasted = false
			}
			line, lineOk = t.handleKey(key)
		}
		if len(rest) > 0 {
			n := copy(t.inBuf[:], rest)
			t.remainder = t.inBuf[:n]
		} else {
			t.remainder = nil
		}
		t.c.Write(t.outBuf)
		t.outBuf = t.outBuf[:0]
		if lineOk {
			if t.echo {
				t.historyIndex = -1
				t.history.Add(line)
			}
			if lineIsPasted {
				err = ErrPasteIndicator
			}
			return
		}

		// t.remainder is a slice at the beginning of t.inBuf
		// containing a partial key sequence
		readBuf := t.inBuf[len(t.remainder):]
		var n int

		// Drop the lock while blocked on the reader so Write can run.
		t.lock.Unlock()
		n, err = t.c.Read(readBuf)
		t.lock.Lock()

		if err != nil {
			return
		}

		t.remainder = t.inBuf[:n+len(t.remainder)]
	}

	panic("unreachable") // for Go 1.0.
}

// SetPrompt sets the prompt to be used when reading subsequent lines.
func (t *Terminal) SetPrompt(prompt string) {
	t.lock.Lock()
	defer t.lock.Unlock()

	t.prompt = []rune(prompt)
}

// clearAndRepaintLinePlusNPrevious clears the current line and the
// numPrevLines above it, then repaints the prompt and input line.
func (t *Terminal) clearAndRepaintLinePlusNPrevious(numPrevLines int) {
	// Move cursor to column zero at the start of the line.
	t.move(t.cursorY, 0, t.cursorX, 0)
	t.cursorX, t.cursorY = 0, 0
	t.clearLineToRight()
	for t.cursorY < numPrevLines {
		// Move down a line
		t.move(0, 1, 0, 0)
		t.cursorY++
		t.clearLineToRight()
	}
	// Move back to beginning.
	t.move(t.cursorY, 0, 0, 0)
	t.cursorX, t.cursorY = 0, 0

	t.queue(t.prompt)
	t.advanceCursor(visualLength(t.prompt))
	t.writeLine(t.line)
	t.moveCursorToPos(t.pos)
}

// SetSize informs the terminal of a new width and height and repaints the
// current line to match, compensating for terminal re-wrapping behaviour.
func (t *Terminal) SetSize(width, height int) error {
	t.lock.Lock()
	defer t.lock.Unlock()

	if width == 0 {
		width = 1
	}

	oldWidth := t.termWidth
	t.termWidth, t.termHeight = width, height

	switch {
	case width == oldWidth:
		// If the width didn't change then nothing else needs to be
		// done.
		return nil
	case len(t.line) == 0 && t.cursorX == 0 && t.cursorY == 0:
		// If there is nothing on current line and no prompt printed,
		// just do nothing
		return nil
	case width < oldWidth:
		// Some terminals (e.g. xterm) will truncate lines that were
		// too long when shrinking. Others, (e.g. gnome-terminal) will
		// attempt to wrap them. For the former, repainting t.maxLine
		// works great, but that behaviour goes badly wrong in the case
		// of the latter because they have doubled every full line.

		// We assume that we are working on a terminal that wraps lines
		// and adjust the cursor position based on every previous line
		// wrapping and turning into two. This causes the prompt on
		// xterms to move upwards, which isn't great, but it avoids a
		// huge mess with gnome-terminal.
		if t.cursorX >= t.termWidth {
			t.cursorX = t.termWidth - 1
		}
		t.cursorY *= 2
		t.clearAndRepaintLinePlusNPrevious(t.maxLine * 2)
	case width > oldWidth:
		// If the terminal expands then our position calculations will
		// be wrong in the future because we think the cursor is
		// |t.pos| chars into the string, but there will be a gap at
		// the end of any wrapped line.
		//
		// But the position will actually be correct until we move, so
		// we can move back to the beginning and repaint everything.
		t.clearAndRepaintLinePlusNPrevious(t.maxLine)
	}

	_, err := t.c.Write(t.outBuf)
	t.outBuf = t.outBuf[:0]
	return err
}

type pasteIndicatorError struct{}

func (pasteIndicatorError) Error() string {
	return "terminal: ErrPasteIndicator not correctly handled"
}

// ErrPasteIndicator may be returned from ReadLine as the error, in addition
// to valid line data. It indicates that bracketed paste mode is enabled and
// that the returned line consists only of pasted data. Programs may wish to
// interpret pasted data more literally than typed data.
var ErrPasteIndicator = pasteIndicatorError{}

// SetBracketedPasteMode requests that the terminal bracket paste operations
// with markers. Not all terminals support this but, if it is supported, then
// enabling this mode will stop any autocomplete callback from running due to
// pastes. Additionally, any lines that are completely pasted will be returned
// from ReadLine with the error set to ErrPasteIndicator.
func (t *Terminal) SetBracketedPasteMode(on bool) {
	if on {
		io.WriteString(t.c, "\x1b[?2004h")
	} else {
		io.WriteString(t.c, "\x1b[?2004l")
	}
}

// stRingBuffer is a ring buffer of strings.
type stRingBuffer struct {
	// entries contains max elements.
	entries []string
	max     int
	// head contains the index of the element most recently added to the ring.
	head int
	// size contains the number of elements in the ring.
	size int
}

// Add appends a to the ring, lazily allocating the backing storage and
// overwriting the oldest entry once the ring is full.
func (s *stRingBuffer) Add(a string) {
	if s.entries == nil {
		const defaultNumEntries = 100
		s.entries = make([]string, defaultNumEntries)
		s.max = defaultNumEntries
	}

	s.head = (s.head + 1) % s.max
	s.entries[s.head] = a
	if s.size < s.max {
		s.size++
	}
}

// NthPreviousEntry returns the value passed to the nth previous call to Add.
// If n is zero then the immediately prior value is returned, if one, then the
// next most recent, and so on. If such an element doesn't exist then ok is
// false.
func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) { if n >= s.size { return "", false } index := s.head - n if index < 0 { index += s.max } return s.entries[index], true } ================================================ FILE: vendor/golang.org/x/crypto/ssh/terminal/util.go ================================================ // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build darwin dragonfly freebsd linux,!appengine netbsd openbsd // Package terminal provides support functions for dealing with terminals, as // commonly found on UNIX systems. // // Putting a terminal into raw mode is the most common requirement: // // oldState, err := terminal.MakeRaw(0) // if err != nil { // panic(err) // } // defer terminal.Restore(0, oldState) package terminal import ( "io" "syscall" "unsafe" ) // State contains the state of a terminal. type State struct { termios syscall.Termios } // IsTerminal returns true if the given file descriptor is a terminal. func IsTerminal(fd int) bool { var termios syscall.Termios _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) return err == 0 } // MakeRaw put the terminal connected to the given file descriptor into raw // mode and returns the previous state of the terminal so that it can be // restored. 
func MakeRaw(fd int) (*State, error) { var oldState State if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 { return nil, err } newState := oldState.termios newState.Iflag &^= syscall.ISTRIP | syscall.INLCR | syscall.ICRNL | syscall.IGNCR | syscall.IXON | syscall.IXOFF newState.Lflag &^= syscall.ECHO | syscall.ICANON | syscall.ISIG if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 { return nil, err } return &oldState, nil } // GetState returns the current state of a terminal which may be useful to // restore the terminal after a signal. func GetState(fd int) (*State, error) { var oldState State if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 { return nil, err } return &oldState, nil } // Restore restores the terminal connected to the given file descriptor to a // previous state. func Restore(fd int, state *State) error { _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&state.termios)), 0, 0, 0) return err } // GetSize returns the dimensions of the given terminal. func GetSize(fd int) (width, height int, err error) { var dimensions [4]uint16 if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 { return -1, -1, err } return int(dimensions[1]), int(dimensions[0]), nil } // ReadPassword reads a line of input from a terminal without local echo. This // is commonly used for inputting passwords and other sensitive data. The slice // returned does not include the \n. 
func ReadPassword(fd int) ([]byte, error) { var oldState syscall.Termios if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); err != 0 { return nil, err } newState := oldState newState.Lflag &^= syscall.ECHO newState.Lflag |= syscall.ICANON | syscall.ISIG newState.Iflag |= syscall.ICRNL if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 { return nil, err } defer func() { syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0) }() var buf [16]byte var ret []byte for { n, err := syscall.Read(fd, buf[:]) if err != nil { return nil, err } if n == 0 { if len(ret) == 0 { return nil, io.EOF } break } if buf[n-1] == '\n' { n-- } ret = append(ret, buf[:n]...) if n < len(buf) { break } } return ret, nil } ================================================ FILE: vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go ================================================ // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build darwin dragonfly freebsd netbsd openbsd package terminal import "syscall" const ioctlReadTermios = syscall.TIOCGETA const ioctlWriteTermios = syscall.TIOCSETA ================================================ FILE: vendor/golang.org/x/crypto/ssh/terminal/util_linux.go ================================================ // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package terminal // These constants are declared here, rather than importing // them from the syscall package as some syscall packages, even // on linux, for example gccgo, do not declare them. 
const ioctlReadTermios = 0x5401 // syscall.TCGETS const ioctlWriteTermios = 0x5402 // syscall.TCSETS ================================================ FILE: vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go ================================================ // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Package terminal provides support functions for dealing with terminals, as // commonly found on UNIX systems. // // Putting a terminal into raw mode is the most common requirement: // // oldState, err := terminal.MakeRaw(0) // if err != nil { // panic(err) // } // defer terminal.Restore(0, oldState) package terminal import ( "fmt" "runtime" ) type State struct{} // IsTerminal returns true if the given file descriptor is a terminal. func IsTerminal(fd int) bool { return false } // MakeRaw put the terminal connected to the given file descriptor into raw // mode and returns the previous state of the terminal so that it can be // restored. func MakeRaw(fd int) (*State, error) { return nil, fmt.Errorf("terminal: MakeRaw not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) } // GetState returns the current state of a terminal which may be useful to // restore the terminal after a signal. func GetState(fd int) (*State, error) { return nil, fmt.Errorf("terminal: GetState not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) } // Restore restores the terminal connected to the given file descriptor to a // previous state. func Restore(fd int, state *State) error { return fmt.Errorf("terminal: Restore not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) } // GetSize returns the dimensions of the given terminal. func GetSize(fd int) (width, height int, err error) { return 0, 0, fmt.Errorf("terminal: GetSize not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) } // ReadPassword reads a line of input from a terminal without local echo. 
This // is commonly used for inputting passwords and other sensitive data. The slice // returned does not include the \n. func ReadPassword(fd int) ([]byte, error) { return nil, fmt.Errorf("terminal: ReadPassword not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) } ================================================ FILE: vendor/golang.org/x/crypto/ssh/terminal/util_windows.go ================================================ // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build windows // Package terminal provides support functions for dealing with terminals, as // commonly found on UNIX systems. // // Putting a terminal into raw mode is the most common requirement: // // oldState, err := terminal.MakeRaw(0) // if err != nil { // panic(err) // } // defer terminal.Restore(0, oldState) package terminal import ( "io" "syscall" "unsafe" ) const ( enableLineInput = 2 enableEchoInput = 4 enableProcessedInput = 1 enableWindowInput = 8 enableMouseInput = 16 enableInsertMode = 32 enableQuickEditMode = 64 enableExtendedFlags = 128 enableAutoPosition = 256 enableProcessedOutput = 1 enableWrapAtEolOutput = 2 ) var kernel32 = syscall.NewLazyDLL("kernel32.dll") var ( procGetConsoleMode = kernel32.NewProc("GetConsoleMode") procSetConsoleMode = kernel32.NewProc("SetConsoleMode") procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") ) type ( short int16 word uint16 coord struct { x short y short } smallRect struct { left short top short right short bottom short } consoleScreenBufferInfo struct { size coord cursorPosition coord attributes word window smallRect maximumWindowSize coord } ) type State struct { mode uint32 } // IsTerminal returns true if the given file descriptor is a terminal. 
func IsTerminal(fd int) bool { var st uint32 r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) return r != 0 && e == 0 } // MakeRaw put the terminal connected to the given file descriptor into raw // mode and returns the previous state of the terminal so that it can be // restored. func MakeRaw(fd int) (*State, error) { var st uint32 _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) if e != 0 { return nil, error(e) } raw := st &^ (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput) _, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(raw), 0) if e != 0 { return nil, error(e) } return &State{st}, nil } // GetState returns the current state of a terminal which may be useful to // restore the terminal after a signal. func GetState(fd int) (*State, error) { var st uint32 _, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) if e != 0 { return nil, error(e) } return &State{st}, nil } // Restore restores the terminal connected to the given file descriptor to a // previous state. func Restore(fd int, state *State) error { _, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0) return err } // GetSize returns the dimensions of the given terminal. func GetSize(fd int) (width, height int, err error) { var info consoleScreenBufferInfo _, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0) if e != 0 { return 0, 0, error(e) } return int(info.size.x), int(info.size.y), nil } // ReadPassword reads a line of input from a terminal without local echo. This // is commonly used for inputting passwords and other sensitive data. The slice // returned does not include the \n. 
func ReadPassword(fd int) ([]byte, error) {
	var st uint32
	// Save the current console mode so it can be restored on exit.
	_, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
	if e != 0 {
		return nil, error(e)
	}

	// Keep line input and input/output processing, but turn echo off so the
	// typed input is not displayed.
	old := st
	st &^= (enableEchoInput)
	st |= (enableProcessedInput | enableLineInput | enableProcessedOutput)
	_, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0)
	if e != 0 {
		return nil, error(e)
	}

	// Best-effort restore of the original console mode; the SetConsoleMode
	// result is deliberately ignored here.
	defer func() {
		syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0)
	}()

	var buf [16]byte
	var ret []byte
	for {
		n, err := syscall.Read(syscall.Handle(fd), buf[:])
		if err != nil {
			return nil, err
		}
		if n == 0 {
			// EOF before any data is an error; EOF after data ends the line.
			if len(ret) == 0 {
				return nil, io.EOF
			}
			break
		}
		// Strip a trailing "\r\n" — the console delivers CRLF line endings.
		if buf[n-1] == '\n' {
			n--
		}
		if n > 0 && buf[n-1] == '\r' {
			n--
		}
		ret = append(ret, buf[:n]...)
		// A short read is taken to mean the line is complete.
		if n < len(buf) {
			break
		}
	}

	return ret, nil
}

================================================ FILE: vendor/golang.org/x/net/LICENSE ================================================

Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ================================================ FILE: vendor/golang.org/x/net/PATENTS ================================================ Additional IP Rights Grant (Patents) "This implementation" means the copyrightable works distributed by Google as part of the Go project. Google hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, transfer and otherwise run, modify and propagate the contents of this implementation of Go, where such license applies only to those patent claims, both currently owned or controlled by Google and acquired in the future, licensable by Google that are necessarily infringed by this implementation of Go. This grant does not include claims that would be infringed only as a consequence of further modification of this implementation. If you or your agent or exclusive licensee institute or order or agree to the institution of patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that this implementation of Go or any code incorporated within this implementation of Go constitutes direct or contributory patent infringement, or inducement of patent infringement, then any patent rights granted to you under this License for this implementation of Go shall terminate as of the date such litigation is filed. 
================================================ FILE: vendor/golang.org/x/net/html/atom/atom.go ================================================

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package atom provides integer codes (also known as atoms) for a fixed set of
// frequently occurring HTML strings: tag names and attribute keys such as "p"
// and "id".
//
// Sharing an atom's name between all elements with the same tag can result in
// fewer string allocations when tokenizing and parsing HTML. Integer
// comparisons are also generally faster than string comparisons.
//
// The value of an atom's particular code is not guaranteed to stay the same
// between versions of this package. Neither is any ordering guaranteed:
// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to
// be dense. The only guarantees are that e.g. looking up "div" will yield
// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0.
package atom

// Atom is an integer code for a string. The zero value maps to "".
//
// An Atom packs an offset into the shared atomText string in its upper bits
// (a >> 8) and the string's length in its low 8 bits (a & 0xff).
type Atom uint32

// String returns the atom's name.
func (a Atom) String() string {
	start := uint32(a >> 8)
	n := uint32(a & 0xff)
	// Codes pointing past the shared text map to "" instead of panicking.
	if start+n > uint32(len(atomText)) {
		return ""
	}
	return atomText[start : start+n]
}

// string is the unchecked variant of String; it assumes a is a valid code
// produced by the generated table.
func (a Atom) string() string {
	return atomText[a>>8 : a>>8+a&0xff]
}

// fnv computes the FNV hash with an arbitrary starting value h.
func fnv(h uint32, s []byte) uint32 {
	for i := range s {
		h ^= uint32(s[i])
		h *= 16777619
	}
	return h
}

// match reports whether s starts with the bytes of t. Callers must guarantee
// len(s) >= len(t); indexing s[i] panics otherwise (Lookup checks lengths
// before calling).
func match(s string, t []byte) bool {
	for i, c := range t {
		if s[i] != c {
			return false
		}
	}
	return true
}

// Lookup returns the atom whose name is s. It returns zero if there is no
// such atom. The lookup is case sensitive.
func Lookup(s []byte) Atom { if len(s) == 0 || len(s) > maxAtomLen { return 0 } h := fnv(hash0, s) if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { return a } if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { return a } return 0 } // String returns a string whose contents are equal to s. In that sense, it is // equivalent to string(s) but may be more efficient. func String(s []byte) string { if a := Lookup(s); a != 0 { return a.String() } return string(s) } ================================================ FILE: vendor/golang.org/x/net/html/atom/gen.go ================================================ // Copyright 2012 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build ignore package main // This program generates table.go and table_test.go. // Invoke as // // go run gen.go |gofmt >table.go // go run gen.go -test |gofmt >table_test.go import ( "flag" "fmt" "math/rand" "os" "sort" "strings" ) // identifier converts s to a Go exported identifier. // It converts "div" to "Div" and "accept-charset" to "AcceptCharset". func identifier(s string) string { b := make([]byte, 0, len(s)) cap := true for _, c := range s { if c == '-' { cap = true continue } if cap && 'a' <= c && c <= 'z' { c -= 'a' - 'A' } cap = false b = append(b, byte(c)) } return string(b) } var test = flag.Bool("test", false, "generate table_test.go") func main() { flag.Parse() var all []string all = append(all, elements...) all = append(all, attributes...) all = append(all, eventHandlers...) all = append(all, extra...) 
sort.Strings(all) if *test { fmt.Printf("// generated by go run gen.go -test; DO NOT EDIT\n\n") fmt.Printf("package atom\n\n") fmt.Printf("var testAtomList = []string{\n") for _, s := range all { fmt.Printf("\t%q,\n", s) } fmt.Printf("}\n") return } // uniq - lists have dups // compute max len too maxLen := 0 w := 0 for _, s := range all { if w == 0 || all[w-1] != s { if maxLen < len(s) { maxLen = len(s) } all[w] = s w++ } } all = all[:w] // Find hash that minimizes table size. var best *table for i := 0; i < 1000000; i++ { if best != nil && 1<<(best.k-1) < len(all) { break } h := rand.Uint32() for k := uint(0); k <= 16; k++ { if best != nil && k >= best.k { break } var t table if t.init(h, k, all) { best = &t break } } } if best == nil { fmt.Fprintf(os.Stderr, "failed to construct string table\n") os.Exit(1) } // Lay out strings, using overlaps when possible. layout := append([]string{}, all...) // Remove strings that are substrings of other strings for changed := true; changed; { changed = false for i, s := range layout { if s == "" { continue } for j, t := range layout { if i != j && t != "" && strings.Contains(s, t) { changed = true layout[j] = "" } } } } // Join strings where one suffix matches another prefix. for { // Find best i, j, k such that layout[i][len-k:] == layout[j][:k], // maximizing overlap length k. besti := -1 bestj := -1 bestk := 0 for i, s := range layout { if s == "" { continue } for j, t := range layout { if i == j { continue } for k := bestk + 1; k <= len(s) && k <= len(t); k++ { if s[len(s)-k:] == t[:k] { besti = i bestj = j bestk = k } } } } if bestk > 0 { layout[besti] += layout[bestj][bestk:] layout[bestj] = "" continue } break } text := strings.Join(layout, "") atom := map[string]uint32{} for _, s := range all { off := strings.Index(text, s) if off < 0 { panic("lost string " + s) } atom[s] = uint32(off<<8 | len(s)) } // Generate the Go code. 
fmt.Printf("// generated by go run gen.go; DO NOT EDIT\n\n") fmt.Printf("package atom\n\nconst (\n") for _, s := range all { fmt.Printf("\t%s Atom = %#x\n", identifier(s), atom[s]) } fmt.Printf(")\n\n") fmt.Printf("const hash0 = %#x\n\n", best.h0) fmt.Printf("const maxAtomLen = %d\n\n", maxLen) fmt.Printf("var table = [1<<%d]Atom{\n", best.k) for i, s := range best.tab { if s == "" { continue } fmt.Printf("\t%#x: %#x, // %s\n", i, atom[s], s) } fmt.Printf("}\n") datasize := (1 << best.k) * 4 fmt.Printf("const atomText =\n") textsize := len(text) for len(text) > 60 { fmt.Printf("\t%q +\n", text[:60]) text = text[60:] } fmt.Printf("\t%q\n\n", text) fmt.Fprintf(os.Stderr, "%d atoms; %d string bytes + %d tables = %d total data\n", len(all), textsize, datasize, textsize+datasize) } type byLen []string func (x byLen) Less(i, j int) bool { return len(x[i]) > len(x[j]) } func (x byLen) Swap(i, j int) { x[i], x[j] = x[j], x[i] } func (x byLen) Len() int { return len(x) } // fnv computes the FNV hash with an arbitrary starting value h. func fnv(h uint32, s string) uint32 { for i := 0; i < len(s); i++ { h ^= uint32(s[i]) h *= 16777619 } return h } // A table represents an attempt at constructing the lookup table. // The lookup table uses cuckoo hashing, meaning that each string // can be found in one of two positions. type table struct { h0 uint32 k uint mask uint32 tab []string } // hash returns the two hashes for s. func (t *table) hash(s string) (h1, h2 uint32) { h := fnv(t.h0, s) h1 = h & t.mask h2 = (h >> 16) & t.mask return } // init initializes the table with the given parameters. // h0 is the initial hash value, // k is the number of bits of hash value to use, and // x is the list of strings to store in the table. // init returns false if the table cannot be constructed. 
func (t *table) init(h0 uint32, k uint, x []string) bool { t.h0 = h0 t.k = k t.tab = make([]string, 1< len(t.tab) { return false } s := t.tab[i] h1, h2 := t.hash(s) j := h1 + h2 - i if t.tab[j] != "" && !t.push(j, depth+1) { return false } t.tab[j] = s return true } // The lists of element names and attribute keys were taken from // https://html.spec.whatwg.org/multipage/indices.html#index // as of the "HTML Living Standard - Last Updated 21 February 2015" version. var elements = []string{ "a", "abbr", "address", "area", "article", "aside", "audio", "b", "base", "bdi", "bdo", "blockquote", "body", "br", "button", "canvas", "caption", "cite", "code", "col", "colgroup", "command", "data", "datalist", "dd", "del", "details", "dfn", "dialog", "div", "dl", "dt", "em", "embed", "fieldset", "figcaption", "figure", "footer", "form", "h1", "h2", "h3", "h4", "h5", "h6", "head", "header", "hgroup", "hr", "html", "i", "iframe", "img", "input", "ins", "kbd", "keygen", "label", "legend", "li", "link", "map", "mark", "menu", "menuitem", "meta", "meter", "nav", "noscript", "object", "ol", "optgroup", "option", "output", "p", "param", "pre", "progress", "q", "rp", "rt", "ruby", "s", "samp", "script", "section", "select", "small", "source", "span", "strong", "style", "sub", "summary", "sup", "table", "tbody", "td", "template", "textarea", "tfoot", "th", "thead", "time", "title", "tr", "track", "u", "ul", "var", "video", "wbr", } // https://html.spec.whatwg.org/multipage/indices.html#attributes-3 var attributes = []string{ "abbr", "accept", "accept-charset", "accesskey", "action", "alt", "async", "autocomplete", "autofocus", "autoplay", "challenge", "charset", "checked", "cite", "class", "cols", "colspan", "command", "content", "contenteditable", "contextmenu", "controls", "coords", "crossorigin", "data", "datetime", "default", "defer", "dir", "dirname", "disabled", "download", "draggable", "dropzone", "enctype", "for", "form", "formaction", "formenctype", "formmethod", 
"formnovalidate", "formtarget", "headers", "height", "hidden", "high", "href", "hreflang", "http-equiv", "icon", "id", "inputmode", "ismap", "itemid", "itemprop", "itemref", "itemscope", "itemtype", "keytype", "kind", "label", "lang", "list", "loop", "low", "manifest", "max", "maxlength", "media", "mediagroup", "method", "min", "minlength", "multiple", "muted", "name", "novalidate", "open", "optimum", "pattern", "ping", "placeholder", "poster", "preload", "radiogroup", "readonly", "rel", "required", "reversed", "rows", "rowspan", "sandbox", "spellcheck", "scope", "scoped", "seamless", "selected", "shape", "size", "sizes", "sortable", "sorted", "span", "src", "srcdoc", "srclang", "start", "step", "style", "tabindex", "target", "title", "translate", "type", "typemustmatch", "usemap", "value", "width", "wrap", } var eventHandlers = []string{ "onabort", "onautocomplete", "onautocompleteerror", "onafterprint", "onbeforeprint", "onbeforeunload", "onblur", "oncancel", "oncanplay", "oncanplaythrough", "onchange", "onclick", "onclose", "oncontextmenu", "oncuechange", "ondblclick", "ondrag", "ondragend", "ondragenter", "ondragleave", "ondragover", "ondragstart", "ondrop", "ondurationchange", "onemptied", "onended", "onerror", "onfocus", "onhashchange", "oninput", "oninvalid", "onkeydown", "onkeypress", "onkeyup", "onlanguagechange", "onload", "onloadeddata", "onloadedmetadata", "onloadstart", "onmessage", "onmousedown", "onmousemove", "onmouseout", "onmouseover", "onmouseup", "onmousewheel", "onoffline", "ononline", "onpagehide", "onpageshow", "onpause", "onplay", "onplaying", "onpopstate", "onprogress", "onratechange", "onreset", "onresize", "onscroll", "onseeked", "onseeking", "onselect", "onshow", "onsort", "onstalled", "onstorage", "onsubmit", "onsuspend", "ontimeupdate", "ontoggle", "onunload", "onvolumechange", "onwaiting", } // extra are ad-hoc values not covered by any of the lists above. 
var extra = []string{ "align", "annotation", "annotation-xml", "applet", "basefont", "bgsound", "big", "blink", "center", "color", "desc", "face", "font", "foreignObject", // HTML is case-insensitive, but SVG-embedded-in-HTML is case-sensitive. "foreignobject", "frame", "frameset", "image", "isindex", "listing", "malignmark", "marquee", "math", "mglyph", "mi", "mn", "mo", "ms", "mtext", "nobr", "noembed", "noframes", "plaintext", "prompt", "public", "spacer", "strike", "svg", "system", "tt", "xmp", } ================================================ FILE: vendor/golang.org/x/net/html/atom/table.go ================================================ // generated by go run gen.go; DO NOT EDIT package atom const ( A Atom = 0x1 Abbr Atom = 0x4 Accept Atom = 0x2106 AcceptCharset Atom = 0x210e Accesskey Atom = 0x3309 Action Atom = 0x1f606 Address Atom = 0x4f307 Align Atom = 0x1105 Alt Atom = 0x4503 Annotation Atom = 0x1670a AnnotationXml Atom = 0x1670e Applet Atom = 0x2b306 Area Atom = 0x2fa04 Article Atom = 0x38807 Aside Atom = 0x8305 Async Atom = 0x7b05 Audio Atom = 0xa605 Autocomplete Atom = 0x1fc0c Autofocus Atom = 0xb309 Autoplay Atom = 0xce08 B Atom = 0x101 Base Atom = 0xd604 Basefont Atom = 0xd608 Bdi Atom = 0x1a03 Bdo Atom = 0xe703 Bgsound Atom = 0x11807 Big Atom = 0x12403 Blink Atom = 0x12705 Blockquote Atom = 0x12c0a Body Atom = 0x2f04 Br Atom = 0x202 Button Atom = 0x13606 Canvas Atom = 0x7f06 Caption Atom = 0x1bb07 Center Atom = 0x5b506 Challenge Atom = 0x21f09 Charset Atom = 0x2807 Checked Atom = 0x32807 Cite Atom = 0x3c804 Class Atom = 0x4de05 Code Atom = 0x14904 Col Atom = 0x15003 Colgroup Atom = 0x15008 Color Atom = 0x15d05 Cols Atom = 0x16204 Colspan Atom = 0x16207 Command Atom = 0x17507 Content Atom = 0x42307 Contenteditable Atom = 0x4230f Contextmenu Atom = 0x3310b Controls Atom = 0x18808 Coords Atom = 0x19406 Crossorigin Atom = 0x19f0b Data Atom = 0x44a04 Datalist Atom = 0x44a08 Datetime Atom = 0x23c08 Dd Atom = 0x26702 Default Atom = 0x8607 Defer Atom = 
0x14b05 Del Atom = 0x3ef03 Desc Atom = 0x4db04 Details Atom = 0x4807 Dfn Atom = 0x6103 Dialog Atom = 0x1b06 Dir Atom = 0x6903 Dirname Atom = 0x6907 Disabled Atom = 0x10c08 Div Atom = 0x11303 Dl Atom = 0x11e02 Download Atom = 0x40008 Draggable Atom = 0x17b09 Dropzone Atom = 0x39108 Dt Atom = 0x50902 Em Atom = 0x6502 Embed Atom = 0x6505 Enctype Atom = 0x21107 Face Atom = 0x5b304 Fieldset Atom = 0x1b008 Figcaption Atom = 0x1b80a Figure Atom = 0x1cc06 Font Atom = 0xda04 Footer Atom = 0x8d06 For Atom = 0x1d803 ForeignObject Atom = 0x1d80d Foreignobject Atom = 0x1e50d Form Atom = 0x1f204 Formaction Atom = 0x1f20a Formenctype Atom = 0x20d0b Formmethod Atom = 0x2280a Formnovalidate Atom = 0x2320e Formtarget Atom = 0x2470a Frame Atom = 0x9a05 Frameset Atom = 0x9a08 H1 Atom = 0x26e02 H2 Atom = 0x29402 H3 Atom = 0x2a702 H4 Atom = 0x2e902 H5 Atom = 0x2f302 H6 Atom = 0x50b02 Head Atom = 0x2d504 Header Atom = 0x2d506 Headers Atom = 0x2d507 Height Atom = 0x25106 Hgroup Atom = 0x25906 Hidden Atom = 0x26506 High Atom = 0x26b04 Hr Atom = 0x27002 Href Atom = 0x27004 Hreflang Atom = 0x27008 Html Atom = 0x25504 HttpEquiv Atom = 0x2780a I Atom = 0x601 Icon Atom = 0x42204 Id Atom = 0x8502 Iframe Atom = 0x29606 Image Atom = 0x29c05 Img Atom = 0x2a103 Input Atom = 0x3e805 Inputmode Atom = 0x3e809 Ins Atom = 0x1a803 Isindex Atom = 0x2a907 Ismap Atom = 0x2b005 Itemid Atom = 0x33c06 Itemprop Atom = 0x3c908 Itemref Atom = 0x5ad07 Itemscope Atom = 0x2b909 Itemtype Atom = 0x2c308 Kbd Atom = 0x1903 Keygen Atom = 0x3906 Keytype Atom = 0x53707 Kind Atom = 0x10904 Label Atom = 0xf005 Lang Atom = 0x27404 Legend Atom = 0x18206 Li Atom = 0x1202 Link Atom = 0x12804 List Atom = 0x44e04 Listing Atom = 0x44e07 Loop Atom = 0xf404 Low Atom = 0x11f03 Malignmark Atom = 0x100a Manifest Atom = 0x5f108 Map Atom = 0x2b203 Mark Atom = 0x1604 Marquee Atom = 0x2cb07 Math Atom = 0x2d204 Max Atom = 0x2e103 Maxlength Atom = 0x2e109 Media Atom = 0x6e05 Mediagroup Atom = 0x6e0a Menu Atom = 0x33804 Menuitem Atom = 0x33808 
Meta Atom = 0x45d04 Meter Atom = 0x24205 Method Atom = 0x22c06 Mglyph Atom = 0x2a206 Mi Atom = 0x2eb02 Min Atom = 0x2eb03 Minlength Atom = 0x2eb09 Mn Atom = 0x23502 Mo Atom = 0x3ed02 Ms Atom = 0x2bc02 Mtext Atom = 0x2f505 Multiple Atom = 0x30308 Muted Atom = 0x30b05 Name Atom = 0x6c04 Nav Atom = 0x3e03 Nobr Atom = 0x5704 Noembed Atom = 0x6307 Noframes Atom = 0x9808 Noscript Atom = 0x3d208 Novalidate Atom = 0x2360a Object Atom = 0x1ec06 Ol Atom = 0xc902 Onabort Atom = 0x13a07 Onafterprint Atom = 0x1c00c Onautocomplete Atom = 0x1fa0e Onautocompleteerror Atom = 0x1fa13 Onbeforeprint Atom = 0x6040d Onbeforeunload Atom = 0x4e70e Onblur Atom = 0xaa06 Oncancel Atom = 0xe908 Oncanplay Atom = 0x28509 Oncanplaythrough Atom = 0x28510 Onchange Atom = 0x3a708 Onclick Atom = 0x31007 Onclose Atom = 0x31707 Oncontextmenu Atom = 0x32f0d Oncuechange Atom = 0x3420b Ondblclick Atom = 0x34d0a Ondrag Atom = 0x35706 Ondragend Atom = 0x35709 Ondragenter Atom = 0x3600b Ondragleave Atom = 0x36b0b Ondragover Atom = 0x3760a Ondragstart Atom = 0x3800b Ondrop Atom = 0x38f06 Ondurationchange Atom = 0x39f10 Onemptied Atom = 0x39609 Onended Atom = 0x3af07 Onerror Atom = 0x3b607 Onfocus Atom = 0x3bd07 Onhashchange Atom = 0x3da0c Oninput Atom = 0x3e607 Oninvalid Atom = 0x3f209 Onkeydown Atom = 0x3fb09 Onkeypress Atom = 0x4080a Onkeyup Atom = 0x41807 Onlanguagechange Atom = 0x43210 Onload Atom = 0x44206 Onloadeddata Atom = 0x4420c Onloadedmetadata Atom = 0x45510 Onloadstart Atom = 0x46b0b Onmessage Atom = 0x47609 Onmousedown Atom = 0x47f0b Onmousemove Atom = 0x48a0b Onmouseout Atom = 0x4950a Onmouseover Atom = 0x4a20b Onmouseup Atom = 0x4ad09 Onmousewheel Atom = 0x4b60c Onoffline Atom = 0x4c209 Ononline Atom = 0x4cb08 Onpagehide Atom = 0x4d30a Onpageshow Atom = 0x4fe0a Onpause Atom = 0x50d07 Onplay Atom = 0x51706 Onplaying Atom = 0x51709 Onpopstate Atom = 0x5200a Onprogress Atom = 0x52a0a Onratechange Atom = 0x53e0c Onreset Atom = 0x54a07 Onresize Atom = 0x55108 Onscroll Atom = 0x55f08 Onseeked Atom 
= 0x56708 Onseeking Atom = 0x56f09 Onselect Atom = 0x57808 Onshow Atom = 0x58206 Onsort Atom = 0x58b06 Onstalled Atom = 0x59509 Onstorage Atom = 0x59e09 Onsubmit Atom = 0x5a708 Onsuspend Atom = 0x5bb09 Ontimeupdate Atom = 0xdb0c Ontoggle Atom = 0x5c408 Onunload Atom = 0x5cc08 Onvolumechange Atom = 0x5d40e Onwaiting Atom = 0x5e209 Open Atom = 0x3cf04 Optgroup Atom = 0xf608 Optimum Atom = 0x5eb07 Option Atom = 0x60006 Output Atom = 0x49c06 P Atom = 0xc01 Param Atom = 0xc05 Pattern Atom = 0x5107 Ping Atom = 0x7704 Placeholder Atom = 0xc30b Plaintext Atom = 0xfd09 Poster Atom = 0x15706 Pre Atom = 0x25e03 Preload Atom = 0x25e07 Progress Atom = 0x52c08 Prompt Atom = 0x5fa06 Public Atom = 0x41e06 Q Atom = 0x13101 Radiogroup Atom = 0x30a Readonly Atom = 0x2fb08 Rel Atom = 0x25f03 Required Atom = 0x1d008 Reversed Atom = 0x5a08 Rows Atom = 0x9204 Rowspan Atom = 0x9207 Rp Atom = 0x1c602 Rt Atom = 0x13f02 Ruby Atom = 0xaf04 S Atom = 0x2c01 Samp Atom = 0x4e04 Sandbox Atom = 0xbb07 Scope Atom = 0x2bd05 Scoped Atom = 0x2bd06 Script Atom = 0x3d406 Seamless Atom = 0x31c08 Section Atom = 0x4e207 Select Atom = 0x57a06 Selected Atom = 0x57a08 Shape Atom = 0x4f905 Size Atom = 0x55504 Sizes Atom = 0x55505 Small Atom = 0x18f05 Sortable Atom = 0x58d08 Sorted Atom = 0x19906 Source Atom = 0x1aa06 Spacer Atom = 0x2db06 Span Atom = 0x9504 Spellcheck Atom = 0x3230a Src Atom = 0x3c303 Srcdoc Atom = 0x3c306 Srclang Atom = 0x41107 Start Atom = 0x38605 Step Atom = 0x5f704 Strike Atom = 0x53306 Strong Atom = 0x55906 Style Atom = 0x61105 Sub Atom = 0x5a903 Summary Atom = 0x61607 Sup Atom = 0x61d03 Svg Atom = 0x62003 System Atom = 0x62306 Tabindex Atom = 0x46308 Table Atom = 0x42d05 Target Atom = 0x24b06 Tbody Atom = 0x2e05 Td Atom = 0x4702 Template Atom = 0x62608 Textarea Atom = 0x2f608 Tfoot Atom = 0x8c05 Th Atom = 0x22e02 Thead Atom = 0x2d405 Time Atom = 0xdd04 Title Atom = 0xa105 Tr Atom = 0x10502 Track Atom = 0x10505 Translate Atom = 0x14009 Tt Atom = 0x5302 Type Atom = 0x21404 Typemustmatch 
Atom = 0x2140d U Atom = 0xb01 Ul Atom = 0x8a02 Usemap Atom = 0x51106 Value Atom = 0x4005 Var Atom = 0x11503 Video Atom = 0x28105 Wbr Atom = 0x12103 Width Atom = 0x50705 Wrap Atom = 0x58704 Xmp Atom = 0xc103 ) const hash0 = 0xc17da63e const maxAtomLen = 19 var table = [1 << 9]Atom{ 0x1: 0x48a0b, // onmousemove 0x2: 0x5e209, // onwaiting 0x3: 0x1fa13, // onautocompleteerror 0x4: 0x5fa06, // prompt 0x7: 0x5eb07, // optimum 0x8: 0x1604, // mark 0xa: 0x5ad07, // itemref 0xb: 0x4fe0a, // onpageshow 0xc: 0x57a06, // select 0xd: 0x17b09, // draggable 0xe: 0x3e03, // nav 0xf: 0x17507, // command 0x11: 0xb01, // u 0x14: 0x2d507, // headers 0x15: 0x44a08, // datalist 0x17: 0x4e04, // samp 0x1a: 0x3fb09, // onkeydown 0x1b: 0x55f08, // onscroll 0x1c: 0x15003, // col 0x20: 0x3c908, // itemprop 0x21: 0x2780a, // http-equiv 0x22: 0x61d03, // sup 0x24: 0x1d008, // required 0x2b: 0x25e07, // preload 0x2c: 0x6040d, // onbeforeprint 0x2d: 0x3600b, // ondragenter 0x2e: 0x50902, // dt 0x2f: 0x5a708, // onsubmit 0x30: 0x27002, // hr 0x31: 0x32f0d, // oncontextmenu 0x33: 0x29c05, // image 0x34: 0x50d07, // onpause 0x35: 0x25906, // hgroup 0x36: 0x7704, // ping 0x37: 0x57808, // onselect 0x3a: 0x11303, // div 0x3b: 0x1fa0e, // onautocomplete 0x40: 0x2eb02, // mi 0x41: 0x31c08, // seamless 0x42: 0x2807, // charset 0x43: 0x8502, // id 0x44: 0x5200a, // onpopstate 0x45: 0x3ef03, // del 0x46: 0x2cb07, // marquee 0x47: 0x3309, // accesskey 0x49: 0x8d06, // footer 0x4a: 0x44e04, // list 0x4b: 0x2b005, // ismap 0x51: 0x33804, // menu 0x52: 0x2f04, // body 0x55: 0x9a08, // frameset 0x56: 0x54a07, // onreset 0x57: 0x12705, // blink 0x58: 0xa105, // title 0x59: 0x38807, // article 0x5b: 0x22e02, // th 0x5d: 0x13101, // q 0x5e: 0x3cf04, // open 0x5f: 0x2fa04, // area 0x61: 0x44206, // onload 0x62: 0xda04, // font 0x63: 0xd604, // base 0x64: 0x16207, // colspan 0x65: 0x53707, // keytype 0x66: 0x11e02, // dl 0x68: 0x1b008, // fieldset 0x6a: 0x2eb03, // min 0x6b: 0x11503, // var 0x6f: 0x2d506, // header 
0x70: 0x13f02, // rt 0x71: 0x15008, // colgroup 0x72: 0x23502, // mn 0x74: 0x13a07, // onabort 0x75: 0x3906, // keygen 0x76: 0x4c209, // onoffline 0x77: 0x21f09, // challenge 0x78: 0x2b203, // map 0x7a: 0x2e902, // h4 0x7b: 0x3b607, // onerror 0x7c: 0x2e109, // maxlength 0x7d: 0x2f505, // mtext 0x7e: 0xbb07, // sandbox 0x7f: 0x58b06, // onsort 0x80: 0x100a, // malignmark 0x81: 0x45d04, // meta 0x82: 0x7b05, // async 0x83: 0x2a702, // h3 0x84: 0x26702, // dd 0x85: 0x27004, // href 0x86: 0x6e0a, // mediagroup 0x87: 0x19406, // coords 0x88: 0x41107, // srclang 0x89: 0x34d0a, // ondblclick 0x8a: 0x4005, // value 0x8c: 0xe908, // oncancel 0x8e: 0x3230a, // spellcheck 0x8f: 0x9a05, // frame 0x91: 0x12403, // big 0x94: 0x1f606, // action 0x95: 0x6903, // dir 0x97: 0x2fb08, // readonly 0x99: 0x42d05, // table 0x9a: 0x61607, // summary 0x9b: 0x12103, // wbr 0x9c: 0x30a, // radiogroup 0x9d: 0x6c04, // name 0x9f: 0x62306, // system 0xa1: 0x15d05, // color 0xa2: 0x7f06, // canvas 0xa3: 0x25504, // html 0xa5: 0x56f09, // onseeking 0xac: 0x4f905, // shape 0xad: 0x25f03, // rel 0xae: 0x28510, // oncanplaythrough 0xaf: 0x3760a, // ondragover 0xb0: 0x62608, // template 0xb1: 0x1d80d, // foreignObject 0xb3: 0x9204, // rows 0xb6: 0x44e07, // listing 0xb7: 0x49c06, // output 0xb9: 0x3310b, // contextmenu 0xbb: 0x11f03, // low 0xbc: 0x1c602, // rp 0xbd: 0x5bb09, // onsuspend 0xbe: 0x13606, // button 0xbf: 0x4db04, // desc 0xc1: 0x4e207, // section 0xc2: 0x52a0a, // onprogress 0xc3: 0x59e09, // onstorage 0xc4: 0x2d204, // math 0xc5: 0x4503, // alt 0xc7: 0x8a02, // ul 0xc8: 0x5107, // pattern 0xc9: 0x4b60c, // onmousewheel 0xca: 0x35709, // ondragend 0xcb: 0xaf04, // ruby 0xcc: 0xc01, // p 0xcd: 0x31707, // onclose 0xce: 0x24205, // meter 0xcf: 0x11807, // bgsound 0xd2: 0x25106, // height 0xd4: 0x101, // b 0xd5: 0x2c308, // itemtype 0xd8: 0x1bb07, // caption 0xd9: 0x10c08, // disabled 0xdb: 0x33808, // menuitem 0xdc: 0x62003, // svg 0xdd: 0x18f05, // small 0xde: 0x44a04, // data 0xe0: 
0x4cb08, // ononline 0xe1: 0x2a206, // mglyph 0xe3: 0x6505, // embed 0xe4: 0x10502, // tr 0xe5: 0x46b0b, // onloadstart 0xe7: 0x3c306, // srcdoc 0xeb: 0x5c408, // ontoggle 0xed: 0xe703, // bdo 0xee: 0x4702, // td 0xef: 0x8305, // aside 0xf0: 0x29402, // h2 0xf1: 0x52c08, // progress 0xf2: 0x12c0a, // blockquote 0xf4: 0xf005, // label 0xf5: 0x601, // i 0xf7: 0x9207, // rowspan 0xfb: 0x51709, // onplaying 0xfd: 0x2a103, // img 0xfe: 0xf608, // optgroup 0xff: 0x42307, // content 0x101: 0x53e0c, // onratechange 0x103: 0x3da0c, // onhashchange 0x104: 0x4807, // details 0x106: 0x40008, // download 0x109: 0x14009, // translate 0x10b: 0x4230f, // contenteditable 0x10d: 0x36b0b, // ondragleave 0x10e: 0x2106, // accept 0x10f: 0x57a08, // selected 0x112: 0x1f20a, // formaction 0x113: 0x5b506, // center 0x115: 0x45510, // onloadedmetadata 0x116: 0x12804, // link 0x117: 0xdd04, // time 0x118: 0x19f0b, // crossorigin 0x119: 0x3bd07, // onfocus 0x11a: 0x58704, // wrap 0x11b: 0x42204, // icon 0x11d: 0x28105, // video 0x11e: 0x4de05, // class 0x121: 0x5d40e, // onvolumechange 0x122: 0xaa06, // onblur 0x123: 0x2b909, // itemscope 0x124: 0x61105, // style 0x127: 0x41e06, // public 0x129: 0x2320e, // formnovalidate 0x12a: 0x58206, // onshow 0x12c: 0x51706, // onplay 0x12d: 0x3c804, // cite 0x12e: 0x2bc02, // ms 0x12f: 0xdb0c, // ontimeupdate 0x130: 0x10904, // kind 0x131: 0x2470a, // formtarget 0x135: 0x3af07, // onended 0x136: 0x26506, // hidden 0x137: 0x2c01, // s 0x139: 0x2280a, // formmethod 0x13a: 0x3e805, // input 0x13c: 0x50b02, // h6 0x13d: 0xc902, // ol 0x13e: 0x3420b, // oncuechange 0x13f: 0x1e50d, // foreignobject 0x143: 0x4e70e, // onbeforeunload 0x144: 0x2bd05, // scope 0x145: 0x39609, // onemptied 0x146: 0x14b05, // defer 0x147: 0xc103, // xmp 0x148: 0x39f10, // ondurationchange 0x149: 0x1903, // kbd 0x14c: 0x47609, // onmessage 0x14d: 0x60006, // option 0x14e: 0x2eb09, // minlength 0x14f: 0x32807, // checked 0x150: 0xce08, // autoplay 0x152: 0x202, // br 0x153: 0x2360a, 
// novalidate 0x156: 0x6307, // noembed 0x159: 0x31007, // onclick 0x15a: 0x47f0b, // onmousedown 0x15b: 0x3a708, // onchange 0x15e: 0x3f209, // oninvalid 0x15f: 0x2bd06, // scoped 0x160: 0x18808, // controls 0x161: 0x30b05, // muted 0x162: 0x58d08, // sortable 0x163: 0x51106, // usemap 0x164: 0x1b80a, // figcaption 0x165: 0x35706, // ondrag 0x166: 0x26b04, // high 0x168: 0x3c303, // src 0x169: 0x15706, // poster 0x16b: 0x1670e, // annotation-xml 0x16c: 0x5f704, // step 0x16d: 0x4, // abbr 0x16e: 0x1b06, // dialog 0x170: 0x1202, // li 0x172: 0x3ed02, // mo 0x175: 0x1d803, // for 0x176: 0x1a803, // ins 0x178: 0x55504, // size 0x179: 0x43210, // onlanguagechange 0x17a: 0x8607, // default 0x17b: 0x1a03, // bdi 0x17c: 0x4d30a, // onpagehide 0x17d: 0x6907, // dirname 0x17e: 0x21404, // type 0x17f: 0x1f204, // form 0x181: 0x28509, // oncanplay 0x182: 0x6103, // dfn 0x183: 0x46308, // tabindex 0x186: 0x6502, // em 0x187: 0x27404, // lang 0x189: 0x39108, // dropzone 0x18a: 0x4080a, // onkeypress 0x18b: 0x23c08, // datetime 0x18c: 0x16204, // cols 0x18d: 0x1, // a 0x18e: 0x4420c, // onloadeddata 0x190: 0xa605, // audio 0x192: 0x2e05, // tbody 0x193: 0x22c06, // method 0x195: 0xf404, // loop 0x196: 0x29606, // iframe 0x198: 0x2d504, // head 0x19e: 0x5f108, // manifest 0x19f: 0xb309, // autofocus 0x1a0: 0x14904, // code 0x1a1: 0x55906, // strong 0x1a2: 0x30308, // multiple 0x1a3: 0xc05, // param 0x1a6: 0x21107, // enctype 0x1a7: 0x5b304, // face 0x1a8: 0xfd09, // plaintext 0x1a9: 0x26e02, // h1 0x1aa: 0x59509, // onstalled 0x1ad: 0x3d406, // script 0x1ae: 0x2db06, // spacer 0x1af: 0x55108, // onresize 0x1b0: 0x4a20b, // onmouseover 0x1b1: 0x5cc08, // onunload 0x1b2: 0x56708, // onseeked 0x1b4: 0x2140d, // typemustmatch 0x1b5: 0x1cc06, // figure 0x1b6: 0x4950a, // onmouseout 0x1b7: 0x25e03, // pre 0x1b8: 0x50705, // width 0x1b9: 0x19906, // sorted 0x1bb: 0x5704, // nobr 0x1be: 0x5302, // tt 0x1bf: 0x1105, // align 0x1c0: 0x3e607, // oninput 0x1c3: 0x41807, // onkeyup 0x1c6: 
0x1c00c, // onafterprint 0x1c7: 0x210e, // accept-charset 0x1c8: 0x33c06, // itemid 0x1c9: 0x3e809, // inputmode 0x1cb: 0x53306, // strike 0x1cc: 0x5a903, // sub 0x1cd: 0x10505, // track 0x1ce: 0x38605, // start 0x1d0: 0xd608, // basefont 0x1d6: 0x1aa06, // source 0x1d7: 0x18206, // legend 0x1d8: 0x2d405, // thead 0x1da: 0x8c05, // tfoot 0x1dd: 0x1ec06, // object 0x1de: 0x6e05, // media 0x1df: 0x1670a, // annotation 0x1e0: 0x20d0b, // formenctype 0x1e2: 0x3d208, // noscript 0x1e4: 0x55505, // sizes 0x1e5: 0x1fc0c, // autocomplete 0x1e6: 0x9504, // span 0x1e7: 0x9808, // noframes 0x1e8: 0x24b06, // target 0x1e9: 0x38f06, // ondrop 0x1ea: 0x2b306, // applet 0x1ec: 0x5a08, // reversed 0x1f0: 0x2a907, // isindex 0x1f3: 0x27008, // hreflang 0x1f5: 0x2f302, // h5 0x1f6: 0x4f307, // address 0x1fa: 0x2e103, // max 0x1fb: 0xc30b, // placeholder 0x1fc: 0x2f608, // textarea 0x1fe: 0x4ad09, // onmouseup 0x1ff: 0x3800b, // ondragstart } const atomText = "abbradiogrouparamalignmarkbdialogaccept-charsetbodyaccesskey" + "genavaluealtdetailsampatternobreversedfnoembedirnamediagroup" + "ingasyncanvasidefaultfooterowspanoframesetitleaudionblurubya" + "utofocusandboxmplaceholderautoplaybasefontimeupdatebdoncance" + "labelooptgrouplaintextrackindisabledivarbgsoundlowbrbigblink" + "blockquotebuttonabortranslatecodefercolgroupostercolorcolspa" + "nnotation-xmlcommandraggablegendcontrolsmallcoordsortedcross" + "originsourcefieldsetfigcaptionafterprintfigurequiredforeignO" + "bjectforeignobjectformactionautocompleteerrorformenctypemust" + "matchallengeformmethodformnovalidatetimeterformtargetheightm" + "lhgroupreloadhiddenhigh1hreflanghttp-equivideoncanplaythroug" + "h2iframeimageimglyph3isindexismappletitemscopeditemtypemarqu" + "eematheaderspacermaxlength4minlength5mtextareadonlymultiplem" + "utedonclickoncloseamlesspellcheckedoncontextmenuitemidoncuec" + "hangeondblclickondragendondragenterondragleaveondragoverondr" + "agstarticleondropzonemptiedondurationchangeonendedonerroronf" + 
"ocusrcdocitempropenoscriptonhashchangeoninputmodeloninvalido" + "nkeydownloadonkeypressrclangonkeyupublicontenteditableonlang" + "uagechangeonloadeddatalistingonloadedmetadatabindexonloadsta" + "rtonmessageonmousedownonmousemoveonmouseoutputonmouseoveronm" + "ouseuponmousewheelonofflineononlineonpagehidesclassectionbef" + "oreunloaddresshapeonpageshowidth6onpausemaponplayingonpopsta" + "teonprogresstrikeytypeonratechangeonresetonresizestrongonscr" + "ollonseekedonseekingonselectedonshowraponsortableonstalledon" + "storageonsubmitemrefacenteronsuspendontoggleonunloadonvolume" + "changeonwaitingoptimumanifestepromptoptionbeforeprintstylesu" + "mmarysupsvgsystemplate" ================================================ FILE: vendor/golang.org/x/net/html/const.go ================================================ // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package html // Section 12.2.3.2 of the HTML5 specification says "The following elements // have varying levels of special parsing rules". 
// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements
var isSpecialElementMap = map[string]bool{
	"address":    true,
	"applet":     true,
	"area":       true,
	"article":    true,
	"aside":      true,
	"base":       true,
	"basefont":   true,
	"bgsound":    true,
	"blockquote": true,
	"body":       true,
	"br":         true,
	"button":     true,
	"caption":    true,
	"center":     true,
	"col":        true,
	"colgroup":   true,
	"dd":         true,
	"details":    true,
	"dir":        true,
	"div":        true,
	"dl":         true,
	"dt":         true,
	"embed":      true,
	"fieldset":   true,
	"figcaption": true,
	"figure":     true,
	"footer":     true,
	"form":       true,
	"frame":      true,
	"frameset":   true,
	"h1":         true,
	"h2":         true,
	"h3":         true,
	"h4":         true,
	"h5":         true,
	"h6":         true,
	"head":       true,
	"header":     true,
	"hgroup":     true,
	"hr":         true,
	"html":       true,
	"iframe":     true,
	"img":        true,
	"input":      true,
	"isindex":    true,
	"li":         true,
	"link":       true,
	"listing":    true,
	"marquee":    true,
	"menu":       true,
	"meta":       true,
	"nav":        true,
	"noembed":    true,
	"noframes":   true,
	"noscript":   true,
	"object":     true,
	"ol":         true,
	"p":          true,
	"param":      true,
	"plaintext":  true,
	"pre":        true,
	"script":     true,
	"section":    true,
	"select":     true,
	"source":     true,
	"style":      true,
	"summary":    true,
	"table":      true,
	"tbody":      true,
	"td":         true,
	"template":   true,
	"textarea":   true,
	"tfoot":      true,
	"th":         true,
	"thead":      true,
	"title":      true,
	"tr":         true,
	"track":      true,
	"ul":         true,
	"wbr":        true,
	"xmp":        true,
}

// isSpecialElement reports whether element is a "special" element for the
// purposes of the HTML parsing algorithm. Elements in the HTML (or empty)
// namespace are looked up in isSpecialElementMap; in the SVG namespace only
// foreignObject qualifies; elements in any other namespace are never special.
func isSpecialElement(element *Node) bool {
	switch element.Namespace {
	case "", "html":
		return isSpecialElementMap[element.Data]
	case "svg":
		return element.Data == "foreignObject"
	}
	return false
}

================================================ FILE: vendor/golang.org/x/net/html/doc.go ================================================

// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

/*
Package html implements an HTML5-compliant tokenizer and parser.

Tokenization is done by creating a Tokenizer for an io.Reader r.
It is the caller's responsibility to ensure that r provides UTF-8 encoded HTML.

	z := html.NewTokenizer(r)

Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(),
which parses the next token and returns its type, or an error:

	for {
		tt := z.Next()
		if tt == html.ErrorToken {
			// ...
			return ...
		}
		// Process the current token.
	}

There are two APIs for retrieving the current token. The high-level API is to
call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs
allow optionally calling Raw after Next but before Token, Text, TagName, or
TagAttr. In EBNF notation, the valid call sequence per token is:

	Next {Raw} [ Token | Text | TagName {TagAttr} ]

Token returns an independent data structure that completely describes a token.
Entities (such as "&lt;") are unescaped, tag names and attribute keys are
lower-cased, and attributes are collected into a []Attribute. For example:

	for {
		if z.Next() == html.ErrorToken {
			// Returning io.EOF indicates success.
			return z.Err()
		}
		emitToken(z.Token())
	}

The low-level API performs fewer allocations and copies, but the contents of
the []byte values returned by Text, TagName and TagAttr may change on the next
call to Next. For example, to extract an HTML page's anchor text:

	depth := 0
	for {
		tt := z.Next()
		switch tt {
		case ErrorToken:
			return z.Err()
		case TextToken:
			if depth > 0 {
				// emitBytes should copy the []byte it receives,
				// if it doesn't process it immediately.
				emitBytes(z.Text())
			}
		case StartTagToken, EndTagToken:
			tn, _ := z.TagName()
			if len(tn) == 1 && tn[0] == 'a' {
				if tt == StartTagToken {
					depth++
				} else {
					depth--
				}
			}
		}
	}

Parsing is done by calling Parse with an io.Reader, which returns the root of
the parse tree (the document element) as a *Node. It is the caller's
responsibility to ensure that the Reader provides UTF-8 encoded HTML. For
example, to process each anchor node in depth-first order:

	doc, err := html.Parse(r)
	if err != nil {
		// ...
} var f func(*html.Node) f = func(n *html.Node) { if n.Type == html.ElementNode && n.Data == "a" { // Do something with n... } for c := n.FirstChild; c != nil; c = c.NextSibling { f(c) } } f(doc) The relevant specifications include: https://html.spec.whatwg.org/multipage/syntax.html and https://html.spec.whatwg.org/multipage/syntax.html#tokenization */ package html // The tokenization algorithm implemented by this package is not a line-by-line // transliteration of the relatively verbose state-machine in the WHATWG // specification. A more direct approach is used instead, where the program // counter implies the state, such as whether it is tokenizing a tag or a text // node. Specification compliance is verified by checking expected and actual // outputs over a test suite rather than aiming for algorithmic fidelity. // TODO(nigeltao): Does a DOM API belong in this package or a separate one? // TODO(nigeltao): How does parsing interact with a JavaScript engine? ================================================ FILE: vendor/golang.org/x/net/html/doctype.go ================================================ // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package html import ( "strings" ) // parseDoctype parses the data from a DoctypeToken into a name, // public identifier, and system identifier. It returns a Node whose Type // is DoctypeNode, whose Data is the name, and which has attributes // named "system" and "public" for the two identifiers if they were present. // quirks is whether the document should be parsed in "quirks mode". func parseDoctype(s string) (n *Node, quirks bool) { n = &Node{Type: DoctypeNode} // Find the name. space := strings.IndexAny(s, whitespace) if space == -1 { space = len(s) } n.Data = s[:space] // The comparison to "html" is case-sensitive. 
if n.Data != "html" { quirks = true } n.Data = strings.ToLower(n.Data) s = strings.TrimLeft(s[space:], whitespace) if len(s) < 6 { // It can't start with "PUBLIC" or "SYSTEM". // Ignore the rest of the string. return n, quirks || s != "" } key := strings.ToLower(s[:6]) s = s[6:] for key == "public" || key == "system" { s = strings.TrimLeft(s, whitespace) if s == "" { break } quote := s[0] if quote != '"' && quote != '\'' { break } s = s[1:] q := strings.IndexRune(s, rune(quote)) var id string if q == -1 { id = s s = "" } else { id = s[:q] s = s[q+1:] } n.Attr = append(n.Attr, Attribute{Key: key, Val: id}) if key == "public" { key = "system" } else { key = "" } } if key != "" || s != "" { quirks = true } else if len(n.Attr) > 0 { if n.Attr[0].Key == "public" { public := strings.ToLower(n.Attr[0].Val) switch public { case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html": quirks = true default: for _, q := range quirkyIDs { if strings.HasPrefix(public, q) { quirks = true break } } } // The following two public IDs only cause quirks mode if there is no system ID. if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") || strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) { quirks = true } } if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" && strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" { quirks = true } } return n, quirks } // quirkyIDs is a list of public doctype identifiers that cause a document // to be interpreted in quirks mode. The identifiers should be in lower case. 
// NOTE(review): parseDoctype matches these entries with strings.HasPrefix
// against a lower-cased public identifier, so every entry must stay
// lower-case.
var quirkyIDs = []string{
	"+//silmaril//dtd html pro v0r11 19970101//",
	"-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
	"-//as//dtd html 3.0 aswedit + extensions//",
	"-//ietf//dtd html 2.0 level 1//",
	"-//ietf//dtd html 2.0 level 2//",
	"-//ietf//dtd html 2.0 strict level 1//",
	"-//ietf//dtd html 2.0 strict level 2//",
	"-//ietf//dtd html 2.0 strict//",
	"-//ietf//dtd html 2.0//",
	"-//ietf//dtd html 2.1e//",
	"-//ietf//dtd html 3.0//",
	"-//ietf//dtd html 3.2 final//",
	"-//ietf//dtd html 3.2//",
	"-//ietf//dtd html 3//",
	"-//ietf//dtd html level 0//",
	"-//ietf//dtd html level 1//",
	"-//ietf//dtd html level 2//",
	"-//ietf//dtd html level 3//",
	"-//ietf//dtd html strict level 0//",
	"-//ietf//dtd html strict level 1//",
	"-//ietf//dtd html strict level 2//",
	"-//ietf//dtd html strict level 3//",
	"-//ietf//dtd html strict//",
	"-//ietf//dtd html//",
	"-//metrius//dtd metrius presentational//",
	"-//microsoft//dtd internet explorer 2.0 html strict//",
	"-//microsoft//dtd internet explorer 2.0 html//",
	"-//microsoft//dtd internet explorer 2.0 tables//",
	"-//microsoft//dtd internet explorer 3.0 html strict//",
	"-//microsoft//dtd internet explorer 3.0 html//",
	"-//microsoft//dtd internet explorer 3.0 tables//",
	"-//netscape comm. corp.//dtd html//",
	"-//netscape comm. corp.//dtd strict html//",
	"-//o'reilly and associates//dtd html 2.0//",
	"-//o'reilly and associates//dtd html extended 1.0//",
	"-//o'reilly and associates//dtd html extended relaxed 1.0//",
	"-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
	"-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
	"-//spyglass//dtd html 2.0 extended//",
	"-//sq//dtd html 2.0 hotmetal + extensions//",
	"-//sun microsystems corp.//dtd hotjava html//",
	"-//sun microsystems corp.//dtd hotjava strict html//",
	"-//w3c//dtd html 3 1995-03-24//",
	"-//w3c//dtd html 3.2 draft//",
	"-//w3c//dtd html 3.2 final//",
	"-//w3c//dtd html 3.2//",
	"-//w3c//dtd html 3.2s draft//",
	"-//w3c//dtd html 4.0 frameset//",
	"-//w3c//dtd html 4.0 transitional//",
	"-//w3c//dtd html experimental 19960712//",
	"-//w3c//dtd html experimental 970421//",
	"-//w3c//dtd w3 html//",
	"-//w3o//dtd w3 html 3.0//",
	"-//webtechs//dtd mozilla html 2.0//",
	"-//webtechs//dtd mozilla html//",
}

================================================ FILE: vendor/golang.org/x/net/html/entity.go ================================================

// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package html

// All entities that do not end with ';' are 6 or fewer bytes long.
const longestEntityWithoutSemicolon = 6

// entity is a map from HTML entity names to their values. The semicolon matters:
// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references
// lists both "amp" and "amp;" as two separate entries.
// // Note that the HTML5 list is larger than the HTML4 list at // http://www.w3.org/TR/html4/sgml/entities.html var entity = map[string]rune{ "AElig;": '\U000000C6', "AMP;": '\U00000026', "Aacute;": '\U000000C1', "Abreve;": '\U00000102', "Acirc;": '\U000000C2', "Acy;": '\U00000410', "Afr;": '\U0001D504', "Agrave;": '\U000000C0', "Alpha;": '\U00000391', "Amacr;": '\U00000100', "And;": '\U00002A53', "Aogon;": '\U00000104', "Aopf;": '\U0001D538', "ApplyFunction;": '\U00002061', "Aring;": '\U000000C5', "Ascr;": '\U0001D49C', "Assign;": '\U00002254', "Atilde;": '\U000000C3', "Auml;": '\U000000C4', "Backslash;": '\U00002216', "Barv;": '\U00002AE7', "Barwed;": '\U00002306', "Bcy;": '\U00000411', "Because;": '\U00002235', "Bernoullis;": '\U0000212C', "Beta;": '\U00000392', "Bfr;": '\U0001D505', "Bopf;": '\U0001D539', "Breve;": '\U000002D8', "Bscr;": '\U0000212C', "Bumpeq;": '\U0000224E', "CHcy;": '\U00000427', "COPY;": '\U000000A9', "Cacute;": '\U00000106', "Cap;": '\U000022D2', "CapitalDifferentialD;": '\U00002145', "Cayleys;": '\U0000212D', "Ccaron;": '\U0000010C', "Ccedil;": '\U000000C7', "Ccirc;": '\U00000108', "Cconint;": '\U00002230', "Cdot;": '\U0000010A', "Cedilla;": '\U000000B8', "CenterDot;": '\U000000B7', "Cfr;": '\U0000212D', "Chi;": '\U000003A7', "CircleDot;": '\U00002299', "CircleMinus;": '\U00002296', "CirclePlus;": '\U00002295', "CircleTimes;": '\U00002297', "ClockwiseContourIntegral;": '\U00002232', "CloseCurlyDoubleQuote;": '\U0000201D', "CloseCurlyQuote;": '\U00002019', "Colon;": '\U00002237', "Colone;": '\U00002A74', "Congruent;": '\U00002261', "Conint;": '\U0000222F', "ContourIntegral;": '\U0000222E', "Copf;": '\U00002102', "Coproduct;": '\U00002210', "CounterClockwiseContourIntegral;": '\U00002233', "Cross;": '\U00002A2F', "Cscr;": '\U0001D49E', "Cup;": '\U000022D3', "CupCap;": '\U0000224D', "DD;": '\U00002145', "DDotrahd;": '\U00002911', "DJcy;": '\U00000402', "DScy;": '\U00000405', "DZcy;": '\U0000040F', "Dagger;": '\U00002021', "Darr;": 
'\U000021A1', "Dashv;": '\U00002AE4', "Dcaron;": '\U0000010E', "Dcy;": '\U00000414', "Del;": '\U00002207', "Delta;": '\U00000394', "Dfr;": '\U0001D507', "DiacriticalAcute;": '\U000000B4', "DiacriticalDot;": '\U000002D9', "DiacriticalDoubleAcute;": '\U000002DD', "DiacriticalGrave;": '\U00000060', "DiacriticalTilde;": '\U000002DC', "Diamond;": '\U000022C4', "DifferentialD;": '\U00002146', "Dopf;": '\U0001D53B', "Dot;": '\U000000A8', "DotDot;": '\U000020DC', "DotEqual;": '\U00002250', "DoubleContourIntegral;": '\U0000222F', "DoubleDot;": '\U000000A8', "DoubleDownArrow;": '\U000021D3', "DoubleLeftArrow;": '\U000021D0', "DoubleLeftRightArrow;": '\U000021D4', "DoubleLeftTee;": '\U00002AE4', "DoubleLongLeftArrow;": '\U000027F8', "DoubleLongLeftRightArrow;": '\U000027FA', "DoubleLongRightArrow;": '\U000027F9', "DoubleRightArrow;": '\U000021D2', "DoubleRightTee;": '\U000022A8', "DoubleUpArrow;": '\U000021D1', "DoubleUpDownArrow;": '\U000021D5', "DoubleVerticalBar;": '\U00002225', "DownArrow;": '\U00002193', "DownArrowBar;": '\U00002913', "DownArrowUpArrow;": '\U000021F5', "DownBreve;": '\U00000311', "DownLeftRightVector;": '\U00002950', "DownLeftTeeVector;": '\U0000295E', "DownLeftVector;": '\U000021BD', "DownLeftVectorBar;": '\U00002956', "DownRightTeeVector;": '\U0000295F', "DownRightVector;": '\U000021C1', "DownRightVectorBar;": '\U00002957', "DownTee;": '\U000022A4', "DownTeeArrow;": '\U000021A7', "Downarrow;": '\U000021D3', "Dscr;": '\U0001D49F', "Dstrok;": '\U00000110', "ENG;": '\U0000014A', "ETH;": '\U000000D0', "Eacute;": '\U000000C9', "Ecaron;": '\U0000011A', "Ecirc;": '\U000000CA', "Ecy;": '\U0000042D', "Edot;": '\U00000116', "Efr;": '\U0001D508', "Egrave;": '\U000000C8', "Element;": '\U00002208', "Emacr;": '\U00000112', "EmptySmallSquare;": '\U000025FB', "EmptyVerySmallSquare;": '\U000025AB', "Eogon;": '\U00000118', "Eopf;": '\U0001D53C', "Epsilon;": '\U00000395', "Equal;": '\U00002A75', "EqualTilde;": '\U00002242', "Equilibrium;": '\U000021CC', "Escr;": 
'\U00002130', "Esim;": '\U00002A73', "Eta;": '\U00000397', "Euml;": '\U000000CB', "Exists;": '\U00002203', "ExponentialE;": '\U00002147', "Fcy;": '\U00000424', "Ffr;": '\U0001D509', "FilledSmallSquare;": '\U000025FC', "FilledVerySmallSquare;": '\U000025AA', "Fopf;": '\U0001D53D', "ForAll;": '\U00002200', "Fouriertrf;": '\U00002131', "Fscr;": '\U00002131', "GJcy;": '\U00000403', "GT;": '\U0000003E', "Gamma;": '\U00000393', "Gammad;": '\U000003DC', "Gbreve;": '\U0000011E', "Gcedil;": '\U00000122', "Gcirc;": '\U0000011C', "Gcy;": '\U00000413', "Gdot;": '\U00000120', "Gfr;": '\U0001D50A', "Gg;": '\U000022D9', "Gopf;": '\U0001D53E', "GreaterEqual;": '\U00002265', "GreaterEqualLess;": '\U000022DB', "GreaterFullEqual;": '\U00002267', "GreaterGreater;": '\U00002AA2', "GreaterLess;": '\U00002277', "GreaterSlantEqual;": '\U00002A7E', "GreaterTilde;": '\U00002273', "Gscr;": '\U0001D4A2', "Gt;": '\U0000226B', "HARDcy;": '\U0000042A', "Hacek;": '\U000002C7', "Hat;": '\U0000005E', "Hcirc;": '\U00000124', "Hfr;": '\U0000210C', "HilbertSpace;": '\U0000210B', "Hopf;": '\U0000210D', "HorizontalLine;": '\U00002500', "Hscr;": '\U0000210B', "Hstrok;": '\U00000126', "HumpDownHump;": '\U0000224E', "HumpEqual;": '\U0000224F', "IEcy;": '\U00000415', "IJlig;": '\U00000132', "IOcy;": '\U00000401', "Iacute;": '\U000000CD', "Icirc;": '\U000000CE', "Icy;": '\U00000418', "Idot;": '\U00000130', "Ifr;": '\U00002111', "Igrave;": '\U000000CC', "Im;": '\U00002111', "Imacr;": '\U0000012A', "ImaginaryI;": '\U00002148', "Implies;": '\U000021D2', "Int;": '\U0000222C', "Integral;": '\U0000222B', "Intersection;": '\U000022C2', "InvisibleComma;": '\U00002063', "InvisibleTimes;": '\U00002062', "Iogon;": '\U0000012E', "Iopf;": '\U0001D540', "Iota;": '\U00000399', "Iscr;": '\U00002110', "Itilde;": '\U00000128', "Iukcy;": '\U00000406', "Iuml;": '\U000000CF', "Jcirc;": '\U00000134', "Jcy;": '\U00000419', "Jfr;": '\U0001D50D', "Jopf;": '\U0001D541', "Jscr;": '\U0001D4A5', "Jsercy;": '\U00000408', "Jukcy;": 
'\U00000404', "KHcy;": '\U00000425', "KJcy;": '\U0000040C', "Kappa;": '\U0000039A', "Kcedil;": '\U00000136', "Kcy;": '\U0000041A', "Kfr;": '\U0001D50E', "Kopf;": '\U0001D542', "Kscr;": '\U0001D4A6', "LJcy;": '\U00000409', "LT;": '\U0000003C', "Lacute;": '\U00000139', "Lambda;": '\U0000039B', "Lang;": '\U000027EA', "Laplacetrf;": '\U00002112', "Larr;": '\U0000219E', "Lcaron;": '\U0000013D', "Lcedil;": '\U0000013B', "Lcy;": '\U0000041B', "LeftAngleBracket;": '\U000027E8', "LeftArrow;": '\U00002190', "LeftArrowBar;": '\U000021E4', "LeftArrowRightArrow;": '\U000021C6', "LeftCeiling;": '\U00002308', "LeftDoubleBracket;": '\U000027E6', "LeftDownTeeVector;": '\U00002961', "LeftDownVector;": '\U000021C3', "LeftDownVectorBar;": '\U00002959', "LeftFloor;": '\U0000230A', "LeftRightArrow;": '\U00002194', "LeftRightVector;": '\U0000294E', "LeftTee;": '\U000022A3', "LeftTeeArrow;": '\U000021A4', "LeftTeeVector;": '\U0000295A', "LeftTriangle;": '\U000022B2', "LeftTriangleBar;": '\U000029CF', "LeftTriangleEqual;": '\U000022B4', "LeftUpDownVector;": '\U00002951', "LeftUpTeeVector;": '\U00002960', "LeftUpVector;": '\U000021BF', "LeftUpVectorBar;": '\U00002958', "LeftVector;": '\U000021BC', "LeftVectorBar;": '\U00002952', "Leftarrow;": '\U000021D0', "Leftrightarrow;": '\U000021D4', "LessEqualGreater;": '\U000022DA', "LessFullEqual;": '\U00002266', "LessGreater;": '\U00002276', "LessLess;": '\U00002AA1', "LessSlantEqual;": '\U00002A7D', "LessTilde;": '\U00002272', "Lfr;": '\U0001D50F', "Ll;": '\U000022D8', "Lleftarrow;": '\U000021DA', "Lmidot;": '\U0000013F', "LongLeftArrow;": '\U000027F5', "LongLeftRightArrow;": '\U000027F7', "LongRightArrow;": '\U000027F6', "Longleftarrow;": '\U000027F8', "Longleftrightarrow;": '\U000027FA', "Longrightarrow;": '\U000027F9', "Lopf;": '\U0001D543', "LowerLeftArrow;": '\U00002199', "LowerRightArrow;": '\U00002198', "Lscr;": '\U00002112', "Lsh;": '\U000021B0', "Lstrok;": '\U00000141', "Lt;": '\U0000226A', "Map;": '\U00002905', "Mcy;": '\U0000041C', 
"MediumSpace;": '\U0000205F', "Mellintrf;": '\U00002133', "Mfr;": '\U0001D510', "MinusPlus;": '\U00002213', "Mopf;": '\U0001D544', "Mscr;": '\U00002133', "Mu;": '\U0000039C', "NJcy;": '\U0000040A', "Nacute;": '\U00000143', "Ncaron;": '\U00000147', "Ncedil;": '\U00000145', "Ncy;": '\U0000041D', "NegativeMediumSpace;": '\U0000200B', "NegativeThickSpace;": '\U0000200B', "NegativeThinSpace;": '\U0000200B', "NegativeVeryThinSpace;": '\U0000200B', "NestedGreaterGreater;": '\U0000226B', "NestedLessLess;": '\U0000226A', "NewLine;": '\U0000000A', "Nfr;": '\U0001D511', "NoBreak;": '\U00002060', "NonBreakingSpace;": '\U000000A0', "Nopf;": '\U00002115', "Not;": '\U00002AEC', "NotCongruent;": '\U00002262', "NotCupCap;": '\U0000226D', "NotDoubleVerticalBar;": '\U00002226', "NotElement;": '\U00002209', "NotEqual;": '\U00002260', "NotExists;": '\U00002204', "NotGreater;": '\U0000226F', "NotGreaterEqual;": '\U00002271', "NotGreaterLess;": '\U00002279', "NotGreaterTilde;": '\U00002275', "NotLeftTriangle;": '\U000022EA', "NotLeftTriangleEqual;": '\U000022EC', "NotLess;": '\U0000226E', "NotLessEqual;": '\U00002270', "NotLessGreater;": '\U00002278', "NotLessTilde;": '\U00002274', "NotPrecedes;": '\U00002280', "NotPrecedesSlantEqual;": '\U000022E0', "NotReverseElement;": '\U0000220C', "NotRightTriangle;": '\U000022EB', "NotRightTriangleEqual;": '\U000022ED', "NotSquareSubsetEqual;": '\U000022E2', "NotSquareSupersetEqual;": '\U000022E3', "NotSubsetEqual;": '\U00002288', "NotSucceeds;": '\U00002281', "NotSucceedsSlantEqual;": '\U000022E1', "NotSupersetEqual;": '\U00002289', "NotTilde;": '\U00002241', "NotTildeEqual;": '\U00002244', "NotTildeFullEqual;": '\U00002247', "NotTildeTilde;": '\U00002249', "NotVerticalBar;": '\U00002224', "Nscr;": '\U0001D4A9', "Ntilde;": '\U000000D1', "Nu;": '\U0000039D', "OElig;": '\U00000152', "Oacute;": '\U000000D3', "Ocirc;": '\U000000D4', "Ocy;": '\U0000041E', "Odblac;": '\U00000150', "Ofr;": '\U0001D512', "Ograve;": '\U000000D2', "Omacr;": '\U0000014C', 
"Omega;": '\U000003A9', "Omicron;": '\U0000039F', "Oopf;": '\U0001D546', "OpenCurlyDoubleQuote;": '\U0000201C', "OpenCurlyQuote;": '\U00002018', "Or;": '\U00002A54', "Oscr;": '\U0001D4AA', "Oslash;": '\U000000D8', "Otilde;": '\U000000D5', "Otimes;": '\U00002A37', "Ouml;": '\U000000D6', "OverBar;": '\U0000203E', "OverBrace;": '\U000023DE', "OverBracket;": '\U000023B4', "OverParenthesis;": '\U000023DC', "PartialD;": '\U00002202', "Pcy;": '\U0000041F', "Pfr;": '\U0001D513', "Phi;": '\U000003A6', "Pi;": '\U000003A0', "PlusMinus;": '\U000000B1', "Poincareplane;": '\U0000210C', "Popf;": '\U00002119', "Pr;": '\U00002ABB', "Precedes;": '\U0000227A', "PrecedesEqual;": '\U00002AAF', "PrecedesSlantEqual;": '\U0000227C', "PrecedesTilde;": '\U0000227E', "Prime;": '\U00002033', "Product;": '\U0000220F', "Proportion;": '\U00002237', "Proportional;": '\U0000221D', "Pscr;": '\U0001D4AB', "Psi;": '\U000003A8', "QUOT;": '\U00000022', "Qfr;": '\U0001D514', "Qopf;": '\U0000211A', "Qscr;": '\U0001D4AC', "RBarr;": '\U00002910', "REG;": '\U000000AE', "Racute;": '\U00000154', "Rang;": '\U000027EB', "Rarr;": '\U000021A0', "Rarrtl;": '\U00002916', "Rcaron;": '\U00000158', "Rcedil;": '\U00000156', "Rcy;": '\U00000420', "Re;": '\U0000211C', "ReverseElement;": '\U0000220B', "ReverseEquilibrium;": '\U000021CB', "ReverseUpEquilibrium;": '\U0000296F', "Rfr;": '\U0000211C', "Rho;": '\U000003A1', "RightAngleBracket;": '\U000027E9', "RightArrow;": '\U00002192', "RightArrowBar;": '\U000021E5', "RightArrowLeftArrow;": '\U000021C4', "RightCeiling;": '\U00002309', "RightDoubleBracket;": '\U000027E7', "RightDownTeeVector;": '\U0000295D', "RightDownVector;": '\U000021C2', "RightDownVectorBar;": '\U00002955', "RightFloor;": '\U0000230B', "RightTee;": '\U000022A2', "RightTeeArrow;": '\U000021A6', "RightTeeVector;": '\U0000295B', "RightTriangle;": '\U000022B3', "RightTriangleBar;": '\U000029D0', "RightTriangleEqual;": '\U000022B5', "RightUpDownVector;": '\U0000294F', "RightUpTeeVector;": '\U0000295C', 
"RightUpVector;": '\U000021BE', "RightUpVectorBar;": '\U00002954', "RightVector;": '\U000021C0', "RightVectorBar;": '\U00002953', "Rightarrow;": '\U000021D2', "Ropf;": '\U0000211D', "RoundImplies;": '\U00002970', "Rrightarrow;": '\U000021DB', "Rscr;": '\U0000211B', "Rsh;": '\U000021B1', "RuleDelayed;": '\U000029F4', "SHCHcy;": '\U00000429', "SHcy;": '\U00000428', "SOFTcy;": '\U0000042C', "Sacute;": '\U0000015A', "Sc;": '\U00002ABC', "Scaron;": '\U00000160', "Scedil;": '\U0000015E', "Scirc;": '\U0000015C', "Scy;": '\U00000421', "Sfr;": '\U0001D516', "ShortDownArrow;": '\U00002193', "ShortLeftArrow;": '\U00002190', "ShortRightArrow;": '\U00002192', "ShortUpArrow;": '\U00002191', "Sigma;": '\U000003A3', "SmallCircle;": '\U00002218', "Sopf;": '\U0001D54A', "Sqrt;": '\U0000221A', "Square;": '\U000025A1', "SquareIntersection;": '\U00002293', "SquareSubset;": '\U0000228F', "SquareSubsetEqual;": '\U00002291', "SquareSuperset;": '\U00002290', "SquareSupersetEqual;": '\U00002292', "SquareUnion;": '\U00002294', "Sscr;": '\U0001D4AE', "Star;": '\U000022C6', "Sub;": '\U000022D0', "Subset;": '\U000022D0', "SubsetEqual;": '\U00002286', "Succeeds;": '\U0000227B', "SucceedsEqual;": '\U00002AB0', "SucceedsSlantEqual;": '\U0000227D', "SucceedsTilde;": '\U0000227F', "SuchThat;": '\U0000220B', "Sum;": '\U00002211', "Sup;": '\U000022D1', "Superset;": '\U00002283', "SupersetEqual;": '\U00002287', "Supset;": '\U000022D1', "THORN;": '\U000000DE', "TRADE;": '\U00002122', "TSHcy;": '\U0000040B', "TScy;": '\U00000426', "Tab;": '\U00000009', "Tau;": '\U000003A4', "Tcaron;": '\U00000164', "Tcedil;": '\U00000162', "Tcy;": '\U00000422', "Tfr;": '\U0001D517', "Therefore;": '\U00002234', "Theta;": '\U00000398', "ThinSpace;": '\U00002009', "Tilde;": '\U0000223C', "TildeEqual;": '\U00002243', "TildeFullEqual;": '\U00002245', "TildeTilde;": '\U00002248', "Topf;": '\U0001D54B', "TripleDot;": '\U000020DB', "Tscr;": '\U0001D4AF', "Tstrok;": '\U00000166', "Uacute;": '\U000000DA', "Uarr;": '\U0000219F', 
"Uarrocir;": '\U00002949', "Ubrcy;": '\U0000040E', "Ubreve;": '\U0000016C', "Ucirc;": '\U000000DB', "Ucy;": '\U00000423', "Udblac;": '\U00000170', "Ufr;": '\U0001D518', "Ugrave;": '\U000000D9', "Umacr;": '\U0000016A', "UnderBar;": '\U0000005F', "UnderBrace;": '\U000023DF', "UnderBracket;": '\U000023B5', "UnderParenthesis;": '\U000023DD', "Union;": '\U000022C3', "UnionPlus;": '\U0000228E', "Uogon;": '\U00000172', "Uopf;": '\U0001D54C', "UpArrow;": '\U00002191', "UpArrowBar;": '\U00002912', "UpArrowDownArrow;": '\U000021C5', "UpDownArrow;": '\U00002195', "UpEquilibrium;": '\U0000296E', "UpTee;": '\U000022A5', "UpTeeArrow;": '\U000021A5', "Uparrow;": '\U000021D1', "Updownarrow;": '\U000021D5', "UpperLeftArrow;": '\U00002196', "UpperRightArrow;": '\U00002197', "Upsi;": '\U000003D2', "Upsilon;": '\U000003A5', "Uring;": '\U0000016E', "Uscr;": '\U0001D4B0', "Utilde;": '\U00000168', "Uuml;": '\U000000DC', "VDash;": '\U000022AB', "Vbar;": '\U00002AEB', "Vcy;": '\U00000412', "Vdash;": '\U000022A9', "Vdashl;": '\U00002AE6', "Vee;": '\U000022C1', "Verbar;": '\U00002016', "Vert;": '\U00002016', "VerticalBar;": '\U00002223', "VerticalLine;": '\U0000007C', "VerticalSeparator;": '\U00002758', "VerticalTilde;": '\U00002240', "VeryThinSpace;": '\U0000200A', "Vfr;": '\U0001D519', "Vopf;": '\U0001D54D', "Vscr;": '\U0001D4B1', "Vvdash;": '\U000022AA', "Wcirc;": '\U00000174', "Wedge;": '\U000022C0', "Wfr;": '\U0001D51A', "Wopf;": '\U0001D54E', "Wscr;": '\U0001D4B2', "Xfr;": '\U0001D51B', "Xi;": '\U0000039E', "Xopf;": '\U0001D54F', "Xscr;": '\U0001D4B3', "YAcy;": '\U0000042F', "YIcy;": '\U00000407', "YUcy;": '\U0000042E', "Yacute;": '\U000000DD', "Ycirc;": '\U00000176', "Ycy;": '\U0000042B', "Yfr;": '\U0001D51C', "Yopf;": '\U0001D550', "Yscr;": '\U0001D4B4', "Yuml;": '\U00000178', "ZHcy;": '\U00000416', "Zacute;": '\U00000179', "Zcaron;": '\U0000017D', "Zcy;": '\U00000417', "Zdot;": '\U0000017B', "ZeroWidthSpace;": '\U0000200B', "Zeta;": '\U00000396', "Zfr;": '\U00002128', "Zopf;": 
'\U00002124', "Zscr;": '\U0001D4B5', "aacute;": '\U000000E1', "abreve;": '\U00000103', "ac;": '\U0000223E', "acd;": '\U0000223F', "acirc;": '\U000000E2', "acute;": '\U000000B4', "acy;": '\U00000430', "aelig;": '\U000000E6', "af;": '\U00002061', "afr;": '\U0001D51E', "agrave;": '\U000000E0', "alefsym;": '\U00002135', "aleph;": '\U00002135', "alpha;": '\U000003B1', "amacr;": '\U00000101', "amalg;": '\U00002A3F', "amp;": '\U00000026', "and;": '\U00002227', "andand;": '\U00002A55', "andd;": '\U00002A5C', "andslope;": '\U00002A58', "andv;": '\U00002A5A', "ang;": '\U00002220', "ange;": '\U000029A4', "angle;": '\U00002220', "angmsd;": '\U00002221', "angmsdaa;": '\U000029A8', "angmsdab;": '\U000029A9', "angmsdac;": '\U000029AA', "angmsdad;": '\U000029AB', "angmsdae;": '\U000029AC', "angmsdaf;": '\U000029AD', "angmsdag;": '\U000029AE', "angmsdah;": '\U000029AF', "angrt;": '\U0000221F', "angrtvb;": '\U000022BE', "angrtvbd;": '\U0000299D', "angsph;": '\U00002222', "angst;": '\U000000C5', "angzarr;": '\U0000237C', "aogon;": '\U00000105', "aopf;": '\U0001D552', "ap;": '\U00002248', "apE;": '\U00002A70', "apacir;": '\U00002A6F', "ape;": '\U0000224A', "apid;": '\U0000224B', "apos;": '\U00000027', "approx;": '\U00002248', "approxeq;": '\U0000224A', "aring;": '\U000000E5', "ascr;": '\U0001D4B6', "ast;": '\U0000002A', "asymp;": '\U00002248', "asympeq;": '\U0000224D', "atilde;": '\U000000E3', "auml;": '\U000000E4', "awconint;": '\U00002233', "awint;": '\U00002A11', "bNot;": '\U00002AED', "backcong;": '\U0000224C', "backepsilon;": '\U000003F6', "backprime;": '\U00002035', "backsim;": '\U0000223D', "backsimeq;": '\U000022CD', "barvee;": '\U000022BD', "barwed;": '\U00002305', "barwedge;": '\U00002305', "bbrk;": '\U000023B5', "bbrktbrk;": '\U000023B6', "bcong;": '\U0000224C', "bcy;": '\U00000431', "bdquo;": '\U0000201E', "becaus;": '\U00002235', "because;": '\U00002235', "bemptyv;": '\U000029B0', "bepsi;": '\U000003F6', "bernou;": '\U0000212C', "beta;": '\U000003B2', "beth;": 
'\U00002136', "between;": '\U0000226C', "bfr;": '\U0001D51F', "bigcap;": '\U000022C2', "bigcirc;": '\U000025EF', "bigcup;": '\U000022C3', "bigodot;": '\U00002A00', "bigoplus;": '\U00002A01', "bigotimes;": '\U00002A02', "bigsqcup;": '\U00002A06', "bigstar;": '\U00002605', "bigtriangledown;": '\U000025BD', "bigtriangleup;": '\U000025B3', "biguplus;": '\U00002A04', "bigvee;": '\U000022C1', "bigwedge;": '\U000022C0', "bkarow;": '\U0000290D', "blacklozenge;": '\U000029EB', "blacksquare;": '\U000025AA', "blacktriangle;": '\U000025B4', "blacktriangledown;": '\U000025BE', "blacktriangleleft;": '\U000025C2', "blacktriangleright;": '\U000025B8', "blank;": '\U00002423', "blk12;": '\U00002592', "blk14;": '\U00002591', "blk34;": '\U00002593', "block;": '\U00002588', "bnot;": '\U00002310', "bopf;": '\U0001D553', "bot;": '\U000022A5', "bottom;": '\U000022A5', "bowtie;": '\U000022C8', "boxDL;": '\U00002557', "boxDR;": '\U00002554', "boxDl;": '\U00002556', "boxDr;": '\U00002553', "boxH;": '\U00002550', "boxHD;": '\U00002566', "boxHU;": '\U00002569', "boxHd;": '\U00002564', "boxHu;": '\U00002567', "boxUL;": '\U0000255D', "boxUR;": '\U0000255A', "boxUl;": '\U0000255C', "boxUr;": '\U00002559', "boxV;": '\U00002551', "boxVH;": '\U0000256C', "boxVL;": '\U00002563', "boxVR;": '\U00002560', "boxVh;": '\U0000256B', "boxVl;": '\U00002562', "boxVr;": '\U0000255F', "boxbox;": '\U000029C9', "boxdL;": '\U00002555', "boxdR;": '\U00002552', "boxdl;": '\U00002510', "boxdr;": '\U0000250C', "boxh;": '\U00002500', "boxhD;": '\U00002565', "boxhU;": '\U00002568', "boxhd;": '\U0000252C', "boxhu;": '\U00002534', "boxminus;": '\U0000229F', "boxplus;": '\U0000229E', "boxtimes;": '\U000022A0', "boxuL;": '\U0000255B', "boxuR;": '\U00002558', "boxul;": '\U00002518', "boxur;": '\U00002514', "boxv;": '\U00002502', "boxvH;": '\U0000256A', "boxvL;": '\U00002561', "boxvR;": '\U0000255E', "boxvh;": '\U0000253C', "boxvl;": '\U00002524', "boxvr;": '\U0000251C', "bprime;": '\U00002035', "breve;": '\U000002D8', 
"brvbar;": '\U000000A6', "bscr;": '\U0001D4B7', "bsemi;": '\U0000204F', "bsim;": '\U0000223D', "bsime;": '\U000022CD', "bsol;": '\U0000005C', "bsolb;": '\U000029C5', "bsolhsub;": '\U000027C8', "bull;": '\U00002022', "bullet;": '\U00002022', "bump;": '\U0000224E', "bumpE;": '\U00002AAE', "bumpe;": '\U0000224F', "bumpeq;": '\U0000224F', "cacute;": '\U00000107', "cap;": '\U00002229', "capand;": '\U00002A44', "capbrcup;": '\U00002A49', "capcap;": '\U00002A4B', "capcup;": '\U00002A47', "capdot;": '\U00002A40', "caret;": '\U00002041', "caron;": '\U000002C7', "ccaps;": '\U00002A4D', "ccaron;": '\U0000010D', "ccedil;": '\U000000E7', "ccirc;": '\U00000109', "ccups;": '\U00002A4C', "ccupssm;": '\U00002A50', "cdot;": '\U0000010B', "cedil;": '\U000000B8', "cemptyv;": '\U000029B2', "cent;": '\U000000A2', "centerdot;": '\U000000B7', "cfr;": '\U0001D520', "chcy;": '\U00000447', "check;": '\U00002713', "checkmark;": '\U00002713', "chi;": '\U000003C7', "cir;": '\U000025CB', "cirE;": '\U000029C3', "circ;": '\U000002C6', "circeq;": '\U00002257', "circlearrowleft;": '\U000021BA', "circlearrowright;": '\U000021BB', "circledR;": '\U000000AE', "circledS;": '\U000024C8', "circledast;": '\U0000229B', "circledcirc;": '\U0000229A', "circleddash;": '\U0000229D', "cire;": '\U00002257', "cirfnint;": '\U00002A10', "cirmid;": '\U00002AEF', "cirscir;": '\U000029C2', "clubs;": '\U00002663', "clubsuit;": '\U00002663', "colon;": '\U0000003A', "colone;": '\U00002254', "coloneq;": '\U00002254', "comma;": '\U0000002C', "commat;": '\U00000040', "comp;": '\U00002201', "compfn;": '\U00002218', "complement;": '\U00002201', "complexes;": '\U00002102', "cong;": '\U00002245', "congdot;": '\U00002A6D', "conint;": '\U0000222E', "copf;": '\U0001D554', "coprod;": '\U00002210', "copy;": '\U000000A9', "copysr;": '\U00002117', "crarr;": '\U000021B5', "cross;": '\U00002717', "cscr;": '\U0001D4B8', "csub;": '\U00002ACF', "csube;": '\U00002AD1', "csup;": '\U00002AD0', "csupe;": '\U00002AD2', "ctdot;": '\U000022EF', 
"cudarrl;": '\U00002938', "cudarrr;": '\U00002935', "cuepr;": '\U000022DE', "cuesc;": '\U000022DF', "cularr;": '\U000021B6', "cularrp;": '\U0000293D', "cup;": '\U0000222A', "cupbrcap;": '\U00002A48', "cupcap;": '\U00002A46', "cupcup;": '\U00002A4A', "cupdot;": '\U0000228D', "cupor;": '\U00002A45', "curarr;": '\U000021B7', "curarrm;": '\U0000293C', "curlyeqprec;": '\U000022DE', "curlyeqsucc;": '\U000022DF', "curlyvee;": '\U000022CE', "curlywedge;": '\U000022CF', "curren;": '\U000000A4', "curvearrowleft;": '\U000021B6', "curvearrowright;": '\U000021B7', "cuvee;": '\U000022CE', "cuwed;": '\U000022CF', "cwconint;": '\U00002232', "cwint;": '\U00002231', "cylcty;": '\U0000232D', "dArr;": '\U000021D3', "dHar;": '\U00002965', "dagger;": '\U00002020', "daleth;": '\U00002138', "darr;": '\U00002193', "dash;": '\U00002010', "dashv;": '\U000022A3', "dbkarow;": '\U0000290F', "dblac;": '\U000002DD', "dcaron;": '\U0000010F', "dcy;": '\U00000434', "dd;": '\U00002146', "ddagger;": '\U00002021', "ddarr;": '\U000021CA', "ddotseq;": '\U00002A77', "deg;": '\U000000B0', "delta;": '\U000003B4', "demptyv;": '\U000029B1', "dfisht;": '\U0000297F', "dfr;": '\U0001D521', "dharl;": '\U000021C3', "dharr;": '\U000021C2', "diam;": '\U000022C4', "diamond;": '\U000022C4', "diamondsuit;": '\U00002666', "diams;": '\U00002666', "die;": '\U000000A8', "digamma;": '\U000003DD', "disin;": '\U000022F2', "div;": '\U000000F7', "divide;": '\U000000F7', "divideontimes;": '\U000022C7', "divonx;": '\U000022C7', "djcy;": '\U00000452', "dlcorn;": '\U0000231E', "dlcrop;": '\U0000230D', "dollar;": '\U00000024', "dopf;": '\U0001D555', "dot;": '\U000002D9', "doteq;": '\U00002250', "doteqdot;": '\U00002251', "dotminus;": '\U00002238', "dotplus;": '\U00002214', "dotsquare;": '\U000022A1', "doublebarwedge;": '\U00002306', "downarrow;": '\U00002193', "downdownarrows;": '\U000021CA', "downharpoonleft;": '\U000021C3', "downharpoonright;": '\U000021C2', "drbkarow;": '\U00002910', "drcorn;": '\U0000231F', "drcrop;": 
'\U0000230C', "dscr;": '\U0001D4B9', "dscy;": '\U00000455', "dsol;": '\U000029F6', "dstrok;": '\U00000111', "dtdot;": '\U000022F1', "dtri;": '\U000025BF', "dtrif;": '\U000025BE', "duarr;": '\U000021F5', "duhar;": '\U0000296F', "dwangle;": '\U000029A6', "dzcy;": '\U0000045F', "dzigrarr;": '\U000027FF', "eDDot;": '\U00002A77', "eDot;": '\U00002251', "eacute;": '\U000000E9', "easter;": '\U00002A6E', "ecaron;": '\U0000011B', "ecir;": '\U00002256', "ecirc;": '\U000000EA', "ecolon;": '\U00002255', "ecy;": '\U0000044D', "edot;": '\U00000117', "ee;": '\U00002147', "efDot;": '\U00002252', "efr;": '\U0001D522', "eg;": '\U00002A9A', "egrave;": '\U000000E8', "egs;": '\U00002A96', "egsdot;": '\U00002A98', "el;": '\U00002A99', "elinters;": '\U000023E7', "ell;": '\U00002113', "els;": '\U00002A95', "elsdot;": '\U00002A97', "emacr;": '\U00000113', "empty;": '\U00002205', "emptyset;": '\U00002205', "emptyv;": '\U00002205', "emsp;": '\U00002003', "emsp13;": '\U00002004', "emsp14;": '\U00002005', "eng;": '\U0000014B', "ensp;": '\U00002002', "eogon;": '\U00000119', "eopf;": '\U0001D556', "epar;": '\U000022D5', "eparsl;": '\U000029E3', "eplus;": '\U00002A71', "epsi;": '\U000003B5', "epsilon;": '\U000003B5', "epsiv;": '\U000003F5', "eqcirc;": '\U00002256', "eqcolon;": '\U00002255', "eqsim;": '\U00002242', "eqslantgtr;": '\U00002A96', "eqslantless;": '\U00002A95', "equals;": '\U0000003D', "equest;": '\U0000225F', "equiv;": '\U00002261', "equivDD;": '\U00002A78', "eqvparsl;": '\U000029E5', "erDot;": '\U00002253', "erarr;": '\U00002971', "escr;": '\U0000212F', "esdot;": '\U00002250', "esim;": '\U00002242', "eta;": '\U000003B7', "eth;": '\U000000F0', "euml;": '\U000000EB', "euro;": '\U000020AC', "excl;": '\U00000021', "exist;": '\U00002203', "expectation;": '\U00002130', "exponentiale;": '\U00002147', "fallingdotseq;": '\U00002252', "fcy;": '\U00000444', "female;": '\U00002640', "ffilig;": '\U0000FB03', "fflig;": '\U0000FB00', "ffllig;": '\U0000FB04', "ffr;": '\U0001D523', "filig;": 
'\U0000FB01', "flat;": '\U0000266D', "fllig;": '\U0000FB02', "fltns;": '\U000025B1', "fnof;": '\U00000192', "fopf;": '\U0001D557', "forall;": '\U00002200', "fork;": '\U000022D4', "forkv;": '\U00002AD9', "fpartint;": '\U00002A0D', "frac12;": '\U000000BD', "frac13;": '\U00002153', "frac14;": '\U000000BC', "frac15;": '\U00002155', "frac16;": '\U00002159', "frac18;": '\U0000215B', "frac23;": '\U00002154', "frac25;": '\U00002156', "frac34;": '\U000000BE', "frac35;": '\U00002157', "frac38;": '\U0000215C', "frac45;": '\U00002158', "frac56;": '\U0000215A', "frac58;": '\U0000215D', "frac78;": '\U0000215E', "frasl;": '\U00002044', "frown;": '\U00002322', "fscr;": '\U0001D4BB', "gE;": '\U00002267', "gEl;": '\U00002A8C', "gacute;": '\U000001F5', "gamma;": '\U000003B3', "gammad;": '\U000003DD', "gap;": '\U00002A86', "gbreve;": '\U0000011F', "gcirc;": '\U0000011D', "gcy;": '\U00000433', "gdot;": '\U00000121', "ge;": '\U00002265', "gel;": '\U000022DB', "geq;": '\U00002265', "geqq;": '\U00002267', "geqslant;": '\U00002A7E', "ges;": '\U00002A7E', "gescc;": '\U00002AA9', "gesdot;": '\U00002A80', "gesdoto;": '\U00002A82', "gesdotol;": '\U00002A84', "gesles;": '\U00002A94', "gfr;": '\U0001D524', "gg;": '\U0000226B', "ggg;": '\U000022D9', "gimel;": '\U00002137', "gjcy;": '\U00000453', "gl;": '\U00002277', "glE;": '\U00002A92', "gla;": '\U00002AA5', "glj;": '\U00002AA4', "gnE;": '\U00002269', "gnap;": '\U00002A8A', "gnapprox;": '\U00002A8A', "gne;": '\U00002A88', "gneq;": '\U00002A88', "gneqq;": '\U00002269', "gnsim;": '\U000022E7', "gopf;": '\U0001D558', "grave;": '\U00000060', "gscr;": '\U0000210A', "gsim;": '\U00002273', "gsime;": '\U00002A8E', "gsiml;": '\U00002A90', "gt;": '\U0000003E', "gtcc;": '\U00002AA7', "gtcir;": '\U00002A7A', "gtdot;": '\U000022D7', "gtlPar;": '\U00002995', "gtquest;": '\U00002A7C', "gtrapprox;": '\U00002A86', "gtrarr;": '\U00002978', "gtrdot;": '\U000022D7', "gtreqless;": '\U000022DB', "gtreqqless;": '\U00002A8C', "gtrless;": '\U00002277', "gtrsim;": 
'\U00002273', "hArr;": '\U000021D4', "hairsp;": '\U0000200A', "half;": '\U000000BD', "hamilt;": '\U0000210B', "hardcy;": '\U0000044A', "harr;": '\U00002194', "harrcir;": '\U00002948', "harrw;": '\U000021AD', "hbar;": '\U0000210F', "hcirc;": '\U00000125', "hearts;": '\U00002665', "heartsuit;": '\U00002665', "hellip;": '\U00002026', "hercon;": '\U000022B9', "hfr;": '\U0001D525', "hksearow;": '\U00002925', "hkswarow;": '\U00002926', "hoarr;": '\U000021FF', "homtht;": '\U0000223B', "hookleftarrow;": '\U000021A9', "hookrightarrow;": '\U000021AA', "hopf;": '\U0001D559', "horbar;": '\U00002015', "hscr;": '\U0001D4BD', "hslash;": '\U0000210F', "hstrok;": '\U00000127', "hybull;": '\U00002043', "hyphen;": '\U00002010', "iacute;": '\U000000ED', "ic;": '\U00002063', "icirc;": '\U000000EE', "icy;": '\U00000438', "iecy;": '\U00000435', "iexcl;": '\U000000A1', "iff;": '\U000021D4', "ifr;": '\U0001D526', "igrave;": '\U000000EC', "ii;": '\U00002148', "iiiint;": '\U00002A0C', "iiint;": '\U0000222D', "iinfin;": '\U000029DC', "iiota;": '\U00002129', "ijlig;": '\U00000133', "imacr;": '\U0000012B', "image;": '\U00002111', "imagline;": '\U00002110', "imagpart;": '\U00002111', "imath;": '\U00000131', "imof;": '\U000022B7', "imped;": '\U000001B5', "in;": '\U00002208', "incare;": '\U00002105', "infin;": '\U0000221E', "infintie;": '\U000029DD', "inodot;": '\U00000131', "int;": '\U0000222B', "intcal;": '\U000022BA', "integers;": '\U00002124', "intercal;": '\U000022BA', "intlarhk;": '\U00002A17', "intprod;": '\U00002A3C', "iocy;": '\U00000451', "iogon;": '\U0000012F', "iopf;": '\U0001D55A', "iota;": '\U000003B9', "iprod;": '\U00002A3C', "iquest;": '\U000000BF', "iscr;": '\U0001D4BE', "isin;": '\U00002208', "isinE;": '\U000022F9', "isindot;": '\U000022F5', "isins;": '\U000022F4', "isinsv;": '\U000022F3', "isinv;": '\U00002208', "it;": '\U00002062', "itilde;": '\U00000129', "iukcy;": '\U00000456', "iuml;": '\U000000EF', "jcirc;": '\U00000135', "jcy;": '\U00000439', "jfr;": '\U0001D527', 
"jmath;": '\U00000237', "jopf;": '\U0001D55B', "jscr;": '\U0001D4BF', "jsercy;": '\U00000458', "jukcy;": '\U00000454', "kappa;": '\U000003BA', "kappav;": '\U000003F0', "kcedil;": '\U00000137', "kcy;": '\U0000043A', "kfr;": '\U0001D528', "kgreen;": '\U00000138', "khcy;": '\U00000445', "kjcy;": '\U0000045C', "kopf;": '\U0001D55C', "kscr;": '\U0001D4C0', "lAarr;": '\U000021DA', "lArr;": '\U000021D0', "lAtail;": '\U0000291B', "lBarr;": '\U0000290E', "lE;": '\U00002266', "lEg;": '\U00002A8B', "lHar;": '\U00002962', "lacute;": '\U0000013A', "laemptyv;": '\U000029B4', "lagran;": '\U00002112', "lambda;": '\U000003BB', "lang;": '\U000027E8', "langd;": '\U00002991', "langle;": '\U000027E8', "lap;": '\U00002A85', "laquo;": '\U000000AB', "larr;": '\U00002190', "larrb;": '\U000021E4', "larrbfs;": '\U0000291F', "larrfs;": '\U0000291D', "larrhk;": '\U000021A9', "larrlp;": '\U000021AB', "larrpl;": '\U00002939', "larrsim;": '\U00002973', "larrtl;": '\U000021A2', "lat;": '\U00002AAB', "latail;": '\U00002919', "late;": '\U00002AAD', "lbarr;": '\U0000290C', "lbbrk;": '\U00002772', "lbrace;": '\U0000007B', "lbrack;": '\U0000005B', "lbrke;": '\U0000298B', "lbrksld;": '\U0000298F', "lbrkslu;": '\U0000298D', "lcaron;": '\U0000013E', "lcedil;": '\U0000013C', "lceil;": '\U00002308', "lcub;": '\U0000007B', "lcy;": '\U0000043B', "ldca;": '\U00002936', "ldquo;": '\U0000201C', "ldquor;": '\U0000201E', "ldrdhar;": '\U00002967', "ldrushar;": '\U0000294B', "ldsh;": '\U000021B2', "le;": '\U00002264', "leftarrow;": '\U00002190', "leftarrowtail;": '\U000021A2', "leftharpoondown;": '\U000021BD', "leftharpoonup;": '\U000021BC', "leftleftarrows;": '\U000021C7', "leftrightarrow;": '\U00002194', "leftrightarrows;": '\U000021C6', "leftrightharpoons;": '\U000021CB', "leftrightsquigarrow;": '\U000021AD', "leftthreetimes;": '\U000022CB', "leg;": '\U000022DA', "leq;": '\U00002264', "leqq;": '\U00002266', "leqslant;": '\U00002A7D', "les;": '\U00002A7D', "lescc;": '\U00002AA8', "lesdot;": '\U00002A7F', 
"lesdoto;": '\U00002A81', "lesdotor;": '\U00002A83', "lesges;": '\U00002A93', "lessapprox;": '\U00002A85', "lessdot;": '\U000022D6', "lesseqgtr;": '\U000022DA', "lesseqqgtr;": '\U00002A8B', "lessgtr;": '\U00002276', "lesssim;": '\U00002272', "lfisht;": '\U0000297C', "lfloor;": '\U0000230A', "lfr;": '\U0001D529', "lg;": '\U00002276', "lgE;": '\U00002A91', "lhard;": '\U000021BD', "lharu;": '\U000021BC', "lharul;": '\U0000296A', "lhblk;": '\U00002584', "ljcy;": '\U00000459', "ll;": '\U0000226A', "llarr;": '\U000021C7', "llcorner;": '\U0000231E', "llhard;": '\U0000296B', "lltri;": '\U000025FA', "lmidot;": '\U00000140', "lmoust;": '\U000023B0', "lmoustache;": '\U000023B0', "lnE;": '\U00002268', "lnap;": '\U00002A89', "lnapprox;": '\U00002A89', "lne;": '\U00002A87', "lneq;": '\U00002A87', "lneqq;": '\U00002268', "lnsim;": '\U000022E6', "loang;": '\U000027EC', "loarr;": '\U000021FD', "lobrk;": '\U000027E6', "longleftarrow;": '\U000027F5', "longleftrightarrow;": '\U000027F7', "longmapsto;": '\U000027FC', "longrightarrow;": '\U000027F6', "looparrowleft;": '\U000021AB', "looparrowright;": '\U000021AC', "lopar;": '\U00002985', "lopf;": '\U0001D55D', "loplus;": '\U00002A2D', "lotimes;": '\U00002A34', "lowast;": '\U00002217', "lowbar;": '\U0000005F', "loz;": '\U000025CA', "lozenge;": '\U000025CA', "lozf;": '\U000029EB', "lpar;": '\U00000028', "lparlt;": '\U00002993', "lrarr;": '\U000021C6', "lrcorner;": '\U0000231F', "lrhar;": '\U000021CB', "lrhard;": '\U0000296D', "lrm;": '\U0000200E', "lrtri;": '\U000022BF', "lsaquo;": '\U00002039', "lscr;": '\U0001D4C1', "lsh;": '\U000021B0', "lsim;": '\U00002272', "lsime;": '\U00002A8D', "lsimg;": '\U00002A8F', "lsqb;": '\U0000005B', "lsquo;": '\U00002018', "lsquor;": '\U0000201A', "lstrok;": '\U00000142', "lt;": '\U0000003C', "ltcc;": '\U00002AA6', "ltcir;": '\U00002A79', "ltdot;": '\U000022D6', "lthree;": '\U000022CB', "ltimes;": '\U000022C9', "ltlarr;": '\U00002976', "ltquest;": '\U00002A7B', "ltrPar;": '\U00002996', "ltri;": 
'\U000025C3', "ltrie;": '\U000022B4', "ltrif;": '\U000025C2', "lurdshar;": '\U0000294A', "luruhar;": '\U00002966', "mDDot;": '\U0000223A', "macr;": '\U000000AF', "male;": '\U00002642', "malt;": '\U00002720', "maltese;": '\U00002720', "map;": '\U000021A6', "mapsto;": '\U000021A6', "mapstodown;": '\U000021A7', "mapstoleft;": '\U000021A4', "mapstoup;": '\U000021A5', "marker;": '\U000025AE', "mcomma;": '\U00002A29', "mcy;": '\U0000043C', "mdash;": '\U00002014', "measuredangle;": '\U00002221', "mfr;": '\U0001D52A', "mho;": '\U00002127', "micro;": '\U000000B5', "mid;": '\U00002223', "midast;": '\U0000002A', "midcir;": '\U00002AF0', "middot;": '\U000000B7', "minus;": '\U00002212', "minusb;": '\U0000229F', "minusd;": '\U00002238', "minusdu;": '\U00002A2A', "mlcp;": '\U00002ADB', "mldr;": '\U00002026', "mnplus;": '\U00002213', "models;": '\U000022A7', "mopf;": '\U0001D55E', "mp;": '\U00002213', "mscr;": '\U0001D4C2', "mstpos;": '\U0000223E', "mu;": '\U000003BC', "multimap;": '\U000022B8', "mumap;": '\U000022B8', "nLeftarrow;": '\U000021CD', "nLeftrightarrow;": '\U000021CE', "nRightarrow;": '\U000021CF', "nVDash;": '\U000022AF', "nVdash;": '\U000022AE', "nabla;": '\U00002207', "nacute;": '\U00000144', "nap;": '\U00002249', "napos;": '\U00000149', "napprox;": '\U00002249', "natur;": '\U0000266E', "natural;": '\U0000266E', "naturals;": '\U00002115', "nbsp;": '\U000000A0', "ncap;": '\U00002A43', "ncaron;": '\U00000148', "ncedil;": '\U00000146', "ncong;": '\U00002247', "ncup;": '\U00002A42', "ncy;": '\U0000043D', "ndash;": '\U00002013', "ne;": '\U00002260', "neArr;": '\U000021D7', "nearhk;": '\U00002924', "nearr;": '\U00002197', "nearrow;": '\U00002197', "nequiv;": '\U00002262', "nesear;": '\U00002928', "nexist;": '\U00002204', "nexists;": '\U00002204', "nfr;": '\U0001D52B', "nge;": '\U00002271', "ngeq;": '\U00002271', "ngsim;": '\U00002275', "ngt;": '\U0000226F', "ngtr;": '\U0000226F', "nhArr;": '\U000021CE', "nharr;": '\U000021AE', "nhpar;": '\U00002AF2', "ni;": '\U0000220B', 
"nis;": '\U000022FC', "nisd;": '\U000022FA', "niv;": '\U0000220B', "njcy;": '\U0000045A', "nlArr;": '\U000021CD', "nlarr;": '\U0000219A', "nldr;": '\U00002025', "nle;": '\U00002270', "nleftarrow;": '\U0000219A', "nleftrightarrow;": '\U000021AE', "nleq;": '\U00002270', "nless;": '\U0000226E', "nlsim;": '\U00002274', "nlt;": '\U0000226E', "nltri;": '\U000022EA', "nltrie;": '\U000022EC', "nmid;": '\U00002224', "nopf;": '\U0001D55F', "not;": '\U000000AC', "notin;": '\U00002209', "notinva;": '\U00002209', "notinvb;": '\U000022F7', "notinvc;": '\U000022F6', "notni;": '\U0000220C', "notniva;": '\U0000220C', "notnivb;": '\U000022FE', "notnivc;": '\U000022FD', "npar;": '\U00002226', "nparallel;": '\U00002226', "npolint;": '\U00002A14', "npr;": '\U00002280', "nprcue;": '\U000022E0', "nprec;": '\U00002280', "nrArr;": '\U000021CF', "nrarr;": '\U0000219B', "nrightarrow;": '\U0000219B', "nrtri;": '\U000022EB', "nrtrie;": '\U000022ED', "nsc;": '\U00002281', "nsccue;": '\U000022E1', "nscr;": '\U0001D4C3', "nshortmid;": '\U00002224', "nshortparallel;": '\U00002226', "nsim;": '\U00002241', "nsime;": '\U00002244', "nsimeq;": '\U00002244', "nsmid;": '\U00002224', "nspar;": '\U00002226', "nsqsube;": '\U000022E2', "nsqsupe;": '\U000022E3', "nsub;": '\U00002284', "nsube;": '\U00002288', "nsubseteq;": '\U00002288', "nsucc;": '\U00002281', "nsup;": '\U00002285', "nsupe;": '\U00002289', "nsupseteq;": '\U00002289', "ntgl;": '\U00002279', "ntilde;": '\U000000F1', "ntlg;": '\U00002278', "ntriangleleft;": '\U000022EA', "ntrianglelefteq;": '\U000022EC', "ntriangleright;": '\U000022EB', "ntrianglerighteq;": '\U000022ED', "nu;": '\U000003BD', "num;": '\U00000023', "numero;": '\U00002116', "numsp;": '\U00002007', "nvDash;": '\U000022AD', "nvHarr;": '\U00002904', "nvdash;": '\U000022AC', "nvinfin;": '\U000029DE', "nvlArr;": '\U00002902', "nvrArr;": '\U00002903', "nwArr;": '\U000021D6', "nwarhk;": '\U00002923', "nwarr;": '\U00002196', "nwarrow;": '\U00002196', "nwnear;": '\U00002927', "oS;": 
'\U000024C8', "oacute;": '\U000000F3', "oast;": '\U0000229B', "ocir;": '\U0000229A', "ocirc;": '\U000000F4', "ocy;": '\U0000043E', "odash;": '\U0000229D', "odblac;": '\U00000151', "odiv;": '\U00002A38', "odot;": '\U00002299', "odsold;": '\U000029BC', "oelig;": '\U00000153', "ofcir;": '\U000029BF', "ofr;": '\U0001D52C', "ogon;": '\U000002DB', "ograve;": '\U000000F2', "ogt;": '\U000029C1', "ohbar;": '\U000029B5', "ohm;": '\U000003A9', "oint;": '\U0000222E', "olarr;": '\U000021BA', "olcir;": '\U000029BE', "olcross;": '\U000029BB', "oline;": '\U0000203E', "olt;": '\U000029C0', "omacr;": '\U0000014D', "omega;": '\U000003C9', "omicron;": '\U000003BF', "omid;": '\U000029B6', "ominus;": '\U00002296', "oopf;": '\U0001D560', "opar;": '\U000029B7', "operp;": '\U000029B9', "oplus;": '\U00002295', "or;": '\U00002228', "orarr;": '\U000021BB', "ord;": '\U00002A5D', "order;": '\U00002134', "orderof;": '\U00002134', "ordf;": '\U000000AA', "ordm;": '\U000000BA', "origof;": '\U000022B6', "oror;": '\U00002A56', "orslope;": '\U00002A57', "orv;": '\U00002A5B', "oscr;": '\U00002134', "oslash;": '\U000000F8', "osol;": '\U00002298', "otilde;": '\U000000F5', "otimes;": '\U00002297', "otimesas;": '\U00002A36', "ouml;": '\U000000F6', "ovbar;": '\U0000233D', "par;": '\U00002225', "para;": '\U000000B6', "parallel;": '\U00002225', "parsim;": '\U00002AF3', "parsl;": '\U00002AFD', "part;": '\U00002202', "pcy;": '\U0000043F', "percnt;": '\U00000025', "period;": '\U0000002E', "permil;": '\U00002030', "perp;": '\U000022A5', "pertenk;": '\U00002031', "pfr;": '\U0001D52D', "phi;": '\U000003C6', "phiv;": '\U000003D5', "phmmat;": '\U00002133', "phone;": '\U0000260E', "pi;": '\U000003C0', "pitchfork;": '\U000022D4', "piv;": '\U000003D6', "planck;": '\U0000210F', "planckh;": '\U0000210E', "plankv;": '\U0000210F', "plus;": '\U0000002B', "plusacir;": '\U00002A23', "plusb;": '\U0000229E', "pluscir;": '\U00002A22', "plusdo;": '\U00002214', "plusdu;": '\U00002A25', "pluse;": '\U00002A72', "plusmn;": 
'\U000000B1', "plussim;": '\U00002A26', "plustwo;": '\U00002A27', "pm;": '\U000000B1', "pointint;": '\U00002A15', "popf;": '\U0001D561', "pound;": '\U000000A3', "pr;": '\U0000227A', "prE;": '\U00002AB3', "prap;": '\U00002AB7', "prcue;": '\U0000227C', "pre;": '\U00002AAF', "prec;": '\U0000227A', "precapprox;": '\U00002AB7', "preccurlyeq;": '\U0000227C', "preceq;": '\U00002AAF', "precnapprox;": '\U00002AB9', "precneqq;": '\U00002AB5', "precnsim;": '\U000022E8', "precsim;": '\U0000227E', "prime;": '\U00002032', "primes;": '\U00002119', "prnE;": '\U00002AB5', "prnap;": '\U00002AB9', "prnsim;": '\U000022E8', "prod;": '\U0000220F', "profalar;": '\U0000232E', "profline;": '\U00002312', "profsurf;": '\U00002313', "prop;": '\U0000221D', "propto;": '\U0000221D', "prsim;": '\U0000227E', "prurel;": '\U000022B0', "pscr;": '\U0001D4C5', "psi;": '\U000003C8', "puncsp;": '\U00002008', "qfr;": '\U0001D52E', "qint;": '\U00002A0C', "qopf;": '\U0001D562', "qprime;": '\U00002057', "qscr;": '\U0001D4C6', "quaternions;": '\U0000210D', "quatint;": '\U00002A16', "quest;": '\U0000003F', "questeq;": '\U0000225F', "quot;": '\U00000022', "rAarr;": '\U000021DB', "rArr;": '\U000021D2', "rAtail;": '\U0000291C', "rBarr;": '\U0000290F', "rHar;": '\U00002964', "racute;": '\U00000155', "radic;": '\U0000221A', "raemptyv;": '\U000029B3', "rang;": '\U000027E9', "rangd;": '\U00002992', "range;": '\U000029A5', "rangle;": '\U000027E9', "raquo;": '\U000000BB', "rarr;": '\U00002192', "rarrap;": '\U00002975', "rarrb;": '\U000021E5', "rarrbfs;": '\U00002920', "rarrc;": '\U00002933', "rarrfs;": '\U0000291E', "rarrhk;": '\U000021AA', "rarrlp;": '\U000021AC', "rarrpl;": '\U00002945', "rarrsim;": '\U00002974', "rarrtl;": '\U000021A3', "rarrw;": '\U0000219D', "ratail;": '\U0000291A', "ratio;": '\U00002236', "rationals;": '\U0000211A', "rbarr;": '\U0000290D', "rbbrk;": '\U00002773', "rbrace;": '\U0000007D', "rbrack;": '\U0000005D', "rbrke;": '\U0000298C', "rbrksld;": '\U0000298E', "rbrkslu;": '\U00002990', 
"rcaron;": '\U00000159', "rcedil;": '\U00000157', "rceil;": '\U00002309', "rcub;": '\U0000007D', "rcy;": '\U00000440', "rdca;": '\U00002937', "rdldhar;": '\U00002969', "rdquo;": '\U0000201D', "rdquor;": '\U0000201D', "rdsh;": '\U000021B3', "real;": '\U0000211C', "realine;": '\U0000211B', "realpart;": '\U0000211C', "reals;": '\U0000211D', "rect;": '\U000025AD', "reg;": '\U000000AE', "rfisht;": '\U0000297D', "rfloor;": '\U0000230B', "rfr;": '\U0001D52F', "rhard;": '\U000021C1', "rharu;": '\U000021C0', "rharul;": '\U0000296C', "rho;": '\U000003C1', "rhov;": '\U000003F1', "rightarrow;": '\U00002192', "rightarrowtail;": '\U000021A3', "rightharpoondown;": '\U000021C1', "rightharpoonup;": '\U000021C0', "rightleftarrows;": '\U000021C4', "rightleftharpoons;": '\U000021CC', "rightrightarrows;": '\U000021C9', "rightsquigarrow;": '\U0000219D', "rightthreetimes;": '\U000022CC', "ring;": '\U000002DA', "risingdotseq;": '\U00002253', "rlarr;": '\U000021C4', "rlhar;": '\U000021CC', "rlm;": '\U0000200F', "rmoust;": '\U000023B1', "rmoustache;": '\U000023B1', "rnmid;": '\U00002AEE', "roang;": '\U000027ED', "roarr;": '\U000021FE', "robrk;": '\U000027E7', "ropar;": '\U00002986', "ropf;": '\U0001D563', "roplus;": '\U00002A2E', "rotimes;": '\U00002A35', "rpar;": '\U00000029', "rpargt;": '\U00002994', "rppolint;": '\U00002A12', "rrarr;": '\U000021C9', "rsaquo;": '\U0000203A', "rscr;": '\U0001D4C7', "rsh;": '\U000021B1', "rsqb;": '\U0000005D', "rsquo;": '\U00002019', "rsquor;": '\U00002019', "rthree;": '\U000022CC', "rtimes;": '\U000022CA', "rtri;": '\U000025B9', "rtrie;": '\U000022B5', "rtrif;": '\U000025B8', "rtriltri;": '\U000029CE', "ruluhar;": '\U00002968', "rx;": '\U0000211E', "sacute;": '\U0000015B', "sbquo;": '\U0000201A', "sc;": '\U0000227B', "scE;": '\U00002AB4', "scap;": '\U00002AB8', "scaron;": '\U00000161', "sccue;": '\U0000227D', "sce;": '\U00002AB0', "scedil;": '\U0000015F', "scirc;": '\U0000015D', "scnE;": '\U00002AB6', "scnap;": '\U00002ABA', "scnsim;": '\U000022E9', 
"scpolint;": '\U00002A13', "scsim;": '\U0000227F', "scy;": '\U00000441', "sdot;": '\U000022C5', "sdotb;": '\U000022A1', "sdote;": '\U00002A66', "seArr;": '\U000021D8', "searhk;": '\U00002925', "searr;": '\U00002198', "searrow;": '\U00002198', "sect;": '\U000000A7', "semi;": '\U0000003B', "seswar;": '\U00002929', "setminus;": '\U00002216', "setmn;": '\U00002216', "sext;": '\U00002736', "sfr;": '\U0001D530', "sfrown;": '\U00002322', "sharp;": '\U0000266F', "shchcy;": '\U00000449', "shcy;": '\U00000448', "shortmid;": '\U00002223', "shortparallel;": '\U00002225', "shy;": '\U000000AD', "sigma;": '\U000003C3', "sigmaf;": '\U000003C2', "sigmav;": '\U000003C2', "sim;": '\U0000223C', "simdot;": '\U00002A6A', "sime;": '\U00002243', "simeq;": '\U00002243', "simg;": '\U00002A9E', "simgE;": '\U00002AA0', "siml;": '\U00002A9D', "simlE;": '\U00002A9F', "simne;": '\U00002246', "simplus;": '\U00002A24', "simrarr;": '\U00002972', "slarr;": '\U00002190', "smallsetminus;": '\U00002216', "smashp;": '\U00002A33', "smeparsl;": '\U000029E4', "smid;": '\U00002223', "smile;": '\U00002323', "smt;": '\U00002AAA', "smte;": '\U00002AAC', "softcy;": '\U0000044C', "sol;": '\U0000002F', "solb;": '\U000029C4', "solbar;": '\U0000233F', "sopf;": '\U0001D564', "spades;": '\U00002660', "spadesuit;": '\U00002660', "spar;": '\U00002225', "sqcap;": '\U00002293', "sqcup;": '\U00002294', "sqsub;": '\U0000228F', "sqsube;": '\U00002291', "sqsubset;": '\U0000228F', "sqsubseteq;": '\U00002291', "sqsup;": '\U00002290', "sqsupe;": '\U00002292', "sqsupset;": '\U00002290', "sqsupseteq;": '\U00002292', "squ;": '\U000025A1', "square;": '\U000025A1', "squarf;": '\U000025AA', "squf;": '\U000025AA', "srarr;": '\U00002192', "sscr;": '\U0001D4C8', "ssetmn;": '\U00002216', "ssmile;": '\U00002323', "sstarf;": '\U000022C6', "star;": '\U00002606', "starf;": '\U00002605', "straightepsilon;": '\U000003F5', "straightphi;": '\U000003D5', "strns;": '\U000000AF', "sub;": '\U00002282', "subE;": '\U00002AC5', "subdot;": '\U00002ABD', 
"sube;": '\U00002286', "subedot;": '\U00002AC3', "submult;": '\U00002AC1', "subnE;": '\U00002ACB', "subne;": '\U0000228A', "subplus;": '\U00002ABF', "subrarr;": '\U00002979', "subset;": '\U00002282', "subseteq;": '\U00002286', "subseteqq;": '\U00002AC5', "subsetneq;": '\U0000228A', "subsetneqq;": '\U00002ACB', "subsim;": '\U00002AC7', "subsub;": '\U00002AD5', "subsup;": '\U00002AD3', "succ;": '\U0000227B', "succapprox;": '\U00002AB8', "succcurlyeq;": '\U0000227D', "succeq;": '\U00002AB0', "succnapprox;": '\U00002ABA', "succneqq;": '\U00002AB6', "succnsim;": '\U000022E9', "succsim;": '\U0000227F', "sum;": '\U00002211', "sung;": '\U0000266A', "sup;": '\U00002283', "sup1;": '\U000000B9', "sup2;": '\U000000B2', "sup3;": '\U000000B3', "supE;": '\U00002AC6', "supdot;": '\U00002ABE', "supdsub;": '\U00002AD8', "supe;": '\U00002287', "supedot;": '\U00002AC4', "suphsol;": '\U000027C9', "suphsub;": '\U00002AD7', "suplarr;": '\U0000297B', "supmult;": '\U00002AC2', "supnE;": '\U00002ACC', "supne;": '\U0000228B', "supplus;": '\U00002AC0', "supset;": '\U00002283', "supseteq;": '\U00002287', "supseteqq;": '\U00002AC6', "supsetneq;": '\U0000228B', "supsetneqq;": '\U00002ACC', "supsim;": '\U00002AC8', "supsub;": '\U00002AD4', "supsup;": '\U00002AD6', "swArr;": '\U000021D9', "swarhk;": '\U00002926', "swarr;": '\U00002199', "swarrow;": '\U00002199', "swnwar;": '\U0000292A', "szlig;": '\U000000DF', "target;": '\U00002316', "tau;": '\U000003C4', "tbrk;": '\U000023B4', "tcaron;": '\U00000165', "tcedil;": '\U00000163', "tcy;": '\U00000442', "tdot;": '\U000020DB', "telrec;": '\U00002315', "tfr;": '\U0001D531', "there4;": '\U00002234', "therefore;": '\U00002234', "theta;": '\U000003B8', "thetasym;": '\U000003D1', "thetav;": '\U000003D1', "thickapprox;": '\U00002248', "thicksim;": '\U0000223C', "thinsp;": '\U00002009', "thkap;": '\U00002248', "thksim;": '\U0000223C', "thorn;": '\U000000FE', "tilde;": '\U000002DC', "times;": '\U000000D7', "timesb;": '\U000022A0', "timesbar;": '\U00002A31', 
"timesd;": '\U00002A30', "tint;": '\U0000222D', "toea;": '\U00002928', "top;": '\U000022A4', "topbot;": '\U00002336', "topcir;": '\U00002AF1', "topf;": '\U0001D565', "topfork;": '\U00002ADA', "tosa;": '\U00002929', "tprime;": '\U00002034', "trade;": '\U00002122', "triangle;": '\U000025B5', "triangledown;": '\U000025BF', "triangleleft;": '\U000025C3', "trianglelefteq;": '\U000022B4', "triangleq;": '\U0000225C', "triangleright;": '\U000025B9', "trianglerighteq;": '\U000022B5', "tridot;": '\U000025EC', "trie;": '\U0000225C', "triminus;": '\U00002A3A', "triplus;": '\U00002A39', "trisb;": '\U000029CD', "tritime;": '\U00002A3B', "trpezium;": '\U000023E2', "tscr;": '\U0001D4C9', "tscy;": '\U00000446', "tshcy;": '\U0000045B', "tstrok;": '\U00000167', "twixt;": '\U0000226C', "twoheadleftarrow;": '\U0000219E', "twoheadrightarrow;": '\U000021A0', "uArr;": '\U000021D1', "uHar;": '\U00002963', "uacute;": '\U000000FA', "uarr;": '\U00002191', "ubrcy;": '\U0000045E', "ubreve;": '\U0000016D', "ucirc;": '\U000000FB', "ucy;": '\U00000443', "udarr;": '\U000021C5', "udblac;": '\U00000171', "udhar;": '\U0000296E', "ufisht;": '\U0000297E', "ufr;": '\U0001D532', "ugrave;": '\U000000F9', "uharl;": '\U000021BF', "uharr;": '\U000021BE', "uhblk;": '\U00002580', "ulcorn;": '\U0000231C', "ulcorner;": '\U0000231C', "ulcrop;": '\U0000230F', "ultri;": '\U000025F8', "umacr;": '\U0000016B', "uml;": '\U000000A8', "uogon;": '\U00000173', "uopf;": '\U0001D566', "uparrow;": '\U00002191', "updownarrow;": '\U00002195', "upharpoonleft;": '\U000021BF', "upharpoonright;": '\U000021BE', "uplus;": '\U0000228E', "upsi;": '\U000003C5', "upsih;": '\U000003D2', "upsilon;": '\U000003C5', "upuparrows;": '\U000021C8', "urcorn;": '\U0000231D', "urcorner;": '\U0000231D', "urcrop;": '\U0000230E', "uring;": '\U0000016F', "urtri;": '\U000025F9', "uscr;": '\U0001D4CA', "utdot;": '\U000022F0', "utilde;": '\U00000169', "utri;": '\U000025B5', "utrif;": '\U000025B4', "uuarr;": '\U000021C8', "uuml;": '\U000000FC', "uwangle;": 
'\U000029A7', "vArr;": '\U000021D5', "vBar;": '\U00002AE8', "vBarv;": '\U00002AE9', "vDash;": '\U000022A8', "vangrt;": '\U0000299C', "varepsilon;": '\U000003F5', "varkappa;": '\U000003F0', "varnothing;": '\U00002205', "varphi;": '\U000003D5', "varpi;": '\U000003D6', "varpropto;": '\U0000221D', "varr;": '\U00002195', "varrho;": '\U000003F1', "varsigma;": '\U000003C2', "vartheta;": '\U000003D1', "vartriangleleft;": '\U000022B2', "vartriangleright;": '\U000022B3', "vcy;": '\U00000432', "vdash;": '\U000022A2', "vee;": '\U00002228', "veebar;": '\U000022BB', "veeeq;": '\U0000225A', "vellip;": '\U000022EE', "verbar;": '\U0000007C', "vert;": '\U0000007C', "vfr;": '\U0001D533', "vltri;": '\U000022B2', "vopf;": '\U0001D567', "vprop;": '\U0000221D', "vrtri;": '\U000022B3', "vscr;": '\U0001D4CB', "vzigzag;": '\U0000299A', "wcirc;": '\U00000175', "wedbar;": '\U00002A5F', "wedge;": '\U00002227', "wedgeq;": '\U00002259', "weierp;": '\U00002118', "wfr;": '\U0001D534', "wopf;": '\U0001D568', "wp;": '\U00002118', "wr;": '\U00002240', "wreath;": '\U00002240', "wscr;": '\U0001D4CC', "xcap;": '\U000022C2', "xcirc;": '\U000025EF', "xcup;": '\U000022C3', "xdtri;": '\U000025BD', "xfr;": '\U0001D535', "xhArr;": '\U000027FA', "xharr;": '\U000027F7', "xi;": '\U000003BE', "xlArr;": '\U000027F8', "xlarr;": '\U000027F5', "xmap;": '\U000027FC', "xnis;": '\U000022FB', "xodot;": '\U00002A00', "xopf;": '\U0001D569', "xoplus;": '\U00002A01', "xotime;": '\U00002A02', "xrArr;": '\U000027F9', "xrarr;": '\U000027F6', "xscr;": '\U0001D4CD', "xsqcup;": '\U00002A06', "xuplus;": '\U00002A04', "xutri;": '\U000025B3', "xvee;": '\U000022C1', "xwedge;": '\U000022C0', "yacute;": '\U000000FD', "yacy;": '\U0000044F', "ycirc;": '\U00000177', "ycy;": '\U0000044B', "yen;": '\U000000A5', "yfr;": '\U0001D536', "yicy;": '\U00000457', "yopf;": '\U0001D56A', "yscr;": '\U0001D4CE', "yucy;": '\U0000044E', "yuml;": '\U000000FF', "zacute;": '\U0000017A', "zcaron;": '\U0000017E', "zcy;": '\U00000437', "zdot;": '\U0000017C', 
"zeetrf;": '\U00002128', "zeta;": '\U000003B6', "zfr;": '\U0001D537', "zhcy;": '\U00000436', "zigrarr;": '\U000021DD', "zopf;": '\U0001D56B', "zscr;": '\U0001D4CF', "zwj;": '\U0000200D', "zwnj;": '\U0000200C', "AElig": '\U000000C6', "AMP": '\U00000026', "Aacute": '\U000000C1', "Acirc": '\U000000C2', "Agrave": '\U000000C0', "Aring": '\U000000C5', "Atilde": '\U000000C3', "Auml": '\U000000C4', "COPY": '\U000000A9', "Ccedil": '\U000000C7', "ETH": '\U000000D0', "Eacute": '\U000000C9', "Ecirc": '\U000000CA', "Egrave": '\U000000C8', "Euml": '\U000000CB', "GT": '\U0000003E', "Iacute": '\U000000CD', "Icirc": '\U000000CE', "Igrave": '\U000000CC', "Iuml": '\U000000CF', "LT": '\U0000003C', "Ntilde": '\U000000D1', "Oacute": '\U000000D3', "Ocirc": '\U000000D4', "Ograve": '\U000000D2', "Oslash": '\U000000D8', "Otilde": '\U000000D5', "Ouml": '\U000000D6', "QUOT": '\U00000022', "REG": '\U000000AE', "THORN": '\U000000DE', "Uacute": '\U000000DA', "Ucirc": '\U000000DB', "Ugrave": '\U000000D9', "Uuml": '\U000000DC', "Yacute": '\U000000DD', "aacute": '\U000000E1', "acirc": '\U000000E2', "acute": '\U000000B4', "aelig": '\U000000E6', "agrave": '\U000000E0', "amp": '\U00000026', "aring": '\U000000E5', "atilde": '\U000000E3', "auml": '\U000000E4', "brvbar": '\U000000A6', "ccedil": '\U000000E7', "cedil": '\U000000B8', "cent": '\U000000A2', "copy": '\U000000A9', "curren": '\U000000A4', "deg": '\U000000B0', "divide": '\U000000F7', "eacute": '\U000000E9', "ecirc": '\U000000EA', "egrave": '\U000000E8', "eth": '\U000000F0', "euml": '\U000000EB', "frac12": '\U000000BD', "frac14": '\U000000BC', "frac34": '\U000000BE', "gt": '\U0000003E', "iacute": '\U000000ED', "icirc": '\U000000EE', "iexcl": '\U000000A1', "igrave": '\U000000EC', "iquest": '\U000000BF', "iuml": '\U000000EF', "laquo": '\U000000AB', "lt": '\U0000003C', "macr": '\U000000AF', "micro": '\U000000B5', "middot": '\U000000B7', "nbsp": '\U000000A0', "not": '\U000000AC', "ntilde": '\U000000F1', "oacute": '\U000000F3', "ocirc": '\U000000F4', 
"ograve": '\U000000F2', "ordf": '\U000000AA', "ordm": '\U000000BA', "oslash": '\U000000F8', "otilde": '\U000000F5', "ouml": '\U000000F6', "para": '\U000000B6', "plusmn": '\U000000B1', "pound": '\U000000A3', "quot": '\U00000022', "raquo": '\U000000BB', "reg": '\U000000AE', "sect": '\U000000A7', "shy": '\U000000AD', "sup1": '\U000000B9', "sup2": '\U000000B2', "sup3": '\U000000B3', "szlig": '\U000000DF', "thorn": '\U000000FE', "times": '\U000000D7', "uacute": '\U000000FA', "ucirc": '\U000000FB', "ugrave": '\U000000F9', "uml": '\U000000A8', "uuml": '\U000000FC', "yacute": '\U000000FD', "yen": '\U000000A5', "yuml": '\U000000FF', } // HTML entities that are two unicode codepoints. var entity2 = map[string][2]rune{ // TODO(nigeltao): Handle replacements that are wider than their names. // "nLt;": {'\u226A', '\u20D2'}, // "nGt;": {'\u226B', '\u20D2'}, "NotEqualTilde;": {'\u2242', '\u0338'}, "NotGreaterFullEqual;": {'\u2267', '\u0338'}, "NotGreaterGreater;": {'\u226B', '\u0338'}, "NotGreaterSlantEqual;": {'\u2A7E', '\u0338'}, "NotHumpDownHump;": {'\u224E', '\u0338'}, "NotHumpEqual;": {'\u224F', '\u0338'}, "NotLeftTriangleBar;": {'\u29CF', '\u0338'}, "NotLessLess;": {'\u226A', '\u0338'}, "NotLessSlantEqual;": {'\u2A7D', '\u0338'}, "NotNestedGreaterGreater;": {'\u2AA2', '\u0338'}, "NotNestedLessLess;": {'\u2AA1', '\u0338'}, "NotPrecedesEqual;": {'\u2AAF', '\u0338'}, "NotRightTriangleBar;": {'\u29D0', '\u0338'}, "NotSquareSubset;": {'\u228F', '\u0338'}, "NotSquareSuperset;": {'\u2290', '\u0338'}, "NotSubset;": {'\u2282', '\u20D2'}, "NotSucceedsEqual;": {'\u2AB0', '\u0338'}, "NotSucceedsTilde;": {'\u227F', '\u0338'}, "NotSuperset;": {'\u2283', '\u20D2'}, "ThickSpace;": {'\u205F', '\u200A'}, "acE;": {'\u223E', '\u0333'}, "bne;": {'\u003D', '\u20E5'}, "bnequiv;": {'\u2261', '\u20E5'}, "caps;": {'\u2229', '\uFE00'}, "cups;": {'\u222A', '\uFE00'}, "fjlig;": {'\u0066', '\u006A'}, "gesl;": {'\u22DB', '\uFE00'}, "gvertneqq;": {'\u2269', '\uFE00'}, "gvnE;": {'\u2269', '\uFE00'}, 
"lates;": {'\u2AAD', '\uFE00'}, "lesg;": {'\u22DA', '\uFE00'}, "lvertneqq;": {'\u2268', '\uFE00'}, "lvnE;": {'\u2268', '\uFE00'}, "nGg;": {'\u22D9', '\u0338'}, "nGtv;": {'\u226B', '\u0338'}, "nLl;": {'\u22D8', '\u0338'}, "nLtv;": {'\u226A', '\u0338'}, "nang;": {'\u2220', '\u20D2'}, "napE;": {'\u2A70', '\u0338'}, "napid;": {'\u224B', '\u0338'}, "nbump;": {'\u224E', '\u0338'}, "nbumpe;": {'\u224F', '\u0338'}, "ncongdot;": {'\u2A6D', '\u0338'}, "nedot;": {'\u2250', '\u0338'}, "nesim;": {'\u2242', '\u0338'}, "ngE;": {'\u2267', '\u0338'}, "ngeqq;": {'\u2267', '\u0338'}, "ngeqslant;": {'\u2A7E', '\u0338'}, "nges;": {'\u2A7E', '\u0338'}, "nlE;": {'\u2266', '\u0338'}, "nleqq;": {'\u2266', '\u0338'}, "nleqslant;": {'\u2A7D', '\u0338'}, "nles;": {'\u2A7D', '\u0338'}, "notinE;": {'\u22F9', '\u0338'}, "notindot;": {'\u22F5', '\u0338'}, "nparsl;": {'\u2AFD', '\u20E5'}, "npart;": {'\u2202', '\u0338'}, "npre;": {'\u2AAF', '\u0338'}, "npreceq;": {'\u2AAF', '\u0338'}, "nrarrc;": {'\u2933', '\u0338'}, "nrarrw;": {'\u219D', '\u0338'}, "nsce;": {'\u2AB0', '\u0338'}, "nsubE;": {'\u2AC5', '\u0338'}, "nsubset;": {'\u2282', '\u20D2'}, "nsubseteqq;": {'\u2AC5', '\u0338'}, "nsucceq;": {'\u2AB0', '\u0338'}, "nsupE;": {'\u2AC6', '\u0338'}, "nsupset;": {'\u2283', '\u20D2'}, "nsupseteqq;": {'\u2AC6', '\u0338'}, "nvap;": {'\u224D', '\u20D2'}, "nvge;": {'\u2265', '\u20D2'}, "nvgt;": {'\u003E', '\u20D2'}, "nvle;": {'\u2264', '\u20D2'}, "nvlt;": {'\u003C', '\u20D2'}, "nvltrie;": {'\u22B4', '\u20D2'}, "nvrtrie;": {'\u22B5', '\u20D2'}, "nvsim;": {'\u223C', '\u20D2'}, "race;": {'\u223D', '\u0331'}, "smtes;": {'\u2AAC', '\uFE00'}, "sqcaps;": {'\u2293', '\uFE00'}, "sqcups;": {'\u2294', '\uFE00'}, "varsubsetneq;": {'\u228A', '\uFE00'}, "varsubsetneqq;": {'\u2ACB', '\uFE00'}, "varsupsetneq;": {'\u228B', '\uFE00'}, "varsupsetneqq;": {'\u2ACC', '\uFE00'}, "vnsub;": {'\u2282', '\u20D2'}, "vnsup;": {'\u2283', '\u20D2'}, "vsubnE;": {'\u2ACB', '\uFE00'}, "vsubne;": {'\u228A', '\uFE00'}, "vsupnE;": {'\u2ACC', 
'\uFE00'}, "vsupne;": {'\u228B', '\uFE00'}, } ================================================ FILE: vendor/golang.org/x/net/html/escape.go ================================================ // Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package html import ( "bytes" "strings" "unicode/utf8" ) // These replacements permit compatibility with old numeric entities that // assumed Windows-1252 encoding. // https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference var replacementTable = [...]rune{ '\u20AC', // First entry is what 0x80 should be replaced with. '\u0081', '\u201A', '\u0192', '\u201E', '\u2026', '\u2020', '\u2021', '\u02C6', '\u2030', '\u0160', '\u2039', '\u0152', '\u008D', '\u017D', '\u008F', '\u0090', '\u2018', '\u2019', '\u201C', '\u201D', '\u2022', '\u2013', '\u2014', '\u02DC', '\u2122', '\u0161', '\u203A', '\u0153', '\u009D', '\u017E', '\u0178', // Last entry is 0x9F. // 0x00->'\uFFFD' is handled programmatically. // 0x0D->'\u000D' is a no-op. } // unescapeEntity reads an entity like "<" from b[src:] and writes the // corresponding "<" to b[dst:], returning the incremented dst and src cursors. // Precondition: b[src] == '&' && dst <= src. // attribute should be true if parsing an attribute value. func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) { // https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference // i starts at 1 because we already know that s[0] == '&'. i, s := 1, b[src:] if len(s) <= 1 { b[dst] = b[src] return dst + 1, src + 1 } if s[i] == '#' { if len(s) <= 3 { // We need to have at least "&#.". 
b[dst] = b[src] return dst + 1, src + 1 } i++ c := s[i] hex := false if c == 'x' || c == 'X' { hex = true i++ } x := '\x00' for i < len(s) { c = s[i] i++ if hex { if '0' <= c && c <= '9' { x = 16*x + rune(c) - '0' continue } else if 'a' <= c && c <= 'f' { x = 16*x + rune(c) - 'a' + 10 continue } else if 'A' <= c && c <= 'F' { x = 16*x + rune(c) - 'A' + 10 continue } } else if '0' <= c && c <= '9' { x = 10*x + rune(c) - '0' continue } if c != ';' { i-- } break } if i <= 3 { // No characters matched. b[dst] = b[src] return dst + 1, src + 1 } if 0x80 <= x && x <= 0x9F { // Replace characters from Windows-1252 with UTF-8 equivalents. x = replacementTable[x-0x80] } else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF { // Replace invalid characters with the replacement character. x = '\uFFFD' } return dst + utf8.EncodeRune(b[dst:], x), src + i } // Consume the maximum number of characters possible, with the // consumed characters matching one of the named references. for i < len(s) { c := s[i] i++ // Lower-cased characters are more common in entities, so we check for them first. if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { continue } if c != ';' { i-- } break } entityName := string(s[1:i]) if entityName == "" { // No-op. } else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' { // No-op. 
} else if x := entity[entityName]; x != 0 { return dst + utf8.EncodeRune(b[dst:], x), src + i } else if x := entity2[entityName]; x[0] != 0 { dst1 := dst + utf8.EncodeRune(b[dst:], x[0]) return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i } else if !attribute { maxLen := len(entityName) - 1 if maxLen > longestEntityWithoutSemicolon { maxLen = longestEntityWithoutSemicolon } for j := maxLen; j > 1; j-- { if x := entity[entityName[:j]]; x != 0 { return dst + utf8.EncodeRune(b[dst:], x), src + j + 1 } } } dst1, src1 = dst+i, src+i copy(b[dst:dst1], b[src:src1]) return dst1, src1 } // unescape unescapes b's entities in-place, so that "a<b" becomes "a': esc = ">" case '"': // """ is shorter than """. esc = """ case '\r': esc = " " default: panic("unrecognized escape character") } s = s[i+1:] if _, err := w.WriteString(esc); err != nil { return err } i = strings.IndexAny(s, escapedChars) } _, err := w.WriteString(s) return err } // EscapeString escapes special characters like "<" to become "<". It // escapes only five such characters: <, >, &, ' and ". // UnescapeString(EscapeString(s)) == s always holds, but the converse isn't // always true. func EscapeString(s string) string { if strings.IndexAny(s, escapedChars) == -1 { return s } var buf bytes.Buffer escape(&buf, s) return buf.String() } // UnescapeString unescapes entities like "<" to become "<". It unescapes a // larger range of entities than EscapeString escapes. For example, "á" // unescapes to "á", as does "á" and "&xE1;". // UnescapeString(EscapeString(s)) == s always holds, but the converse isn't // always true. func UnescapeString(s string) string { for _, c := range s { if c == '&' { return string(unescape([]byte(s), false)) } } return s } ================================================ FILE: vendor/golang.org/x/net/html/foreign.go ================================================ // Copyright 2011 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package html import ( "strings" ) func adjustAttributeNames(aa []Attribute, nameMap map[string]string) { for i := range aa { if newName, ok := nameMap[aa[i].Key]; ok { aa[i].Key = newName } } } func adjustForeignAttributes(aa []Attribute) { for i, a := range aa { if a.Key == "" || a.Key[0] != 'x' { continue } switch a.Key { case "xlink:actuate", "xlink:arcrole", "xlink:href", "xlink:role", "xlink:show", "xlink:title", "xlink:type", "xml:base", "xml:lang", "xml:space", "xmlns:xlink": j := strings.Index(a.Key, ":") aa[i].Namespace = a.Key[:j] aa[i].Key = a.Key[j+1:] } } } func htmlIntegrationPoint(n *Node) bool { if n.Type != ElementNode { return false } switch n.Namespace { case "math": if n.Data == "annotation-xml" { for _, a := range n.Attr { if a.Key == "encoding" { val := strings.ToLower(a.Val) if val == "text/html" || val == "application/xhtml+xml" { return true } } } } case "svg": switch n.Data { case "desc", "foreignObject", "title": return true } } return false } func mathMLTextIntegrationPoint(n *Node) bool { if n.Namespace != "math" { return false } switch n.Data { case "mi", "mo", "mn", "ms", "mtext": return true } return false } // Section 12.2.5.5. var breakout = map[string]bool{ "b": true, "big": true, "blockquote": true, "body": true, "br": true, "center": true, "code": true, "dd": true, "div": true, "dl": true, "dt": true, "em": true, "embed": true, "h1": true, "h2": true, "h3": true, "h4": true, "h5": true, "h6": true, "head": true, "hr": true, "i": true, "img": true, "li": true, "listing": true, "menu": true, "meta": true, "nobr": true, "ol": true, "p": true, "pre": true, "ruby": true, "s": true, "small": true, "span": true, "strong": true, "strike": true, "sub": true, "sup": true, "table": true, "tt": true, "u": true, "ul": true, "var": true, } // Section 12.2.5.5. 
var svgTagNameAdjustments = map[string]string{ "altglyph": "altGlyph", "altglyphdef": "altGlyphDef", "altglyphitem": "altGlyphItem", "animatecolor": "animateColor", "animatemotion": "animateMotion", "animatetransform": "animateTransform", "clippath": "clipPath", "feblend": "feBlend", "fecolormatrix": "feColorMatrix", "fecomponenttransfer": "feComponentTransfer", "fecomposite": "feComposite", "feconvolvematrix": "feConvolveMatrix", "fediffuselighting": "feDiffuseLighting", "fedisplacementmap": "feDisplacementMap", "fedistantlight": "feDistantLight", "feflood": "feFlood", "fefunca": "feFuncA", "fefuncb": "feFuncB", "fefuncg": "feFuncG", "fefuncr": "feFuncR", "fegaussianblur": "feGaussianBlur", "feimage": "feImage", "femerge": "feMerge", "femergenode": "feMergeNode", "femorphology": "feMorphology", "feoffset": "feOffset", "fepointlight": "fePointLight", "fespecularlighting": "feSpecularLighting", "fespotlight": "feSpotLight", "fetile": "feTile", "feturbulence": "feTurbulence", "foreignobject": "foreignObject", "glyphref": "glyphRef", "lineargradient": "linearGradient", "radialgradient": "radialGradient", "textpath": "textPath", } // Section 12.2.5.1 var mathMLAttributeAdjustments = map[string]string{ "definitionurl": "definitionURL", } var svgAttributeAdjustments = map[string]string{ "attributename": "attributeName", "attributetype": "attributeType", "basefrequency": "baseFrequency", "baseprofile": "baseProfile", "calcmode": "calcMode", "clippathunits": "clipPathUnits", "contentscripttype": "contentScriptType", "contentstyletype": "contentStyleType", "diffuseconstant": "diffuseConstant", "edgemode": "edgeMode", "externalresourcesrequired": "externalResourcesRequired", "filterres": "filterRes", "filterunits": "filterUnits", "glyphref": "glyphRef", "gradienttransform": "gradientTransform", "gradientunits": "gradientUnits", "kernelmatrix": "kernelMatrix", "kernelunitlength": "kernelUnitLength", "keypoints": "keyPoints", "keysplines": "keySplines", "keytimes": "keyTimes", 
"lengthadjust": "lengthAdjust", "limitingconeangle": "limitingConeAngle", "markerheight": "markerHeight", "markerunits": "markerUnits", "markerwidth": "markerWidth", "maskcontentunits": "maskContentUnits", "maskunits": "maskUnits", "numoctaves": "numOctaves", "pathlength": "pathLength", "patterncontentunits": "patternContentUnits", "patterntransform": "patternTransform", "patternunits": "patternUnits", "pointsatx": "pointsAtX", "pointsaty": "pointsAtY", "pointsatz": "pointsAtZ", "preservealpha": "preserveAlpha", "preserveaspectratio": "preserveAspectRatio", "primitiveunits": "primitiveUnits", "refx": "refX", "refy": "refY", "repeatcount": "repeatCount", "repeatdur": "repeatDur", "requiredextensions": "requiredExtensions", "requiredfeatures": "requiredFeatures", "specularconstant": "specularConstant", "specularexponent": "specularExponent", "spreadmethod": "spreadMethod", "startoffset": "startOffset", "stddeviation": "stdDeviation", "stitchtiles": "stitchTiles", "surfacescale": "surfaceScale", "systemlanguage": "systemLanguage", "tablevalues": "tableValues", "targetx": "targetX", "targety": "targetY", "textlength": "textLength", "viewbox": "viewBox", "viewtarget": "viewTarget", "xchannelselector": "xChannelSelector", "ychannelselector": "yChannelSelector", "zoomandpan": "zoomAndPan", } ================================================ FILE: vendor/golang.org/x/net/html/node.go ================================================ // Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package html import ( "golang.org/x/net/html/atom" ) // A NodeType is the type of a Node. 
type NodeType uint32 const ( ErrorNode NodeType = iota TextNode DocumentNode ElementNode CommentNode DoctypeNode scopeMarkerNode ) // Section 12.2.3.3 says "scope markers are inserted when entering applet // elements, buttons, object elements, marquees, table cells, and table // captions, and are used to prevent formatting from 'leaking'". var scopeMarker = Node{Type: scopeMarkerNode} // A Node consists of a NodeType and some Data (tag name for element nodes, // content for text) and are part of a tree of Nodes. Element nodes may also // have a Namespace and contain a slice of Attributes. Data is unescaped, so // that it looks like "a 0 { return (*s)[i-1] } return nil } // index returns the index of the top-most occurrence of n in the stack, or -1 // if n is not present. func (s *nodeStack) index(n *Node) int { for i := len(*s) - 1; i >= 0; i-- { if (*s)[i] == n { return i } } return -1 } // insert inserts a node at the given index. func (s *nodeStack) insert(i int, n *Node) { (*s) = append(*s, nil) copy((*s)[i+1:], (*s)[i:]) (*s)[i] = n } // remove removes a node from the stack. It is a no-op if n is not present. func (s *nodeStack) remove(n *Node) { i := s.index(n) if i == -1 { return } copy((*s)[i:], (*s)[i+1:]) j := len(*s) - 1 (*s)[j] = nil *s = (*s)[:j] } ================================================ FILE: vendor/golang.org/x/net/html/parse.go ================================================ // Copyright 2010 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package html import ( "errors" "fmt" "io" "strings" a "golang.org/x/net/html/atom" ) // A parser implements the HTML5 parsing algorithm: // https://html.spec.whatwg.org/multipage/syntax.html#tree-construction type parser struct { // tokenizer provides the tokens for the parser. tokenizer *Tokenizer // tok is the most recently read token. tok Token // Self-closing tags like
are treated as start tags, except that // hasSelfClosingToken is set while they are being processed. hasSelfClosingToken bool // doc is the document root element. doc *Node // The stack of open elements (section 12.2.3.2) and active formatting // elements (section 12.2.3.3). oe, afe nodeStack // Element pointers (section 12.2.3.4). head, form *Node // Other parsing state flags (section 12.2.3.5). scripting, framesetOK bool // im is the current insertion mode. im insertionMode // originalIM is the insertion mode to go back to after completing a text // or inTableText insertion mode. originalIM insertionMode // fosterParenting is whether new elements should be inserted according to // the foster parenting rules (section 12.2.5.3). fosterParenting bool // quirks is whether the parser is operating in "quirks mode." quirks bool // fragment is whether the parser is parsing an HTML fragment. fragment bool // context is the context element when parsing an HTML fragment // (section 12.4). context *Node } func (p *parser) top() *Node { if n := p.oe.top(); n != nil { return n } return p.doc } // Stop tags for use in popUntil. These come from section 12.2.3.2. var ( defaultScopeStopTags = map[string][]a.Atom{ "": {a.Applet, a.Caption, a.Html, a.Table, a.Td, a.Th, a.Marquee, a.Object, a.Template}, "math": {a.AnnotationXml, a.Mi, a.Mn, a.Mo, a.Ms, a.Mtext}, "svg": {a.Desc, a.ForeignObject, a.Title}, } ) type scope int const ( defaultScope scope = iota listItemScope buttonScope tableScope tableRowScope tableBodyScope selectScope ) // popUntil pops the stack of open elements at the highest element whose tag // is in matchTags, provided there is no higher element in the scope's stop // tags (as defined in section 12.2.3.2). It returns whether or not there was // such an element. If there was not, popUntil leaves the stack unchanged. // // For example, the set of stop tags for table scope is: "html", "table". 
If // the stack was: // ["html", "body", "font", "table", "b", "i", "u"] // then popUntil(tableScope, "font") would return false, but // popUntil(tableScope, "i") would return true and the stack would become: // ["html", "body", "font", "table", "b"] // // If an element's tag is in both the stop tags and matchTags, then the stack // will be popped and the function returns true (provided, of course, there was // no higher element in the stack that was also in the stop tags). For example, // popUntil(tableScope, "table") returns true and leaves: // ["html", "body", "font"] func (p *parser) popUntil(s scope, matchTags ...a.Atom) bool { if i := p.indexOfElementInScope(s, matchTags...); i != -1 { p.oe = p.oe[:i] return true } return false } // indexOfElementInScope returns the index in p.oe of the highest element whose // tag is in matchTags that is in scope. If no matching element is in scope, it // returns -1. func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int { for i := len(p.oe) - 1; i >= 0; i-- { tagAtom := p.oe[i].DataAtom if p.oe[i].Namespace == "" { for _, t := range matchTags { if t == tagAtom { return i } } switch s { case defaultScope: // No-op. case listItemScope: if tagAtom == a.Ol || tagAtom == a.Ul { return -1 } case buttonScope: if tagAtom == a.Button { return -1 } case tableScope: if tagAtom == a.Html || tagAtom == a.Table { return -1 } case selectScope: if tagAtom != a.Optgroup && tagAtom != a.Option { return -1 } default: panic("unreachable") } } switch s { case defaultScope, listItemScope, buttonScope: for _, t := range defaultScopeStopTags[p.oe[i].Namespace] { if t == tagAtom { return -1 } } } } return -1 } // elementInScope is like popUntil, except that it doesn't modify the stack of // open elements. func (p *parser) elementInScope(s scope, matchTags ...a.Atom) bool { return p.indexOfElementInScope(s, matchTags...) 
!= -1 } // clearStackToContext pops elements off the stack of open elements until a // scope-defined element is found. func (p *parser) clearStackToContext(s scope) { for i := len(p.oe) - 1; i >= 0; i-- { tagAtom := p.oe[i].DataAtom switch s { case tableScope: if tagAtom == a.Html || tagAtom == a.Table { p.oe = p.oe[:i+1] return } case tableRowScope: if tagAtom == a.Html || tagAtom == a.Tr { p.oe = p.oe[:i+1] return } case tableBodyScope: if tagAtom == a.Html || tagAtom == a.Tbody || tagAtom == a.Tfoot || tagAtom == a.Thead { p.oe = p.oe[:i+1] return } default: panic("unreachable") } } } // generateImpliedEndTags pops nodes off the stack of open elements as long as // the top node has a tag name of dd, dt, li, option, optgroup, p, rp, or rt. // If exceptions are specified, nodes with that name will not be popped off. func (p *parser) generateImpliedEndTags(exceptions ...string) { var i int loop: for i = len(p.oe) - 1; i >= 0; i-- { n := p.oe[i] if n.Type == ElementNode { switch n.DataAtom { case a.Dd, a.Dt, a.Li, a.Option, a.Optgroup, a.P, a.Rp, a.Rt: for _, except := range exceptions { if n.Data == except { break loop } } continue } } break } p.oe = p.oe[:i+1] } // addChild adds a child node n to the top element, and pushes n onto the stack // of open elements if it is an element node. func (p *parser) addChild(n *Node) { if p.shouldFosterParent() { p.fosterParent(n) } else { p.top().AppendChild(n) } if n.Type == ElementNode { p.oe = append(p.oe, n) } } // shouldFosterParent returns whether the next node to be added should be // foster parented. func (p *parser) shouldFosterParent() bool { if p.fosterParenting { switch p.top().DataAtom { case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr: return true } } return false } // fosterParent adds a child node according to the foster parenting rules. // Section 12.2.5.3, "foster parenting". 
func (p *parser) fosterParent(n *Node) { var table, parent, prev *Node var i int for i = len(p.oe) - 1; i >= 0; i-- { if p.oe[i].DataAtom == a.Table { table = p.oe[i] break } } if table == nil { // The foster parent is the html element. parent = p.oe[0] } else { parent = table.Parent } if parent == nil { parent = p.oe[i-1] } if table != nil { prev = table.PrevSibling } else { prev = parent.LastChild } if prev != nil && prev.Type == TextNode && n.Type == TextNode { prev.Data += n.Data return } parent.InsertBefore(n, table) } // addText adds text to the preceding node if it is a text node, or else it // calls addChild with a new text node. func (p *parser) addText(text string) { if text == "" { return } if p.shouldFosterParent() { p.fosterParent(&Node{ Type: TextNode, Data: text, }) return } t := p.top() if n := t.LastChild; n != nil && n.Type == TextNode { n.Data += text return } p.addChild(&Node{ Type: TextNode, Data: text, }) } // addElement adds a child element based on the current token. func (p *parser) addElement() { p.addChild(&Node{ Type: ElementNode, DataAtom: p.tok.DataAtom, Data: p.tok.Data, Attr: p.tok.Attr, }) } // Section 12.2.3.3. func (p *parser) addFormattingElement() { tagAtom, attr := p.tok.DataAtom, p.tok.Attr p.addElement() // Implement the Noah's Ark clause, but with three per family instead of two. identicalElements := 0 findIdenticalElements: for i := len(p.afe) - 1; i >= 0; i-- { n := p.afe[i] if n.Type == scopeMarkerNode { break } if n.Type != ElementNode { continue } if n.Namespace != "" { continue } if n.DataAtom != tagAtom { continue } if len(n.Attr) != len(attr) { continue } compareAttributes: for _, t0 := range n.Attr { for _, t1 := range attr { if t0.Key == t1.Key && t0.Namespace == t1.Namespace && t0.Val == t1.Val { // Found a match for this attribute, continue with the next attribute. continue compareAttributes } } // If we get here, there is no attribute that matches a. // Therefore the element is not identical to the new one. 
continue findIdenticalElements } identicalElements++ if identicalElements >= 3 { p.afe.remove(n) } } p.afe = append(p.afe, p.top()) } // Section 12.2.3.3. func (p *parser) clearActiveFormattingElements() { for { n := p.afe.pop() if len(p.afe) == 0 || n.Type == scopeMarkerNode { return } } } // Section 12.2.3.3. func (p *parser) reconstructActiveFormattingElements() { n := p.afe.top() if n == nil { return } if n.Type == scopeMarkerNode || p.oe.index(n) != -1 { return } i := len(p.afe) - 1 for n.Type != scopeMarkerNode && p.oe.index(n) == -1 { if i == 0 { i = -1 break } i-- n = p.afe[i] } for { i++ clone := p.afe[i].clone() p.addChild(clone) p.afe[i] = clone if i == len(p.afe)-1 { break } } } // Section 12.2.4. func (p *parser) acknowledgeSelfClosingTag() { p.hasSelfClosingToken = false } // An insertion mode (section 12.2.3.1) is the state transition function from // a particular state in the HTML5 parser's state machine. It updates the // parser's fields depending on parser.tok (where ErrorToken means EOF). // It returns whether the token was consumed. type insertionMode func(*parser) bool // setOriginalIM sets the insertion mode to return to after completing a text or // inTableText insertion mode. // Section 12.2.3.1, "using the rules for". func (p *parser) setOriginalIM() { if p.originalIM != nil { panic("html: bad parser state: originalIM was set twice") } p.originalIM = p.im } // Section 12.2.3.1, "reset the insertion mode". 
func (p *parser) resetInsertionMode() { for i := len(p.oe) - 1; i >= 0; i-- { n := p.oe[i] if i == 0 && p.context != nil { n = p.context } switch n.DataAtom { case a.Select: p.im = inSelectIM case a.Td, a.Th: p.im = inCellIM case a.Tr: p.im = inRowIM case a.Tbody, a.Thead, a.Tfoot: p.im = inTableBodyIM case a.Caption: p.im = inCaptionIM case a.Colgroup: p.im = inColumnGroupIM case a.Table: p.im = inTableIM case a.Head: p.im = inBodyIM case a.Body: p.im = inBodyIM case a.Frameset: p.im = inFramesetIM case a.Html: p.im = beforeHeadIM default: continue } return } p.im = inBodyIM } const whitespace = " \t\r\n\f" // Section 12.2.5.4.1. func initialIM(p *parser) bool { switch p.tok.Type { case TextToken: p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) if len(p.tok.Data) == 0 { // It was all whitespace, so ignore it. return true } case CommentToken: p.doc.AppendChild(&Node{ Type: CommentNode, Data: p.tok.Data, }) return true case DoctypeToken: n, quirks := parseDoctype(p.tok.Data) p.doc.AppendChild(n) p.quirks = quirks p.im = beforeHTMLIM return true } p.quirks = true p.im = beforeHTMLIM return false } // Section 12.2.5.4.2. func beforeHTMLIM(p *parser) bool { switch p.tok.Type { case DoctypeToken: // Ignore the token. return true case TextToken: p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) if len(p.tok.Data) == 0 { // It was all whitespace, so ignore it. return true } case StartTagToken: if p.tok.DataAtom == a.Html { p.addElement() p.im = beforeHeadIM return true } case EndTagToken: switch p.tok.DataAtom { case a.Head, a.Body, a.Html, a.Br: p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) return false default: // Ignore the token. return true } case CommentToken: p.doc.AppendChild(&Node{ Type: CommentNode, Data: p.tok.Data, }) return true } p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) return false } // Section 12.2.5.4.3. 
func beforeHeadIM(p *parser) bool { switch p.tok.Type { case TextToken: p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) if len(p.tok.Data) == 0 { // It was all whitespace, so ignore it. return true } case StartTagToken: switch p.tok.DataAtom { case a.Head: p.addElement() p.head = p.top() p.im = inHeadIM return true case a.Html: return inBodyIM(p) } case EndTagToken: switch p.tok.DataAtom { case a.Head, a.Body, a.Html, a.Br: p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) return false default: // Ignore the token. return true } case CommentToken: p.addChild(&Node{ Type: CommentNode, Data: p.tok.Data, }) return true case DoctypeToken: // Ignore the token. return true } p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) return false } // Section 12.2.5.4.4. func inHeadIM(p *parser) bool { switch p.tok.Type { case TextToken: s := strings.TrimLeft(p.tok.Data, whitespace) if len(s) < len(p.tok.Data) { // Add the initial whitespace to the current node. p.addText(p.tok.Data[:len(p.tok.Data)-len(s)]) if s == "" { return true } p.tok.Data = s } case StartTagToken: switch p.tok.DataAtom { case a.Html: return inBodyIM(p) case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta: p.addElement() p.oe.pop() p.acknowledgeSelfClosingTag() return true case a.Script, a.Title, a.Noscript, a.Noframes, a.Style: p.addElement() p.setOriginalIM() p.im = textIM return true case a.Head: // Ignore the token. return true } case EndTagToken: switch p.tok.DataAtom { case a.Head: n := p.oe.pop() if n.DataAtom != a.Head { panic("html: bad parser state: element not found, in the in-head insertion mode") } p.im = afterHeadIM return true case a.Body, a.Html, a.Br: p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) return false default: // Ignore the token. return true } case CommentToken: p.addChild(&Node{ Type: CommentNode, Data: p.tok.Data, }) return true case DoctypeToken: // Ignore the token. 
return true } p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) return false } // Section 12.2.5.4.6. func afterHeadIM(p *parser) bool { switch p.tok.Type { case TextToken: s := strings.TrimLeft(p.tok.Data, whitespace) if len(s) < len(p.tok.Data) { // Add the initial whitespace to the current node. p.addText(p.tok.Data[:len(p.tok.Data)-len(s)]) if s == "" { return true } p.tok.Data = s } case StartTagToken: switch p.tok.DataAtom { case a.Html: return inBodyIM(p) case a.Body: p.addElement() p.framesetOK = false p.im = inBodyIM return true case a.Frameset: p.addElement() p.im = inFramesetIM return true case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title: p.oe = append(p.oe, p.head) defer p.oe.remove(p.head) return inHeadIM(p) case a.Head: // Ignore the token. return true } case EndTagToken: switch p.tok.DataAtom { case a.Body, a.Html, a.Br: // Drop down to creating an implied tag. default: // Ignore the token. return true } case CommentToken: p.addChild(&Node{ Type: CommentNode, Data: p.tok.Data, }) return true case DoctypeToken: // Ignore the token. return true } p.parseImpliedToken(StartTagToken, a.Body, a.Body.String()) p.framesetOK = true return false } // copyAttributes copies attributes of src not found on dst to dst. func copyAttributes(dst *Node, src Token) { if len(src.Attr) == 0 { return } attr := map[string]string{} for _, t := range dst.Attr { attr[t.Key] = t.Val } for _, t := range src.Attr { if _, ok := attr[t.Key]; !ok { dst.Attr = append(dst.Attr, t) attr[t.Key] = t.Val } } } // Section 12.2.5.4.7. func inBodyIM(p *parser) bool { switch p.tok.Type { case TextToken: d := p.tok.Data switch n := p.oe.top(); n.DataAtom { case a.Pre, a.Listing: if n.FirstChild == nil { // Ignore a newline at the start of a
 block.
				if d != "" && d[0] == '\r' {
					d = d[1:]
				}
				if d != "" && d[0] == '\n' {
					d = d[1:]
				}
			}
		}
		d = strings.Replace(d, "\x00", "", -1)
		if d == "" {
			return true
		}
		p.reconstructActiveFormattingElements()
		p.addText(d)
		if p.framesetOK && strings.TrimLeft(d, whitespace) != "" {
			// There were non-whitespace characters inserted.
			p.framesetOK = false
		}
	case StartTagToken:
		switch p.tok.DataAtom {
		case a.Html:
			copyAttributes(p.oe[0], p.tok)
		case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title:
			return inHeadIM(p)
		case a.Body:
			if len(p.oe) >= 2 {
				body := p.oe[1]
				if body.Type == ElementNode && body.DataAtom == a.Body {
					p.framesetOK = false
					copyAttributes(body, p.tok)
				}
			}
		case a.Frameset:
			if !p.framesetOK || len(p.oe) < 2 || p.oe[1].DataAtom != a.Body {
				// Ignore the token.
				return true
			}
			body := p.oe[1]
			if body.Parent != nil {
				body.Parent.RemoveChild(body)
			}
			p.oe = p.oe[:1]
			p.addElement()
			p.im = inFramesetIM
			return true
		case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul:
			p.popUntil(buttonScope, a.P)
			p.addElement()
		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
			p.popUntil(buttonScope, a.P)
			switch n := p.top(); n.DataAtom {
			case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
				p.oe.pop()
			}
			p.addElement()
		case a.Pre, a.Listing:
			p.popUntil(buttonScope, a.P)
			p.addElement()
			// The newline, if any, will be dealt with by the TextToken case.
			p.framesetOK = false
		case a.Form:
			if p.form == nil {
				p.popUntil(buttonScope, a.P)
				p.addElement()
				p.form = p.top()
			}
		case a.Li:
			p.framesetOK = false
			for i := len(p.oe) - 1; i >= 0; i-- {
				node := p.oe[i]
				switch node.DataAtom {
				case a.Li:
					p.oe = p.oe[:i]
				case a.Address, a.Div, a.P:
					continue
				default:
					if !isSpecialElement(node) {
						continue
					}
				}
				break
			}
			p.popUntil(buttonScope, a.P)
			p.addElement()
		case a.Dd, a.Dt:
			p.framesetOK = false
			for i := len(p.oe) - 1; i >= 0; i-- {
				node := p.oe[i]
				switch node.DataAtom {
				case a.Dd, a.Dt:
					p.oe = p.oe[:i]
				case a.Address, a.Div, a.P:
					continue
				default:
					if !isSpecialElement(node) {
						continue
					}
				}
				break
			}
			p.popUntil(buttonScope, a.P)
			p.addElement()
		case a.Plaintext:
			p.popUntil(buttonScope, a.P)
			p.addElement()
		case a.Button:
			p.popUntil(defaultScope, a.Button)
			p.reconstructActiveFormattingElements()
			p.addElement()
			p.framesetOK = false
		case a.A:
			for i := len(p.afe) - 1; i >= 0 && p.afe[i].Type != scopeMarkerNode; i-- {
				if n := p.afe[i]; n.Type == ElementNode && n.DataAtom == a.A {
					p.inBodyEndTagFormatting(a.A)
					p.oe.remove(n)
					p.afe.remove(n)
					break
				}
			}
			p.reconstructActiveFormattingElements()
			p.addFormattingElement()
		case a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
			p.reconstructActiveFormattingElements()
			p.addFormattingElement()
		case a.Nobr:
			p.reconstructActiveFormattingElements()
			if p.elementInScope(defaultScope, a.Nobr) {
				p.inBodyEndTagFormatting(a.Nobr)
				p.reconstructActiveFormattingElements()
			}
			p.addFormattingElement()
		case a.Applet, a.Marquee, a.Object:
			p.reconstructActiveFormattingElements()
			p.addElement()
			p.afe = append(p.afe, &scopeMarker)
			p.framesetOK = false
		case a.Table:
			if !p.quirks {
				p.popUntil(buttonScope, a.P)
			}
			p.addElement()
			p.framesetOK = false
			p.im = inTableIM
			return true
		case a.Area, a.Br, a.Embed, a.Img, a.Input, a.Keygen, a.Wbr:
			p.reconstructActiveFormattingElements()
			p.addElement()
			p.oe.pop()
			p.acknowledgeSelfClosingTag()
			if p.tok.DataAtom == a.Input {
				for _, t := range p.tok.Attr {
					if t.Key == "type" {
						if strings.ToLower(t.Val) == "hidden" {
							// Skip setting framesetOK = false
							return true
						}
					}
				}
			}
			p.framesetOK = false
		case a.Param, a.Source, a.Track:
			p.addElement()
			p.oe.pop()
			p.acknowledgeSelfClosingTag()
		case a.Hr:
			p.popUntil(buttonScope, a.P)
			p.addElement()
			p.oe.pop()
			p.acknowledgeSelfClosingTag()
			p.framesetOK = false
		case a.Image:
			p.tok.DataAtom = a.Img
			p.tok.Data = a.Img.String()
			return false
		case a.Isindex:
			if p.form != nil {
				// Ignore the token.
				return true
			}
			action := ""
			prompt := "This is a searchable index. Enter search keywords: "
			attr := []Attribute{{Key: "name", Val: "isindex"}}
			for _, t := range p.tok.Attr {
				switch t.Key {
				case "action":
					action = t.Val
				case "name":
					// Ignore the attribute.
				case "prompt":
					prompt = t.Val
				default:
					attr = append(attr, t)
				}
			}
			p.acknowledgeSelfClosingTag()
			p.popUntil(buttonScope, a.P)
			p.parseImpliedToken(StartTagToken, a.Form, a.Form.String())
			if action != "" {
				p.form.Attr = []Attribute{{Key: "action", Val: action}}
			}
			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
			p.parseImpliedToken(StartTagToken, a.Label, a.Label.String())
			p.addText(prompt)
			p.addChild(&Node{
				Type:     ElementNode,
				DataAtom: a.Input,
				Data:     a.Input.String(),
				Attr:     attr,
			})
			p.oe.pop()
			p.parseImpliedToken(EndTagToken, a.Label, a.Label.String())
			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
			p.parseImpliedToken(EndTagToken, a.Form, a.Form.String())
		case a.Textarea:
			p.addElement()
			p.setOriginalIM()
			p.framesetOK = false
			p.im = textIM
		case a.Xmp:
			p.popUntil(buttonScope, a.P)
			p.reconstructActiveFormattingElements()
			p.framesetOK = false
			p.addElement()
			p.setOriginalIM()
			p.im = textIM
		case a.Iframe:
			p.framesetOK = false
			p.addElement()
			p.setOriginalIM()
			p.im = textIM
		case a.Noembed, a.Noscript:
			p.addElement()
			p.setOriginalIM()
			p.im = textIM
		case a.Select:
			p.reconstructActiveFormattingElements()
			p.addElement()
			p.framesetOK = false
			p.im = inSelectIM
			return true
		case a.Optgroup, a.Option:
			if p.top().DataAtom == a.Option {
				p.oe.pop()
			}
			p.reconstructActiveFormattingElements()
			p.addElement()
		case a.Rp, a.Rt:
			if p.elementInScope(defaultScope, a.Ruby) {
				p.generateImpliedEndTags()
			}
			p.addElement()
		case a.Math, a.Svg:
			p.reconstructActiveFormattingElements()
			if p.tok.DataAtom == a.Math {
				adjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)
			} else {
				adjustAttributeNames(p.tok.Attr, svgAttributeAdjustments)
			}
			adjustForeignAttributes(p.tok.Attr)
			p.addElement()
			p.top().Namespace = p.tok.Data
			if p.hasSelfClosingToken {
				p.oe.pop()
				p.acknowledgeSelfClosingTag()
			}
			return true
		case a.Caption, a.Col, a.Colgroup, a.Frame, a.Head, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
			// Ignore the token.
		default:
			p.reconstructActiveFormattingElements()
			p.addElement()
		}
	case EndTagToken:
		switch p.tok.DataAtom {
		case a.Body:
			if p.elementInScope(defaultScope, a.Body) {
				p.im = afterBodyIM
			}
		case a.Html:
			if p.elementInScope(defaultScope, a.Body) {
				p.parseImpliedToken(EndTagToken, a.Body, a.Body.String())
				return false
			}
			return true
		case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul:
			p.popUntil(defaultScope, p.tok.DataAtom)
		case a.Form:
			node := p.form
			p.form = nil
			i := p.indexOfElementInScope(defaultScope, a.Form)
			if node == nil || i == -1 || p.oe[i] != node {
				// Ignore the token.
				return true
			}
			p.generateImpliedEndTags()
			p.oe.remove(node)
		case a.P:
			if !p.elementInScope(buttonScope, a.P) {
				p.parseImpliedToken(StartTagToken, a.P, a.P.String())
			}
			p.popUntil(buttonScope, a.P)
		case a.Li:
			p.popUntil(listItemScope, a.Li)
		case a.Dd, a.Dt:
			p.popUntil(defaultScope, p.tok.DataAtom)
		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
			p.popUntil(defaultScope, a.H1, a.H2, a.H3, a.H4, a.H5, a.H6)
		case a.A, a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.Nobr, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
			p.inBodyEndTagFormatting(p.tok.DataAtom)
		case a.Applet, a.Marquee, a.Object:
			if p.popUntil(defaultScope, p.tok.DataAtom) {
				p.clearActiveFormattingElements()
			}
		case a.Br:
			p.tok.Type = StartTagToken
			return false
		default:
			p.inBodyEndTagOther(p.tok.DataAtom)
		}
	case CommentToken:
		p.addChild(&Node{
			Type: CommentNode,
			Data: p.tok.Data,
		})
	}

	return true
}

// inBodyEndTagFormatting handles an end tag for a formatting element
// (e.g. </a>, </b>, </nobr>) while in the "in body" insertion mode.
//
// This is the "adoption agency" algorithm, described at
// https://html.spec.whatwg.org/multipage/syntax.html#adoptionAgency
//
// It reconciles the stack of open elements (p.oe) with the list of active
// formatting elements (p.afe) when formatting tags are mis-nested, cloning
// formatting elements and reparenting nodes as the spec prescribes.
//
// TODO: this is a fairly literal line-by-line translation of that algorithm.
// Once the code successfully parses the comprehensive test suite, we should
// refactor this code to be more idiomatic.
func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) {
	// Steps 1-4. The outer loop. The spec caps the algorithm at 8 iterations.
	for i := 0; i < 8; i++ {
		// Step 5. Find the formatting element: the most recent entry in the
		// active formatting elements list matching tagAtom, stopping at a
		// scope marker.
		var formattingElement *Node
		for j := len(p.afe) - 1; j >= 0; j-- {
			if p.afe[j].Type == scopeMarkerNode {
				break
			}
			if p.afe[j].DataAtom == tagAtom {
				formattingElement = p.afe[j]
				break
			}
		}
		if formattingElement == nil {
			// No matching formatting element: fall back to the
			// "any other end tag" steps.
			p.inBodyEndTagOther(tagAtom)
			return
		}
		// If the formatting element is not on the stack of open elements,
		// it is stale: drop it from the active formatting list and stop.
		feIndex := p.oe.index(formattingElement)
		if feIndex == -1 {
			p.afe.remove(formattingElement)
			return
		}
		if !p.elementInScope(defaultScope, tagAtom) {
			// Ignore the tag.
			return
		}

		// Steps 9-10. Find the furthest block: the topmost special element
		// above the formatting element on the stack of open elements.
		var furthestBlock *Node
		for _, e := range p.oe[feIndex:] {
			if isSpecialElement(e) {
				furthestBlock = e
				break
			}
		}
		if furthestBlock == nil {
			// Simple case: nothing special above the formatting element.
			// Pop the stack up to and including it and remove it from the
			// active formatting list.
			e := p.oe.pop()
			for e != formattingElement {
				e = p.oe.pop()
			}
			p.afe.remove(e)
			return
		}

		// Steps 11-12. Find the common ancestor and bookmark node.
		commonAncestor := p.oe[feIndex-1]
		bookmark := p.afe.index(formattingElement)

		// Step 13. The inner loop. Find the lastNode to reparent.
		lastNode := furthestBlock
		node := furthestBlock
		x := p.oe.index(node)
		// Steps 13.1-13.2
		for j := 0; j < 3; j++ {
			// Step 13.3. Move to the element immediately below node on the
			// stack of open elements.
			x--
			node = p.oe[x]
			// Step 13.4 - 13.5. Nodes not in the active formatting list are
			// removed from the open-elements stack and skipped.
			if p.afe.index(node) == -1 {
				p.oe.remove(node)
				continue
			}
			// Step 13.6.
			if node == formattingElement {
				break
			}
			// Step 13.7. Replace node with a clone in both lists.
			clone := node.clone()
			p.afe[p.afe.index(node)] = clone
			p.oe[p.oe.index(node)] = clone
			node = clone
			// Step 13.8.
			if lastNode == furthestBlock {
				bookmark = p.afe.index(node) + 1
			}
			// Step 13.9. Detach lastNode and reparent it under node.
			if lastNode.Parent != nil {
				lastNode.Parent.RemoveChild(lastNode)
			}
			node.AppendChild(lastNode)
			// Step 13.10.
			lastNode = node
		}

		// Step 14. Reparent lastNode to the common ancestor,
		// or for misnested table nodes, to the foster parent.
		if lastNode.Parent != nil {
			lastNode.Parent.RemoveChild(lastNode)
		}
		switch commonAncestor.DataAtom {
		case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
			p.fosterParent(lastNode)
		default:
			commonAncestor.AppendChild(lastNode)
		}

		// Steps 15-17. Reparent nodes from the furthest block's children
		// to a clone of the formatting element.
		clone := formattingElement.clone()
		reparentChildren(clone, furthestBlock)
		furthestBlock.AppendChild(clone)

		// Step 18. Fix up the list of active formatting elements.
		if oldLoc := p.afe.index(formattingElement); oldLoc != -1 && oldLoc < bookmark {
			// Move the bookmark with the rest of the list.
			bookmark--
		}
		p.afe.remove(formattingElement)
		p.afe.insert(bookmark, clone)

		// Step 19. Fix up the stack of open elements.
		p.oe.remove(formattingElement)
		p.oe.insert(p.oe.index(furthestBlock)+1, clone)
	}
}

// inBodyEndTagOther performs the "any other end tag" algorithm for inBodyIM.
// "Any other end tag" handling from 12.2.5.5 The rules for parsing tokens in foreign content
// https://html.spec.whatwg.org/multipage/syntax.html#parsing-main-inforeign
func (p *parser) inBodyEndTagOther(tagAtom a.Atom) {
	// Walk the stack of open elements from the innermost element outward.
	for idx := len(p.oe) - 1; idx >= 0; idx-- {
		node := p.oe[idx]
		if node.DataAtom == tagAtom {
			// Matching element found: pop it and everything above it.
			p.oe = p.oe[:idx]
			return
		}
		if isSpecialElement(node) {
			// A special element ends the search; the token is ignored.
			return
		}
	}
}

// Section 12.2.5.4.8.
func textIM(p *parser) bool {
	switch p.tok.Type {
	case ErrorToken:
		p.oe.pop()
	case TextToken:
		d := p.tok.Data
		if n := p.oe.top(); n.DataAtom == a.Textarea && n.FirstChild == nil {
			// Ignore a newline at the start of a